| author | Alex Williamson <alex.williamson@redhat.com> | 2013-09-04 13:25:44 -0400 |
|---|---|---|
| committer | Alex Williamson <alex.williamson@redhat.com> | 2013-09-04 13:25:44 -0400 |
| commit | 3bc4f3993b93dbf1f6402e2034a2e20eb07db807 (patch) | |
| tree | 592283e59e121b76355836295d6016fe33cfc5d1 /tools/perf/util | |
| parent | 17638db1b88184d8895f3f4551c936d7480a1d3f (diff) | |
| parent | cb3e4330e697dffaf3d9cefebc9c7e7d39c89f2e (diff) | |
Merge remote branch 'origin/master' into next-merge
Diffstat (limited to 'tools/perf/util')
55 files changed, 2436 insertions, 766 deletions
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index d102716c43a1..bfc5a27597d6 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -110,10 +110,10 @@ static int jump__parse(struct ins_operands *ops)
 {
     const char *s = strchr(ops->raw, '+');
 
-    ops->target.addr = strtoll(ops->raw, NULL, 16);
+    ops->target.addr = strtoull(ops->raw, NULL, 16);
 
     if (s++ != NULL)
-        ops->target.offset = strtoll(s, NULL, 16);
+        ops->target.offset = strtoull(s, NULL, 16);
     else
         ops->target.offset = UINT64_MAX;
 
@@ -821,11 +821,55 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
     if (dl == NULL)
         return -1;
 
+    if (dl->ops.target.offset == UINT64_MAX)
+        dl->ops.target.offset = dl->ops.target.addr -
+                                map__rip_2objdump(map, sym->start);
+
+    /*
+     * kcore has no symbols, so add the call target name if it is on the
+     * same map.
+     */
+    if (dl->ins && ins__is_call(dl->ins) && !dl->ops.target.name) {
+        struct symbol *s;
+        u64 ip = dl->ops.target.addr;
+
+        if (ip >= map->start && ip <= map->end) {
+            ip = map->map_ip(map, ip);
+            s = map__find_symbol(map, ip, NULL);
+            if (s && s->start == ip)
+                dl->ops.target.name = strdup(s->name);
+        }
+    }
+
     disasm__add(&notes->src->source, dl);
 
     return 0;
 }
 
+static void delete_last_nop(struct symbol *sym)
+{
+    struct annotation *notes = symbol__annotation(sym);
+    struct list_head *list = &notes->src->source;
+    struct disasm_line *dl;
+
+    while (!list_empty(list)) {
+        dl = list_entry(list->prev, struct disasm_line, node);
+
+        if (dl->ins && dl->ins->ops) {
+            if (dl->ins->ops != &nop_ops)
+                return;
+        } else {
+            if (!strstr(dl->line, " nop ") &&
+                !strstr(dl->line, " nopl ") &&
+                !strstr(dl->line, " nopw "))
+                return;
+        }
+
+        list_del(&dl->node);
+        disasm_line__free(dl);
+    }
+}
+
 int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
 {
     struct dso *dso = map->dso;
@@ -864,7 +908,8 @@ fallback:
         free_filename = false;
     }
 
-    if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
+    if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
+        !dso__is_kcore(dso)) {
         char bf[BUILD_ID_SIZE * 2 + 16] = " with build id ";
         char *build_id_msg = NULL;
 
@@ -898,7 +943,7 @@ fallback:
     snprintf(command, sizeof(command),
          "%s %s%s --start-address=0x%016" PRIx64
          " --stop-address=0x%016" PRIx64
-         " -d %s %s -C %s|grep -v %s|expand",
+         " -d %s %s -C %s 2>/dev/null|grep -v %s|expand",
          objdump_path ? objdump_path : "objdump",
          disassembler_style ? "-M " : "",
          disassembler_style ? disassembler_style : "",
@@ -918,6 +963,13 @@ fallback:
         if (symbol__parse_objdump_line(sym, map, file, privsize) < 0)
             break;
 
+    /*
+     * kallsyms does not have symbol sizes so there may a nop at the end.
+     * Remove it.
+     */
+    if (dso__is_kcore(dso))
+        delete_last_nop(sym);
+
     pclose(file);
out_free_filename:
     if (free_filename)
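Note: the annotate.c change above trims trailing nop padding when annotating from kcore, since kallsyms provides no symbol sizes. Below is a minimal standalone sketch of that trimming idea only; it is not the perf code, and all names, addresses, and the line format are hypothetical.

```c
/* Illustrative sketch (not the perf code): trim trailing "nop" lines from a
 * disassembly listing, mirroring the idea behind delete_last_nop() above. */
#include <stdio.h>
#include <string.h>

static int is_nop_line(const char *line)
{
    return strstr(line, " nop ") || strstr(line, " nopl ") ||
           strstr(line, " nopw ");
}

int main(void)
{
    const char *lines[] = {
        "  ffffffff81000001: mov %rdi,%rax ",
        "  ffffffff81000004: ret ",
        "  ffffffff81000005: nop ",
        "  ffffffff81000006: nopl 0x0(%rax) ",
    };
    size_t n = sizeof(lines) / sizeof(lines[0]);

    /* Walk backwards, dropping nops that only pad out to the next symbol. */
    while (n && is_nop_line(lines[n - 1]))
        n--;

    for (size_t i = 0; i < n; i++)
        printf("%s\n", lines[i]);
    return 0;
}
```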
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 5295625c0c00..fb584092eb88 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -18,13 +18,14 @@
 
 int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
                union perf_event *event,
-               struct perf_sample *sample __maybe_unused,
+               struct perf_sample *sample,
                struct perf_evsel *evsel __maybe_unused,
                struct machine *machine)
 {
     struct addr_location al;
     u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-    struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
+    struct thread *thread = machine__findnew_thread(machine, sample->pid,
+                                                    sample->pid);
 
     if (thread == NULL) {
         pr_err("problem processing %d event, skipping it.\n",
@@ -33,7 +34,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
     }
 
     thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
-                  event->ip.ip, &al);
+                  sample->ip, &al);
 
     if (al.map != NULL)
         al.map->dso->hit = 1;
@@ -47,7 +48,9 @@ static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused,
                        __maybe_unused,
                        struct machine *machine)
 {
-    struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
+    struct thread *thread = machine__findnew_thread(machine,
+                                                    event->fork.pid,
+                                                    event->fork.tid);
 
     dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
             event->fork.ppid, event->fork.ptid);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 42b6a632fe7b..482f68081cd8 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -15,19 +15,12 @@
 #include <errno.h>
 #include <math.h>
 
+#include "hist.h"
 #include "util.h"
 #include "callchain.h"
 
 __thread struct callchain_cursor callchain_cursor;
 
-bool ip_callchain__valid(struct ip_callchain *chain,
-             const union perf_event *event)
-{
-    unsigned int chain_size = event->header.size;
-    chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
-    return chain->nr * sizeof(u64) <= chain_size;
-}
-
 #define chain_for_each_child(child, parent) \
     list_for_each_entry(child, &parent->children, siblings)
 
@@ -327,7 +320,8 @@ append_chain(struct callchain_node *root,
     /*
      * Lookup in the current node
      * If we have a symbol, then compare the start to match
-     * anywhere inside a function.
+     * anywhere inside a function, unless function
+     * mode is disabled.
      */
     list_for_each_entry(cnode, &root->val, list) {
         struct callchain_cursor_node *node;
@@ -339,7 +333,8 @@ append_chain(struct callchain_node *root,
 
         sym = node->sym;
 
-        if (cnode->ms.sym && sym) {
+        if (cnode->ms.sym && sym &&
+            callchain_param.key == CCKEY_FUNCTION) {
             if (cnode->ms.sym->start != sym->start)
                 break;
         } else if (cnode->ip != node->ip)
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 3ee9f67d5af0..2b585bc308cf 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -41,12 +41,18 @@ struct callchain_param;
 typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *,
                   u64, struct callchain_param *);
 
+enum chain_key {
+    CCKEY_FUNCTION,
+    CCKEY_ADDRESS
+};
+
 struct callchain_param {
     enum chain_mode mode;
     u32 print_limit;
     double min_percent;
     sort_chain_func_t sort;
     enum chain_order order;
+    enum chain_key key;
 };
 
 struct callchain_list {
@@ -103,11 +109,6 @@ int callchain_append(struct callchain_root *root,
 int callchain_merge(struct callchain_cursor *cursor,
             struct callchain_root *dst, struct callchain_root *src);
 
-struct ip_callchain;
-union perf_event;
-
-bool ip_callchain__valid(struct ip_callchain *chain,
-             const union perf_event *event);
 /*
  * Initialize a cursor before adding entries inside, but keep
  * the previously allocated entries as a cache.
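Note: the new `chain_key` above selects whether call chain entries collapse by enclosing function or by exact address. A minimal standalone sketch of that distinction follows; it uses hypothetical types and values and is not the perf implementation.

```c
/* Illustrative sketch (not the perf code): a "function vs. address" collapse
 * key decides whether two call chain entries count as the same node. */
#include <stdbool.h>
#include <stdio.h>

enum chain_key { CCKEY_FUNCTION, CCKEY_ADDRESS };

struct entry {
    unsigned long long ip;        /* sampled address           */
    unsigned long long sym_start; /* start of enclosing symbol */
};

static bool same_node(const struct entry *a, const struct entry *b,
                      enum chain_key key)
{
    if (key == CCKEY_FUNCTION && a->sym_start && b->sym_start)
        return a->sym_start == b->sym_start;
    return a->ip == b->ip; /* CCKEY_ADDRESS, or no symbol info */
}

int main(void)
{
    struct entry a = { .ip = 0x401010, .sym_start = 0x401000 };
    struct entry b = { .ip = 0x401024, .sym_start = 0x401000 };

    printf("function key: %d\n", same_node(&a, &b, CCKEY_FUNCTION)); /* 1 */
    printf("address key:  %d\n", same_node(&a, &b, CCKEY_ADDRESS));  /* 0 */
    return 0;
}
```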
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 9bed02e5fb3d..b123bb9d6f55 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -41,7 +41,7 @@ static inline int cpu_map__nr(const struct cpu_map *map)
     return map ? map->nr : 1;
 }
 
-static inline bool cpu_map__all(const struct cpu_map *map)
+static inline bool cpu_map__empty(const struct cpu_map *map)
 {
     return map ? map->map[0] == -1 : true;
 }
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index c4374f07603c..e3c1ff8512c8 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -78,6 +78,8 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
              symbol_conf.symfs, build_id_hex, build_id_hex + 2);
         break;
 
+    case DSO_BINARY_TYPE__VMLINUX:
+    case DSO_BINARY_TYPE__GUEST_VMLINUX:
     case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
         snprintf(file, size, "%s%s",
              symbol_conf.symfs, dso->long_name);
@@ -93,11 +95,14 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
              dso->long_name);
         break;
 
+    case DSO_BINARY_TYPE__KCORE:
+    case DSO_BINARY_TYPE__GUEST_KCORE:
+        snprintf(file, size, "%s", dso->long_name);
+        break;
+
     default:
     case DSO_BINARY_TYPE__KALLSYMS:
-    case DSO_BINARY_TYPE__VMLINUX:
     case DSO_BINARY_TYPE__GUEST_KALLSYMS:
-    case DSO_BINARY_TYPE__GUEST_VMLINUX:
     case DSO_BINARY_TYPE__JAVA_JIT:
     case DSO_BINARY_TYPE__NOT_FOUND:
         ret = -1;
@@ -419,6 +424,7 @@ struct dso *dso__new(const char *name)
         dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
         dso->data_type = DSO_BINARY_TYPE__NOT_FOUND;
         dso->loaded = 0;
+        dso->rel = 0;
         dso->sorted_by_name = 0;
         dso->has_build_id = 0;
         dso->kernel = DSO_TYPE_USER;
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index d51aaf272c68..b793053335d6 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/rbtree.h>
+#include <stdbool.h>
 #include "types.h"
 #include "map.h"
 
@@ -20,6 +21,8 @@ enum dso_binary_type {
     DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
     DSO_BINARY_TYPE__GUEST_KMODULE,
     DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
+    DSO_BINARY_TYPE__KCORE,
+    DSO_BINARY_TYPE__GUEST_KCORE,
     DSO_BINARY_TYPE__NOT_FOUND,
 };
 
@@ -84,6 +87,7 @@ struct dso {
     u8 lname_alloc:1;
     u8 sorted_by_name;
     u8 loaded;
+    u8 rel;
     u8 build_id[BUILD_ID_SIZE];
     const char *short_name;
     char *long_name;
@@ -146,4 +150,17 @@ size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
 size_t dso__fprintf_symbols_by_name(struct dso *dso,
                     enum map_type type, FILE *fp);
 size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp);
+
+static inline bool dso__is_vmlinux(struct dso *dso)
+{
+    return dso->data_type == DSO_BINARY_TYPE__VMLINUX ||
+           dso->data_type == DSO_BINARY_TYPE__GUEST_VMLINUX;
+}
+
+static inline bool dso__is_kcore(struct dso *dso)
+{
+    return dso->data_type == DSO_BINARY_TYPE__KCORE ||
+           dso->data_type == DSO_BINARY_TYPE__GUEST_KCORE;
+}
+
 #endif /* __PERF_DSO */
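Note: the kcore support above rests on tagging a DSO with a binary type and testing it through small inline predicates. A minimal sketch of that pattern under made-up names (not the perf code):

```c
/* Illustrative sketch (not the perf code): a type tag plus inline predicates,
 * as dso__is_kcore()/dso__is_vmlinux() do above, lets callers branch without
 * open-coding the enum checks everywhere. */
#include <stdbool.h>
#include <stdio.h>

enum binary_type { BT_VMLINUX, BT_KALLSYMS, BT_KCORE };

struct image {
    enum binary_type type;
    const char *name;
};

static inline bool image_is_kcore(const struct image *img)
{
    return img->type == BT_KCORE;
}

int main(void)
{
    struct image img = { .type = BT_KCORE, .name = "/proc/kcore" };

    /* e.g. skip the build-id check for kcore, as the annotate change does */
    if (image_is_kcore(&img))
        printf("%s: skipping build-id check\n", img.name);
    return 0;
}
```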
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 5cd13d768cec..8d51f21107aa 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -595,6 +595,7 @@ void thread__find_addr_map(struct thread *self,
               struct addr_location *al)
 {
     struct map_groups *mg = &self->mg;
+    bool load_map = false;
 
     al->thread = self;
     al->addr = addr;
@@ -609,11 +610,13 @@ void thread__find_addr_map(struct thread *self,
     if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
         al->level = 'k';
         mg = &machine->kmaps;
+        load_map = true;
     } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
         al->level = '.';
     } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
         al->level = 'g';
         mg = &machine->kmaps;
+        load_map = true;
     } else {
         /*
          * 'u' means guest os user space.
@@ -654,18 +657,25 @@ try_again:
             mg = &machine->kmaps;
             goto try_again;
         }
-    } else
+    } else {
+        /*
+         * Kernel maps might be changed when loading symbols so loading
+         * must be done prior to using kernel maps.
+         */
+        if (load_map)
+            map__load(al->map, machine->symbol_filter);
         al->addr = al->map->map_ip(al->map, al->addr);
+    }
 }
 
 void thread__find_addr_location(struct thread *thread, struct machine *machine,
                 u8 cpumode, enum map_type type, u64 addr,
-                struct addr_location *al,
-                symbol_filter_t filter)
+                struct addr_location *al)
 {
     thread__find_addr_map(thread, machine, cpumode, type, addr, al);
     if (al->map != NULL)
-        al->sym = map__find_symbol(al->map, al->addr, filter);
+        al->sym = map__find_symbol(al->map, al->addr,
+                       machine->symbol_filter);
     else
         al->sym = NULL;
 }
@@ -673,11 +683,11 @@ void thread__find_addr_location(struct thread *thread, struct machine *machine,
 int perf_event__preprocess_sample(const union perf_event *event,
                   struct machine *machine,
                   struct addr_location *al,
-                  struct perf_sample *sample,
-                  symbol_filter_t filter)
+                  struct perf_sample *sample)
 {
     u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-    struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
+    struct thread *thread = machine__findnew_thread(machine, sample->pid,
+                                                    sample->pid);
 
     if (thread == NULL)
         return -1;
@@ -686,7 +696,7 @@ int perf_event__preprocess_sample(const union perf_event *event,
         !strlist__has_entry(symbol_conf.comm_list, thread->comm))
         goto out_filtered;
 
-    dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+    dump_printf(" ... thread: %s:%d\n", thread->comm, thread->tid);
     /*
      * Have we already created the kernel maps for this machine?
      *
@@ -699,7 +709,7 @@ int perf_event__preprocess_sample(const union perf_event *event,
         machine__create_kernel_maps(machine);
 
     thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
-                  event->ip.ip, al);
+                  sample->ip, al);
     dump_printf(" ...... dso: %s\n",
             al->map ? al->map->dso->long_name :
             al->level == 'H' ? "[hypervisor]" : "<not found>");
@@ -717,7 +727,8 @@ int perf_event__preprocess_sample(const union perf_event *event,
                       dso->long_name)))))
             goto out_filtered;
 
-        al->sym = map__find_symbol(al->map, al->addr, filter);
+        al->sym = map__find_symbol(al->map, al->addr,
+                       machine->symbol_filter);
     }
 
     if (symbol_conf.sym_list &&
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 181389535c0c..93130d856bf0 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -8,16 +8,6 @@
 #include "map.h"
 #include "build-id.h"
 
-/*
- * PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
- */
-struct ip_event {
-    struct perf_event_header header;
-    u64 ip;
-    u32 pid, tid;
-    unsigned char __more_data[];
-};
-
 struct mmap_event {
     struct perf_event_header header;
     u32 pid, tid;
@@ -63,7 +53,8 @@ struct read_event {
     (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
      PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
      PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
-     PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
+     PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \
+     PERF_SAMPLE_IDENTIFIER)
 
 struct sample_event {
     struct perf_event_header header;
@@ -71,6 +62,7 @@ struct sample_event {
 };
 
 struct regs_dump {
+    u64 abi;
     u64 *regs;
 };
 
@@ -80,6 +72,23 @@ struct stack_dump {
     char *data;
 };
 
+struct sample_read_value {
+    u64 value;
+    u64 id;
+};
+
+struct sample_read {
+    u64 time_enabled;
+    u64 time_running;
+    union {
+        struct {
+            u64 nr;
+            struct sample_read_value *values;
+        } group;
+        struct sample_read_value one;
+    };
+};
+
 struct perf_sample {
     u64 ip;
     u32 pid, tid;
@@ -97,6 +106,7 @@ struct perf_sample {
     struct branch_stack *branch_stack;
     struct regs_dump user_regs;
     struct stack_dump user_stack;
+    struct sample_read read;
 };
 
 #define PERF_MEM_DATA_SRC_NONE \
@@ -116,7 +126,7 @@ struct build_id_event {
 enum perf_user_event_type { /* above any possible kernel type */
     PERF_RECORD_USER_TYPE_START = 64,
     PERF_RECORD_HEADER_ATTR = 64,
-    PERF_RECORD_HEADER_EVENT_TYPE = 65,
+    PERF_RECORD_HEADER_EVENT_TYPE = 65, /* depreceated */
     PERF_RECORD_HEADER_TRACING_DATA = 66,
     PERF_RECORD_HEADER_BUILD_ID = 67,
     PERF_RECORD_FINISHED_ROUND = 68,
@@ -148,7 +158,6 @@ struct tracing_data_event {
 
 union perf_event {
     struct perf_event_header header;
-    struct ip_event ip;
     struct mmap_event mmap;
     struct comm_event comm;
     struct fork_event fork;
@@ -216,12 +225,14 @@ struct addr_location;
 int perf_event__preprocess_sample(const union perf_event *self,
                   struct machine *machine,
                   struct addr_location *al,
-                  struct perf_sample *sample,
-                  symbol_filter_t filter);
+                  struct perf_sample *sample);
 
 const char *perf_event__name(unsigned int id);
 
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
+                     u64 sample_regs_user, u64 read_format);
 int perf_event__synthesize_sample(union perf_event *event, u64 type,
+                  u64 sample_regs_user, u64 read_format,
                   const struct perf_sample *sample,
                   bool swapped);
 
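Note: PERF_SAMPLE_IDENTIFIER, added to the sample mask above, places the event id at a fixed position so a parser need not know which other sample fields each event enabled. An illustrative sketch with a made-up sample body follows; it is not the perf parser.

```c
/* Illustrative sketch (not the perf code): with an "identifier first" layout,
 * the event id can be read without knowing the rest of the sample_type. */
#include <stdint.h>
#include <stdio.h>

/* A fake PERF_RECORD_SAMPLE body: id first, then whatever else follows. */
static const uint64_t sample_body[] = {
    0x1234,                /* identifier: always at index 0            */
    0xffffffff81000000ULL, /* instruction pointer                      */
    0x0000002a0000002aULL, /* pid/tid packed into one u64              */
};

static uint64_t sample_id(const uint64_t *body)
{
    return body[0]; /* no per-event sample_type needed to find the id */
}

int main(void)
{
    printf("event id: %#llx\n",
           (unsigned long long)sample_id(sample_body));
    return 0;
}
```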
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 8065ce8fa9a5..b8727ae45e3b 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -14,6 +14,7 @@
 #include "target.h"
 #include "evlist.h"
 #include "evsel.h"
+#include "debug.h"
 #include <unistd.h>
 
 #include "parse-events.h"
@@ -48,26 +49,19 @@ struct perf_evlist *perf_evlist__new(void)
     return evlist;
 }
 
-void perf_evlist__config(struct perf_evlist *evlist,
-            struct perf_record_opts *opts)
+/**
+ * perf_evlist__set_id_pos - set the positions of event ids.
+ * @evlist: selected event list
+ *
+ * Events with compatible sample types all have the same id_pos
+ * and is_pos. For convenience, put a copy on evlist.
+ */
+void perf_evlist__set_id_pos(struct perf_evlist *evlist)
 {
-    struct perf_evsel *evsel;
-    /*
-     * Set the evsel leader links before we configure attributes,
-     * since some might depend on this info.
-     */
-    if (opts->group)
-        perf_evlist__set_leader(evlist);
-
-    if (evlist->cpus->map[0] < 0)
-        opts->no_inherit = true;
-
-    list_for_each_entry(evsel, &evlist->entries, node) {
-        perf_evsel__config(evsel, opts);
+    struct perf_evsel *first = perf_evlist__first(evlist);
 
-        if (evlist->nr_entries > 1)
-            perf_evsel__set_sample_id(evsel);
-    }
+    evlist->id_pos = first->id_pos;
+    evlist->is_pos = first->is_pos;
 }
 
 static void perf_evlist__purge(struct perf_evlist *evlist)
@@ -100,15 +94,20 @@ void perf_evlist__delete(struct perf_evlist *evlist)
 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 {
     list_add_tail(&entry->node, &evlist->entries);
-    ++evlist->nr_entries;
+    if (!evlist->nr_entries++)
+        perf_evlist__set_id_pos(evlist);
 }
 
 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                    struct list_head *list,
                    int nr_entries)
 {
+    bool set_id_pos = !evlist->nr_entries;
+
     list_splice_tail(list, &evlist->entries);
     evlist->nr_entries += nr_entries;
+    if (set_id_pos)
+        perf_evlist__set_id_pos(evlist);
 }
 
 void __perf_evlist__set_leader(struct list_head *list)
@@ -209,6 +208,21 @@ perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
     return NULL;
 }
 
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
+                     const char *name)
+{
+    struct perf_evsel *evsel;
+
+    list_for_each_entry(evsel, &evlist->entries, node) {
+        if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
+            (strcmp(evsel->name, name) == 0))
+            return evsel;
+    }
+
+    return NULL;
+}
+
 int perf_evlist__add_newtp(struct perf_evlist *evlist,
                const char *sys, const char *name, void *handler)
 {
@@ -232,7 +246,7 @@ void perf_evlist__disable(struct perf_evlist *evlist)
 
     for (cpu = 0; cpu < nr_cpus; cpu++) {
         list_for_each_entry(pos, &evlist->entries, node) {
-            if (!perf_evsel__is_group_leader(pos))
+            if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                 continue;
             for (thread = 0; thread < nr_threads; thread++)
                 ioctl(FD(pos, cpu, thread),
@@ -250,7 +264,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
 
     for (cpu = 0; cpu < nr_cpus; cpu++) {
         list_for_each_entry(pos, &evlist->entries, node) {
-            if (!perf_evsel__is_group_leader(pos))
+            if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                 continue;
             for (thread = 0; thread < nr_threads; thread++)
                 ioctl(FD(pos, cpu, thread),
@@ -259,6 +273,44 @@ void perf_evlist__enable(struct perf_evlist *evlist)
     }
 }
 
+int perf_evlist__disable_event(struct perf_evlist *evlist,
+                   struct perf_evsel *evsel)
+{
+    int cpu, thread, err;
+
+    if (!evsel->fd)
+        return 0;
+
+    for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+        for (thread = 0; thread < evlist->threads->nr; thread++) {
+            err = ioctl(FD(evsel, cpu, thread),
+                    PERF_EVENT_IOC_DISABLE, 0);
+            if (err)
+                return err;
+        }
+    }
+    return 0;
+}
+
+int perf_evlist__enable_event(struct perf_evlist *evlist,
+                  struct perf_evsel *evsel)
+{
+    int cpu, thread, err;
+
+    if (!evsel->fd)
+        return -EINVAL;
+
+    for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+        for (thread = 0; thread < evlist->threads->nr; thread++) {
+            err = ioctl(FD(evsel, cpu, thread),
+                    PERF_EVENT_IOC_ENABLE, 0);
+            if (err)
+                return err;
+        }
+    }
+    return 0;
+}
+
 static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
     int nr_cpus = cpu_map__nr(evlist->cpus);
@@ -302,6 +354,24 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
 {
     u64 read_data[4] = { 0, };
     int id_idx = 1; /* The first entry is the counter value */
+    u64 id;
+    int ret;
+
+    ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
+    if (!ret)
+        goto add;
+
+    if (errno != ENOTTY)
+        return -1;
+
+    /* Legacy way to get event id.. All hail to old kernels! */
+
+    /*
+     * This way does not work with group format read, so bail
+     * out in that case.
+     */
+    if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
+        return -1;
 
     if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
         read(fd, &read_data, sizeof(read_data)) == -1)
@@ -312,25 +382,39 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
     if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
         ++id_idx;
 
-    perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
+    id = read_data[id_idx];
+
+add:
+    perf_evlist__id_add(evlist, evsel, cpu, thread, id);
     return 0;
 }
 
-struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
 {
     struct hlist_head *head;
     struct perf_sample_id *sid;
     int hash;
 
-    if (evlist->nr_entries == 1)
-        return perf_evlist__first(evlist);
-
     hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
     head = &evlist->heads[hash];
 
     hlist_for_each_entry(sid, head, node)
         if (sid->id == id)
-            return sid->evsel;
+            return sid;
+
+    return NULL;
+}
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+{
+    struct perf_sample_id *sid;
+
+    if (evlist->nr_entries == 1)
+        return perf_evlist__first(evlist);
+
+    sid = perf_evlist__id2sid(evlist, id);
+    if (sid)
+        return sid->evsel;
 
     if (!perf_evlist__sample_id_all(evlist))
         return perf_evlist__first(evlist);
@@ -338,6 +422,55 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
     return NULL;
 }
 
+static int perf_evlist__event2id(struct perf_evlist *evlist,
+                 union perf_event *event, u64 *id)
+{
+    const u64 *array = event->sample.array;
+    ssize_t n;
+
+    n = (event->header.size - sizeof(event->header)) >> 3;
+
+    if (event->header.type == PERF_RECORD_SAMPLE) {
+        if (evlist->id_pos >= n)
+            return -1;
+        *id = array[evlist->id_pos];
+    } else {
+        if (evlist->is_pos > n)
+            return -1;
+        n -= evlist->is_pos;
+        *id = array[n];
+    }
+    return 0;
+}
+
+static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
+                           union perf_event *event)
+{
+    struct hlist_head *head;
+    struct perf_sample_id *sid;
+    int hash;
+    u64 id;
+
+    if (evlist->nr_entries == 1)
+        return perf_evlist__first(evlist);
+
+    if (perf_evlist__event2id(evlist, event, &id))
+        return NULL;
+
+    /* Synthesized events have an id of zero */
+    if (!id)
+        return perf_evlist__first(evlist);
+
+    hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
+    head = &evlist->heads[hash];
+
+    hlist_for_each_entry(sid, head, node) {
+        if (sid->id == id)
+            return sid->evsel;
+    }
+    return NULL;
+}
+
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 {
     struct perf_mmap *md = &evlist->mmap[idx];
@@ -403,16 +536,20 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
     return event;
 }
 
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
+{
+    if (evlist->mmap[idx].base != NULL) {
+        munmap(evlist->mmap[idx].base, evlist->mmap_len);
+        evlist->mmap[idx].base = NULL;
+    }
+}
+
 void perf_evlist__munmap(struct perf_evlist *evlist)
 {
     int i;
 
-    for (i = 0; i < evlist->nr_mmaps; i++) {
-        if (evlist->mmap[i].base != NULL) {
-            munmap(evlist->mmap[i].base, evlist->mmap_len);
-            evlist->mmap[i].base = NULL;
-        }
-    }
+    for (i = 0; i < evlist->nr_mmaps; i++)
+        __perf_evlist__munmap(evlist, i);
 
     free(evlist->mmap);
     evlist->mmap = NULL;
@@ -421,7 +558,7 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
 static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
     evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
-    if (cpu_map__all(evlist->cpus))
+    if (cpu_map__empty(evlist->cpus))
         evlist->nr_mmaps = thread_map__nr(evlist->threads);
     evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
     return evlist->mmap != NULL ? 0 : -ENOMEM;
@@ -450,6 +587,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
     int nr_cpus = cpu_map__nr(evlist->cpus);
     int nr_threads = thread_map__nr(evlist->threads);
 
+    pr_debug2("perf event ring buffer mmapped per cpu\n");
     for (cpu = 0; cpu < nr_cpus; cpu++) {
         int output = -1;
 
@@ -477,12 +615,8 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
     return 0;
 
 out_unmap:
-    for (cpu = 0; cpu < nr_cpus; cpu++) {
-        if (evlist->mmap[cpu].base != NULL) {
-            munmap(evlist->mmap[cpu].base, evlist->mmap_len);
-            evlist->mmap[cpu].base = NULL;
-        }
-    }
+    for (cpu = 0; cpu < nr_cpus; cpu++)
+        __perf_evlist__munmap(evlist, cpu);
     return -1;
 }
 
@@ -492,6 +626,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
     int thread;
     int nr_threads = thread_map__nr(evlist->threads);
 
+    pr_debug2("perf event ring buffer mmapped per thread\n");
    for (thread = 0; thread < nr_threads; thread++) {
        int output = -1;
 
@@ -517,12 +652,8 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
     return 0;
 
 out_unmap:
-    for (thread = 0; thread < nr_threads; thread++) {
-        if (evlist->mmap[thread].base != NULL) {
-            munmap(evlist->mmap[thread].base, evlist->mmap_len);
-            evlist->mmap[thread].base = NULL;
-        }
-    }
+    for (thread = 0; thread < nr_threads; thread++)
+        __perf_evlist__munmap(evlist, thread);
     return -1;
 }
 
@@ -573,7 +704,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
         return -ENOMEM;
     }
 
-    if (cpu_map__all(cpus))
+    if (cpu_map__empty(cpus))
         return perf_evlist__mmap_per_thread(evlist, prot, mask);
 
     return perf_evlist__mmap_per_cpu(evlist, prot, mask);
@@ -650,20 +781,66 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
 
 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
 {
+    struct perf_evsel *pos;
+
+    if (evlist->nr_entries == 1)
+        return true;
+
+    if (evlist->id_pos < 0 || evlist->is_pos < 0)
+        return false;
+
+    list_for_each_entry(pos, &evlist->entries, node) {
+        if (pos->id_pos != evlist->id_pos ||
+            pos->is_pos != evlist->is_pos)
+            return false;
+    }
+
+    return true;
+}
+
+u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+{
+    struct perf_evsel *evsel;
+
+    if (evlist->combined_sample_type)
+        return evlist->combined_sample_type;
+
+    list_for_each_entry(evsel, &evlist->entries, node)
+        evlist->combined_sample_type |= evsel->attr.sample_type;
+
+    return evlist->combined_sample_type;
+}
+
+u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+{
+    evlist->combined_sample_type = 0;
+    return __perf_evlist__combined_sample_type(evlist);
+}
+
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
+{
     struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
+    u64 read_format = first->attr.read_format;
+    u64 sample_type = first->attr.sample_type;
 
     list_for_each_entry_continue(pos, &evlist->entries, node) {
-        if (first->attr.sample_type != pos->attr.sample_type)
+        if (read_format != pos->attr.read_format)
             return false;
     }
 
+    /* PERF_SAMPLE_READ imples PERF_FORMAT_ID. */
+    if ((sample_type & PERF_SAMPLE_READ) &&
+        !(read_format & PERF_FORMAT_ID)) {
+        return false;
+    }
+
     return true;
 }
 
-u64 perf_evlist__sample_type(struct perf_evlist *evlist)
+u64 perf_evlist__read_format(struct perf_evlist *evlist)
 {
     struct perf_evsel *first = perf_evlist__first(evlist);
-    return first->attr.sample_type;
+    return first->attr.read_format;
 }
 
 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
@@ -692,6 +869,9 @@ u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
 
     if (sample_type & PERF_SAMPLE_CPU)
         size += sizeof(data->cpu) * 2;
+
+    if (sample_type & PERF_SAMPLE_IDENTIFIER)
+        size += sizeof(data->id);
 out:
     return size;
 }
@@ -783,13 +963,6 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
     fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
 
     /*
-     * Do a dummy execvp to get the PLT entry resolved,
-     * so we avoid the resolver overhead on the real
-     * execvp call.
-     */
-    execvp("", (char **)argv);
-
-    /*
      * Tell the parent we're ready to go
      */
     close(child_ready_pipe[1]);
@@ -838,7 +1011,7 @@ out_close_ready_pipe:
 int perf_evlist__start_workload(struct perf_evlist *evlist)
 {
     if (evlist->workload.cork_fd > 0) {
-        char bf;
+        char bf = 0;
         int ret;
         /*
          * Remove the cork, let it rip!
@@ -857,7 +1030,10 @@ int perf_evlist__start_workload(struct perf_evlist *evlist)
 int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
                   struct perf_sample *sample)
 {
-    struct perf_evsel *evsel = perf_evlist__first(evlist);
+    struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+    if (!evsel)
+        return -EFAULT;
     return perf_evsel__parse_sample(evsel, event, sample);
 }
 
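Note: perf_evlist__event2evsel() above resolves an mmapped record to its evsel from fixed id positions: id_pos u64s from the start of a sample record, is_pos u64s back from the end of a non-sample record. A standalone sketch of that lookup rule with fabricated data follows; it is not the perf code.

```c
/* Illustrative sketch (not the perf code): fixed-position id lookup in a
 * record body made of u64s, in the spirit of perf_evlist__event2id(). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t record_id(const uint64_t *body, size_t n_u64,
                          int is_sample, int id_pos, int is_pos)
{
    if (is_sample)
        return body[id_pos];          /* counted from the start */
    return body[n_u64 - is_pos];      /* counted from the end   */
}

int main(void)
{
    /* sample body with the identifier first: id_pos == 0 */
    const uint64_t sample[] = { 0xbeef, 0xffffffff81000000ULL, 42 };
    /* non-sample body with the id appended last: is_pos == 1 */
    const uint64_t other[]  = { 0x1, 0x2, 0xbeef };

    printf("sample id: %#llx\n",
           (unsigned long long)record_id(sample, 3, 1, 0, 1));
    printf("other id:  %#llx\n",
           (unsigned long long)record_id(other, 3, 0, 0, 1));
    return 0;
}
```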
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 0583d36252be..880d7139d2fb 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -32,6 +32,9 @@ struct perf_evlist {
     int nr_fds;
     int nr_mmaps;
     int mmap_len;
+    int id_pos;
+    int is_pos;
+    u64 combined_sample_type;
     struct {
         int cork_fd;
         pid_t pid;
@@ -71,6 +74,10 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
 struct perf_evsel *
 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
 
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
+                     const char *name);
+
 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
              int cpu, int thread, u64 id);
 
@@ -78,11 +85,15 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
 
 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
 
+struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
+
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
 
 int perf_evlist__open(struct perf_evlist *evlist);
 void perf_evlist__close(struct perf_evlist *evlist);
 
+void perf_evlist__set_id_pos(struct perf_evlist *evlist);
+bool perf_can_sample_identifier(void);
 void perf_evlist__config(struct perf_evlist *evlist,
              struct perf_record_opts *opts);
 
@@ -99,6 +110,11 @@ void perf_evlist__munmap(struct perf_evlist *evlist);
 void perf_evlist__disable(struct perf_evlist *evlist);
 void perf_evlist__enable(struct perf_evlist *evlist);
 
+int perf_evlist__disable_event(struct perf_evlist *evlist,
+                   struct perf_evsel *evsel);
+int perf_evlist__enable_event(struct perf_evlist *evlist,
+                  struct perf_evsel *evsel);
+
 void perf_evlist__set_selected(struct perf_evlist *evlist,
                    struct perf_evsel *evsel);
 
@@ -118,7 +134,9 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist);
 void __perf_evlist__set_leader(struct list_head *list);
 void perf_evlist__set_leader(struct perf_evlist *evlist);
 
-u64 perf_evlist__sample_type(struct perf_evlist *evlist);
+u64 perf_evlist__read_format(struct perf_evlist *evlist);
+u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
+u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
 bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);
 
@@ -127,6 +145,7 @@ int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *even
 
 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
 
 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                    struct list_head *list,
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index c9c7494506a1..3612183e2cc5 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -9,18 +9,20 @@ | |||
9 | 9 | ||
10 | #include <byteswap.h> | 10 | #include <byteswap.h> |
11 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
12 | #include "asm/bug.h" | ||
13 | #include <lk/debugfs.h> | 12 | #include <lk/debugfs.h> |
14 | #include "event-parse.h" | 13 | #include <traceevent/event-parse.h> |
14 | #include <linux/hw_breakpoint.h> | ||
15 | #include <linux/perf_event.h> | ||
16 | #include <sys/resource.h> | ||
17 | #include "asm/bug.h" | ||
15 | #include "evsel.h" | 18 | #include "evsel.h" |
16 | #include "evlist.h" | 19 | #include "evlist.h" |
17 | #include "util.h" | 20 | #include "util.h" |
18 | #include "cpumap.h" | 21 | #include "cpumap.h" |
19 | #include "thread_map.h" | 22 | #include "thread_map.h" |
20 | #include "target.h" | 23 | #include "target.h" |
21 | #include <linux/hw_breakpoint.h> | ||
22 | #include <linux/perf_event.h> | ||
23 | #include "perf_regs.h" | 24 | #include "perf_regs.h" |
25 | #include "debug.h" | ||
24 | 26 | ||
25 | static struct { | 27 | static struct { |
26 | bool sample_id_all; | 28 | bool sample_id_all; |
@@ -29,7 +31,7 @@ static struct { | |||
29 | 31 | ||
30 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) | 32 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) |
31 | 33 | ||
32 | static int __perf_evsel__sample_size(u64 sample_type) | 34 | int __perf_evsel__sample_size(u64 sample_type) |
33 | { | 35 | { |
34 | u64 mask = sample_type & PERF_SAMPLE_MASK; | 36 | u64 mask = sample_type & PERF_SAMPLE_MASK; |
35 | int size = 0; | 37 | int size = 0; |
@@ -45,6 +47,72 @@ static int __perf_evsel__sample_size(u64 sample_type) | |||
45 | return size; | 47 | return size; |
46 | } | 48 | } |
47 | 49 | ||
50 | /** | ||
51 | * __perf_evsel__calc_id_pos - calculate id_pos. | ||
52 | * @sample_type: sample type | ||
53 | * | ||
54 | * This function returns the position of the event id (PERF_SAMPLE_ID or | ||
55 | * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct | ||
56 | * sample_event. | ||
57 | */ | ||
58 | static int __perf_evsel__calc_id_pos(u64 sample_type) | ||
59 | { | ||
60 | int idx = 0; | ||
61 | |||
62 | if (sample_type & PERF_SAMPLE_IDENTIFIER) | ||
63 | return 0; | ||
64 | |||
65 | if (!(sample_type & PERF_SAMPLE_ID)) | ||
66 | return -1; | ||
67 | |||
68 | if (sample_type & PERF_SAMPLE_IP) | ||
69 | idx += 1; | ||
70 | |||
71 | if (sample_type & PERF_SAMPLE_TID) | ||
72 | idx += 1; | ||
73 | |||
74 | if (sample_type & PERF_SAMPLE_TIME) | ||
75 | idx += 1; | ||
76 | |||
77 | if (sample_type & PERF_SAMPLE_ADDR) | ||
78 | idx += 1; | ||
79 | |||
80 | return idx; | ||
81 | } | ||
82 | |||
83 | /** | ||
84 | * __perf_evsel__calc_is_pos - calculate is_pos. | ||
85 | * @sample_type: sample type | ||
86 | * | ||
87 | * This function returns the position (counting backwards) of the event id | ||
88 | * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event, i.e. in | ||
89 | * the id sample appended to non-sample events when sample_id_all is used. | ||
90 | */ | ||
91 | static int __perf_evsel__calc_is_pos(u64 sample_type) | ||
92 | { | ||
93 | int idx = 1; | ||
94 | |||
95 | if (sample_type & PERF_SAMPLE_IDENTIFIER) | ||
96 | return 1; | ||
97 | |||
98 | if (!(sample_type & PERF_SAMPLE_ID)) | ||
99 | return -1; | ||
100 | |||
101 | if (sample_type & PERF_SAMPLE_CPU) | ||
102 | idx += 1; | ||
103 | |||
104 | if (sample_type & PERF_SAMPLE_STREAM_ID) | ||
105 | idx += 1; | ||
106 | |||
107 | return idx; | ||
108 | } | ||
109 | |||
110 | void perf_evsel__calc_id_pos(struct perf_evsel *evsel) | ||
111 | { | ||
112 | evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type); | ||
113 | evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type); | ||
114 | } | ||
115 | |||
48 | void hists__init(struct hists *hists) | 116 | void hists__init(struct hists *hists) |
49 | { | 117 | { |
50 | memset(hists, 0, sizeof(*hists)); | 118 | memset(hists, 0, sizeof(*hists)); |
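The two helpers added in this hunk pre-compute where the event ID lives: id_pos counts u64 slots forward from the start of a sample's array, while is_pos counts backwards through the id sample appended to non-sample events; with PERF_SAMPLE_IDENTIFIER the ID is always the first (or last) word. A minimal standalone sketch, not part of the patch, of what id_pos works out to for a common sample_type (assumes only the UAPI perf_event.h header):

#include <linux/perf_event.h>
#include <assert.h>

int main(void)
{
        /* Same walk as __perf_evsel__calc_id_pos() above: with IP, TID and
         * TIME enabled, PERF_SAMPLE_ID is the fourth u64 of the sample. */
        unsigned long long t = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
                               PERF_SAMPLE_TIME | PERF_SAMPLE_ID;
        int id_pos = 0;

        if (t & PERF_SAMPLE_IP)
                id_pos++;
        if (t & PERF_SAMPLE_TID)
                id_pos++;
        if (t & PERF_SAMPLE_TIME)
                id_pos++;

        assert(id_pos == 3);    /* event->sample.array[3] holds the id */
        return 0;
}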
@@ -61,6 +129,7 @@ void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, | |||
61 | if (!(evsel->attr.sample_type & bit)) { | 129 | if (!(evsel->attr.sample_type & bit)) { |
62 | evsel->attr.sample_type |= bit; | 130 | evsel->attr.sample_type |= bit; |
63 | evsel->sample_size += sizeof(u64); | 131 | evsel->sample_size += sizeof(u64); |
132 | perf_evsel__calc_id_pos(evsel); | ||
64 | } | 133 | } |
65 | } | 134 | } |
66 | 135 | ||
@@ -70,12 +139,19 @@ void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, | |||
70 | if (evsel->attr.sample_type & bit) { | 139 | if (evsel->attr.sample_type & bit) { |
71 | evsel->attr.sample_type &= ~bit; | 140 | evsel->attr.sample_type &= ~bit; |
72 | evsel->sample_size -= sizeof(u64); | 141 | evsel->sample_size -= sizeof(u64); |
142 | perf_evsel__calc_id_pos(evsel); | ||
73 | } | 143 | } |
74 | } | 144 | } |
75 | 145 | ||
76 | void perf_evsel__set_sample_id(struct perf_evsel *evsel) | 146 | void perf_evsel__set_sample_id(struct perf_evsel *evsel, |
147 | bool can_sample_identifier) | ||
77 | { | 148 | { |
78 | perf_evsel__set_sample_bit(evsel, ID); | 149 | if (can_sample_identifier) { |
150 | perf_evsel__reset_sample_bit(evsel, ID); | ||
151 | perf_evsel__set_sample_bit(evsel, IDENTIFIER); | ||
152 | } else { | ||
153 | perf_evsel__set_sample_bit(evsel, ID); | ||
154 | } | ||
79 | evsel->attr.read_format |= PERF_FORMAT_ID; | 155 | evsel->attr.read_format |= PERF_FORMAT_ID; |
80 | } | 156 | } |
81 | 157 | ||
@@ -88,6 +164,7 @@ void perf_evsel__init(struct perf_evsel *evsel, | |||
88 | INIT_LIST_HEAD(&evsel->node); | 164 | INIT_LIST_HEAD(&evsel->node); |
89 | hists__init(&evsel->hists); | 165 | hists__init(&evsel->hists); |
90 | evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); | 166 | evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); |
167 | perf_evsel__calc_id_pos(evsel); | ||
91 | } | 168 | } |
92 | 169 | ||
93 | struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) | 170 | struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) |
@@ -246,6 +323,7 @@ const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = { | |||
246 | "major-faults", | 323 | "major-faults", |
247 | "alignment-faults", | 324 | "alignment-faults", |
248 | "emulation-faults", | 325 | "emulation-faults", |
326 | "dummy", | ||
249 | }; | 327 | }; |
250 | 328 | ||
251 | static const char *__perf_evsel__sw_name(u64 config) | 329 | static const char *__perf_evsel__sw_name(u64 config) |
@@ -490,6 +568,7 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size) | |||
490 | void perf_evsel__config(struct perf_evsel *evsel, | 568 | void perf_evsel__config(struct perf_evsel *evsel, |
491 | struct perf_record_opts *opts) | 569 | struct perf_record_opts *opts) |
492 | { | 570 | { |
571 | struct perf_evsel *leader = evsel->leader; | ||
493 | struct perf_event_attr *attr = &evsel->attr; | 572 | struct perf_event_attr *attr = &evsel->attr; |
494 | int track = !evsel->idx; /* only the first counter needs these */ | 573 | int track = !evsel->idx; /* only the first counter needs these */ |
495 | 574 | ||
@@ -499,6 +578,25 @@ void perf_evsel__config(struct perf_evsel *evsel, | |||
499 | perf_evsel__set_sample_bit(evsel, IP); | 578 | perf_evsel__set_sample_bit(evsel, IP); |
500 | perf_evsel__set_sample_bit(evsel, TID); | 579 | perf_evsel__set_sample_bit(evsel, TID); |
501 | 580 | ||
581 | if (evsel->sample_read) { | ||
582 | perf_evsel__set_sample_bit(evsel, READ); | ||
583 | |||
584 | /* | ||
585 | * We need ID even in the case of a single event, because | ||
586 | * PERF_SAMPLE_READ processes ID-specific data. | ||
587 | */ | ||
588 | perf_evsel__set_sample_id(evsel, false); | ||
589 | |||
590 | /* | ||
591 | * Apply the group format only if we belong to a group | ||
592 | * with more than one member. | ||
593 | */ | ||
594 | if (leader->nr_members > 1) { | ||
595 | attr->read_format |= PERF_FORMAT_GROUP; | ||
596 | attr->inherit = 0; | ||
597 | } | ||
598 | } | ||
599 | |||
502 | /* | 600 | /* |
503 | * We default some events to a 1 default interval. But keep | 601 | * We default some events to a 1 default interval. But keep |
504 | * it a weak assumption overridable by the user. | 602 | * it a weak assumption overridable by the user. |
@@ -514,6 +612,15 @@ void perf_evsel__config(struct perf_evsel *evsel, | |||
514 | } | 612 | } |
515 | } | 613 | } |
516 | 614 | ||
615 | /* | ||
616 | * Disable sampling for all group members other than the | ||
617 | * leader when the leader 'leads' the sampling. | ||
618 | */ | ||
619 | if ((leader != evsel) && leader->sample_read) { | ||
620 | attr->sample_freq = 0; | ||
621 | attr->sample_period = 0; | ||
622 | } | ||
623 | |||
517 | if (opts->no_samples) | 624 | if (opts->no_samples) |
518 | attr->sample_freq = 0; | 625 | attr->sample_freq = 0; |
519 | 626 | ||
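Taken together, the two hunks above implement leader sampling: an event with sample_read set forces PERF_SAMPLE_READ plus an ID, picks up PERF_FORMAT_GROUP (and drops inherit) when its group has more than one member, and every non-leader member has its period and frequency zeroed so only the leader triggers samples. A hedged sketch of the attributes this produces for a two-event group; the concrete values below are illustrative, not taken from the patch:

#include <linux/perf_event.h>

/* Illustrative only: perf_evsel__config() derives this at runtime. */
static struct perf_event_attr group_leader = {
        .type          = PERF_TYPE_HARDWARE,
        .config        = PERF_COUNT_HW_CPU_CYCLES,
        .sample_period = 4000,
        .sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
                         PERF_SAMPLE_ID | PERF_SAMPLE_READ,
        .read_format   = PERF_FORMAT_ID | PERF_FORMAT_GROUP,
        .inherit       = 0,             /* cleared along with PERF_FORMAT_GROUP */
};

static struct perf_event_attr group_member = {
        .type          = PERF_TYPE_HARDWARE,
        .config        = PERF_COUNT_HW_INSTRUCTIONS,
        .sample_period = 0,             /* the leader 'leads' the sampling */
        .read_format   = PERF_FORMAT_ID | PERF_FORMAT_GROUP,
};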
@@ -605,15 +712,15 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) | |||
605 | return evsel->fd != NULL ? 0 : -ENOMEM; | 712 | return evsel->fd != NULL ? 0 : -ENOMEM; |
606 | } | 713 | } |
607 | 714 | ||
608 | int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, | 715 | static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads, |
609 | const char *filter) | 716 | int ioc, void *arg) |
610 | { | 717 | { |
611 | int cpu, thread; | 718 | int cpu, thread; |
612 | 719 | ||
613 | for (cpu = 0; cpu < ncpus; cpu++) { | 720 | for (cpu = 0; cpu < ncpus; cpu++) { |
614 | for (thread = 0; thread < nthreads; thread++) { | 721 | for (thread = 0; thread < nthreads; thread++) { |
615 | int fd = FD(evsel, cpu, thread), | 722 | int fd = FD(evsel, cpu, thread), |
616 | err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter); | 723 | err = ioctl(fd, ioc, arg); |
617 | 724 | ||
618 | if (err) | 725 | if (err) |
619 | return err; | 726 | return err; |
@@ -623,6 +730,21 @@ int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, | |||
623 | return 0; | 730 | return 0; |
624 | } | 731 | } |
625 | 732 | ||
733 | int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, | ||
734 | const char *filter) | ||
735 | { | ||
736 | return perf_evsel__run_ioctl(evsel, ncpus, nthreads, | ||
737 | PERF_EVENT_IOC_SET_FILTER, | ||
738 | (void *)filter); | ||
739 | } | ||
740 | |||
741 | int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads) | ||
742 | { | ||
743 | return perf_evsel__run_ioctl(evsel, ncpus, nthreads, | ||
744 | PERF_EVENT_IOC_ENABLE, | ||
745 | 0); | ||
746 | } | ||
747 | |||
626 | int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) | 748 | int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) |
627 | { | 749 | { |
628 | evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); | 750 | evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); |
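The refactor above funnels every per-fd ioctl through one helper so that the new perf_evsel__enable() can reuse the same loop with PERF_EVENT_IOC_ENABLE. A rough standalone equivalent (sketch; 'fds' is a hypothetical flat array rather than perf's xyarray):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Apply one ioctl to every event fd, mirroring perf_evsel__run_ioctl(). */
static int ioctl_all(const int *fds, int nfds, unsigned long request, void *arg)
{
        for (int i = 0; i < nfds; i++) {
                int err = ioctl(fds[i], request, arg);

                if (err)
                        return err;
        }
        return 0;
}

/* usage: ioctl_all(fds, nfds, PERF_EVENT_IOC_ENABLE, NULL); */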
@@ -817,12 +939,72 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread) | |||
817 | return fd; | 939 | return fd; |
818 | } | 940 | } |
819 | 941 | ||
942 | #define __PRINT_ATTR(fmt, cast, field) \ | ||
943 | fprintf(fp, " %-19s "fmt"\n", #field, cast attr->field) | ||
944 | |||
945 | #define PRINT_ATTR_U32(field) __PRINT_ATTR("%u" , , field) | ||
946 | #define PRINT_ATTR_X32(field) __PRINT_ATTR("%#x", , field) | ||
947 | #define PRINT_ATTR_U64(field) __PRINT_ATTR("%" PRIu64, (uint64_t), field) | ||
948 | #define PRINT_ATTR_X64(field) __PRINT_ATTR("%#"PRIx64, (uint64_t), field) | ||
949 | |||
950 | #define PRINT_ATTR2N(name1, field1, name2, field2) \ | ||
951 | fprintf(fp, " %-19s %u %-19s %u\n", \ | ||
952 | name1, attr->field1, name2, attr->field2) | ||
953 | |||
954 | #define PRINT_ATTR2(field1, field2) \ | ||
955 | PRINT_ATTR2N(#field1, field1, #field2, field2) | ||
956 | |||
957 | static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp) | ||
958 | { | ||
959 | size_t ret = 0; | ||
960 | |||
961 | ret += fprintf(fp, "%.60s\n", graph_dotted_line); | ||
962 | ret += fprintf(fp, "perf_event_attr:\n"); | ||
963 | |||
964 | ret += PRINT_ATTR_U32(type); | ||
965 | ret += PRINT_ATTR_U32(size); | ||
966 | ret += PRINT_ATTR_X64(config); | ||
967 | ret += PRINT_ATTR_U64(sample_period); | ||
968 | ret += PRINT_ATTR_U64(sample_freq); | ||
969 | ret += PRINT_ATTR_X64(sample_type); | ||
970 | ret += PRINT_ATTR_X64(read_format); | ||
971 | |||
972 | ret += PRINT_ATTR2(disabled, inherit); | ||
973 | ret += PRINT_ATTR2(pinned, exclusive); | ||
974 | ret += PRINT_ATTR2(exclude_user, exclude_kernel); | ||
975 | ret += PRINT_ATTR2(exclude_hv, exclude_idle); | ||
976 | ret += PRINT_ATTR2(mmap, comm); | ||
977 | ret += PRINT_ATTR2(freq, inherit_stat); | ||
978 | ret += PRINT_ATTR2(enable_on_exec, task); | ||
979 | ret += PRINT_ATTR2(watermark, precise_ip); | ||
980 | ret += PRINT_ATTR2(mmap_data, sample_id_all); | ||
981 | ret += PRINT_ATTR2(exclude_host, exclude_guest); | ||
982 | ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel, | ||
983 | "excl.callchain_user", exclude_callchain_user); | ||
984 | |||
985 | ret += PRINT_ATTR_U32(wakeup_events); | ||
986 | ret += PRINT_ATTR_U32(wakeup_watermark); | ||
987 | ret += PRINT_ATTR_X32(bp_type); | ||
988 | ret += PRINT_ATTR_X64(bp_addr); | ||
989 | ret += PRINT_ATTR_X64(config1); | ||
990 | ret += PRINT_ATTR_U64(bp_len); | ||
991 | ret += PRINT_ATTR_X64(config2); | ||
992 | ret += PRINT_ATTR_X64(branch_sample_type); | ||
993 | ret += PRINT_ATTR_X64(sample_regs_user); | ||
994 | ret += PRINT_ATTR_U32(sample_stack_user); | ||
995 | |||
996 | ret += fprintf(fp, "%.60s\n", graph_dotted_line); | ||
997 | |||
998 | return ret; | ||
999 | } | ||
1000 | |||
820 | static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | 1001 | static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, |
821 | struct thread_map *threads) | 1002 | struct thread_map *threads) |
822 | { | 1003 | { |
823 | int cpu, thread; | 1004 | int cpu, thread; |
824 | unsigned long flags = 0; | 1005 | unsigned long flags = 0; |
825 | int pid = -1, err; | 1006 | int pid = -1, err; |
1007 | enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; | ||
826 | 1008 | ||
827 | if (evsel->fd == NULL && | 1009 | if (evsel->fd == NULL && |
828 | perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) | 1010 | perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) |
@@ -840,6 +1022,9 @@ retry_sample_id: | |||
840 | if (perf_missing_features.sample_id_all) | 1022 | if (perf_missing_features.sample_id_all) |
841 | evsel->attr.sample_id_all = 0; | 1023 | evsel->attr.sample_id_all = 0; |
842 | 1024 | ||
1025 | if (verbose >= 2) | ||
1026 | perf_event_attr__fprintf(&evsel->attr, stderr); | ||
1027 | |||
843 | for (cpu = 0; cpu < cpus->nr; cpu++) { | 1028 | for (cpu = 0; cpu < cpus->nr; cpu++) { |
844 | 1029 | ||
845 | for (thread = 0; thread < threads->nr; thread++) { | 1030 | for (thread = 0; thread < threads->nr; thread++) { |
@@ -849,6 +1034,9 @@ retry_sample_id: | |||
849 | pid = threads->map[thread]; | 1034 | pid = threads->map[thread]; |
850 | 1035 | ||
851 | group_fd = get_group_fd(evsel, cpu, thread); | 1036 | group_fd = get_group_fd(evsel, cpu, thread); |
1037 | retry_open: | ||
1038 | pr_debug2("perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n", | ||
1039 | pid, cpus->map[cpu], group_fd, flags); | ||
852 | 1040 | ||
853 | FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, | 1041 | FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, |
854 | pid, | 1042 | pid, |
@@ -858,12 +1046,37 @@ retry_sample_id: | |||
858 | err = -errno; | 1046 | err = -errno; |
859 | goto try_fallback; | 1047 | goto try_fallback; |
860 | } | 1048 | } |
1049 | set_rlimit = NO_CHANGE; | ||
861 | } | 1050 | } |
862 | } | 1051 | } |
863 | 1052 | ||
864 | return 0; | 1053 | return 0; |
865 | 1054 | ||
866 | try_fallback: | 1055 | try_fallback: |
1056 | /* | ||
1057 | * perf stat needs between 5 and 22 fds per CPU. When we run out | ||
1058 | * of them, try to increase the limits. | ||
1059 | */ | ||
1060 | if (err == -EMFILE && set_rlimit < INCREASED_MAX) { | ||
1061 | struct rlimit l; | ||
1062 | int old_errno = errno; | ||
1063 | |||
1064 | if (getrlimit(RLIMIT_NOFILE, &l) == 0) { | ||
1065 | if (set_rlimit == NO_CHANGE) | ||
1066 | l.rlim_cur = l.rlim_max; | ||
1067 | else { | ||
1068 | l.rlim_cur = l.rlim_max + 1000; | ||
1069 | l.rlim_max = l.rlim_cur; | ||
1070 | } | ||
1071 | if (setrlimit(RLIMIT_NOFILE, &l) == 0) { | ||
1072 | set_rlimit++; | ||
1073 | errno = old_errno; | ||
1074 | goto retry_open; | ||
1075 | } | ||
1076 | } | ||
1077 | errno = old_errno; | ||
1078 | } | ||
1079 | |||
867 | if (err != -EINVAL || cpu > 0 || thread > 0) | 1080 | if (err != -EINVAL || cpu > 0 || thread > 0) |
868 | goto out_close; | 1081 | goto out_close; |
869 | 1082 | ||
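The EMFILE fallback added above retries the failing perf_event_open() after raising RLIMIT_NOFILE: first the soft limit is lifted to the hard limit, and on a second failure the hard limit itself is pushed up (which only succeeds with sufficient privileges). The same two-step pattern in isolation (sketch; the real code also preserves errno and jumps back to retry_open):

#include <sys/resource.h>
#include <errno.h>

/* step 0: soft -> hard; step 1: raise the hard limit as well. */
static int bump_nofile_limit(int step)
{
        struct rlimit l;

        if (getrlimit(RLIMIT_NOFILE, &l))
                return -errno;

        if (step == 0) {
                l.rlim_cur = l.rlim_max;
        } else {
                l.rlim_cur = l.rlim_max + 1000; /* usually needs CAP_SYS_RESOURCE */
                l.rlim_max = l.rlim_cur;
        }

        return setrlimit(RLIMIT_NOFILE, &l) ? -errno : 0;
}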
@@ -951,6 +1164,11 @@ static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel, | |||
951 | array += ((event->header.size - | 1164 | array += ((event->header.size - |
952 | sizeof(event->header)) / sizeof(u64)) - 1; | 1165 | sizeof(event->header)) / sizeof(u64)) - 1; |
953 | 1166 | ||
1167 | if (type & PERF_SAMPLE_IDENTIFIER) { | ||
1168 | sample->id = *array; | ||
1169 | array--; | ||
1170 | } | ||
1171 | |||
954 | if (type & PERF_SAMPLE_CPU) { | 1172 | if (type & PERF_SAMPLE_CPU) { |
955 | u.val64 = *array; | 1173 | u.val64 = *array; |
956 | if (swapped) { | 1174 | if (swapped) { |
@@ -994,24 +1212,30 @@ static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel, | |||
994 | return 0; | 1212 | return 0; |
995 | } | 1213 | } |
996 | 1214 | ||
997 | static bool sample_overlap(const union perf_event *event, | 1215 | static inline bool overflow(const void *endp, u16 max_size, const void *offset, |
998 | const void *offset, u64 size) | 1216 | u64 size) |
999 | { | 1217 | { |
1000 | const void *base = event; | 1218 | return size > max_size || offset + size > endp; |
1219 | } | ||
1001 | 1220 | ||
1002 | if (offset + size > base + event->header.size) | 1221 | #define OVERFLOW_CHECK(offset, size, max_size) \ |
1003 | return true; | 1222 | do { \ |
1223 | if (overflow(endp, (max_size), (offset), (size))) \ | ||
1224 | return -EFAULT; \ | ||
1225 | } while (0) | ||
1004 | 1226 | ||
1005 | return false; | 1227 | #define OVERFLOW_CHECK_u64(offset) \ |
1006 | } | 1228 | OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64)) |
1007 | 1229 | ||
1008 | int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, | 1230 | int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, |
1009 | struct perf_sample *data) | 1231 | struct perf_sample *data) |
1010 | { | 1232 | { |
1011 | u64 type = evsel->attr.sample_type; | 1233 | u64 type = evsel->attr.sample_type; |
1012 | u64 regs_user = evsel->attr.sample_regs_user; | ||
1013 | bool swapped = evsel->needs_swap; | 1234 | bool swapped = evsel->needs_swap; |
1014 | const u64 *array; | 1235 | const u64 *array; |
1236 | u16 max_size = event->header.size; | ||
1237 | const void *endp = (void *)event + max_size; | ||
1238 | u64 sz; | ||
1015 | 1239 | ||
1016 | /* | 1240 | /* |
1017 | * used for cross-endian analysis. See git commit 65014ab3 | 1241 | * used for cross-endian analysis. See git commit 65014ab3 |
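The OVERFLOW_CHECK helpers introduced above replace sample_overlap(): before any variable-sized field is dereferenced, the parser now checks both that the claimed size is no larger than the event itself (max_size is the u16 header.size) and that the field ends before endp, one past the last byte of the event. The same test as a plain function (sketch; the macro form is used so it can return -EFAULT straight out of perf_evsel__parse_sample()):

#include <stdbool.h>
#include <stdint.h>

/* true if [offset, offset + size) lies entirely inside the event */
static bool field_fits(const void *endp, uint16_t max_size,
                       const void *offset, uint64_t size)
{
        return size <= max_size &&
               (const char *)offset + size <= (const char *)endp;
}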
@@ -1033,11 +1257,22 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, | |||
1033 | 1257 | ||
1034 | array = event->sample.array; | 1258 | array = event->sample.array; |
1035 | 1259 | ||
1260 | /* | ||
1261 | * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes | ||
1262 | * up to PERF_SAMPLE_PERIOD. After that, overflow() must be used to | ||
1263 | * check that the format does not go past the end of the event. | ||
1264 | */ | ||
1036 | if (evsel->sample_size + sizeof(event->header) > event->header.size) | 1265 | if (evsel->sample_size + sizeof(event->header) > event->header.size) |
1037 | return -EFAULT; | 1266 | return -EFAULT; |
1038 | 1267 | ||
1268 | data->id = -1ULL; | ||
1269 | if (type & PERF_SAMPLE_IDENTIFIER) { | ||
1270 | data->id = *array; | ||
1271 | array++; | ||
1272 | } | ||
1273 | |||
1039 | if (type & PERF_SAMPLE_IP) { | 1274 | if (type & PERF_SAMPLE_IP) { |
1040 | data->ip = event->ip.ip; | 1275 | data->ip = *array; |
1041 | array++; | 1276 | array++; |
1042 | } | 1277 | } |
1043 | 1278 | ||
@@ -1066,7 +1301,6 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, | |||
1066 | array++; | 1301 | array++; |
1067 | } | 1302 | } |
1068 | 1303 | ||
1069 | data->id = -1ULL; | ||
1070 | if (type & PERF_SAMPLE_ID) { | 1304 | if (type & PERF_SAMPLE_ID) { |
1071 | data->id = *array; | 1305 | data->id = *array; |
1072 | array++; | 1306 | array++; |
@@ -1096,25 +1330,62 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, | |||
1096 | } | 1330 | } |
1097 | 1331 | ||
1098 | if (type & PERF_SAMPLE_READ) { | 1332 | if (type & PERF_SAMPLE_READ) { |
1099 | fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n"); | 1333 | u64 read_format = evsel->attr.read_format; |
1100 | return -1; | 1334 | |
1335 | OVERFLOW_CHECK_u64(array); | ||
1336 | if (read_format & PERF_FORMAT_GROUP) | ||
1337 | data->read.group.nr = *array; | ||
1338 | else | ||
1339 | data->read.one.value = *array; | ||
1340 | |||
1341 | array++; | ||
1342 | |||
1343 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
1344 | OVERFLOW_CHECK_u64(array); | ||
1345 | data->read.time_enabled = *array; | ||
1346 | array++; | ||
1347 | } | ||
1348 | |||
1349 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
1350 | OVERFLOW_CHECK_u64(array); | ||
1351 | data->read.time_running = *array; | ||
1352 | array++; | ||
1353 | } | ||
1354 | |||
1355 | /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ | ||
1356 | if (read_format & PERF_FORMAT_GROUP) { | ||
1357 | const u64 max_group_nr = UINT64_MAX / | ||
1358 | sizeof(struct sample_read_value); | ||
1359 | |||
1360 | if (data->read.group.nr > max_group_nr) | ||
1361 | return -EFAULT; | ||
1362 | sz = data->read.group.nr * | ||
1363 | sizeof(struct sample_read_value); | ||
1364 | OVERFLOW_CHECK(array, sz, max_size); | ||
1365 | data->read.group.values = | ||
1366 | (struct sample_read_value *)array; | ||
1367 | array = (void *)array + sz; | ||
1368 | } else { | ||
1369 | OVERFLOW_CHECK_u64(array); | ||
1370 | data->read.one.id = *array; | ||
1371 | array++; | ||
1372 | } | ||
1101 | } | 1373 | } |
1102 | 1374 | ||
1103 | if (type & PERF_SAMPLE_CALLCHAIN) { | 1375 | if (type & PERF_SAMPLE_CALLCHAIN) { |
1104 | if (sample_overlap(event, array, sizeof(data->callchain->nr))) | 1376 | const u64 max_callchain_nr = UINT64_MAX / sizeof(u64); |
1105 | return -EFAULT; | ||
1106 | |||
1107 | data->callchain = (struct ip_callchain *)array; | ||
1108 | 1377 | ||
1109 | if (sample_overlap(event, array, data->callchain->nr)) | 1378 | OVERFLOW_CHECK_u64(array); |
1379 | data->callchain = (struct ip_callchain *)array++; | ||
1380 | if (data->callchain->nr > max_callchain_nr) | ||
1110 | return -EFAULT; | 1381 | return -EFAULT; |
1111 | 1382 | sz = data->callchain->nr * sizeof(u64); | |
1112 | array += 1 + data->callchain->nr; | 1383 | OVERFLOW_CHECK(array, sz, max_size); |
1384 | array = (void *)array + sz; | ||
1113 | } | 1385 | } |
1114 | 1386 | ||
1115 | if (type & PERF_SAMPLE_RAW) { | 1387 | if (type & PERF_SAMPLE_RAW) { |
1116 | const u64 *pdata; | 1388 | OVERFLOW_CHECK_u64(array); |
1117 | |||
1118 | u.val64 = *array; | 1389 | u.val64 = *array; |
1119 | if (WARN_ONCE(swapped, | 1390 | if (WARN_ONCE(swapped, |
1120 | "Endianness of raw data not corrected!\n")) { | 1391 | "Endianness of raw data not corrected!\n")) { |
@@ -1123,65 +1394,71 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, | |||
1123 | u.val32[0] = bswap_32(u.val32[0]); | 1394 | u.val32[0] = bswap_32(u.val32[0]); |
1124 | u.val32[1] = bswap_32(u.val32[1]); | 1395 | u.val32[1] = bswap_32(u.val32[1]); |
1125 | } | 1396 | } |
1126 | |||
1127 | if (sample_overlap(event, array, sizeof(u32))) | ||
1128 | return -EFAULT; | ||
1129 | |||
1130 | data->raw_size = u.val32[0]; | 1397 | data->raw_size = u.val32[0]; |
1131 | pdata = (void *) array + sizeof(u32); | 1398 | array = (void *)array + sizeof(u32); |
1132 | 1399 | ||
1133 | if (sample_overlap(event, pdata, data->raw_size)) | 1400 | OVERFLOW_CHECK(array, data->raw_size, max_size); |
1134 | return -EFAULT; | 1401 | data->raw_data = (void *)array; |
1135 | 1402 | array = (void *)array + data->raw_size; | |
1136 | data->raw_data = (void *) pdata; | ||
1137 | |||
1138 | array = (void *)array + data->raw_size + sizeof(u32); | ||
1139 | } | 1403 | } |
1140 | 1404 | ||
1141 | if (type & PERF_SAMPLE_BRANCH_STACK) { | 1405 | if (type & PERF_SAMPLE_BRANCH_STACK) { |
1142 | u64 sz; | 1406 | const u64 max_branch_nr = UINT64_MAX / |
1407 | sizeof(struct branch_entry); | ||
1143 | 1408 | ||
1144 | data->branch_stack = (struct branch_stack *)array; | 1409 | OVERFLOW_CHECK_u64(array); |
1145 | array++; /* nr */ | 1410 | data->branch_stack = (struct branch_stack *)array++; |
1146 | 1411 | ||
1412 | if (data->branch_stack->nr > max_branch_nr) | ||
1413 | return -EFAULT; | ||
1147 | sz = data->branch_stack->nr * sizeof(struct branch_entry); | 1414 | sz = data->branch_stack->nr * sizeof(struct branch_entry); |
1148 | sz /= sizeof(u64); | 1415 | OVERFLOW_CHECK(array, sz, max_size); |
1149 | array += sz; | 1416 | array = (void *)array + sz; |
1150 | } | 1417 | } |
1151 | 1418 | ||
1152 | if (type & PERF_SAMPLE_REGS_USER) { | 1419 | if (type & PERF_SAMPLE_REGS_USER) { |
1153 | /* First u64 tells us if we have any regs in sample. */ | 1420 | OVERFLOW_CHECK_u64(array); |
1154 | u64 avail = *array++; | 1421 | data->user_regs.abi = *array; |
1422 | array++; | ||
1423 | |||
1424 | if (data->user_regs.abi) { | ||
1425 | u64 regs_user = evsel->attr.sample_regs_user; | ||
1155 | 1426 | ||
1156 | if (avail) { | 1427 | sz = hweight_long(regs_user) * sizeof(u64); |
1428 | OVERFLOW_CHECK(array, sz, max_size); | ||
1157 | data->user_regs.regs = (u64 *)array; | 1429 | data->user_regs.regs = (u64 *)array; |
1158 | array += hweight_long(regs_user); | 1430 | array = (void *)array + sz; |
1159 | } | 1431 | } |
1160 | } | 1432 | } |
1161 | 1433 | ||
1162 | if (type & PERF_SAMPLE_STACK_USER) { | 1434 | if (type & PERF_SAMPLE_STACK_USER) { |
1163 | u64 size = *array++; | 1435 | OVERFLOW_CHECK_u64(array); |
1436 | sz = *array++; | ||
1164 | 1437 | ||
1165 | data->user_stack.offset = ((char *)(array - 1) | 1438 | data->user_stack.offset = ((char *)(array - 1) |
1166 | - (char *) event); | 1439 | - (char *) event); |
1167 | 1440 | ||
1168 | if (!size) { | 1441 | if (!sz) { |
1169 | data->user_stack.size = 0; | 1442 | data->user_stack.size = 0; |
1170 | } else { | 1443 | } else { |
1444 | OVERFLOW_CHECK(array, sz, max_size); | ||
1171 | data->user_stack.data = (char *)array; | 1445 | data->user_stack.data = (char *)array; |
1172 | array += size / sizeof(*array); | 1446 | array = (void *)array + sz; |
1447 | OVERFLOW_CHECK_u64(array); | ||
1173 | data->user_stack.size = *array++; | 1448 | data->user_stack.size = *array++; |
1174 | } | 1449 | } |
1175 | } | 1450 | } |
1176 | 1451 | ||
1177 | data->weight = 0; | 1452 | data->weight = 0; |
1178 | if (type & PERF_SAMPLE_WEIGHT) { | 1453 | if (type & PERF_SAMPLE_WEIGHT) { |
1454 | OVERFLOW_CHECK_u64(array); | ||
1179 | data->weight = *array; | 1455 | data->weight = *array; |
1180 | array++; | 1456 | array++; |
1181 | } | 1457 | } |
1182 | 1458 | ||
1183 | data->data_src = PERF_MEM_DATA_SRC_NONE; | 1459 | data->data_src = PERF_MEM_DATA_SRC_NONE; |
1184 | if (type & PERF_SAMPLE_DATA_SRC) { | 1460 | if (type & PERF_SAMPLE_DATA_SRC) { |
1461 | OVERFLOW_CHECK_u64(array); | ||
1185 | data->data_src = *array; | 1462 | data->data_src = *array; |
1186 | array++; | 1463 | array++; |
1187 | } | 1464 | } |
@@ -1189,12 +1466,105 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, | |||
1189 | return 0; | 1466 | return 0; |
1190 | } | 1467 | } |
1191 | 1468 | ||
1469 | size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, | ||
1470 | u64 sample_regs_user, u64 read_format) | ||
1471 | { | ||
1472 | size_t sz, result = sizeof(struct sample_event); | ||
1473 | |||
1474 | if (type & PERF_SAMPLE_IDENTIFIER) | ||
1475 | result += sizeof(u64); | ||
1476 | |||
1477 | if (type & PERF_SAMPLE_IP) | ||
1478 | result += sizeof(u64); | ||
1479 | |||
1480 | if (type & PERF_SAMPLE_TID) | ||
1481 | result += sizeof(u64); | ||
1482 | |||
1483 | if (type & PERF_SAMPLE_TIME) | ||
1484 | result += sizeof(u64); | ||
1485 | |||
1486 | if (type & PERF_SAMPLE_ADDR) | ||
1487 | result += sizeof(u64); | ||
1488 | |||
1489 | if (type & PERF_SAMPLE_ID) | ||
1490 | result += sizeof(u64); | ||
1491 | |||
1492 | if (type & PERF_SAMPLE_STREAM_ID) | ||
1493 | result += sizeof(u64); | ||
1494 | |||
1495 | if (type & PERF_SAMPLE_CPU) | ||
1496 | result += sizeof(u64); | ||
1497 | |||
1498 | if (type & PERF_SAMPLE_PERIOD) | ||
1499 | result += sizeof(u64); | ||
1500 | |||
1501 | if (type & PERF_SAMPLE_READ) { | ||
1502 | result += sizeof(u64); | ||
1503 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
1504 | result += sizeof(u64); | ||
1505 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
1506 | result += sizeof(u64); | ||
1507 | /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ | ||
1508 | if (read_format & PERF_FORMAT_GROUP) { | ||
1509 | sz = sample->read.group.nr * | ||
1510 | sizeof(struct sample_read_value); | ||
1511 | result += sz; | ||
1512 | } else { | ||
1513 | result += sizeof(u64); | ||
1514 | } | ||
1515 | } | ||
1516 | |||
1517 | if (type & PERF_SAMPLE_CALLCHAIN) { | ||
1518 | sz = (sample->callchain->nr + 1) * sizeof(u64); | ||
1519 | result += sz; | ||
1520 | } | ||
1521 | |||
1522 | if (type & PERF_SAMPLE_RAW) { | ||
1523 | result += sizeof(u32); | ||
1524 | result += sample->raw_size; | ||
1525 | } | ||
1526 | |||
1527 | if (type & PERF_SAMPLE_BRANCH_STACK) { | ||
1528 | sz = sample->branch_stack->nr * sizeof(struct branch_entry); | ||
1529 | sz += sizeof(u64); | ||
1530 | result += sz; | ||
1531 | } | ||
1532 | |||
1533 | if (type & PERF_SAMPLE_REGS_USER) { | ||
1534 | if (sample->user_regs.abi) { | ||
1535 | result += sizeof(u64); | ||
1536 | sz = hweight_long(sample_regs_user) * sizeof(u64); | ||
1537 | result += sz; | ||
1538 | } else { | ||
1539 | result += sizeof(u64); | ||
1540 | } | ||
1541 | } | ||
1542 | |||
1543 | if (type & PERF_SAMPLE_STACK_USER) { | ||
1544 | sz = sample->user_stack.size; | ||
1545 | result += sizeof(u64); | ||
1546 | if (sz) { | ||
1547 | result += sz; | ||
1548 | result += sizeof(u64); | ||
1549 | } | ||
1550 | } | ||
1551 | |||
1552 | if (type & PERF_SAMPLE_WEIGHT) | ||
1553 | result += sizeof(u64); | ||
1554 | |||
1555 | if (type & PERF_SAMPLE_DATA_SRC) | ||
1556 | result += sizeof(u64); | ||
1557 | |||
1558 | return result; | ||
1559 | } | ||
1560 | |||
1192 | int perf_event__synthesize_sample(union perf_event *event, u64 type, | 1561 | int perf_event__synthesize_sample(union perf_event *event, u64 type, |
1562 | u64 sample_regs_user, u64 read_format, | ||
1193 | const struct perf_sample *sample, | 1563 | const struct perf_sample *sample, |
1194 | bool swapped) | 1564 | bool swapped) |
1195 | { | 1565 | { |
1196 | u64 *array; | 1566 | u64 *array; |
1197 | 1567 | size_t sz; | |
1198 | /* | 1568 | /* |
1199 | * used for cross-endian analysis. See git commit 65014ab3 | 1569 | * used for cross-endian analysis. See git commit 65014ab3 |
1200 | * for why this goofiness is needed. | 1570 | * for why this goofiness is needed. |
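perf_event__sample_event_size() mirrors the parser's layout so a caller can size a buffer exactly before re-emitting a sample through the extended perf_event__synthesize_sample() below. A hedged usage sketch (error handling trimmed; assumes the perf-internal declarations for union perf_event and struct perf_sample are in scope):

#include <stdlib.h>

static union perf_event *resynthesize(const struct perf_sample *sample,
                                      u64 type, u64 sample_regs_user,
                                      u64 read_format)
{
        size_t sz = perf_event__sample_event_size(sample, type,
                                                  sample_regs_user, read_format);
        union perf_event *ev = calloc(1, sz);

        if (ev == NULL)
                return NULL;

        ev->header.type = PERF_RECORD_SAMPLE;
        ev->header.size = sz;
        perf_event__synthesize_sample(ev, type, sample_regs_user, read_format,
                                      sample, false);
        return ev;
}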
@@ -1203,8 +1573,13 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, | |||
1203 | 1573 | ||
1204 | array = event->sample.array; | 1574 | array = event->sample.array; |
1205 | 1575 | ||
1576 | if (type & PERF_SAMPLE_IDENTIFIER) { | ||
1577 | *array = sample->id; | ||
1578 | array++; | ||
1579 | } | ||
1580 | |||
1206 | if (type & PERF_SAMPLE_IP) { | 1581 | if (type & PERF_SAMPLE_IP) { |
1207 | event->ip.ip = sample->ip; | 1582 | *array = sample->ip; |
1208 | array++; | 1583 | array++; |
1209 | } | 1584 | } |
1210 | 1585 | ||
@@ -1262,6 +1637,97 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, | |||
1262 | array++; | 1637 | array++; |
1263 | } | 1638 | } |
1264 | 1639 | ||
1640 | if (type & PERF_SAMPLE_READ) { | ||
1641 | if (read_format & PERF_FORMAT_GROUP) | ||
1642 | *array = sample->read.group.nr; | ||
1643 | else | ||
1644 | *array = sample->read.one.value; | ||
1645 | array++; | ||
1646 | |||
1647 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
1648 | *array = sample->read.time_enabled; | ||
1649 | array++; | ||
1650 | } | ||
1651 | |||
1652 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
1653 | *array = sample->read.time_running; | ||
1654 | array++; | ||
1655 | } | ||
1656 | |||
1657 | /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ | ||
1658 | if (read_format & PERF_FORMAT_GROUP) { | ||
1659 | sz = sample->read.group.nr * | ||
1660 | sizeof(struct sample_read_value); | ||
1661 | memcpy(array, sample->read.group.values, sz); | ||
1662 | array = (void *)array + sz; | ||
1663 | } else { | ||
1664 | *array = sample->read.one.id; | ||
1665 | array++; | ||
1666 | } | ||
1667 | } | ||
1668 | |||
1669 | if (type & PERF_SAMPLE_CALLCHAIN) { | ||
1670 | sz = (sample->callchain->nr + 1) * sizeof(u64); | ||
1671 | memcpy(array, sample->callchain, sz); | ||
1672 | array = (void *)array + sz; | ||
1673 | } | ||
1674 | |||
1675 | if (type & PERF_SAMPLE_RAW) { | ||
1676 | u.val32[0] = sample->raw_size; | ||
1677 | if (WARN_ONCE(swapped, | ||
1678 | "Endianness of raw data not corrected!\n")) { | ||
1679 | /* | ||
1680 | * Inverse of what is done in perf_evsel__parse_sample | ||
1681 | */ | ||
1682 | u.val32[0] = bswap_32(u.val32[0]); | ||
1683 | u.val32[1] = bswap_32(u.val32[1]); | ||
1684 | u.val64 = bswap_64(u.val64); | ||
1685 | } | ||
1686 | *array = u.val64; | ||
1687 | array = (void *)array + sizeof(u32); | ||
1688 | |||
1689 | memcpy(array, sample->raw_data, sample->raw_size); | ||
1690 | array = (void *)array + sample->raw_size; | ||
1691 | } | ||
1692 | |||
1693 | if (type & PERF_SAMPLE_BRANCH_STACK) { | ||
1694 | sz = sample->branch_stack->nr * sizeof(struct branch_entry); | ||
1695 | sz += sizeof(u64); | ||
1696 | memcpy(array, sample->branch_stack, sz); | ||
1697 | array = (void *)array + sz; | ||
1698 | } | ||
1699 | |||
1700 | if (type & PERF_SAMPLE_REGS_USER) { | ||
1701 | if (sample->user_regs.abi) { | ||
1702 | *array++ = sample->user_regs.abi; | ||
1703 | sz = hweight_long(sample_regs_user) * sizeof(u64); | ||
1704 | memcpy(array, sample->user_regs.regs, sz); | ||
1705 | array = (void *)array + sz; | ||
1706 | } else { | ||
1707 | *array++ = 0; | ||
1708 | } | ||
1709 | } | ||
1710 | |||
1711 | if (type & PERF_SAMPLE_STACK_USER) { | ||
1712 | sz = sample->user_stack.size; | ||
1713 | *array++ = sz; | ||
1714 | if (sz) { | ||
1715 | memcpy(array, sample->user_stack.data, sz); | ||
1716 | array = (void *)array + sz; | ||
1717 | *array++ = sz; | ||
1718 | } | ||
1719 | } | ||
1720 | |||
1721 | if (type & PERF_SAMPLE_WEIGHT) { | ||
1722 | *array = sample->weight; | ||
1723 | array++; | ||
1724 | } | ||
1725 | |||
1726 | if (type & PERF_SAMPLE_DATA_SRC) { | ||
1727 | *array = sample->data_src; | ||
1728 | array++; | ||
1729 | } | ||
1730 | |||
1265 | return 0; | 1731 | return 0; |
1266 | } | 1732 | } |
1267 | 1733 | ||
@@ -1391,6 +1857,7 @@ static int sample_type__fprintf(FILE *fp, bool *first, u64 value) | |||
1391 | bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU), | 1857 | bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU), |
1392 | bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW), | 1858 | bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW), |
1393 | bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER), | 1859 | bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER), |
1860 | bit_name(IDENTIFIER), | ||
1394 | { .name = NULL, } | 1861 | { .name = NULL, } |
1395 | }; | 1862 | }; |
1396 | #undef bit_name | 1863 | #undef bit_name |
@@ -1482,7 +1949,7 @@ out: | |||
1482 | bool perf_evsel__fallback(struct perf_evsel *evsel, int err, | 1949 | bool perf_evsel__fallback(struct perf_evsel *evsel, int err, |
1483 | char *msg, size_t msgsize) | 1950 | char *msg, size_t msgsize) |
1484 | { | 1951 | { |
1485 | if ((err == ENOENT || err == ENXIO) && | 1952 | if ((err == ENOENT || err == ENXIO || err == ENODEV) && |
1486 | evsel->attr.type == PERF_TYPE_HARDWARE && | 1953 | evsel->attr.type == PERF_TYPE_HARDWARE && |
1487 | evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) { | 1954 | evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) { |
1488 | /* | 1955 | /* |
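Adding ENODEV to the test above lets a missing PMU (for example on some virtual machines) take the same fallback path as ENOENT/ENXIO. The fallback body is not shown in this hunk; the assumption here is that it swaps the hardware cycles event for the software cpu-clock event, roughly:

#include <linux/perf_event.h>

/* Assumed fallback action (sketch only, not taken from this diff). */
static void fallback_to_cpu_clock(struct perf_event_attr *attr)
{
        attr->type   = PERF_TYPE_SOFTWARE;
        attr->config = PERF_COUNT_SW_CPU_CLOCK;
}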
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 3f156ccc1acb..4a7bdc713bab 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h | |||
@@ -38,6 +38,9 @@ struct perf_sample_id { | |||
38 | struct hlist_node node; | 38 | struct hlist_node node; |
39 | u64 id; | 39 | u64 id; |
40 | struct perf_evsel *evsel; | 40 | struct perf_evsel *evsel; |
41 | |||
42 | /* Holds total ID period value for PERF_SAMPLE_READ processing. */ | ||
43 | u64 period; | ||
41 | }; | 44 | }; |
42 | 45 | ||
43 | /** struct perf_evsel - event selector | 46 | /** struct perf_evsel - event selector |
@@ -45,6 +48,12 @@ struct perf_sample_id { | |||
45 | * @name - Can be set to retain the original event name passed by the user, | 48 | * @name - Can be set to retain the original event name passed by the user, |
46 | * so that when showing results in tools such as 'perf stat', we | 49 | * so that when showing results in tools such as 'perf stat', we |
47 | * show the name used, not some alias. | 50 | * show the name used, not some alias. |
51 | * @id_pos: the position of the event id (PERF_SAMPLE_ID or | ||
52 | * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of | ||
53 | * struct sample_event | ||
54 | * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or | ||
55 | * PERF_SAMPLE_IDENTIFIER) in a non-sample event, i.e. in the id sample | ||
56 | * appended to non-sample events when sample_id_all is used | ||
48 | */ | 57 | */ |
49 | struct perf_evsel { | 58 | struct perf_evsel { |
50 | struct list_head node; | 59 | struct list_head node; |
@@ -71,11 +80,14 @@ struct perf_evsel { | |||
71 | } handler; | 80 | } handler; |
72 | struct cpu_map *cpus; | 81 | struct cpu_map *cpus; |
73 | unsigned int sample_size; | 82 | unsigned int sample_size; |
83 | int id_pos; | ||
84 | int is_pos; | ||
74 | bool supported; | 85 | bool supported; |
75 | bool needs_swap; | 86 | bool needs_swap; |
76 | /* parse modifier helper */ | 87 | /* parse modifier helper */ |
77 | int exclude_GH; | 88 | int exclude_GH; |
78 | int nr_members; | 89 | int nr_members; |
90 | int sample_read; | ||
79 | struct perf_evsel *leader; | 91 | struct perf_evsel *leader; |
80 | char *group_name; | 92 | char *group_name; |
81 | }; | 93 | }; |
@@ -100,6 +112,9 @@ void perf_evsel__delete(struct perf_evsel *evsel); | |||
100 | void perf_evsel__config(struct perf_evsel *evsel, | 112 | void perf_evsel__config(struct perf_evsel *evsel, |
101 | struct perf_record_opts *opts); | 113 | struct perf_record_opts *opts); |
102 | 114 | ||
115 | int __perf_evsel__sample_size(u64 sample_type); | ||
116 | void perf_evsel__calc_id_pos(struct perf_evsel *evsel); | ||
117 | |||
103 | bool perf_evsel__is_cache_op_valid(u8 type, u8 op); | 118 | bool perf_evsel__is_cache_op_valid(u8 type, u8 op); |
104 | 119 | ||
105 | #define PERF_EVSEL__MAX_ALIASES 8 | 120 | #define PERF_EVSEL__MAX_ALIASES 8 |
@@ -138,10 +153,12 @@ void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, | |||
138 | #define perf_evsel__reset_sample_bit(evsel, bit) \ | 153 | #define perf_evsel__reset_sample_bit(evsel, bit) \ |
139 | __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit) | 154 | __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit) |
140 | 155 | ||
141 | void perf_evsel__set_sample_id(struct perf_evsel *evsel); | 156 | void perf_evsel__set_sample_id(struct perf_evsel *evsel, |
157 | bool use_sample_identifier); | ||
142 | 158 | ||
143 | int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, | 159 | int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, |
144 | const char *filter); | 160 | const char *filter); |
161 | int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads); | ||
145 | 162 | ||
146 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, | 163 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, |
147 | struct cpu_map *cpus); | 164 | struct cpu_map *cpus); |
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index a4dafbee2511..a33197a4fd21 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -25,41 +25,9 @@ | |||
25 | 25 | ||
26 | static bool no_buildid_cache = false; | 26 | static bool no_buildid_cache = false; |
27 | 27 | ||
28 | static int trace_event_count; | ||
29 | static struct perf_trace_event_type *trace_events; | ||
30 | |||
31 | static u32 header_argc; | 28 | static u32 header_argc; |
32 | static const char **header_argv; | 29 | static const char **header_argv; |
33 | 30 | ||
34 | int perf_header__push_event(u64 id, const char *name) | ||
35 | { | ||
36 | struct perf_trace_event_type *nevents; | ||
37 | |||
38 | if (strlen(name) > MAX_EVENT_NAME) | ||
39 | pr_warning("Event %s will be truncated\n", name); | ||
40 | |||
41 | nevents = realloc(trace_events, (trace_event_count + 1) * sizeof(*trace_events)); | ||
42 | if (nevents == NULL) | ||
43 | return -ENOMEM; | ||
44 | trace_events = nevents; | ||
45 | |||
46 | memset(&trace_events[trace_event_count], 0, sizeof(struct perf_trace_event_type)); | ||
47 | trace_events[trace_event_count].event_id = id; | ||
48 | strncpy(trace_events[trace_event_count].name, name, MAX_EVENT_NAME - 1); | ||
49 | trace_event_count++; | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | char *perf_header__find_event(u64 id) | ||
54 | { | ||
55 | int i; | ||
56 | for (i = 0 ; i < trace_event_count; i++) { | ||
57 | if (trace_events[i].event_id == id) | ||
58 | return trace_events[i].name; | ||
59 | } | ||
60 | return NULL; | ||
61 | } | ||
62 | |||
63 | /* | 31 | /* |
64 | * magic2 = "PERFILE2" | 32 | * magic2 = "PERFILE2" |
65 | * must be a numerical value to let the endianness | 33 | * must be a numerical value to let the endianness |
@@ -748,18 +716,19 @@ static int build_cpu_topo(struct cpu_topo *tp, int cpu) | |||
748 | char filename[MAXPATHLEN]; | 716 | char filename[MAXPATHLEN]; |
749 | char *buf = NULL, *p; | 717 | char *buf = NULL, *p; |
750 | size_t len = 0; | 718 | size_t len = 0; |
719 | ssize_t sret; | ||
751 | u32 i = 0; | 720 | u32 i = 0; |
752 | int ret = -1; | 721 | int ret = -1; |
753 | 722 | ||
754 | sprintf(filename, CORE_SIB_FMT, cpu); | 723 | sprintf(filename, CORE_SIB_FMT, cpu); |
755 | fp = fopen(filename, "r"); | 724 | fp = fopen(filename, "r"); |
756 | if (!fp) | 725 | if (!fp) |
757 | return -1; | 726 | goto try_threads; |
758 | |||
759 | if (getline(&buf, &len, fp) <= 0) | ||
760 | goto done; | ||
761 | 727 | ||
728 | sret = getline(&buf, &len, fp); | ||
762 | fclose(fp); | 729 | fclose(fp); |
730 | if (sret <= 0) | ||
731 | goto try_threads; | ||
763 | 732 | ||
764 | p = strchr(buf, '\n'); | 733 | p = strchr(buf, '\n'); |
765 | if (p) | 734 | if (p) |
@@ -775,7 +744,9 @@ static int build_cpu_topo(struct cpu_topo *tp, int cpu) | |||
775 | buf = NULL; | 744 | buf = NULL; |
776 | len = 0; | 745 | len = 0; |
777 | } | 746 | } |
747 | ret = 0; | ||
778 | 748 | ||
749 | try_threads: | ||
779 | sprintf(filename, THRD_SIB_FMT, cpu); | 750 | sprintf(filename, THRD_SIB_FMT, cpu); |
780 | fp = fopen(filename, "r"); | 751 | fp = fopen(filename, "r"); |
781 | if (!fp) | 752 | if (!fp) |
@@ -2257,7 +2228,7 @@ static int perf_header__adds_write(struct perf_header *header, | |||
2257 | 2228 | ||
2258 | sec_size = sizeof(*feat_sec) * nr_sections; | 2229 | sec_size = sizeof(*feat_sec) * nr_sections; |
2259 | 2230 | ||
2260 | sec_start = header->data_offset + header->data_size; | 2231 | sec_start = header->feat_offset; |
2261 | lseek(fd, sec_start + sec_size, SEEK_SET); | 2232 | lseek(fd, sec_start + sec_size, SEEK_SET); |
2262 | 2233 | ||
2263 | for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { | 2234 | for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { |
@@ -2304,6 +2275,7 @@ int perf_session__write_header(struct perf_session *session, | |||
2304 | struct perf_file_attr f_attr; | 2275 | struct perf_file_attr f_attr; |
2305 | struct perf_header *header = &session->header; | 2276 | struct perf_header *header = &session->header; |
2306 | struct perf_evsel *evsel; | 2277 | struct perf_evsel *evsel; |
2278 | u64 attr_offset; | ||
2307 | int err; | 2279 | int err; |
2308 | 2280 | ||
2309 | lseek(fd, sizeof(f_header), SEEK_SET); | 2281 | lseek(fd, sizeof(f_header), SEEK_SET); |
@@ -2317,7 +2289,7 @@ int perf_session__write_header(struct perf_session *session, | |||
2317 | } | 2289 | } |
2318 | } | 2290 | } |
2319 | 2291 | ||
2320 | header->attr_offset = lseek(fd, 0, SEEK_CUR); | 2292 | attr_offset = lseek(fd, 0, SEEK_CUR); |
2321 | 2293 | ||
2322 | list_for_each_entry(evsel, &evlist->entries, node) { | 2294 | list_for_each_entry(evsel, &evlist->entries, node) { |
2323 | f_attr = (struct perf_file_attr){ | 2295 | f_attr = (struct perf_file_attr){ |
@@ -2334,17 +2306,8 @@ int perf_session__write_header(struct perf_session *session, | |||
2334 | } | 2306 | } |
2335 | } | 2307 | } |
2336 | 2308 | ||
2337 | header->event_offset = lseek(fd, 0, SEEK_CUR); | ||
2338 | header->event_size = trace_event_count * sizeof(struct perf_trace_event_type); | ||
2339 | if (trace_events) { | ||
2340 | err = do_write(fd, trace_events, header->event_size); | ||
2341 | if (err < 0) { | ||
2342 | pr_debug("failed to write perf header events\n"); | ||
2343 | return err; | ||
2344 | } | ||
2345 | } | ||
2346 | |||
2347 | header->data_offset = lseek(fd, 0, SEEK_CUR); | 2309 | header->data_offset = lseek(fd, 0, SEEK_CUR); |
2310 | header->feat_offset = header->data_offset + header->data_size; | ||
2348 | 2311 | ||
2349 | if (at_exit) { | 2312 | if (at_exit) { |
2350 | err = perf_header__adds_write(header, evlist, fd); | 2313 | err = perf_header__adds_write(header, evlist, fd); |
@@ -2357,17 +2320,14 @@ int perf_session__write_header(struct perf_session *session, | |||
2357 | .size = sizeof(f_header), | 2320 | .size = sizeof(f_header), |
2358 | .attr_size = sizeof(f_attr), | 2321 | .attr_size = sizeof(f_attr), |
2359 | .attrs = { | 2322 | .attrs = { |
2360 | .offset = header->attr_offset, | 2323 | .offset = attr_offset, |
2361 | .size = evlist->nr_entries * sizeof(f_attr), | 2324 | .size = evlist->nr_entries * sizeof(f_attr), |
2362 | }, | 2325 | }, |
2363 | .data = { | 2326 | .data = { |
2364 | .offset = header->data_offset, | 2327 | .offset = header->data_offset, |
2365 | .size = header->data_size, | 2328 | .size = header->data_size, |
2366 | }, | 2329 | }, |
2367 | .event_types = { | 2330 | /* event_types is ignored, store zeros */ |
2368 | .offset = header->event_offset, | ||
2369 | .size = header->event_size, | ||
2370 | }, | ||
2371 | }; | 2331 | }; |
2372 | 2332 | ||
2373 | memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); | 2333 | memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); |
@@ -2417,7 +2377,7 @@ int perf_header__process_sections(struct perf_header *header, int fd, | |||
2417 | 2377 | ||
2418 | sec_size = sizeof(*feat_sec) * nr_sections; | 2378 | sec_size = sizeof(*feat_sec) * nr_sections; |
2419 | 2379 | ||
2420 | lseek(fd, header->data_offset + header->data_size, SEEK_SET); | 2380 | lseek(fd, header->feat_offset, SEEK_SET); |
2421 | 2381 | ||
2422 | err = perf_header__getbuffer64(header, fd, feat_sec, sec_size); | 2382 | err = perf_header__getbuffer64(header, fd, feat_sec, sec_size); |
2423 | if (err < 0) | 2383 | if (err < 0) |
@@ -2523,6 +2483,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz, | |||
2523 | /* check for legacy format */ | 2483 | /* check for legacy format */ |
2524 | ret = memcmp(&magic, __perf_magic1, sizeof(magic)); | 2484 | ret = memcmp(&magic, __perf_magic1, sizeof(magic)); |
2525 | if (ret == 0) { | 2485 | if (ret == 0) { |
2486 | ph->version = PERF_HEADER_VERSION_1; | ||
2526 | pr_debug("legacy perf.data format\n"); | 2487 | pr_debug("legacy perf.data format\n"); |
2527 | if (is_pipe) | 2488 | if (is_pipe) |
2528 | return try_all_pipe_abis(hdr_sz, ph); | 2489 | return try_all_pipe_abis(hdr_sz, ph); |
@@ -2544,6 +2505,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz, | |||
2544 | return -1; | 2505 | return -1; |
2545 | 2506 | ||
2546 | ph->needs_swap = true; | 2507 | ph->needs_swap = true; |
2508 | ph->version = PERF_HEADER_VERSION_2; | ||
2547 | 2509 | ||
2548 | return 0; | 2510 | return 0; |
2549 | } | 2511 | } |
@@ -2614,10 +2576,9 @@ int perf_file_header__read(struct perf_file_header *header, | |||
2614 | memcpy(&ph->adds_features, &header->adds_features, | 2576 | memcpy(&ph->adds_features, &header->adds_features, |
2615 | sizeof(ph->adds_features)); | 2577 | sizeof(ph->adds_features)); |
2616 | 2578 | ||
2617 | ph->event_offset = header->event_types.offset; | ||
2618 | ph->event_size = header->event_types.size; | ||
2619 | ph->data_offset = header->data.offset; | 2579 | ph->data_offset = header->data.offset; |
2620 | ph->data_size = header->data.size; | 2580 | ph->data_size = header->data.size; |
2581 | ph->feat_offset = header->data.offset + header->data.size; | ||
2621 | return 0; | 2582 | return 0; |
2622 | } | 2583 | } |
2623 | 2584 | ||
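With the on-disk event_types section no longer written, the feature sections always sit immediately after the data section, so readers derive feat_offset rather than storing another offset. A sketch of that derivation and of the resulting v2 file layout (assumes perf's u64 typedef and struct perf_file_header are in scope; offsets shown are illustrative):

/* perf.data v2 layout after this change:
 *
 *   0                         struct perf_file_header
 *   attrs.offset              nr_entries * struct perf_file_attr
 *   data.offset               event stream, data.size bytes
 *   data.offset + data.size   feature sections   <- header->feat_offset
 *
 * event_types.{offset,size} stay in perf_file_header for layout
 * compatibility but are written as zeros and ignored on read. */
static u64 features_start(const struct perf_file_header *h)
{
        return h->data.offset + h->data.size;
}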
@@ -2666,19 +2627,17 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, | |||
2666 | return 0; | 2627 | return 0; |
2667 | } | 2628 | } |
2668 | 2629 | ||
2669 | static int perf_header__read_pipe(struct perf_session *session, int fd) | 2630 | static int perf_header__read_pipe(struct perf_session *session) |
2670 | { | 2631 | { |
2671 | struct perf_header *header = &session->header; | 2632 | struct perf_header *header = &session->header; |
2672 | struct perf_pipe_file_header f_header; | 2633 | struct perf_pipe_file_header f_header; |
2673 | 2634 | ||
2674 | if (perf_file_header__read_pipe(&f_header, header, fd, | 2635 | if (perf_file_header__read_pipe(&f_header, header, session->fd, |
2675 | session->repipe) < 0) { | 2636 | session->repipe) < 0) { |
2676 | pr_debug("incompatible file format\n"); | 2637 | pr_debug("incompatible file format\n"); |
2677 | return -EINVAL; | 2638 | return -EINVAL; |
2678 | } | 2639 | } |
2679 | 2640 | ||
2680 | session->fd = fd; | ||
2681 | |||
2682 | return 0; | 2641 | return 0; |
2683 | } | 2642 | } |
2684 | 2643 | ||
@@ -2772,20 +2731,21 @@ static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist, | |||
2772 | return 0; | 2731 | return 0; |
2773 | } | 2732 | } |
2774 | 2733 | ||
2775 | int perf_session__read_header(struct perf_session *session, int fd) | 2734 | int perf_session__read_header(struct perf_session *session) |
2776 | { | 2735 | { |
2777 | struct perf_header *header = &session->header; | 2736 | struct perf_header *header = &session->header; |
2778 | struct perf_file_header f_header; | 2737 | struct perf_file_header f_header; |
2779 | struct perf_file_attr f_attr; | 2738 | struct perf_file_attr f_attr; |
2780 | u64 f_id; | 2739 | u64 f_id; |
2781 | int nr_attrs, nr_ids, i, j; | 2740 | int nr_attrs, nr_ids, i, j; |
2741 | int fd = session->fd; | ||
2782 | 2742 | ||
2783 | session->evlist = perf_evlist__new(); | 2743 | session->evlist = perf_evlist__new(); |
2784 | if (session->evlist == NULL) | 2744 | if (session->evlist == NULL) |
2785 | return -ENOMEM; | 2745 | return -ENOMEM; |
2786 | 2746 | ||
2787 | if (session->fd_pipe) | 2747 | if (session->fd_pipe) |
2788 | return perf_header__read_pipe(session, fd); | 2748 | return perf_header__read_pipe(session); |
2789 | 2749 | ||
2790 | if (perf_file_header__read(&f_header, header, fd) < 0) | 2750 | if (perf_file_header__read(&f_header, header, fd) < 0) |
2791 | return -EINVAL; | 2751 | return -EINVAL; |
@@ -2839,22 +2799,9 @@ int perf_session__read_header(struct perf_session *session, int fd) | |||
2839 | 2799 | ||
2840 | symbol_conf.nr_events = nr_attrs; | 2800 | symbol_conf.nr_events = nr_attrs; |
2841 | 2801 | ||
2842 | if (f_header.event_types.size) { | ||
2843 | lseek(fd, f_header.event_types.offset, SEEK_SET); | ||
2844 | trace_events = malloc(f_header.event_types.size); | ||
2845 | if (trace_events == NULL) | ||
2846 | return -ENOMEM; | ||
2847 | if (perf_header__getbuffer64(header, fd, trace_events, | ||
2848 | f_header.event_types.size)) | ||
2849 | goto out_errno; | ||
2850 | trace_event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); | ||
2851 | } | ||
2852 | |||
2853 | perf_header__process_sections(header, fd, &session->pevent, | 2802 | perf_header__process_sections(header, fd, &session->pevent, |
2854 | perf_file_section__process); | 2803 | perf_file_section__process); |
2855 | 2804 | ||
2856 | lseek(fd, header->data_offset, SEEK_SET); | ||
2857 | |||
2858 | if (perf_evlist__prepare_tracepoint_events(session->evlist, | 2805 | if (perf_evlist__prepare_tracepoint_events(session->evlist, |
2859 | session->pevent)) | 2806 | session->pevent)) |
2860 | goto out_delete_evlist; | 2807 | goto out_delete_evlist; |
@@ -2922,7 +2869,8 @@ int perf_event__synthesize_attrs(struct perf_tool *tool, | |||
2922 | return err; | 2869 | return err; |
2923 | } | 2870 | } |
2924 | 2871 | ||
2925 | int perf_event__process_attr(union perf_event *event, | 2872 | int perf_event__process_attr(struct perf_tool *tool __maybe_unused, |
2873 | union perf_event *event, | ||
2926 | struct perf_evlist **pevlist) | 2874 | struct perf_evlist **pevlist) |
2927 | { | 2875 | { |
2928 | u32 i, ids, n_ids; | 2876 | u32 i, ids, n_ids; |
@@ -2961,64 +2909,6 @@ int perf_event__process_attr(union perf_event *event, | |||
2961 | return 0; | 2909 | return 0; |
2962 | } | 2910 | } |
2963 | 2911 | ||
2964 | int perf_event__synthesize_event_type(struct perf_tool *tool, | ||
2965 | u64 event_id, char *name, | ||
2966 | perf_event__handler_t process, | ||
2967 | struct machine *machine) | ||
2968 | { | ||
2969 | union perf_event ev; | ||
2970 | size_t size = 0; | ||
2971 | int err = 0; | ||
2972 | |||
2973 | memset(&ev, 0, sizeof(ev)); | ||
2974 | |||
2975 | ev.event_type.event_type.event_id = event_id; | ||
2976 | memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME); | ||
2977 | strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1); | ||
2978 | |||
2979 | ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; | ||
2980 | size = strlen(ev.event_type.event_type.name); | ||
2981 | size = PERF_ALIGN(size, sizeof(u64)); | ||
2982 | ev.event_type.header.size = sizeof(ev.event_type) - | ||
2983 | (sizeof(ev.event_type.event_type.name) - size); | ||
2984 | |||
2985 | err = process(tool, &ev, NULL, machine); | ||
2986 | |||
2987 | return err; | ||
2988 | } | ||
2989 | |||
2990 | int perf_event__synthesize_event_types(struct perf_tool *tool, | ||
2991 | perf_event__handler_t process, | ||
2992 | struct machine *machine) | ||
2993 | { | ||
2994 | struct perf_trace_event_type *type; | ||
2995 | int i, err = 0; | ||
2996 | |||
2997 | for (i = 0; i < trace_event_count; i++) { | ||
2998 | type = &trace_events[i]; | ||
2999 | |||
3000 | err = perf_event__synthesize_event_type(tool, type->event_id, | ||
3001 | type->name, process, | ||
3002 | machine); | ||
3003 | if (err) { | ||
3004 | pr_debug("failed to create perf header event type\n"); | ||
3005 | return err; | ||
3006 | } | ||
3007 | } | ||
3008 | |||
3009 | return err; | ||
3010 | } | ||
3011 | |||
3012 | int perf_event__process_event_type(struct perf_tool *tool __maybe_unused, | ||
3013 | union perf_event *event) | ||
3014 | { | ||
3015 | if (perf_header__push_event(event->event_type.event_type.event_id, | ||
3016 | event->event_type.event_type.name) < 0) | ||
3017 | return -ENOMEM; | ||
3018 | |||
3019 | return 0; | ||
3020 | } | ||
3021 | |||
3022 | int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, | 2912 | int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, |
3023 | struct perf_evlist *evlist, | 2913 | struct perf_evlist *evlist, |
3024 | perf_event__handler_t process) | 2914 | perf_event__handler_t process) |
@@ -3065,7 +2955,8 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, | |||
3065 | return aligned_size; | 2955 | return aligned_size; |
3066 | } | 2956 | } |
3067 | 2957 | ||
3068 | int perf_event__process_tracing_data(union perf_event *event, | 2958 | int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused, |
2959 | union perf_event *event, | ||
3069 | struct perf_session *session) | 2960 | struct perf_session *session) |
3070 | { | 2961 | { |
3071 | ssize_t size_read, padding, size = event->tracing_data.size; | 2962 | ssize_t size_read, padding, size = event->tracing_data.size; |
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 16a3e83c584e..307c9aed972e 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h | |||
@@ -34,6 +34,11 @@ enum { | |||
34 | HEADER_FEAT_BITS = 256, | 34 | HEADER_FEAT_BITS = 256, |
35 | }; | 35 | }; |
36 | 36 | ||
37 | enum perf_header_version { | ||
38 | PERF_HEADER_VERSION_1, | ||
39 | PERF_HEADER_VERSION_2, | ||
40 | }; | ||
41 | |||
37 | struct perf_file_section { | 42 | struct perf_file_section { |
38 | u64 offset; | 43 | u64 offset; |
39 | u64 size; | 44 | u64 size; |
@@ -45,6 +50,7 @@ struct perf_file_header { | |||
45 | u64 attr_size; | 50 | u64 attr_size; |
46 | struct perf_file_section attrs; | 51 | struct perf_file_section attrs; |
47 | struct perf_file_section data; | 52 | struct perf_file_section data; |
53 | /* event_types is ignored */ | ||
48 | struct perf_file_section event_types; | 54 | struct perf_file_section event_types; |
49 | DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); | 55 | DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); |
50 | }; | 56 | }; |
@@ -84,28 +90,24 @@ struct perf_session_env { | |||
84 | }; | 90 | }; |
85 | 91 | ||
86 | struct perf_header { | 92 | struct perf_header { |
87 | bool needs_swap; | 93 | enum perf_header_version version; |
88 | s64 attr_offset; | 94 | bool needs_swap; |
89 | u64 data_offset; | 95 | u64 data_offset; |
90 | u64 data_size; | 96 | u64 data_size; |
91 | u64 event_offset; | 97 | u64 feat_offset; |
92 | u64 event_size; | ||
93 | DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); | 98 | DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); |
94 | struct perf_session_env env; | 99 | struct perf_session_env env; |
95 | }; | 100 | }; |
96 | 101 | ||
97 | struct perf_evlist; | 102 | struct perf_evlist; |
98 | struct perf_session; | 103 | struct perf_session; |
99 | 104 | ||
100 | int perf_session__read_header(struct perf_session *session, int fd); | 105 | int perf_session__read_header(struct perf_session *session); |
101 | int perf_session__write_header(struct perf_session *session, | 106 | int perf_session__write_header(struct perf_session *session, |
102 | struct perf_evlist *evlist, | 107 | struct perf_evlist *evlist, |
103 | int fd, bool at_exit); | 108 | int fd, bool at_exit); |
104 | int perf_header__write_pipe(int fd); | 109 | int perf_header__write_pipe(int fd); |
105 | 110 | ||
106 | int perf_header__push_event(u64 id, const char *name); | ||
107 | char *perf_header__find_event(u64 id); | ||
108 | |||
109 | void perf_header__set_feat(struct perf_header *header, int feat); | 111 | void perf_header__set_feat(struct perf_header *header, int feat); |
110 | void perf_header__clear_feat(struct perf_header *header, int feat); | 112 | void perf_header__clear_feat(struct perf_header *header, int feat); |
111 | bool perf_header__has_feat(const struct perf_header *header, int feat); | 113 | bool perf_header__has_feat(const struct perf_header *header, int feat); |
@@ -130,22 +132,14 @@ int perf_event__synthesize_attr(struct perf_tool *tool, | |||
130 | int perf_event__synthesize_attrs(struct perf_tool *tool, | 132 | int perf_event__synthesize_attrs(struct perf_tool *tool, |
131 | struct perf_session *session, | 133 | struct perf_session *session, |
132 | perf_event__handler_t process); | 134 | perf_event__handler_t process); |
133 | int perf_event__process_attr(union perf_event *event, struct perf_evlist **pevlist); | 135 | int perf_event__process_attr(struct perf_tool *tool, union perf_event *event, |
134 | 136 | struct perf_evlist **pevlist); | |
135 | int perf_event__synthesize_event_type(struct perf_tool *tool, | ||
136 | u64 event_id, char *name, | ||
137 | perf_event__handler_t process, | ||
138 | struct machine *machine); | ||
139 | int perf_event__synthesize_event_types(struct perf_tool *tool, | ||
140 | perf_event__handler_t process, | ||
141 | struct machine *machine); | ||
142 | int perf_event__process_event_type(struct perf_tool *tool, | ||
143 | union perf_event *event); | ||
144 | 137 | ||
145 | int perf_event__synthesize_tracing_data(struct perf_tool *tool, | 138 | int perf_event__synthesize_tracing_data(struct perf_tool *tool, |
146 | int fd, struct perf_evlist *evlist, | 139 | int fd, struct perf_evlist *evlist, |
147 | perf_event__handler_t process); | 140 | perf_event__handler_t process); |
148 | int perf_event__process_tracing_data(union perf_event *event, | 141 | int perf_event__process_tracing_data(struct perf_tool *tool, |
142 | union perf_event *event, | ||
149 | struct perf_session *session); | 143 | struct perf_session *session); |
150 | 144 | ||
151 | int perf_event__synthesize_build_id(struct perf_tool *tool, | 145 | int perf_event__synthesize_build_id(struct perf_tool *tool, |
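Note on the header.h hunks above: the event_type synthesize/process entry points are removed, and the remaining process hooks (attr, tracing data) now take a struct perf_tool as their first argument, matching the other handlers. A standalone sketch of the resulting callback shape, using stand-in types rather than perf's real structs:

#include <stdio.h>

/* Stand-in types: only the handler shape matters here (tool first, then
 * event), not perf's real definitions. */
struct tool  { const char *name; int events_seen; };
struct event { int type; };

typedef int (*event_handler_t)(struct tool *tool, struct event *event);

static int process_any(struct tool *tool, struct event *event)
{
        tool->events_seen++;
        printf("%s: handled event type %d\n", tool->name, event->type);
        return 0;
}

int main(void)
{
        struct tool report = { "report", 0 };
        struct event ev = { 42 };
        event_handler_t handler = process_any;  /* one type fits every hook */

        return handler(&report, &ev);
}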
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index b11a6cfdb414..46a0d35a05e1 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -24,7 +24,8 @@ enum hist_filter { | |||
24 | struct callchain_param callchain_param = { | 24 | struct callchain_param callchain_param = { |
25 | .mode = CHAIN_GRAPH_REL, | 25 | .mode = CHAIN_GRAPH_REL, |
26 | .min_percent = 0.5, | 26 | .min_percent = 0.5, |
27 | .order = ORDER_CALLEE | 27 | .order = ORDER_CALLEE, |
28 | .key = CCKEY_FUNCTION | ||
28 | }; | 29 | }; |
29 | 30 | ||
30 | u16 hists__col_len(struct hists *hists, enum hist_column col) | 31 | u16 hists__col_len(struct hists *hists, enum hist_column col) |
@@ -912,6 +913,7 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists, | |||
912 | rb_link_node(&he->rb_node_in, parent, p); | 913 | rb_link_node(&he->rb_node_in, parent, p); |
913 | rb_insert_color(&he->rb_node_in, root); | 914 | rb_insert_color(&he->rb_node_in, root); |
914 | hists__inc_nr_entries(hists, he); | 915 | hists__inc_nr_entries(hists, he); |
916 | he->dummy = true; | ||
915 | } | 917 | } |
916 | out: | 918 | out: |
917 | return he; | 919 | return he; |
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 2d3790fd99bb..1329b6b6ffe6 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h | |||
@@ -141,10 +141,12 @@ struct perf_hpp { | |||
141 | }; | 141 | }; |
142 | 142 | ||
143 | struct perf_hpp_fmt { | 143 | struct perf_hpp_fmt { |
144 | int (*header)(struct perf_hpp *hpp); | 144 | int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp); |
145 | int (*width)(struct perf_hpp *hpp); | 145 | int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp); |
146 | int (*color)(struct perf_hpp *hpp, struct hist_entry *he); | 146 | int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, |
147 | int (*entry)(struct perf_hpp *hpp, struct hist_entry *he); | 147 | struct hist_entry *he); |
148 | int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, | ||
149 | struct hist_entry *he); | ||
148 | 150 | ||
149 | struct list_head list; | 151 | struct list_head list; |
150 | }; | 152 | }; |
@@ -157,7 +159,7 @@ extern struct list_head perf_hpp__list; | |||
157 | extern struct perf_hpp_fmt perf_hpp__format[]; | 159 | extern struct perf_hpp_fmt perf_hpp__format[]; |
158 | 160 | ||
159 | enum { | 161 | enum { |
160 | PERF_HPP__BASELINE, | 162 | /* Matches perf_hpp__format array. */ |
161 | PERF_HPP__OVERHEAD, | 163 | PERF_HPP__OVERHEAD, |
162 | PERF_HPP__OVERHEAD_SYS, | 164 | PERF_HPP__OVERHEAD_SYS, |
163 | PERF_HPP__OVERHEAD_US, | 165 | PERF_HPP__OVERHEAD_US, |
@@ -165,11 +167,6 @@ enum { | |||
165 | PERF_HPP__OVERHEAD_GUEST_US, | 167 | PERF_HPP__OVERHEAD_GUEST_US, |
166 | PERF_HPP__SAMPLES, | 168 | PERF_HPP__SAMPLES, |
167 | PERF_HPP__PERIOD, | 169 | PERF_HPP__PERIOD, |
168 | PERF_HPP__PERIOD_BASELINE, | ||
169 | PERF_HPP__DELTA, | ||
170 | PERF_HPP__RATIO, | ||
171 | PERF_HPP__WEIGHTED_DIFF, | ||
172 | PERF_HPP__FORMULA, | ||
173 | 170 | ||
174 | PERF_HPP__MAX_INDEX | 171 | PERF_HPP__MAX_INDEX |
175 | }; | 172 | }; |
@@ -177,8 +174,6 @@ enum { | |||
177 | void perf_hpp__init(void); | 174 | void perf_hpp__init(void); |
178 | void perf_hpp__column_register(struct perf_hpp_fmt *format); | 175 | void perf_hpp__column_register(struct perf_hpp_fmt *format); |
179 | void perf_hpp__column_enable(unsigned col); | 176 | void perf_hpp__column_enable(unsigned col); |
180 | int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, | ||
181 | bool color); | ||
182 | 177 | ||
183 | struct perf_evlist; | 178 | struct perf_evlist; |
184 | 179 | ||
@@ -245,11 +240,4 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused, | |||
245 | #endif | 240 | #endif |
246 | 241 | ||
247 | unsigned int hists__sort_list_width(struct hists *self); | 242 | unsigned int hists__sort_list_width(struct hists *self); |
248 | |||
249 | double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair); | ||
250 | double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair); | ||
251 | s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair); | ||
252 | int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair, | ||
253 | char *buf, size_t size); | ||
254 | double perf_diff__period_percent(struct hist_entry *he, u64 period); | ||
255 | #endif /* __PERF_HIST_H */ | 243 | #endif /* __PERF_HIST_H */ |
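Passing the perf_hpp_fmt itself into header/width/color/entry lets an implementation recover a containing struct and keep per-column state. That motivation is an assumption (it is not spelled out in this hunk), but the container_of pattern it enables looks like this in self-contained form:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-in for perf_hpp_fmt: the callback receives the format. */
struct hpp_fmt {
        int (*header)(struct hpp_fmt *fmt, char *buf, size_t size);
};

struct wrapped_fmt {
        struct hpp_fmt fmt;     /* embedded, passed to the callback */
        const char *title;      /* per-column state */
};

static int wrapped_header(struct hpp_fmt *fmt, char *buf, size_t size)
{
        struct wrapped_fmt *w = container_of(fmt, struct wrapped_fmt, fmt);

        return snprintf(buf, size, "%s", w->title);
}

int main(void)
{
        struct wrapped_fmt col = { { wrapped_header }, "Overhead" };
        char buf[32];

        col.fmt.header(&col.fmt, buf, sizeof(buf));
        printf("%s\n", buf);
        return 0;
}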
diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h index 6f19c548ecc0..97a800738226 100644 --- a/tools/perf/util/include/linux/string.h +++ b/tools/perf/util/include/linux/string.h | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <string.h> | 1 | #include <string.h> |
2 | 2 | ||
3 | void *memdup(const void *src, size_t len); | 3 | void *memdup(const void *src, size_t len); |
4 | int str_append(char **s, int *len, const char *a); | ||
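str_append() is only declared here; its behaviour has to be inferred from the parse_events_fixup() caller later in this patch, which repeatedly appends short pieces onto a growing heap string. A minimal sketch under that assumption (*len taken to be the allocated size; the real helper may differ):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int str_append(char **s, int *len, const char *a)
{
        int olen = *s ? (int)strlen(*s) : 0;
        int need = olen + (int)strlen(a) + 1;

        if (need > *len) {
                char *tmp = realloc(*s, need);

                if (!tmp)
                        return -ENOMEM;
                if (!*s)
                        tmp[0] = '\0';      /* fresh buffer: start empty */
                *s = tmp;
                *len = need;
        }
        strcat(*s, a);
        return 0;
}

int main(void)
{
        char *s = NULL;
        int len = 0;

        str_append(&s, &len, "cpu/");
        str_append(&s, &len, "cycles");
        str_append(&s, &len, "/");
        printf("%s\n", s);                  /* prints: cpu/cycles/ */
        free(s);
        return 0;
}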
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index b2ecad6ec46b..1dca61f0512d 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c | |||
@@ -25,12 +25,15 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid) | |||
25 | machine->kmaps.machine = machine; | 25 | machine->kmaps.machine = machine; |
26 | machine->pid = pid; | 26 | machine->pid = pid; |
27 | 27 | ||
28 | machine->symbol_filter = NULL; | ||
29 | |||
28 | machine->root_dir = strdup(root_dir); | 30 | machine->root_dir = strdup(root_dir); |
29 | if (machine->root_dir == NULL) | 31 | if (machine->root_dir == NULL) |
30 | return -ENOMEM; | 32 | return -ENOMEM; |
31 | 33 | ||
32 | if (pid != HOST_KERNEL_ID) { | 34 | if (pid != HOST_KERNEL_ID) { |
33 | struct thread *thread = machine__findnew_thread(machine, pid); | 35 | struct thread *thread = machine__findnew_thread(machine, 0, |
36 | pid); | ||
34 | char comm[64]; | 37 | char comm[64]; |
35 | 38 | ||
36 | if (thread == NULL) | 39 | if (thread == NULL) |
@@ -95,6 +98,7 @@ void machines__init(struct machines *machines) | |||
95 | { | 98 | { |
96 | machine__init(&machines->host, "", HOST_KERNEL_ID); | 99 | machine__init(&machines->host, "", HOST_KERNEL_ID); |
97 | machines->guests = RB_ROOT; | 100 | machines->guests = RB_ROOT; |
101 | machines->symbol_filter = NULL; | ||
98 | } | 102 | } |
99 | 103 | ||
100 | void machines__exit(struct machines *machines) | 104 | void machines__exit(struct machines *machines) |
@@ -118,6 +122,8 @@ struct machine *machines__add(struct machines *machines, pid_t pid, | |||
118 | return NULL; | 122 | return NULL; |
119 | } | 123 | } |
120 | 124 | ||
125 | machine->symbol_filter = machines->symbol_filter; | ||
126 | |||
121 | while (*p != NULL) { | 127 | while (*p != NULL) { |
122 | parent = *p; | 128 | parent = *p; |
123 | pos = rb_entry(parent, struct machine, rb_node); | 129 | pos = rb_entry(parent, struct machine, rb_node); |
@@ -133,6 +139,21 @@ struct machine *machines__add(struct machines *machines, pid_t pid, | |||
133 | return machine; | 139 | return machine; |
134 | } | 140 | } |
135 | 141 | ||
142 | void machines__set_symbol_filter(struct machines *machines, | ||
143 | symbol_filter_t symbol_filter) | ||
144 | { | ||
145 | struct rb_node *nd; | ||
146 | |||
147 | machines->symbol_filter = symbol_filter; | ||
148 | machines->host.symbol_filter = symbol_filter; | ||
149 | |||
150 | for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { | ||
151 | struct machine *machine = rb_entry(nd, struct machine, rb_node); | ||
152 | |||
153 | machine->symbol_filter = symbol_filter; | ||
154 | } | ||
155 | } | ||
156 | |||
136 | struct machine *machines__find(struct machines *machines, pid_t pid) | 157 | struct machine *machines__find(struct machines *machines, pid_t pid) |
137 | { | 158 | { |
138 | struct rb_node **p = &machines->guests.rb_node; | 159 | struct rb_node **p = &machines->guests.rb_node; |
@@ -233,7 +254,8 @@ void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size) | |||
233 | return; | 254 | return; |
234 | } | 255 | } |
235 | 256 | ||
236 | static struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, | 257 | static struct thread *__machine__findnew_thread(struct machine *machine, |
258 | pid_t pid, pid_t tid, | ||
237 | bool create) | 259 | bool create) |
238 | { | 260 | { |
239 | struct rb_node **p = &machine->threads.rb_node; | 261 | struct rb_node **p = &machine->threads.rb_node; |
@@ -241,23 +263,28 @@ static struct thread *__machine__findnew_thread(struct machine *machine, pid_t p | |||
241 | struct thread *th; | 263 | struct thread *th; |
242 | 264 | ||
243 | /* | 265 | /* |
244 | * Font-end cache - PID lookups come in blocks, | 266 | * Front-end cache - TID lookups come in blocks, |
245 | * so most of the time we dont have to look up | 267 | * so most of the time we dont have to look up |
246 | * the full rbtree: | 268 | * the full rbtree: |
247 | */ | 269 | */ |
248 | if (machine->last_match && machine->last_match->pid == pid) | 270 | if (machine->last_match && machine->last_match->tid == tid) { |
271 | if (pid && pid != machine->last_match->pid_) | ||
272 | machine->last_match->pid_ = pid; | ||
249 | return machine->last_match; | 273 | return machine->last_match; |
274 | } | ||
250 | 275 | ||
251 | while (*p != NULL) { | 276 | while (*p != NULL) { |
252 | parent = *p; | 277 | parent = *p; |
253 | th = rb_entry(parent, struct thread, rb_node); | 278 | th = rb_entry(parent, struct thread, rb_node); |
254 | 279 | ||
255 | if (th->pid == pid) { | 280 | if (th->tid == tid) { |
256 | machine->last_match = th; | 281 | machine->last_match = th; |
282 | if (pid && pid != th->pid_) | ||
283 | th->pid_ = pid; | ||
257 | return th; | 284 | return th; |
258 | } | 285 | } |
259 | 286 | ||
260 | if (pid < th->pid) | 287 | if (tid < th->tid) |
261 | p = &(*p)->rb_left; | 288 | p = &(*p)->rb_left; |
262 | else | 289 | else |
263 | p = &(*p)->rb_right; | 290 | p = &(*p)->rb_right; |
@@ -266,7 +293,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine, pid_t p | |||
266 | if (!create) | 293 | if (!create) |
267 | return NULL; | 294 | return NULL; |
268 | 295 | ||
269 | th = thread__new(pid); | 296 | th = thread__new(pid, tid); |
270 | if (th != NULL) { | 297 | if (th != NULL) { |
271 | rb_link_node(&th->rb_node, parent, p); | 298 | rb_link_node(&th->rb_node, parent, p); |
272 | rb_insert_color(&th->rb_node, &machine->threads); | 299 | rb_insert_color(&th->rb_node, &machine->threads); |
@@ -276,19 +303,22 @@ static struct thread *__machine__findnew_thread(struct machine *machine, pid_t p | |||
276 | return th; | 303 | return th; |
277 | } | 304 | } |
278 | 305 | ||
279 | struct thread *machine__findnew_thread(struct machine *machine, pid_t pid) | 306 | struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, |
307 | pid_t tid) | ||
280 | { | 308 | { |
281 | return __machine__findnew_thread(machine, pid, true); | 309 | return __machine__findnew_thread(machine, pid, tid, true); |
282 | } | 310 | } |
283 | 311 | ||
284 | struct thread *machine__find_thread(struct machine *machine, pid_t pid) | 312 | struct thread *machine__find_thread(struct machine *machine, pid_t tid) |
285 | { | 313 | { |
286 | return __machine__findnew_thread(machine, pid, false); | 314 | return __machine__findnew_thread(machine, 0, tid, false); |
287 | } | 315 | } |
288 | 316 | ||
289 | int machine__process_comm_event(struct machine *machine, union perf_event *event) | 317 | int machine__process_comm_event(struct machine *machine, union perf_event *event) |
290 | { | 318 | { |
291 | struct thread *thread = machine__findnew_thread(machine, event->comm.tid); | 319 | struct thread *thread = machine__findnew_thread(machine, |
320 | event->comm.pid, | ||
321 | event->comm.tid); | ||
292 | 322 | ||
293 | if (dump_trace) | 323 | if (dump_trace) |
294 | perf_event__fprintf_comm(event, stdout); | 324 | perf_event__fprintf_comm(event, stdout); |
@@ -628,10 +658,8 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type, | |||
628 | struct map *map = machine->vmlinux_maps[type]; | 658 | struct map *map = machine->vmlinux_maps[type]; |
629 | int ret = dso__load_vmlinux_path(map->dso, map, filter); | 659 | int ret = dso__load_vmlinux_path(map->dso, map, filter); |
630 | 660 | ||
631 | if (ret > 0) { | 661 | if (ret > 0) |
632 | dso__set_loaded(map->dso, type); | 662 | dso__set_loaded(map->dso, type); |
633 | map__reloc_vmlinux(map); | ||
634 | } | ||
635 | 663 | ||
636 | return ret; | 664 | return ret; |
637 | } | 665 | } |
@@ -808,7 +836,10 @@ static int machine__create_modules(struct machine *machine) | |||
808 | free(line); | 836 | free(line); |
809 | fclose(file); | 837 | fclose(file); |
810 | 838 | ||
811 | return machine__set_modules_path(machine); | 839 | if (machine__set_modules_path(machine) < 0) { |
840 | pr_debug("Problems setting modules path maps, continuing anyway...\n"); | ||
841 | } | ||
842 | return 0; | ||
812 | 843 | ||
813 | out_delete_line: | 844 | out_delete_line: |
814 | free(line); | 845 | free(line); |
@@ -858,6 +889,18 @@ static void machine__set_kernel_mmap_len(struct machine *machine, | |||
858 | } | 889 | } |
859 | } | 890 | } |
860 | 891 | ||
892 | static bool machine__uses_kcore(struct machine *machine) | ||
893 | { | ||
894 | struct dso *dso; | ||
895 | |||
896 | list_for_each_entry(dso, &machine->kernel_dsos, node) { | ||
897 | if (dso__is_kcore(dso)) | ||
898 | return true; | ||
899 | } | ||
900 | |||
901 | return false; | ||
902 | } | ||
903 | |||
861 | static int machine__process_kernel_mmap_event(struct machine *machine, | 904 | static int machine__process_kernel_mmap_event(struct machine *machine, |
862 | union perf_event *event) | 905 | union perf_event *event) |
863 | { | 906 | { |
@@ -866,6 +909,10 @@ static int machine__process_kernel_mmap_event(struct machine *machine, | |||
866 | enum dso_kernel_type kernel_type; | 909 | enum dso_kernel_type kernel_type; |
867 | bool is_kernel_mmap; | 910 | bool is_kernel_mmap; |
868 | 911 | ||
912 | /* If we have maps from kcore then we do not need or want any others */ | ||
913 | if (machine__uses_kcore(machine)) | ||
914 | return 0; | ||
915 | |||
869 | machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix)); | 916 | machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix)); |
870 | if (machine__is_host(machine)) | 917 | if (machine__is_host(machine)) |
871 | kernel_type = DSO_TYPE_KERNEL; | 918 | kernel_type = DSO_TYPE_KERNEL; |
@@ -969,7 +1016,8 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event | |||
969 | return 0; | 1016 | return 0; |
970 | } | 1017 | } |
971 | 1018 | ||
972 | thread = machine__findnew_thread(machine, event->mmap.pid); | 1019 | thread = machine__findnew_thread(machine, event->mmap.pid, |
1020 | event->mmap.pid); | ||
973 | if (thread == NULL) | 1021 | if (thread == NULL) |
974 | goto out_problem; | 1022 | goto out_problem; |
975 | 1023 | ||
@@ -994,11 +1042,30 @@ out_problem: | |||
994 | return 0; | 1042 | return 0; |
995 | } | 1043 | } |
996 | 1044 | ||
1045 | static void machine__remove_thread(struct machine *machine, struct thread *th) | ||
1046 | { | ||
1047 | machine->last_match = NULL; | ||
1048 | rb_erase(&th->rb_node, &machine->threads); | ||
1049 | /* | ||
1050 | * We may have references to this thread, for instance in some hist_entry | ||
1051 | * instances, so just move them to a separate list. | ||
1052 | */ | ||
1053 | list_add_tail(&th->node, &machine->dead_threads); | ||
1054 | } | ||
1055 | |||
997 | int machine__process_fork_event(struct machine *machine, union perf_event *event) | 1056 | int machine__process_fork_event(struct machine *machine, union perf_event *event) |
998 | { | 1057 | { |
999 | struct thread *thread = machine__findnew_thread(machine, event->fork.tid); | 1058 | struct thread *thread = machine__find_thread(machine, event->fork.tid); |
1000 | struct thread *parent = machine__findnew_thread(machine, event->fork.ptid); | 1059 | struct thread *parent = machine__findnew_thread(machine, |
1060 | event->fork.ppid, | ||
1061 | event->fork.ptid); | ||
1001 | 1062 | ||
1063 | /* if a thread currently exists for the thread id remove it */ | ||
1064 | if (thread != NULL) | ||
1065 | machine__remove_thread(machine, thread); | ||
1066 | |||
1067 | thread = machine__findnew_thread(machine, event->fork.pid, | ||
1068 | event->fork.tid); | ||
1002 | if (dump_trace) | 1069 | if (dump_trace) |
1003 | perf_event__fprintf_task(event, stdout); | 1070 | perf_event__fprintf_task(event, stdout); |
1004 | 1071 | ||
@@ -1011,18 +1078,8 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event | |||
1011 | return 0; | 1078 | return 0; |
1012 | } | 1079 | } |
1013 | 1080 | ||
1014 | static void machine__remove_thread(struct machine *machine, struct thread *th) | 1081 | int machine__process_exit_event(struct machine *machine __maybe_unused, |
1015 | { | 1082 | union perf_event *event) |
1016 | machine->last_match = NULL; | ||
1017 | rb_erase(&th->rb_node, &machine->threads); | ||
1018 | /* | ||
1019 | * We may have references to this thread, for instance in some hist_entry | ||
1020 | * instances, so just move them to a separate list. | ||
1021 | */ | ||
1022 | list_add_tail(&th->node, &machine->dead_threads); | ||
1023 | } | ||
1024 | |||
1025 | int machine__process_exit_event(struct machine *machine, union perf_event *event) | ||
1026 | { | 1083 | { |
1027 | struct thread *thread = machine__find_thread(machine, event->fork.tid); | 1084 | struct thread *thread = machine__find_thread(machine, event->fork.tid); |
1028 | 1085 | ||
@@ -1030,7 +1087,7 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event | |||
1030 | perf_event__fprintf_task(event, stdout); | 1087 | perf_event__fprintf_task(event, stdout); |
1031 | 1088 | ||
1032 | if (thread != NULL) | 1089 | if (thread != NULL) |
1033 | machine__remove_thread(machine, thread); | 1090 | thread__exited(thread); |
1034 | 1091 | ||
1035 | return 0; | 1092 | return 0; |
1036 | } | 1093 | } |
@@ -1058,11 +1115,10 @@ int machine__process_event(struct machine *machine, union perf_event *event) | |||
1058 | return ret; | 1115 | return ret; |
1059 | } | 1116 | } |
1060 | 1117 | ||
1061 | static bool symbol__match_parent_regex(struct symbol *sym) | 1118 | static bool symbol__match_regex(struct symbol *sym, regex_t *regex) |
1062 | { | 1119 | { |
1063 | if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) | 1120 | if (sym->name && !regexec(regex, sym->name, 0, NULL, 0)) |
1064 | return 1; | 1121 | return 1; |
1065 | |||
1066 | return 0; | 1122 | return 0; |
1067 | } | 1123 | } |
1068 | 1124 | ||
@@ -1094,7 +1150,7 @@ static void ip__resolve_ams(struct machine *machine, struct thread *thread, | |||
1094 | * or else, the symbol is unknown | 1150 | * or else, the symbol is unknown |
1095 | */ | 1151 | */ |
1096 | thread__find_addr_location(thread, machine, m, MAP__FUNCTION, | 1152 | thread__find_addr_location(thread, machine, m, MAP__FUNCTION, |
1097 | ip, &al, NULL); | 1153 | ip, &al); |
1098 | if (al.sym) | 1154 | if (al.sym) |
1099 | goto found; | 1155 | goto found; |
1100 | } | 1156 | } |
@@ -1112,8 +1168,8 @@ static void ip__resolve_data(struct machine *machine, struct thread *thread, | |||
1112 | 1168 | ||
1113 | memset(&al, 0, sizeof(al)); | 1169 | memset(&al, 0, sizeof(al)); |
1114 | 1170 | ||
1115 | thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr, &al, | 1171 | thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr, |
1116 | NULL); | 1172 | &al); |
1117 | ams->addr = addr; | 1173 | ams->addr = addr; |
1118 | ams->al_addr = al.addr; | 1174 | ams->al_addr = al.addr; |
1119 | ams->sym = al.sym; | 1175 | ams->sym = al.sym; |
@@ -1159,8 +1215,8 @@ struct branch_info *machine__resolve_bstack(struct machine *machine, | |||
1159 | static int machine__resolve_callchain_sample(struct machine *machine, | 1215 | static int machine__resolve_callchain_sample(struct machine *machine, |
1160 | struct thread *thread, | 1216 | struct thread *thread, |
1161 | struct ip_callchain *chain, | 1217 | struct ip_callchain *chain, |
1162 | struct symbol **parent) | 1218 | struct symbol **parent, |
1163 | 1219 | struct addr_location *root_al) | |
1164 | { | 1220 | { |
1165 | u8 cpumode = PERF_RECORD_MISC_USER; | 1221 | u8 cpumode = PERF_RECORD_MISC_USER; |
1166 | unsigned int i; | 1222 | unsigned int i; |
@@ -1208,11 +1264,18 @@ static int machine__resolve_callchain_sample(struct machine *machine, | |||
1208 | 1264 | ||
1209 | al.filtered = false; | 1265 | al.filtered = false; |
1210 | thread__find_addr_location(thread, machine, cpumode, | 1266 | thread__find_addr_location(thread, machine, cpumode, |
1211 | MAP__FUNCTION, ip, &al, NULL); | 1267 | MAP__FUNCTION, ip, &al); |
1212 | if (al.sym != NULL) { | 1268 | if (al.sym != NULL) { |
1213 | if (sort__has_parent && !*parent && | 1269 | if (sort__has_parent && !*parent && |
1214 | symbol__match_parent_regex(al.sym)) | 1270 | symbol__match_regex(al.sym, &parent_regex)) |
1215 | *parent = al.sym; | 1271 | *parent = al.sym; |
1272 | else if (have_ignore_callees && root_al && | ||
1273 | symbol__match_regex(al.sym, &ignore_callees_regex)) { | ||
1274 | /* Treat this symbol as the root, | ||
1275 | forgetting its callees. */ | ||
1276 | *root_al = al; | ||
1277 | callchain_cursor_reset(&callchain_cursor); | ||
1278 | } | ||
1216 | if (!symbol_conf.use_callchain) | 1279 | if (!symbol_conf.use_callchain) |
1217 | break; | 1280 | break; |
1218 | } | 1281 | } |
@@ -1237,15 +1300,13 @@ int machine__resolve_callchain(struct machine *machine, | |||
1237 | struct perf_evsel *evsel, | 1300 | struct perf_evsel *evsel, |
1238 | struct thread *thread, | 1301 | struct thread *thread, |
1239 | struct perf_sample *sample, | 1302 | struct perf_sample *sample, |
1240 | struct symbol **parent) | 1303 | struct symbol **parent, |
1241 | 1304 | struct addr_location *root_al) | |
1242 | { | 1305 | { |
1243 | int ret; | 1306 | int ret; |
1244 | 1307 | ||
1245 | callchain_cursor_reset(&callchain_cursor); | ||
1246 | |||
1247 | ret = machine__resolve_callchain_sample(machine, thread, | 1308 | ret = machine__resolve_callchain_sample(machine, thread, |
1248 | sample->callchain, parent); | 1309 | sample->callchain, parent, root_al); |
1249 | if (ret) | 1310 | if (ret) |
1250 | return ret; | 1311 | return ret; |
1251 | 1312 | ||
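The thread-lookup changes above are the core of this file: threads are now keyed by tid, the last_match front-end cache is checked first, and a caller-supplied non-zero pid backfills thread->pid_. A standalone sketch of those rules, with a flat array standing in for the rbtree:

#include <stdio.h>
#include <sys/types.h>

struct thread { pid_t pid_; pid_t tid; };

struct machine {
        struct thread threads[8];
        int nr_threads;
        struct thread *last_match;
};

static struct thread *machine_find_thread(struct machine *m, pid_t pid, pid_t tid)
{
        struct thread *th = m->last_match;
        int i;

        /* Front-end cache: TID lookups come in blocks */
        if (th && th->tid == tid) {
                if (pid && pid != th->pid_)
                        th->pid_ = pid;
                return th;
        }

        for (i = 0; i < m->nr_threads; i++) {
                th = &m->threads[i];
                if (th->tid == tid) {
                        m->last_match = th;
                        if (pid && pid != th->pid_)
                                th->pid_ = pid;
                        return th;
                }
        }
        return NULL;
}

int main(void)
{
        struct machine m = { .threads = { { 0, 1234 }, { 0, 1235 } },
                             .nr_threads = 2 };
        struct thread *t = machine_find_thread(&m, 1234, 1235);

        /* tid 1235 found, pid_ backfilled to 1234 */
        printf("tid=%d pid=%d\n", (int)t->tid, (int)t->pid_);
        return 0;
}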
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index 77940680f1fc..0df925ba6a44 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/rbtree.h> | 5 | #include <linux/rbtree.h> |
6 | #include "map.h" | 6 | #include "map.h" |
7 | 7 | ||
8 | struct addr_location; | ||
8 | struct branch_stack; | 9 | struct branch_stack; |
9 | struct perf_evsel; | 10 | struct perf_evsel; |
10 | struct perf_sample; | 11 | struct perf_sample; |
@@ -28,6 +29,7 @@ struct machine { | |||
28 | struct list_head kernel_dsos; | 29 | struct list_head kernel_dsos; |
29 | struct map_groups kmaps; | 30 | struct map_groups kmaps; |
30 | struct map *vmlinux_maps[MAP__NR_TYPES]; | 31 | struct map *vmlinux_maps[MAP__NR_TYPES]; |
32 | symbol_filter_t symbol_filter; | ||
31 | }; | 33 | }; |
32 | 34 | ||
33 | static inline | 35 | static inline |
@@ -36,7 +38,7 @@ struct map *machine__kernel_map(struct machine *machine, enum map_type type) | |||
36 | return machine->vmlinux_maps[type]; | 38 | return machine->vmlinux_maps[type]; |
37 | } | 39 | } |
38 | 40 | ||
39 | struct thread *machine__find_thread(struct machine *machine, pid_t pid); | 41 | struct thread *machine__find_thread(struct machine *machine, pid_t tid); |
40 | 42 | ||
41 | int machine__process_comm_event(struct machine *machine, union perf_event *event); | 43 | int machine__process_comm_event(struct machine *machine, union perf_event *event); |
42 | int machine__process_exit_event(struct machine *machine, union perf_event *event); | 44 | int machine__process_exit_event(struct machine *machine, union perf_event *event); |
@@ -50,6 +52,7 @@ typedef void (*machine__process_t)(struct machine *machine, void *data); | |||
50 | struct machines { | 52 | struct machines { |
51 | struct machine host; | 53 | struct machine host; |
52 | struct rb_root guests; | 54 | struct rb_root guests; |
55 | symbol_filter_t symbol_filter; | ||
53 | }; | 56 | }; |
54 | 57 | ||
55 | void machines__init(struct machines *machines); | 58 | void machines__init(struct machines *machines); |
@@ -67,6 +70,9 @@ struct machine *machines__findnew(struct machines *machines, pid_t pid); | |||
67 | void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size); | 70 | void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size); |
68 | char *machine__mmap_name(struct machine *machine, char *bf, size_t size); | 71 | char *machine__mmap_name(struct machine *machine, char *bf, size_t size); |
69 | 72 | ||
73 | void machines__set_symbol_filter(struct machines *machines, | ||
74 | symbol_filter_t symbol_filter); | ||
75 | |||
70 | int machine__init(struct machine *machine, const char *root_dir, pid_t pid); | 76 | int machine__init(struct machine *machine, const char *root_dir, pid_t pid); |
71 | void machine__exit(struct machine *machine); | 77 | void machine__exit(struct machine *machine); |
72 | void machine__delete_dead_threads(struct machine *machine); | 78 | void machine__delete_dead_threads(struct machine *machine); |
@@ -83,7 +89,8 @@ int machine__resolve_callchain(struct machine *machine, | |||
83 | struct perf_evsel *evsel, | 89 | struct perf_evsel *evsel, |
84 | struct thread *thread, | 90 | struct thread *thread, |
85 | struct perf_sample *sample, | 91 | struct perf_sample *sample, |
86 | struct symbol **parent); | 92 | struct symbol **parent, |
93 | struct addr_location *root_al); | ||
87 | 94 | ||
88 | /* | 95 | /* |
89 | * Default guest kernel is defined by parameter --guestkallsyms | 96 | * Default guest kernel is defined by parameter --guestkallsyms |
@@ -99,7 +106,8 @@ static inline bool machine__is_host(struct machine *machine) | |||
99 | return machine ? machine->pid == HOST_KERNEL_ID : false; | 106 | return machine ? machine->pid == HOST_KERNEL_ID : false; |
100 | } | 107 | } |
101 | 108 | ||
102 | struct thread *machine__findnew_thread(struct machine *machine, pid_t pid); | 109 | struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, |
110 | pid_t tid); | ||
103 | 111 | ||
104 | size_t machine__fprintf(struct machine *machine, FILE *fp); | 112 | size_t machine__fprintf(struct machine *machine, FILE *fp); |
105 | 113 | ||
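machine__resolve_callchain() gaining a root_al parameter is the header side of the ignore-callees handling seen in machine.c above: symbol__match_regex() now tests either parent_regex or ignore_callees_regex, and a match on the latter restarts the chain at that frame. A runnable sketch of that filtering with POSIX regex (the frame names are made up):

#include <regex.h>
#include <stdio.h>

static int symbol_matches(const char *name, regex_t *re)
{
        return name && !regexec(re, name, 0, NULL, 0);
}

int main(void)
{
        /* leaf -> root, in the order a sample callchain is walked */
        const char *frames[] = { "memcpy", "process_event", "main" };
        const char *kept[3];
        regex_t ignore_callees;
        int nr = 0, i;

        regcomp(&ignore_callees, "^process_event$", REG_NOSUB | REG_EXTENDED);

        for (i = 0; i < 3; i++) {
                if (symbol_matches(frames[i], &ignore_callees))
                        nr = 0;          /* callchain_cursor_reset(): forget callees */
                kept[nr++] = frames[i];  /* matching frame becomes the new root */
        }

        for (i = 0; i < nr; i++)         /* prints: process_event, main */
                printf("%s\n", kept[i]);

        regfree(&ignore_callees);
        return 0;
}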
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 8bcdf9e54089..9e8304ca343e 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c | |||
@@ -182,12 +182,6 @@ int map__load(struct map *map, symbol_filter_t filter) | |||
182 | #endif | 182 | #endif |
183 | return -1; | 183 | return -1; |
184 | } | 184 | } |
185 | /* | ||
186 | * Only applies to the kernel, as its symtabs aren't relative like the | ||
187 | * module ones. | ||
188 | */ | ||
189 | if (map->dso->kernel) | ||
190 | map__reloc_vmlinux(map); | ||
191 | 185 | ||
192 | return 0; | 186 | return 0; |
193 | } | 187 | } |
@@ -254,14 +248,18 @@ size_t map__fprintf_dsoname(struct map *map, FILE *fp) | |||
254 | 248 | ||
255 | /* | 249 | /* |
256 | * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN. | 250 | * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN. |
257 | * map->dso->adjust_symbols==1 for ET_EXEC-like cases. | 251 | * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is |
252 | * relative to section start. | ||
258 | */ | 253 | */ |
259 | u64 map__rip_2objdump(struct map *map, u64 rip) | 254 | u64 map__rip_2objdump(struct map *map, u64 rip) |
260 | { | 255 | { |
261 | u64 addr = map->dso->adjust_symbols ? | 256 | if (!map->dso->adjust_symbols) |
262 | map->unmap_ip(map, rip) : /* RIP -> IP */ | 257 | return rip; |
263 | rip; | 258 | |
264 | return addr; | 259 | if (map->dso->rel) |
260 | return rip - map->pgoff; | ||
261 | |||
262 | return map->unmap_ip(map, rip); | ||
265 | } | 263 | } |
266 | 264 | ||
267 | void map_groups__init(struct map_groups *mg) | 265 | void map_groups__init(struct map_groups *mg) |
@@ -513,35 +511,6 @@ int map_groups__clone(struct map_groups *mg, | |||
513 | return 0; | 511 | return 0; |
514 | } | 512 | } |
515 | 513 | ||
516 | static u64 map__reloc_map_ip(struct map *map, u64 ip) | ||
517 | { | ||
518 | return ip + (s64)map->pgoff; | ||
519 | } | ||
520 | |||
521 | static u64 map__reloc_unmap_ip(struct map *map, u64 ip) | ||
522 | { | ||
523 | return ip - (s64)map->pgoff; | ||
524 | } | ||
525 | |||
526 | void map__reloc_vmlinux(struct map *map) | ||
527 | { | ||
528 | struct kmap *kmap = map__kmap(map); | ||
529 | s64 reloc; | ||
530 | |||
531 | if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr) | ||
532 | return; | ||
533 | |||
534 | reloc = (kmap->ref_reloc_sym->unrelocated_addr - | ||
535 | kmap->ref_reloc_sym->addr); | ||
536 | |||
537 | if (!reloc) | ||
538 | return; | ||
539 | |||
540 | map->map_ip = map__reloc_map_ip; | ||
541 | map->unmap_ip = map__reloc_unmap_ip; | ||
542 | map->pgoff = reloc; | ||
543 | } | ||
544 | |||
545 | void maps__insert(struct rb_root *maps, struct map *map) | 514 | void maps__insert(struct rb_root *maps, struct map *map) |
546 | { | 515 | { |
547 | struct rb_node **p = &maps->rb_node; | 516 | struct rb_node **p = &maps->rb_node; |
@@ -586,3 +555,21 @@ struct map *maps__find(struct rb_root *maps, u64 ip) | |||
586 | 555 | ||
587 | return NULL; | 556 | return NULL; |
588 | } | 557 | } |
558 | |||
559 | struct map *maps__first(struct rb_root *maps) | ||
560 | { | ||
561 | struct rb_node *first = rb_first(maps); | ||
562 | |||
563 | if (first) | ||
564 | return rb_entry(first, struct map, rb_node); | ||
565 | return NULL; | ||
566 | } | ||
567 | |||
568 | struct map *maps__next(struct map *map) | ||
569 | { | ||
570 | struct rb_node *next = rb_next(&map->rb_node); | ||
571 | |||
572 | if (next) | ||
573 | return rb_entry(next, struct map, rb_node); | ||
574 | return NULL; | ||
575 | } | ||
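map__rip_2objdump() now has three cases instead of two: unadjusted symbols keep the rip as is, ET_REL objects (the new dso->rel flag) report section-relative addresses so the map's pgoff is subtracted, and everything else goes back through unmap_ip(). Restated as a pure function; the last line assumes the default unmap_ip() identity of ip + map->start - map->pgoff, which is not shown in this hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

static u64 rip_2objdump(u64 rip, u64 start, u64 pgoff,
                        bool adjust_symbols, bool rel)
{
        if (!adjust_symbols)         /* ET_EXEC-like: objdump uses absolute IPs */
                return rip;

        if (rel)                     /* ET_REL: relative to section start */
                return rip - pgoff;

        return rip + start - pgoff;  /* ET_DYN: default unmap_ip() (assumed) */
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)
               rip_2objdump(0x1234, 0x7f0000000000ULL, 0x1000, true, false));
        return 0;
}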
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index a887f2c9dfbb..2cc93cbf0e17 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h | |||
@@ -112,6 +112,8 @@ size_t __map_groups__fprintf_maps(struct map_groups *mg, | |||
112 | void maps__insert(struct rb_root *maps, struct map *map); | 112 | void maps__insert(struct rb_root *maps, struct map *map); |
113 | void maps__remove(struct rb_root *maps, struct map *map); | 113 | void maps__remove(struct rb_root *maps, struct map *map); |
114 | struct map *maps__find(struct rb_root *maps, u64 addr); | 114 | struct map *maps__find(struct rb_root *maps, u64 addr); |
115 | struct map *maps__first(struct rb_root *maps); | ||
116 | struct map *maps__next(struct map *map); | ||
115 | void map_groups__init(struct map_groups *mg); | 117 | void map_groups__init(struct map_groups *mg); |
116 | void map_groups__exit(struct map_groups *mg); | 118 | void map_groups__exit(struct map_groups *mg); |
117 | int map_groups__clone(struct map_groups *mg, | 119 | int map_groups__clone(struct map_groups *mg, |
@@ -139,6 +141,17 @@ static inline struct map *map_groups__find(struct map_groups *mg, | |||
139 | return maps__find(&mg->maps[type], addr); | 141 | return maps__find(&mg->maps[type], addr); |
140 | } | 142 | } |
141 | 143 | ||
144 | static inline struct map *map_groups__first(struct map_groups *mg, | ||
145 | enum map_type type) | ||
146 | { | ||
147 | return maps__first(&mg->maps[type]); | ||
148 | } | ||
149 | |||
150 | static inline struct map *map_groups__next(struct map *map) | ||
151 | { | ||
152 | return maps__next(map); | ||
153 | } | ||
154 | |||
142 | struct symbol *map_groups__find_symbol(struct map_groups *mg, | 155 | struct symbol *map_groups__find_symbol(struct map_groups *mg, |
143 | enum map_type type, u64 addr, | 156 | enum map_type type, u64 addr, |
144 | struct map **mapp, | 157 | struct map **mapp, |
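maps__first()/maps__next() and the map_groups__first()/map_groups__next() wrappers above exist so callers can walk every map of a type in address order. The real helpers walk the rbtree shown in map.c; the caller-side shape they enable is just a for loop, sketched here over a stand-in array:

#include <stdio.h>

struct map { const char *name; };

static struct map maps[] = { { "[kernel.kallsyms]" }, { "libc-2.17.so" } };
#define NR_MAPS (sizeof(maps) / sizeof(maps[0]))

static struct map *maps_first(void)
{
        return NR_MAPS ? &maps[0] : NULL;
}

static struct map *maps_next(struct map *map)
{
        return (map + 1 < maps + NR_MAPS) ? map + 1 : NULL;
}

int main(void)
{
        struct map *map;

        for (map = maps_first(); map; map = maps_next(map))
                printf("%s\n", map->name);
        return 0;
}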
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 995fc25db8c6..98125319b158 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -6,7 +6,7 @@ | |||
6 | #include "parse-options.h" | 6 | #include "parse-options.h" |
7 | #include "parse-events.h" | 7 | #include "parse-events.h" |
8 | #include "exec_cmd.h" | 8 | #include "exec_cmd.h" |
9 | #include "string.h" | 9 | #include "linux/string.h" |
10 | #include "symbol.h" | 10 | #include "symbol.h" |
11 | #include "cache.h" | 11 | #include "cache.h" |
12 | #include "header.h" | 12 | #include "header.h" |
@@ -15,6 +15,7 @@ | |||
15 | #define YY_EXTRA_TYPE int | 15 | #define YY_EXTRA_TYPE int |
16 | #include "parse-events-flex.h" | 16 | #include "parse-events-flex.h" |
17 | #include "pmu.h" | 17 | #include "pmu.h" |
18 | #include "thread_map.h" | ||
18 | 19 | ||
19 | #define MAX_NAME_LEN 100 | 20 | #define MAX_NAME_LEN 100 |
20 | 21 | ||
@@ -108,6 +109,10 @@ static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { | |||
108 | .symbol = "emulation-faults", | 109 | .symbol = "emulation-faults", |
109 | .alias = "", | 110 | .alias = "", |
110 | }, | 111 | }, |
112 | [PERF_COUNT_SW_DUMMY] = { | ||
113 | .symbol = "dummy", | ||
114 | .alias = "", | ||
115 | }, | ||
111 | }; | 116 | }; |
112 | 117 | ||
113 | #define __PERF_EVENT_FIELD(config, name) \ | 118 | #define __PERF_EVENT_FIELD(config, name) \ |
@@ -217,6 +222,29 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) | |||
217 | return NULL; | 222 | return NULL; |
218 | } | 223 | } |
219 | 224 | ||
225 | struct tracepoint_path *tracepoint_name_to_path(const char *name) | ||
226 | { | ||
227 | struct tracepoint_path *path = zalloc(sizeof(*path)); | ||
228 | char *str = strchr(name, ':'); | ||
229 | |||
230 | if (path == NULL || str == NULL) { | ||
231 | free(path); | ||
232 | return NULL; | ||
233 | } | ||
234 | |||
235 | path->system = strndup(name, str - name); | ||
236 | path->name = strdup(str+1); | ||
237 | |||
238 | if (path->system == NULL || path->name == NULL) { | ||
239 | free(path->system); | ||
240 | free(path->name); | ||
241 | free(path); | ||
242 | path = NULL; | ||
243 | } | ||
244 | |||
245 | return path; | ||
246 | } | ||
247 | |||
220 | const char *event_type(int type) | 248 | const char *event_type(int type) |
221 | { | 249 | { |
222 | switch (type) { | 250 | switch (type) { |
@@ -241,40 +269,29 @@ const char *event_type(int type) | |||
241 | 269 | ||
242 | 270 | ||
243 | 271 | ||
244 | static int __add_event(struct list_head **_list, int *idx, | 272 | static int __add_event(struct list_head *list, int *idx, |
245 | struct perf_event_attr *attr, | 273 | struct perf_event_attr *attr, |
246 | char *name, struct cpu_map *cpus) | 274 | char *name, struct cpu_map *cpus) |
247 | { | 275 | { |
248 | struct perf_evsel *evsel; | 276 | struct perf_evsel *evsel; |
249 | struct list_head *list = *_list; | ||
250 | |||
251 | if (!list) { | ||
252 | list = malloc(sizeof(*list)); | ||
253 | if (!list) | ||
254 | return -ENOMEM; | ||
255 | INIT_LIST_HEAD(list); | ||
256 | } | ||
257 | 277 | ||
258 | event_attr_init(attr); | 278 | event_attr_init(attr); |
259 | 279 | ||
260 | evsel = perf_evsel__new(attr, (*idx)++); | 280 | evsel = perf_evsel__new(attr, (*idx)++); |
261 | if (!evsel) { | 281 | if (!evsel) |
262 | free(list); | ||
263 | return -ENOMEM; | 282 | return -ENOMEM; |
264 | } | ||
265 | 283 | ||
266 | evsel->cpus = cpus; | 284 | evsel->cpus = cpus; |
267 | if (name) | 285 | if (name) |
268 | evsel->name = strdup(name); | 286 | evsel->name = strdup(name); |
269 | list_add_tail(&evsel->node, list); | 287 | list_add_tail(&evsel->node, list); |
270 | *_list = list; | ||
271 | return 0; | 288 | return 0; |
272 | } | 289 | } |
273 | 290 | ||
274 | static int add_event(struct list_head **_list, int *idx, | 291 | static int add_event(struct list_head *list, int *idx, |
275 | struct perf_event_attr *attr, char *name) | 292 | struct perf_event_attr *attr, char *name) |
276 | { | 293 | { |
277 | return __add_event(_list, idx, attr, name, NULL); | 294 | return __add_event(list, idx, attr, name, NULL); |
278 | } | 295 | } |
279 | 296 | ||
280 | static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size) | 297 | static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size) |
@@ -295,7 +312,7 @@ static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES] | |||
295 | return -1; | 312 | return -1; |
296 | } | 313 | } |
297 | 314 | ||
298 | int parse_events_add_cache(struct list_head **list, int *idx, | 315 | int parse_events_add_cache(struct list_head *list, int *idx, |
299 | char *type, char *op_result1, char *op_result2) | 316 | char *type, char *op_result1, char *op_result2) |
300 | { | 317 | { |
301 | struct perf_event_attr attr; | 318 | struct perf_event_attr attr; |
@@ -356,31 +373,21 @@ int parse_events_add_cache(struct list_head **list, int *idx, | |||
356 | return add_event(list, idx, &attr, name); | 373 | return add_event(list, idx, &attr, name); |
357 | } | 374 | } |
358 | 375 | ||
359 | static int add_tracepoint(struct list_head **listp, int *idx, | 376 | static int add_tracepoint(struct list_head *list, int *idx, |
360 | char *sys_name, char *evt_name) | 377 | char *sys_name, char *evt_name) |
361 | { | 378 | { |
362 | struct perf_evsel *evsel; | 379 | struct perf_evsel *evsel; |
363 | struct list_head *list = *listp; | ||
364 | |||
365 | if (!list) { | ||
366 | list = malloc(sizeof(*list)); | ||
367 | if (!list) | ||
368 | return -ENOMEM; | ||
369 | INIT_LIST_HEAD(list); | ||
370 | } | ||
371 | 380 | ||
372 | evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++); | 381 | evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++); |
373 | if (!evsel) { | 382 | if (!evsel) |
374 | free(list); | ||
375 | return -ENOMEM; | 383 | return -ENOMEM; |
376 | } | ||
377 | 384 | ||
378 | list_add_tail(&evsel->node, list); | 385 | list_add_tail(&evsel->node, list); |
379 | *listp = list; | 386 | |
380 | return 0; | 387 | return 0; |
381 | } | 388 | } |
382 | 389 | ||
383 | static int add_tracepoint_multi_event(struct list_head **list, int *idx, | 390 | static int add_tracepoint_multi_event(struct list_head *list, int *idx, |
384 | char *sys_name, char *evt_name) | 391 | char *sys_name, char *evt_name) |
385 | { | 392 | { |
386 | char evt_path[MAXPATHLEN]; | 393 | char evt_path[MAXPATHLEN]; |
@@ -412,7 +419,7 @@ static int add_tracepoint_multi_event(struct list_head **list, int *idx, | |||
412 | return ret; | 419 | return ret; |
413 | } | 420 | } |
414 | 421 | ||
415 | static int add_tracepoint_event(struct list_head **list, int *idx, | 422 | static int add_tracepoint_event(struct list_head *list, int *idx, |
416 | char *sys_name, char *evt_name) | 423 | char *sys_name, char *evt_name) |
417 | { | 424 | { |
418 | return strpbrk(evt_name, "*?") ? | 425 | return strpbrk(evt_name, "*?") ? |
@@ -420,7 +427,7 @@ static int add_tracepoint_event(struct list_head **list, int *idx, | |||
420 | add_tracepoint(list, idx, sys_name, evt_name); | 427 | add_tracepoint(list, idx, sys_name, evt_name); |
421 | } | 428 | } |
422 | 429 | ||
423 | static int add_tracepoint_multi_sys(struct list_head **list, int *idx, | 430 | static int add_tracepoint_multi_sys(struct list_head *list, int *idx, |
424 | char *sys_name, char *evt_name) | 431 | char *sys_name, char *evt_name) |
425 | { | 432 | { |
426 | struct dirent *events_ent; | 433 | struct dirent *events_ent; |
@@ -452,7 +459,7 @@ static int add_tracepoint_multi_sys(struct list_head **list, int *idx, | |||
452 | return ret; | 459 | return ret; |
453 | } | 460 | } |
454 | 461 | ||
455 | int parse_events_add_tracepoint(struct list_head **list, int *idx, | 462 | int parse_events_add_tracepoint(struct list_head *list, int *idx, |
456 | char *sys, char *event) | 463 | char *sys, char *event) |
457 | { | 464 | { |
458 | int ret; | 465 | int ret; |
@@ -507,7 +514,7 @@ do { \ | |||
507 | return 0; | 514 | return 0; |
508 | } | 515 | } |
509 | 516 | ||
510 | int parse_events_add_breakpoint(struct list_head **list, int *idx, | 517 | int parse_events_add_breakpoint(struct list_head *list, int *idx, |
511 | void *ptr, char *type) | 518 | void *ptr, char *type) |
512 | { | 519 | { |
513 | struct perf_event_attr attr; | 520 | struct perf_event_attr attr; |
@@ -588,7 +595,7 @@ static int config_attr(struct perf_event_attr *attr, | |||
588 | return 0; | 595 | return 0; |
589 | } | 596 | } |
590 | 597 | ||
591 | int parse_events_add_numeric(struct list_head **list, int *idx, | 598 | int parse_events_add_numeric(struct list_head *list, int *idx, |
592 | u32 type, u64 config, | 599 | u32 type, u64 config, |
593 | struct list_head *head_config) | 600 | struct list_head *head_config) |
594 | { | 601 | { |
@@ -621,7 +628,7 @@ static char *pmu_event_name(struct list_head *head_terms) | |||
621 | return NULL; | 628 | return NULL; |
622 | } | 629 | } |
623 | 630 | ||
624 | int parse_events_add_pmu(struct list_head **list, int *idx, | 631 | int parse_events_add_pmu(struct list_head *list, int *idx, |
625 | char *name, struct list_head *head_config) | 632 | char *name, struct list_head *head_config) |
626 | { | 633 | { |
627 | struct perf_event_attr attr; | 634 | struct perf_event_attr attr; |
@@ -664,6 +671,7 @@ void parse_events__set_leader(char *name, struct list_head *list) | |||
664 | leader->group_name = name ? strdup(name) : NULL; | 671 | leader->group_name = name ? strdup(name) : NULL; |
665 | } | 672 | } |
666 | 673 | ||
674 | /* list_event is assumed to point to malloc'ed memory */ | ||
667 | void parse_events_update_lists(struct list_head *list_event, | 675 | void parse_events_update_lists(struct list_head *list_event, |
668 | struct list_head *list_all) | 676 | struct list_head *list_all) |
669 | { | 677 | { |
@@ -684,6 +692,8 @@ struct event_modifier { | |||
684 | int eG; | 692 | int eG; |
685 | int precise; | 693 | int precise; |
686 | int exclude_GH; | 694 | int exclude_GH; |
695 | int sample_read; | ||
696 | int pinned; | ||
687 | }; | 697 | }; |
688 | 698 | ||
689 | static int get_event_modifier(struct event_modifier *mod, char *str, | 699 | static int get_event_modifier(struct event_modifier *mod, char *str, |
@@ -695,6 +705,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str, | |||
695 | int eH = evsel ? evsel->attr.exclude_host : 0; | 705 | int eH = evsel ? evsel->attr.exclude_host : 0; |
696 | int eG = evsel ? evsel->attr.exclude_guest : 0; | 706 | int eG = evsel ? evsel->attr.exclude_guest : 0; |
697 | int precise = evsel ? evsel->attr.precise_ip : 0; | 707 | int precise = evsel ? evsel->attr.precise_ip : 0; |
708 | int sample_read = 0; | ||
709 | int pinned = evsel ? evsel->attr.pinned : 0; | ||
698 | 710 | ||
699 | int exclude = eu | ek | eh; | 711 | int exclude = eu | ek | eh; |
700 | int exclude_GH = evsel ? evsel->exclude_GH : 0; | 712 | int exclude_GH = evsel ? evsel->exclude_GH : 0; |
@@ -727,6 +739,10 @@ static int get_event_modifier(struct event_modifier *mod, char *str, | |||
727 | /* use of precise requires exclude_guest */ | 739 | /* use of precise requires exclude_guest */ |
728 | if (!exclude_GH) | 740 | if (!exclude_GH) |
729 | eG = 1; | 741 | eG = 1; |
742 | } else if (*str == 'S') { | ||
743 | sample_read = 1; | ||
744 | } else if (*str == 'D') { | ||
745 | pinned = 1; | ||
730 | } else | 746 | } else |
731 | break; | 747 | break; |
732 | 748 | ||
@@ -753,6 +769,9 @@ static int get_event_modifier(struct event_modifier *mod, char *str, | |||
753 | mod->eG = eG; | 769 | mod->eG = eG; |
754 | mod->precise = precise; | 770 | mod->precise = precise; |
755 | mod->exclude_GH = exclude_GH; | 771 | mod->exclude_GH = exclude_GH; |
772 | mod->sample_read = sample_read; | ||
773 | mod->pinned = pinned; | ||
774 | |||
756 | return 0; | 775 | return 0; |
757 | } | 776 | } |
758 | 777 | ||
@@ -765,7 +784,7 @@ static int check_modifier(char *str) | |||
765 | char *p = str; | 784 | char *p = str; |
766 | 785 | ||
767 | /* The sizeof includes 0 byte as well. */ | 786 | /* The sizeof includes 0 byte as well. */ |
768 | if (strlen(str) > (sizeof("ukhGHppp") - 1)) | 787 | if (strlen(str) > (sizeof("ukhGHpppSD") - 1)) |
769 | return -1; | 788 | return -1; |
770 | 789 | ||
771 | while (*p) { | 790 | while (*p) { |
@@ -803,6 +822,10 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add) | |||
803 | evsel->attr.exclude_host = mod.eH; | 822 | evsel->attr.exclude_host = mod.eH; |
804 | evsel->attr.exclude_guest = mod.eG; | 823 | evsel->attr.exclude_guest = mod.eG; |
805 | evsel->exclude_GH = mod.exclude_GH; | 824 | evsel->exclude_GH = mod.exclude_GH; |
825 | evsel->sample_read = mod.sample_read; | ||
826 | |||
827 | if (perf_evsel__is_group_leader(evsel)) | ||
828 | evsel->attr.pinned = mod.pinned; | ||
806 | } | 829 | } |
807 | 830 | ||
808 | return 0; | 831 | return 0; |
@@ -820,6 +843,32 @@ int parse_events_name(struct list_head *list, char *name) | |||
820 | return 0; | 843 | return 0; |
821 | } | 844 | } |
822 | 845 | ||
846 | static int parse_events__scanner(const char *str, void *data, int start_token); | ||
847 | |||
848 | static int parse_events_fixup(int ret, const char *str, void *data, | ||
849 | int start_token) | ||
850 | { | ||
851 | char *o = strdup(str); | ||
852 | char *s = NULL; | ||
853 | char *t = o; | ||
854 | char *p; | ||
855 | int len = 0; | ||
856 | |||
857 | if (!o) | ||
858 | return ret; | ||
859 | while ((p = strsep(&t, ",")) != NULL) { | ||
860 | if (s) | ||
861 | str_append(&s, &len, ","); | ||
862 | str_append(&s, &len, "cpu/"); | ||
863 | str_append(&s, &len, p); | ||
864 | str_append(&s, &len, "/"); | ||
865 | } | ||
866 | free(o); | ||
867 | if (!s) | ||
868 | return -ENOMEM; | ||
869 | return parse_events__scanner(s, data, start_token); | ||
870 | } | ||
871 | |||
823 | static int parse_events__scanner(const char *str, void *data, int start_token) | 872 | static int parse_events__scanner(const char *str, void *data, int start_token) |
824 | { | 873 | { |
825 | YY_BUFFER_STATE buffer; | 874 | YY_BUFFER_STATE buffer; |
@@ -840,6 +889,8 @@ static int parse_events__scanner(const char *str, void *data, int start_token) | |||
840 | parse_events__flush_buffer(buffer, scanner); | 889 | parse_events__flush_buffer(buffer, scanner); |
841 | parse_events__delete_buffer(buffer, scanner); | 890 | parse_events__delete_buffer(buffer, scanner); |
842 | parse_events_lex_destroy(scanner); | 891 | parse_events_lex_destroy(scanner); |
892 | if (ret && !strchr(str, '/')) | ||
893 | ret = parse_events_fixup(ret, str, data, start_token); | ||
843 | return ret; | 894 | return ret; |
844 | } | 895 | } |
845 | 896 | ||
@@ -1026,6 +1077,33 @@ int is_valid_tracepoint(const char *event_string) | |||
1026 | return 0; | 1077 | return 0; |
1027 | } | 1078 | } |
1028 | 1079 | ||
1080 | static bool is_event_supported(u8 type, unsigned config) | ||
1081 | { | ||
1082 | bool ret = true; | ||
1083 | struct perf_evsel *evsel; | ||
1084 | struct perf_event_attr attr = { | ||
1085 | .type = type, | ||
1086 | .config = config, | ||
1087 | .disabled = 1, | ||
1088 | .exclude_kernel = 1, | ||
1089 | }; | ||
1090 | struct { | ||
1091 | struct thread_map map; | ||
1092 | int threads[1]; | ||
1093 | } tmap = { | ||
1094 | .map.nr = 1, | ||
1095 | .threads = { 0 }, | ||
1096 | }; | ||
1097 | |||
1098 | evsel = perf_evsel__new(&attr, 0); | ||
1099 | if (evsel) { | ||
1100 | ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0; | ||
1101 | perf_evsel__delete(evsel); | ||
1102 | } | ||
1103 | |||
1104 | return ret; | ||
1105 | } | ||
1106 | |||
1029 | static void __print_events_type(u8 type, struct event_symbol *syms, | 1107 | static void __print_events_type(u8 type, struct event_symbol *syms, |
1030 | unsigned max) | 1108 | unsigned max) |
1031 | { | 1109 | { |
@@ -1033,14 +1111,16 @@ static void __print_events_type(u8 type, struct event_symbol *syms, | |||
1033 | unsigned i; | 1111 | unsigned i; |
1034 | 1112 | ||
1035 | for (i = 0; i < max ; i++, syms++) { | 1113 | for (i = 0; i < max ; i++, syms++) { |
1114 | if (!is_event_supported(type, i)) | ||
1115 | continue; | ||
1116 | |||
1036 | if (strlen(syms->alias)) | 1117 | if (strlen(syms->alias)) |
1037 | snprintf(name, sizeof(name), "%s OR %s", | 1118 | snprintf(name, sizeof(name), "%s OR %s", |
1038 | syms->symbol, syms->alias); | 1119 | syms->symbol, syms->alias); |
1039 | else | 1120 | else |
1040 | snprintf(name, sizeof(name), "%s", syms->symbol); | 1121 | snprintf(name, sizeof(name), "%s", syms->symbol); |
1041 | 1122 | ||
1042 | printf(" %-50s [%s]\n", name, | 1123 | printf(" %-50s [%s]\n", name, event_type_descriptors[type]); |
1043 | event_type_descriptors[type]); | ||
1044 | } | 1124 | } |
1045 | } | 1125 | } |
1046 | 1126 | ||
@@ -1069,6 +1149,10 @@ int print_hwcache_events(const char *event_glob, bool name_only) | |||
1069 | if (event_glob != NULL && !strglobmatch(name, event_glob)) | 1149 | if (event_glob != NULL && !strglobmatch(name, event_glob)) |
1070 | continue; | 1150 | continue; |
1071 | 1151 | ||
1152 | if (!is_event_supported(PERF_TYPE_HW_CACHE, | ||
1153 | type | (op << 8) | (i << 16))) | ||
1154 | continue; | ||
1155 | |||
1072 | if (name_only) | 1156 | if (name_only) |
1073 | printf("%s ", name); | 1157 | printf("%s ", name); |
1074 | else | 1158 | else |
@@ -1079,6 +1163,8 @@ int print_hwcache_events(const char *event_glob, bool name_only) | |||
1079 | } | 1163 | } |
1080 | } | 1164 | } |
1081 | 1165 | ||
1166 | if (printed) | ||
1167 | printf("\n"); | ||
1082 | return printed; | 1168 | return printed; |
1083 | } | 1169 | } |
1084 | 1170 | ||
@@ -1096,6 +1182,9 @@ static void print_symbol_events(const char *event_glob, unsigned type, | |||
1096 | (syms->alias && strglobmatch(syms->alias, event_glob)))) | 1182 | (syms->alias && strglobmatch(syms->alias, event_glob)))) |
1097 | continue; | 1183 | continue; |
1098 | 1184 | ||
1185 | if (!is_event_supported(type, i)) | ||
1186 | continue; | ||
1187 | |||
1099 | if (name_only) { | 1188 | if (name_only) { |
1100 | printf("%s ", syms->symbol); | 1189 | printf("%s ", syms->symbol); |
1101 | continue; | 1190 | continue; |
@@ -1133,11 +1222,12 @@ void print_events(const char *event_glob, bool name_only) | |||
1133 | 1222 | ||
1134 | print_hwcache_events(event_glob, name_only); | 1223 | print_hwcache_events(event_glob, name_only); |
1135 | 1224 | ||
1225 | print_pmu_events(event_glob, name_only); | ||
1226 | |||
1136 | if (event_glob != NULL) | 1227 | if (event_glob != NULL) |
1137 | return; | 1228 | return; |
1138 | 1229 | ||
1139 | if (!name_only) { | 1230 | if (!name_only) { |
1140 | printf("\n"); | ||
1141 | printf(" %-50s [%s]\n", | 1231 | printf(" %-50s [%s]\n", |
1142 | "rNNN", | 1232 | "rNNN", |
1143 | event_type_descriptors[PERF_TYPE_RAW]); | 1233 | event_type_descriptors[PERF_TYPE_RAW]); |
@@ -1237,6 +1327,4 @@ void parse_events__free_terms(struct list_head *terms) | |||
1237 | 1327 | ||
1238 | list_for_each_entry_safe(term, h, terms, list) | 1328 | list_for_each_entry_safe(term, h, terms, list) |
1239 | free(term); | 1329 | free(term); |
1240 | |||
1241 | free(terms); | ||
1242 | } | 1330 | } |
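is_event_supported() above filters the event listings by actually trying to open each event: a disabled, exclude_kernel counter on the current thread, discarded immediately. The same probe can be written directly against the perf_event_open() syscall; the perf code goes through perf_evsel__open() and a one-entry thread_map instead:

#include <linux/perf_event.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static bool probe_event(__u32 type, __u64 config)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;
        attr.config = config;
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        /* pid 0 = current process, any CPU, no group, no flags */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                return false;
        close(fd);
        return true;
}

int main(void)
{
        printf("cpu-clock supported: %d\n",
               probe_event(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK));
        return 0;
}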
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 8a4859315fd9..f1cb4c4b3c70 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h | |||
@@ -23,6 +23,7 @@ struct tracepoint_path { | |||
23 | }; | 23 | }; |
24 | 24 | ||
25 | extern struct tracepoint_path *tracepoint_id_to_path(u64 config); | 25 | extern struct tracepoint_path *tracepoint_id_to_path(u64 config); |
26 | extern struct tracepoint_path *tracepoint_name_to_path(const char *name); | ||
26 | extern bool have_tracepoints(struct list_head *evlist); | 27 | extern bool have_tracepoints(struct list_head *evlist); |
27 | 28 | ||
28 | const char *event_type(int type); | 29 | const char *event_type(int type); |
@@ -84,16 +85,16 @@ void parse_events__free_terms(struct list_head *terms); | |||
84 | int parse_events__modifier_event(struct list_head *list, char *str, bool add); | 85 | int parse_events__modifier_event(struct list_head *list, char *str, bool add); |
85 | int parse_events__modifier_group(struct list_head *list, char *event_mod); | 86 | int parse_events__modifier_group(struct list_head *list, char *event_mod); |
86 | int parse_events_name(struct list_head *list, char *name); | 87 | int parse_events_name(struct list_head *list, char *name); |
87 | int parse_events_add_tracepoint(struct list_head **list, int *idx, | 88 | int parse_events_add_tracepoint(struct list_head *list, int *idx, |
88 | char *sys, char *event); | 89 | char *sys, char *event); |
89 | int parse_events_add_numeric(struct list_head **list, int *idx, | 90 | int parse_events_add_numeric(struct list_head *list, int *idx, |
90 | u32 type, u64 config, | 91 | u32 type, u64 config, |
91 | struct list_head *head_config); | 92 | struct list_head *head_config); |
92 | int parse_events_add_cache(struct list_head **list, int *idx, | 93 | int parse_events_add_cache(struct list_head *list, int *idx, |
93 | char *type, char *op_result1, char *op_result2); | 94 | char *type, char *op_result1, char *op_result2); |
94 | int parse_events_add_breakpoint(struct list_head **list, int *idx, | 95 | int parse_events_add_breakpoint(struct list_head *list, int *idx, |
95 | void *ptr, char *type); | 96 | void *ptr, char *type); |
96 | int parse_events_add_pmu(struct list_head **list, int *idx, | 97 | int parse_events_add_pmu(struct list_head *list, int *idx, |
97 | char *pmu , struct list_head *head_config); | 98 | char *pmu , struct list_head *head_config); |
98 | void parse_events__set_leader(char *name, struct list_head *list); | 99 | void parse_events__set_leader(char *name, struct list_head *list); |
99 | void parse_events_update_lists(struct list_head *list_event, | 100 | void parse_events_update_lists(struct list_head *list_event, |
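tracepoint_name_to_path(), declared above and defined earlier in this patch, splits a "sys:name" string on the first ':' into the tracefs system and event name. A self-contained restatement of that split (using a local struct rather than perf's tracepoint_path):

#define _GNU_SOURCE             /* strndup on older toolchains */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tp_path { char *system; char *name; };

static int split_tracepoint(const char *spec, struct tp_path *p)
{
        const char *colon = strchr(spec, ':');

        if (!colon)
                return -1;
        p->system = strndup(spec, colon - spec);
        p->name = strdup(colon + 1);
        if (!p->system || !p->name) {
                free(p->system);
                free(p->name);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct tp_path p;

        if (!split_tracepoint("sched:sched_switch", &p)) {
                printf("system=%s name=%s\n", p.system, p.name);
                free(p.system);
                free(p.name);
        }
        return 0;
}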
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index e9d1134c2c68..91346b753960 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l | |||
@@ -82,7 +82,8 @@ num_hex 0x[a-fA-F0-9]+ | |||
82 | num_raw_hex [a-fA-F0-9]+ | 82 | num_raw_hex [a-fA-F0-9]+ |
83 | name [a-zA-Z_*?][a-zA-Z0-9_*?]* | 83 | name [a-zA-Z_*?][a-zA-Z0-9_*?]* |
84 | name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]* | 84 | name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]* |
85 | modifier_event [ukhpGH]+ | 85 | /* If you add a modifier you need to update check_modifier() */ |
86 | modifier_event [ukhpGHSD]+ | ||
86 | modifier_bp [rwx]{1,3} | 87 | modifier_bp [rwx]{1,3} |
87 | 88 | ||
88 | %% | 89 | %% |
@@ -144,6 +145,7 @@ context-switches|cs { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW | |||
144 | cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); } | 145 | cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); } |
145 | alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); } | 146 | alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); } |
146 | emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); } | 147 | emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); } |
148 | dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); } | ||
147 | 149 | ||
148 | L1-dcache|l1-d|l1d|L1-data | | 150 | L1-dcache|l1-d|l1d|L1-data | |
149 | L1-icache|l1-i|l1i|L1-instruction | | 151 | L1-icache|l1-i|l1i|L1-instruction | |
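The lexer change above adds a "dummy" software event alongside the new 'S' and 'D' letters in the event modifier set; PERF_COUNT_SW_DUMMY is a counter that counts nothing but can still carry sideband records such as mmap and comm events. As a stand-alone illustration (not part of perf's parser), the sketch below opens such an event through the raw perf_event_open syscall; it assumes kernel headers new enough to define PERF_COUNT_SW_DUMMY and targets the calling process only.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Thin wrapper: glibc provides no perf_event_open() stub. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_DUMMY;  /* counts nothing, keeps sideband */
        attr.mmap = 1;                      /* still records mmap events */
        attr.comm = 1;                      /* still records comm events */

        fd = perf_event_open(&attr, 0 /* self */, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        close(fd);
        return 0;
}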
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index afc44c18dfe1..4eb67ec333f1 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y | |||
@@ -22,6 +22,13 @@ do { \ | |||
22 | YYABORT; \ | 22 | YYABORT; \ |
23 | } while (0) | 23 | } while (0) |
24 | 24 | ||
25 | #define ALLOC_LIST(list) \ | ||
26 | do { \ | ||
27 | list = malloc(sizeof(*list)); \ | ||
28 | ABORT_ON(!list); \ | ||
29 | INIT_LIST_HEAD(list); \ | ||
30 | } while (0) | ||
31 | |||
25 | static inc_group_count(struct list_head *list, | 32 | static inc_group_count(struct list_head *list, |
26 | struct parse_events_evlist *data) | 33 | struct parse_events_evlist *data) |
27 | { | 34 | { |
@@ -196,9 +203,10 @@ event_pmu: | |||
196 | PE_NAME '/' event_config '/' | 203 | PE_NAME '/' event_config '/' |
197 | { | 204 | { |
198 | struct parse_events_evlist *data = _data; | 205 | struct parse_events_evlist *data = _data; |
199 | struct list_head *list = NULL; | 206 | struct list_head *list; |
200 | 207 | ||
201 | ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3)); | 208 | ALLOC_LIST(list); |
209 | ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, $3)); | ||
202 | parse_events__free_terms($3); | 210 | parse_events__free_terms($3); |
203 | $$ = list; | 211 | $$ = list; |
204 | } | 212 | } |
@@ -212,11 +220,12 @@ event_legacy_symbol: | |||
212 | value_sym '/' event_config '/' | 220 | value_sym '/' event_config '/' |
213 | { | 221 | { |
214 | struct parse_events_evlist *data = _data; | 222 | struct parse_events_evlist *data = _data; |
215 | struct list_head *list = NULL; | 223 | struct list_head *list; |
216 | int type = $1 >> 16; | 224 | int type = $1 >> 16; |
217 | int config = $1 & 255; | 225 | int config = $1 & 255; |
218 | 226 | ||
219 | ABORT_ON(parse_events_add_numeric(&list, &data->idx, | 227 | ALLOC_LIST(list); |
228 | ABORT_ON(parse_events_add_numeric(list, &data->idx, | ||
220 | type, config, $3)); | 229 | type, config, $3)); |
221 | parse_events__free_terms($3); | 230 | parse_events__free_terms($3); |
222 | $$ = list; | 231 | $$ = list; |
@@ -225,11 +234,12 @@ value_sym '/' event_config '/' | |||
225 | value_sym sep_slash_dc | 234 | value_sym sep_slash_dc |
226 | { | 235 | { |
227 | struct parse_events_evlist *data = _data; | 236 | struct parse_events_evlist *data = _data; |
228 | struct list_head *list = NULL; | 237 | struct list_head *list; |
229 | int type = $1 >> 16; | 238 | int type = $1 >> 16; |
230 | int config = $1 & 255; | 239 | int config = $1 & 255; |
231 | 240 | ||
232 | ABORT_ON(parse_events_add_numeric(&list, &data->idx, | 241 | ALLOC_LIST(list); |
242 | ABORT_ON(parse_events_add_numeric(list, &data->idx, | ||
233 | type, config, NULL)); | 243 | type, config, NULL)); |
234 | $$ = list; | 244 | $$ = list; |
235 | } | 245 | } |
@@ -238,27 +248,30 @@ event_legacy_cache: | |||
238 | PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT | 248 | PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT |
239 | { | 249 | { |
240 | struct parse_events_evlist *data = _data; | 250 | struct parse_events_evlist *data = _data; |
241 | struct list_head *list = NULL; | 251 | struct list_head *list; |
242 | 252 | ||
243 | ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5)); | 253 | ALLOC_LIST(list); |
254 | ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, $5)); | ||
244 | $$ = list; | 255 | $$ = list; |
245 | } | 256 | } |
246 | | | 257 | | |
247 | PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT | 258 | PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT |
248 | { | 259 | { |
249 | struct parse_events_evlist *data = _data; | 260 | struct parse_events_evlist *data = _data; |
250 | struct list_head *list = NULL; | 261 | struct list_head *list; |
251 | 262 | ||
252 | ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL)); | 263 | ALLOC_LIST(list); |
264 | ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, NULL)); | ||
253 | $$ = list; | 265 | $$ = list; |
254 | } | 266 | } |
255 | | | 267 | | |
256 | PE_NAME_CACHE_TYPE | 268 | PE_NAME_CACHE_TYPE |
257 | { | 269 | { |
258 | struct parse_events_evlist *data = _data; | 270 | struct parse_events_evlist *data = _data; |
259 | struct list_head *list = NULL; | 271 | struct list_head *list; |
260 | 272 | ||
261 | ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL)); | 273 | ALLOC_LIST(list); |
274 | ABORT_ON(parse_events_add_cache(list, &data->idx, $1, NULL, NULL)); | ||
262 | $$ = list; | 275 | $$ = list; |
263 | } | 276 | } |
264 | 277 | ||
@@ -266,9 +279,10 @@ event_legacy_mem: | |||
266 | PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc | 279 | PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc |
267 | { | 280 | { |
268 | struct parse_events_evlist *data = _data; | 281 | struct parse_events_evlist *data = _data; |
269 | struct list_head *list = NULL; | 282 | struct list_head *list; |
270 | 283 | ||
271 | ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, | 284 | ALLOC_LIST(list); |
285 | ABORT_ON(parse_events_add_breakpoint(list, &data->idx, | ||
272 | (void *) $2, $4)); | 286 | (void *) $2, $4)); |
273 | $$ = list; | 287 | $$ = list; |
274 | } | 288 | } |
@@ -276,9 +290,10 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc | |||
276 | PE_PREFIX_MEM PE_VALUE sep_dc | 290 | PE_PREFIX_MEM PE_VALUE sep_dc |
277 | { | 291 | { |
278 | struct parse_events_evlist *data = _data; | 292 | struct parse_events_evlist *data = _data; |
279 | struct list_head *list = NULL; | 293 | struct list_head *list; |
280 | 294 | ||
281 | ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, | 295 | ALLOC_LIST(list); |
296 | ABORT_ON(parse_events_add_breakpoint(list, &data->idx, | ||
282 | (void *) $2, NULL)); | 297 | (void *) $2, NULL)); |
283 | $$ = list; | 298 | $$ = list; |
284 | } | 299 | } |
@@ -287,9 +302,10 @@ event_legacy_tracepoint: | |||
287 | PE_NAME ':' PE_NAME | 302 | PE_NAME ':' PE_NAME |
288 | { | 303 | { |
289 | struct parse_events_evlist *data = _data; | 304 | struct parse_events_evlist *data = _data; |
290 | struct list_head *list = NULL; | 305 | struct list_head *list; |
291 | 306 | ||
292 | ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3)); | 307 | ALLOC_LIST(list); |
308 | ABORT_ON(parse_events_add_tracepoint(list, &data->idx, $1, $3)); | ||
293 | $$ = list; | 309 | $$ = list; |
294 | } | 310 | } |
295 | 311 | ||
@@ -297,9 +313,10 @@ event_legacy_numeric: | |||
297 | PE_VALUE ':' PE_VALUE | 313 | PE_VALUE ':' PE_VALUE |
298 | { | 314 | { |
299 | struct parse_events_evlist *data = _data; | 315 | struct parse_events_evlist *data = _data; |
300 | struct list_head *list = NULL; | 316 | struct list_head *list; |
301 | 317 | ||
302 | ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL)); | 318 | ALLOC_LIST(list); |
319 | ABORT_ON(parse_events_add_numeric(list, &data->idx, (u32)$1, $3, NULL)); | ||
303 | $$ = list; | 320 | $$ = list; |
304 | } | 321 | } |
305 | 322 | ||
@@ -307,9 +324,10 @@ event_legacy_raw: | |||
307 | PE_RAW | 324 | PE_RAW |
308 | { | 325 | { |
309 | struct parse_events_evlist *data = _data; | 326 | struct parse_events_evlist *data = _data; |
310 | struct list_head *list = NULL; | 327 | struct list_head *list; |
311 | 328 | ||
312 | ABORT_ON(parse_events_add_numeric(&list, &data->idx, | 329 | ALLOC_LIST(list); |
330 | ABORT_ON(parse_events_add_numeric(list, &data->idx, | ||
313 | PERF_TYPE_RAW, $1, NULL)); | 331 | PERF_TYPE_RAW, $1, NULL)); |
314 | $$ = list; | 332 | $$ = list; |
315 | } | 333 | } |
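Taken together with the parse-events.h change above, ownership of the event list moves into the grammar actions: ALLOC_LIST mallocs and initialises the list head up front, and the parse_events_add_*() helpers now receive a plain struct list_head * they only append to. The sketch below reproduces that allocate-then-append pattern in isolation; the cut-down list_head, list_add_tail(), add_event() and fake_evsel names are stand-ins invented for the example, not perf's own code.

#include <stdlib.h>
#include <string.h>

/* Reduced stand-ins for the kernel's <linux/list.h> primitives. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *l) { l->next = l->prev = l; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

#define ALLOC_LIST(list)                \
do {                                    \
        list = malloc(sizeof(*list));   \
        if (!list)                      \
                exit(1);                \
        INIT_LIST_HEAD(list);           \
} while (0)

/* Hypothetical stand-in for a parse_events_add_*() helper: it appends to a
 * list the caller already allocated; it never allocates the list itself. */
struct fake_evsel { struct list_head node; char name[32]; };

static int add_event(struct list_head *list, const char *name)
{
        struct fake_evsel *evsel = malloc(sizeof(*evsel));

        if (!evsel)
                return -1;
        strncpy(evsel->name, name, sizeof(evsel->name) - 1);
        evsel->name[sizeof(evsel->name) - 1] = '\0';
        list_add_tail(&evsel->node, list);
        return 0;
}

int main(void)
{
        struct list_head *list;

        ALLOC_LIST(list);               /* the "grammar action" owns the list */
        return add_event(list, "cycles") ? 1 : 0;
}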
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 4c6f9c490a8d..bc9d8069d376 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c | |||
@@ -73,7 +73,7 @@ int perf_pmu__format_parse(char *dir, struct list_head *head) | |||
73 | * located at: | 73 | * located at: |
74 | * /sys/bus/event_source/devices/<dev>/format as sysfs group attributes. | 74 | * /sys/bus/event_source/devices/<dev>/format as sysfs group attributes. |
75 | */ | 75 | */ |
76 | static int pmu_format(char *name, struct list_head *format) | 76 | static int pmu_format(const char *name, struct list_head *format) |
77 | { | 77 | { |
78 | struct stat st; | 78 | struct stat st; |
79 | char path[PATH_MAX]; | 79 | char path[PATH_MAX]; |
@@ -162,7 +162,7 @@ static int pmu_aliases_parse(char *dir, struct list_head *head) | |||
162 | * Reading the pmu event aliases definition, which should be located at: | 162 | * Reading the pmu event aliases definition, which should be located at: |
163 | * /sys/bus/event_source/devices/<dev>/events as sysfs group attributes. | 163 | * /sys/bus/event_source/devices/<dev>/events as sysfs group attributes. |
164 | */ | 164 | */ |
165 | static int pmu_aliases(char *name, struct list_head *head) | 165 | static int pmu_aliases(const char *name, struct list_head *head) |
166 | { | 166 | { |
167 | struct stat st; | 167 | struct stat st; |
168 | char path[PATH_MAX]; | 168 | char path[PATH_MAX]; |
@@ -208,7 +208,7 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias, | |||
208 | * located at: | 208 | * located at: |
209 | * /sys/bus/event_source/devices/<dev>/type as sysfs attribute. | 209 | * /sys/bus/event_source/devices/<dev>/type as sysfs attribute. |
210 | */ | 210 | */ |
211 | static int pmu_type(char *name, __u32 *type) | 211 | static int pmu_type(const char *name, __u32 *type) |
212 | { | 212 | { |
213 | struct stat st; | 213 | struct stat st; |
214 | char path[PATH_MAX]; | 214 | char path[PATH_MAX]; |
@@ -266,7 +266,7 @@ static void pmu_read_sysfs(void) | |||
266 | closedir(dir); | 266 | closedir(dir); |
267 | } | 267 | } |
268 | 268 | ||
269 | static struct cpu_map *pmu_cpumask(char *name) | 269 | static struct cpu_map *pmu_cpumask(const char *name) |
270 | { | 270 | { |
271 | struct stat st; | 271 | struct stat st; |
272 | char path[PATH_MAX]; | 272 | char path[PATH_MAX]; |
@@ -293,7 +293,7 @@ static struct cpu_map *pmu_cpumask(char *name) | |||
293 | return cpus; | 293 | return cpus; |
294 | } | 294 | } |
295 | 295 | ||
296 | static struct perf_pmu *pmu_lookup(char *name) | 296 | static struct perf_pmu *pmu_lookup(const char *name) |
297 | { | 297 | { |
298 | struct perf_pmu *pmu; | 298 | struct perf_pmu *pmu; |
299 | LIST_HEAD(format); | 299 | LIST_HEAD(format); |
@@ -330,7 +330,7 @@ static struct perf_pmu *pmu_lookup(char *name) | |||
330 | return pmu; | 330 | return pmu; |
331 | } | 331 | } |
332 | 332 | ||
333 | static struct perf_pmu *pmu_find(char *name) | 333 | static struct perf_pmu *pmu_find(const char *name) |
334 | { | 334 | { |
335 | struct perf_pmu *pmu; | 335 | struct perf_pmu *pmu; |
336 | 336 | ||
@@ -356,7 +356,7 @@ struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu) | |||
356 | return NULL; | 356 | return NULL; |
357 | } | 357 | } |
358 | 358 | ||
359 | struct perf_pmu *perf_pmu__find(char *name) | 359 | struct perf_pmu *perf_pmu__find(const char *name) |
360 | { | 360 | { |
361 | struct perf_pmu *pmu; | 361 | struct perf_pmu *pmu; |
362 | 362 | ||
@@ -564,3 +564,76 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to) | |||
564 | for (b = from; b <= to; b++) | 564 | for (b = from; b <= to; b++) |
565 | set_bit(b, bits); | 565 | set_bit(b, bits); |
566 | } | 566 | } |
567 | |||
568 | static char *format_alias(char *buf, int len, struct perf_pmu *pmu, | ||
569 | struct perf_pmu_alias *alias) | ||
570 | { | ||
571 | snprintf(buf, len, "%s/%s/", pmu->name, alias->name); | ||
572 | return buf; | ||
573 | } | ||
574 | |||
575 | static char *format_alias_or(char *buf, int len, struct perf_pmu *pmu, | ||
576 | struct perf_pmu_alias *alias) | ||
577 | { | ||
578 | snprintf(buf, len, "%s OR %s/%s/", alias->name, pmu->name, alias->name); | ||
579 | return buf; | ||
580 | } | ||
581 | |||
582 | static int cmp_string(const void *a, const void *b) | ||
583 | { | ||
584 | const char * const *as = a; | ||
585 | const char * const *bs = b; | ||
586 | return strcmp(*as, *bs); | ||
587 | } | ||
588 | |||
589 | void print_pmu_events(const char *event_glob, bool name_only) | ||
590 | { | ||
591 | struct perf_pmu *pmu; | ||
592 | struct perf_pmu_alias *alias; | ||
593 | char buf[1024]; | ||
594 | int printed = 0; | ||
595 | int len, j; | ||
596 | char **aliases; | ||
597 | |||
598 | pmu = NULL; | ||
599 | len = 0; | ||
600 | while ((pmu = perf_pmu__scan(pmu)) != NULL) | ||
601 | list_for_each_entry(alias, &pmu->aliases, list) | ||
602 | len++; | ||
603 | aliases = malloc(sizeof(char *) * len); | ||
604 | if (!aliases) | ||
605 | return; | ||
606 | pmu = NULL; | ||
607 | j = 0; | ||
608 | while ((pmu = perf_pmu__scan(pmu)) != NULL) | ||
609 | list_for_each_entry(alias, &pmu->aliases, list) { | ||
610 | char *name = format_alias(buf, sizeof(buf), pmu, alias); | ||
611 | bool is_cpu = !strcmp(pmu->name, "cpu"); | ||
612 | |||
613 | if (event_glob != NULL && | ||
614 | !(strglobmatch(name, event_glob) || | ||
615 | (!is_cpu && strglobmatch(alias->name, | ||
616 | event_glob)))) | ||
617 | continue; | ||
618 | aliases[j] = name; | ||
619 | if (is_cpu && !name_only) | ||
620 | aliases[j] = format_alias_or(buf, sizeof(buf), | ||
621 | pmu, alias); | ||
622 | aliases[j] = strdup(aliases[j]); | ||
623 | j++; | ||
624 | } | ||
625 | len = j; | ||
626 | qsort(aliases, len, sizeof(char *), cmp_string); | ||
627 | for (j = 0; j < len; j++) { | ||
628 | if (name_only) { | ||
629 | printf("%s ", aliases[j]); | ||
630 | continue; | ||
631 | } | ||
632 | printf(" %-50s [Kernel PMU event]\n", aliases[j]); | ||
633 | free(aliases[j]); | ||
634 | printed++; | ||
635 | } | ||
636 | if (printed) | ||
637 | printf("\n"); | ||
638 | free(aliases); | ||
639 | } | ||
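print_pmu_events() flattens every PMU alias into an array of strings, sorts it with qsort() and cmp_string(), and only then prints, so 'perf list' output comes out alphabetically regardless of sysfs iteration order. The subtle point is the comparator: qsort() hands it pointers to the array elements, so for a char *[] the arguments are really const char ** and need one dereference. A minimal stand-alone sketch of the same comparator over hypothetical alias names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* qsort() passes pointers to the elements, hence the extra level. */
static int cmp_string(const void *a, const void *b)
{
        const char * const *as = a;
        const char * const *bs = b;

        return strcmp(*as, *bs);
}

int main(void)
{
        /* Hypothetical alias names in the "<pmu>/<event>/" style. */
        const char *aliases[] = {
                "cpu/instructions/",
                "cpu/branch-misses/",
                "cpu/cache-references/",
        };
        size_t i, n = sizeof(aliases) / sizeof(aliases[0]);

        qsort(aliases, n, sizeof(aliases[0]), cmp_string);
        for (i = 0; i < n; i++)
                printf("  %-50s [Kernel PMU event]\n", aliases[i]);
        return 0;
}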
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 32fe55b659fa..6b2cbe2d4cc3 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/bitops.h> | 4 | #include <linux/bitops.h> |
5 | #include <linux/perf_event.h> | 5 | #include <linux/perf_event.h> |
6 | #include <stdbool.h> | ||
6 | 7 | ||
7 | enum { | 8 | enum { |
8 | PERF_PMU_FORMAT_VALUE_CONFIG, | 9 | PERF_PMU_FORMAT_VALUE_CONFIG, |
@@ -21,7 +22,7 @@ struct perf_pmu { | |||
21 | struct list_head list; | 22 | struct list_head list; |
22 | }; | 23 | }; |
23 | 24 | ||
24 | struct perf_pmu *perf_pmu__find(char *name); | 25 | struct perf_pmu *perf_pmu__find(const char *name); |
25 | int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, | 26 | int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, |
26 | struct list_head *head_terms); | 27 | struct list_head *head_terms); |
27 | int perf_pmu__config_terms(struct list_head *formats, | 28 | int perf_pmu__config_terms(struct list_head *formats, |
@@ -40,5 +41,7 @@ int perf_pmu__format_parse(char *dir, struct list_head *head); | |||
40 | 41 | ||
41 | struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu); | 42 | struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu); |
42 | 43 | ||
44 | void print_pmu_events(const char *event_glob, bool name_only); | ||
45 | |||
43 | int perf_pmu__test(void); | 46 | int perf_pmu__test(void); |
44 | #endif /* __PMU_H */ | 47 | #endif /* __PMU_H */ |
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 925e0c3e6d91..71b5412bbbb9 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c | |||
@@ -8,6 +8,26 @@ | |||
8 | #include "cpumap.h" | 8 | #include "cpumap.h" |
9 | #include "thread_map.h" | 9 | #include "thread_map.h" |
10 | 10 | ||
11 | /* | ||
12 | * Support debug printing even though util/debug.c is not linked. That means | ||
13 | * implementing 'verbose' and 'eprintf'. | ||
14 | */ | ||
15 | int verbose; | ||
16 | |||
17 | int eprintf(int level, const char *fmt, ...) | ||
18 | { | ||
19 | va_list args; | ||
20 | int ret = 0; | ||
21 | |||
22 | if (verbose >= level) { | ||
23 | va_start(args, fmt); | ||
24 | ret = vfprintf(stderr, fmt, args); | ||
25 | va_end(args); | ||
26 | } | ||
27 | |||
28 | return ret; | ||
29 | } | ||
30 | |||
11 | /* Define PyVarObject_HEAD_INIT for python 2.5 */ | 31 | /* Define PyVarObject_HEAD_INIT for python 2.5 */ |
12 | #ifndef PyVarObject_HEAD_INIT | 32 | #ifndef PyVarObject_HEAD_INIT |
13 | # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, | 33 | # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, |
@@ -967,6 +987,7 @@ static struct { | |||
967 | { "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ }, | 987 | { "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ }, |
968 | { "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS }, | 988 | { "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS }, |
969 | { "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS }, | 989 | { "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS }, |
990 | { "COUNT_SW_DUMMY", PERF_COUNT_SW_DUMMY }, | ||
970 | 991 | ||
971 | { "SAMPLE_IP", PERF_SAMPLE_IP }, | 992 | { "SAMPLE_IP", PERF_SAMPLE_IP }, |
972 | { "SAMPLE_TID", PERF_SAMPLE_TID }, | 993 | { "SAMPLE_TID", PERF_SAMPLE_TID }, |
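The python extension module does not link util/debug.c, so python.c carries its own 'verbose' knob and a level-gated eprintf() built on vfprintf(). The sketch below shows the same gating behaviour in isolation, with a small main() that is only illustrative; it mirrors the shape of the stub rather than perf's full debug machinery.

#include <stdarg.h>
#include <stdio.h>

static int verbose;

/* Print only when the message level is at or below the verbosity knob. */
static int eprintf(int level, const char *fmt, ...)
{
        va_list args;
        int ret = 0;

        if (verbose >= level) {
                va_start(args, fmt);
                ret = vfprintf(stderr, fmt, args);
                va_end(args);
        }
        return ret;
}

int main(void)
{
        eprintf(1, "dropped: verbose=%d\n", verbose);  /* gated out */
        verbose = 2;
        eprintf(1, "printed: verbose=%d\n", verbose);  /* passes the gate */
        return 0;
}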
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c new file mode 100644 index 000000000000..18d73aa2f0f8 --- /dev/null +++ b/tools/perf/util/record.c | |||
@@ -0,0 +1,108 @@ | |||
1 | #include "evlist.h" | ||
2 | #include "evsel.h" | ||
3 | #include "cpumap.h" | ||
4 | #include "parse-events.h" | ||
5 | |||
6 | typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel); | ||
7 | |||
8 | static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str) | ||
9 | { | ||
10 | struct perf_evlist *evlist; | ||
11 | struct perf_evsel *evsel; | ||
12 | int err = -EAGAIN, fd; | ||
13 | |||
14 | evlist = perf_evlist__new(); | ||
15 | if (!evlist) | ||
16 | return -ENOMEM; | ||
17 | |||
18 | if (parse_events(evlist, str)) | ||
19 | goto out_delete; | ||
20 | |||
21 | evsel = perf_evlist__first(evlist); | ||
22 | |||
23 | fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0); | ||
24 | if (fd < 0) | ||
25 | goto out_delete; | ||
26 | close(fd); | ||
27 | |||
28 | fn(evsel); | ||
29 | |||
30 | fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0); | ||
31 | if (fd < 0) { | ||
32 | if (errno == EINVAL) | ||
33 | err = -EINVAL; | ||
34 | goto out_delete; | ||
35 | } | ||
36 | close(fd); | ||
37 | err = 0; | ||
38 | |||
39 | out_delete: | ||
40 | perf_evlist__delete(evlist); | ||
41 | return err; | ||
42 | } | ||
43 | |||
44 | static bool perf_probe_api(setup_probe_fn_t fn) | ||
45 | { | ||
46 | const char *try[] = {"cycles:u", "instructions:u", "cpu-clock", NULL}; | ||
47 | struct cpu_map *cpus; | ||
48 | int cpu, ret, i = 0; | ||
49 | |||
50 | cpus = cpu_map__new(NULL); | ||
51 | if (!cpus) | ||
52 | return false; | ||
53 | cpu = cpus->map[0]; | ||
54 | cpu_map__delete(cpus); | ||
55 | |||
56 | do { | ||
57 | ret = perf_do_probe_api(fn, cpu, try[i++]); | ||
58 | if (!ret) | ||
59 | return true; | ||
60 | } while (ret == -EAGAIN && try[i]); | ||
61 | |||
62 | return false; | ||
63 | } | ||
64 | |||
65 | static void perf_probe_sample_identifier(struct perf_evsel *evsel) | ||
66 | { | ||
67 | evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER; | ||
68 | } | ||
69 | |||
70 | bool perf_can_sample_identifier(void) | ||
71 | { | ||
72 | return perf_probe_api(perf_probe_sample_identifier); | ||
73 | } | ||
74 | |||
75 | void perf_evlist__config(struct perf_evlist *evlist, | ||
76 | struct perf_record_opts *opts) | ||
77 | { | ||
78 | struct perf_evsel *evsel; | ||
79 | bool use_sample_identifier = false; | ||
80 | |||
81 | /* | ||
82 | * Set the evsel leader links before we configure attributes, | ||
83 | * since some might depend on this info. | ||
84 | */ | ||
85 | if (opts->group) | ||
86 | perf_evlist__set_leader(evlist); | ||
87 | |||
88 | if (evlist->cpus->map[0] < 0) | ||
89 | opts->no_inherit = true; | ||
90 | |||
91 | list_for_each_entry(evsel, &evlist->entries, node) | ||
92 | perf_evsel__config(evsel, opts); | ||
93 | |||
94 | if (evlist->nr_entries > 1) { | ||
95 | struct perf_evsel *first = perf_evlist__first(evlist); | ||
96 | |||
97 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
98 | if (evsel->attr.sample_type == first->attr.sample_type) | ||
99 | continue; | ||
100 | use_sample_identifier = perf_can_sample_identifier(); | ||
101 | break; | ||
102 | } | ||
103 | list_for_each_entry(evsel, &evlist->entries, node) | ||
104 | perf_evsel__set_sample_id(evsel, use_sample_identifier); | ||
105 | } | ||
106 | |||
107 | perf_evlist__set_id_pos(evlist); | ||
108 | } | ||
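The new record.c probes a kernel feature by opening a throwaway event, letting the setup callback flip the attribute bit under test, and opening again: EINVAL on the second attempt means the running kernel does not know the bit, while other failures make perf_probe_api() fall back to the next candidate event. The sketch below is a stripped-down version of the same idea for PERF_SAMPLE_IDENTIFIER, using the raw syscall directly; it drops the candidate-event fallback and assumes headers that already define PERF_SAMPLE_IDENTIFIER.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/* Open once to prove the base event works, set the bit under test,
 * open again: failure on the second attempt means the kernel lacks it. */
static bool can_sample_identifier(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;

        fd = perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0)
                return false;           /* cannot even probe */
        close(fd);

        attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
        fd = perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0)
                return false;           /* typically EINVAL: bit unknown */
        close(fd);
        return true;
}

int main(void)
{
        printf("PERF_SAMPLE_IDENTIFIER supported: %s\n",
               can_sample_identifier() ? "yes" : "no");
        return 0;
}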
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index eacec859f299..a85e4ae5f3ac 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c | |||
@@ -261,7 +261,8 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, | |||
261 | struct perf_sample *sample, | 261 | struct perf_sample *sample, |
262 | struct perf_evsel *evsel, | 262 | struct perf_evsel *evsel, |
263 | struct machine *machine __maybe_unused, | 263 | struct machine *machine __maybe_unused, |
264 | struct addr_location *al) | 264 | struct thread *thread, |
265 | struct addr_location *al) | ||
265 | { | 266 | { |
266 | struct format_field *field; | 267 | struct format_field *field; |
267 | static char handler[256]; | 268 | static char handler[256]; |
@@ -272,7 +273,6 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, | |||
272 | int cpu = sample->cpu; | 273 | int cpu = sample->cpu; |
273 | void *data = sample->raw_data; | 274 | void *data = sample->raw_data; |
274 | unsigned long long nsecs = sample->time; | 275 | unsigned long long nsecs = sample->time; |
275 | struct thread *thread = al->thread; | ||
276 | char *comm = thread->comm; | 276 | char *comm = thread->comm; |
277 | 277 | ||
278 | dSP; | 278 | dSP; |
@@ -351,7 +351,8 @@ static void perl_process_event_generic(union perf_event *event, | |||
351 | struct perf_sample *sample, | 351 | struct perf_sample *sample, |
352 | struct perf_evsel *evsel, | 352 | struct perf_evsel *evsel, |
353 | struct machine *machine __maybe_unused, | 353 | struct machine *machine __maybe_unused, |
354 | struct addr_location *al __maybe_unused) | 354 | struct thread *thread __maybe_unused, |
355 | struct addr_location *al __maybe_unused) | ||
355 | { | 356 | { |
356 | dSP; | 357 | dSP; |
357 | 358 | ||
@@ -377,10 +378,11 @@ static void perl_process_event(union perf_event *event, | |||
377 | struct perf_sample *sample, | 378 | struct perf_sample *sample, |
378 | struct perf_evsel *evsel, | 379 | struct perf_evsel *evsel, |
379 | struct machine *machine, | 380 | struct machine *machine, |
380 | struct addr_location *al) | 381 | struct thread *thread, |
382 | struct addr_location *al) | ||
381 | { | 383 | { |
382 | perl_process_tracepoint(event, sample, evsel, machine, al); | 384 | perl_process_tracepoint(event, sample, evsel, machine, thread, al); |
383 | perl_process_event_generic(event, sample, evsel, machine, al); | 385 | perl_process_event_generic(event, sample, evsel, machine, thread, al); |
384 | } | 386 | } |
385 | 387 | ||
386 | static void run_start_sub(void) | 388 | static void run_start_sub(void) |
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index e87aa5d9696b..cc75a3cef388 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c | |||
@@ -225,6 +225,7 @@ static void python_process_tracepoint(union perf_event *perf_event | |||
225 | struct perf_sample *sample, | 225 | struct perf_sample *sample, |
226 | struct perf_evsel *evsel, | 226 | struct perf_evsel *evsel, |
227 | struct machine *machine __maybe_unused, | 227 | struct machine *machine __maybe_unused, |
228 | struct thread *thread, | ||
228 | struct addr_location *al) | 229 | struct addr_location *al) |
229 | { | 230 | { |
230 | PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; | 231 | PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; |
@@ -238,7 +239,6 @@ static void python_process_tracepoint(union perf_event *perf_event | |||
238 | int cpu = sample->cpu; | 239 | int cpu = sample->cpu; |
239 | void *data = sample->raw_data; | 240 | void *data = sample->raw_data; |
240 | unsigned long long nsecs = sample->time; | 241 | unsigned long long nsecs = sample->time; |
241 | struct thread *thread = al->thread; | ||
242 | char *comm = thread->comm; | 242 | char *comm = thread->comm; |
243 | 243 | ||
244 | t = PyTuple_New(MAX_FIELDS); | 244 | t = PyTuple_New(MAX_FIELDS); |
@@ -345,12 +345,12 @@ static void python_process_general_event(union perf_event *perf_event | |||
345 | struct perf_sample *sample, | 345 | struct perf_sample *sample, |
346 | struct perf_evsel *evsel, | 346 | struct perf_evsel *evsel, |
347 | struct machine *machine __maybe_unused, | 347 | struct machine *machine __maybe_unused, |
348 | struct thread *thread, | ||
348 | struct addr_location *al) | 349 | struct addr_location *al) |
349 | { | 350 | { |
350 | PyObject *handler, *retval, *t, *dict; | 351 | PyObject *handler, *retval, *t, *dict; |
351 | static char handler_name[64]; | 352 | static char handler_name[64]; |
352 | unsigned n = 0; | 353 | unsigned n = 0; |
353 | struct thread *thread = al->thread; | ||
354 | 354 | ||
355 | /* | 355 | /* |
356 | * Use the MAX_FIELDS to make the function expandable, though | 356 | * Use the MAX_FIELDS to make the function expandable, though |
@@ -404,17 +404,18 @@ static void python_process_event(union perf_event *perf_event, | |||
404 | struct perf_sample *sample, | 404 | struct perf_sample *sample, |
405 | struct perf_evsel *evsel, | 405 | struct perf_evsel *evsel, |
406 | struct machine *machine, | 406 | struct machine *machine, |
407 | struct thread *thread, | ||
407 | struct addr_location *al) | 408 | struct addr_location *al) |
408 | { | 409 | { |
409 | switch (evsel->attr.type) { | 410 | switch (evsel->attr.type) { |
410 | case PERF_TYPE_TRACEPOINT: | 411 | case PERF_TYPE_TRACEPOINT: |
411 | python_process_tracepoint(perf_event, sample, evsel, | 412 | python_process_tracepoint(perf_event, sample, evsel, |
412 | machine, al); | 413 | machine, thread, al); |
413 | break; | 414 | break; |
414 | /* Reserve for future process_hw/sw/raw APIs */ | 415 | /* Reserve for future process_hw/sw/raw APIs */ |
415 | default: | 416 | default: |
416 | python_process_general_event(perf_event, sample, evsel, | 417 | python_process_general_event(perf_event, sample, evsel, |
417 | machine, al); | 418 | machine, thread, al); |
418 | } | 419 | } |
419 | } | 420 | } |
420 | 421 | ||
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index cf1fe01b7e89..1fc0c628683e 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -1,4 +1,5 @@ | |||
1 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
2 | #include <traceevent/event-parse.h> | ||
2 | 3 | ||
3 | #include <byteswap.h> | 4 | #include <byteswap.h> |
4 | #include <unistd.h> | 5 | #include <unistd.h> |
@@ -12,7 +13,6 @@ | |||
12 | #include "sort.h" | 13 | #include "sort.h" |
13 | #include "util.h" | 14 | #include "util.h" |
14 | #include "cpumap.h" | 15 | #include "cpumap.h" |
15 | #include "event-parse.h" | ||
16 | #include "perf_regs.h" | 16 | #include "perf_regs.h" |
17 | #include "vdso.h" | 17 | #include "vdso.h" |
18 | 18 | ||
@@ -24,7 +24,7 @@ static int perf_session__open(struct perf_session *self, bool force) | |||
24 | self->fd_pipe = true; | 24 | self->fd_pipe = true; |
25 | self->fd = STDIN_FILENO; | 25 | self->fd = STDIN_FILENO; |
26 | 26 | ||
27 | if (perf_session__read_header(self, self->fd) < 0) | 27 | if (perf_session__read_header(self) < 0) |
28 | pr_err("incompatible file format (rerun with -v to learn more)"); | 28 | pr_err("incompatible file format (rerun with -v to learn more)"); |
29 | 29 | ||
30 | return 0; | 30 | return 0; |
@@ -56,7 +56,7 @@ static int perf_session__open(struct perf_session *self, bool force) | |||
56 | goto out_close; | 56 | goto out_close; |
57 | } | 57 | } |
58 | 58 | ||
59 | if (perf_session__read_header(self, self->fd) < 0) { | 59 | if (perf_session__read_header(self) < 0) { |
60 | pr_err("incompatible file format (rerun with -v to learn more)"); | 60 | pr_err("incompatible file format (rerun with -v to learn more)"); |
61 | goto out_close; | 61 | goto out_close; |
62 | } | 62 | } |
@@ -71,6 +71,11 @@ static int perf_session__open(struct perf_session *self, bool force) | |||
71 | goto out_close; | 71 | goto out_close; |
72 | } | 72 | } |
73 | 73 | ||
74 | if (!perf_evlist__valid_read_format(self->evlist)) { | ||
75 | pr_err("non matching read_format"); | ||
76 | goto out_close; | ||
77 | } | ||
78 | |||
74 | self->size = input_stat.st_size; | 79 | self->size = input_stat.st_size; |
75 | return 0; | 80 | return 0; |
76 | 81 | ||
@@ -193,7 +198,9 @@ void perf_session__delete(struct perf_session *self) | |||
193 | vdso__exit(); | 198 | vdso__exit(); |
194 | } | 199 | } |
195 | 200 | ||
196 | static int process_event_synth_tracing_data_stub(union perf_event *event | 201 | static int process_event_synth_tracing_data_stub(struct perf_tool *tool |
202 | __maybe_unused, | ||
203 | union perf_event *event | ||
197 | __maybe_unused, | 204 | __maybe_unused, |
198 | struct perf_session *session | 205 | struct perf_session *session |
199 | __maybe_unused) | 206 | __maybe_unused) |
@@ -202,7 +209,8 @@ static int process_event_synth_tracing_data_stub(union perf_event *event | |||
202 | return 0; | 209 | return 0; |
203 | } | 210 | } |
204 | 211 | ||
205 | static int process_event_synth_attr_stub(union perf_event *event __maybe_unused, | 212 | static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused, |
213 | union perf_event *event __maybe_unused, | ||
206 | struct perf_evlist **pevlist | 214 | struct perf_evlist **pevlist |
207 | __maybe_unused) | 215 | __maybe_unused) |
208 | { | 216 | { |
@@ -238,18 +246,11 @@ static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, | |||
238 | return 0; | 246 | return 0; |
239 | } | 247 | } |
240 | 248 | ||
241 | static int process_event_type_stub(struct perf_tool *tool __maybe_unused, | ||
242 | union perf_event *event __maybe_unused) | ||
243 | { | ||
244 | dump_printf(": unhandled!\n"); | ||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | static int process_finished_round(struct perf_tool *tool, | 249 | static int process_finished_round(struct perf_tool *tool, |
249 | union perf_event *event, | 250 | union perf_event *event, |
250 | struct perf_session *session); | 251 | struct perf_session *session); |
251 | 252 | ||
252 | static void perf_tool__fill_defaults(struct perf_tool *tool) | 253 | void perf_tool__fill_defaults(struct perf_tool *tool) |
253 | { | 254 | { |
254 | if (tool->sample == NULL) | 255 | if (tool->sample == NULL) |
255 | tool->sample = process_event_sample_stub; | 256 | tool->sample = process_event_sample_stub; |
@@ -271,8 +272,6 @@ static void perf_tool__fill_defaults(struct perf_tool *tool) | |||
271 | tool->unthrottle = process_event_stub; | 272 | tool->unthrottle = process_event_stub; |
272 | if (tool->attr == NULL) | 273 | if (tool->attr == NULL) |
273 | tool->attr = process_event_synth_attr_stub; | 274 | tool->attr = process_event_synth_attr_stub; |
274 | if (tool->event_type == NULL) | ||
275 | tool->event_type = process_event_type_stub; | ||
276 | if (tool->tracing_data == NULL) | 275 | if (tool->tracing_data == NULL) |
277 | tool->tracing_data = process_event_synth_tracing_data_stub; | 276 | tool->tracing_data = process_event_synth_tracing_data_stub; |
278 | if (tool->build_id == NULL) | 277 | if (tool->build_id == NULL) |
@@ -496,7 +495,7 @@ static int perf_session_deliver_event(struct perf_session *session, | |||
496 | u64 file_offset); | 495 | u64 file_offset); |
497 | 496 | ||
498 | static int flush_sample_queue(struct perf_session *s, | 497 | static int flush_sample_queue(struct perf_session *s, |
499 | struct perf_tool *tool) | 498 | struct perf_tool *tool) |
500 | { | 499 | { |
501 | struct ordered_samples *os = &s->ordered_samples; | 500 | struct ordered_samples *os = &s->ordered_samples; |
502 | struct list_head *head = &os->samples; | 501 | struct list_head *head = &os->samples; |
@@ -644,7 +643,7 @@ static void __queue_event(struct sample_queue *new, struct perf_session *s) | |||
644 | 643 | ||
645 | #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) | 644 | #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) |
646 | 645 | ||
647 | static int perf_session_queue_event(struct perf_session *s, union perf_event *event, | 646 | int perf_session_queue_event(struct perf_session *s, union perf_event *event, |
648 | struct perf_sample *sample, u64 file_offset) | 647 | struct perf_sample *sample, u64 file_offset) |
649 | { | 648 | { |
650 | struct ordered_samples *os = &s->ordered_samples; | 649 | struct ordered_samples *os = &s->ordered_samples; |
@@ -740,7 +739,7 @@ static void perf_session__print_tstamp(struct perf_session *session, | |||
740 | union perf_event *event, | 739 | union perf_event *event, |
741 | struct perf_sample *sample) | 740 | struct perf_sample *sample) |
742 | { | 741 | { |
743 | u64 sample_type = perf_evlist__sample_type(session->evlist); | 742 | u64 sample_type = __perf_evlist__combined_sample_type(session->evlist); |
744 | 743 | ||
745 | if (event->header.type != PERF_RECORD_SAMPLE && | 744 | if (event->header.type != PERF_RECORD_SAMPLE && |
746 | !perf_evlist__sample_id_all(session->evlist)) { | 745 | !perf_evlist__sample_id_all(session->evlist)) { |
@@ -755,6 +754,36 @@ static void perf_session__print_tstamp(struct perf_session *session, | |||
755 | printf("%" PRIu64 " ", sample->time); | 754 | printf("%" PRIu64 " ", sample->time); |
756 | } | 755 | } |
757 | 756 | ||
757 | static void sample_read__printf(struct perf_sample *sample, u64 read_format) | ||
758 | { | ||
759 | printf("... sample_read:\n"); | ||
760 | |||
761 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
762 | printf("...... time enabled %016" PRIx64 "\n", | ||
763 | sample->read.time_enabled); | ||
764 | |||
765 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
766 | printf("...... time running %016" PRIx64 "\n", | ||
767 | sample->read.time_running); | ||
768 | |||
769 | if (read_format & PERF_FORMAT_GROUP) { | ||
770 | u64 i; | ||
771 | |||
772 | printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); | ||
773 | |||
774 | for (i = 0; i < sample->read.group.nr; i++) { | ||
775 | struct sample_read_value *value; | ||
776 | |||
777 | value = &sample->read.group.values[i]; | ||
778 | printf("..... id %016" PRIx64 | ||
779 | ", value %016" PRIx64 "\n", | ||
780 | value->id, value->value); | ||
781 | } | ||
782 | } else | ||
783 | printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n", | ||
784 | sample->read.one.id, sample->read.one.value); | ||
785 | } | ||
786 | |||
758 | static void dump_event(struct perf_session *session, union perf_event *event, | 787 | static void dump_event(struct perf_session *session, union perf_event *event, |
759 | u64 file_offset, struct perf_sample *sample) | 788 | u64 file_offset, struct perf_sample *sample) |
760 | { | 789 | { |
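sample_read__printf() above walks the PERF_SAMPLE_READ payload, whose shape depends on the event's read_format: a single counter carries one value (plus optional id and times), while PERF_FORMAT_GROUP carries a member count followed by one { value, id } pair per group member. The structs below sketch that layout as described by the perf_event_open(2) read_format rules; the type names are illustrative, not perf's own definitions, and the optional fields are only present on the wire when the matching PERF_FORMAT_* bits are set.

#include <stdint.h>

struct read_value {            /* one group member */
        uint64_t value;
        uint64_t id;           /* if PERF_FORMAT_ID */
};

struct read_single {           /* read_format without PERF_FORMAT_GROUP */
        uint64_t value;
        uint64_t time_enabled; /* if PERF_FORMAT_TOTAL_TIME_ENABLED */
        uint64_t time_running; /* if PERF_FORMAT_TOTAL_TIME_RUNNING */
        uint64_t id;           /* if PERF_FORMAT_ID */
};

struct read_group {            /* read_format with PERF_FORMAT_GROUP */
        uint64_t nr;           /* number of group members */
        uint64_t time_enabled; /* if PERF_FORMAT_TOTAL_TIME_ENABLED */
        uint64_t time_running; /* if PERF_FORMAT_TOTAL_TIME_RUNNING */
        struct read_value values[];  /* nr entries follow */
};

int main(void) { return 0; }   /* layout sketch only */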
@@ -804,11 +833,15 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event, | |||
804 | 833 | ||
805 | if (sample_type & PERF_SAMPLE_DATA_SRC) | 834 | if (sample_type & PERF_SAMPLE_DATA_SRC) |
806 | printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); | 835 | printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); |
836 | |||
837 | if (sample_type & PERF_SAMPLE_READ) | ||
838 | sample_read__printf(sample, evsel->attr.read_format); | ||
807 | } | 839 | } |
808 | 840 | ||
809 | static struct machine * | 841 | static struct machine * |
810 | perf_session__find_machine_for_cpumode(struct perf_session *session, | 842 | perf_session__find_machine_for_cpumode(struct perf_session *session, |
811 | union perf_event *event) | 843 | union perf_event *event, |
844 | struct perf_sample *sample) | ||
812 | { | 845 | { |
813 | const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; | 846 | const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; |
814 | 847 | ||
@@ -820,7 +853,7 @@ static struct machine * | |||
820 | if (event->header.type == PERF_RECORD_MMAP) | 853 | if (event->header.type == PERF_RECORD_MMAP) |
821 | pid = event->mmap.pid; | 854 | pid = event->mmap.pid; |
822 | else | 855 | else |
823 | pid = event->ip.pid; | 856 | pid = sample->pid; |
824 | 857 | ||
825 | return perf_session__findnew_machine(session, pid); | 858 | return perf_session__findnew_machine(session, pid); |
826 | } | 859 | } |
@@ -828,6 +861,75 @@ static struct machine * | |||
828 | return &session->machines.host; | 861 | return &session->machines.host; |
829 | } | 862 | } |
830 | 863 | ||
864 | static int deliver_sample_value(struct perf_session *session, | ||
865 | struct perf_tool *tool, | ||
866 | union perf_event *event, | ||
867 | struct perf_sample *sample, | ||
868 | struct sample_read_value *v, | ||
869 | struct machine *machine) | ||
870 | { | ||
871 | struct perf_sample_id *sid; | ||
872 | |||
873 | sid = perf_evlist__id2sid(session->evlist, v->id); | ||
874 | if (sid) { | ||
875 | sample->id = v->id; | ||
876 | sample->period = v->value - sid->period; | ||
877 | sid->period = v->value; | ||
878 | } | ||
879 | |||
880 | if (!sid || sid->evsel == NULL) { | ||
881 | ++session->stats.nr_unknown_id; | ||
882 | return 0; | ||
883 | } | ||
884 | |||
885 | return tool->sample(tool, event, sample, sid->evsel, machine); | ||
886 | } | ||
887 | |||
888 | static int deliver_sample_group(struct perf_session *session, | ||
889 | struct perf_tool *tool, | ||
890 | union perf_event *event, | ||
891 | struct perf_sample *sample, | ||
892 | struct machine *machine) | ||
893 | { | ||
894 | int ret = -EINVAL; | ||
895 | u64 i; | ||
896 | |||
897 | for (i = 0; i < sample->read.group.nr; i++) { | ||
898 | ret = deliver_sample_value(session, tool, event, sample, | ||
899 | &sample->read.group.values[i], | ||
900 | machine); | ||
901 | if (ret) | ||
902 | break; | ||
903 | } | ||
904 | |||
905 | return ret; | ||
906 | } | ||
907 | |||
908 | static int | ||
909 | perf_session__deliver_sample(struct perf_session *session, | ||
910 | struct perf_tool *tool, | ||
911 | union perf_event *event, | ||
912 | struct perf_sample *sample, | ||
913 | struct perf_evsel *evsel, | ||
914 | struct machine *machine) | ||
915 | { | ||
916 | /* We know evsel != NULL. */ | ||
917 | u64 sample_type = evsel->attr.sample_type; | ||
918 | u64 read_format = evsel->attr.read_format; | ||
919 | |||
920 | /* Standard sample delivery. */ | ||
921 | if (!(sample_type & PERF_SAMPLE_READ)) | ||
922 | return tool->sample(tool, event, sample, evsel, machine); | ||
923 | |||
924 | /* For PERF_SAMPLE_READ we have either single or group mode. */ | ||
925 | if (read_format & PERF_FORMAT_GROUP) | ||
926 | return deliver_sample_group(session, tool, event, sample, | ||
927 | machine); | ||
928 | else | ||
929 | return deliver_sample_value(session, tool, event, sample, | ||
930 | &sample->read.one, machine); | ||
931 | } | ||
932 | |||
831 | static int perf_session_deliver_event(struct perf_session *session, | 933 | static int perf_session_deliver_event(struct perf_session *session, |
832 | union perf_event *event, | 934 | union perf_event *event, |
833 | struct perf_sample *sample, | 935 | struct perf_sample *sample, |
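With PERF_SAMPLE_READ the sample carries a raw running total rather than a period, so deliver_sample_value() above reconstructs the per-sample period as the delta against the previous total cached in the sample id, then stores the new total back. The arithmetic is easiest to see with made-up numbers; the struct and function names below are invented for the sketch and only mirror the spirit of perf's struct perf_sample_id bookkeeping.

#include <stdio.h>
#include <stdint.h>

struct sample_id_state {
        uint64_t period;        /* last raw total seen for this id */
};

/* Turn a raw running total into the period covered by this sample. */
static uint64_t update_period(struct sample_id_state *sid, uint64_t total)
{
        uint64_t period = total - sid->period;

        sid->period = total;
        return period;
}

int main(void)
{
        struct sample_id_state sid = { 0 };
        /* Hypothetical raw totals read from three consecutive samples. */
        uint64_t totals[] = { 120000, 245000, 260000 };
        unsigned i;

        for (i = 0; i < 3; i++)
                printf("sample %u: period %llu\n", i,
                       (unsigned long long)update_period(&sid, totals[i]));
        return 0;   /* prints 120000, 125000, 15000 */
}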
@@ -857,7 +959,8 @@ static int perf_session_deliver_event(struct perf_session *session, | |||
857 | hists__inc_nr_events(&evsel->hists, event->header.type); | 959 | hists__inc_nr_events(&evsel->hists, event->header.type); |
858 | } | 960 | } |
859 | 961 | ||
860 | machine = perf_session__find_machine_for_cpumode(session, event); | 962 | machine = perf_session__find_machine_for_cpumode(session, event, |
963 | sample); | ||
861 | 964 | ||
862 | switch (event->header.type) { | 965 | switch (event->header.type) { |
863 | case PERF_RECORD_SAMPLE: | 966 | case PERF_RECORD_SAMPLE: |
@@ -870,7 +973,8 @@ static int perf_session_deliver_event(struct perf_session *session, | |||
870 | ++session->stats.nr_unprocessable_samples; | 973 | ++session->stats.nr_unprocessable_samples; |
871 | return 0; | 974 | return 0; |
872 | } | 975 | } |
873 | return tool->sample(tool, event, sample, evsel, machine); | 976 | return perf_session__deliver_sample(session, tool, event, |
977 | sample, evsel, machine); | ||
874 | case PERF_RECORD_MMAP: | 978 | case PERF_RECORD_MMAP: |
875 | return tool->mmap(tool, event, sample, machine); | 979 | return tool->mmap(tool, event, sample, machine); |
876 | case PERF_RECORD_COMM: | 980 | case PERF_RECORD_COMM: |
@@ -895,22 +999,6 @@ static int perf_session_deliver_event(struct perf_session *session, | |||
895 | } | 999 | } |
896 | } | 1000 | } |
897 | 1001 | ||
898 | static int perf_session__preprocess_sample(struct perf_session *session, | ||
899 | union perf_event *event, struct perf_sample *sample) | ||
900 | { | ||
901 | if (event->header.type != PERF_RECORD_SAMPLE || | ||
902 | !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN)) | ||
903 | return 0; | ||
904 | |||
905 | if (!ip_callchain__valid(sample->callchain, event)) { | ||
906 | pr_debug("call-chain problem with event, skipping it.\n"); | ||
907 | ++session->stats.nr_invalid_chains; | ||
908 | session->stats.total_invalid_chains += sample->period; | ||
909 | return -EINVAL; | ||
910 | } | ||
911 | return 0; | ||
912 | } | ||
913 | |||
914 | static int perf_session__process_user_event(struct perf_session *session, union perf_event *event, | 1002 | static int perf_session__process_user_event(struct perf_session *session, union perf_event *event, |
915 | struct perf_tool *tool, u64 file_offset) | 1003 | struct perf_tool *tool, u64 file_offset) |
916 | { | 1004 | { |
@@ -921,16 +1009,14 @@ static int perf_session__process_user_event(struct perf_session *session, union | |||
921 | /* These events are processed right away */ | 1009 | /* These events are processed right away */ |
922 | switch (event->header.type) { | 1010 | switch (event->header.type) { |
923 | case PERF_RECORD_HEADER_ATTR: | 1011 | case PERF_RECORD_HEADER_ATTR: |
924 | err = tool->attr(event, &session->evlist); | 1012 | err = tool->attr(tool, event, &session->evlist); |
925 | if (err == 0) | 1013 | if (err == 0) |
926 | perf_session__set_id_hdr_size(session); | 1014 | perf_session__set_id_hdr_size(session); |
927 | return err; | 1015 | return err; |
928 | case PERF_RECORD_HEADER_EVENT_TYPE: | ||
929 | return tool->event_type(tool, event); | ||
930 | case PERF_RECORD_HEADER_TRACING_DATA: | 1016 | case PERF_RECORD_HEADER_TRACING_DATA: |
931 | /* setup for reading amidst mmap */ | 1017 | /* setup for reading amidst mmap */ |
932 | lseek(session->fd, file_offset, SEEK_SET); | 1018 | lseek(session->fd, file_offset, SEEK_SET); |
933 | return tool->tracing_data(event, session); | 1019 | return tool->tracing_data(tool, event, session); |
934 | case PERF_RECORD_HEADER_BUILD_ID: | 1020 | case PERF_RECORD_HEADER_BUILD_ID: |
935 | return tool->build_id(tool, event, session); | 1021 | return tool->build_id(tool, event, session); |
936 | case PERF_RECORD_FINISHED_ROUND: | 1022 | case PERF_RECORD_FINISHED_ROUND: |
@@ -975,10 +1061,6 @@ static int perf_session__process_event(struct perf_session *session, | |||
975 | if (ret) | 1061 | if (ret) |
976 | return ret; | 1062 | return ret; |
977 | 1063 | ||
978 | /* Preprocess sample records - precheck callchains */ | ||
979 | if (perf_session__preprocess_sample(session, event, &sample)) | ||
980 | return 0; | ||
981 | |||
982 | if (tool->ordered_samples) { | 1064 | if (tool->ordered_samples) { |
983 | ret = perf_session_queue_event(session, event, &sample, | 1065 | ret = perf_session_queue_event(session, event, &sample, |
984 | file_offset); | 1066 | file_offset); |
@@ -999,7 +1081,7 @@ void perf_event_header__bswap(struct perf_event_header *self) | |||
999 | 1081 | ||
1000 | struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) | 1082 | struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) |
1001 | { | 1083 | { |
1002 | return machine__findnew_thread(&session->machines.host, pid); | 1084 | return machine__findnew_thread(&session->machines.host, 0, pid); |
1003 | } | 1085 | } |
1004 | 1086 | ||
1005 | static struct thread *perf_session__register_idle_thread(struct perf_session *self) | 1087 | static struct thread *perf_session__register_idle_thread(struct perf_session *self) |
@@ -1091,8 +1173,10 @@ more: | |||
1091 | perf_event_header__bswap(&event->header); | 1173 | perf_event_header__bswap(&event->header); |
1092 | 1174 | ||
1093 | size = event->header.size; | 1175 | size = event->header.size; |
1094 | if (size == 0) | 1176 | if (size < sizeof(struct perf_event_header)) { |
1095 | size = 8; | 1177 | pr_err("bad event header size\n"); |
1178 | goto out_err; | ||
1179 | } | ||
1096 | 1180 | ||
1097 | if (size > cur_size) { | 1181 | if (size > cur_size) { |
1098 | void *new = realloc(buf, size); | 1182 | void *new = realloc(buf, size); |
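Both the pipe reader here and the mmap reader further down now reject any record whose header claims fewer than sizeof(struct perf_event_header) bytes, instead of quietly rounding a zero size up to 8: a smaller value can only come from a corrupt or truncated stream and would otherwise stall or over-read the parser. The sketch below shows the same check against a locally mirrored 8-byte header; the fabricated record bytes exist only to exercise the reject path.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

/* Mirrors the 8-byte struct perf_event_header from <linux/perf_event.h>. */
struct perf_event_header {
        uint32_t type;
        uint16_t misc;
        uint16_t size;          /* total record size, header included */
};

static bool header_ok(const void *buf, size_t avail)
{
        struct perf_event_header hdr;

        if (avail < sizeof(hdr))
                return false;
        memcpy(&hdr, buf, sizeof(hdr));
        /* size must cover at least the header itself and fit the buffer */
        return hdr.size >= sizeof(hdr) && hdr.size <= avail;
}

int main(void)
{
        /* Fabricated little-endian record: type=9, misc=0, size=4 -> bogus. */
        unsigned char bad[8] = { 9, 0, 0, 0, 0, 0, 4, 0 };

        printf("bad record accepted: %s\n",
               header_ok(bad, sizeof(bad)) ? "yes" : "no");
        return 0;
}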
@@ -1161,8 +1245,12 @@ fetch_mmaped_event(struct perf_session *session, | |||
1161 | if (session->header.needs_swap) | 1245 | if (session->header.needs_swap) |
1162 | perf_event_header__bswap(&event->header); | 1246 | perf_event_header__bswap(&event->header); |
1163 | 1247 | ||
1164 | if (head + event->header.size > mmap_size) | 1248 | if (head + event->header.size > mmap_size) { |
1249 | /* We're not fetching the event so swap back again */ | ||
1250 | if (session->header.needs_swap) | ||
1251 | perf_event_header__bswap(&event->header); | ||
1165 | return NULL; | 1252 | return NULL; |
1253 | } | ||
1166 | 1254 | ||
1167 | return event; | 1255 | return event; |
1168 | } | 1256 | } |
@@ -1242,7 +1330,7 @@ more: | |||
1242 | 1330 | ||
1243 | size = event->header.size; | 1331 | size = event->header.size; |
1244 | 1332 | ||
1245 | if (size == 0 || | 1333 | if (size < sizeof(struct perf_event_header) || |
1246 | perf_session__process_event(session, event, tool, file_pos) < 0) { | 1334 | perf_session__process_event(session, event, tool, file_pos) < 0) { |
1247 | pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", | 1335 | pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", |
1248 | file_offset + head, event->header.size, | 1336 | file_offset + head, event->header.size, |
@@ -1295,12 +1383,15 @@ int perf_session__process_events(struct perf_session *self, | |||
1295 | 1383 | ||
1296 | bool perf_session__has_traces(struct perf_session *session, const char *msg) | 1384 | bool perf_session__has_traces(struct perf_session *session, const char *msg) |
1297 | { | 1385 | { |
1298 | if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) { | 1386 | struct perf_evsel *evsel; |
1299 | pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); | 1387 | |
1300 | return false; | 1388 | list_for_each_entry(evsel, &session->evlist->entries, node) { |
1389 | if (evsel->attr.type == PERF_TYPE_TRACEPOINT) | ||
1390 | return true; | ||
1301 | } | 1391 | } |
1302 | 1392 | ||
1303 | return true; | 1393 | pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); |
1394 | return false; | ||
1304 | } | 1395 | } |
1305 | 1396 | ||
1306 | int maps__set_kallsyms_ref_reloc_sym(struct map **maps, | 1397 | int maps__set_kallsyms_ref_reloc_sym(struct map **maps, |
@@ -1383,13 +1474,18 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, | |||
1383 | 1474 | ||
1384 | void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, | 1475 | void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, |
1385 | struct perf_sample *sample, struct machine *machine, | 1476 | struct perf_sample *sample, struct machine *machine, |
1386 | int print_sym, int print_dso, int print_symoffset) | 1477 | unsigned int print_opts, unsigned int stack_depth) |
1387 | { | 1478 | { |
1388 | struct addr_location al; | 1479 | struct addr_location al; |
1389 | struct callchain_cursor_node *node; | 1480 | struct callchain_cursor_node *node; |
1390 | 1481 | int print_ip = print_opts & PRINT_IP_OPT_IP; | |
1391 | if (perf_event__preprocess_sample(event, machine, &al, sample, | 1482 | int print_sym = print_opts & PRINT_IP_OPT_SYM; |
1392 | NULL) < 0) { | 1483 | int print_dso = print_opts & PRINT_IP_OPT_DSO; |
1484 | int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET; | ||
1485 | int print_oneline = print_opts & PRINT_IP_OPT_ONELINE; | ||
1486 | char s = print_oneline ? ' ' : '\t'; | ||
1487 | |||
1488 | if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { | ||
1393 | error("problem processing %d event, skipping it.\n", | 1489 | error("problem processing %d event, skipping it.\n", |
1394 | event->header.type); | 1490 | event->header.type); |
1395 | return; | 1491 | return; |
@@ -1397,37 +1493,50 @@ void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, | |||
1397 | 1493 | ||
1398 | if (symbol_conf.use_callchain && sample->callchain) { | 1494 | if (symbol_conf.use_callchain && sample->callchain) { |
1399 | 1495 | ||
1400 | |||
1401 | if (machine__resolve_callchain(machine, evsel, al.thread, | 1496 | if (machine__resolve_callchain(machine, evsel, al.thread, |
1402 | sample, NULL) != 0) { | 1497 | sample, NULL, NULL) != 0) { |
1403 | if (verbose) | 1498 | if (verbose) |
1404 | error("Failed to resolve callchain. Skipping\n"); | 1499 | error("Failed to resolve callchain. Skipping\n"); |
1405 | return; | 1500 | return; |
1406 | } | 1501 | } |
1407 | callchain_cursor_commit(&callchain_cursor); | 1502 | callchain_cursor_commit(&callchain_cursor); |
1408 | 1503 | ||
1409 | while (1) { | 1504 | while (stack_depth) { |
1410 | node = callchain_cursor_current(&callchain_cursor); | 1505 | node = callchain_cursor_current(&callchain_cursor); |
1411 | if (!node) | 1506 | if (!node) |
1412 | break; | 1507 | break; |
1413 | 1508 | ||
1414 | printf("\t%16" PRIx64, node->ip); | 1509 | if (print_ip) |
1510 | printf("%c%16" PRIx64, s, node->ip); | ||
1511 | |||
1415 | if (print_sym) { | 1512 | if (print_sym) { |
1416 | printf(" "); | 1513 | printf(" "); |
1417 | symbol__fprintf_symname(node->sym, stdout); | 1514 | if (print_symoffset) { |
1515 | al.addr = node->ip; | ||
1516 | al.map = node->map; | ||
1517 | symbol__fprintf_symname_offs(node->sym, &al, stdout); | ||
1518 | } else | ||
1519 | symbol__fprintf_symname(node->sym, stdout); | ||
1418 | } | 1520 | } |
1521 | |||
1419 | if (print_dso) { | 1522 | if (print_dso) { |
1420 | printf(" ("); | 1523 | printf(" ("); |
1421 | map__fprintf_dsoname(node->map, stdout); | 1524 | map__fprintf_dsoname(node->map, stdout); |
1422 | printf(")"); | 1525 | printf(")"); |
1423 | } | 1526 | } |
1424 | printf("\n"); | 1527 | |
1528 | if (!print_oneline) | ||
1529 | printf("\n"); | ||
1425 | 1530 | ||
1426 | callchain_cursor_advance(&callchain_cursor); | 1531 | callchain_cursor_advance(&callchain_cursor); |
1532 | |||
1533 | stack_depth--; | ||
1427 | } | 1534 | } |
1428 | 1535 | ||
1429 | } else { | 1536 | } else { |
1430 | printf("%16" PRIx64, sample->ip); | 1537 | if (print_ip) |
1538 | printf("%16" PRIx64, sample->ip); | ||
1539 | |||
1431 | if (print_sym) { | 1540 | if (print_sym) { |
1432 | printf(" "); | 1541 | printf(" "); |
1433 | if (print_symoffset) | 1542 | if (print_symoffset) |
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index f3b235ec7bf4..3aa75fb2225f 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h | |||
@@ -37,11 +37,16 @@ struct perf_session { | |||
37 | int fd; | 37 | int fd; |
38 | bool fd_pipe; | 38 | bool fd_pipe; |
39 | bool repipe; | 39 | bool repipe; |
40 | char *cwd; | ||
41 | struct ordered_samples ordered_samples; | 40 | struct ordered_samples ordered_samples; |
42 | char filename[1]; | 41 | char filename[1]; |
43 | }; | 42 | }; |
44 | 43 | ||
44 | #define PRINT_IP_OPT_IP (1<<0) | ||
45 | #define PRINT_IP_OPT_SYM (1<<1) | ||
46 | #define PRINT_IP_OPT_DSO (1<<2) | ||
47 | #define PRINT_IP_OPT_SYMOFFSET (1<<3) | ||
48 | #define PRINT_IP_OPT_ONELINE (1<<4) | ||
49 | |||
45 | struct perf_tool; | 50 | struct perf_tool; |
46 | 51 | ||
47 | struct perf_session *perf_session__new(const char *filename, int mode, | 52 | struct perf_session *perf_session__new(const char *filename, int mode, |
@@ -57,6 +62,11 @@ int __perf_session__process_events(struct perf_session *self, | |||
57 | int perf_session__process_events(struct perf_session *self, | 62 | int perf_session__process_events(struct perf_session *self, |
58 | struct perf_tool *tool); | 63 | struct perf_tool *tool); |
59 | 64 | ||
65 | int perf_session_queue_event(struct perf_session *s, union perf_event *event, | ||
66 | struct perf_sample *sample, u64 file_offset); | ||
67 | |||
68 | void perf_tool__fill_defaults(struct perf_tool *tool); | ||
69 | |||
60 | int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel *evsel, | 70 | int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel *evsel, |
61 | struct thread *thread, | 71 | struct thread *thread, |
62 | struct ip_callchain *chain, | 72 | struct ip_callchain *chain, |
@@ -99,7 +109,7 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, | |||
99 | 109 | ||
100 | void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, | 110 | void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, |
101 | struct perf_sample *sample, struct machine *machine, | 111 | struct perf_sample *sample, struct machine *machine, |
102 | int print_sym, int print_dso, int print_symoffset); | 112 | unsigned int print_opts, unsigned int stack_depth); |
103 | 113 | ||
104 | int perf_session__cpu_bitmap(struct perf_session *session, | 114 | int perf_session__cpu_bitmap(struct perf_session *session, |
105 | const char *cpu_list, unsigned long *cpu_bitmap); | 115 | const char *cpu_list, unsigned long *cpu_bitmap); |
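perf_evsel__print_ip() now takes a single PRINT_IP_OPT_* bitmask plus a stack depth in place of separate int flags, so options such as symbol offsets and one-line callchains can be added without another signature change. The sketch below illustrates the same flag-mask style with invented OPT_* names and a toy print_frame(); it is not perf's API, only the shape of the interface.

#include <stdio.h>

/* Hypothetical option bits, mirroring the PRINT_IP_OPT_* scheme. */
#define OPT_IP        (1 << 0)
#define OPT_SYM       (1 << 1)
#define OPT_DSO       (1 << 2)
#define OPT_SYMOFFSET (1 << 3)
#define OPT_ONELINE   (1 << 4)

static void print_frame(unsigned long long ip, const char *sym,
                        const char *dso, unsigned int opts)
{
        char sep = (opts & OPT_ONELINE) ? ' ' : '\t';

        if (opts & OPT_IP)
                printf("%c%16llx", sep, ip);
        if (opts & OPT_SYM)
                printf(" %s", sym);
        if (opts & OPT_DSO)
                printf(" (%s)", dso);
        if (!(opts & OPT_ONELINE))
                printf("\n");
}

int main(void)
{
        unsigned int opts = OPT_IP | OPT_SYM | OPT_DSO;

        print_frame(0xffffffff8105c8f0ULL, "schedule", "[kernel.kallsyms]", opts);
        print_frame(0x40123aULL, "main", "/usr/bin/example", opts | OPT_ONELINE);
        printf("\n");
        return 0;
}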
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 313a5a730112..5f118a089519 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c | |||
@@ -7,6 +7,8 @@ const char default_parent_pattern[] = "^sys_|^do_page_fault"; | |||
7 | const char *parent_pattern = default_parent_pattern; | 7 | const char *parent_pattern = default_parent_pattern; |
8 | const char default_sort_order[] = "comm,dso,symbol"; | 8 | const char default_sort_order[] = "comm,dso,symbol"; |
9 | const char *sort_order = default_sort_order; | 9 | const char *sort_order = default_sort_order; |
10 | regex_t ignore_callees_regex; | ||
11 | int have_ignore_callees = 0; | ||
10 | int sort__need_collapse = 0; | 12 | int sort__need_collapse = 0; |
11 | int sort__has_parent = 0; | 13 | int sort__has_parent = 0; |
12 | int sort__has_sym = 0; | 14 | int sort__has_sym = 0; |
@@ -55,14 +57,14 @@ static int64_t cmp_null(void *l, void *r) | |||
55 | static int64_t | 57 | static int64_t |
56 | sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) | 58 | sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) |
57 | { | 59 | { |
58 | return right->thread->pid - left->thread->pid; | 60 | return right->thread->tid - left->thread->tid; |
59 | } | 61 | } |
60 | 62 | ||
61 | static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf, | 63 | static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf, |
62 | size_t size, unsigned int width) | 64 | size_t size, unsigned int width) |
63 | { | 65 | { |
64 | return repsep_snprintf(bf, size, "%*s:%5d", width - 6, | 66 | return repsep_snprintf(bf, size, "%*s:%5d", width - 6, |
65 | self->thread->comm ?: "", self->thread->pid); | 67 | self->thread->comm ?: "", self->thread->tid); |
66 | } | 68 | } |
67 | 69 | ||
68 | struct sort_entry sort_thread = { | 70 | struct sort_entry sort_thread = { |
@@ -77,7 +79,7 @@ struct sort_entry sort_thread = { | |||
77 | static int64_t | 79 | static int64_t |
78 | sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) | 80 | sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) |
79 | { | 81 | { |
80 | return right->thread->pid - left->thread->pid; | 82 | return right->thread->tid - left->thread->tid; |
81 | } | 83 | } |
82 | 84 | ||
83 | static int64_t | 85 | static int64_t |
@@ -872,6 +874,8 @@ static struct sort_dimension common_sort_dimensions[] = { | |||
872 | DIM(SORT_PARENT, "parent", sort_parent), | 874 | DIM(SORT_PARENT, "parent", sort_parent), |
873 | DIM(SORT_CPU, "cpu", sort_cpu), | 875 | DIM(SORT_CPU, "cpu", sort_cpu), |
874 | DIM(SORT_SRCLINE, "srcline", sort_srcline), | 876 | DIM(SORT_SRCLINE, "srcline", sort_srcline), |
877 | DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), | ||
878 | DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), | ||
875 | }; | 879 | }; |
876 | 880 | ||
877 | #undef DIM | 881 | #undef DIM |
@@ -891,8 +895,6 @@ static struct sort_dimension bstack_sort_dimensions[] = { | |||
891 | #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } | 895 | #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } |
892 | 896 | ||
893 | static struct sort_dimension memory_sort_dimensions[] = { | 897 | static struct sort_dimension memory_sort_dimensions[] = { |
894 | DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), | ||
895 | DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), | ||
896 | DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), | 898 | DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), |
897 | DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), | 899 | DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), |
898 | DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), | 900 | DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), |
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 45ac84c1e037..4e80dbd271e7 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h | |||
@@ -29,6 +29,8 @@ extern const char *sort_order; | |||
29 | extern const char default_parent_pattern[]; | 29 | extern const char default_parent_pattern[]; |
30 | extern const char *parent_pattern; | 30 | extern const char *parent_pattern; |
31 | extern const char default_sort_order[]; | 31 | extern const char default_sort_order[]; |
32 | extern regex_t ignore_callees_regex; | ||
33 | extern int have_ignore_callees; | ||
32 | extern int sort__need_collapse; | 34 | extern int sort__need_collapse; |
33 | extern int sort__has_parent; | 35 | extern int sort__has_parent; |
34 | extern int sort__has_sym; | 36 | extern int sort__has_sym; |
@@ -87,6 +89,9 @@ struct hist_entry { | |||
87 | 89 | ||
88 | struct hist_entry_diff diff; | 90 | struct hist_entry_diff diff; |
89 | 91 | ||
92 | /* We are added by hists__add_dummy_entry. */ | ||
93 | bool dummy; | ||
94 | |||
90 | /* XXX These two should move to some tree widget lib */ | 95 | /* XXX These two should move to some tree widget lib */ |
91 | u16 row_offset; | 96 | u16 row_offset; |
92 | u16 nr_rows; | 97 | u16 nr_rows; |
@@ -138,6 +143,8 @@ enum sort_type { | |||
138 | SORT_PARENT, | 143 | SORT_PARENT, |
139 | SORT_CPU, | 144 | SORT_CPU, |
140 | SORT_SRCLINE, | 145 | SORT_SRCLINE, |
146 | SORT_LOCAL_WEIGHT, | ||
147 | SORT_GLOBAL_WEIGHT, | ||
141 | 148 | ||
142 | /* branch stack specific sort keys */ | 149 | /* branch stack specific sort keys */ |
143 | __SORT_BRANCH_STACK, | 150 | __SORT_BRANCH_STACK, |
@@ -149,9 +156,7 @@ enum sort_type { | |||
149 | 156 | ||
150 | /* memory mode specific sort keys */ | 157 | /* memory mode specific sort keys */ |
151 | __SORT_MEMORY_MODE, | 158 | __SORT_MEMORY_MODE, |
152 | SORT_LOCAL_WEIGHT = __SORT_MEMORY_MODE, | 159 | SORT_MEM_DADDR_SYMBOL = __SORT_MEMORY_MODE, |
153 | SORT_GLOBAL_WEIGHT, | ||
154 | SORT_MEM_DADDR_SYMBOL, | ||
155 | SORT_MEM_DADDR_DSO, | 160 | SORT_MEM_DADDR_DSO, |
156 | SORT_MEM_LOCKED, | 161 | SORT_MEM_LOCKED, |
157 | SORT_MEM_TLB, | 162 | SORT_MEM_TLB, |
@@ -183,4 +188,6 @@ int setup_sorting(void); | |||
183 | extern int sort_dimension__add(const char *); | 188 | extern int sort_dimension__add(const char *); |
184 | void sort__setup_elide(FILE *fp); | 189 | void sort__setup_elide(FILE *fp); |
185 | 190 | ||
191 | int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset); | ||
192 | |||
186 | #endif /* __PERF_SORT_H */ | 193 | #endif /* __PERF_SORT_H */ |
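
The new ignore_callees_regex / have_ignore_callees pair and report_parse_ignore_callees_opt() presumably let perf report skip the callees of functions matching a user-supplied pattern; the consuming call sites are not part of these hunks. A self-contained POSIX regex sketch of the kind of check the pattern enables:

#include <regex.h>
#include <stdio.h>

int main(void)
{
	regex_t ignore_callees_regex;
	int have_ignore_callees = 0;
	const char *callees[] = { "__schedule", "mutex_lock", "printk" };

	/* regcomp() returns 0 on success, mirroring the have_ flag above. */
	if (regcomp(&ignore_callees_regex, "^mutex_", REG_EXTENDED) == 0)
		have_ignore_callees = 1;

	for (unsigned int i = 0; i < sizeof(callees) / sizeof(callees[0]); i++) {
		if (have_ignore_callees &&
		    regexec(&ignore_callees_regex, callees[i], 0, NULL, 0) == 0)
			printf("ignoring callees of %s\n", callees[i]);
		else
			printf("keeping %s\n", callees[i]);
	}

	if (have_ignore_callees)
		regfree(&ignore_callees_regex);
	return 0;
}
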
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 7c59c28afcc5..6506b3dfb605 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c | |||
@@ -10,6 +10,12 @@ void update_stats(struct stats *stats, u64 val) | |||
10 | delta = val - stats->mean; | 10 | delta = val - stats->mean; |
11 | stats->mean += delta / stats->n; | 11 | stats->mean += delta / stats->n; |
12 | stats->M2 += delta*(val - stats->mean); | 12 | stats->M2 += delta*(val - stats->mean); |
13 | |||
14 | if (val > stats->max) | ||
15 | stats->max = val; | ||
16 | |||
17 | if (val < stats->min) | ||
18 | stats->min = val; | ||
13 | } | 19 | } |
14 | 20 | ||
15 | double avg_stats(struct stats *stats) | 21 | double avg_stats(struct stats *stats) |
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index 588367c3c767..ae8ccd7227cf 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h | |||
@@ -6,6 +6,7 @@ | |||
6 | struct stats | 6 | struct stats |
7 | { | 7 | { |
8 | double n, mean, M2; | 8 | double n, mean, M2; |
9 | u64 max, min; | ||
9 | }; | 10 | }; |
10 | 11 | ||
11 | void update_stats(struct stats *stats, u64 val); | 12 | void update_stats(struct stats *stats, u64 val); |
@@ -13,4 +14,12 @@ double avg_stats(struct stats *stats); | |||
13 | double stddev_stats(struct stats *stats); | 14 | double stddev_stats(struct stats *stats); |
14 | double rel_stddev_stats(double stddev, double avg); | 15 | double rel_stddev_stats(double stddev, double avg); |
15 | 16 | ||
17 | static inline void init_stats(struct stats *stats) | ||
18 | { | ||
19 | stats->n = 0.0; | ||
20 | stats->mean = 0.0; | ||
21 | stats->M2 = 0.0; | ||
22 | stats->min = (u64) -1; | ||
23 | stats->max = 0; | ||
24 | } | ||
16 | #endif | 25 | #endif |
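
update_stats() is Welford's online algorithm for the running mean and M2 (sum of squared deviations), now extended to track min and max as well; init_stats() primes min to (u64)-1 so the first sample always wins. A self-contained re-implementation for illustration only (perf's own stddev_stats() may normalize M2 differently):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
	double n, mean, M2;
	uint64_t max, min;
};

static void init_stats(struct stats *s)
{
	s->n = s->mean = s->M2 = 0.0;
	s->min = UINT64_MAX;	/* first sample always becomes the min */
	s->max = 0;
}

static void update_stats(struct stats *s, uint64_t val)
{
	double delta;

	s->n++;
	delta = val - s->mean;
	s->mean += delta / s->n;
	s->M2 += delta * (val - s->mean);

	if (val > s->max)
		s->max = val;
	if (val < s->min)
		s->min = val;
}

int main(void)
{
	static const uint64_t samples[] = { 120, 100, 140, 110, 130 };
	struct stats s;

	init_stats(&s);
	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		update_stats(&s, samples[i]);

	printf("mean=%.1f min=%llu max=%llu sample stddev=%.2f\n",
	       s.mean, (unsigned long long)s.min, (unsigned long long)s.max,
	       sqrt(s.M2 / (s.n - 1)));
	return 0;
}

Compile with -lm; for the five samples above it prints mean=120.0 min=100 max=140 sample stddev=15.81.
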
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index 29c7b2cb2521..f0b0c008c507 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c | |||
@@ -387,3 +387,27 @@ void *memdup(const void *src, size_t len) | |||
387 | 387 | ||
388 | return p; | 388 | return p; |
389 | } | 389 | } |
390 | |||
391 | /** | ||
392 | * str_append - reallocate string and append another | ||
393 | * @s: pointer to string pointer | ||
394 | * @len: pointer to len (initialized) | ||
395 | * @a: string to append. | ||
396 | */ | ||
397 | int str_append(char **s, int *len, const char *a) | ||
398 | { | ||
399 | int olen = *s ? strlen(*s) : 0; | ||
400 | int nlen = olen + strlen(a) + 1; | ||
401 | if (*len < nlen) { | ||
402 | *len = *len * 2; | ||
403 | if (*len < nlen) | ||
404 | *len = nlen; | ||
405 | *s = realloc(*s, *len); | ||
406 | if (!*s) | ||
407 | return -ENOMEM; | ||
408 | if (olen == 0) | ||
409 | **s = 0; | ||
410 | } | ||
411 | strcat(*s, a); | ||
412 | return 0; | ||
413 | } | ||
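
str_append() grows the destination geometrically (doubling, with a floor of the required length) and NUL-terminates a freshly allocated buffer before the first strcat(). A minimal standalone harness around the same function; note that if realloc() fails the previous buffer is leaked, because *s has already been overwritten with NULL:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int str_append(char **s, int *len, const char *a)
{
	int olen = *s ? strlen(*s) : 0;
	int nlen = olen + strlen(a) + 1;

	if (*len < nlen) {
		*len = *len * 2;
		if (*len < nlen)
			*len = nlen;
		*s = realloc(*s, *len);
		if (!*s)
			return -ENOMEM;
		if (olen == 0)
			**s = 0;	/* fresh buffer: make strcat() safe */
	}
	strcat(*s, a);
	return 0;
}

int main(void)
{
	char *s = NULL;	/* typical use: start with NULL/0 */
	int len = 0;

	if (str_append(&s, &len, "cycles") ||
	    str_append(&s, &len, ",instructions") ||
	    str_append(&s, &len, ",branch-misses"))
		return 1;

	printf("%s (buffer %d bytes)\n", s, len);
	free(s);
	return 0;
}
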
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 4b12bf850325..a7b9ab557380 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c | |||
@@ -599,11 +599,13 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, | |||
599 | if (dso->kernel == DSO_TYPE_USER) { | 599 | if (dso->kernel == DSO_TYPE_USER) { |
600 | GElf_Shdr shdr; | 600 | GElf_Shdr shdr; |
601 | ss->adjust_symbols = (ehdr.e_type == ET_EXEC || | 601 | ss->adjust_symbols = (ehdr.e_type == ET_EXEC || |
602 | ehdr.e_type == ET_REL || | ||
602 | elf_section_by_name(elf, &ehdr, &shdr, | 603 | elf_section_by_name(elf, &ehdr, &shdr, |
603 | ".gnu.prelink_undo", | 604 | ".gnu.prelink_undo", |
604 | NULL) != NULL); | 605 | NULL) != NULL); |
605 | } else { | 606 | } else { |
606 | ss->adjust_symbols = 0; | 607 | ss->adjust_symbols = ehdr.e_type == ET_EXEC || |
608 | ehdr.e_type == ET_REL; | ||
607 | } | 609 | } |
608 | 610 | ||
609 | ss->name = strdup(name); | 611 | ss->name = strdup(name); |
@@ -624,6 +626,37 @@ out_close: | |||
624 | return err; | 626 | return err; |
625 | } | 627 | } |
626 | 628 | ||
629 | /** | ||
630 | * ref_reloc_sym_not_found - has kernel relocation symbol been found. | ||
631 | * @kmap: kernel maps and relocation reference symbol | ||
632 | * | ||
633 | * This function returns %true if we are dealing with the kernel maps and the | ||
634 | * relocation reference symbol has not yet been found. Otherwise %false is | ||
635 | * returned. | ||
636 | */ | ||
637 | static bool ref_reloc_sym_not_found(struct kmap *kmap) | ||
638 | { | ||
639 | return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && | ||
640 | !kmap->ref_reloc_sym->unrelocated_addr; | ||
641 | } | ||
642 | |||
643 | /** | ||
644 | * ref_reloc - kernel relocation offset. | ||
645 | * @kmap: kernel maps and relocation reference symbol | ||
646 | * | ||
647 | * This function returns the offset of kernel addresses as determined by using | ||
648 | * the relocation reference symbol i.e. if the kernel has not been relocated | ||
649 | * then the return value is zero. | ||
650 | */ | ||
651 | static u64 ref_reloc(struct kmap *kmap) | ||
652 | { | ||
653 | if (kmap && kmap->ref_reloc_sym && | ||
654 | kmap->ref_reloc_sym->unrelocated_addr) | ||
655 | return kmap->ref_reloc_sym->addr - | ||
656 | kmap->ref_reloc_sym->unrelocated_addr; | ||
657 | return 0; | ||
658 | } | ||
659 | |||
627 | int dso__load_sym(struct dso *dso, struct map *map, | 660 | int dso__load_sym(struct dso *dso, struct map *map, |
628 | struct symsrc *syms_ss, struct symsrc *runtime_ss, | 661 | struct symsrc *syms_ss, struct symsrc *runtime_ss, |
629 | symbol_filter_t filter, int kmodule) | 662 | symbol_filter_t filter, int kmodule) |
@@ -642,8 +675,17 @@ int dso__load_sym(struct dso *dso, struct map *map, | |||
642 | Elf_Scn *sec, *sec_strndx; | 675 | Elf_Scn *sec, *sec_strndx; |
643 | Elf *elf; | 676 | Elf *elf; |
644 | int nr = 0; | 677 | int nr = 0; |
678 | bool remap_kernel = false, adjust_kernel_syms = false; | ||
645 | 679 | ||
646 | dso->symtab_type = syms_ss->type; | 680 | dso->symtab_type = syms_ss->type; |
681 | dso->rel = syms_ss->ehdr.e_type == ET_REL; | ||
682 | |||
683 | /* | ||
684 | * Modules may already have symbols from kallsyms, but those symbols | ||
685 | * have the wrong values for the dso maps, so remove them. | ||
686 | */ | ||
687 | if (kmodule && syms_ss->symtab) | ||
688 | symbols__delete(&dso->symbols[map->type]); | ||
647 | 689 | ||
648 | if (!syms_ss->symtab) { | 690 | if (!syms_ss->symtab) { |
649 | syms_ss->symtab = syms_ss->dynsym; | 691 | syms_ss->symtab = syms_ss->dynsym; |
@@ -681,7 +723,31 @@ int dso__load_sym(struct dso *dso, struct map *map, | |||
681 | nr_syms = shdr.sh_size / shdr.sh_entsize; | 723 | nr_syms = shdr.sh_size / shdr.sh_entsize; |
682 | 724 | ||
683 | memset(&sym, 0, sizeof(sym)); | 725 | memset(&sym, 0, sizeof(sym)); |
684 | dso->adjust_symbols = runtime_ss->adjust_symbols; | 726 | |
727 | /* | ||
728 | * The kernel relocation symbol is needed in advance in order to adjust | ||
729 | * kernel maps correctly. | ||
730 | */ | ||
731 | if (ref_reloc_sym_not_found(kmap)) { | ||
732 | elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { | ||
733 | const char *elf_name = elf_sym__name(&sym, symstrs); | ||
734 | |||
735 | if (strcmp(elf_name, kmap->ref_reloc_sym->name)) | ||
736 | continue; | ||
737 | kmap->ref_reloc_sym->unrelocated_addr = sym.st_value; | ||
738 | break; | ||
739 | } | ||
740 | } | ||
741 | |||
742 | dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap); | ||
743 | /* | ||
744 | * Initial kernel and module mappings do not map to the dso. For | ||
745 | * function mappings, flag the fixups. | ||
746 | */ | ||
747 | if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) { | ||
748 | remap_kernel = true; | ||
749 | adjust_kernel_syms = dso->adjust_symbols; | ||
750 | } | ||
685 | elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { | 751 | elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { |
686 | struct symbol *f; | 752 | struct symbol *f; |
687 | const char *elf_name = elf_sym__name(&sym, symstrs); | 753 | const char *elf_name = elf_sym__name(&sym, symstrs); |
@@ -690,10 +756,6 @@ int dso__load_sym(struct dso *dso, struct map *map, | |||
690 | const char *section_name; | 756 | const char *section_name; |
691 | bool used_opd = false; | 757 | bool used_opd = false; |
692 | 758 | ||
693 | if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && | ||
694 | strcmp(elf_name, kmap->ref_reloc_sym->name) == 0) | ||
695 | kmap->ref_reloc_sym->unrelocated_addr = sym.st_value; | ||
696 | |||
697 | if (!is_label && !elf_sym__is_a(&sym, map->type)) | 759 | if (!is_label && !elf_sym__is_a(&sym, map->type)) |
698 | continue; | 760 | continue; |
699 | 761 | ||
@@ -745,20 +807,55 @@ int dso__load_sym(struct dso *dso, struct map *map, | |||
745 | (sym.st_value & 1)) | 807 | (sym.st_value & 1)) |
746 | --sym.st_value; | 808 | --sym.st_value; |
747 | 809 | ||
748 | if (dso->kernel != DSO_TYPE_USER || kmodule) { | 810 | if (dso->kernel || kmodule) { |
749 | char dso_name[PATH_MAX]; | 811 | char dso_name[PATH_MAX]; |
750 | 812 | ||
813 | /* Adjust symbol to map to file offset */ | ||
814 | if (adjust_kernel_syms) | ||
815 | sym.st_value -= shdr.sh_addr - shdr.sh_offset; | ||
816 | |||
751 | if (strcmp(section_name, | 817 | if (strcmp(section_name, |
752 | (curr_dso->short_name + | 818 | (curr_dso->short_name + |
753 | dso->short_name_len)) == 0) | 819 | dso->short_name_len)) == 0) |
754 | goto new_symbol; | 820 | goto new_symbol; |
755 | 821 | ||
756 | if (strcmp(section_name, ".text") == 0) { | 822 | if (strcmp(section_name, ".text") == 0) { |
823 | /* | ||
824 | * The initial kernel mapping is based on | ||
825 | * kallsyms and identity maps. Overwrite it to | ||
826 | * map to the kernel dso. | ||
827 | */ | ||
828 | if (remap_kernel && dso->kernel) { | ||
829 | remap_kernel = false; | ||
830 | map->start = shdr.sh_addr + | ||
831 | ref_reloc(kmap); | ||
832 | map->end = map->start + shdr.sh_size; | ||
833 | map->pgoff = shdr.sh_offset; | ||
834 | map->map_ip = map__map_ip; | ||
835 | map->unmap_ip = map__unmap_ip; | ||
836 | /* Ensure maps are correctly ordered */ | ||
837 | map_groups__remove(kmap->kmaps, map); | ||
838 | map_groups__insert(kmap->kmaps, map); | ||
839 | } | ||
840 | |||
841 | /* | ||
842 | * The initial module mapping is based on | ||
843 | * /proc/modules mapped to offset zero. | ||
844 | * Overwrite it to map to the module dso. | ||
845 | */ | ||
846 | if (remap_kernel && kmodule) { | ||
847 | remap_kernel = false; | ||
848 | map->pgoff = shdr.sh_offset; | ||
849 | } | ||
850 | |||
757 | curr_map = map; | 851 | curr_map = map; |
758 | curr_dso = dso; | 852 | curr_dso = dso; |
759 | goto new_symbol; | 853 | goto new_symbol; |
760 | } | 854 | } |
761 | 855 | ||
856 | if (!kmap) | ||
857 | goto new_symbol; | ||
858 | |||
762 | snprintf(dso_name, sizeof(dso_name), | 859 | snprintf(dso_name, sizeof(dso_name), |
763 | "%s%s", dso->short_name, section_name); | 860 | "%s%s", dso->short_name, section_name); |
764 | 861 | ||
@@ -781,8 +878,16 @@ int dso__load_sym(struct dso *dso, struct map *map, | |||
781 | dso__delete(curr_dso); | 878 | dso__delete(curr_dso); |
782 | goto out_elf_end; | 879 | goto out_elf_end; |
783 | } | 880 | } |
784 | curr_map->map_ip = identity__map_ip; | 881 | if (adjust_kernel_syms) { |
785 | curr_map->unmap_ip = identity__map_ip; | 882 | curr_map->start = shdr.sh_addr + |
883 | ref_reloc(kmap); | ||
884 | curr_map->end = curr_map->start + | ||
885 | shdr.sh_size; | ||
886 | curr_map->pgoff = shdr.sh_offset; | ||
887 | } else { | ||
888 | curr_map->map_ip = identity__map_ip; | ||
889 | curr_map->unmap_ip = identity__map_ip; | ||
890 | } | ||
786 | curr_dso->symtab_type = dso->symtab_type; | 891 | curr_dso->symtab_type = dso->symtab_type; |
787 | map_groups__insert(kmap->kmaps, curr_map); | 892 | map_groups__insert(kmap->kmaps, curr_map); |
788 | dsos__add(&dso->node, curr_dso); | 893 | dsos__add(&dso->node, curr_dso); |
@@ -846,6 +951,57 @@ out_elf_end: | |||
846 | return err; | 951 | return err; |
847 | } | 952 | } |
848 | 953 | ||
954 | static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data) | ||
955 | { | ||
956 | GElf_Phdr phdr; | ||
957 | size_t i, phdrnum; | ||
958 | int err; | ||
959 | u64 sz; | ||
960 | |||
961 | if (elf_getphdrnum(elf, &phdrnum)) | ||
962 | return -1; | ||
963 | |||
964 | for (i = 0; i < phdrnum; i++) { | ||
965 | if (gelf_getphdr(elf, i, &phdr) == NULL) | ||
966 | return -1; | ||
967 | if (phdr.p_type != PT_LOAD) | ||
968 | continue; | ||
969 | if (exe) { | ||
970 | if (!(phdr.p_flags & PF_X)) | ||
971 | continue; | ||
972 | } else { | ||
973 | if (!(phdr.p_flags & PF_R)) | ||
974 | continue; | ||
975 | } | ||
976 | sz = min(phdr.p_memsz, phdr.p_filesz); | ||
977 | if (!sz) | ||
978 | continue; | ||
979 | err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data); | ||
980 | if (err) | ||
981 | return err; | ||
982 | } | ||
983 | return 0; | ||
984 | } | ||
985 | |||
986 | int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data, | ||
987 | bool *is_64_bit) | ||
988 | { | ||
989 | int err; | ||
990 | Elf *elf; | ||
991 | |||
992 | elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); | ||
993 | if (elf == NULL) | ||
994 | return -1; | ||
995 | |||
996 | if (is_64_bit) | ||
997 | *is_64_bit = (gelf_getclass(elf) == ELFCLASS64); | ||
998 | |||
999 | err = elf_read_maps(elf, exe, mapfn, data); | ||
1000 | |||
1001 | elf_end(elf); | ||
1002 | return err; | ||
1003 | } | ||
1004 | |||
849 | void symbol__elf_init(void) | 1005 | void symbol__elf_init(void) |
850 | { | 1006 | { |
851 | elf_version(EV_CURRENT); | 1007 | elf_version(EV_CURRENT); |
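
file__read_maps() opens an ELF image and elf_read_maps() walks its program headers, reporting each PT_LOAD segment that is executable (or merely readable, when exe is false) to a callback as address, length and file offset; that is what lets the kcore maps be built over in symbol.c. A standalone libelf sketch of the same walk, using plain ELF_C_READ rather than perf's PERF_ELF_C_READ_MMAP wrapper (build with -lelf):

#include <fcntl.h>
#include <gelf.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	Elf *elf;
	int fd;

	if (argc != 2 || elf_version(EV_CURRENT) == EV_NONE)
		return 1;

	fd = open(argv[1], O_RDONLY);	/* e.g. /proc/kcore or any ELF file */
	if (fd < 0)
		return 1;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf || elf_getphdrnum(elf, &phdrnum))
		return 1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, (int)i, &phdr) == NULL)
			return 1;
		if (phdr.p_type != PT_LOAD || !(phdr.p_flags & PF_X))
			continue;
		/* start, length and file offset: what kcore_mapfn() receives */
		printf("vaddr=%#" PRIx64 " len=%#" PRIx64 " offset=%#" PRIx64 "\n",
		       (uint64_t)phdr.p_vaddr,
		       (uint64_t)(phdr.p_memsz < phdr.p_filesz ?
				  phdr.p_memsz : phdr.p_filesz),
		       (uint64_t)phdr.p_offset);
	}

	elf_end(elf);
	close(fd);
	return 0;
}
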
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c index a7390cde63bc..3a802c300fc5 100644 --- a/tools/perf/util/symbol-minimal.c +++ b/tools/perf/util/symbol-minimal.c | |||
@@ -301,6 +301,13 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused, | |||
301 | return 0; | 301 | return 0; |
302 | } | 302 | } |
303 | 303 | ||
304 | int file__read_maps(int fd __maybe_unused, bool exe __maybe_unused, | ||
305 | mapfn_t mapfn __maybe_unused, void *data __maybe_unused, | ||
306 | bool *is_64_bit __maybe_unused) | ||
307 | { | ||
308 | return -1; | ||
309 | } | ||
310 | |||
304 | void symbol__elf_init(void) | 311 | void symbol__elf_init(void) |
305 | { | 312 | { |
306 | } | 313 | } |
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index d5528e1cc03a..7eb0362f4ffd 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -87,6 +87,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb) | |||
87 | { | 87 | { |
88 | s64 a; | 88 | s64 a; |
89 | s64 b; | 89 | s64 b; |
90 | size_t na, nb; | ||
90 | 91 | ||
91 | /* Prefer a symbol with non zero length */ | 92 | /* Prefer a symbol with non zero length */ |
92 | a = syma->end - syma->start; | 93 | a = syma->end - syma->start; |
@@ -120,11 +121,21 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb) | |||
120 | else if (a > b) | 121 | else if (a > b) |
121 | return SYMBOL_B; | 122 | return SYMBOL_B; |
122 | 123 | ||
123 | /* If all else fails, choose the symbol with the longest name */ | 124 | /* Choose the symbol with the longest name */ |
124 | if (strlen(syma->name) >= strlen(symb->name)) | 125 | na = strlen(syma->name); |
126 | nb = strlen(symb->name); | ||
127 | if (na > nb) | ||
125 | return SYMBOL_A; | 128 | return SYMBOL_A; |
126 | else | 129 | else if (na < nb) |
130 | return SYMBOL_B; | ||
131 | |||
132 | /* Avoid "SyS" kernel syscall aliases */ | ||
133 | if (na >= 3 && !strncmp(syma->name, "SyS", 3)) | ||
134 | return SYMBOL_B; | ||
135 | if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10)) | ||
127 | return SYMBOL_B; | 136 | return SYMBOL_B; |
137 | |||
138 | return SYMBOL_A; | ||
128 | } | 139 | } |
129 | 140 | ||
130 | void symbols__fixup_duplicate(struct rb_root *symbols) | 141 | void symbols__fixup_duplicate(struct rb_root *symbols) |
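
The extra tie-break above prefers the plain name when a symbol and its SyS_* or compat_SyS_* syscall-wrapper alias have the same length, since kallsyms typically lists both at the same address. A standalone illustration of just the tie-break:

#include <stdio.h>
#include <string.h>

enum { SYMBOL_A, SYMBOL_B };

/* Same-length tie-break as in choose_best_symbol() above. */
static int pick(const char *a, const char *b)
{
	size_t na = strlen(a), nb = strlen(b);

	if (na > nb)
		return SYMBOL_A;
	if (na < nb)
		return SYMBOL_B;
	if (na >= 3 && !strncmp(a, "SyS", 3))
		return SYMBOL_B;
	if (na >= 10 && !strncmp(a, "compat_SyS", 10))
		return SYMBOL_B;
	return SYMBOL_A;
}

int main(void)
{
	const char *a = "SyS_read", *b = "sys_read";

	printf("chosen: %s\n", pick(a, b) == SYMBOL_A ? a : b);	/* sys_read */
	return 0;
}
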
@@ -248,7 +259,10 @@ size_t symbol__fprintf_symname_offs(const struct symbol *sym, | |||
248 | if (sym && sym->name) { | 259 | if (sym && sym->name) { |
249 | length = fprintf(fp, "%s", sym->name); | 260 | length = fprintf(fp, "%s", sym->name); |
250 | if (al) { | 261 | if (al) { |
251 | offset = al->addr - sym->start; | 262 | if (al->addr < sym->end) |
263 | offset = al->addr - sym->start; | ||
264 | else | ||
265 | offset = al->addr - al->map->start - sym->start; | ||
252 | length += fprintf(fp, "+0x%lx", offset); | 266 | length += fprintf(fp, "+0x%lx", offset); |
253 | } | 267 | } |
254 | return length; | 268 | return length; |
@@ -316,6 +330,16 @@ static struct symbol *symbols__find(struct rb_root *symbols, u64 ip) | |||
316 | return NULL; | 330 | return NULL; |
317 | } | 331 | } |
318 | 332 | ||
333 | static struct symbol *symbols__first(struct rb_root *symbols) | ||
334 | { | ||
335 | struct rb_node *n = rb_first(symbols); | ||
336 | |||
337 | if (n) | ||
338 | return rb_entry(n, struct symbol, rb_node); | ||
339 | |||
340 | return NULL; | ||
341 | } | ||
342 | |||
319 | struct symbol_name_rb_node { | 343 | struct symbol_name_rb_node { |
320 | struct rb_node rb_node; | 344 | struct rb_node rb_node; |
321 | struct symbol sym; | 345 | struct symbol sym; |
@@ -386,6 +410,11 @@ struct symbol *dso__find_symbol(struct dso *dso, | |||
386 | return symbols__find(&dso->symbols[type], addr); | 410 | return symbols__find(&dso->symbols[type], addr); |
387 | } | 411 | } |
388 | 412 | ||
413 | struct symbol *dso__first_symbol(struct dso *dso, enum map_type type) | ||
414 | { | ||
415 | return symbols__first(&dso->symbols[type]); | ||
416 | } | ||
417 | |||
389 | struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, | 418 | struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, |
390 | const char *name) | 419 | const char *name) |
391 | { | 420 | { |
@@ -522,6 +551,53 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename, | |||
522 | return kallsyms__parse(filename, &args, map__process_kallsym_symbol); | 551 | return kallsyms__parse(filename, &args, map__process_kallsym_symbol); |
523 | } | 552 | } |
524 | 553 | ||
554 | static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map, | ||
555 | symbol_filter_t filter) | ||
556 | { | ||
557 | struct map_groups *kmaps = map__kmap(map)->kmaps; | ||
558 | struct map *curr_map; | ||
559 | struct symbol *pos; | ||
560 | int count = 0, moved = 0; | ||
561 | struct rb_root *root = &dso->symbols[map->type]; | ||
562 | struct rb_node *next = rb_first(root); | ||
563 | |||
564 | while (next) { | ||
565 | char *module; | ||
566 | |||
567 | pos = rb_entry(next, struct symbol, rb_node); | ||
568 | next = rb_next(&pos->rb_node); | ||
569 | |||
570 | module = strchr(pos->name, '\t'); | ||
571 | if (module) | ||
572 | *module = '\0'; | ||
573 | |||
574 | curr_map = map_groups__find(kmaps, map->type, pos->start); | ||
575 | |||
576 | if (!curr_map || (filter && filter(curr_map, pos))) { | ||
577 | rb_erase(&pos->rb_node, root); | ||
578 | symbol__delete(pos); | ||
579 | } else { | ||
580 | pos->start -= curr_map->start - curr_map->pgoff; | ||
581 | if (pos->end) | ||
582 | pos->end -= curr_map->start - curr_map->pgoff; | ||
583 | if (curr_map != map) { | ||
584 | rb_erase(&pos->rb_node, root); | ||
585 | symbols__insert( | ||
586 | &curr_map->dso->symbols[curr_map->type], | ||
587 | pos); | ||
588 | ++moved; | ||
589 | } else { | ||
590 | ++count; | ||
591 | } | ||
592 | } | ||
593 | } | ||
594 | |||
595 | /* Symbols have been adjusted */ | ||
596 | dso->adjust_symbols = 1; | ||
597 | |||
598 | return count + moved; | ||
599 | } | ||
600 | |||
525 | /* | 601 | /* |
526 | * Split the symbols into maps, making sure there are no overlaps, i.e. the | 602 | * Split the symbols into maps, making sure there are no overlaps, i.e. the |
527 | * kernel range is broken in several maps, named [kernel].N, as we don't have | 603 | * kernel range is broken in several maps, named [kernel].N, as we don't have |
@@ -663,6 +739,161 @@ bool symbol__restricted_filename(const char *filename, | |||
663 | return restricted; | 739 | return restricted; |
664 | } | 740 | } |
665 | 741 | ||
742 | struct kcore_mapfn_data { | ||
743 | struct dso *dso; | ||
744 | enum map_type type; | ||
745 | struct list_head maps; | ||
746 | }; | ||
747 | |||
748 | static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data) | ||
749 | { | ||
750 | struct kcore_mapfn_data *md = data; | ||
751 | struct map *map; | ||
752 | |||
753 | map = map__new2(start, md->dso, md->type); | ||
754 | if (map == NULL) | ||
755 | return -ENOMEM; | ||
756 | |||
757 | map->end = map->start + len; | ||
758 | map->pgoff = pgoff; | ||
759 | |||
760 | list_add(&map->node, &md->maps); | ||
761 | |||
762 | return 0; | ||
763 | } | ||
764 | |||
765 | /* | ||
766 | * If kallsyms is referenced by name then we look for kcore in the same | ||
767 | * directory. | ||
768 | */ | ||
769 | static bool kcore_filename_from_kallsyms_filename(char *kcore_filename, | ||
770 | const char *kallsyms_filename) | ||
771 | { | ||
772 | char *name; | ||
773 | |||
774 | strcpy(kcore_filename, kallsyms_filename); | ||
775 | name = strrchr(kcore_filename, '/'); | ||
776 | if (!name) | ||
777 | return false; | ||
778 | |||
779 | if (!strcmp(name, "/kallsyms")) { | ||
780 | strcpy(name, "/kcore"); | ||
781 | return true; | ||
782 | } | ||
783 | |||
784 | return false; | ||
785 | } | ||
786 | |||
787 | static int dso__load_kcore(struct dso *dso, struct map *map, | ||
788 | const char *kallsyms_filename) | ||
789 | { | ||
790 | struct map_groups *kmaps = map__kmap(map)->kmaps; | ||
791 | struct machine *machine = kmaps->machine; | ||
792 | struct kcore_mapfn_data md; | ||
793 | struct map *old_map, *new_map, *replacement_map = NULL; | ||
794 | bool is_64_bit; | ||
795 | int err, fd; | ||
796 | char kcore_filename[PATH_MAX]; | ||
797 | struct symbol *sym; | ||
798 | |||
799 | /* This function requires that the map is the kernel map */ | ||
800 | if (map != machine->vmlinux_maps[map->type]) | ||
801 | return -EINVAL; | ||
802 | |||
803 | if (!kcore_filename_from_kallsyms_filename(kcore_filename, | ||
804 | kallsyms_filename)) | ||
805 | return -EINVAL; | ||
806 | |||
807 | md.dso = dso; | ||
808 | md.type = map->type; | ||
809 | INIT_LIST_HEAD(&md.maps); | ||
810 | |||
811 | fd = open(kcore_filename, O_RDONLY); | ||
812 | if (fd < 0) | ||
813 | return -EINVAL; | ||
814 | |||
815 | /* Read new maps into temporary lists */ | ||
816 | err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md, | ||
817 | &is_64_bit); | ||
818 | if (err) | ||
819 | goto out_err; | ||
820 | |||
821 | if (list_empty(&md.maps)) { | ||
822 | err = -EINVAL; | ||
823 | goto out_err; | ||
824 | } | ||
825 | |||
826 | /* Remove old maps */ | ||
827 | old_map = map_groups__first(kmaps, map->type); | ||
828 | while (old_map) { | ||
829 | struct map *next = map_groups__next(old_map); | ||
830 | |||
831 | if (old_map != map) | ||
832 | map_groups__remove(kmaps, old_map); | ||
833 | old_map = next; | ||
834 | } | ||
835 | |||
836 | /* Find the kernel map using the first symbol */ | ||
837 | sym = dso__first_symbol(dso, map->type); | ||
838 | list_for_each_entry(new_map, &md.maps, node) { | ||
839 | if (sym && sym->start >= new_map->start && | ||
840 | sym->start < new_map->end) { | ||
841 | replacement_map = new_map; | ||
842 | break; | ||
843 | } | ||
844 | } | ||
845 | |||
846 | if (!replacement_map) | ||
847 | replacement_map = list_entry(md.maps.next, struct map, node); | ||
848 | |||
849 | /* Add new maps */ | ||
850 | while (!list_empty(&md.maps)) { | ||
851 | new_map = list_entry(md.maps.next, struct map, node); | ||
852 | list_del(&new_map->node); | ||
853 | if (new_map == replacement_map) { | ||
854 | map->start = new_map->start; | ||
855 | map->end = new_map->end; | ||
856 | map->pgoff = new_map->pgoff; | ||
857 | map->map_ip = new_map->map_ip; | ||
858 | map->unmap_ip = new_map->unmap_ip; | ||
859 | map__delete(new_map); | ||
860 | /* Ensure maps are correctly ordered */ | ||
861 | map_groups__remove(kmaps, map); | ||
862 | map_groups__insert(kmaps, map); | ||
863 | } else { | ||
864 | map_groups__insert(kmaps, new_map); | ||
865 | } | ||
866 | } | ||
867 | |||
868 | /* | ||
869 | * Set the data type and long name so that kcore can be read via | ||
870 | * dso__data_read_addr(). | ||
871 | */ | ||
872 | if (dso->kernel == DSO_TYPE_GUEST_KERNEL) | ||
873 | dso->data_type = DSO_BINARY_TYPE__GUEST_KCORE; | ||
874 | else | ||
875 | dso->data_type = DSO_BINARY_TYPE__KCORE; | ||
876 | dso__set_long_name(dso, strdup(kcore_filename)); | ||
877 | |||
878 | close(fd); | ||
879 | |||
880 | if (map->type == MAP__FUNCTION) | ||
881 | pr_debug("Using %s for kernel object code\n", kcore_filename); | ||
882 | else | ||
883 | pr_debug("Using %s for kernel data\n", kcore_filename); | ||
884 | |||
885 | return 0; | ||
886 | |||
887 | out_err: | ||
888 | while (!list_empty(&md.maps)) { | ||
889 | map = list_entry(md.maps.next, struct map, node); | ||
890 | list_del(&map->node); | ||
891 | map__delete(map); | ||
892 | } | ||
893 | close(fd); | ||
894 | return -EINVAL; | ||
895 | } | ||
896 | |||
666 | int dso__load_kallsyms(struct dso *dso, const char *filename, | 897 | int dso__load_kallsyms(struct dso *dso, const char *filename, |
667 | struct map *map, symbol_filter_t filter) | 898 | struct map *map, symbol_filter_t filter) |
668 | { | 899 | { |
@@ -680,7 +911,10 @@ int dso__load_kallsyms(struct dso *dso, const char *filename, | |||
680 | else | 911 | else |
681 | dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS; | 912 | dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS; |
682 | 913 | ||
683 | return dso__split_kallsyms(dso, map, filter); | 914 | if (!dso__load_kcore(dso, map, filename)) |
915 | return dso__split_kallsyms_for_kcore(dso, map, filter); | ||
916 | else | ||
917 | return dso__split_kallsyms(dso, map, filter); | ||
684 | } | 918 | } |
685 | 919 | ||
686 | static int dso__load_perf_map(struct dso *dso, struct map *map, | 920 | static int dso__load_perf_map(struct dso *dso, struct map *map, |
@@ -843,10 +1077,15 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) | |||
843 | if (!runtime_ss && syms_ss) | 1077 | if (!runtime_ss && syms_ss) |
844 | runtime_ss = syms_ss; | 1078 | runtime_ss = syms_ss; |
845 | 1079 | ||
846 | if (syms_ss) | 1080 | if (syms_ss) { |
847 | ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, 0); | 1081 | int km; |
848 | else | 1082 | |
1083 | km = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || | ||
1084 | dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE; | ||
1085 | ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, km); | ||
1086 | } else { | ||
849 | ret = -1; | 1087 | ret = -1; |
1088 | } | ||
850 | 1089 | ||
851 | if (ret > 0) { | 1090 | if (ret > 0) { |
852 | int nr_plt; | 1091 | int nr_plt; |
@@ -888,8 +1127,11 @@ int dso__load_vmlinux(struct dso *dso, struct map *map, | |||
888 | char symfs_vmlinux[PATH_MAX]; | 1127 | char symfs_vmlinux[PATH_MAX]; |
889 | enum dso_binary_type symtab_type; | 1128 | enum dso_binary_type symtab_type; |
890 | 1129 | ||
891 | snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s", | 1130 | if (vmlinux[0] == '/') |
892 | symbol_conf.symfs, vmlinux); | 1131 | snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux); |
1132 | else | ||
1133 | snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s", | ||
1134 | symbol_conf.symfs, vmlinux); | ||
893 | 1135 | ||
894 | if (dso->kernel == DSO_TYPE_GUEST_KERNEL) | 1136 | if (dso->kernel == DSO_TYPE_GUEST_KERNEL) |
895 | symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX; | 1137 | symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX; |
@@ -903,6 +1145,10 @@ int dso__load_vmlinux(struct dso *dso, struct map *map, | |||
903 | symsrc__destroy(&ss); | 1145 | symsrc__destroy(&ss); |
904 | 1146 | ||
905 | if (err > 0) { | 1147 | if (err > 0) { |
1148 | if (dso->kernel == DSO_TYPE_GUEST_KERNEL) | ||
1149 | dso->data_type = DSO_BINARY_TYPE__GUEST_VMLINUX; | ||
1150 | else | ||
1151 | dso->data_type = DSO_BINARY_TYPE__VMLINUX; | ||
906 | dso__set_long_name(dso, (char *)vmlinux); | 1152 | dso__set_long_name(dso, (char *)vmlinux); |
907 | dso__set_loaded(dso, map->type); | 1153 | dso__set_loaded(dso, map->type); |
908 | pr_debug("Using %s for symbols\n", symfs_vmlinux); | 1154 | pr_debug("Using %s for symbols\n", symfs_vmlinux); |
@@ -975,7 +1221,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, | |||
975 | dso__set_long_name(dso, | 1221 | dso__set_long_name(dso, |
976 | strdup(symbol_conf.vmlinux_name)); | 1222 | strdup(symbol_conf.vmlinux_name)); |
977 | dso->lname_alloc = 1; | 1223 | dso->lname_alloc = 1; |
978 | goto out_fixup; | 1224 | return err; |
979 | } | 1225 | } |
980 | return err; | 1226 | return err; |
981 | } | 1227 | } |
@@ -983,7 +1229,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, | |||
983 | if (vmlinux_path != NULL) { | 1229 | if (vmlinux_path != NULL) { |
984 | err = dso__load_vmlinux_path(dso, map, filter); | 1230 | err = dso__load_vmlinux_path(dso, map, filter); |
985 | if (err > 0) | 1231 | if (err > 0) |
986 | goto out_fixup; | 1232 | return err; |
987 | } | 1233 | } |
988 | 1234 | ||
989 | /* do not try local files if a symfs was given */ | 1235 | /* do not try local files if a symfs was given */ |
@@ -1042,9 +1288,8 @@ do_kallsyms: | |||
1042 | pr_debug("Using %s for symbols\n", kallsyms_filename); | 1288 | pr_debug("Using %s for symbols\n", kallsyms_filename); |
1043 | free(kallsyms_allocated_filename); | 1289 | free(kallsyms_allocated_filename); |
1044 | 1290 | ||
1045 | if (err > 0) { | 1291 | if (err > 0 && !dso__is_kcore(dso)) { |
1046 | dso__set_long_name(dso, strdup("[kernel.kallsyms]")); | 1292 | dso__set_long_name(dso, strdup("[kernel.kallsyms]")); |
1047 | out_fixup: | ||
1048 | map__fixup_start(map); | 1293 | map__fixup_start(map); |
1049 | map__fixup_end(map); | 1294 | map__fixup_end(map); |
1050 | } | 1295 | } |
@@ -1075,7 +1320,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, | |||
1075 | if (symbol_conf.default_guest_vmlinux_name != NULL) { | 1320 | if (symbol_conf.default_guest_vmlinux_name != NULL) { |
1076 | err = dso__load_vmlinux(dso, map, | 1321 | err = dso__load_vmlinux(dso, map, |
1077 | symbol_conf.default_guest_vmlinux_name, filter); | 1322 | symbol_conf.default_guest_vmlinux_name, filter); |
1078 | goto out_try_fixup; | 1323 | return err; |
1079 | } | 1324 | } |
1080 | 1325 | ||
1081 | kallsyms_filename = symbol_conf.default_guest_kallsyms; | 1326 | kallsyms_filename = symbol_conf.default_guest_kallsyms; |
@@ -1089,13 +1334,9 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, | |||
1089 | err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); | 1334 | err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); |
1090 | if (err > 0) | 1335 | if (err > 0) |
1091 | pr_debug("Using %s for symbols\n", kallsyms_filename); | 1336 | pr_debug("Using %s for symbols\n", kallsyms_filename); |
1092 | 1337 | if (err > 0 && !dso__is_kcore(dso)) { | |
1093 | out_try_fixup: | 1338 | machine__mmap_name(machine, path, sizeof(path)); |
1094 | if (err > 0) { | 1339 | dso__set_long_name(dso, strdup(path)); |
1095 | if (kallsyms_filename != NULL) { | ||
1096 | machine__mmap_name(machine, path, sizeof(path)); | ||
1097 | dso__set_long_name(dso, strdup(path)); | ||
1098 | } | ||
1099 | map__fixup_start(map); | 1340 | map__fixup_start(map); |
1100 | map__fixup_end(map); | 1341 | map__fixup_end(map); |
1101 | } | 1342 | } |
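
dso__load_kcore() only runs when kallsyms was referenced by path and a kcore file sits beside it, which is exactly what kcore_filename_from_kallsyms_filename() checks. A standalone sketch of that naming rule:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same rule as kcore_filename_from_kallsyms_filename() above. */
static bool kcore_from_kallsyms(char *kcore_filename,
				const char *kallsyms_filename)
{
	char *name;

	strcpy(kcore_filename, kallsyms_filename);
	name = strrchr(kcore_filename, '/');
	if (!name)
		return false;

	if (!strcmp(name, "/kallsyms")) {
		strcpy(name, "/kcore");
		return true;
	}
	return false;
}

int main(void)
{
	const char *inputs[] = { "/proc/kallsyms", "vmlinux", "/tmp/kallsyms.copy" };
	char kcore[PATH_MAX];

	for (size_t i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
		if (kcore_from_kallsyms(kcore, inputs[i]))
			printf("%s -> %s\n", inputs[i], kcore);
		else
			printf("%s -> no kcore candidate\n", inputs[i]);
	}
	return 0;
}

So /proc/kallsyms yields /proc/kcore, while a bare vmlinux path or a renamed copy of kallsyms does not.
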
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 5f720dc076da..fd5b70ea2981 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -215,6 +215,7 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, | |||
215 | u64 addr); | 215 | u64 addr); |
216 | struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, | 216 | struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, |
217 | const char *name); | 217 | const char *name); |
218 | struct symbol *dso__first_symbol(struct dso *dso, enum map_type type); | ||
218 | 219 | ||
219 | int filename__read_build_id(const char *filename, void *bf, size_t size); | 220 | int filename__read_build_id(const char *filename, void *bf, size_t size); |
220 | int sysfs__read_build_id(const char *filename, void *bf, size_t size); | 221 | int sysfs__read_build_id(const char *filename, void *bf, size_t size); |
@@ -247,4 +248,8 @@ void symbols__fixup_duplicate(struct rb_root *symbols); | |||
247 | void symbols__fixup_end(struct rb_root *symbols); | 248 | void symbols__fixup_end(struct rb_root *symbols); |
248 | void __map_groups__fixup_end(struct map_groups *mg, enum map_type type); | 249 | void __map_groups__fixup_end(struct map_groups *mg, enum map_type type); |
249 | 250 | ||
251 | typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data); | ||
252 | int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data, | ||
253 | bool *is_64_bit); | ||
254 | |||
250 | #endif /* __PERF_SYMBOL */ | 255 | #endif /* __PERF_SYMBOL */ |
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 40399cbcca77..e3d4a550a703 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c | |||
@@ -7,17 +7,18 @@ | |||
7 | #include "util.h" | 7 | #include "util.h" |
8 | #include "debug.h" | 8 | #include "debug.h" |
9 | 9 | ||
10 | struct thread *thread__new(pid_t pid) | 10 | struct thread *thread__new(pid_t pid, pid_t tid) |
11 | { | 11 | { |
12 | struct thread *self = zalloc(sizeof(*self)); | 12 | struct thread *self = zalloc(sizeof(*self)); |
13 | 13 | ||
14 | if (self != NULL) { | 14 | if (self != NULL) { |
15 | map_groups__init(&self->mg); | 15 | map_groups__init(&self->mg); |
16 | self->pid = pid; | 16 | self->pid_ = pid; |
17 | self->tid = tid; | ||
17 | self->ppid = -1; | 18 | self->ppid = -1; |
18 | self->comm = malloc(32); | 19 | self->comm = malloc(32); |
19 | if (self->comm) | 20 | if (self->comm) |
20 | snprintf(self->comm, 32, ":%d", self->pid); | 21 | snprintf(self->comm, 32, ":%d", self->tid); |
21 | } | 22 | } |
22 | 23 | ||
23 | return self; | 24 | return self; |
@@ -57,7 +58,7 @@ int thread__comm_len(struct thread *self) | |||
57 | 58 | ||
58 | size_t thread__fprintf(struct thread *thread, FILE *fp) | 59 | size_t thread__fprintf(struct thread *thread, FILE *fp) |
59 | { | 60 | { |
60 | return fprintf(fp, "Thread %d %s\n", thread->pid, thread->comm) + | 61 | return fprintf(fp, "Thread %d %s\n", thread->tid, thread->comm) + |
61 | map_groups__fprintf(&thread->mg, verbose, fp); | 62 | map_groups__fprintf(&thread->mg, verbose, fp); |
62 | } | 63 | } |
63 | 64 | ||
@@ -84,7 +85,7 @@ int thread__fork(struct thread *self, struct thread *parent) | |||
84 | if (map_groups__clone(&self->mg, &parent->mg, i) < 0) | 85 | if (map_groups__clone(&self->mg, &parent->mg, i) < 0) |
85 | return -ENOMEM; | 86 | return -ENOMEM; |
86 | 87 | ||
87 | self->ppid = parent->pid; | 88 | self->ppid = parent->tid; |
88 | 89 | ||
89 | return 0; | 90 | return 0; |
90 | } | 91 | } |
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index eeb7ac62b9e3..4ebbb40d46d4 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h | |||
@@ -12,10 +12,12 @@ struct thread { | |||
12 | struct list_head node; | 12 | struct list_head node; |
13 | }; | 13 | }; |
14 | struct map_groups mg; | 14 | struct map_groups mg; |
15 | pid_t pid; | 15 | pid_t pid_; /* Not all tools update this */ |
16 | pid_t tid; | ||
16 | pid_t ppid; | 17 | pid_t ppid; |
17 | char shortname[3]; | 18 | char shortname[3]; |
18 | bool comm_set; | 19 | bool comm_set; |
20 | bool dead; /* if set thread has exited */ | ||
19 | char *comm; | 21 | char *comm; |
20 | int comm_len; | 22 | int comm_len; |
21 | 23 | ||
@@ -24,8 +26,12 @@ struct thread { | |||
24 | 26 | ||
25 | struct machine; | 27 | struct machine; |
26 | 28 | ||
27 | struct thread *thread__new(pid_t pid); | 29 | struct thread *thread__new(pid_t pid, pid_t tid); |
28 | void thread__delete(struct thread *self); | 30 | void thread__delete(struct thread *self); |
31 | static inline void thread__exited(struct thread *thread) | ||
32 | { | ||
33 | thread->dead = true; | ||
34 | } | ||
29 | 35 | ||
30 | int thread__set_comm(struct thread *self, const char *comm); | 36 | int thread__set_comm(struct thread *self, const char *comm); |
31 | int thread__comm_len(struct thread *self); | 37 | int thread__comm_len(struct thread *self); |
@@ -45,6 +51,15 @@ void thread__find_addr_map(struct thread *thread, struct machine *machine, | |||
45 | 51 | ||
46 | void thread__find_addr_location(struct thread *thread, struct machine *machine, | 52 | void thread__find_addr_location(struct thread *thread, struct machine *machine, |
47 | u8 cpumode, enum map_type type, u64 addr, | 53 | u8 cpumode, enum map_type type, u64 addr, |
48 | struct addr_location *al, | 54 | struct addr_location *al); |
49 | symbol_filter_t filter); | 55 | |
56 | static inline void *thread__priv(struct thread *thread) | ||
57 | { | ||
58 | return thread->priv; | ||
59 | } | ||
60 | |||
61 | static inline void thread__set_priv(struct thread *thread, void *p) | ||
62 | { | ||
63 | thread->priv = p; | ||
64 | } | ||
50 | #endif /* __PERF_THREAD_H */ | 65 | #endif /* __PERF_THREAD_H */ |
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h index b0e1aadba8d5..62b16b6165ba 100644 --- a/tools/perf/util/tool.h +++ b/tools/perf/util/tool.h | |||
@@ -18,12 +18,9 @@ typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event, | |||
18 | typedef int (*event_op)(struct perf_tool *tool, union perf_event *event, | 18 | typedef int (*event_op)(struct perf_tool *tool, union perf_event *event, |
19 | struct perf_sample *sample, struct machine *machine); | 19 | struct perf_sample *sample, struct machine *machine); |
20 | 20 | ||
21 | typedef int (*event_attr_op)(union perf_event *event, | 21 | typedef int (*event_attr_op)(struct perf_tool *tool, |
22 | union perf_event *event, | ||
22 | struct perf_evlist **pevlist); | 23 | struct perf_evlist **pevlist); |
23 | typedef int (*event_simple_op)(struct perf_tool *tool, union perf_event *event); | ||
24 | |||
25 | typedef int (*event_synth_op)(union perf_event *event, | ||
26 | struct perf_session *session); | ||
27 | 24 | ||
28 | typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event, | 25 | typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event, |
29 | struct perf_session *session); | 26 | struct perf_session *session); |
@@ -39,8 +36,7 @@ struct perf_tool { | |||
39 | throttle, | 36 | throttle, |
40 | unthrottle; | 37 | unthrottle; |
41 | event_attr_op attr; | 38 | event_attr_op attr; |
42 | event_synth_op tracing_data; | 39 | event_op2 tracing_data; |
43 | event_simple_op event_type; | ||
44 | event_op2 finished_round, | 40 | event_op2 finished_round, |
45 | build_id; | 41 | build_id; |
46 | bool ordered_samples; | 42 | bool ordered_samples; |
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h index df46be93d902..b554ffc462b6 100644 --- a/tools/perf/util/top.h +++ b/tools/perf/util/top.h | |||
@@ -39,6 +39,8 @@ struct perf_top { | |||
39 | float min_percent; | 39 | float min_percent; |
40 | }; | 40 | }; |
41 | 41 | ||
42 | #define CONSOLE_CLEAR "\033[H\033[2J" | ||
43 | |||
42 | size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size); | 44 | size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size); |
43 | void perf_top__reset_sample_counters(struct perf_top *top); | 45 | void perf_top__reset_sample_counters(struct perf_top *top); |
44 | #endif /* __PERF_TOP_H */ | 46 | #endif /* __PERF_TOP_H */ |
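
CONSOLE_CLEAR packs two ANSI control sequences, ESC [ H (cursor home) and ESC [ 2 J (erase display); the ESC bytes are control characters, shown above as \033. A trivial standalone check on any ANSI-capable terminal:

#include <stdio.h>

#define CONSOLE_CLEAR "\033[H\033[2J"

int main(void)
{
	fputs(CONSOLE_CLEAR, stdout);	/* homes the cursor, clears the screen */
	puts("screen cleared");
	return 0;
}
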
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index 3917eb9a8479..f3c9e551bd35 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c | |||
@@ -46,65 +46,6 @@ | |||
46 | static int output_fd; | 46 | static int output_fd; |
47 | 47 | ||
48 | 48 | ||
49 | static const char *find_debugfs(void) | ||
50 | { | ||
51 | const char *path = perf_debugfs_mount(NULL); | ||
52 | |||
53 | if (!path) | ||
54 | pr_debug("Your kernel does not support the debugfs filesystem"); | ||
55 | |||
56 | return path; | ||
57 | } | ||
58 | |||
59 | /* | ||
60 | * Finds the path to the debugfs/tracing | ||
61 | * Allocates the string and stores it. | ||
62 | */ | ||
63 | static const char *find_tracing_dir(void) | ||
64 | { | ||
65 | static char *tracing; | ||
66 | static int tracing_found; | ||
67 | const char *debugfs; | ||
68 | |||
69 | if (tracing_found) | ||
70 | return tracing; | ||
71 | |||
72 | debugfs = find_debugfs(); | ||
73 | if (!debugfs) | ||
74 | return NULL; | ||
75 | |||
76 | tracing = malloc(strlen(debugfs) + 9); | ||
77 | if (!tracing) | ||
78 | return NULL; | ||
79 | |||
80 | sprintf(tracing, "%s/tracing", debugfs); | ||
81 | |||
82 | tracing_found = 1; | ||
83 | return tracing; | ||
84 | } | ||
85 | |||
86 | static char *get_tracing_file(const char *name) | ||
87 | { | ||
88 | const char *tracing; | ||
89 | char *file; | ||
90 | |||
91 | tracing = find_tracing_dir(); | ||
92 | if (!tracing) | ||
93 | return NULL; | ||
94 | |||
95 | file = malloc(strlen(tracing) + strlen(name) + 2); | ||
96 | if (!file) | ||
97 | return NULL; | ||
98 | |||
99 | sprintf(file, "%s/%s", tracing, name); | ||
100 | return file; | ||
101 | } | ||
102 | |||
103 | static void put_tracing_file(char *file) | ||
104 | { | ||
105 | free(file); | ||
106 | } | ||
107 | |||
108 | int bigendian(void) | 49 | int bigendian(void) |
109 | { | 50 | { |
110 | unsigned char str[] = { 0x1, 0x2, 0x3, 0x4, 0x0, 0x0, 0x0, 0x0}; | 51 | unsigned char str[] = { 0x1, 0x2, 0x3, 0x4, 0x0, 0x0, 0x0, 0x0}; |
@@ -160,7 +101,7 @@ out: | |||
160 | return err; | 101 | return err; |
161 | } | 102 | } |
162 | 103 | ||
163 | static int read_header_files(void) | 104 | static int record_header_files(void) |
164 | { | 105 | { |
165 | char *path; | 106 | char *path; |
166 | struct stat st; | 107 | struct stat st; |
@@ -299,7 +240,7 @@ out: | |||
299 | return err; | 240 | return err; |
300 | } | 241 | } |
301 | 242 | ||
302 | static int read_ftrace_files(struct tracepoint_path *tps) | 243 | static int record_ftrace_files(struct tracepoint_path *tps) |
303 | { | 244 | { |
304 | char *path; | 245 | char *path; |
305 | int ret; | 246 | int ret; |
@@ -328,7 +269,7 @@ static bool system_in_tp_list(char *sys, struct tracepoint_path *tps) | |||
328 | return false; | 269 | return false; |
329 | } | 270 | } |
330 | 271 | ||
331 | static int read_event_files(struct tracepoint_path *tps) | 272 | static int record_event_files(struct tracepoint_path *tps) |
332 | { | 273 | { |
333 | struct dirent *dent; | 274 | struct dirent *dent; |
334 | struct stat st; | 275 | struct stat st; |
@@ -403,7 +344,7 @@ out: | |||
403 | return err; | 344 | return err; |
404 | } | 345 | } |
405 | 346 | ||
406 | static int read_proc_kallsyms(void) | 347 | static int record_proc_kallsyms(void) |
407 | { | 348 | { |
408 | unsigned int size; | 349 | unsigned int size; |
409 | const char *path = "/proc/kallsyms"; | 350 | const char *path = "/proc/kallsyms"; |
@@ -421,7 +362,7 @@ static int read_proc_kallsyms(void) | |||
421 | return record_file(path, 4); | 362 | return record_file(path, 4); |
422 | } | 363 | } |
423 | 364 | ||
424 | static int read_ftrace_printk(void) | 365 | static int record_ftrace_printk(void) |
425 | { | 366 | { |
426 | unsigned int size; | 367 | unsigned int size; |
427 | char *path; | 368 | char *path; |
@@ -473,12 +414,27 @@ get_tracepoints_path(struct list_head *pattrs) | |||
473 | if (pos->attr.type != PERF_TYPE_TRACEPOINT) | 414 | if (pos->attr.type != PERF_TYPE_TRACEPOINT) |
474 | continue; | 415 | continue; |
475 | ++nr_tracepoints; | 416 | ++nr_tracepoints; |
417 | |||
418 | if (pos->name) { | ||
419 | ppath->next = tracepoint_name_to_path(pos->name); | ||
420 | if (ppath->next) | ||
421 | goto next; | ||
422 | |||
423 | if (strchr(pos->name, ':') == NULL) | ||
424 | goto try_id; | ||
425 | |||
426 | goto error; | ||
427 | } | ||
428 | |||
429 | try_id: | ||
476 | ppath->next = tracepoint_id_to_path(pos->attr.config); | 430 | ppath->next = tracepoint_id_to_path(pos->attr.config); |
477 | if (!ppath->next) { | 431 | if (!ppath->next) { |
432 | error: | ||
478 | pr_debug("No memory to alloc tracepoints list\n"); | 433 | pr_debug("No memory to alloc tracepoints list\n"); |
479 | put_tracepoints_path(&path); | 434 | put_tracepoints_path(&path); |
480 | return NULL; | 435 | return NULL; |
481 | } | 436 | } |
437 | next: | ||
482 | ppath = ppath->next; | 438 | ppath = ppath->next; |
483 | } | 439 | } |
484 | 440 | ||
@@ -520,8 +476,6 @@ static int tracing_data_header(void) | |||
520 | else | 476 | else |
521 | buf[0] = 0; | 477 | buf[0] = 0; |
522 | 478 | ||
523 | read_trace_init(buf[0], buf[0]); | ||
524 | |||
525 | if (write(output_fd, buf, 1) != 1) | 479 | if (write(output_fd, buf, 1) != 1) |
526 | return -1; | 480 | return -1; |
527 | 481 | ||
@@ -583,19 +537,19 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs, | |||
583 | err = tracing_data_header(); | 537 | err = tracing_data_header(); |
584 | if (err) | 538 | if (err) |
585 | goto out; | 539 | goto out; |
586 | err = read_header_files(); | 540 | err = record_header_files(); |
587 | if (err) | 541 | if (err) |
588 | goto out; | 542 | goto out; |
589 | err = read_ftrace_files(tps); | 543 | err = record_ftrace_files(tps); |
590 | if (err) | 544 | if (err) |
591 | goto out; | 545 | goto out; |
592 | err = read_event_files(tps); | 546 | err = record_event_files(tps); |
593 | if (err) | 547 | if (err) |
594 | goto out; | 548 | goto out; |
595 | err = read_proc_kallsyms(); | 549 | err = record_proc_kallsyms(); |
596 | if (err) | 550 | if (err) |
597 | goto out; | 551 | goto out; |
598 | err = read_ftrace_printk(); | 552 | err = record_ftrace_printk(); |
599 | 553 | ||
600 | out: | 554 | out: |
601 | /* | 555 | /* |
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 4454835a9ebc..fe7a27d67d2b 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c | |||
@@ -28,12 +28,6 @@ | |||
28 | #include "util.h" | 28 | #include "util.h" |
29 | #include "trace-event.h" | 29 | #include "trace-event.h" |
30 | 30 | ||
31 | int header_page_size_size; | ||
32 | int header_page_ts_size; | ||
33 | int header_page_data_offset; | ||
34 | |||
35 | bool latency_format; | ||
36 | |||
37 | struct pevent *read_trace_init(int file_bigendian, int host_bigendian) | 31 | struct pevent *read_trace_init(int file_bigendian, int host_bigendian) |
38 | { | 32 | { |
39 | struct pevent *pevent = pevent_alloc(); | 33 | struct pevent *pevent = pevent_alloc(); |
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index af215c0d2379..f2112270c663 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c | |||
@@ -39,10 +39,6 @@ | |||
39 | 39 | ||
40 | static int input_fd; | 40 | static int input_fd; |
41 | 41 | ||
42 | int file_bigendian; | ||
43 | int host_bigendian; | ||
44 | static int long_size; | ||
45 | |||
46 | static ssize_t trace_data_size; | 42 | static ssize_t trace_data_size; |
47 | static bool repipe; | 43 | static bool repipe; |
48 | 44 | ||
@@ -216,7 +212,7 @@ static int read_ftrace_printk(struct pevent *pevent) | |||
216 | static int read_header_files(struct pevent *pevent) | 212 | static int read_header_files(struct pevent *pevent) |
217 | { | 213 | { |
218 | unsigned long long size; | 214 | unsigned long long size; |
219 | char *header_event; | 215 | char *header_page; |
220 | char buf[BUFSIZ]; | 216 | char buf[BUFSIZ]; |
221 | int ret = 0; | 217 | int ret = 0; |
222 | 218 | ||
@@ -229,13 +225,26 @@ static int read_header_files(struct pevent *pevent) | |||
229 | } | 225 | } |
230 | 226 | ||
231 | size = read8(pevent); | 227 | size = read8(pevent); |
232 | skip(size); | ||
233 | 228 | ||
234 | /* | 229 | header_page = malloc(size); |
235 | * The size field in the page is of type long, | 230 | if (header_page == NULL) |
236 | * use that instead, since it represents the kernel. | 231 | return -1; |
237 | */ | 232 | |
238 | long_size = header_page_size_size; | 233 | if (do_read(header_page, size) < 0) { |
234 | pr_debug("did not read header page"); | ||
235 | free(header_page); | ||
236 | return -1; | ||
237 | } | ||
238 | |||
239 | if (!pevent_parse_header_page(pevent, header_page, size, | ||
240 | pevent_get_long_size(pevent))) { | ||
241 | /* | ||
242 | * The commit field in the page is of type long, | ||
243 | * use that instead, since it represents the kernel. | ||
244 | */ | ||
245 | pevent_set_long_size(pevent, pevent->header_page_size_size); | ||
246 | } | ||
247 | free(header_page); | ||
239 | 248 | ||
240 | if (do_read(buf, 13) < 0) | 249 | if (do_read(buf, 13) < 0) |
241 | return -1; | 250 | return -1; |
@@ -246,14 +255,8 @@ static int read_header_files(struct pevent *pevent) | |||
246 | } | 255 | } |
247 | 256 | ||
248 | size = read8(pevent); | 257 | size = read8(pevent); |
249 | header_event = malloc(size); | 258 | skip(size); |
250 | if (header_event == NULL) | ||
251 | return -1; | ||
252 | |||
253 | if (do_read(header_event, size) < 0) | ||
254 | ret = -1; | ||
255 | 259 | ||
256 | free(header_event); | ||
257 | return ret; | 260 | return ret; |
258 | } | 261 | } |
259 | 262 | ||
@@ -349,6 +352,10 @@ ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe) | |||
349 | int show_funcs = 0; | 352 | int show_funcs = 0; |
350 | int show_printk = 0; | 353 | int show_printk = 0; |
351 | ssize_t size = -1; | 354 | ssize_t size = -1; |
355 | int file_bigendian; | ||
356 | int host_bigendian; | ||
357 | int file_long_size; | ||
358 | int file_page_size; | ||
352 | struct pevent *pevent; | 359 | struct pevent *pevent; |
353 | int err; | 360 | int err; |
354 | 361 | ||
@@ -391,12 +398,15 @@ ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe) | |||
391 | 398 | ||
392 | if (do_read(buf, 1) < 0) | 399 | if (do_read(buf, 1) < 0) |
393 | goto out; | 400 | goto out; |
394 | long_size = buf[0]; | 401 | file_long_size = buf[0]; |
395 | 402 | ||
396 | page_size = read4(pevent); | 403 | file_page_size = read4(pevent); |
397 | if (!page_size) | 404 | if (!file_page_size) |
398 | goto out; | 405 | goto out; |
399 | 406 | ||
407 | pevent_set_long_size(pevent, file_long_size); | ||
408 | pevent_set_page_size(pevent, file_page_size); | ||
409 | |||
400 | err = read_header_files(pevent); | 410 | err = read_header_files(pevent); |
401 | if (err) | 411 | if (err) |
402 | goto out; | 412 | goto out; |
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index 8715a1006d00..95199e4eea97 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c | |||
@@ -39,7 +39,8 @@ static void process_event_unsupported(union perf_event *event __maybe_unused, | |||
39 | struct perf_sample *sample __maybe_unused, | 39 | struct perf_sample *sample __maybe_unused, |
40 | struct perf_evsel *evsel __maybe_unused, | 40 | struct perf_evsel *evsel __maybe_unused, |
41 | struct machine *machine __maybe_unused, | 41 | struct machine *machine __maybe_unused, |
42 | struct addr_location *al __maybe_unused) | 42 | struct thread *thread __maybe_unused, |
43 | struct addr_location *al __maybe_unused) | ||
43 | { | 44 | { |
44 | } | 45 | } |
45 | 46 | ||
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 1978c398ad87..fafe1a40444a 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h | |||
@@ -1,32 +1,18 @@ | |||
1 | #ifndef _PERF_UTIL_TRACE_EVENT_H | 1 | #ifndef _PERF_UTIL_TRACE_EVENT_H |
2 | #define _PERF_UTIL_TRACE_EVENT_H | 2 | #define _PERF_UTIL_TRACE_EVENT_H |
3 | 3 | ||
4 | #include <traceevent/event-parse.h> | ||
4 | #include "parse-events.h" | 5 | #include "parse-events.h" |
5 | #include "event-parse.h" | ||
6 | #include "session.h" | 6 | #include "session.h" |
7 | 7 | ||
8 | struct machine; | 8 | struct machine; |
9 | struct perf_sample; | 9 | struct perf_sample; |
10 | union perf_event; | 10 | union perf_event; |
11 | struct perf_tool; | 11 | struct perf_tool; |
12 | struct thread; | ||
12 | 13 | ||
13 | extern int header_page_size_size; | ||
14 | extern int header_page_ts_size; | ||
15 | extern int header_page_data_offset; | ||
16 | |||
17 | extern bool latency_format; | ||
18 | extern struct pevent *perf_pevent; | 14 | extern struct pevent *perf_pevent; |
19 | 15 | ||
20 | enum { | ||
21 | RINGBUF_TYPE_PADDING = 29, | ||
22 | RINGBUF_TYPE_TIME_EXTEND = 30, | ||
23 | RINGBUF_TYPE_TIME_STAMP = 31, | ||
24 | }; | ||
25 | |||
26 | #ifndef TS_SHIFT | ||
27 | #define TS_SHIFT 27 | ||
28 | #endif | ||
29 | |||
30 | int bigendian(void); | 16 | int bigendian(void); |
31 | 17 | ||
32 | struct pevent *read_trace_init(int file_bigendian, int host_bigendian); | 18 | struct pevent *read_trace_init(int file_bigendian, int host_bigendian); |
@@ -83,7 +69,8 @@ struct scripting_ops { | |||
83 | struct perf_sample *sample, | 69 | struct perf_sample *sample, |
84 | struct perf_evsel *evsel, | 70 | struct perf_evsel *evsel, |
85 | struct machine *machine, | 71 | struct machine *machine, |
86 | struct addr_location *al); | 72 | struct thread *thread, |
73 | struct addr_location *al); | ||
87 | int (*generate_script) (struct pevent *pevent, const char *outfile); | 74 | int (*generate_script) (struct pevent *pevent, const char *outfile); |
88 | }; | 75 | }; |
89 | 76 | ||
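
The trace-event-scripting.c and trace-event.h hunks above widen the process_event callback so scripting back ends also receive the resolved struct thread. Purely as an illustration of the new prototype, a back end stub might look like the following; my_process_event is an invented name, and a real back end would also fill in the remaining scripting_ops members:

#include "trace-event.h"

/* Illustrative stub only: matches the updated scripting_ops prototype. */
static void my_process_event(union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_evsel *evsel,
			     struct machine *machine,
			     struct thread *thread,
			     struct addr_location *al)
{
	/* a real back end hands sample, thread and al to its interpreter */
}

/* ... wired up in a backend's scripting_ops as
 *	.process_event = my_process_event,
 */
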
diff --git a/tools/perf/util/unwind.c b/tools/perf/util/unwind.c index 958723ba3d2e..2f891f7e70bf 100644 --- a/tools/perf/util/unwind.c +++ b/tools/perf/util/unwind.c | |||
@@ -473,7 +473,7 @@ static int entry(u64 ip, struct thread *thread, struct machine *machine, | |||
473 | 473 | ||
474 | thread__find_addr_location(thread, machine, | 474 | thread__find_addr_location(thread, machine, |
475 | PERF_RECORD_MISC_USER, | 475 | PERF_RECORD_MISC_USER, |
476 | MAP__FUNCTION, ip, &al, NULL); | 476 | MAP__FUNCTION, ip, &al); |
477 | 477 | ||
478 | e.ip = ip; | 478 | e.ip = ip; |
479 | e.map = al.map; | 479 | e.map = al.map; |
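
The unwind.c hunk only drops the trailing symbol-filter argument; symbol resolution itself is unchanged. As a hypothetical illustration (not part of the commit, and relying only on declarations unwind.c already pulls in), a helper built on the narrowed call could read:

/* Hypothetical helper: resolve a user-space ip to a symbol name, or NULL. */
static const char *resolve_user_sym(struct thread *thread,
				    struct machine *machine, u64 ip)
{
	struct addr_location al;

	thread__find_addr_location(thread, machine, PERF_RECORD_MISC_USER,
				   MAP__FUNCTION, ip, &al);

	return al.sym ? al.sym->name : NULL;
}
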
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 59d868add275..6d17b18e915d 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c | |||
@@ -269,3 +269,95 @@ void perf_debugfs_set_path(const char *mntpt) | |||
269 | snprintf(debugfs_mountpoint, strlen(debugfs_mountpoint), "%s", mntpt); | 269 | snprintf(debugfs_mountpoint, strlen(debugfs_mountpoint), "%s", mntpt); |
270 | set_tracing_events_path(mntpt); | 270 | set_tracing_events_path(mntpt); |
271 | } | 271 | } |
272 | |||
273 | static const char *find_debugfs(void) | ||
274 | { | ||
275 | const char *path = perf_debugfs_mount(NULL); | ||
276 | |||
277 | if (!path) | ||
278 | fprintf(stderr, "Your kernel does not support the debugfs filesystem"); | ||
279 | |||
280 | return path; | ||
281 | } | ||
282 | |||
283 | /* | ||
284 | * Finds the path to the debugfs/tracing | ||
285 | * Allocates the string and stores it. | ||
286 | */ | ||
287 | const char *find_tracing_dir(void) | ||
288 | { | ||
289 | static char *tracing; | ||
290 | static int tracing_found; | ||
291 | const char *debugfs; | ||
292 | |||
293 | if (tracing_found) | ||
294 | return tracing; | ||
295 | |||
296 | debugfs = find_debugfs(); | ||
297 | if (!debugfs) | ||
298 | return NULL; | ||
299 | |||
300 | tracing = malloc(strlen(debugfs) + 9); | ||
301 | if (!tracing) | ||
302 | return NULL; | ||
303 | |||
304 | sprintf(tracing, "%s/tracing", debugfs); | ||
305 | |||
306 | tracing_found = 1; | ||
307 | return tracing; | ||
308 | } | ||
309 | |||
310 | char *get_tracing_file(const char *name) | ||
311 | { | ||
312 | const char *tracing; | ||
313 | char *file; | ||
314 | |||
315 | tracing = find_tracing_dir(); | ||
316 | if (!tracing) | ||
317 | return NULL; | ||
318 | |||
319 | file = malloc(strlen(tracing) + strlen(name) + 2); | ||
320 | if (!file) | ||
321 | return NULL; | ||
322 | |||
323 | sprintf(file, "%s/%s", tracing, name); | ||
324 | return file; | ||
325 | } | ||
326 | |||
327 | void put_tracing_file(char *file) | ||
328 | { | ||
329 | free(file); | ||
330 | } | ||
331 | |||
332 | int parse_nsec_time(const char *str, u64 *ptime) | ||
333 | { | ||
334 | u64 time_sec, time_nsec; | ||
335 | char *end; | ||
336 | |||
337 | time_sec = strtoul(str, &end, 10); | ||
338 | if (*end != '.' && *end != '\0') | ||
339 | return -1; | ||
340 | |||
341 | if (*end == '.') { | ||
342 | int i; | ||
343 | char nsec_buf[10]; | ||
344 | |||
345 | if (strlen(++end) > 9) | ||
346 | return -1; | ||
347 | |||
348 | strncpy(nsec_buf, end, 9); | ||
349 | nsec_buf[9] = '\0'; | ||
350 | |||
351 | /* make it nsec precision */ | ||
352 | for (i = strlen(nsec_buf); i < 9; i++) | ||
353 | nsec_buf[i] = '0'; | ||
354 | |||
355 | time_nsec = strtoul(nsec_buf, &end, 10); | ||
356 | if (*end != '\0') | ||
357 | return -1; | ||
358 | } else | ||
359 | time_nsec = 0; | ||
360 | |||
361 | *ptime = time_sec * NSEC_PER_SEC + time_nsec; | ||
362 | return 0; | ||
363 | } | ||
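
The helpers added to util.c above are self-contained; a short hypothetical caller (dump_current_tracer() and the "current_tracer" file name are examples, not part of the commit) shows the intended get/put pairing and the nanosecond parsing:

#include <stdio.h>
#include <inttypes.h>
#include "util.h"

/* Hypothetical usage of get_tracing_file()/put_tracing_file() and
 * parse_nsec_time() as introduced above. */
static int dump_current_tracer(void)
{
	char *path = get_tracing_file("current_tracer");
	char buf[64];
	FILE *fp;
	u64 t;

	if (!path)
		return -1;

	fp = fopen(path, "r");
	if (fp) {
		if (fgets(buf, sizeof(buf), fp))
			printf("current_tracer: %s", buf);
		fclose(fp);
	}
	put_tracing_file(path);		/* frees the malloc'd path */

	/* "12.5" parses to 12 * NSEC_PER_SEC + 500000000 */
	if (parse_nsec_time("12.5", &t) == 0)
		printf("t = %" PRIu64 " ns\n", t);

	return 0;
}
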
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 2732fad03908..a53535949043 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h | |||
@@ -80,6 +80,9 @@ extern char buildid_dir[]; | |||
80 | extern char tracing_events_path[]; | 80 | extern char tracing_events_path[]; |
81 | extern void perf_debugfs_set_path(const char *mountpoint); | 81 | extern void perf_debugfs_set_path(const char *mountpoint); |
82 | const char *perf_debugfs_mount(const char *mountpoint); | 82 | const char *perf_debugfs_mount(const char *mountpoint); |
83 | const char *find_tracing_dir(void); | ||
84 | char *get_tracing_file(const char *name); | ||
85 | void put_tracing_file(char *file); | ||
83 | 86 | ||
84 | /* On most systems <limits.h> would have given us this, but | 87 | /* On most systems <limits.h> would have given us this, but |
85 | * not on some systems (e.g. GNU/Hurd). | 88 | * not on some systems (e.g. GNU/Hurd). |
@@ -205,6 +208,8 @@ static inline int has_extension(const char *filename, const char *ext) | |||
205 | #define NSEC_PER_MSEC 1000000L | 208 | #define NSEC_PER_MSEC 1000000L |
206 | #endif | 209 | #endif |
207 | 210 | ||
211 | int parse_nsec_time(const char *str, u64 *ptime); | ||
212 | |||
208 | extern unsigned char sane_ctype[256]; | 213 | extern unsigned char sane_ctype[256]; |
209 | #define GIT_SPACE 0x01 | 214 | #define GIT_SPACE 0x01 |
210 | #define GIT_DIGIT 0x02 | 215 | #define GIT_DIGIT 0x02 |