author     Ingo Molnar <mingo@kernel.org>  2012-12-08 09:18:41 -0500
committer  Ingo Molnar <mingo@kernel.org>  2012-12-08 09:18:41 -0500
commit     adc1ef1e37358d3c17d1a74a58b2e104fc0bda15 (patch)
tree       6f43107a76ed87f2b817594d2d62246ab82cfba6 /tools
parent     84e53ff77cb1e005f49966cd6789109d84acc9e2 (diff)
parent     07ac002f2fcc74c5be47b656d9201d5de84dc53d (diff)
Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:
- UAPI fixes, from David Howells
- Separate perf tests into multiple objects, one per test, from Jiri Olsa
- Fixes to /proc/pid/maps parsing, preparatory to supporting data maps,
from Namhyung Kim
- Fix compile error for non-NEWT builds, from Namhyung Kim
- Implement ui_progress for GTK, from Namhyung Kim
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'tools')
51 files changed, 2046 insertions(+), 1783 deletions(-)
diff --git a/tools/Makefile b/tools/Makefile
index 3ae43947a171..1f9a529fe544 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -31,44 +31,44 @@ help:
31 | @echo ' clean: a summary clean target to clean _all_ folders' | 31 | @echo ' clean: a summary clean target to clean _all_ folders' |
32 | 32 | ||
33 | cpupower: FORCE | 33 | cpupower: FORCE |
34 | $(QUIET_SUBDIR0)power/$@/ $(QUIET_SUBDIR1) | 34 | $(call descend,power/$@) |
35 | 35 | ||
36 | firewire lguest perf usb virtio vm: FORCE | 36 | firewire lguest perf usb virtio vm: FORCE |
37 | $(QUIET_SUBDIR0)$@/ $(QUIET_SUBDIR1) | 37 | $(call descend,$@) |
38 | 38 | ||
39 | selftests: FORCE | 39 | selftests: FORCE |
40 | $(QUIET_SUBDIR0)testing/$@/ $(QUIET_SUBDIR1) | 40 | $(call descend,testing/$@) |
41 | 41 | ||
42 | turbostat x86_energy_perf_policy: FORCE | 42 | turbostat x86_energy_perf_policy: FORCE |
43 | $(QUIET_SUBDIR0)power/x86/$@/ $(QUIET_SUBDIR1) | 43 | $(call descend,power/x86/$@) |
44 | 44 | ||
45 | cpupower_install: | 45 | cpupower_install: |
46 | $(QUIET_SUBDIR0)power/$(@:_install=)/ $(QUIET_SUBDIR1) install | 46 | $(call descend,power/$(@:_install=),install) |
47 | 47 | ||
48 | firewire_install lguest_install perf_install usb_install virtio_install vm_install: | 48 | firewire_install lguest_install perf_install usb_install virtio_install vm_install: |
49 | $(QUIET_SUBDIR0)$(@:_install=)/ $(QUIET_SUBDIR1) install | 49 | $(call descend,$(@:_install=),install) |
50 | 50 | ||
51 | selftests_install: | 51 | selftests_install: |
52 | $(QUIET_SUBDIR0)testing/$(@:_clean=)/ $(QUIET_SUBDIR1) install | 52 | $(call descend,testing/$(@:_clean=),install) |
53 | 53 | ||
54 | turbostat_install x86_energy_perf_policy_install: | 54 | turbostat_install x86_energy_perf_policy_install: |
55 | $(QUIET_SUBDIR0)power/x86/$(@:_install=)/ $(QUIET_SUBDIR1) install | 55 | $(call descend,power/x86/$(@:_install=),install) |
56 | 56 | ||
57 | install: cpupower_install firewire_install lguest_install perf_install \ | 57 | install: cpupower_install firewire_install lguest_install perf_install \ |
58 | selftests_install turbostat_install usb_install virtio_install \ | 58 | selftests_install turbostat_install usb_install virtio_install \ |
59 | vm_install x86_energy_perf_policy_install | 59 | vm_install x86_energy_perf_policy_install |
60 | 60 | ||
61 | cpupower_clean: | 61 | cpupower_clean: |
62 | $(QUIET_SUBDIR0)power/cpupower/ $(QUIET_SUBDIR1) clean | 62 | $(call descend,power/cpupower,clean) |
63 | 63 | ||
64 | firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean: | 64 | firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean: |
65 | $(QUIET_SUBDIR0)$(@:_clean=)/ $(QUIET_SUBDIR1) clean | 65 | $(call descend,$(@:_clean=),clean) |
66 | 66 | ||
67 | selftests_clean: | 67 | selftests_clean: |
68 | $(QUIET_SUBDIR0)testing/$(@:_clean=)/ $(QUIET_SUBDIR1) clean | 68 | $(call descend,testing/$(@:_clean=),clean) |
69 | 69 | ||
70 | turbostat_clean x86_energy_perf_policy_clean: | 70 | turbostat_clean x86_energy_perf_policy_clean: |
71 | $(QUIET_SUBDIR0)power/x86/$(@:_clean=)/ $(QUIET_SUBDIR1) clean | 71 | $(call descend,power/x86/$(@:_clean=),clean) |
72 | 72 | ||
73 | clean: cpupower_clean firewire_clean lguest_clean perf_clean selftests_clean \ | 73 | clean: cpupower_clean firewire_clean lguest_clean perf_clean selftests_clean \ |
74 | turbostat_clean usb_clean virtio_clean vm_clean \ | 74 | turbostat_clean usb_clean virtio_clean vm_clean \ |
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index cca5bb8334ad..a7c6aa8d4a8b 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -422,7 +422,9 @@ LIB_OBJS += $(OUTPUT)util/intlist.o
422 | LIB_OBJS += $(OUTPUT)util/vdso.o | 422 | LIB_OBJS += $(OUTPUT)util/vdso.o |
423 | LIB_OBJS += $(OUTPUT)util/stat.o | 423 | LIB_OBJS += $(OUTPUT)util/stat.o |
424 | 424 | ||
425 | LIB_OBJS += $(OUTPUT)ui/setup.o | ||
425 | LIB_OBJS += $(OUTPUT)ui/helpline.o | 426 | LIB_OBJS += $(OUTPUT)ui/helpline.o |
427 | LIB_OBJS += $(OUTPUT)ui/progress.o | ||
426 | LIB_OBJS += $(OUTPUT)ui/hist.o | 428 | LIB_OBJS += $(OUTPUT)ui/hist.o |
427 | LIB_OBJS += $(OUTPUT)ui/stdio/hist.o | 429 | LIB_OBJS += $(OUTPUT)ui/stdio/hist.o |
428 | 430 | ||
@@ -431,6 +433,17 @@ LIB_OBJS += $(OUTPUT)arch/common.o
431 | LIB_OBJS += $(OUTPUT)tests/parse-events.o | 433 | LIB_OBJS += $(OUTPUT)tests/parse-events.o |
432 | LIB_OBJS += $(OUTPUT)tests/dso-data.o | 434 | LIB_OBJS += $(OUTPUT)tests/dso-data.o |
433 | LIB_OBJS += $(OUTPUT)tests/attr.o | 435 | LIB_OBJS += $(OUTPUT)tests/attr.o |
436 | LIB_OBJS += $(OUTPUT)tests/vmlinux-kallsyms.o | ||
437 | LIB_OBJS += $(OUTPUT)tests/open-syscall.o | ||
438 | LIB_OBJS += $(OUTPUT)tests/open-syscall-all-cpus.o | ||
439 | LIB_OBJS += $(OUTPUT)tests/open-syscall-tp-fields.o | ||
440 | LIB_OBJS += $(OUTPUT)tests/mmap-basic.o | ||
441 | LIB_OBJS += $(OUTPUT)tests/perf-record.o | ||
442 | LIB_OBJS += $(OUTPUT)tests/rdpmc.o | ||
443 | LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o | ||
444 | LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o | ||
445 | LIB_OBJS += $(OUTPUT)tests/pmu.o | ||
446 | LIB_OBJS += $(OUTPUT)tests/util.o | ||
434 | 447 | ||
435 | BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o | 448 | BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o |
436 | BUILTIN_OBJS += $(OUTPUT)builtin-bench.o | 449 | BUILTIN_OBJS += $(OUTPUT)builtin-bench.o |
@@ -600,17 +613,16 @@ ifndef NO_NEWT
600 | BASIC_CFLAGS += -I/usr/include/slang | 613 | BASIC_CFLAGS += -I/usr/include/slang |
601 | BASIC_CFLAGS += -DNEWT_SUPPORT | 614 | BASIC_CFLAGS += -DNEWT_SUPPORT |
602 | EXTLIBS += -lnewt -lslang | 615 | EXTLIBS += -lnewt -lslang |
603 | LIB_OBJS += $(OUTPUT)ui/setup.o | ||
604 | LIB_OBJS += $(OUTPUT)ui/browser.o | 616 | LIB_OBJS += $(OUTPUT)ui/browser.o |
605 | LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o | 617 | LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o |
606 | LIB_OBJS += $(OUTPUT)ui/browsers/hists.o | 618 | LIB_OBJS += $(OUTPUT)ui/browsers/hists.o |
607 | LIB_OBJS += $(OUTPUT)ui/browsers/map.o | 619 | LIB_OBJS += $(OUTPUT)ui/browsers/map.o |
608 | LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o | 620 | LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o |
609 | LIB_OBJS += $(OUTPUT)ui/progress.o | ||
610 | LIB_OBJS += $(OUTPUT)ui/util.o | 621 | LIB_OBJS += $(OUTPUT)ui/util.o |
611 | LIB_OBJS += $(OUTPUT)ui/tui/setup.o | 622 | LIB_OBJS += $(OUTPUT)ui/tui/setup.o |
612 | LIB_OBJS += $(OUTPUT)ui/tui/util.o | 623 | LIB_OBJS += $(OUTPUT)ui/tui/util.o |
613 | LIB_OBJS += $(OUTPUT)ui/tui/helpline.o | 624 | LIB_OBJS += $(OUTPUT)ui/tui/helpline.o |
625 | LIB_OBJS += $(OUTPUT)ui/tui/progress.o | ||
614 | LIB_H += ui/browser.h | 626 | LIB_H += ui/browser.h |
615 | LIB_H += ui/browsers/map.h | 627 | LIB_H += ui/browsers/map.h |
616 | LIB_H += ui/keysyms.h | 628 | LIB_H += ui/keysyms.h |
@@ -636,9 +648,9 @@ ifndef NO_GTK2
636 | LIB_OBJS += $(OUTPUT)ui/gtk/setup.o | 648 | LIB_OBJS += $(OUTPUT)ui/gtk/setup.o |
637 | LIB_OBJS += $(OUTPUT)ui/gtk/util.o | 649 | LIB_OBJS += $(OUTPUT)ui/gtk/util.o |
638 | LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o | 650 | LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o |
651 | LIB_OBJS += $(OUTPUT)ui/gtk/progress.o | ||
639 | # Make sure that it'd be included only once. | 652 | # Make sure that it'd be included only once. |
640 | ifeq ($(findstring -DNEWT_SUPPORT,$(BASIC_CFLAGS)),) | 653 | ifeq ($(findstring -DNEWT_SUPPORT,$(BASIC_CFLAGS)),) |
641 | LIB_OBJS += $(OUTPUT)ui/setup.o | ||
642 | LIB_OBJS += $(OUTPUT)ui/util.o | 654 | LIB_OBJS += $(OUTPUT)ui/util.o |
643 | endif | 655 | endif |
644 | endif | 656 | endif |
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 5783c3225116..f3151d3c70ce 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -230,11 +230,15 @@ static int perf_record__open(struct perf_record *rec)
230 | struct perf_record_opts *opts = &rec->opts; | 230 | struct perf_record_opts *opts = &rec->opts; |
231 | int rc = 0; | 231 | int rc = 0; |
232 | 232 | ||
233 | perf_evlist__config_attrs(evlist, opts); | 233 | /* |
234 | 234 | * Set the evsel leader links before we configure attributes, | |
235 | * since some might depend on this info. | ||
236 | */ | ||
235 | if (opts->group) | 237 | if (opts->group) |
236 | perf_evlist__set_leader(evlist); | 238 | perf_evlist__set_leader(evlist); |
237 | 239 | ||
240 | perf_evlist__config_attrs(evlist, opts); | ||
241 | |||
238 | list_for_each_entry(pos, &evlist->entries, node) { | 242 | list_for_each_entry(pos, &evlist->entries, node) { |
239 | struct perf_event_attr *attr = &pos->attr; | 243 | struct perf_event_attr *attr = &pos->attr; |
240 | /* | 244 | /* |
@@ -498,6 +502,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
498 | struct perf_evlist *evsel_list = rec->evlist; | 502 | struct perf_evlist *evsel_list = rec->evlist; |
499 | const char *output_name = rec->output_name; | 503 | const char *output_name = rec->output_name; |
500 | struct perf_session *session; | 504 | struct perf_session *session; |
505 | bool disabled = false; | ||
501 | 506 | ||
502 | rec->progname = argv[0]; | 507 | rec->progname = argv[0]; |
503 | 508 | ||
@@ -697,7 +702,13 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
697 | } | 702 | } |
698 | } | 703 | } |
699 | 704 | ||
700 | perf_evlist__enable(evsel_list); | 705 | /* |
706 | * When perf is starting the traced process, all the events | ||
707 | * (apart from group members) have enable_on_exec=1 set, | ||
708 | * so don't spoil it by prematurely enabling them. | ||
709 | */ | ||
710 | if (!perf_target__none(&opts->target)) | ||
711 | perf_evlist__enable(evsel_list); | ||
701 | 712 | ||
702 | /* | 713 | /* |
703 | * Let the child rip | 714 | * Let the child rip |
@@ -720,8 +731,15 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
720 | waking++; | 731 | waking++; |
721 | } | 732 | } |
722 | 733 | ||
723 | if (done) | 734 | /* |
735 | * When perf is starting the traced process, at the end events | ||
736 | * die with the process and we wait for that. Thus no need to | ||
737 | * disable events in this case. | ||
738 | */ | ||
739 | if (done && !disabled && !perf_target__none(&opts->target)) { | ||
724 | perf_evlist__disable(evsel_list); | 740 | perf_evlist__disable(evsel_list); |
741 | disabled = true; | ||
742 | } | ||
725 | } | 743 | } |
726 | 744 | ||
727 | if (quiet || signr == SIGUSR1) | 745 | if (quiet || signr == SIGUSR1) |
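Editor's note: the two hunks above lean on the kernel's enable_on_exec mechanism. When perf itself forks the workload, the counters are created disabled and the kernel enables them at the moment the child calls exec(), so calling perf_evlist__enable()/perf_evlist__disable() for that case would only add noise. Below is a minimal, self-contained sketch of that pattern using the raw perf_event_open() syscall; it is illustrative only and not code from this commit: the perf_event_open() wrapper is a local helper around syscall(), and the pipe handshake merely stands in for perf's internal fork/exec synchronization.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int go[2], fd;
        pid_t child;

        if (pipe(go) < 0)
                return 1;

        child = fork();
        if (child == 0) {
                char c;

                close(go[1]);
                read(go[0], &c, 1);      /* wait until the counter exists */
                execlp("true", "true", (char *)NULL);
                _exit(127);
        }

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.exclude_kernel = 1;
        attr.disabled = 1;               /* created stopped ...            */
        attr.enable_on_exec = 1;         /* ... auto-enabled at the exec() */

        fd = perf_event_open(&attr, child, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        close(go[0]);
        write(go[1], "", 1);             /* let the child exec the workload */
        waitpid(child, NULL, 0);

        if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
                printf("instructions: %llu\n", (unsigned long long)count);

        close(fd);
        return 0;
}

With this arrangement counting starts exactly at exec time, which is why the hunks above skip the explicit enable when the target is the forked workload, and skip the final disable because the events die with the process anyway.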
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 6888960ef8b8..c247faca7127 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -129,8 +129,7 @@ static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
129 | static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS]; | 129 | static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS]; |
130 | static struct stats walltime_nsecs_stats; | 130 | static struct stats walltime_nsecs_stats; |
131 | 131 | ||
132 | static int create_perf_stat_counter(struct perf_evsel *evsel, | 132 | static int create_perf_stat_counter(struct perf_evsel *evsel) |
133 | struct perf_evsel *first) | ||
134 | { | 133 | { |
135 | struct perf_event_attr *attr = &evsel->attr; | 134 | struct perf_event_attr *attr = &evsel->attr; |
136 | bool exclude_guest_missing = false; | 135 | bool exclude_guest_missing = false; |
@@ -153,7 +152,8 @@ retry:
153 | return 0; | 152 | return 0; |
154 | } | 153 | } |
155 | 154 | ||
156 | if (!perf_target__has_task(&target) && (!group || evsel == first)) { | 155 | if (!perf_target__has_task(&target) && |
156 | !perf_evsel__is_group_member(evsel)) { | ||
157 | attr->disabled = 1; | 157 | attr->disabled = 1; |
158 | attr->enable_on_exec = 1; | 158 | attr->enable_on_exec = 1; |
159 | } | 159 | } |
@@ -272,7 +272,7 @@ static int read_counter(struct perf_evsel *counter)
272 | static int __run_perf_stat(int argc __maybe_unused, const char **argv) | 272 | static int __run_perf_stat(int argc __maybe_unused, const char **argv) |
273 | { | 273 | { |
274 | unsigned long long t0, t1; | 274 | unsigned long long t0, t1; |
275 | struct perf_evsel *counter, *first; | 275 | struct perf_evsel *counter; |
276 | int status = 0; | 276 | int status = 0; |
277 | int child_ready_pipe[2], go_pipe[2]; | 277 | int child_ready_pipe[2], go_pipe[2]; |
278 | const bool forks = (argc > 0); | 278 | const bool forks = (argc > 0); |
@@ -332,10 +332,8 @@ static int __run_perf_stat(int argc __maybe_unused, const char **argv)
332 | if (group) | 332 | if (group) |
333 | perf_evlist__set_leader(evsel_list); | 333 | perf_evlist__set_leader(evsel_list); |
334 | 334 | ||
335 | first = perf_evlist__first(evsel_list); | ||
336 | |||
337 | list_for_each_entry(counter, &evsel_list->entries, node) { | 335 | list_for_each_entry(counter, &evsel_list->entries, node) { |
338 | if (create_perf_stat_counter(counter, first) < 0) { | 336 | if (create_perf_stat_counter(counter) < 0) { |
339 | /* | 337 | /* |
340 | * PPC returns ENXIO for HW counters until 2.6.37 | 338 | * PPC returns ENXIO for HW counters until 2.6.37 |
341 | * (behavior changed with commit b0a873e). | 339 | * (behavior changed with commit b0a873e). |
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index a0ae2902f9c9..0f661fbce6a8 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -85,21 +85,26 @@ int check_pager_config(const char *cmd)
85 | return c.val; | 85 | return c.val; |
86 | } | 86 | } |
87 | 87 | ||
88 | static int tui_command_config(const char *var, const char *value, void *data) | 88 | static int browser_command_config(const char *var, const char *value, void *data) |
89 | { | 89 | { |
90 | struct pager_config *c = data; | 90 | struct pager_config *c = data; |
91 | if (!prefixcmp(var, "tui.") && !strcmp(var + 4, c->cmd)) | 91 | if (!prefixcmp(var, "tui.") && !strcmp(var + 4, c->cmd)) |
92 | c->val = perf_config_bool(var, value); | 92 | c->val = perf_config_bool(var, value); |
93 | if (!prefixcmp(var, "gtk.") && !strcmp(var + 4, c->cmd)) | ||
94 | c->val = perf_config_bool(var, value) ? 2 : 0; | ||
93 | return 0; | 95 | return 0; |
94 | } | 96 | } |
95 | 97 | ||
96 | /* returns 0 for "no tui", 1 for "use tui", and -1 for "not specified" */ | 98 | /* |
97 | static int check_tui_config(const char *cmd) | 99 | * returns 0 for "no tui", 1 for "use tui", 2 for "use gtk", |
100 | * and -1 for "not specified" | ||
101 | */ | ||
102 | static int check_browser_config(const char *cmd) | ||
98 | { | 103 | { |
99 | struct pager_config c; | 104 | struct pager_config c; |
100 | c.cmd = cmd; | 105 | c.cmd = cmd; |
101 | c.val = -1; | 106 | c.val = -1; |
102 | perf_config(tui_command_config, &c); | 107 | perf_config(browser_command_config, &c); |
103 | return c.val; | 108 | return c.val; |
104 | } | 109 | } |
105 | 110 | ||
@@ -302,7 +307,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
302 | prefix = NULL; /* setup_perf_directory(); */ | 307 | prefix = NULL; /* setup_perf_directory(); */ |
303 | 308 | ||
304 | if (use_browser == -1) | 309 | if (use_browser == -1) |
305 | use_browser = check_tui_config(p->cmd); | 310 | use_browser = check_browser_config(p->cmd); |
306 | 311 | ||
307 | if (use_pager == -1 && p->option & RUN_SETUP) | 312 | if (use_pager == -1 && p->option & RUN_SETUP) |
308 | use_pager = check_pager_config(p->cmd); | 313 | use_pager = check_pager_config(p->cmd); |
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 054182e41dca..f53ee0bbee88 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -26,7 +26,7 @@ void get_term_dimensions(struct winsize *ws);
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | #ifdef __powerpc__ | 28 | #ifdef __powerpc__ |
29 | #include "../../arch/powerpc/include/asm/unistd.h" | 29 | #include "../../arch/powerpc/include/uapi/asm/unistd.h" |
30 | #define rmb() asm volatile ("sync" ::: "memory") | 30 | #define rmb() asm volatile ("sync" ::: "memory") |
31 | #define cpu_relax() asm volatile ("" ::: "memory"); | 31 | #define cpu_relax() asm volatile ("" ::: "memory"); |
32 | #define CPUINFO_PROC "cpu" | 32 | #define CPUINFO_PROC "cpu" |
@@ -178,7 +178,6 @@ extern bool test_attr__enabled;
178 | void test_attr__init(void); | 178 | void test_attr__init(void); |
179 | void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, | 179 | void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, |
180 | int fd, int group_fd, unsigned long flags); | 180 | int fd, int group_fd, unsigned long flags); |
181 | int test_attr__run(void); | ||
182 | 181 | ||
183 | static inline int | 182 | static inline int |
184 | sys_perf_event_open(struct perf_event_attr *attr, | 183 | sys_perf_event_open(struct perf_event_attr *attr, |
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 6e2feee8db2a..25638a986257 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -27,6 +27,7 @@
27 | #include "../perf.h" | 27 | #include "../perf.h" |
28 | #include "util.h" | 28 | #include "util.h" |
29 | #include "exec_cmd.h" | 29 | #include "exec_cmd.h" |
30 | #include "tests.h" | ||
30 | 31 | ||
31 | #define ENV "PERF_TEST_ATTR" | 32 | #define ENV "PERF_TEST_ATTR" |
32 | 33 | ||
@@ -151,7 +152,7 @@ static int run_dir(const char *d, const char *perf)
151 | return system(cmd); | 152 | return system(cmd); |
152 | } | 153 | } |
153 | 154 | ||
154 | int test_attr__run(void) | 155 | int test__attr(void) |
155 | { | 156 | { |
156 | struct stat st; | 157 | struct stat st; |
157 | char path_perf[PATH_MAX]; | 158 | char path_perf[PATH_MAX]; |
diff --git a/tools/perf/tests/attr/test-record-group b/tools/perf/tests/attr/test-record-group
index b945f770dc9e..a6599e9a19d3 100644
--- a/tools/perf/tests/attr/test-record-group
+++ b/tools/perf/tests/attr/test-record-group
@@ -15,3 +15,4 @@ sample_type=327
15 | mmap=0 | 15 | mmap=0 |
16 | comm=0 | 16 | comm=0 |
17 | enable_on_exec=0 | 17 | enable_on_exec=0 |
18 | disabled=0 | ||
diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1
index 39bf8609538c..5a8359da38af 100644
--- a/tools/perf/tests/attr/test-record-group1
+++ b/tools/perf/tests/attr/test-record-group1
@@ -15,6 +15,5 @@ config=1
15 | sample_type=327 | 15 | sample_type=327 |
16 | mmap=0 | 16 | mmap=0 |
17 | comm=0 | 17 | comm=0 |
18 | # TODO this is disabled for --group option, enabled otherwise | 18 | enable_on_exec=0 |
19 | # check why.. | 19 | disabled=0 |
20 | enable_on_exec=1 | ||
diff --git a/tools/perf/tests/attr/test-stat-group1 b/tools/perf/tests/attr/test-stat-group1
index 5ae2718de864..2a1f86e4a904 100644
--- a/tools/perf/tests/attr/test-stat-group1
+++ b/tools/perf/tests/attr/test-stat-group1
@@ -11,7 +11,5 @@ group_fd=-1
11 | fd=2 | 11 | fd=2 |
12 | group_fd=1 | 12 | group_fd=1 |
13 | config=1 | 13 | config=1 |
14 | # TODO both disabled and enable_on_exec are disabled for --group option, | 14 | disabled=0 |
15 | # enabled otherwise, check why.. | 15 | enable_on_exec=0 |
16 | disabled=1 | ||
17 | enable_on_exec=1 | ||
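Editor's note: the expected-attr updates above reflect how event groups are opened: only the group leader is created with disabled=1 (plus enable_on_exec=1 when perf forks the workload), while the other members are opened enabled but tied to the leader via group_fd, so they are scheduled and counted together with it. A minimal, hedged sketch of that leader/member pattern with raw perf_event_open() calls follows; it is illustrative only (the perf_event_open() and open_counter() helpers are local to this sketch, not perf APIs) and is not code from this commit.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static int open_counter(uint64_t config, int group_fd, int is_leader)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = config;
        attr.exclude_kernel = 1;
        attr.disabled = is_leader;      /* only the leader starts disabled */

        return perf_event_open(&attr, 0, -1, group_fd, 0);
}

int main(void)
{
        uint64_t cycles = 0, instructions = 0;
        int leader, member;
        volatile int spin;

        leader = open_counter(PERF_COUNT_HW_CPU_CYCLES, -1, 1);
        member = open_counter(PERF_COUNT_HW_INSTRUCTIONS, leader, 0);
        if (leader < 0 || member < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* Enabling the leader starts the whole group together. */
        ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);
        for (spin = 0; spin < 1000000; spin++)
                ;
        ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);

        read(leader, &cycles, sizeof(cycles));
        read(member, &instructions, sizeof(instructions));
        printf("cycles=%llu instructions=%llu\n",
               (unsigned long long)cycles, (unsigned long long)instructions);
        return 0;
}

This is why the attr test files now expect disabled=0 and enable_on_exec=0 on the second fd (group_fd=1): only the leader carries those flags, and the members simply follow it.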
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 5d4354e24457..186f67535494 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -4,1388 +4,11 @@
4 | * Builtin regression testing command: ever growing number of sanity tests | 4 | * Builtin regression testing command: ever growing number of sanity tests |
5 | */ | 5 | */ |
6 | #include "builtin.h" | 6 | #include "builtin.h" |
7 | 7 | #include "tests.h" | |
8 | #include "util/cache.h" | 8 | #include "debug.h" |
9 | #include "util/color.h" | 9 | #include "color.h" |
10 | #include "util/debug.h" | 10 | #include "parse-options.h" |
11 | #include "util/debugfs.h" | 11 | #include "symbol.h" |
12 | #include "util/evlist.h" | ||
13 | #include "util/machine.h" | ||
14 | #include "util/parse-options.h" | ||
15 | #include "util/parse-events.h" | ||
16 | #include "util/symbol.h" | ||
17 | #include "util/thread_map.h" | ||
18 | #include "util/pmu.h" | ||
19 | #include "event-parse.h" | ||
20 | #include "../../include/linux/hw_breakpoint.h" | ||
21 | |||
22 | #include <sys/mman.h> | ||
23 | |||
24 | static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused, | ||
25 | struct symbol *sym) | ||
26 | { | ||
27 | bool *visited = symbol__priv(sym); | ||
28 | *visited = true; | ||
29 | return 0; | ||
30 | } | ||
31 | |||
32 | static int test__vmlinux_matches_kallsyms(void) | ||
33 | { | ||
34 | int err = -1; | ||
35 | struct rb_node *nd; | ||
36 | struct symbol *sym; | ||
37 | struct map *kallsyms_map, *vmlinux_map; | ||
38 | struct machine kallsyms, vmlinux; | ||
39 | enum map_type type = MAP__FUNCTION; | ||
40 | struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", }; | ||
41 | |||
42 | /* | ||
43 | * Step 1: | ||
44 | * | ||
45 | * Init the machines that will hold kernel, modules obtained from | ||
46 | * both vmlinux + .ko files and from /proc/kallsyms split by modules. | ||
47 | */ | ||
48 | machine__init(&kallsyms, "", HOST_KERNEL_ID); | ||
49 | machine__init(&vmlinux, "", HOST_KERNEL_ID); | ||
50 | |||
51 | /* | ||
52 | * Step 2: | ||
53 | * | ||
54 | * Create the kernel maps for kallsyms and the DSO where we will then | ||
55 | * load /proc/kallsyms. Also create the modules maps from /proc/modules | ||
56 | * and find the .ko files that match them in /lib/modules/`uname -r`/. | ||
57 | */ | ||
58 | if (machine__create_kernel_maps(&kallsyms) < 0) { | ||
59 | pr_debug("machine__create_kernel_maps "); | ||
60 | return -1; | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * Step 3: | ||
65 | * | ||
66 | * Load and split /proc/kallsyms into multiple maps, one per module. | ||
67 | */ | ||
68 | if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) { | ||
69 | pr_debug("dso__load_kallsyms "); | ||
70 | goto out; | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * Step 4: | ||
75 | * | ||
76 | * kallsyms will be internally on demand sorted by name so that we can | ||
77 | * find the reference relocation * symbol, i.e. the symbol we will use | ||
78 | * to see if the running kernel was relocated by checking if it has the | ||
79 | * same value in the vmlinux file we load. | ||
80 | */ | ||
81 | kallsyms_map = machine__kernel_map(&kallsyms, type); | ||
82 | |||
83 | sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL); | ||
84 | if (sym == NULL) { | ||
85 | pr_debug("dso__find_symbol_by_name "); | ||
86 | goto out; | ||
87 | } | ||
88 | |||
89 | ref_reloc_sym.addr = sym->start; | ||
90 | |||
91 | /* | ||
92 | * Step 5: | ||
93 | * | ||
94 | * Now repeat step 2, this time for the vmlinux file we'll auto-locate. | ||
95 | */ | ||
96 | if (machine__create_kernel_maps(&vmlinux) < 0) { | ||
97 | pr_debug("machine__create_kernel_maps "); | ||
98 | goto out; | ||
99 | } | ||
100 | |||
101 | vmlinux_map = machine__kernel_map(&vmlinux, type); | ||
102 | map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym; | ||
103 | |||
104 | /* | ||
105 | * Step 6: | ||
106 | * | ||
107 | * Locate a vmlinux file in the vmlinux path that has a buildid that | ||
108 | * matches the one of the running kernel. | ||
109 | * | ||
110 | * While doing that look if we find the ref reloc symbol, if we find it | ||
111 | * we'll have its ref_reloc_symbol.unrelocated_addr and then | ||
112 | * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines | ||
113 | * to fixup the symbols. | ||
114 | */ | ||
115 | if (machine__load_vmlinux_path(&vmlinux, type, | ||
116 | vmlinux_matches_kallsyms_filter) <= 0) { | ||
117 | pr_debug("machine__load_vmlinux_path "); | ||
118 | goto out; | ||
119 | } | ||
120 | |||
121 | err = 0; | ||
122 | /* | ||
123 | * Step 7: | ||
124 | * | ||
125 | * Now look at the symbols in the vmlinux DSO and check if we find all of them | ||
126 | * in the kallsyms dso. For the ones that are in both, check its names and | ||
127 | * end addresses too. | ||
128 | */ | ||
129 | for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) { | ||
130 | struct symbol *pair, *first_pair; | ||
131 | bool backwards = true; | ||
132 | |||
133 | sym = rb_entry(nd, struct symbol, rb_node); | ||
134 | |||
135 | if (sym->start == sym->end) | ||
136 | continue; | ||
137 | |||
138 | first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL); | ||
139 | pair = first_pair; | ||
140 | |||
141 | if (pair && pair->start == sym->start) { | ||
142 | next_pair: | ||
143 | if (strcmp(sym->name, pair->name) == 0) { | ||
144 | /* | ||
145 | * kallsyms don't have the symbol end, so we | ||
146 | * set that by using the next symbol start - 1, | ||
147 | * in some cases we get this up to a page | ||
148 | * wrong, trace_kmalloc when I was developing | ||
149 | * this code was one such example, 2106 bytes | ||
150 | * off the real size. More than that and we | ||
151 | * _really_ have a problem. | ||
152 | */ | ||
153 | s64 skew = sym->end - pair->end; | ||
154 | if (llabs(skew) < page_size) | ||
155 | continue; | ||
156 | |||
157 | pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n", | ||
158 | sym->start, sym->name, sym->end, pair->end); | ||
159 | } else { | ||
160 | struct rb_node *nnd; | ||
161 | detour: | ||
162 | nnd = backwards ? rb_prev(&pair->rb_node) : | ||
163 | rb_next(&pair->rb_node); | ||
164 | if (nnd) { | ||
165 | struct symbol *next = rb_entry(nnd, struct symbol, rb_node); | ||
166 | |||
167 | if (next->start == sym->start) { | ||
168 | pair = next; | ||
169 | goto next_pair; | ||
170 | } | ||
171 | } | ||
172 | |||
173 | if (backwards) { | ||
174 | backwards = false; | ||
175 | pair = first_pair; | ||
176 | goto detour; | ||
177 | } | ||
178 | |||
179 | pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n", | ||
180 | sym->start, sym->name, pair->name); | ||
181 | } | ||
182 | } else | ||
183 | pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name); | ||
184 | |||
185 | err = -1; | ||
186 | } | ||
187 | |||
188 | if (!verbose) | ||
189 | goto out; | ||
190 | |||
191 | pr_info("Maps only in vmlinux:\n"); | ||
192 | |||
193 | for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) { | ||
194 | struct map *pos = rb_entry(nd, struct map, rb_node), *pair; | ||
195 | /* | ||
196 | * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while | ||
197 | * the kernel will have the path for the vmlinux file being used, | ||
198 | * so use the short name, less descriptive but the same ("[kernel]" in | ||
199 | * both cases. | ||
200 | */ | ||
201 | pair = map_groups__find_by_name(&kallsyms.kmaps, type, | ||
202 | (pos->dso->kernel ? | ||
203 | pos->dso->short_name : | ||
204 | pos->dso->name)); | ||
205 | if (pair) | ||
206 | pair->priv = 1; | ||
207 | else | ||
208 | map__fprintf(pos, stderr); | ||
209 | } | ||
210 | |||
211 | pr_info("Maps in vmlinux with a different name in kallsyms:\n"); | ||
212 | |||
213 | for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) { | ||
214 | struct map *pos = rb_entry(nd, struct map, rb_node), *pair; | ||
215 | |||
216 | pair = map_groups__find(&kallsyms.kmaps, type, pos->start); | ||
217 | if (pair == NULL || pair->priv) | ||
218 | continue; | ||
219 | |||
220 | if (pair->start == pos->start) { | ||
221 | pair->priv = 1; | ||
222 | pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as", | ||
223 | pos->start, pos->end, pos->pgoff, pos->dso->name); | ||
224 | if (pos->pgoff != pair->pgoff || pos->end != pair->end) | ||
225 | pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "", | ||
226 | pair->start, pair->end, pair->pgoff); | ||
227 | pr_info(" %s\n", pair->dso->name); | ||
228 | pair->priv = 1; | ||
229 | } | ||
230 | } | ||
231 | |||
232 | pr_info("Maps only in kallsyms:\n"); | ||
233 | |||
234 | for (nd = rb_first(&kallsyms.kmaps.maps[type]); | ||
235 | nd; nd = rb_next(nd)) { | ||
236 | struct map *pos = rb_entry(nd, struct map, rb_node); | ||
237 | |||
238 | if (!pos->priv) | ||
239 | map__fprintf(pos, stderr); | ||
240 | } | ||
241 | out: | ||
242 | return err; | ||
243 | } | ||
244 | |||
245 | #include "util/cpumap.h" | ||
246 | #include "util/evsel.h" | ||
247 | #include <sys/types.h> | ||
248 | |||
249 | static int trace_event__id(const char *evname) | ||
250 | { | ||
251 | char *filename; | ||
252 | int err = -1, fd; | ||
253 | |||
254 | if (asprintf(&filename, | ||
255 | "%s/syscalls/%s/id", | ||
256 | tracing_events_path, evname) < 0) | ||
257 | return -1; | ||
258 | |||
259 | fd = open(filename, O_RDONLY); | ||
260 | if (fd >= 0) { | ||
261 | char id[16]; | ||
262 | if (read(fd, id, sizeof(id)) > 0) | ||
263 | err = atoi(id); | ||
264 | close(fd); | ||
265 | } | ||
266 | |||
267 | free(filename); | ||
268 | return err; | ||
269 | } | ||
270 | |||
271 | static int test__open_syscall_event(void) | ||
272 | { | ||
273 | int err = -1, fd; | ||
274 | struct thread_map *threads; | ||
275 | struct perf_evsel *evsel; | ||
276 | struct perf_event_attr attr; | ||
277 | unsigned int nr_open_calls = 111, i; | ||
278 | int id = trace_event__id("sys_enter_open"); | ||
279 | |||
280 | if (id < 0) { | ||
281 | pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); | ||
282 | return -1; | ||
283 | } | ||
284 | |||
285 | threads = thread_map__new(-1, getpid(), UINT_MAX); | ||
286 | if (threads == NULL) { | ||
287 | pr_debug("thread_map__new\n"); | ||
288 | return -1; | ||
289 | } | ||
290 | |||
291 | memset(&attr, 0, sizeof(attr)); | ||
292 | attr.type = PERF_TYPE_TRACEPOINT; | ||
293 | attr.config = id; | ||
294 | evsel = perf_evsel__new(&attr, 0); | ||
295 | if (evsel == NULL) { | ||
296 | pr_debug("perf_evsel__new\n"); | ||
297 | goto out_thread_map_delete; | ||
298 | } | ||
299 | |||
300 | if (perf_evsel__open_per_thread(evsel, threads) < 0) { | ||
301 | pr_debug("failed to open counter: %s, " | ||
302 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | ||
303 | strerror(errno)); | ||
304 | goto out_evsel_delete; | ||
305 | } | ||
306 | |||
307 | for (i = 0; i < nr_open_calls; ++i) { | ||
308 | fd = open("/etc/passwd", O_RDONLY); | ||
309 | close(fd); | ||
310 | } | ||
311 | |||
312 | if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) { | ||
313 | pr_debug("perf_evsel__read_on_cpu\n"); | ||
314 | goto out_close_fd; | ||
315 | } | ||
316 | |||
317 | if (evsel->counts->cpu[0].val != nr_open_calls) { | ||
318 | pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n", | ||
319 | nr_open_calls, evsel->counts->cpu[0].val); | ||
320 | goto out_close_fd; | ||
321 | } | ||
322 | |||
323 | err = 0; | ||
324 | out_close_fd: | ||
325 | perf_evsel__close_fd(evsel, 1, threads->nr); | ||
326 | out_evsel_delete: | ||
327 | perf_evsel__delete(evsel); | ||
328 | out_thread_map_delete: | ||
329 | thread_map__delete(threads); | ||
330 | return err; | ||
331 | } | ||
332 | |||
333 | #include <sched.h> | ||
334 | |||
335 | static int test__open_syscall_event_on_all_cpus(void) | ||
336 | { | ||
337 | int err = -1, fd, cpu; | ||
338 | struct thread_map *threads; | ||
339 | struct cpu_map *cpus; | ||
340 | struct perf_evsel *evsel; | ||
341 | struct perf_event_attr attr; | ||
342 | unsigned int nr_open_calls = 111, i; | ||
343 | cpu_set_t cpu_set; | ||
344 | int id = trace_event__id("sys_enter_open"); | ||
345 | |||
346 | if (id < 0) { | ||
347 | pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); | ||
348 | return -1; | ||
349 | } | ||
350 | |||
351 | threads = thread_map__new(-1, getpid(), UINT_MAX); | ||
352 | if (threads == NULL) { | ||
353 | pr_debug("thread_map__new\n"); | ||
354 | return -1; | ||
355 | } | ||
356 | |||
357 | cpus = cpu_map__new(NULL); | ||
358 | if (cpus == NULL) { | ||
359 | pr_debug("cpu_map__new\n"); | ||
360 | goto out_thread_map_delete; | ||
361 | } | ||
362 | |||
363 | |||
364 | CPU_ZERO(&cpu_set); | ||
365 | |||
366 | memset(&attr, 0, sizeof(attr)); | ||
367 | attr.type = PERF_TYPE_TRACEPOINT; | ||
368 | attr.config = id; | ||
369 | evsel = perf_evsel__new(&attr, 0); | ||
370 | if (evsel == NULL) { | ||
371 | pr_debug("perf_evsel__new\n"); | ||
372 | goto out_thread_map_delete; | ||
373 | } | ||
374 | |||
375 | if (perf_evsel__open(evsel, cpus, threads) < 0) { | ||
376 | pr_debug("failed to open counter: %s, " | ||
377 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | ||
378 | strerror(errno)); | ||
379 | goto out_evsel_delete; | ||
380 | } | ||
381 | |||
382 | for (cpu = 0; cpu < cpus->nr; ++cpu) { | ||
383 | unsigned int ncalls = nr_open_calls + cpu; | ||
384 | /* | ||
385 | * XXX eventually lift this restriction in a way that | ||
386 | * keeps perf building on older glibc installations | ||
387 | * without CPU_ALLOC. 1024 cpus in 2010 still seems | ||
388 | * a reasonable upper limit tho :-) | ||
389 | */ | ||
390 | if (cpus->map[cpu] >= CPU_SETSIZE) { | ||
391 | pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); | ||
392 | continue; | ||
393 | } | ||
394 | |||
395 | CPU_SET(cpus->map[cpu], &cpu_set); | ||
396 | if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { | ||
397 | pr_debug("sched_setaffinity() failed on CPU %d: %s ", | ||
398 | cpus->map[cpu], | ||
399 | strerror(errno)); | ||
400 | goto out_close_fd; | ||
401 | } | ||
402 | for (i = 0; i < ncalls; ++i) { | ||
403 | fd = open("/etc/passwd", O_RDONLY); | ||
404 | close(fd); | ||
405 | } | ||
406 | CPU_CLR(cpus->map[cpu], &cpu_set); | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * Here we need to explicitely preallocate the counts, as if | ||
411 | * we use the auto allocation it will allocate just for 1 cpu, | ||
412 | * as we start by cpu 0. | ||
413 | */ | ||
414 | if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) { | ||
415 | pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr); | ||
416 | goto out_close_fd; | ||
417 | } | ||
418 | |||
419 | err = 0; | ||
420 | |||
421 | for (cpu = 0; cpu < cpus->nr; ++cpu) { | ||
422 | unsigned int expected; | ||
423 | |||
424 | if (cpus->map[cpu] >= CPU_SETSIZE) | ||
425 | continue; | ||
426 | |||
427 | if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) { | ||
428 | pr_debug("perf_evsel__read_on_cpu\n"); | ||
429 | err = -1; | ||
430 | break; | ||
431 | } | ||
432 | |||
433 | expected = nr_open_calls + cpu; | ||
434 | if (evsel->counts->cpu[cpu].val != expected) { | ||
435 | pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n", | ||
436 | expected, cpus->map[cpu], evsel->counts->cpu[cpu].val); | ||
437 | err = -1; | ||
438 | } | ||
439 | } | ||
440 | |||
441 | out_close_fd: | ||
442 | perf_evsel__close_fd(evsel, 1, threads->nr); | ||
443 | out_evsel_delete: | ||
444 | perf_evsel__delete(evsel); | ||
445 | out_thread_map_delete: | ||
446 | thread_map__delete(threads); | ||
447 | return err; | ||
448 | } | ||
449 | |||
450 | /* | ||
451 | * This test will generate random numbers of calls to some getpid syscalls, | ||
452 | * then establish an mmap for a group of events that are created to monitor | ||
453 | * the syscalls. | ||
454 | * | ||
455 | * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated | ||
456 | * sample.id field to map back to its respective perf_evsel instance. | ||
457 | * | ||
458 | * Then it checks if the number of syscalls reported as perf events by | ||
459 | * the kernel corresponds to the number of syscalls made. | ||
460 | */ | ||
461 | static int test__basic_mmap(void) | ||
462 | { | ||
463 | int err = -1; | ||
464 | union perf_event *event; | ||
465 | struct thread_map *threads; | ||
466 | struct cpu_map *cpus; | ||
467 | struct perf_evlist *evlist; | ||
468 | struct perf_event_attr attr = { | ||
469 | .type = PERF_TYPE_TRACEPOINT, | ||
470 | .read_format = PERF_FORMAT_ID, | ||
471 | .sample_type = PERF_SAMPLE_ID, | ||
472 | .watermark = 0, | ||
473 | }; | ||
474 | cpu_set_t cpu_set; | ||
475 | const char *syscall_names[] = { "getsid", "getppid", "getpgrp", | ||
476 | "getpgid", }; | ||
477 | pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp, | ||
478 | (void*)getpgid }; | ||
479 | #define nsyscalls ARRAY_SIZE(syscall_names) | ||
480 | int ids[nsyscalls]; | ||
481 | unsigned int nr_events[nsyscalls], | ||
482 | expected_nr_events[nsyscalls], i, j; | ||
483 | struct perf_evsel *evsels[nsyscalls], *evsel; | ||
484 | |||
485 | for (i = 0; i < nsyscalls; ++i) { | ||
486 | char name[64]; | ||
487 | |||
488 | snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); | ||
489 | ids[i] = trace_event__id(name); | ||
490 | if (ids[i] < 0) { | ||
491 | pr_debug("Is debugfs mounted on /sys/kernel/debug?\n"); | ||
492 | return -1; | ||
493 | } | ||
494 | nr_events[i] = 0; | ||
495 | expected_nr_events[i] = random() % 257; | ||
496 | } | ||
497 | |||
498 | threads = thread_map__new(-1, getpid(), UINT_MAX); | ||
499 | if (threads == NULL) { | ||
500 | pr_debug("thread_map__new\n"); | ||
501 | return -1; | ||
502 | } | ||
503 | |||
504 | cpus = cpu_map__new(NULL); | ||
505 | if (cpus == NULL) { | ||
506 | pr_debug("cpu_map__new\n"); | ||
507 | goto out_free_threads; | ||
508 | } | ||
509 | |||
510 | CPU_ZERO(&cpu_set); | ||
511 | CPU_SET(cpus->map[0], &cpu_set); | ||
512 | sched_setaffinity(0, sizeof(cpu_set), &cpu_set); | ||
513 | if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { | ||
514 | pr_debug("sched_setaffinity() failed on CPU %d: %s ", | ||
515 | cpus->map[0], strerror(errno)); | ||
516 | goto out_free_cpus; | ||
517 | } | ||
518 | |||
519 | evlist = perf_evlist__new(cpus, threads); | ||
520 | if (evlist == NULL) { | ||
521 | pr_debug("perf_evlist__new\n"); | ||
522 | goto out_free_cpus; | ||
523 | } | ||
524 | |||
525 | /* anonymous union fields, can't be initialized above */ | ||
526 | attr.wakeup_events = 1; | ||
527 | attr.sample_period = 1; | ||
528 | |||
529 | for (i = 0; i < nsyscalls; ++i) { | ||
530 | attr.config = ids[i]; | ||
531 | evsels[i] = perf_evsel__new(&attr, i); | ||
532 | if (evsels[i] == NULL) { | ||
533 | pr_debug("perf_evsel__new\n"); | ||
534 | goto out_free_evlist; | ||
535 | } | ||
536 | |||
537 | perf_evlist__add(evlist, evsels[i]); | ||
538 | |||
539 | if (perf_evsel__open(evsels[i], cpus, threads) < 0) { | ||
540 | pr_debug("failed to open counter: %s, " | ||
541 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | ||
542 | strerror(errno)); | ||
543 | goto out_close_fd; | ||
544 | } | ||
545 | } | ||
546 | |||
547 | if (perf_evlist__mmap(evlist, 128, true) < 0) { | ||
548 | pr_debug("failed to mmap events: %d (%s)\n", errno, | ||
549 | strerror(errno)); | ||
550 | goto out_close_fd; | ||
551 | } | ||
552 | |||
553 | for (i = 0; i < nsyscalls; ++i) | ||
554 | for (j = 0; j < expected_nr_events[i]; ++j) { | ||
555 | int foo = syscalls[i](); | ||
556 | ++foo; | ||
557 | } | ||
558 | |||
559 | while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { | ||
560 | struct perf_sample sample; | ||
561 | |||
562 | if (event->header.type != PERF_RECORD_SAMPLE) { | ||
563 | pr_debug("unexpected %s event\n", | ||
564 | perf_event__name(event->header.type)); | ||
565 | goto out_munmap; | ||
566 | } | ||
567 | |||
568 | err = perf_evlist__parse_sample(evlist, event, &sample); | ||
569 | if (err) { | ||
570 | pr_err("Can't parse sample, err = %d\n", err); | ||
571 | goto out_munmap; | ||
572 | } | ||
573 | |||
574 | evsel = perf_evlist__id2evsel(evlist, sample.id); | ||
575 | if (evsel == NULL) { | ||
576 | pr_debug("event with id %" PRIu64 | ||
577 | " doesn't map to an evsel\n", sample.id); | ||
578 | goto out_munmap; | ||
579 | } | ||
580 | nr_events[evsel->idx]++; | ||
581 | } | ||
582 | |||
583 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
584 | if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { | ||
585 | pr_debug("expected %d %s events, got %d\n", | ||
586 | expected_nr_events[evsel->idx], | ||
587 | perf_evsel__name(evsel), nr_events[evsel->idx]); | ||
588 | goto out_munmap; | ||
589 | } | ||
590 | } | ||
591 | |||
592 | err = 0; | ||
593 | out_munmap: | ||
594 | perf_evlist__munmap(evlist); | ||
595 | out_close_fd: | ||
596 | for (i = 0; i < nsyscalls; ++i) | ||
597 | perf_evsel__close_fd(evsels[i], 1, threads->nr); | ||
598 | out_free_evlist: | ||
599 | perf_evlist__delete(evlist); | ||
600 | out_free_cpus: | ||
601 | cpu_map__delete(cpus); | ||
602 | out_free_threads: | ||
603 | thread_map__delete(threads); | ||
604 | return err; | ||
605 | #undef nsyscalls | ||
606 | } | ||
607 | |||
608 | static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp) | ||
609 | { | ||
610 | int i, cpu = -1, nrcpus = 1024; | ||
611 | realloc: | ||
612 | CPU_ZERO(maskp); | ||
613 | |||
614 | if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) { | ||
615 | if (errno == EINVAL && nrcpus < (1024 << 8)) { | ||
616 | nrcpus = nrcpus << 2; | ||
617 | goto realloc; | ||
618 | } | ||
619 | perror("sched_getaffinity"); | ||
620 | return -1; | ||
621 | } | ||
622 | |||
623 | for (i = 0; i < nrcpus; i++) { | ||
624 | if (CPU_ISSET(i, maskp)) { | ||
625 | if (cpu == -1) | ||
626 | cpu = i; | ||
627 | else | ||
628 | CPU_CLR(i, maskp); | ||
629 | } | ||
630 | } | ||
631 | |||
632 | return cpu; | ||
633 | } | ||
634 | |||
635 | static int test__PERF_RECORD(void) | ||
636 | { | ||
637 | struct perf_record_opts opts = { | ||
638 | .target = { | ||
639 | .uid = UINT_MAX, | ||
640 | .uses_mmap = true, | ||
641 | }, | ||
642 | .no_delay = true, | ||
643 | .freq = 10, | ||
644 | .mmap_pages = 256, | ||
645 | }; | ||
646 | cpu_set_t cpu_mask; | ||
647 | size_t cpu_mask_size = sizeof(cpu_mask); | ||
648 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
649 | struct perf_evsel *evsel; | ||
650 | struct perf_sample sample; | ||
651 | const char *cmd = "sleep"; | ||
652 | const char *argv[] = { cmd, "1", NULL, }; | ||
653 | char *bname; | ||
654 | u64 prev_time = 0; | ||
655 | bool found_cmd_mmap = false, | ||
656 | found_libc_mmap = false, | ||
657 | found_vdso_mmap = false, | ||
658 | found_ld_mmap = false; | ||
659 | int err = -1, errs = 0, i, wakeups = 0; | ||
660 | u32 cpu; | ||
661 | int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; | ||
662 | |||
663 | if (evlist == NULL || argv == NULL) { | ||
664 | pr_debug("Not enough memory to create evlist\n"); | ||
665 | goto out; | ||
666 | } | ||
667 | |||
668 | /* | ||
669 | * We need at least one evsel in the evlist, use the default | ||
670 | * one: "cycles". | ||
671 | */ | ||
672 | err = perf_evlist__add_default(evlist); | ||
673 | if (err < 0) { | ||
674 | pr_debug("Not enough memory to create evsel\n"); | ||
675 | goto out_delete_evlist; | ||
676 | } | ||
677 | |||
678 | /* | ||
679 | * Create maps of threads and cpus to monitor. In this case | ||
680 | * we start with all threads and cpus (-1, -1) but then in | ||
681 | * perf_evlist__prepare_workload we'll fill in the only thread | ||
682 | * we're monitoring, the one forked there. | ||
683 | */ | ||
684 | err = perf_evlist__create_maps(evlist, &opts.target); | ||
685 | if (err < 0) { | ||
686 | pr_debug("Not enough memory to create thread/cpu maps\n"); | ||
687 | goto out_delete_evlist; | ||
688 | } | ||
689 | |||
690 | /* | ||
691 | * Prepare the workload in argv[] to run, it'll fork it, and then wait | ||
692 | * for perf_evlist__start_workload() to exec it. This is done this way | ||
693 | * so that we have time to open the evlist (calling sys_perf_event_open | ||
694 | * on all the fds) and then mmap them. | ||
695 | */ | ||
696 | err = perf_evlist__prepare_workload(evlist, &opts, argv); | ||
697 | if (err < 0) { | ||
698 | pr_debug("Couldn't run the workload!\n"); | ||
699 | goto out_delete_evlist; | ||
700 | } | ||
701 | |||
702 | /* | ||
703 | * Config the evsels, setting attr->comm on the first one, etc. | ||
704 | */ | ||
705 | evsel = perf_evlist__first(evlist); | ||
706 | evsel->attr.sample_type |= PERF_SAMPLE_CPU; | ||
707 | evsel->attr.sample_type |= PERF_SAMPLE_TID; | ||
708 | evsel->attr.sample_type |= PERF_SAMPLE_TIME; | ||
709 | perf_evlist__config_attrs(evlist, &opts); | ||
710 | |||
711 | err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); | ||
712 | if (err < 0) { | ||
713 | pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); | ||
714 | goto out_delete_evlist; | ||
715 | } | ||
716 | |||
717 | cpu = err; | ||
718 | |||
719 | /* | ||
720 | * So that we can check perf_sample.cpu on all the samples. | ||
721 | */ | ||
722 | if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { | ||
723 | pr_debug("sched_setaffinity: %s\n", strerror(errno)); | ||
724 | goto out_delete_evlist; | ||
725 | } | ||
726 | |||
727 | /* | ||
728 | * Call sys_perf_event_open on all the fds on all the evsels, | ||
729 | * grouping them if asked to. | ||
730 | */ | ||
731 | err = perf_evlist__open(evlist); | ||
732 | if (err < 0) { | ||
733 | pr_debug("perf_evlist__open: %s\n", strerror(errno)); | ||
734 | goto out_delete_evlist; | ||
735 | } | ||
736 | |||
737 | /* | ||
738 | * mmap the first fd on a given CPU and ask for events for the other | ||
739 | * fds in the same CPU to be injected in the same mmap ring buffer | ||
740 | * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)). | ||
741 | */ | ||
742 | err = perf_evlist__mmap(evlist, opts.mmap_pages, false); | ||
743 | if (err < 0) { | ||
744 | pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); | ||
745 | goto out_delete_evlist; | ||
746 | } | ||
747 | |||
748 | /* | ||
749 | * Now that all is properly set up, enable the events, they will | ||
750 | * count just on workload.pid, which will start... | ||
751 | */ | ||
752 | perf_evlist__enable(evlist); | ||
753 | |||
754 | /* | ||
755 | * Now! | ||
756 | */ | ||
757 | perf_evlist__start_workload(evlist); | ||
758 | |||
759 | while (1) { | ||
760 | int before = total_events; | ||
761 | |||
762 | for (i = 0; i < evlist->nr_mmaps; i++) { | ||
763 | union perf_event *event; | ||
764 | |||
765 | while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { | ||
766 | const u32 type = event->header.type; | ||
767 | const char *name = perf_event__name(type); | ||
768 | |||
769 | ++total_events; | ||
770 | if (type < PERF_RECORD_MAX) | ||
771 | nr_events[type]++; | ||
772 | |||
773 | err = perf_evlist__parse_sample(evlist, event, &sample); | ||
774 | if (err < 0) { | ||
775 | if (verbose) | ||
776 | perf_event__fprintf(event, stderr); | ||
777 | pr_debug("Couldn't parse sample\n"); | ||
778 | goto out_err; | ||
779 | } | ||
780 | |||
781 | if (verbose) { | ||
782 | pr_info("%" PRIu64" %d ", sample.time, sample.cpu); | ||
783 | perf_event__fprintf(event, stderr); | ||
784 | } | ||
785 | |||
786 | if (prev_time > sample.time) { | ||
787 | pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n", | ||
788 | name, prev_time, sample.time); | ||
789 | ++errs; | ||
790 | } | ||
791 | |||
792 | prev_time = sample.time; | ||
793 | |||
794 | if (sample.cpu != cpu) { | ||
795 | pr_debug("%s with unexpected cpu, expected %d, got %d\n", | ||
796 | name, cpu, sample.cpu); | ||
797 | ++errs; | ||
798 | } | ||
799 | |||
800 | if ((pid_t)sample.pid != evlist->workload.pid) { | ||
801 | pr_debug("%s with unexpected pid, expected %d, got %d\n", | ||
802 | name, evlist->workload.pid, sample.pid); | ||
803 | ++errs; | ||
804 | } | ||
805 | |||
806 | if ((pid_t)sample.tid != evlist->workload.pid) { | ||
807 | pr_debug("%s with unexpected tid, expected %d, got %d\n", | ||
808 | name, evlist->workload.pid, sample.tid); | ||
809 | ++errs; | ||
810 | } | ||
811 | |||
812 | if ((type == PERF_RECORD_COMM || | ||
813 | type == PERF_RECORD_MMAP || | ||
814 | type == PERF_RECORD_FORK || | ||
815 | type == PERF_RECORD_EXIT) && | ||
816 | (pid_t)event->comm.pid != evlist->workload.pid) { | ||
817 | pr_debug("%s with unexpected pid/tid\n", name); | ||
818 | ++errs; | ||
819 | } | ||
820 | |||
821 | if ((type == PERF_RECORD_COMM || | ||
822 | type == PERF_RECORD_MMAP) && | ||
823 | event->comm.pid != event->comm.tid) { | ||
824 | pr_debug("%s with different pid/tid!\n", name); | ||
825 | ++errs; | ||
826 | } | ||
827 | |||
828 | switch (type) { | ||
829 | case PERF_RECORD_COMM: | ||
830 | if (strcmp(event->comm.comm, cmd)) { | ||
831 | pr_debug("%s with unexpected comm!\n", name); | ||
832 | ++errs; | ||
833 | } | ||
834 | break; | ||
835 | case PERF_RECORD_EXIT: | ||
836 | goto found_exit; | ||
837 | case PERF_RECORD_MMAP: | ||
838 | bname = strrchr(event->mmap.filename, '/'); | ||
839 | if (bname != NULL) { | ||
840 | if (!found_cmd_mmap) | ||
841 | found_cmd_mmap = !strcmp(bname + 1, cmd); | ||
842 | if (!found_libc_mmap) | ||
843 | found_libc_mmap = !strncmp(bname + 1, "libc", 4); | ||
844 | if (!found_ld_mmap) | ||
845 | found_ld_mmap = !strncmp(bname + 1, "ld", 2); | ||
846 | } else if (!found_vdso_mmap) | ||
847 | found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]"); | ||
848 | break; | ||
849 | |||
850 | case PERF_RECORD_SAMPLE: | ||
851 | /* Just ignore samples for now */ | ||
852 | break; | ||
853 | default: | ||
854 | pr_debug("Unexpected perf_event->header.type %d!\n", | ||
855 | type); | ||
856 | ++errs; | ||
857 | } | ||
858 | } | ||
859 | } | ||
860 | |||
861 | /* | ||
862 | * We don't use poll here because at least at 3.1 times the | ||
863 | * PERF_RECORD_{!SAMPLE} events don't honour | ||
864 | * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does. | ||
865 | */ | ||
866 | if (total_events == before && false) | ||
867 | poll(evlist->pollfd, evlist->nr_fds, -1); | ||
868 | |||
869 | sleep(1); | ||
870 | if (++wakeups > 5) { | ||
871 | pr_debug("No PERF_RECORD_EXIT event!\n"); | ||
872 | break; | ||
873 | } | ||
874 | } | ||
875 | |||
876 | found_exit: | ||
877 | if (nr_events[PERF_RECORD_COMM] > 1) { | ||
878 | pr_debug("Excessive number of PERF_RECORD_COMM events!\n"); | ||
879 | ++errs; | ||
880 | } | ||
881 | |||
882 | if (nr_events[PERF_RECORD_COMM] == 0) { | ||
883 | pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd); | ||
884 | ++errs; | ||
885 | } | ||
886 | |||
887 | if (!found_cmd_mmap) { | ||
888 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd); | ||
889 | ++errs; | ||
890 | } | ||
891 | |||
892 | if (!found_libc_mmap) { | ||
893 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc"); | ||
894 | ++errs; | ||
895 | } | ||
896 | |||
897 | if (!found_ld_mmap) { | ||
898 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld"); | ||
899 | ++errs; | ||
900 | } | ||
901 | |||
902 | if (!found_vdso_mmap) { | ||
903 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]"); | ||
904 | ++errs; | ||
905 | } | ||
906 | out_err: | ||
907 | perf_evlist__munmap(evlist); | ||
908 | out_delete_evlist: | ||
909 | perf_evlist__delete(evlist); | ||
910 | out: | ||
911 | return (err < 0 || errs > 0) ? -1 : 0; | ||
912 | } | ||
913 | |||
914 | |||
915 | #if defined(__x86_64__) || defined(__i386__) | ||
916 | |||
917 | #define barrier() asm volatile("" ::: "memory") | ||
918 | |||
919 | static u64 rdpmc(unsigned int counter) | ||
920 | { | ||
921 | unsigned int low, high; | ||
922 | |||
923 | asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter)); | ||
924 | |||
925 | return low | ((u64)high) << 32; | ||
926 | } | ||
927 | |||
928 | static u64 rdtsc(void) | ||
929 | { | ||
930 | unsigned int low, high; | ||
931 | |||
932 | asm volatile("rdtsc" : "=a" (low), "=d" (high)); | ||
933 | |||
934 | return low | ((u64)high) << 32; | ||
935 | } | ||
936 | |||
937 | static u64 mmap_read_self(void *addr) | ||
938 | { | ||
939 | struct perf_event_mmap_page *pc = addr; | ||
940 | u32 seq, idx, time_mult = 0, time_shift = 0; | ||
941 | u64 count, cyc = 0, time_offset = 0, enabled, running, delta; | ||
942 | |||
943 | do { | ||
944 | seq = pc->lock; | ||
945 | barrier(); | ||
946 | |||
947 | enabled = pc->time_enabled; | ||
948 | running = pc->time_running; | ||
949 | |||
950 | if (enabled != running) { | ||
951 | cyc = rdtsc(); | ||
952 | time_mult = pc->time_mult; | ||
953 | time_shift = pc->time_shift; | ||
954 | time_offset = pc->time_offset; | ||
955 | } | ||
956 | |||
957 | idx = pc->index; | ||
958 | count = pc->offset; | ||
959 | if (idx) | ||
960 | count += rdpmc(idx - 1); | ||
961 | |||
962 | barrier(); | ||
963 | } while (pc->lock != seq); | ||
964 | |||
965 | if (enabled != running) { | ||
966 | u64 quot, rem; | ||
967 | |||
968 | quot = (cyc >> time_shift); | ||
969 | rem = cyc & ((1 << time_shift) - 1); | ||
970 | delta = time_offset + quot * time_mult + | ||
971 | ((rem * time_mult) >> time_shift); | ||
972 | |||
973 | enabled += delta; | ||
974 | if (idx) | ||
975 | running += delta; | ||
976 | |||
977 | quot = count / running; | ||
978 | rem = count % running; | ||
979 | count = quot * enabled + (rem * enabled) / running; | ||
980 | } | ||
981 | |||
982 | return count; | ||
983 | } | ||
984 | |||
985 | /* | ||
986 | * If the RDPMC instruction faults then signal this back to the test parent task: | ||
987 | */ | ||
988 | static void segfault_handler(int sig __maybe_unused, | ||
989 | siginfo_t *info __maybe_unused, | ||
990 | void *uc __maybe_unused) | ||
991 | { | ||
992 | exit(-1); | ||
993 | } | ||
994 | |||
995 | static int __test__rdpmc(void) | ||
996 | { | ||
997 | volatile int tmp = 0; | ||
998 | u64 i, loops = 1000; | ||
999 | int n; | ||
1000 | int fd; | ||
1001 | void *addr; | ||
1002 | struct perf_event_attr attr = { | ||
1003 | .type = PERF_TYPE_HARDWARE, | ||
1004 | .config = PERF_COUNT_HW_INSTRUCTIONS, | ||
1005 | .exclude_kernel = 1, | ||
1006 | }; | ||
1007 | u64 delta_sum = 0; | ||
1008 | struct sigaction sa; | ||
1009 | |||
1010 | sigfillset(&sa.sa_mask); | ||
1011 | sa.sa_sigaction = segfault_handler; | ||
1012 | sigaction(SIGSEGV, &sa, NULL); | ||
1013 | |||
1014 | fd = sys_perf_event_open(&attr, 0, -1, -1, 0); | ||
1015 | if (fd < 0) { | ||
1016 | pr_err("Error: sys_perf_event_open() syscall returned " | ||
1017 | "with %d (%s)\n", fd, strerror(errno)); | ||
1018 | return -1; | ||
1019 | } | ||
1020 | |||
1021 | addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); | ||
1022 | if (addr == (void *)(-1)) { | ||
1023 | pr_err("Error: mmap() syscall returned with (%s)\n", | ||
1024 | strerror(errno)); | ||
1025 | goto out_close; | ||
1026 | } | ||
1027 | |||
1028 | for (n = 0; n < 6; n++) { | ||
1029 | u64 stamp, now, delta; | ||
1030 | |||
1031 | stamp = mmap_read_self(addr); | ||
1032 | |||
1033 | for (i = 0; i < loops; i++) | ||
1034 | tmp++; | ||
1035 | |||
1036 | now = mmap_read_self(addr); | ||
1037 | loops *= 10; | ||
1038 | |||
1039 | delta = now - stamp; | ||
1040 | pr_debug("%14d: %14Lu\n", n, (long long)delta); | ||
1041 | |||
1042 | delta_sum += delta; | ||
1043 | } | ||
1044 | |||
1045 | munmap(addr, page_size); | ||
1046 | pr_debug(" "); | ||
1047 | out_close: | ||
1048 | close(fd); | ||
1049 | |||
1050 | if (!delta_sum) | ||
1051 | return -1; | ||
1052 | |||
1053 | return 0; | ||
1054 | } | ||
1055 | |||
1056 | static int test__rdpmc(void) | ||
1057 | { | ||
1058 | int status = 0; | ||
1059 | int wret = 0; | ||
1060 | int ret; | ||
1061 | int pid; | ||
1062 | |||
1063 | pid = fork(); | ||
1064 | if (pid < 0) | ||
1065 | return -1; | ||
1066 | |||
1067 | if (!pid) { | ||
1068 | ret = __test__rdpmc(); | ||
1069 | |||
1070 | exit(ret); | ||
1071 | } | ||
1072 | |||
1073 | wret = waitpid(pid, &status, 0); | ||
1074 | if (wret < 0 || status) | ||
1075 | return -1; | ||
1076 | |||
1077 | return 0; | ||
1078 | } | ||
1079 | |||
1080 | #endif | ||
1081 | |||
1082 | static int test__perf_pmu(void) | ||
1083 | { | ||
1084 | return perf_pmu__test(); | ||
1085 | } | ||
1086 | |||
1087 | static int perf_evsel__roundtrip_cache_name_test(void) | ||
1088 | { | ||
1089 | char name[128]; | ||
1090 | int type, op, err = 0, ret = 0, i, idx; | ||
1091 | struct perf_evsel *evsel; | ||
1092 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
1093 | |||
1094 | if (evlist == NULL) | ||
1095 | return -ENOMEM; | ||
1096 | |||
1097 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | ||
1098 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | ||
1099 | /* skip invalid cache type */ | ||
1100 | if (!perf_evsel__is_cache_op_valid(type, op)) | ||
1101 | continue; | ||
1102 | |||
1103 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | ||
1104 | __perf_evsel__hw_cache_type_op_res_name(type, op, i, | ||
1105 | name, sizeof(name)); | ||
1106 | err = parse_events(evlist, name, 0); | ||
1107 | if (err) | ||
1108 | ret = err; | ||
1109 | } | ||
1110 | } | ||
1111 | } | ||
1112 | |||
1113 | idx = 0; | ||
1114 | evsel = perf_evlist__first(evlist); | ||
1115 | |||
1116 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | ||
1117 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | ||
1118 | /* skip invalid cache type */ | ||
1119 | if (!perf_evsel__is_cache_op_valid(type, op)) | ||
1120 | continue; | ||
1121 | |||
1122 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | ||
1123 | __perf_evsel__hw_cache_type_op_res_name(type, op, i, | ||
1124 | name, sizeof(name)); | ||
1125 | if (evsel->idx != idx) | ||
1126 | continue; | ||
1127 | |||
1128 | ++idx; | ||
1129 | |||
1130 | if (strcmp(perf_evsel__name(evsel), name)) { | ||
1131 | pr_debug("%s != %s\n", perf_evsel__name(evsel), name); | ||
1132 | ret = -1; | ||
1133 | } | ||
1134 | |||
1135 | evsel = perf_evsel__next(evsel); | ||
1136 | } | ||
1137 | } | ||
1138 | } | ||
1139 | |||
1140 | perf_evlist__delete(evlist); | ||
1141 | return ret; | ||
1142 | } | ||
1143 | |||
1144 | static int __perf_evsel__name_array_test(const char *names[], int nr_names) | ||
1145 | { | ||
1146 | int i, err; | ||
1147 | struct perf_evsel *evsel; | ||
1148 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
1149 | |||
1150 | if (evlist == NULL) | ||
1151 | return -ENOMEM; | ||
1152 | |||
1153 | for (i = 0; i < nr_names; ++i) { | ||
1154 | err = parse_events(evlist, names[i], 0); | ||
1155 | if (err) { | ||
1156 | pr_debug("failed to parse event '%s', err %d\n", | ||
1157 | names[i], err); | ||
1158 | goto out_delete_evlist; | ||
1159 | } | ||
1160 | } | ||
1161 | |||
1162 | err = 0; | ||
1163 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
1164 | if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) { | ||
1165 | --err; | ||
1166 | pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]); | ||
1167 | } | ||
1168 | } | ||
1169 | |||
1170 | out_delete_evlist: | ||
1171 | perf_evlist__delete(evlist); | ||
1172 | return err; | ||
1173 | } | ||
1174 | |||
1175 | #define perf_evsel__name_array_test(names) \ | ||
1176 | __perf_evsel__name_array_test(names, ARRAY_SIZE(names)) | ||
1177 | |||
1178 | static int perf_evsel__roundtrip_name_test(void) | ||
1179 | { | ||
1180 | int err = 0, ret = 0; | ||
1181 | |||
1182 | err = perf_evsel__name_array_test(perf_evsel__hw_names); | ||
1183 | if (err) | ||
1184 | ret = err; | ||
1185 | |||
1186 | err = perf_evsel__name_array_test(perf_evsel__sw_names); | ||
1187 | if (err) | ||
1188 | ret = err; | ||
1189 | |||
1190 | err = perf_evsel__roundtrip_cache_name_test(); | ||
1191 | if (err) | ||
1192 | ret = err; | ||
1193 | |||
1194 | return ret; | ||
1195 | } | ||
1196 | |||
1197 | static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, | ||
1198 | int size, bool should_be_signed) | ||
1199 | { | ||
1200 | struct format_field *field = perf_evsel__field(evsel, name); | ||
1201 | int is_signed; | ||
1202 | int ret = 0; | ||
1203 | |||
1204 | if (field == NULL) { | ||
1205 | pr_debug("%s: \"%s\" field not found!\n", evsel->name, name); | ||
1206 | return -1; | ||
1207 | } | ||
1208 | |||
1209 | is_signed = !!(field->flags | FIELD_IS_SIGNED); | ||
1210 | if (should_be_signed && !is_signed) { | ||
1211 | pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", | ||
1212 | evsel->name, name, is_signed, should_be_signed); | ||
1213 | ret = -1; | ||
1214 | } | ||
1215 | |||
1216 | if (field->size != size) { | ||
1217 | pr_debug("%s: \"%s\" size (%d) should be %d!\n", | ||
1218 | evsel->name, name, field->size, size); | ||
1219 | ret = -1; | ||
1220 | } | ||
1221 | |||
1222 | return ret; | ||
1223 | } | ||
1224 | |||
1225 | static int perf_evsel__tp_sched_test(void) | ||
1226 | { | ||
1227 | struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0); | ||
1228 | int ret = 0; | ||
1229 | |||
1230 | if (evsel == NULL) { | ||
1231 | pr_debug("perf_evsel__new\n"); | ||
1232 | return -1; | ||
1233 | } | ||
1234 | |||
1235 | if (perf_evsel__test_field(evsel, "prev_comm", 16, true)) | ||
1236 | ret = -1; | ||
1237 | |||
1238 | if (perf_evsel__test_field(evsel, "prev_pid", 4, true)) | ||
1239 | ret = -1; | ||
1240 | |||
1241 | if (perf_evsel__test_field(evsel, "prev_prio", 4, true)) | ||
1242 | ret = -1; | ||
1243 | |||
1244 | if (perf_evsel__test_field(evsel, "prev_state", 8, true)) | ||
1245 | ret = -1; | ||
1246 | |||
1247 | if (perf_evsel__test_field(evsel, "next_comm", 16, true)) | ||
1248 | ret = -1; | ||
1249 | |||
1250 | if (perf_evsel__test_field(evsel, "next_pid", 4, true)) | ||
1251 | ret = -1; | ||
1252 | |||
1253 | if (perf_evsel__test_field(evsel, "next_prio", 4, true)) | ||
1254 | ret = -1; | ||
1255 | |||
1256 | perf_evsel__delete(evsel); | ||
1257 | |||
1258 | evsel = perf_evsel__newtp("sched", "sched_wakeup", 0); | ||
1259 | |||
1260 | if (perf_evsel__test_field(evsel, "comm", 16, true)) | ||
1261 | ret = -1; | ||
1262 | |||
1263 | if (perf_evsel__test_field(evsel, "pid", 4, true)) | ||
1264 | ret = -1; | ||
1265 | |||
1266 | if (perf_evsel__test_field(evsel, "prio", 4, true)) | ||
1267 | ret = -1; | ||
1268 | |||
1269 | if (perf_evsel__test_field(evsel, "success", 4, true)) | ||
1270 | ret = -1; | ||
1271 | |||
1272 | if (perf_evsel__test_field(evsel, "target_cpu", 4, true)) | ||
1273 | ret = -1; | ||
1274 | |||
1275 | return ret; | ||
1276 | } | ||
1277 | |||
1278 | static int test__syscall_open_tp_fields(void) | ||
1279 | { | ||
1280 | struct perf_record_opts opts = { | ||
1281 | .target = { | ||
1282 | .uid = UINT_MAX, | ||
1283 | .uses_mmap = true, | ||
1284 | }, | ||
1285 | .no_delay = true, | ||
1286 | .freq = 1, | ||
1287 | .mmap_pages = 256, | ||
1288 | .raw_samples = true, | ||
1289 | }; | ||
1290 | const char *filename = "/etc/passwd"; | ||
1291 | int flags = O_RDONLY | O_DIRECTORY; | ||
1292 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
1293 | struct perf_evsel *evsel; | ||
1294 | int err = -1, i, nr_events = 0, nr_polls = 0; | ||
1295 | |||
1296 | if (evlist == NULL) { | ||
1297 | pr_debug("%s: perf_evlist__new\n", __func__); | ||
1298 | goto out; | ||
1299 | } | ||
1300 | |||
1301 | evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); | ||
1302 | if (evsel == NULL) { | ||
1303 | pr_debug("%s: perf_evsel__newtp\n", __func__); | ||
1304 | goto out_delete_evlist; | ||
1305 | } | ||
1306 | |||
1307 | perf_evlist__add(evlist, evsel); | ||
1308 | |||
1309 | err = perf_evlist__create_maps(evlist, &opts.target); | ||
1310 | if (err < 0) { | ||
1311 | pr_debug("%s: perf_evlist__create_maps\n", __func__); | ||
1312 | goto out_delete_evlist; | ||
1313 | } | ||
1314 | |||
1315 | perf_evsel__config(evsel, &opts, evsel); | ||
1316 | |||
1317 | evlist->threads->map[0] = getpid(); | ||
1318 | |||
1319 | err = perf_evlist__open(evlist); | ||
1320 | if (err < 0) { | ||
1321 | pr_debug("perf_evlist__open: %s\n", strerror(errno)); | ||
1322 | goto out_delete_evlist; | ||
1323 | } | ||
1324 | |||
1325 | err = perf_evlist__mmap(evlist, UINT_MAX, false); | ||
1326 | if (err < 0) { | ||
1327 | pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); | ||
1328 | goto out_delete_evlist; | ||
1329 | } | ||
1330 | |||
1331 | perf_evlist__enable(evlist); | ||
1332 | |||
1333 | /* | ||
1334 | * Generate the event: | ||
1335 | */ | ||
1336 | open(filename, flags); | ||
1337 | |||
1338 | while (1) { | ||
1339 | int before = nr_events; | ||
1340 | |||
1341 | for (i = 0; i < evlist->nr_mmaps; i++) { | ||
1342 | union perf_event *event; | ||
1343 | |||
1344 | while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { | ||
1345 | const u32 type = event->header.type; | ||
1346 | int tp_flags; | ||
1347 | struct perf_sample sample; | ||
1348 | |||
1349 | ++nr_events; | ||
1350 | |||
1351 | if (type != PERF_RECORD_SAMPLE) | ||
1352 | continue; | ||
1353 | |||
1354 | err = perf_evsel__parse_sample(evsel, event, &sample); | ||
1355 | if (err) { | ||
1356 | pr_err("Can't parse sample, err = %d\n", err); | ||
1357 | goto out_munmap; | ||
1358 | } | ||
1359 | |||
1360 | tp_flags = perf_evsel__intval(evsel, &sample, "flags"); | ||
1361 | |||
1362 | if (flags != tp_flags) { | ||
1363 | pr_debug("%s: Expected flags=%#x, got %#x\n", | ||
1364 | __func__, flags, tp_flags); | ||
1365 | goto out_munmap; | ||
1366 | } | ||
1367 | |||
1368 | goto out_ok; | ||
1369 | } | ||
1370 | } | ||
1371 | |||
1372 | if (nr_events == before) | ||
1373 | poll(evlist->pollfd, evlist->nr_fds, 10); | ||
1374 | |||
1375 | if (++nr_polls > 5) { | ||
1376 | pr_debug("%s: no events!\n", __func__); | ||
1377 | goto out_munmap; | ||
1378 | } | ||
1379 | } | ||
1380 | out_ok: | ||
1381 | err = 0; | ||
1382 | out_munmap: | ||
1383 | perf_evlist__munmap(evlist); | ||
1384 | out_delete_evlist: | ||
1385 | perf_evlist__delete(evlist); | ||
1386 | out: | ||
1387 | return err; | ||
1388 | } | ||
1389 | 12 | ||
1390 | static struct test { | 13 | static struct test { |
1391 | const char *desc; | 14 | const char *desc; |
@@ -1409,7 +32,7 @@ static struct test { | |||
1409 | }, | 32 | }, |
1410 | { | 33 | { |
1411 | .desc = "parse events tests", | 34 | .desc = "parse events tests", |
1412 | .func = parse_events__test, | 35 | .func = test__parse_events, |
1413 | }, | 36 | }, |
1414 | #if defined(__x86_64__) || defined(__i386__) | 37 | #if defined(__x86_64__) || defined(__i386__) |
1415 | { | 38 | { |
@@ -1423,19 +46,19 @@ static struct test { | |||
1423 | }, | 46 | }, |
1424 | { | 47 | { |
1425 | .desc = "Test perf pmu format parsing", | 48 | .desc = "Test perf pmu format parsing", |
1426 | .func = test__perf_pmu, | 49 | .func = test__pmu, |
1427 | }, | 50 | }, |
1428 | { | 51 | { |
1429 | .desc = "Test dso data interface", | 52 | .desc = "Test dso data interface", |
1430 | .func = dso__test_data, | 53 | .func = test__dso_data, |
1431 | }, | 54 | }, |
1432 | { | 55 | { |
1433 | .desc = "roundtrip evsel->name check", | 56 | .desc = "roundtrip evsel->name check", |
1434 | .func = perf_evsel__roundtrip_name_test, | 57 | .func = test__perf_evsel__roundtrip_name_test, |
1435 | }, | 58 | }, |
1436 | { | 59 | { |
1437 | .desc = "Check parsing of sched tracepoints fields", | 60 | .desc = "Check parsing of sched tracepoints fields", |
1438 | .func = perf_evsel__tp_sched_test, | 61 | .func = test__perf_evsel__tp_sched_test, |
1439 | }, | 62 | }, |
1440 | { | 63 | { |
1441 | .desc = "Generate and check syscalls:sys_enter_open event fields", | 64 | .desc = "Generate and check syscalls:sys_enter_open event fields", |
@@ -1443,7 +66,7 @@ static struct test { | |||
1443 | }, | 66 | }, |
1444 | { | 67 | { |
1445 | .desc = "struct perf_event_attr setup", | 68 | .desc = "struct perf_event_attr setup", |
1446 | .func = test_attr__run, | 69 | .func = test__attr, |
1447 | }, | 70 | }, |
1448 | { | 71 | { |
1449 | .func = NULL, | 72 | .func = NULL, |
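
One detail from the removed builtin-test.c code above deserves a callout: the tail of the mmap_read_self() helper, visible at the top of this hunk, applies the standard multiplexing correction, scaling the raw counter value by the ratio of time the event was enabled to the time it actually ran on the PMU. A standalone sketch of that arithmetic follows; it is illustrative only, the function name is made up, and the quotient/remainder split exists purely to avoid overflowing the 64-bit intermediate product:

#include <stdint.h>

/* scaled = count * enabled / running, computed without overflowing 64 bits */
static uint64_t scale_count(uint64_t count, uint64_t enabled, uint64_t running)
{
        uint64_t quot, rem;

        if (!running)                   /* never scheduled: nothing to scale */
                return count;

        quot = count / running;
        rem  = count % running;
        return quot * enabled + (rem * enabled) / running;
}
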
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c index 0cd42fc9bc13..5eaffa2de9c5 100644 --- a/tools/perf/tests/dso-data.c +++ b/tools/perf/tests/dso-data.c | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #include "machine.h" | 9 | #include "machine.h" |
10 | #include "symbol.h" | 10 | #include "symbol.h" |
11 | #include "tests.h" | ||
11 | 12 | ||
12 | #define TEST_ASSERT_VAL(text, cond) \ | 13 | #define TEST_ASSERT_VAL(text, cond) \ |
13 | do { \ | 14 | do { \ |
@@ -25,6 +26,10 @@ static char *test_file(int size) | |||
25 | unsigned char *buf; | 26 | unsigned char *buf; |
26 | 27 | ||
27 | fd = mkstemp(templ); | 28 | fd = mkstemp(templ); |
29 | if (fd < 0) { | ||
30 | perror("mkstemp failed"); | ||
31 | return NULL; | ||
32 | } | ||
28 | 33 | ||
29 | buf = malloc(size); | 34 | buf = malloc(size); |
30 | if (!buf) { | 35 | if (!buf) { |
@@ -95,7 +100,7 @@ struct test_data_offset offsets[] = { | |||
95 | }, | 100 | }, |
96 | }; | 101 | }; |
97 | 102 | ||
98 | int dso__test_data(void) | 103 | int test__dso_data(void) |
99 | { | 104 | { |
100 | struct machine machine; | 105 | struct machine machine; |
101 | struct dso *dso; | 106 | struct dso *dso; |
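
The dso-data.c hunk above only adds an error check after mkstemp(). For context, the pattern it guards is sketched below; this is illustrative code, not from the patch, and the helper name is invented:

#include <stdio.h>
#include <stdlib.h>

static int make_scratch_file(void)
{
        char templ[] = "/tmp/perf-test-XXXXXX";   /* mkstemp() rewrites the XXXXXX */
        int fd = mkstemp(templ);

        if (fd < 0) {
                perror("mkstemp failed");
                return -1;
        }

        /* caller fills the file with test data and unlinks it when done */
        return fd;
}
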
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c new file mode 100644 index 000000000000..e61fc828a158 --- /dev/null +++ b/tools/perf/tests/evsel-roundtrip-name.c | |||
@@ -0,0 +1,114 @@ | |||
1 | #include "evlist.h" | ||
2 | #include "evsel.h" | ||
3 | #include "parse-events.h" | ||
4 | #include "tests.h" | ||
5 | |||
6 | static int perf_evsel__roundtrip_cache_name_test(void) | ||
7 | { | ||
8 | char name[128]; | ||
9 | int type, op, err = 0, ret = 0, i, idx; | ||
10 | struct perf_evsel *evsel; | ||
11 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
12 | |||
13 | if (evlist == NULL) | ||
14 | return -ENOMEM; | ||
15 | |||
16 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | ||
17 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | ||
18 | /* skip invalid cache type */ | ||
19 | if (!perf_evsel__is_cache_op_valid(type, op)) | ||
20 | continue; | ||
21 | |||
22 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | ||
23 | __perf_evsel__hw_cache_type_op_res_name(type, op, i, | ||
24 | name, sizeof(name)); | ||
25 | err = parse_events(evlist, name, 0); | ||
26 | if (err) | ||
27 | ret = err; | ||
28 | } | ||
29 | } | ||
30 | } | ||
31 | |||
32 | idx = 0; | ||
33 | evsel = perf_evlist__first(evlist); | ||
34 | |||
35 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | ||
36 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | ||
37 | /* skip invalid cache type */ | ||
38 | if (!perf_evsel__is_cache_op_valid(type, op)) | ||
39 | continue; | ||
40 | |||
41 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | ||
42 | __perf_evsel__hw_cache_type_op_res_name(type, op, i, | ||
43 | name, sizeof(name)); | ||
44 | if (evsel->idx != idx) | ||
45 | continue; | ||
46 | |||
47 | ++idx; | ||
48 | |||
49 | if (strcmp(perf_evsel__name(evsel), name)) { | ||
50 | pr_debug("%s != %s\n", perf_evsel__name(evsel), name); | ||
51 | ret = -1; | ||
52 | } | ||
53 | |||
54 | evsel = perf_evsel__next(evsel); | ||
55 | } | ||
56 | } | ||
57 | } | ||
58 | |||
59 | perf_evlist__delete(evlist); | ||
60 | return ret; | ||
61 | } | ||
62 | |||
63 | static int __perf_evsel__name_array_test(const char *names[], int nr_names) | ||
64 | { | ||
65 | int i, err; | ||
66 | struct perf_evsel *evsel; | ||
67 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
68 | |||
69 | if (evlist == NULL) | ||
70 | return -ENOMEM; | ||
71 | |||
72 | for (i = 0; i < nr_names; ++i) { | ||
73 | err = parse_events(evlist, names[i], 0); | ||
74 | if (err) { | ||
75 | pr_debug("failed to parse event '%s', err %d\n", | ||
76 | names[i], err); | ||
77 | goto out_delete_evlist; | ||
78 | } | ||
79 | } | ||
80 | |||
81 | err = 0; | ||
82 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
83 | if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) { | ||
84 | --err; | ||
85 | pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]); | ||
86 | } | ||
87 | } | ||
88 | |||
89 | out_delete_evlist: | ||
90 | perf_evlist__delete(evlist); | ||
91 | return err; | ||
92 | } | ||
93 | |||
94 | #define perf_evsel__name_array_test(names) \ | ||
95 | __perf_evsel__name_array_test(names, ARRAY_SIZE(names)) | ||
96 | |||
97 | int test__perf_evsel__roundtrip_name_test(void) | ||
98 | { | ||
99 | int err = 0, ret = 0; | ||
100 | |||
101 | err = perf_evsel__name_array_test(perf_evsel__hw_names); | ||
102 | if (err) | ||
103 | ret = err; | ||
104 | |||
105 | err = perf_evsel__name_array_test(perf_evsel__sw_names); | ||
106 | if (err) | ||
107 | ret = err; | ||
108 | |||
109 | err = perf_evsel__roundtrip_cache_name_test(); | ||
110 | if (err) | ||
111 | ret = err; | ||
112 | |||
113 | return ret; | ||
114 | } | ||
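
The new evsel-roundtrip-name.c above enumerates every { cache type x op x result } combination, asks the name-synthesis helper for the canonical string, parses it back, and compares. The shape of that enumeration is sketched below with made-up tables; the real dimension names and the exact string format come from the perf event-name code, so treat this purely as an illustration of the loop structure:

#include <stdio.h>

static const char *types[]   = { "L1-dcache", "L1-icache", "LLC" };
static const char *ops[]     = { "load", "store", "prefetch" };
static const char *results[] = { "refs", "misses" };

static void generate_cache_names(void)
{
        unsigned int t, o, r;
        char name[128];

        for (t = 0; t < sizeof(types) / sizeof(types[0]); t++)
                for (o = 0; o < sizeof(ops) / sizeof(ops[0]); o++)
                        for (r = 0; r < sizeof(results) / sizeof(results[0]); r++) {
                                snprintf(name, sizeof(name), "%s-%s-%s",
                                         types[t], ops[o], results[r]);
                                puts(name);     /* feed to the parser, then compare */
                        }
}
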
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c new file mode 100644 index 000000000000..a5d2fcc5ae35 --- /dev/null +++ b/tools/perf/tests/evsel-tp-sched.c | |||
@@ -0,0 +1,84 @@ | |||
1 | #include "evsel.h" | ||
2 | #include "tests.h" | ||
3 | #include "event-parse.h" | ||
4 | |||
5 | static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, | ||
6 | int size, bool should_be_signed) | ||
7 | { | ||
8 | struct format_field *field = perf_evsel__field(evsel, name); | ||
9 | int is_signed; | ||
10 | int ret = 0; | ||
11 | |||
12 | if (field == NULL) { | ||
13 | pr_debug("%s: \"%s\" field not found!\n", evsel->name, name); | ||
14 | return -1; | ||
15 | } | ||
16 | |||
17 | is_signed = !!(field->flags | FIELD_IS_SIGNED); | ||
18 | if (should_be_signed && !is_signed) { | ||
19 | pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", | ||
20 | evsel->name, name, is_signed, should_be_signed); | ||
21 | ret = -1; | ||
22 | } | ||
23 | |||
24 | if (field->size != size) { | ||
25 | pr_debug("%s: \"%s\" size (%d) should be %d!\n", | ||
26 | evsel->name, name, field->size, size); | ||
27 | ret = -1; | ||
28 | } | ||
29 | |||
30 | return ret; | ||
31 | } | ||
32 | |||
33 | int test__perf_evsel__tp_sched_test(void) | ||
34 | { | ||
35 | struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0); | ||
36 | int ret = 0; | ||
37 | |||
38 | if (evsel == NULL) { | ||
39 | pr_debug("perf_evsel__new\n"); | ||
40 | return -1; | ||
41 | } | ||
42 | |||
43 | if (perf_evsel__test_field(evsel, "prev_comm", 16, true)) | ||
44 | ret = -1; | ||
45 | |||
46 | if (perf_evsel__test_field(evsel, "prev_pid", 4, true)) | ||
47 | ret = -1; | ||
48 | |||
49 | if (perf_evsel__test_field(evsel, "prev_prio", 4, true)) | ||
50 | ret = -1; | ||
51 | |||
52 | if (perf_evsel__test_field(evsel, "prev_state", 8, true)) | ||
53 | ret = -1; | ||
54 | |||
55 | if (perf_evsel__test_field(evsel, "next_comm", 16, true)) | ||
56 | ret = -1; | ||
57 | |||
58 | if (perf_evsel__test_field(evsel, "next_pid", 4, true)) | ||
59 | ret = -1; | ||
60 | |||
61 | if (perf_evsel__test_field(evsel, "next_prio", 4, true)) | ||
62 | ret = -1; | ||
63 | |||
64 | perf_evsel__delete(evsel); | ||
65 | |||
66 | evsel = perf_evsel__newtp("sched", "sched_wakeup", 0); | ||
67 | |||
68 | if (perf_evsel__test_field(evsel, "comm", 16, true)) | ||
69 | ret = -1; | ||
70 | |||
71 | if (perf_evsel__test_field(evsel, "pid", 4, true)) | ||
72 | ret = -1; | ||
73 | |||
74 | if (perf_evsel__test_field(evsel, "prio", 4, true)) | ||
75 | ret = -1; | ||
76 | |||
77 | if (perf_evsel__test_field(evsel, "success", 4, true)) | ||
78 | ret = -1; | ||
79 | |||
80 | if (perf_evsel__test_field(evsel, "target_cpu", 4, true)) | ||
81 | ret = -1; | ||
82 | |||
83 | return ret; | ||
84 | } | ||
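
One thing to note about perf_evsel__test_field() in the new evsel-tp-sched.c: the signedness check computes !!(field->flags | FIELD_IS_SIGNED), which is non-zero for every field, so the "should be signed" assertion can never fire; a bitwise AND is presumably what is intended. A sketch of the intended flag test, with a stand-in constant:

#include <stdbool.h>

#define FIELD_IS_SIGNED 4       /* stand-in value, not the real event-parse flag */

static bool field_is_signed(unsigned long flags)
{
        return (flags & FIELD_IS_SIGNED) != 0;  /* mask the flag, don't OR it in */
}
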
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c new file mode 100644 index 000000000000..e1746811e14b --- /dev/null +++ b/tools/perf/tests/mmap-basic.c | |||
@@ -0,0 +1,162 @@ | |||
1 | #include "evlist.h" | ||
2 | #include "evsel.h" | ||
3 | #include "thread_map.h" | ||
4 | #include "cpumap.h" | ||
5 | #include "tests.h" | ||
6 | |||
7 | /* | ||
8 | * This test will generate random numbers of calls to some getpid syscalls, | ||
9 | * then establish an mmap for a group of events that are created to monitor | ||
10 | * the syscalls. | ||
11 | * | ||
12 | * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated | ||
13 | * sample.id field to map back to its respective perf_evsel instance. | ||
14 | * | ||
15 | * Then it checks if the number of syscalls reported as perf events by | ||
16 | * the kernel corresponds to the number of syscalls made. | ||
17 | */ | ||
18 | int test__basic_mmap(void) | ||
19 | { | ||
20 | int err = -1; | ||
21 | union perf_event *event; | ||
22 | struct thread_map *threads; | ||
23 | struct cpu_map *cpus; | ||
24 | struct perf_evlist *evlist; | ||
25 | struct perf_event_attr attr = { | ||
26 | .type = PERF_TYPE_TRACEPOINT, | ||
27 | .read_format = PERF_FORMAT_ID, | ||
28 | .sample_type = PERF_SAMPLE_ID, | ||
29 | .watermark = 0, | ||
30 | }; | ||
31 | cpu_set_t cpu_set; | ||
32 | const char *syscall_names[] = { "getsid", "getppid", "getpgrp", | ||
33 | "getpgid", }; | ||
34 | pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp, | ||
35 | (void*)getpgid }; | ||
36 | #define nsyscalls ARRAY_SIZE(syscall_names) | ||
37 | int ids[nsyscalls]; | ||
38 | unsigned int nr_events[nsyscalls], | ||
39 | expected_nr_events[nsyscalls], i, j; | ||
40 | struct perf_evsel *evsels[nsyscalls], *evsel; | ||
41 | |||
42 | for (i = 0; i < nsyscalls; ++i) { | ||
43 | char name[64]; | ||
44 | |||
45 | snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); | ||
46 | ids[i] = trace_event__id(name); | ||
47 | if (ids[i] < 0) { | ||
48 | pr_debug("Is debugfs mounted on /sys/kernel/debug?\n"); | ||
49 | return -1; | ||
50 | } | ||
51 | nr_events[i] = 0; | ||
52 | expected_nr_events[i] = random() % 257; | ||
53 | } | ||
54 | |||
55 | threads = thread_map__new(-1, getpid(), UINT_MAX); | ||
56 | if (threads == NULL) { | ||
57 | pr_debug("thread_map__new\n"); | ||
58 | return -1; | ||
59 | } | ||
60 | |||
61 | cpus = cpu_map__new(NULL); | ||
62 | if (cpus == NULL) { | ||
63 | pr_debug("cpu_map__new\n"); | ||
64 | goto out_free_threads; | ||
65 | } | ||
66 | |||
67 | CPU_ZERO(&cpu_set); | ||
68 | CPU_SET(cpus->map[0], &cpu_set); | ||
69 | sched_setaffinity(0, sizeof(cpu_set), &cpu_set); | ||
70 | if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { | ||
71 | pr_debug("sched_setaffinity() failed on CPU %d: %s ", | ||
72 | cpus->map[0], strerror(errno)); | ||
73 | goto out_free_cpus; | ||
74 | } | ||
75 | |||
76 | evlist = perf_evlist__new(cpus, threads); | ||
77 | if (evlist == NULL) { | ||
78 | pr_debug("perf_evlist__new\n"); | ||
79 | goto out_free_cpus; | ||
80 | } | ||
81 | |||
82 | /* anonymous union fields, can't be initialized above */ | ||
83 | attr.wakeup_events = 1; | ||
84 | attr.sample_period = 1; | ||
85 | |||
86 | for (i = 0; i < nsyscalls; ++i) { | ||
87 | attr.config = ids[i]; | ||
88 | evsels[i] = perf_evsel__new(&attr, i); | ||
89 | if (evsels[i] == NULL) { | ||
90 | pr_debug("perf_evsel__new\n"); | ||
91 | goto out_free_evlist; | ||
92 | } | ||
93 | |||
94 | perf_evlist__add(evlist, evsels[i]); | ||
95 | |||
96 | if (perf_evsel__open(evsels[i], cpus, threads) < 0) { | ||
97 | pr_debug("failed to open counter: %s, " | ||
98 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | ||
99 | strerror(errno)); | ||
100 | goto out_close_fd; | ||
101 | } | ||
102 | } | ||
103 | |||
104 | if (perf_evlist__mmap(evlist, 128, true) < 0) { | ||
105 | pr_debug("failed to mmap events: %d (%s)\n", errno, | ||
106 | strerror(errno)); | ||
107 | goto out_close_fd; | ||
108 | } | ||
109 | |||
110 | for (i = 0; i < nsyscalls; ++i) | ||
111 | for (j = 0; j < expected_nr_events[i]; ++j) { | ||
112 | int foo = syscalls[i](); | ||
113 | ++foo; | ||
114 | } | ||
115 | |||
116 | while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { | ||
117 | struct perf_sample sample; | ||
118 | |||
119 | if (event->header.type != PERF_RECORD_SAMPLE) { | ||
120 | pr_debug("unexpected %s event\n", | ||
121 | perf_event__name(event->header.type)); | ||
122 | goto out_munmap; | ||
123 | } | ||
124 | |||
125 | err = perf_evlist__parse_sample(evlist, event, &sample); | ||
126 | if (err) { | ||
127 | pr_err("Can't parse sample, err = %d\n", err); | ||
128 | goto out_munmap; | ||
129 | } | ||
130 | |||
131 | evsel = perf_evlist__id2evsel(evlist, sample.id); | ||
132 | if (evsel == NULL) { | ||
133 | pr_debug("event with id %" PRIu64 | ||
134 | " doesn't map to an evsel\n", sample.id); | ||
135 | goto out_munmap; | ||
136 | } | ||
137 | nr_events[evsel->idx]++; | ||
138 | } | ||
139 | |||
140 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
141 | if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { | ||
142 | pr_debug("expected %d %s events, got %d\n", | ||
143 | expected_nr_events[evsel->idx], | ||
144 | perf_evsel__name(evsel), nr_events[evsel->idx]); | ||
145 | goto out_munmap; | ||
146 | } | ||
147 | } | ||
148 | |||
149 | err = 0; | ||
150 | out_munmap: | ||
151 | perf_evlist__munmap(evlist); | ||
152 | out_close_fd: | ||
153 | for (i = 0; i < nsyscalls; ++i) | ||
154 | perf_evsel__close_fd(evsels[i], 1, threads->nr); | ||
155 | out_free_evlist: | ||
156 | perf_evlist__delete(evlist); | ||
157 | out_free_cpus: | ||
158 | cpu_map__delete(cpus); | ||
159 | out_free_threads: | ||
160 | thread_map__delete(threads); | ||
161 | return err; | ||
162 | } | ||
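
The comment at the top of mmap-basic.c explains the core trick: every counter is opened with PERF_FORMAT_ID and samples carry PERF_SAMPLE_ID, so a sample pulled out of the shared ring buffer can be routed back to the counter that produced it. A conceptual sketch of that id-to-counter routing, with stand-in types rather than the perf_evsel/perf_evlist API:

#include <stdint.h>
#include <stddef.h>

struct counter {
        uint64_t     id;            /* kernel-assigned, read via PERF_FORMAT_ID */
        unsigned int nr_samples;
};

static struct counter *id_to_counter(struct counter *counters, size_t n, uint64_t id)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (counters[i].id == id)
                        return &counters[i];
        return NULL;                /* sample from an unknown counter */
}
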
diff --git a/tools/perf/tests/open-syscall-all-cpus.c b/tools/perf/tests/open-syscall-all-cpus.c new file mode 100644 index 000000000000..31072aba0d54 --- /dev/null +++ b/tools/perf/tests/open-syscall-all-cpus.c | |||
@@ -0,0 +1,120 @@ | |||
1 | #include "evsel.h" | ||
2 | #include "tests.h" | ||
3 | #include "thread_map.h" | ||
4 | #include "cpumap.h" | ||
5 | #include "debug.h" | ||
6 | |||
7 | int test__open_syscall_event_on_all_cpus(void) | ||
8 | { | ||
9 | int err = -1, fd, cpu; | ||
10 | struct thread_map *threads; | ||
11 | struct cpu_map *cpus; | ||
12 | struct perf_evsel *evsel; | ||
13 | struct perf_event_attr attr; | ||
14 | unsigned int nr_open_calls = 111, i; | ||
15 | cpu_set_t cpu_set; | ||
16 | int id = trace_event__id("sys_enter_open"); | ||
17 | |||
18 | if (id < 0) { | ||
19 | pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); | ||
20 | return -1; | ||
21 | } | ||
22 | |||
23 | threads = thread_map__new(-1, getpid(), UINT_MAX); | ||
24 | if (threads == NULL) { | ||
25 | pr_debug("thread_map__new\n"); | ||
26 | return -1; | ||
27 | } | ||
28 | |||
29 | cpus = cpu_map__new(NULL); | ||
30 | if (cpus == NULL) { | ||
31 | pr_debug("cpu_map__new\n"); | ||
32 | goto out_thread_map_delete; | ||
33 | } | ||
34 | |||
35 | |||
36 | CPU_ZERO(&cpu_set); | ||
37 | |||
38 | memset(&attr, 0, sizeof(attr)); | ||
39 | attr.type = PERF_TYPE_TRACEPOINT; | ||
40 | attr.config = id; | ||
41 | evsel = perf_evsel__new(&attr, 0); | ||
42 | if (evsel == NULL) { | ||
43 | pr_debug("perf_evsel__new\n"); | ||
44 | goto out_thread_map_delete; | ||
45 | } | ||
46 | |||
47 | if (perf_evsel__open(evsel, cpus, threads) < 0) { | ||
48 | pr_debug("failed to open counter: %s, " | ||
49 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | ||
50 | strerror(errno)); | ||
51 | goto out_evsel_delete; | ||
52 | } | ||
53 | |||
54 | for (cpu = 0; cpu < cpus->nr; ++cpu) { | ||
55 | unsigned int ncalls = nr_open_calls + cpu; | ||
56 | /* | ||
57 | * XXX eventually lift this restriction in a way that | ||
58 | * keeps perf building on older glibc installations | ||
59 | * without CPU_ALLOC. 1024 cpus in 2010 still seems | ||
60 | * a reasonable upper limit tho :-) | ||
61 | */ | ||
62 | if (cpus->map[cpu] >= CPU_SETSIZE) { | ||
63 | pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); | ||
64 | continue; | ||
65 | } | ||
66 | |||
67 | CPU_SET(cpus->map[cpu], &cpu_set); | ||
68 | if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { | ||
69 | pr_debug("sched_setaffinity() failed on CPU %d: %s ", | ||
70 | cpus->map[cpu], | ||
71 | strerror(errno)); | ||
72 | goto out_close_fd; | ||
73 | } | ||
74 | for (i = 0; i < ncalls; ++i) { | ||
75 | fd = open("/etc/passwd", O_RDONLY); | ||
76 | close(fd); | ||
77 | } | ||
78 | CPU_CLR(cpus->map[cpu], &cpu_set); | ||
79 | } | ||
80 | |||
81 | /* | ||
81 | * Here we need to explicitly preallocate the counts, as if | ||

83 | * we use the auto allocation it will allocate just for 1 cpu, | ||
84 | * as we start by cpu 0. | ||
85 | */ | ||
86 | if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) { | ||
87 | pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr); | ||
88 | goto out_close_fd; | ||
89 | } | ||
90 | |||
91 | err = 0; | ||
92 | |||
93 | for (cpu = 0; cpu < cpus->nr; ++cpu) { | ||
94 | unsigned int expected; | ||
95 | |||
96 | if (cpus->map[cpu] >= CPU_SETSIZE) | ||
97 | continue; | ||
98 | |||
99 | if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) { | ||
100 | pr_debug("perf_evsel__read_on_cpu\n"); | ||
101 | err = -1; | ||
102 | break; | ||
103 | } | ||
104 | |||
105 | expected = nr_open_calls + cpu; | ||
106 | if (evsel->counts->cpu[cpu].val != expected) { | ||
107 | pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n", | ||
108 | expected, cpus->map[cpu], evsel->counts->cpu[cpu].val); | ||
109 | err = -1; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | out_close_fd: | ||
114 | perf_evsel__close_fd(evsel, 1, threads->nr); | ||
115 | out_evsel_delete: | ||
116 | perf_evsel__delete(evsel); | ||
117 | out_thread_map_delete: | ||
118 | thread_map__delete(threads); | ||
119 | return err; | ||
120 | } | ||
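
The XXX comment in open-syscall-all-cpus.c above points at the CPU_SETSIZE (1024) ceiling of a static cpu_set_t. The dynamically sized alternative the comment alludes to, CPU_ALLOC, is sketched below; the test avoids it only to keep building on older glibc, and the helper name here is made up:

#define _GNU_SOURCE
#include <sched.h>
#include <stddef.h>

/* Pin the calling thread to one CPU using a dynamically sized mask. */
static int pin_to_cpu_dynamic(int cpu, int nr_cpus)
{
        cpu_set_t *set = CPU_ALLOC(nr_cpus);
        size_t size = CPU_ALLOC_SIZE(nr_cpus);
        int err;

        if (set == NULL)
                return -1;

        CPU_ZERO_S(size, set);
        CPU_SET_S(cpu, size, set);
        err = sched_setaffinity(0, size, set);
        CPU_FREE(set);
        return err;
}
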
diff --git a/tools/perf/tests/open-syscall-tp-fields.c b/tools/perf/tests/open-syscall-tp-fields.c new file mode 100644 index 000000000000..1c52fdc1164e --- /dev/null +++ b/tools/perf/tests/open-syscall-tp-fields.c | |||
@@ -0,0 +1,117 @@ | |||
1 | #include "perf.h" | ||
2 | #include "evlist.h" | ||
3 | #include "evsel.h" | ||
4 | #include "thread_map.h" | ||
5 | #include "tests.h" | ||
6 | |||
7 | int test__syscall_open_tp_fields(void) | ||
8 | { | ||
9 | struct perf_record_opts opts = { | ||
10 | .target = { | ||
11 | .uid = UINT_MAX, | ||
12 | .uses_mmap = true, | ||
13 | }, | ||
14 | .no_delay = true, | ||
15 | .freq = 1, | ||
16 | .mmap_pages = 256, | ||
17 | .raw_samples = true, | ||
18 | }; | ||
19 | const char *filename = "/etc/passwd"; | ||
20 | int flags = O_RDONLY | O_DIRECTORY; | ||
21 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
22 | struct perf_evsel *evsel; | ||
23 | int err = -1, i, nr_events = 0, nr_polls = 0; | ||
24 | |||
25 | if (evlist == NULL) { | ||
26 | pr_debug("%s: perf_evlist__new\n", __func__); | ||
27 | goto out; | ||
28 | } | ||
29 | |||
30 | evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); | ||
31 | if (evsel == NULL) { | ||
32 | pr_debug("%s: perf_evsel__newtp\n", __func__); | ||
33 | goto out_delete_evlist; | ||
34 | } | ||
35 | |||
36 | perf_evlist__add(evlist, evsel); | ||
37 | |||
38 | err = perf_evlist__create_maps(evlist, &opts.target); | ||
39 | if (err < 0) { | ||
40 | pr_debug("%s: perf_evlist__create_maps\n", __func__); | ||
41 | goto out_delete_evlist; | ||
42 | } | ||
43 | |||
44 | perf_evsel__config(evsel, &opts); | ||
45 | |||
46 | evlist->threads->map[0] = getpid(); | ||
47 | |||
48 | err = perf_evlist__open(evlist); | ||
49 | if (err < 0) { | ||
50 | pr_debug("perf_evlist__open: %s\n", strerror(errno)); | ||
51 | goto out_delete_evlist; | ||
52 | } | ||
53 | |||
54 | err = perf_evlist__mmap(evlist, UINT_MAX, false); | ||
55 | if (err < 0) { | ||
56 | pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); | ||
57 | goto out_delete_evlist; | ||
58 | } | ||
59 | |||
60 | perf_evlist__enable(evlist); | ||
61 | |||
62 | /* | ||
63 | * Generate the event: | ||
64 | */ | ||
65 | open(filename, flags); | ||
66 | |||
67 | while (1) { | ||
68 | int before = nr_events; | ||
69 | |||
70 | for (i = 0; i < evlist->nr_mmaps; i++) { | ||
71 | union perf_event *event; | ||
72 | |||
73 | while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { | ||
74 | const u32 type = event->header.type; | ||
75 | int tp_flags; | ||
76 | struct perf_sample sample; | ||
77 | |||
78 | ++nr_events; | ||
79 | |||
80 | if (type != PERF_RECORD_SAMPLE) | ||
81 | continue; | ||
82 | |||
83 | err = perf_evsel__parse_sample(evsel, event, &sample); | ||
84 | if (err) { | ||
85 | pr_err("Can't parse sample, err = %d\n", err); | ||
86 | goto out_munmap; | ||
87 | } | ||
88 | |||
89 | tp_flags = perf_evsel__intval(evsel, &sample, "flags"); | ||
90 | |||
91 | if (flags != tp_flags) { | ||
92 | pr_debug("%s: Expected flags=%#x, got %#x\n", | ||
93 | __func__, flags, tp_flags); | ||
94 | goto out_munmap; | ||
95 | } | ||
96 | |||
97 | goto out_ok; | ||
98 | } | ||
99 | } | ||
100 | |||
101 | if (nr_events == before) | ||
102 | poll(evlist->pollfd, evlist->nr_fds, 10); | ||
103 | |||
104 | if (++nr_polls > 5) { | ||
105 | pr_debug("%s: no events!\n", __func__); | ||
106 | goto out_munmap; | ||
107 | } | ||
108 | } | ||
109 | out_ok: | ||
110 | err = 0; | ||
111 | out_munmap: | ||
112 | perf_evlist__munmap(evlist); | ||
113 | out_delete_evlist: | ||
114 | perf_evlist__delete(evlist); | ||
115 | out: | ||
116 | return err; | ||
117 | } | ||
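
The receive loop in open-syscall-tp-fields.c above follows a bounded-polling pattern: drain whatever is already in the ring buffers, and only if nothing new showed up, poll() with a short timeout, giving up after a handful of rounds so a broken kernel cannot hang the test suite. A stripped-down sketch of that loop, where drain_events() is a stand-in for the perf_evlist__mmap_read() walk:

#include <poll.h>
#include <stdio.h>

static int wait_for_events(struct pollfd *fds, int nr_fds,
                           int (*drain_events)(void))
{
        int nr_events = 0, nr_polls = 0;

        while (1) {
                int before = nr_events;

                nr_events += drain_events();
                if (nr_events > before)
                        return nr_events;       /* something arrived */

                poll(fds, nr_fds, 10);          /* wait up to 10ms */
                if (++nr_polls > 5) {
                        fprintf(stderr, "no events!\n");
                        return -1;
                }
        }
}
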
diff --git a/tools/perf/tests/open-syscall.c b/tools/perf/tests/open-syscall.c new file mode 100644 index 000000000000..98be8b518b4f --- /dev/null +++ b/tools/perf/tests/open-syscall.c | |||
@@ -0,0 +1,66 @@ | |||
1 | #include "thread_map.h" | ||
2 | #include "evsel.h" | ||
3 | #include "debug.h" | ||
4 | #include "tests.h" | ||
5 | |||
6 | int test__open_syscall_event(void) | ||
7 | { | ||
8 | int err = -1, fd; | ||
9 | struct thread_map *threads; | ||
10 | struct perf_evsel *evsel; | ||
11 | struct perf_event_attr attr; | ||
12 | unsigned int nr_open_calls = 111, i; | ||
13 | int id = trace_event__id("sys_enter_open"); | ||
14 | |||
15 | if (id < 0) { | ||
16 | pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); | ||
17 | return -1; | ||
18 | } | ||
19 | |||
20 | threads = thread_map__new(-1, getpid(), UINT_MAX); | ||
21 | if (threads == NULL) { | ||
22 | pr_debug("thread_map__new\n"); | ||
23 | return -1; | ||
24 | } | ||
25 | |||
26 | memset(&attr, 0, sizeof(attr)); | ||
27 | attr.type = PERF_TYPE_TRACEPOINT; | ||
28 | attr.config = id; | ||
29 | evsel = perf_evsel__new(&attr, 0); | ||
30 | if (evsel == NULL) { | ||
31 | pr_debug("perf_evsel__new\n"); | ||
32 | goto out_thread_map_delete; | ||
33 | } | ||
34 | |||
35 | if (perf_evsel__open_per_thread(evsel, threads) < 0) { | ||
36 | pr_debug("failed to open counter: %s, " | ||
37 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | ||
38 | strerror(errno)); | ||
39 | goto out_evsel_delete; | ||
40 | } | ||
41 | |||
42 | for (i = 0; i < nr_open_calls; ++i) { | ||
43 | fd = open("/etc/passwd", O_RDONLY); | ||
44 | close(fd); | ||
45 | } | ||
46 | |||
47 | if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) { | ||
48 | pr_debug("perf_evsel__read_on_cpu\n"); | ||
49 | goto out_close_fd; | ||
50 | } | ||
51 | |||
52 | if (evsel->counts->cpu[0].val != nr_open_calls) { | ||
53 | pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n", | ||
54 | nr_open_calls, evsel->counts->cpu[0].val); | ||
55 | goto out_close_fd; | ||
56 | } | ||
57 | |||
58 | err = 0; | ||
59 | out_close_fd: | ||
60 | perf_evsel__close_fd(evsel, 1, threads->nr); | ||
61 | out_evsel_delete: | ||
62 | perf_evsel__delete(evsel); | ||
63 | out_thread_map_delete: | ||
64 | thread_map__delete(threads); | ||
65 | return err; | ||
66 | } | ||
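
All of the open-syscall tests start from trace_event__id(), whose failure is what the "is debugfs mounted" hint refers to. What that helper boils down to is sketched here: reading the numeric id of a tracepoint from debugfs. The path layout is the usual one, but the helper below is illustrative, not the perf implementation:

#include <stdio.h>

static int tracepoint_id(const char *sys, const char *name)
{
        char path[256];
        FILE *f;
        int id = -1;

        snprintf(path, sizeof(path),
                 "/sys/kernel/debug/tracing/events/%s/%s/id", sys, name);

        f = fopen(path, "r");
        if (f == NULL)
                return -1;      /* debugfs not mounted, or no such event */

        if (fscanf(f, "%d", &id) != 1)
                id = -1;
        fclose(f);
        return id;
}
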
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index b49c2eebff33..42a0c8cd3cd5 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include "evlist.h" | 4 | #include "evlist.h" |
5 | #include "sysfs.h" | 5 | #include "sysfs.h" |
6 | #include "../../../include/linux/hw_breakpoint.h" | 6 | #include "../../../include/linux/hw_breakpoint.h" |
7 | #include "tests.h" | ||
7 | 8 | ||
8 | #define TEST_ASSERT_VAL(text, cond) \ | 9 | #define TEST_ASSERT_VAL(text, cond) \ |
9 | do { \ | 10 | do { \ |
@@ -520,7 +521,7 @@ static int test__group1(struct perf_evlist *evlist) | |||
520 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | 521 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); |
521 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | 522 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); |
522 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | 523 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); |
523 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | 524 | TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); |
524 | 525 | ||
525 | /* cycles:upp */ | 526 | /* cycles:upp */ |
526 | evsel = perf_evsel__next(evsel); | 527 | evsel = perf_evsel__next(evsel); |
@@ -556,7 +557,7 @@ static int test__group2(struct perf_evlist *evlist) | |||
556 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | 557 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); |
557 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | 558 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); |
558 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | 559 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); |
559 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | 560 | TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); |
560 | 561 | ||
561 | /* cache-references + :u modifier */ | 562 | /* cache-references + :u modifier */ |
562 | evsel = perf_evsel__next(evsel); | 563 | evsel = perf_evsel__next(evsel); |
@@ -582,7 +583,7 @@ static int test__group2(struct perf_evlist *evlist) | |||
582 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | 583 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); |
583 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | 584 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); |
584 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | 585 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); |
585 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | 586 | TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); |
586 | 587 | ||
587 | return 0; | 588 | return 0; |
588 | } | 589 | } |
@@ -605,7 +606,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) | |||
605 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); | 606 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); |
606 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | 607 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); |
607 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | 608 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); |
608 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | 609 | TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); |
609 | TEST_ASSERT_VAL("wrong group name", | 610 | TEST_ASSERT_VAL("wrong group name", |
610 | !strcmp(leader->group_name, "group1")); | 611 | !strcmp(leader->group_name, "group1")); |
611 | 612 | ||
@@ -635,7 +636,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) | |||
635 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | 636 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); |
636 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | 637 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); |
637 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | 638 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); |
638 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | 639 | TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); |
639 | TEST_ASSERT_VAL("wrong group name", | 640 | TEST_ASSERT_VAL("wrong group name", |
640 | !strcmp(leader->group_name, "group2")); | 641 | !strcmp(leader->group_name, "group2")); |
641 | 642 | ||
@@ -662,7 +663,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) | |||
662 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | 663 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); |
663 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | 664 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); |
664 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | 665 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); |
665 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | 666 | TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); |
666 | 667 | ||
667 | return 0; | 668 | return 0; |
668 | } | 669 | } |
@@ -686,7 +687,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused) | |||
686 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | 687 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); |
687 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1); | 688 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1); |
688 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); | 689 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); |
689 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | 690 | TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); |
690 | 691 | ||
691 | /* instructions:kp + p */ | 692 | /* instructions:kp + p */ |
692 | evsel = perf_evsel__next(evsel); | 693 | evsel = perf_evsel__next(evsel); |
@@ -723,7 +724,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) | |||
723 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | 724 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); |
724 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | 725 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); |
725 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); | 726 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); |
726 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | 727 | TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); |
727 | 728 | ||
728 | /* instructions + G */ | 729 | /* instructions + G */ |
729 | evsel = perf_evsel__next(evsel); | 730 | evsel = perf_evsel__next(evsel); |
@@ -750,7 +751,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) | |||
750 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | 751 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); |
751 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | 752 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); |
752 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); | 753 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); |
753 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | 754 | TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); |
754 | 755 | ||
755 | /* instructions:G */ | 756 | /* instructions:G */ |
756 | evsel = perf_evsel__next(evsel); | 757 | evsel = perf_evsel__next(evsel); |
@@ -776,7 +777,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) | |||
776 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); | 777 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); |
777 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | 778 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); |
778 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | 779 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); |
779 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | 780 | TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); |
780 | 781 | ||
781 | return 0; | 782 | return 0; |
782 | } | 783 | } |
@@ -1086,7 +1087,7 @@ static int test_pmu_events(void) | |||
1086 | return ret; | 1087 | return ret; |
1087 | } | 1088 | } |
1088 | 1089 | ||
1089 | int parse_events__test(void) | 1090 | int test__parse_events(void) |
1090 | { | 1091 | { |
1091 | int ret1, ret2 = 0; | 1092 | int ret1, ret2 = 0; |
1092 | 1093 | ||
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c new file mode 100644 index 000000000000..70e0d4421df8 --- /dev/null +++ b/tools/perf/tests/perf-record.c | |||
@@ -0,0 +1,312 @@ | |||
1 | #include <sched.h> | ||
2 | #include "evlist.h" | ||
3 | #include "evsel.h" | ||
4 | #include "perf.h" | ||
5 | #include "debug.h" | ||
6 | #include "tests.h" | ||
7 | |||
8 | static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp) | ||
9 | { | ||
10 | int i, cpu = -1, nrcpus = 1024; | ||
11 | realloc: | ||
12 | CPU_ZERO(maskp); | ||
13 | |||
14 | if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) { | ||
15 | if (errno == EINVAL && nrcpus < (1024 << 8)) { | ||
16 | nrcpus = nrcpus << 2; | ||
17 | goto realloc; | ||
18 | } | ||
19 | perror("sched_getaffinity"); | ||
20 | return -1; | ||
21 | } | ||
22 | |||
23 | for (i = 0; i < nrcpus; i++) { | ||
24 | if (CPU_ISSET(i, maskp)) { | ||
25 | if (cpu == -1) | ||
26 | cpu = i; | ||
27 | else | ||
28 | CPU_CLR(i, maskp); | ||
29 | } | ||
30 | } | ||
31 | |||
32 | return cpu; | ||
33 | } | ||
34 | |||
35 | int test__PERF_RECORD(void) | ||
36 | { | ||
37 | struct perf_record_opts opts = { | ||
38 | .target = { | ||
39 | .uid = UINT_MAX, | ||
40 | .uses_mmap = true, | ||
41 | }, | ||
42 | .no_delay = true, | ||
43 | .freq = 10, | ||
44 | .mmap_pages = 256, | ||
45 | }; | ||
46 | cpu_set_t cpu_mask; | ||
47 | size_t cpu_mask_size = sizeof(cpu_mask); | ||
48 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
49 | struct perf_evsel *evsel; | ||
50 | struct perf_sample sample; | ||
51 | const char *cmd = "sleep"; | ||
52 | const char *argv[] = { cmd, "1", NULL, }; | ||
53 | char *bname; | ||
54 | u64 prev_time = 0; | ||
55 | bool found_cmd_mmap = false, | ||
56 | found_libc_mmap = false, | ||
57 | found_vdso_mmap = false, | ||
58 | found_ld_mmap = false; | ||
59 | int err = -1, errs = 0, i, wakeups = 0; | ||
60 | u32 cpu; | ||
61 | int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; | ||
62 | |||
63 | if (evlist == NULL || argv == NULL) { | ||
64 | pr_debug("Not enough memory to create evlist\n"); | ||
65 | goto out; | ||
66 | } | ||
67 | |||
68 | /* | ||
69 | * We need at least one evsel in the evlist, use the default | ||
70 | * one: "cycles". | ||
71 | */ | ||
72 | err = perf_evlist__add_default(evlist); | ||
73 | if (err < 0) { | ||
74 | pr_debug("Not enough memory to create evsel\n"); | ||
75 | goto out_delete_evlist; | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * Create maps of threads and cpus to monitor. In this case | ||
80 | * we start with all threads and cpus (-1, -1) but then in | ||
81 | * perf_evlist__prepare_workload we'll fill in the only thread | ||
82 | * we're monitoring, the one forked there. | ||
83 | */ | ||
84 | err = perf_evlist__create_maps(evlist, &opts.target); | ||
85 | if (err < 0) { | ||
86 | pr_debug("Not enough memory to create thread/cpu maps\n"); | ||
87 | goto out_delete_evlist; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Prepare the workload in argv[] to run, it'll fork it, and then wait | ||
92 | * for perf_evlist__start_workload() to exec it. This is done this way | ||
93 | * so that we have time to open the evlist (calling sys_perf_event_open | ||
94 | * on all the fds) and then mmap them. | ||
95 | */ | ||
96 | err = perf_evlist__prepare_workload(evlist, &opts, argv); | ||
97 | if (err < 0) { | ||
98 | pr_debug("Couldn't run the workload!\n"); | ||
99 | goto out_delete_evlist; | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * Config the evsels, setting attr->comm on the first one, etc. | ||
104 | */ | ||
105 | evsel = perf_evlist__first(evlist); | ||
106 | evsel->attr.sample_type |= PERF_SAMPLE_CPU; | ||
107 | evsel->attr.sample_type |= PERF_SAMPLE_TID; | ||
108 | evsel->attr.sample_type |= PERF_SAMPLE_TIME; | ||
109 | perf_evlist__config_attrs(evlist, &opts); | ||
110 | |||
111 | err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); | ||
112 | if (err < 0) { | ||
113 | pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); | ||
114 | goto out_delete_evlist; | ||
115 | } | ||
116 | |||
117 | cpu = err; | ||
118 | |||
119 | /* | ||
120 | * So that we can check perf_sample.cpu on all the samples. | ||
121 | */ | ||
122 | if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { | ||
123 | pr_debug("sched_setaffinity: %s\n", strerror(errno)); | ||
124 | goto out_delete_evlist; | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * Call sys_perf_event_open on all the fds on all the evsels, | ||
129 | * grouping them if asked to. | ||
130 | */ | ||
131 | err = perf_evlist__open(evlist); | ||
132 | if (err < 0) { | ||
133 | pr_debug("perf_evlist__open: %s\n", strerror(errno)); | ||
134 | goto out_delete_evlist; | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * mmap the first fd on a given CPU and ask for events for the other | ||
139 | * fds in the same CPU to be injected in the same mmap ring buffer | ||
140 | * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)). | ||
141 | */ | ||
142 | err = perf_evlist__mmap(evlist, opts.mmap_pages, false); | ||
143 | if (err < 0) { | ||
144 | pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); | ||
145 | goto out_delete_evlist; | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * Now that all is properly set up, enable the events, they will | ||
150 | * count just on workload.pid, which will start... | ||
151 | */ | ||
152 | perf_evlist__enable(evlist); | ||
153 | |||
154 | /* | ||
155 | * Now! | ||
156 | */ | ||
157 | perf_evlist__start_workload(evlist); | ||
158 | |||
159 | while (1) { | ||
160 | int before = total_events; | ||
161 | |||
162 | for (i = 0; i < evlist->nr_mmaps; i++) { | ||
163 | union perf_event *event; | ||
164 | |||
165 | while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { | ||
166 | const u32 type = event->header.type; | ||
167 | const char *name = perf_event__name(type); | ||
168 | |||
169 | ++total_events; | ||
170 | if (type < PERF_RECORD_MAX) | ||
171 | nr_events[type]++; | ||
172 | |||
173 | err = perf_evlist__parse_sample(evlist, event, &sample); | ||
174 | if (err < 0) { | ||
175 | if (verbose) | ||
176 | perf_event__fprintf(event, stderr); | ||
177 | pr_debug("Couldn't parse sample\n"); | ||
178 | goto out_err; | ||
179 | } | ||
180 | |||
181 | if (verbose) { | ||
182 | pr_info("%" PRIu64" %d ", sample.time, sample.cpu); | ||
183 | perf_event__fprintf(event, stderr); | ||
184 | } | ||
185 | |||
186 | if (prev_time > sample.time) { | ||
187 | pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n", | ||
188 | name, prev_time, sample.time); | ||
189 | ++errs; | ||
190 | } | ||
191 | |||
192 | prev_time = sample.time; | ||
193 | |||
194 | if (sample.cpu != cpu) { | ||
195 | pr_debug("%s with unexpected cpu, expected %d, got %d\n", | ||
196 | name, cpu, sample.cpu); | ||
197 | ++errs; | ||
198 | } | ||
199 | |||
200 | if ((pid_t)sample.pid != evlist->workload.pid) { | ||
201 | pr_debug("%s with unexpected pid, expected %d, got %d\n", | ||
202 | name, evlist->workload.pid, sample.pid); | ||
203 | ++errs; | ||
204 | } | ||
205 | |||
206 | if ((pid_t)sample.tid != evlist->workload.pid) { | ||
207 | pr_debug("%s with unexpected tid, expected %d, got %d\n", | ||
208 | name, evlist->workload.pid, sample.tid); | ||
209 | ++errs; | ||
210 | } | ||
211 | |||
212 | if ((type == PERF_RECORD_COMM || | ||
213 | type == PERF_RECORD_MMAP || | ||
214 | type == PERF_RECORD_FORK || | ||
215 | type == PERF_RECORD_EXIT) && | ||
216 | (pid_t)event->comm.pid != evlist->workload.pid) { | ||
217 | pr_debug("%s with unexpected pid/tid\n", name); | ||
218 | ++errs; | ||
219 | } | ||
220 | |||
221 | if ((type == PERF_RECORD_COMM || | ||
222 | type == PERF_RECORD_MMAP) && | ||
223 | event->comm.pid != event->comm.tid) { | ||
224 | pr_debug("%s with different pid/tid!\n", name); | ||
225 | ++errs; | ||
226 | } | ||
227 | |||
228 | switch (type) { | ||
229 | case PERF_RECORD_COMM: | ||
230 | if (strcmp(event->comm.comm, cmd)) { | ||
231 | pr_debug("%s with unexpected comm!\n", name); | ||
232 | ++errs; | ||
233 | } | ||
234 | break; | ||
235 | case PERF_RECORD_EXIT: | ||
236 | goto found_exit; | ||
237 | case PERF_RECORD_MMAP: | ||
238 | bname = strrchr(event->mmap.filename, '/'); | ||
239 | if (bname != NULL) { | ||
240 | if (!found_cmd_mmap) | ||
241 | found_cmd_mmap = !strcmp(bname + 1, cmd); | ||
242 | if (!found_libc_mmap) | ||
243 | found_libc_mmap = !strncmp(bname + 1, "libc", 4); | ||
244 | if (!found_ld_mmap) | ||
245 | found_ld_mmap = !strncmp(bname + 1, "ld", 2); | ||
246 | } else if (!found_vdso_mmap) | ||
247 | found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]"); | ||
248 | break; | ||
249 | |||
250 | case PERF_RECORD_SAMPLE: | ||
251 | /* Just ignore samples for now */ | ||
252 | break; | ||
253 | default: | ||
254 | pr_debug("Unexpected perf_event->header.type %d!\n", | ||
255 | type); | ||
256 | ++errs; | ||
257 | } | ||
258 | } | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * We don't use poll here because at least at 3.1 times the | ||
263 | * PERF_RECORD_{!SAMPLE} events don't honour | ||
264 | * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does. | ||
265 | */ | ||
266 | if (total_events == before && false) | ||
267 | poll(evlist->pollfd, evlist->nr_fds, -1); | ||
268 | |||
269 | sleep(1); | ||
270 | if (++wakeups > 5) { | ||
271 | pr_debug("No PERF_RECORD_EXIT event!\n"); | ||
272 | break; | ||
273 | } | ||
274 | } | ||
275 | |||
276 | found_exit: | ||
277 | if (nr_events[PERF_RECORD_COMM] > 1) { | ||
278 | pr_debug("Excessive number of PERF_RECORD_COMM events!\n"); | ||
279 | ++errs; | ||
280 | } | ||
281 | |||
282 | if (nr_events[PERF_RECORD_COMM] == 0) { | ||
283 | pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd); | ||
284 | ++errs; | ||
285 | } | ||
286 | |||
287 | if (!found_cmd_mmap) { | ||
288 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd); | ||
289 | ++errs; | ||
290 | } | ||
291 | |||
292 | if (!found_libc_mmap) { | ||
293 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc"); | ||
294 | ++errs; | ||
295 | } | ||
296 | |||
297 | if (!found_ld_mmap) { | ||
298 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld"); | ||
299 | ++errs; | ||
300 | } | ||
301 | |||
302 | if (!found_vdso_mmap) { | ||
303 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]"); | ||
304 | ++errs; | ||
305 | } | ||
306 | out_err: | ||
307 | perf_evlist__munmap(evlist); | ||
308 | out_delete_evlist: | ||
309 | perf_evlist__delete(evlist); | ||
310 | out: | ||
311 | return (err < 0 || errs > 0) ? -1 : 0; | ||
312 | } | ||
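
test__PERF_RECORD above performs two kinds of per-event bookkeeping: it tallies events by type and it checks that sample timestamps never run backwards. Reduced to its essentials, with stand-in types and PERF_RECORD_MAX replaced by a made-up constant, that accounting looks like this:

#include <stdint.h>
#include <stdio.h>

#define NR_TYPES 16     /* stand-in for PERF_RECORD_MAX */

struct stream_check {
        uint64_t prev_time;
        int      nr_events[NR_TYPES];
        int      errs;
};

static void account_event(struct stream_check *c, unsigned int type, uint64_t time)
{
        if (type < NR_TYPES)
                c->nr_events[type]++;

        if (c->prev_time > time) {      /* timestamps must be monotonic per stream */
                fprintf(stderr, "going backwards in time, prev=%llu, curr=%llu\n",
                        (unsigned long long)c->prev_time,
                        (unsigned long long)time);
                c->errs++;
        }
        c->prev_time = time;
}
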
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c new file mode 100644 index 000000000000..a5f379863b8f --- /dev/null +++ b/tools/perf/tests/pmu.c | |||
@@ -0,0 +1,178 @@ | |||
1 | #include "parse-events.h" | ||
2 | #include "pmu.h" | ||
3 | #include "util.h" | ||
4 | #include "tests.h" | ||
5 | |||
6 | /* Simulated format definitions. */ | ||
7 | static struct test_format { | ||
8 | const char *name; | ||
9 | const char *value; | ||
10 | } test_formats[] = { | ||
11 | { "krava01", "config:0-1,62-63\n", }, | ||
12 | { "krava02", "config:10-17\n", }, | ||
13 | { "krava03", "config:5\n", }, | ||
14 | { "krava11", "config1:0,2,4,6,8,20-28\n", }, | ||
15 | { "krava12", "config1:63\n", }, | ||
16 | { "krava13", "config1:45-47\n", }, | ||
17 | { "krava21", "config2:0-3,10-13,20-23,30-33,40-43,50-53,60-63\n", }, | ||
18 | { "krava22", "config2:8,18,48,58\n", }, | ||
19 | { "krava23", "config2:28-29,38\n", }, | ||
20 | }; | ||
21 | |||
22 | #define TEST_FORMATS_CNT (sizeof(test_formats) / sizeof(struct test_format)) | ||
23 | |||
24 | /* Simulated users input. */ | ||
25 | static struct parse_events__term test_terms[] = { | ||
26 | { | ||
27 | .config = (char *) "krava01", | ||
28 | .val.num = 15, | ||
29 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
30 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
31 | }, | ||
32 | { | ||
33 | .config = (char *) "krava02", | ||
34 | .val.num = 170, | ||
35 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
36 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
37 | }, | ||
38 | { | ||
39 | .config = (char *) "krava03", | ||
40 | .val.num = 1, | ||
41 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
42 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
43 | }, | ||
44 | { | ||
45 | .config = (char *) "krava11", | ||
46 | .val.num = 27, | ||
47 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
48 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
49 | }, | ||
50 | { | ||
51 | .config = (char *) "krava12", | ||
52 | .val.num = 1, | ||
53 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
54 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
55 | }, | ||
56 | { | ||
57 | .config = (char *) "krava13", | ||
58 | .val.num = 2, | ||
59 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
60 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
61 | }, | ||
62 | { | ||
63 | .config = (char *) "krava21", | ||
64 | .val.num = 119, | ||
65 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
66 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
67 | }, | ||
68 | { | ||
69 | .config = (char *) "krava22", | ||
70 | .val.num = 11, | ||
71 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
72 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
73 | }, | ||
74 | { | ||
75 | .config = (char *) "krava23", | ||
76 | .val.num = 2, | ||
77 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
78 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
79 | }, | ||
80 | }; | ||
81 | #define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term)) | ||
82 | |||
83 | /* | ||
84 | * Prepare format directory data, as exported by the kernel | ||
85 | * at /sys/bus/event_source/devices/<dev>/format. | ||
86 | */ | ||
87 | static char *test_format_dir_get(void) | ||
88 | { | ||
89 | static char dir[PATH_MAX]; | ||
90 | unsigned int i; | ||
91 | |||
92 | snprintf(dir, PATH_MAX, "/tmp/perf-pmu-test-format-XXXXXX"); | ||
93 | if (!mkdtemp(dir)) | ||
94 | return NULL; | ||
95 | |||
96 | for (i = 0; i < TEST_FORMATS_CNT; i++) { | ||
97 | static char name[PATH_MAX]; | ||
98 | struct test_format *format = &test_formats[i]; | ||
99 | FILE *file; | ||
100 | |||
101 | snprintf(name, PATH_MAX, "%s/%s", dir, format->name); | ||
102 | |||
103 | file = fopen(name, "w"); | ||
104 | if (!file) | ||
105 | return NULL; | ||
106 | |||
107 | if (1 != fwrite(format->value, strlen(format->value), 1, file)) | ||
108 | break; | ||
109 | |||
110 | fclose(file); | ||
111 | } | ||
112 | |||
113 | return dir; | ||
114 | } | ||
115 | |||
116 | /* Cleanup format directory. */ | ||
117 | static int test_format_dir_put(char *dir) | ||
118 | { | ||
119 | char buf[PATH_MAX]; | ||
120 | snprintf(buf, PATH_MAX, "rm -f %s/*\n", dir); | ||
121 | if (system(buf)) | ||
122 | return -1; | ||
123 | |||
124 | snprintf(buf, PATH_MAX, "rmdir %s\n", dir); | ||
125 | return system(buf); | ||
126 | } | ||
127 | |||
128 | static struct list_head *test_terms_list(void) | ||
129 | { | ||
130 | static LIST_HEAD(terms); | ||
131 | unsigned int i; | ||
132 | |||
133 | for (i = 0; i < TERMS_CNT; i++) | ||
134 | list_add_tail(&test_terms[i].list, &terms); | ||
135 | |||
136 | return &terms; | ||
137 | } | ||
138 | |||
139 | #undef TERMS_CNT | ||
140 | |||
141 | int test__pmu(void) | ||
142 | { | ||
143 | char *format = test_format_dir_get(); | ||
144 | LIST_HEAD(formats); | ||
145 | struct list_head *terms = test_terms_list(); | ||
146 | int ret; | ||
147 | |||
148 | if (!format) | ||
149 | return -EINVAL; | ||
150 | |||
151 | do { | ||
152 | struct perf_event_attr attr; | ||
153 | |||
154 | memset(&attr, 0, sizeof(attr)); | ||
155 | |||
156 | ret = perf_pmu__format_parse(format, &formats); | ||
157 | if (ret) | ||
158 | break; | ||
159 | |||
160 | ret = perf_pmu__config_terms(&formats, &attr, terms); | ||
161 | if (ret) | ||
162 | break; | ||
163 | |||
164 | ret = -EINVAL; | ||
165 | |||
166 | if (attr.config != 0xc00000000002a823) | ||
167 | break; | ||
168 | if (attr.config1 != 0x8000400000000145) | ||
169 | break; | ||
170 | if (attr.config2 != 0x0400000020041d07) | ||
171 | break; | ||
172 | |||
173 | ret = 0; | ||
174 | } while (0); | ||
175 | |||
176 | test_format_dir_put(format); | ||
177 | return ret; | ||
178 | } | ||
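For reference, the expected attr.config/config1/config2 values checked above follow mechanically from the format definitions: each term's value is spread, lowest bit first, over the config bits listed for its format, and the per-term results are OR'd together. A minimal standalone sketch of that packing for config (illustrative only; pack_term() is a made-up helper, not the perf implementation):

	#include <stdint.h>
	#include <stdio.h>

	/* Assign the low bits of 'val', in order, to the config bit positions in 'pos'. */
	static uint64_t pack_term(const int *pos, int npos, uint64_t val)
	{
		uint64_t config = 0;
		int i;

		for (i = 0; i < npos; i++)
			if (val & (1ULL << i))
				config |= 1ULL << pos[i];
		return config;
	}

	int main(void)
	{
		/* krava01 config:0-1,62-63 = 15; krava02 config:10-17 = 170; krava03 config:5 = 1 */
		const int krava01[] = { 0, 1, 62, 63 };
		const int krava02[] = { 10, 11, 12, 13, 14, 15, 16, 17 };
		const int krava03[] = { 5 };
		uint64_t config = pack_term(krava01, 4, 15) |
				  pack_term(krava02, 8, 170) |
				  pack_term(krava03, 1, 1);

		printf("config = %#llx\n", (unsigned long long)config);	/* 0xc00000000002a823 */
		return 0;
	}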
diff --git a/tools/perf/tests/rdpmc.c b/tools/perf/tests/rdpmc.c new file mode 100644 index 000000000000..ff94886aad99 --- /dev/null +++ b/tools/perf/tests/rdpmc.c | |||
@@ -0,0 +1,175 @@ | |||
1 | #include <unistd.h> | ||
2 | #include <stdlib.h> | ||
3 | #include <signal.h> | ||
4 | #include <sys/mman.h> | ||
5 | #include "types.h" | ||
6 | #include "perf.h" | ||
7 | #include "debug.h" | ||
8 | #include "tests.h" | ||
9 | |||
10 | #if defined(__x86_64__) || defined(__i386__) | ||
11 | |||
12 | #define barrier() asm volatile("" ::: "memory") | ||
13 | |||
14 | static u64 rdpmc(unsigned int counter) | ||
15 | { | ||
16 | unsigned int low, high; | ||
17 | |||
18 | asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter)); | ||
19 | |||
20 | return low | ((u64)high) << 32; | ||
21 | } | ||
22 | |||
23 | static u64 rdtsc(void) | ||
24 | { | ||
25 | unsigned int low, high; | ||
26 | |||
27 | asm volatile("rdtsc" : "=a" (low), "=d" (high)); | ||
28 | |||
29 | return low | ((u64)high) << 32; | ||
30 | } | ||
31 | |||
32 | static u64 mmap_read_self(void *addr) | ||
33 | { | ||
34 | struct perf_event_mmap_page *pc = addr; | ||
35 | u32 seq, idx, time_mult = 0, time_shift = 0; | ||
36 | u64 count, cyc = 0, time_offset = 0, enabled, running, delta; | ||
37 | |||
38 | do { | ||
39 | seq = pc->lock; | ||
40 | barrier(); | ||
41 | |||
42 | enabled = pc->time_enabled; | ||
43 | running = pc->time_running; | ||
44 | |||
45 | if (enabled != running) { | ||
46 | cyc = rdtsc(); | ||
47 | time_mult = pc->time_mult; | ||
48 | time_shift = pc->time_shift; | ||
49 | time_offset = pc->time_offset; | ||
50 | } | ||
51 | |||
52 | idx = pc->index; | ||
53 | count = pc->offset; | ||
54 | if (idx) | ||
55 | count += rdpmc(idx - 1); | ||
56 | |||
57 | barrier(); | ||
58 | } while (pc->lock != seq); | ||
59 | |||
60 | if (enabled != running) { | ||
61 | u64 quot, rem; | ||
62 | |||
63 | quot = (cyc >> time_shift); | ||
64 | rem = cyc & ((1 << time_shift) - 1); | ||
65 | delta = time_offset + quot * time_mult + | ||
66 | ((rem * time_mult) >> time_shift); | ||
67 | |||
68 | enabled += delta; | ||
69 | if (idx) | ||
70 | running += delta; | ||
71 | |||
72 | quot = count / running; | ||
73 | rem = count % running; | ||
74 | count = quot * enabled + (rem * enabled) / running; | ||
75 | } | ||
76 | |||
77 | return count; | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * If the RDPMC instruction faults, signal this back to the parent test task: | ||
82 | */ | ||
83 | static void segfault_handler(int sig __maybe_unused, | ||
84 | siginfo_t *info __maybe_unused, | ||
85 | void *uc __maybe_unused) | ||
86 | { | ||
87 | exit(-1); | ||
88 | } | ||
89 | |||
90 | static int __test__rdpmc(void) | ||
91 | { | ||
92 | volatile int tmp = 0; | ||
93 | u64 i, loops = 1000; | ||
94 | int n; | ||
95 | int fd; | ||
96 | void *addr; | ||
97 | struct perf_event_attr attr = { | ||
98 | .type = PERF_TYPE_HARDWARE, | ||
99 | .config = PERF_COUNT_HW_INSTRUCTIONS, | ||
100 | .exclude_kernel = 1, | ||
101 | }; | ||
102 | u64 delta_sum = 0; | ||
103 | struct sigaction sa; | ||
104 | |||
105 | sigfillset(&sa.sa_mask); | ||
106 | sa.sa_sigaction = segfault_handler; | ||
107 | sigaction(SIGSEGV, &sa, NULL); | ||
108 | |||
109 | fd = sys_perf_event_open(&attr, 0, -1, -1, 0); | ||
110 | if (fd < 0) { | ||
111 | pr_err("Error: sys_perf_event_open() syscall returned " | ||
112 | "with %d (%s)\n", fd, strerror(errno)); | ||
113 | return -1; | ||
114 | } | ||
115 | |||
116 | addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); | ||
117 | if (addr == (void *)(-1)) { | ||
118 | pr_err("Error: mmap() syscall returned with (%s)\n", | ||
119 | strerror(errno)); | ||
120 | goto out_close; | ||
121 | } | ||
122 | |||
123 | for (n = 0; n < 6; n++) { | ||
124 | u64 stamp, now, delta; | ||
125 | |||
126 | stamp = mmap_read_self(addr); | ||
127 | |||
128 | for (i = 0; i < loops; i++) | ||
129 | tmp++; | ||
130 | |||
131 | now = mmap_read_self(addr); | ||
132 | loops *= 10; | ||
133 | |||
134 | delta = now - stamp; | ||
135 | pr_debug("%14d: %14Lu\n", n, (long long)delta); | ||
136 | |||
137 | delta_sum += delta; | ||
138 | } | ||
139 | |||
140 | munmap(addr, page_size); | ||
141 | pr_debug(" "); | ||
142 | out_close: | ||
143 | close(fd); | ||
144 | |||
145 | if (!delta_sum) | ||
146 | return -1; | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | int test__rdpmc(void) | ||
152 | { | ||
153 | int status = 0; | ||
154 | int wret = 0; | ||
155 | int ret; | ||
156 | int pid; | ||
157 | |||
158 | pid = fork(); | ||
159 | if (pid < 0) | ||
160 | return -1; | ||
161 | |||
162 | if (!pid) { | ||
163 | ret = __test__rdpmc(); | ||
164 | |||
165 | exit(ret); | ||
166 | } | ||
167 | |||
168 | wret = waitpid(pid, &status, 0); | ||
169 | if (wret < 0 || status) | ||
170 | return -1; | ||
171 | |||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | #endif | ||
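One detail of mmap_read_self() worth spelling out: the quot/rem split used when scaling the count is there so the multiplication cannot overflow 64 bits, since count * enabled may not fit in a u64. With integer division the two forms are exactly equal; a tiny self-contained check (the sample numbers are arbitrary):

	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		uint64_t count = 123456789, enabled = 2000000, running = 1500000;
		uint64_t quot = count / running, rem = count % running;
		uint64_t scaled = quot * enabled + (rem * enabled) / running;

		/* Same value as count * enabled / running; here the direct product
		 * still fits in 64 bits, so the two can be compared. */
		assert(scaled == count * enabled / running);
		return 0;
	}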
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h new file mode 100644 index 000000000000..fc121edab016 --- /dev/null +++ b/tools/perf/tests/tests.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef TESTS_H | ||
2 | #define TESTS_H | ||
3 | |||
4 | /* Tests */ | ||
5 | int test__vmlinux_matches_kallsyms(void); | ||
6 | int test__open_syscall_event(void); | ||
7 | int test__open_syscall_event_on_all_cpus(void); | ||
8 | int test__basic_mmap(void); | ||
9 | int test__PERF_RECORD(void); | ||
10 | int test__rdpmc(void); | ||
11 | int test__perf_evsel__roundtrip_name_test(void); | ||
12 | int test__perf_evsel__tp_sched_test(void); | ||
13 | int test__syscall_open_tp_fields(void); | ||
14 | int test__pmu(void); | ||
15 | int test__attr(void); | ||
16 | int test__dso_data(void); | ||
17 | int test__parse_events(void); | ||
18 | |||
19 | /* Util */ | ||
20 | int trace_event__id(const char *evname); | ||
21 | |||
22 | #endif /* TESTS_H */ | ||
diff --git a/tools/perf/tests/util.c b/tools/perf/tests/util.c new file mode 100644 index 000000000000..748f2e8f6961 --- /dev/null +++ b/tools/perf/tests/util.c | |||
@@ -0,0 +1,30 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <unistd.h> | ||
3 | #include <stdlib.h> | ||
4 | #include <sys/types.h> | ||
5 | #include <sys/stat.h> | ||
6 | #include <fcntl.h> | ||
7 | #include "tests.h" | ||
8 | #include "debugfs.h" | ||
9 | |||
10 | int trace_event__id(const char *evname) | ||
11 | { | ||
12 | char *filename; | ||
13 | int err = -1, fd; | ||
14 | |||
15 | if (asprintf(&filename, | ||
16 | "%s/syscalls/%s/id", | ||
17 | tracing_events_path, evname) < 0) | ||
18 | return -1; | ||
19 | |||
20 | fd = open(filename, O_RDONLY); | ||
21 | if (fd >= 0) { | ||
22 | char id[16]; | ||
23 | if (read(fd, id, sizeof(id)) > 0) | ||
24 | err = atoi(id); | ||
25 | close(fd); | ||
26 | } | ||
27 | |||
28 | free(filename); | ||
29 | return err; | ||
30 | } | ||
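A hypothetical caller of the helper above (the event name is only an example): trace_event__id() builds <tracing_events_path>/syscalls/<evname>/id -- with the usual debugfs mount that is /sys/kernel/debug/tracing/events/syscalls/<evname>/id -- and returns the integer read from that file, or -1 on error.

	int id = trace_event__id("sys_enter_open");
	if (id < 0)
		pr_debug("couldn't resolve the tracepoint id\n");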
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c new file mode 100644 index 000000000000..0d1cdbee2f59 --- /dev/null +++ b/tools/perf/tests/vmlinux-kallsyms.c | |||
@@ -0,0 +1,230 @@ | |||
1 | #include <linux/compiler.h> | ||
2 | #include <linux/rbtree.h> | ||
3 | #include <string.h> | ||
4 | #include "map.h" | ||
5 | #include "symbol.h" | ||
6 | #include "util.h" | ||
7 | #include "tests.h" | ||
8 | #include "debug.h" | ||
9 | #include "machine.h" | ||
10 | |||
11 | static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused, | ||
12 | struct symbol *sym) | ||
13 | { | ||
14 | bool *visited = symbol__priv(sym); | ||
15 | *visited = true; | ||
16 | return 0; | ||
17 | } | ||
18 | |||
19 | int test__vmlinux_matches_kallsyms(void) | ||
20 | { | ||
21 | int err = -1; | ||
22 | struct rb_node *nd; | ||
23 | struct symbol *sym; | ||
24 | struct map *kallsyms_map, *vmlinux_map; | ||
25 | struct machine kallsyms, vmlinux; | ||
26 | enum map_type type = MAP__FUNCTION; | ||
27 | struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", }; | ||
28 | |||
29 | /* | ||
30 | * Step 1: | ||
31 | * | ||
32 | * Init the machines that will hold the kernel and modules, obtained | ||
33 | * from both vmlinux + .ko files and from /proc/kallsyms, split by module. | ||
34 | */ | ||
35 | machine__init(&kallsyms, "", HOST_KERNEL_ID); | ||
36 | machine__init(&vmlinux, "", HOST_KERNEL_ID); | ||
37 | |||
38 | /* | ||
39 | * Step 2: | ||
40 | * | ||
41 | * Create the kernel maps for kallsyms and the DSO where we will then | ||
42 | * load /proc/kallsyms. Also create the module maps from /proc/modules | ||
43 | * and find the .ko files that match them in /lib/modules/`uname -r`/. | ||
44 | */ | ||
45 | if (machine__create_kernel_maps(&kallsyms) < 0) { | ||
46 | pr_debug("machine__create_kernel_maps "); | ||
47 | return -1; | ||
48 | } | ||
49 | |||
50 | /* | ||
51 | * Step 3: | ||
52 | * | ||
53 | * Load and split /proc/kallsyms into multiple maps, one per module. | ||
54 | */ | ||
55 | if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) { | ||
56 | pr_debug("dso__load_kallsyms "); | ||
57 | goto out; | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * Step 4: | ||
62 | * | ||
63 | * kallsyms will be internally sorted on demand by name so that we can | ||
64 | * find the reference relocation symbol, i.e. the symbol we will use | ||
65 | * to see if the running kernel was relocated by checking if it has the | ||
66 | * same value in the vmlinux file we load. | ||
67 | */ | ||
68 | kallsyms_map = machine__kernel_map(&kallsyms, type); | ||
69 | |||
70 | sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL); | ||
71 | if (sym == NULL) { | ||
72 | pr_debug("dso__find_symbol_by_name "); | ||
73 | goto out; | ||
74 | } | ||
75 | |||
76 | ref_reloc_sym.addr = sym->start; | ||
77 | |||
78 | /* | ||
79 | * Step 5: | ||
80 | * | ||
81 | * Now repeat step 2, this time for the vmlinux file we'll auto-locate. | ||
82 | */ | ||
83 | if (machine__create_kernel_maps(&vmlinux) < 0) { | ||
84 | pr_debug("machine__create_kernel_maps "); | ||
85 | goto out; | ||
86 | } | ||
87 | |||
88 | vmlinux_map = machine__kernel_map(&vmlinux, type); | ||
89 | map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym; | ||
90 | |||
91 | /* | ||
92 | * Step 6: | ||
93 | * | ||
94 | * Locate a vmlinux file in the vmlinux path that has a buildid that | ||
95 | * matches the one of the running kernel. | ||
96 | * | ||
97 | * While doing that, look for the ref reloc symbol; if we find it | ||
98 | * we'll have its ref_reloc_symbol.unrelocated_addr and then | ||
99 | * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines | ||
100 | * to fixup the symbols. | ||
101 | */ | ||
102 | if (machine__load_vmlinux_path(&vmlinux, type, | ||
103 | vmlinux_matches_kallsyms_filter) <= 0) { | ||
104 | pr_debug("machine__load_vmlinux_path "); | ||
105 | goto out; | ||
106 | } | ||
107 | |||
108 | err = 0; | ||
109 | /* | ||
110 | * Step 7: | ||
111 | * | ||
112 | * Now look at the symbols in the vmlinux DSO and check if we find all of them | ||
113 | * in the kallsyms DSO. For the ones that are in both, check their names and | ||
114 | * end addresses too. | ||
115 | */ | ||
116 | for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) { | ||
117 | struct symbol *pair, *first_pair; | ||
118 | bool backwards = true; | ||
119 | |||
120 | sym = rb_entry(nd, struct symbol, rb_node); | ||
121 | |||
122 | if (sym->start == sym->end) | ||
123 | continue; | ||
124 | |||
125 | first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL); | ||
126 | pair = first_pair; | ||
127 | |||
128 | if (pair && pair->start == sym->start) { | ||
129 | next_pair: | ||
130 | if (strcmp(sym->name, pair->name) == 0) { | ||
131 | /* | ||
132 | * kallsyms doesn't have the symbol end, so we | ||
133 | * set it to the next symbol's start - 1. In | ||
134 | * some cases this is off by up to a page; | ||
135 | * trace_kmalloc, while this code was being | ||
136 | * developed, was one such example, 2106 bytes | ||
137 | * off the real size. More than that and we | ||
138 | * _really_ have a problem. | ||
139 | */ | ||
140 | s64 skew = sym->end - pair->end; | ||
141 | if (llabs(skew) < page_size) | ||
142 | continue; | ||
143 | |||
144 | pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n", | ||
145 | sym->start, sym->name, sym->end, pair->end); | ||
146 | } else { | ||
147 | struct rb_node *nnd; | ||
148 | detour: | ||
149 | nnd = backwards ? rb_prev(&pair->rb_node) : | ||
150 | rb_next(&pair->rb_node); | ||
151 | if (nnd) { | ||
152 | struct symbol *next = rb_entry(nnd, struct symbol, rb_node); | ||
153 | |||
154 | if (next->start == sym->start) { | ||
155 | pair = next; | ||
156 | goto next_pair; | ||
157 | } | ||
158 | } | ||
159 | |||
160 | if (backwards) { | ||
161 | backwards = false; | ||
162 | pair = first_pair; | ||
163 | goto detour; | ||
164 | } | ||
165 | |||
166 | pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n", | ||
167 | sym->start, sym->name, pair->name); | ||
168 | } | ||
169 | } else | ||
170 | pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name); | ||
171 | |||
172 | err = -1; | ||
173 | } | ||
174 | |||
175 | if (!verbose) | ||
176 | goto out; | ||
177 | |||
178 | pr_info("Maps only in vmlinux:\n"); | ||
179 | |||
180 | for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) { | ||
181 | struct map *pos = rb_entry(nd, struct map, rb_node), *pair; | ||
182 | /* | ||
183 | * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while | ||
184 | * the kernel will have the path for the vmlinux file being used, | ||
185 | * so use the short name, less descriptive but the same ("[kernel]" in | ||
186 | * both cases). | ||
187 | */ | ||
188 | pair = map_groups__find_by_name(&kallsyms.kmaps, type, | ||
189 | (pos->dso->kernel ? | ||
190 | pos->dso->short_name : | ||
191 | pos->dso->name)); | ||
192 | if (pair) | ||
193 | pair->priv = 1; | ||
194 | else | ||
195 | map__fprintf(pos, stderr); | ||
196 | } | ||
197 | |||
198 | pr_info("Maps in vmlinux with a different name in kallsyms:\n"); | ||
199 | |||
200 | for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) { | ||
201 | struct map *pos = rb_entry(nd, struct map, rb_node), *pair; | ||
202 | |||
203 | pair = map_groups__find(&kallsyms.kmaps, type, pos->start); | ||
204 | if (pair == NULL || pair->priv) | ||
205 | continue; | ||
206 | |||
207 | if (pair->start == pos->start) { | ||
208 | pair->priv = 1; | ||
209 | pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as", | ||
210 | pos->start, pos->end, pos->pgoff, pos->dso->name); | ||
211 | if (pos->pgoff != pair->pgoff || pos->end != pair->end) | ||
212 | pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "", | ||
213 | pair->start, pair->end, pair->pgoff); | ||
214 | pr_info(" %s\n", pair->dso->name); | ||
215 | pair->priv = 1; | ||
216 | } | ||
217 | } | ||
218 | |||
219 | pr_info("Maps only in kallsyms:\n"); | ||
220 | |||
221 | for (nd = rb_first(&kallsyms.kmaps.maps[type]); | ||
222 | nd; nd = rb_next(nd)) { | ||
223 | struct map *pos = rb_entry(nd, struct map, rb_node); | ||
224 | |||
225 | if (!pos->priv) | ||
226 | map__fprintf(pos, stderr); | ||
227 | } | ||
228 | out: | ||
229 | return err; | ||
230 | } | ||
diff --git a/tools/perf/ui/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h index 687af0bba187..856320e2cc05 100644 --- a/tools/perf/ui/gtk/gtk.h +++ b/tools/perf/ui/gtk/gtk.h | |||
@@ -30,6 +30,7 @@ struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window); | |||
30 | int perf_gtk__deactivate_context(struct perf_gtk_context **ctx); | 30 | int perf_gtk__deactivate_context(struct perf_gtk_context **ctx); |
31 | 31 | ||
32 | void perf_gtk__init_helpline(void); | 32 | void perf_gtk__init_helpline(void); |
33 | void perf_gtk__init_progress(void); | ||
33 | void perf_gtk__init_hpp(void); | 34 | void perf_gtk__init_hpp(void); |
34 | 35 | ||
35 | #ifndef HAVE_GTK_INFO_BAR | 36 | #ifndef HAVE_GTK_INFO_BAR |
diff --git a/tools/perf/ui/gtk/progress.c b/tools/perf/ui/gtk/progress.c new file mode 100644 index 000000000000..482bcf3df9b7 --- /dev/null +++ b/tools/perf/ui/gtk/progress.c | |||
@@ -0,0 +1,59 @@ | |||
1 | #include <inttypes.h> | ||
2 | |||
3 | #include "gtk.h" | ||
4 | #include "../progress.h" | ||
5 | #include "util.h" | ||
6 | |||
7 | static GtkWidget *dialog; | ||
8 | static GtkWidget *progress; | ||
9 | |||
10 | static void gtk_progress_update(u64 curr, u64 total, const char *title) | ||
11 | { | ||
12 | double fraction = total ? 1.0 * curr / total : 0.0; | ||
13 | char buf[1024]; | ||
14 | |||
15 | if (dialog == NULL) { | ||
16 | GtkWidget *vbox = gtk_vbox_new(TRUE, 5); | ||
17 | GtkWidget *label = gtk_label_new(title); | ||
18 | |||
19 | dialog = gtk_window_new(GTK_WINDOW_TOPLEVEL); | ||
20 | progress = gtk_progress_bar_new(); | ||
21 | |||
22 | gtk_box_pack_start(GTK_BOX(vbox), label, TRUE, FALSE, 3); | ||
23 | gtk_box_pack_start(GTK_BOX(vbox), progress, TRUE, TRUE, 3); | ||
24 | |||
25 | gtk_container_add(GTK_CONTAINER(dialog), vbox); | ||
26 | |||
27 | gtk_window_set_title(GTK_WINDOW(dialog), "perf"); | ||
28 | gtk_window_resize(GTK_WINDOW(dialog), 300, 80); | ||
29 | gtk_window_set_position(GTK_WINDOW(dialog), GTK_WIN_POS_CENTER); | ||
30 | |||
31 | gtk_widget_show_all(dialog); | ||
32 | } | ||
33 | |||
34 | gtk_progress_bar_set_fraction(GTK_PROGRESS_BAR(progress), fraction); | ||
35 | snprintf(buf, sizeof(buf), "%"PRIu64" / %"PRIu64, curr, total); | ||
36 | gtk_progress_bar_set_text(GTK_PROGRESS_BAR(progress), buf); | ||
37 | |||
38 | /* we haven't called gtk_main() yet, so pump events manually */ | ||
39 | while (gtk_events_pending()) | ||
40 | gtk_main_iteration(); | ||
41 | } | ||
42 | |||
43 | static void gtk_progress_finish(void) | ||
44 | { | ||
45 | /* this will also destroy all of its children */ | ||
46 | gtk_widget_destroy(dialog); | ||
47 | |||
48 | dialog = NULL; | ||
49 | } | ||
50 | |||
51 | static struct ui_progress gtk_progress_fns = { | ||
52 | .update = gtk_progress_update, | ||
53 | .finish = gtk_progress_finish, | ||
54 | }; | ||
55 | |||
56 | void perf_gtk__init_progress(void) | ||
57 | { | ||
58 | progress_fns = &gtk_progress_fns; | ||
59 | } | ||
diff --git a/tools/perf/ui/gtk/setup.c b/tools/perf/ui/gtk/setup.c index 3c4c6ef78283..6c2dd2e423f3 100644 --- a/tools/perf/ui/gtk/setup.c +++ b/tools/perf/ui/gtk/setup.c | |||
@@ -8,7 +8,9 @@ int perf_gtk__init(void) | |||
8 | { | 8 | { |
9 | perf_error__register(&perf_gtk_eops); | 9 | perf_error__register(&perf_gtk_eops); |
10 | perf_gtk__init_helpline(); | 10 | perf_gtk__init_helpline(); |
11 | perf_gtk__init_progress(); | ||
11 | perf_gtk__init_hpp(); | 12 | perf_gtk__init_hpp(); |
13 | |||
12 | return gtk_init_check(NULL, NULL) ? 0 : -1; | 14 | return gtk_init_check(NULL, NULL) ? 0 : -1; |
13 | } | 15 | } |
14 | 16 | ||
diff --git a/tools/perf/ui/gtk/util.c b/tools/perf/ui/gtk/util.c index ccb046aac98b..c06942a41c78 100644 --- a/tools/perf/ui/gtk/util.c +++ b/tools/perf/ui/gtk/util.c | |||
@@ -111,14 +111,3 @@ struct perf_error_ops perf_gtk_eops = { | |||
111 | .warning = perf_gtk__warning_statusbar, | 111 | .warning = perf_gtk__warning_statusbar, |
112 | #endif | 112 | #endif |
113 | }; | 113 | }; |
114 | |||
115 | /* | ||
116 | * FIXME: Functions below should be implemented properly. | ||
117 | * For now, just add stubs for NO_NEWT=1 build. | ||
118 | */ | ||
119 | #ifndef NEWT_SUPPORT | ||
120 | void ui_progress__update(u64 curr __maybe_unused, u64 total __maybe_unused, | ||
121 | const char *title __maybe_unused) | ||
122 | { | ||
123 | } | ||
124 | #endif | ||
diff --git a/tools/perf/ui/progress.c b/tools/perf/ui/progress.c index 13aa64e50e11..3ec695607a4d 100644 --- a/tools/perf/ui/progress.c +++ b/tools/perf/ui/progress.c | |||
@@ -1,32 +1,26 @@ | |||
1 | #include "../cache.h" | 1 | #include "../cache.h" |
2 | #include "progress.h" | 2 | #include "progress.h" |
3 | #include "libslang.h" | ||
4 | #include "ui.h" | ||
5 | #include "browser.h" | ||
6 | 3 | ||
7 | void ui_progress__update(u64 curr, u64 total, const char *title) | 4 | static void nop_progress_update(u64 curr __maybe_unused, |
5 | u64 total __maybe_unused, | ||
6 | const char *title __maybe_unused) | ||
8 | { | 7 | { |
9 | int bar, y; | 8 | } |
10 | /* | ||
11 | * FIXME: We should have a per UI backend way of showing progress, | ||
12 | * stdio will just show a percentage as NN%, etc. | ||
13 | */ | ||
14 | if (use_browser <= 0) | ||
15 | return; | ||
16 | 9 | ||
17 | if (total == 0) | 10 | static struct ui_progress default_progress_fns = |
18 | return; | 11 | { |
12 | .update = nop_progress_update, | ||
13 | }; | ||
19 | 14 | ||
20 | ui__refresh_dimensions(true); | 15 | struct ui_progress *progress_fns = &default_progress_fns; |
21 | pthread_mutex_lock(&ui__lock); | 16 | |
22 | y = SLtt_Screen_Rows / 2 - 2; | 17 | void ui_progress__update(u64 curr, u64 total, const char *title) |
23 | SLsmg_set_color(0); | 18 | { |
24 | SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols); | 19 | return progress_fns->update(curr, total, title); |
25 | SLsmg_gotorc(y++, 1); | 20 | } |
26 | SLsmg_write_string((char *)title); | 21 | |
27 | SLsmg_set_color(HE_COLORSET_SELECTED); | 22 | void ui_progress__finish(void) |
28 | bar = ((SLtt_Screen_Cols - 2) * curr) / total; | 23 | { |
29 | SLsmg_fill_region(y, 1, 1, bar, ' '); | 24 | if (progress_fns->finish) |
30 | SLsmg_refresh(); | 25 | progress_fns->finish(); |
31 | pthread_mutex_unlock(&ui__lock); | ||
32 | } | 26 | } |
diff --git a/tools/perf/ui/progress.h b/tools/perf/ui/progress.h index d9c205b59aa1..257cc224f9cf 100644 --- a/tools/perf/ui/progress.h +++ b/tools/perf/ui/progress.h | |||
@@ -3,6 +3,16 @@ | |||
3 | 3 | ||
4 | #include <../types.h> | 4 | #include <../types.h> |
5 | 5 | ||
6 | struct ui_progress { | ||
7 | void (*update)(u64, u64, const char *); | ||
8 | void (*finish)(void); | ||
9 | }; | ||
10 | |||
11 | extern struct ui_progress *progress_fns; | ||
12 | |||
13 | void ui_progress__init(void); | ||
14 | |||
6 | void ui_progress__update(u64 curr, u64 total, const char *title); | 15 | void ui_progress__update(u64 curr, u64 total, const char *title); |
16 | void ui_progress__finish(void); | ||
7 | 17 | ||
8 | #endif | 18 | #endif |
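With the function-pointer dispatch in place, callers stay backend-agnostic: they call the same two entry points whether the TUI backend, the GTK backend or the no-op default is registered. A hedged usage sketch (the total, the per-item work and the title are made up):

	u64 i, total = 100000;

	for (i = 0; i < total; i++) {
		process_one_item(i);	/* hypothetical per-item work */
		ui_progress__update(i, total, "Processing events");
	}
	ui_progress__finish();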
diff --git a/tools/perf/ui/tui/progress.c b/tools/perf/ui/tui/progress.c new file mode 100644 index 000000000000..6c2184d53cbf --- /dev/null +++ b/tools/perf/ui/tui/progress.c | |||
@@ -0,0 +1,42 @@ | |||
1 | #include "../cache.h" | ||
2 | #include "../progress.h" | ||
3 | #include "../libslang.h" | ||
4 | #include "../ui.h" | ||
5 | #include "../browser.h" | ||
6 | |||
7 | static void tui_progress__update(u64 curr, u64 total, const char *title) | ||
8 | { | ||
9 | int bar, y; | ||
10 | /* | ||
11 | * FIXME: We should have a per-UI-backend way of showing progress; | ||
12 | * stdio will just show a percentage as NN%, etc. | ||
13 | */ | ||
14 | if (use_browser <= 0) | ||
15 | return; | ||
16 | |||
17 | if (total == 0) | ||
18 | return; | ||
19 | |||
20 | ui__refresh_dimensions(true); | ||
21 | pthread_mutex_lock(&ui__lock); | ||
22 | y = SLtt_Screen_Rows / 2 - 2; | ||
23 | SLsmg_set_color(0); | ||
24 | SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols); | ||
25 | SLsmg_gotorc(y++, 1); | ||
26 | SLsmg_write_string((char *)title); | ||
27 | SLsmg_set_color(HE_COLORSET_SELECTED); | ||
28 | bar = ((SLtt_Screen_Cols - 2) * curr) / total; | ||
29 | SLsmg_fill_region(y, 1, 1, bar, ' '); | ||
30 | SLsmg_refresh(); | ||
31 | pthread_mutex_unlock(&ui__lock); | ||
32 | } | ||
33 | |||
34 | static struct ui_progress tui_progress_fns = | ||
35 | { | ||
36 | .update = tui_progress__update, | ||
37 | }; | ||
38 | |||
39 | void ui_progress__init(void) | ||
40 | { | ||
41 | progress_fns = &tui_progress_fns; | ||
42 | } | ||
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c index 60debb81537a..81efa192e86c 100644 --- a/tools/perf/ui/tui/setup.c +++ b/tools/perf/ui/tui/setup.c | |||
@@ -118,6 +118,7 @@ int ui__init(void) | |||
118 | newtSetSuspendCallback(newt_suspend, NULL); | 118 | newtSetSuspendCallback(newt_suspend, NULL); |
119 | ui_helpline__init(); | 119 | ui_helpline__init(); |
120 | ui_browser__init(); | 120 | ui_browser__init(); |
121 | ui_progress__init(); | ||
121 | 122 | ||
122 | signal(SIGSEGV, ui__signal); | 123 | signal(SIGSEGV, ui__signal); |
123 | signal(SIGFPE, ui__signal); | 124 | signal(SIGFPE, ui__signal); |
diff --git a/tools/perf/ui/ui.h b/tools/perf/ui/ui.h index 7b67045479f6..d86359c99907 100644 --- a/tools/perf/ui/ui.h +++ b/tools/perf/ui/ui.h | |||
@@ -3,9 +3,37 @@ | |||
3 | 3 | ||
4 | #include <pthread.h> | 4 | #include <pthread.h> |
5 | #include <stdbool.h> | 5 | #include <stdbool.h> |
6 | #include <linux/compiler.h> | ||
6 | 7 | ||
7 | extern pthread_mutex_t ui__lock; | 8 | extern pthread_mutex_t ui__lock; |
8 | 9 | ||
10 | extern int use_browser; | ||
11 | |||
12 | void setup_browser(bool fallback_to_pager); | ||
13 | void exit_browser(bool wait_for_ok); | ||
14 | |||
15 | #ifdef NEWT_SUPPORT | ||
16 | int ui__init(void); | ||
17 | void ui__exit(bool wait_for_ok); | ||
18 | #else | ||
19 | static inline int ui__init(void) | ||
20 | { | ||
21 | return -1; | ||
22 | } | ||
23 | static inline void ui__exit(bool wait_for_ok __maybe_unused) {} | ||
24 | #endif | ||
25 | |||
26 | #ifdef GTK2_SUPPORT | ||
27 | int perf_gtk__init(void); | ||
28 | void perf_gtk__exit(bool wait_for_ok); | ||
29 | #else | ||
30 | static inline int perf_gtk__init(void) | ||
31 | { | ||
32 | return -1; | ||
33 | } | ||
34 | static inline void perf_gtk__exit(bool wait_for_ok __maybe_unused) {} | ||
35 | #endif | ||
36 | |||
9 | void ui__refresh_dimensions(bool force); | 37 | void ui__refresh_dimensions(bool force); |
10 | 38 | ||
11 | #endif /* _PERF_UI_H_ */ | 39 | #endif /* _PERF_UI_H_ */ |
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h index 2bd51370ad28..26e367239873 100644 --- a/tools/perf/util/cache.h +++ b/tools/perf/util/cache.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include "util.h" | 5 | #include "util.h" |
6 | #include "strbuf.h" | 6 | #include "strbuf.h" |
7 | #include "../perf.h" | 7 | #include "../perf.h" |
8 | #include "../ui/ui.h" | ||
8 | 9 | ||
9 | #define CMD_EXEC_PATH "--exec-path" | 10 | #define CMD_EXEC_PATH "--exec-path" |
10 | #define CMD_PERF_DIR "--perf-dir=" | 11 | #define CMD_PERF_DIR "--perf-dir=" |
@@ -31,44 +32,6 @@ extern const char *pager_program; | |||
31 | extern int pager_in_use(void); | 32 | extern int pager_in_use(void); |
32 | extern int pager_use_color; | 33 | extern int pager_use_color; |
33 | 34 | ||
34 | extern int use_browser; | ||
35 | |||
36 | #if defined(NEWT_SUPPORT) || defined(GTK2_SUPPORT) | ||
37 | void setup_browser(bool fallback_to_pager); | ||
38 | void exit_browser(bool wait_for_ok); | ||
39 | |||
40 | #ifdef NEWT_SUPPORT | ||
41 | int ui__init(void); | ||
42 | void ui__exit(bool wait_for_ok); | ||
43 | #else | ||
44 | static inline int ui__init(void) | ||
45 | { | ||
46 | return -1; | ||
47 | } | ||
48 | static inline void ui__exit(bool wait_for_ok __maybe_unused) {} | ||
49 | #endif | ||
50 | |||
51 | #ifdef GTK2_SUPPORT | ||
52 | int perf_gtk__init(void); | ||
53 | void perf_gtk__exit(bool wait_for_ok); | ||
54 | #else | ||
55 | static inline int perf_gtk__init(void) | ||
56 | { | ||
57 | return -1; | ||
58 | } | ||
59 | static inline void perf_gtk__exit(bool wait_for_ok __maybe_unused) {} | ||
60 | #endif | ||
61 | |||
62 | #else /* NEWT_SUPPORT || GTK2_SUPPORT */ | ||
63 | |||
64 | static inline void setup_browser(bool fallback_to_pager) | ||
65 | { | ||
66 | if (fallback_to_pager) | ||
67 | setup_pager(); | ||
68 | } | ||
69 | static inline void exit_browser(bool wait_for_ok __maybe_unused) {} | ||
70 | #endif /* NEWT_SUPPORT || GTK2_SUPPORT */ | ||
71 | |||
72 | char *alias_lookup(const char *alias); | 35 | char *alias_lookup(const char *alias); |
73 | int split_cmdline(char *cmdline, const char ***argv); | 36 | int split_cmdline(char *cmdline, const char ***argv); |
74 | 37 | ||
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index dec98750b484..83e8d234af6b 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h | |||
@@ -26,6 +26,7 @@ int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2))); | |||
26 | static inline void ui_progress__update(u64 curr __maybe_unused, | 26 | static inline void ui_progress__update(u64 curr __maybe_unused, |
27 | u64 total __maybe_unused, | 27 | u64 total __maybe_unused, |
28 | const char *title __maybe_unused) {} | 28 | const char *title __maybe_unused) {} |
29 | static inline void ui_progress__finish(void) {} | ||
29 | 30 | ||
30 | #define ui__error(format, arg...) ui__warning(format, ##arg) | 31 | #define ui__error(format, arg...) ui__warning(format, ##arg) |
31 | 32 | ||
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index ca9ca285406a..3cf2c3e0605f 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c | |||
@@ -193,55 +193,43 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, | |||
193 | event->header.misc = PERF_RECORD_MISC_USER; | 193 | event->header.misc = PERF_RECORD_MISC_USER; |
194 | 194 | ||
195 | while (1) { | 195 | while (1) { |
196 | char bf[BUFSIZ], *pbf = bf; | 196 | char bf[BUFSIZ]; |
197 | int n; | 197 | char prot[5]; |
198 | char execname[PATH_MAX]; | ||
199 | char anonstr[] = "//anon"; | ||
198 | size_t size; | 200 | size_t size; |
201 | |||
199 | if (fgets(bf, sizeof(bf), fp) == NULL) | 202 | if (fgets(bf, sizeof(bf), fp) == NULL) |
200 | break; | 203 | break; |
201 | 204 | ||
205 | /* ensure null termination since stack will be reused. */ | ||
206 | strcpy(execname, ""); | ||
207 | |||
202 | /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ | 208 | /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ |
203 | n = hex2u64(pbf, &event->mmap.start); | 209 | sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n", |
204 | if (n < 0) | 210 | &event->mmap.start, &event->mmap.len, prot, |
205 | continue; | 211 | &event->mmap.pgoff, execname); |
206 | pbf += n + 1; | 212 | |
207 | n = hex2u64(pbf, &event->mmap.len); | 213 | if (prot[2] != 'x') |
208 | if (n < 0) | ||
209 | continue; | 214 | continue; |
210 | pbf += n + 3; | 215 | |
211 | if (*pbf == 'x') { /* vm_exec */ | 216 | if (!strcmp(execname, "")) |
212 | char anonstr[] = "//anon\n"; | 217 | strcpy(execname, anonstr); |
213 | char *execname = strchr(bf, '/'); | 218 | |
214 | 219 | size = strlen(execname) + 1; | |
215 | /* Catch VDSO */ | 220 | memcpy(event->mmap.filename, execname, size); |
216 | if (execname == NULL) | 221 | size = PERF_ALIGN(size, sizeof(u64)); |
217 | execname = strstr(bf, "[vdso]"); | 222 | event->mmap.len -= event->mmap.start; |
218 | 223 | event->mmap.header.size = (sizeof(event->mmap) - | |
219 | /* Catch anonymous mmaps */ | 224 | (sizeof(event->mmap.filename) - size)); |
220 | if ((execname == NULL) && !strstr(bf, "[")) | 225 | memset(event->mmap.filename + size, 0, machine->id_hdr_size); |
221 | execname = anonstr; | 226 | event->mmap.header.size += machine->id_hdr_size; |
222 | 227 | event->mmap.pid = tgid; | |
223 | if (execname == NULL) | 228 | event->mmap.tid = pid; |
224 | continue; | 229 | |
225 | 230 | if (process(tool, event, &synth_sample, machine) != 0) { | |
226 | pbf += 3; | 231 | rc = -1; |
227 | n = hex2u64(pbf, &event->mmap.pgoff); | 232 | break; |
228 | |||
229 | size = strlen(execname); | ||
230 | execname[size - 1] = '\0'; /* Remove \n */ | ||
231 | memcpy(event->mmap.filename, execname, size); | ||
232 | size = PERF_ALIGN(size, sizeof(u64)); | ||
233 | event->mmap.len -= event->mmap.start; | ||
234 | event->mmap.header.size = (sizeof(event->mmap) - | ||
235 | (sizeof(event->mmap.filename) - size)); | ||
236 | memset(event->mmap.filename + size, 0, machine->id_hdr_size); | ||
237 | event->mmap.header.size += machine->id_hdr_size; | ||
238 | event->mmap.pid = tgid; | ||
239 | event->mmap.tid = pid; | ||
240 | |||
241 | if (process(tool, event, &synth_sample, machine) != 0) { | ||
242 | rc = -1; | ||
243 | break; | ||
244 | } | ||
245 | } | 233 | } |
246 | } | 234 | } |
247 | 235 | ||
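The rewritten loop parses each /proc/<pid>/maps line with a single sscanf() instead of the old hand-rolled hex2u64()/pointer walking; only executable mappings (prot[2] == 'x') become PERF_RECORD_MMAP events, and mappings without a name get the "//anon" placeholder. A standalone illustration of the same format string against a sample line (the sample values are made up; SCNx64 is the scanf-side macro, which expands to the same conversion the patch gets from PRIx64 on Linux):

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		const char *bf = "00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat\n";
		uint64_t start, end, pgoff;
		char prot[5], execname[4096];

		execname[0] = '\0';
		sscanf(bf, "%"SCNx64"-%"SCNx64" %s %"SCNx64" %*x:%*x %*u %s\n",
		       &start, &end, prot, &pgoff, execname);

		printf("%#" PRIx64 "-%#" PRIx64 " %s pgoff=%#" PRIx64 " %s\n",
		       start, end, prot, pgoff, execname);
		return 0;
	}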
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index a41dc4a5c2de..705293489e3c 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
@@ -52,15 +52,13 @@ struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, | |||
52 | void perf_evlist__config_attrs(struct perf_evlist *evlist, | 52 | void perf_evlist__config_attrs(struct perf_evlist *evlist, |
53 | struct perf_record_opts *opts) | 53 | struct perf_record_opts *opts) |
54 | { | 54 | { |
55 | struct perf_evsel *evsel, *first; | 55 | struct perf_evsel *evsel; |
56 | 56 | ||
57 | if (evlist->cpus->map[0] < 0) | 57 | if (evlist->cpus->map[0] < 0) |
58 | opts->no_inherit = true; | 58 | opts->no_inherit = true; |
59 | 59 | ||
60 | first = perf_evlist__first(evlist); | ||
61 | |||
62 | list_for_each_entry(evsel, &evlist->entries, node) { | 60 | list_for_each_entry(evsel, &evlist->entries, node) { |
63 | perf_evsel__config(evsel, opts, first); | 61 | perf_evsel__config(evsel, opts); |
64 | 62 | ||
65 | if (evlist->nr_entries > 1) | 63 | if (evlist->nr_entries > 1) |
66 | evsel->attr.sample_type |= PERF_SAMPLE_ID; | 64 | evsel->attr.sample_type |= PERF_SAMPLE_ID; |
@@ -224,6 +222,8 @@ void perf_evlist__disable(struct perf_evlist *evlist) | |||
224 | 222 | ||
225 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | 223 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { |
226 | list_for_each_entry(pos, &evlist->entries, node) { | 224 | list_for_each_entry(pos, &evlist->entries, node) { |
225 | if (perf_evsel__is_group_member(pos)) | ||
226 | continue; | ||
227 | for (thread = 0; thread < evlist->threads->nr; thread++) | 227 | for (thread = 0; thread < evlist->threads->nr; thread++) |
228 | ioctl(FD(pos, cpu, thread), | 228 | ioctl(FD(pos, cpu, thread), |
229 | PERF_EVENT_IOC_DISABLE, 0); | 229 | PERF_EVENT_IOC_DISABLE, 0); |
@@ -238,6 +238,8 @@ void perf_evlist__enable(struct perf_evlist *evlist) | |||
238 | 238 | ||
239 | for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) { | 239 | for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) { |
240 | list_for_each_entry(pos, &evlist->entries, node) { | 240 | list_for_each_entry(pos, &evlist->entries, node) { |
241 | if (perf_evsel__is_group_member(pos)) | ||
242 | continue; | ||
241 | for (thread = 0; thread < evlist->threads->nr; thread++) | 243 | for (thread = 0; thread < evlist->threads->nr; thread++) |
242 | ioctl(FD(pos, cpu, thread), | 244 | ioctl(FD(pos, cpu, thread), |
243 | PERF_EVENT_IOC_ENABLE, 0); | 245 | PERF_EVENT_IOC_ENABLE, 0); |
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 618d41140abd..1fb636c550a1 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -404,13 +404,40 @@ const char *perf_evsel__name(struct perf_evsel *evsel) | |||
404 | return evsel->name ?: "unknown"; | 404 | return evsel->name ?: "unknown"; |
405 | } | 405 | } |
406 | 406 | ||
407 | void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts, | 407 | /* |
408 | struct perf_evsel *first) | 408 | * The enable_on_exec/disabled value strategy: |
409 | * | ||
410 | * 1) For any type of traced program: | ||
411 | * - all independent events and group leaders are disabled | ||
412 | * - all group members are enabled | ||
413 | * | ||
414 | * Group members are ruled by group leaders. They need to | ||
415 | * be enabled, because the group scheduling relies on that. | ||
416 | * | ||
417 | * 2) For traced programs executed by perf: | ||
418 | * - all independent events and group leaders have | ||
419 | * enable_on_exec set | ||
420 | * - we don't specifically enable or disable any event during | ||
421 | * the record command | ||
422 | * | ||
423 | * Independent events and group leaders are initially disabled | ||
424 | * and get enabled by exec. Group members are ruled by group | ||
425 | * leaders as stated in 1). | ||
426 | * | ||
427 | * 3) For traced programs attached by perf (pid/tid): | ||
428 | * - we specifically enable or disable all events during | ||
429 | * the record command | ||
430 | * | ||
431 | * When attaching events to an already running traced program we | ||
432 | * enable/disable events specifically, as there's no | ||
433 | * initial traced exec call. | ||
434 | */ | ||
435 | void perf_evsel__config(struct perf_evsel *evsel, | ||
436 | struct perf_record_opts *opts) | ||
409 | { | 437 | { |
410 | struct perf_event_attr *attr = &evsel->attr; | 438 | struct perf_event_attr *attr = &evsel->attr; |
411 | int track = !evsel->idx; /* only the first counter needs these */ | 439 | int track = !evsel->idx; /* only the first counter needs these */ |
412 | 440 | ||
413 | attr->disabled = 1; | ||
414 | attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; | 441 | attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; |
415 | attr->inherit = !opts->no_inherit; | 442 | attr->inherit = !opts->no_inherit; |
416 | attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | | 443 | attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | |
@@ -486,10 +513,21 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts, | |||
486 | attr->mmap = track; | 513 | attr->mmap = track; |
487 | attr->comm = track; | 514 | attr->comm = track; |
488 | 515 | ||
489 | if (perf_target__none(&opts->target) && | 516 | /* |
490 | (!opts->group || evsel == first)) { | 517 | * XXX see the function comment above |
518 | * | ||
519 | * Disabling only independent events or group leaders, | ||
520 | * keeping group members enabled. | ||
521 | */ | ||
522 | if (!perf_evsel__is_group_member(evsel)) | ||
523 | attr->disabled = 1; | ||
524 | |||
525 | /* | ||
526 | * Setting enable_on_exec for independent events and | ||
527 | * group leaders for traced executed by perf. | ||
528 | */ | ||
529 | if (perf_target__none(&opts->target) && !perf_evsel__is_group_member(evsel)) | ||
491 | attr->enable_on_exec = 1; | 530 | attr->enable_on_exec = 1; |
492 | } | ||
493 | } | 531 | } |
494 | 532 | ||
495 | int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) | 533 | int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) |
@@ -669,7 +707,7 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread) | |||
669 | struct perf_evsel *leader = evsel->leader; | 707 | struct perf_evsel *leader = evsel->leader; |
670 | int fd; | 708 | int fd; |
671 | 709 | ||
672 | if (!leader) | 710 | if (!perf_evsel__is_group_member(evsel)) |
673 | return -1; | 711 | return -1; |
674 | 712 | ||
675 | /* | 713 | /* |
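In concrete terms, the strategy laid out in the new comment leaves perf_evsel__config() producing roughly the following (a summary derived from the comment and code above, not a separate specification):

	event kind                               attr.disabled   attr.enable_on_exec
	group member (evsel->leader != NULL)           0                 0
	leader / independent, forked workload          1                 1
	leader / independent, attached (-p/-t)         1                 0   (enabled later via
	                                                                      PERF_EVENT_IOC_ENABLE)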
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 6f94d6dea00f..a4c1dd4e149f 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
5 | #include <stdbool.h> | 5 | #include <stdbool.h> |
6 | #include <stddef.h> | ||
6 | #include "../../../include/uapi/linux/perf_event.h" | 7 | #include "../../../include/uapi/linux/perf_event.h" |
7 | #include "types.h" | 8 | #include "types.h" |
8 | #include "xyarray.h" | 9 | #include "xyarray.h" |
@@ -92,8 +93,7 @@ void perf_evsel__exit(struct perf_evsel *evsel); | |||
92 | void perf_evsel__delete(struct perf_evsel *evsel); | 93 | void perf_evsel__delete(struct perf_evsel *evsel); |
93 | 94 | ||
94 | void perf_evsel__config(struct perf_evsel *evsel, | 95 | void perf_evsel__config(struct perf_evsel *evsel, |
95 | struct perf_record_opts *opts, | 96 | struct perf_record_opts *opts); |
96 | struct perf_evsel *first); | ||
97 | 97 | ||
98 | bool perf_evsel__is_cache_op_valid(u8 type, u8 op); | 98 | bool perf_evsel__is_cache_op_valid(u8 type, u8 op); |
99 | 99 | ||
@@ -225,4 +225,9 @@ static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel) | |||
225 | { | 225 | { |
226 | return list_entry(evsel->node.next, struct perf_evsel, node); | 226 | return list_entry(evsel->node.next, struct perf_evsel, node); |
227 | } | 227 | } |
228 | |||
229 | static inline bool perf_evsel__is_group_member(const struct perf_evsel *evsel) | ||
230 | { | ||
231 | return evsel->leader != NULL; | ||
232 | } | ||
228 | #endif /* __PERF_EVSEL_H */ | 233 | #endif /* __PERF_EVSEL_H */ |
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 7c6e73b1b7ea..cb17e2a8c6ed 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -742,9 +742,8 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists, | |||
742 | 742 | ||
743 | he = hist_entry__new(pair); | 743 | he = hist_entry__new(pair); |
744 | if (he) { | 744 | if (he) { |
745 | he->stat.nr_events = 0; | 745 | memset(&he->stat, 0, sizeof(he->stat)); |
746 | he->stat.period = 0; | 746 | he->hists = hists; |
747 | he->hists = hists; | ||
748 | rb_link_node(&he->rb_node, parent, p); | 747 | rb_link_node(&he->rb_node, parent, p); |
749 | rb_insert_color(&he->rb_node, &hists->entries); | 748 | rb_insert_color(&he->rb_node, &hists->entries); |
750 | hists__inc_nr_entries(hists, he); | 749 | hists__inc_nr_entries(hists, he); |
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 1278c2c72a96..8b091a51e4a2 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h | |||
@@ -195,7 +195,7 @@ static inline int hist_entry__tui_annotate(struct hist_entry *self | |||
195 | return 0; | 195 | return 0; |
196 | } | 196 | } |
197 | 197 | ||
198 | static inline int script_browse(const char *script_opt) | 198 | static inline int script_browse(const char *script_opt __maybe_unused) |
199 | { | 199 | { |
200 | return 0; | 200 | return 0; |
201 | } | 201 | } |
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index c0b785b50849..020323af3364 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -722,6 +722,27 @@ static int get_event_modifier(struct event_modifier *mod, char *str, | |||
722 | return 0; | 722 | return 0; |
723 | } | 723 | } |
724 | 724 | ||
725 | /* | ||
726 | * Basic modifier sanity check to validate it contains only one | ||
727 | * instance of any modifier (apart from 'p'). | ||
728 | */ | ||
729 | static int check_modifier(char *str) | ||
730 | { | ||
731 | char *p = str; | ||
732 | |||
733 | /* The sizeof includes 0 byte as well. */ | ||
734 | if (strlen(str) > (sizeof("ukhGHppp") - 1)) | ||
735 | return -1; | ||
736 | |||
737 | while (*p) { | ||
738 | if (*p != 'p' && strchr(p + 1, *p)) | ||
739 | return -1; | ||
740 | p++; | ||
741 | } | ||
742 | |||
743 | return 0; | ||
744 | } | ||
745 | |||
725 | int parse_events__modifier_event(struct list_head *list, char *str, bool add) | 746 | int parse_events__modifier_event(struct list_head *list, char *str, bool add) |
726 | { | 747 | { |
727 | struct perf_evsel *evsel; | 748 | struct perf_evsel *evsel; |
@@ -730,6 +751,9 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add) | |||
730 | if (str == NULL) | 751 | if (str == NULL) |
731 | return 0; | 752 | return 0; |
732 | 753 | ||
754 | if (check_modifier(str)) | ||
755 | return -EINVAL; | ||
756 | |||
733 | if (!add && get_event_modifier(&mod, str, NULL)) | 757 | if (!add && get_event_modifier(&mod, str, NULL)) |
734 | return -EINVAL; | 758 | return -EINVAL; |
735 | 759 | ||
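A few worked inputs for check_modifier(), derived from the code above rather than from any documented interface:

	check_modifier("u")          -> 0
	check_modifier("ukhGH")      -> 0
	check_modifier("uppp")       -> 0	/* repeated 'p' allowed: precise level */
	check_modifier("uu")         -> -1	/* repeated modifier */
	check_modifier("ukhGHpppp")  -> -1	/* longer than "ukhGHppp" */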
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index ac9a6aacf2f5..f6399373d67d 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h | |||
@@ -99,7 +99,6 @@ void parse_events__set_leader(char *name, struct list_head *list); | |||
99 | void parse_events_update_lists(struct list_head *list_event, | 99 | void parse_events_update_lists(struct list_head *list_event, |
100 | struct list_head *list_all); | 100 | struct list_head *list_all); |
101 | void parse_events_error(void *data, void *scanner, char const *msg); | 101 | void parse_events_error(void *data, void *scanner, char const *msg); |
102 | int parse_events__test(void); | ||
103 | 102 | ||
104 | void print_events(const char *event_glob, bool name_only); | 103 | void print_events(const char *event_glob, bool name_only); |
105 | void print_events_type(u8 type); | 104 | void print_events_type(u8 type); |
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index 66959fab6634..e9d1134c2c68 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l | |||
@@ -82,7 +82,7 @@ num_hex 0x[a-fA-F0-9]+ | |||
82 | num_raw_hex [a-fA-F0-9]+ | 82 | num_raw_hex [a-fA-F0-9]+ |
83 | name [a-zA-Z_*?][a-zA-Z0-9_*?]* | 83 | name [a-zA-Z_*?][a-zA-Z0-9_*?]* |
84 | name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]* | 84 | name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]* |
85 | modifier_event [ukhpGH]{1,8} | 85 | modifier_event [ukhpGH]+ |
86 | modifier_bp [rwx]{1,3} | 86 | modifier_bp [rwx]{1,3} |
87 | 87 | ||
88 | %% | 88 | %% |
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 18e84801d4d1..9bdc60c6f138 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c | |||
@@ -22,7 +22,7 @@ static LIST_HEAD(pmus); | |||
22 | * Parse & process all the sysfs attributes located under | 22 | * Parse & process all the sysfs attributes located under |
23 | * the directory specified in 'dir' parameter. | 23 | * the directory specified in 'dir' parameter. |
24 | */ | 24 | */ |
25 | static int pmu_format_parse(char *dir, struct list_head *head) | 25 | int perf_pmu__format_parse(char *dir, struct list_head *head) |
26 | { | 26 | { |
27 | struct dirent *evt_ent; | 27 | struct dirent *evt_ent; |
28 | DIR *format_dir; | 28 | DIR *format_dir; |
@@ -77,7 +77,7 @@ static int pmu_format(char *name, struct list_head *format) | |||
77 | if (stat(path, &st) < 0) | 77 | if (stat(path, &st) < 0) |
78 | return 0; /* no error if format does not exist */ | 78 | return 0; /* no error if format does not exist */ |
79 | 79 | ||
80 | if (pmu_format_parse(path, format)) | 80 | if (perf_pmu__format_parse(path, format)) |
81 | return -1; | 81 | return -1; |
82 | 82 | ||
83 | return 0; | 83 | return 0; |
@@ -446,8 +446,9 @@ static int pmu_config_term(struct list_head *formats, | |||
446 | return 0; | 446 | return 0; |
447 | } | 447 | } |
448 | 448 | ||
449 | static int pmu_config(struct list_head *formats, struct perf_event_attr *attr, | 449 | int perf_pmu__config_terms(struct list_head *formats, |
450 | struct list_head *head_terms) | 450 | struct perf_event_attr *attr, |
451 | struct list_head *head_terms) | ||
451 | { | 452 | { |
452 | struct parse_events__term *term; | 453 | struct parse_events__term *term; |
453 | 454 | ||
@@ -467,7 +468,7 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, | |||
467 | struct list_head *head_terms) | 468 | struct list_head *head_terms) |
468 | { | 469 | { |
469 | attr->type = pmu->type; | 470 | attr->type = pmu->type; |
470 | return pmu_config(&pmu->format, attr, head_terms); | 471 | return perf_pmu__config_terms(&pmu->format, attr, head_terms); |
471 | } | 472 | } |
472 | 473 | ||
473 | static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu, | 474 | static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu, |
@@ -551,177 +552,3 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to) | |||
551 | for (b = from; b <= to; b++) | 552 | for (b = from; b <= to; b++) |
552 | set_bit(b, bits); | 553 | set_bit(b, bits); |
553 | } | 554 | } |
554 | |||
555 | /* Simulated format definitions. */ | ||
556 | static struct test_format { | ||
557 | const char *name; | ||
558 | const char *value; | ||
559 | } test_formats[] = { | ||
560 | { "krava01", "config:0-1,62-63\n", }, | ||
561 | { "krava02", "config:10-17\n", }, | ||
562 | { "krava03", "config:5\n", }, | ||
563 | { "krava11", "config1:0,2,4,6,8,20-28\n", }, | ||
564 | { "krava12", "config1:63\n", }, | ||
565 | { "krava13", "config1:45-47\n", }, | ||
566 | { "krava21", "config2:0-3,10-13,20-23,30-33,40-43,50-53,60-63\n", }, | ||
567 | { "krava22", "config2:8,18,48,58\n", }, | ||
568 | { "krava23", "config2:28-29,38\n", }, | ||
569 | }; | ||
570 | |||
571 | #define TEST_FORMATS_CNT (sizeof(test_formats) / sizeof(struct test_format)) | ||
572 | |||
573 | /* Simulated users input. */ | ||
574 | static struct parse_events__term test_terms[] = { | ||
575 | { | ||
576 | .config = (char *) "krava01", | ||
577 | .val.num = 15, | ||
578 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
579 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
580 | }, | ||
581 | { | ||
582 | .config = (char *) "krava02", | ||
583 | .val.num = 170, | ||
584 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
585 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
586 | }, | ||
587 | { | ||
588 | .config = (char *) "krava03", | ||
589 | .val.num = 1, | ||
590 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
591 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
592 | }, | ||
593 | { | ||
594 | .config = (char *) "krava11", | ||
595 | .val.num = 27, | ||
596 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
597 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
598 | }, | ||
599 | { | ||
600 | .config = (char *) "krava12", | ||
601 | .val.num = 1, | ||
602 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
603 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
604 | }, | ||
605 | { | ||
606 | .config = (char *) "krava13", | ||
607 | .val.num = 2, | ||
608 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
609 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
610 | }, | ||
611 | { | ||
612 | .config = (char *) "krava21", | ||
613 | .val.num = 119, | ||
614 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
615 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
616 | }, | ||
617 | { | ||
618 | .config = (char *) "krava22", | ||
619 | .val.num = 11, | ||
620 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
621 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
622 | }, | ||
623 | { | ||
624 | .config = (char *) "krava23", | ||
625 | .val.num = 2, | ||
626 | .type_val = PARSE_EVENTS__TERM_TYPE_NUM, | ||
627 | .type_term = PARSE_EVENTS__TERM_TYPE_USER, | ||
628 | }, | ||
629 | }; | ||
630 | #define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term)) | ||
631 | |||
632 | /* | ||
633 | * Prepare format directory data, exported by kernel | ||
634 | * at /sys/bus/event_source/devices/<dev>/format. | ||
635 | */ | ||
636 | static char *test_format_dir_get(void) | ||
637 | { | ||
638 | static char dir[PATH_MAX]; | ||
639 | unsigned int i; | ||
640 | |||
641 | snprintf(dir, PATH_MAX, "/tmp/perf-pmu-test-format-XXXXXX"); | ||
642 | if (!mkdtemp(dir)) | ||
643 | return NULL; | ||
644 | |||
645 | for (i = 0; i < TEST_FORMATS_CNT; i++) { | ||
646 | static char name[PATH_MAX]; | ||
647 | struct test_format *format = &test_formats[i]; | ||
648 | FILE *file; | ||
649 | |||
650 | snprintf(name, PATH_MAX, "%s/%s", dir, format->name); | ||
651 | |||
652 | file = fopen(name, "w"); | ||
653 | if (!file) | ||
654 | return NULL; | ||
655 | |||
656 | if (1 != fwrite(format->value, strlen(format->value), 1, file)) | ||
657 | break; | ||
658 | |||
659 | fclose(file); | ||
660 | } | ||
661 | |||
662 | return dir; | ||
663 | } | ||
664 | |||
665 | /* Cleanup format directory. */ | ||
666 | static int test_format_dir_put(char *dir) | ||
667 | { | ||
668 | char buf[PATH_MAX]; | ||
669 | snprintf(buf, PATH_MAX, "rm -f %s/*\n", dir); | ||
670 | if (system(buf)) | ||
671 | return -1; | ||
672 | |||
673 | snprintf(buf, PATH_MAX, "rmdir %s\n", dir); | ||
674 | return system(buf); | ||
675 | } | ||
676 | |||
677 | static struct list_head *test_terms_list(void) | ||
678 | { | ||
679 | static LIST_HEAD(terms); | ||
680 | unsigned int i; | ||
681 | |||
682 | for (i = 0; i < TERMS_CNT; i++) | ||
683 | list_add_tail(&test_terms[i].list, &terms); | ||
684 | |||
685 | return &terms; | ||
686 | } | ||
687 | |||
688 | #undef TERMS_CNT | ||
689 | |||
690 | int perf_pmu__test(void) | ||
691 | { | ||
692 | char *format = test_format_dir_get(); | ||
693 | LIST_HEAD(formats); | ||
694 | struct list_head *terms = test_terms_list(); | ||
695 | int ret; | ||
696 | |||
697 | if (!format) | ||
698 | return -EINVAL; | ||
699 | |||
700 | do { | ||
701 | struct perf_event_attr attr; | ||
702 | |||
703 | memset(&attr, 0, sizeof(attr)); | ||
704 | |||
705 | ret = pmu_format_parse(format, &formats); | ||
706 | if (ret) | ||
707 | break; | ||
708 | |||
709 | ret = pmu_config(&formats, &attr, terms); | ||
710 | if (ret) | ||
711 | break; | ||
712 | |||
713 | ret = -EINVAL; | ||
714 | |||
715 | if (attr.config != 0xc00000000002a823) | ||
716 | break; | ||
717 | if (attr.config1 != 0x8000400000000145) | ||
718 | break; | ||
719 | if (attr.config2 != 0x0400000020041d07) | ||
720 | break; | ||
721 | |||
722 | ret = 0; | ||
723 | } while (0); | ||
724 | |||
725 | test_format_dir_put(format); | ||
726 | return ret; | ||
727 | } | ||
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 39f3abac7744..07d553fe8d83 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h | |||
@@ -37,6 +37,9 @@ struct perf_pmu { | |||
37 | struct perf_pmu *perf_pmu__find(char *name); | 37 | struct perf_pmu *perf_pmu__find(char *name); |
38 | int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, | 38 | int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, |
39 | struct list_head *head_terms); | 39 | struct list_head *head_terms); |
40 | int perf_pmu__config_terms(struct list_head *formats, | ||
41 | struct perf_event_attr *attr, | ||
42 | struct list_head *head_terms); | ||
40 | int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms); | 43 | int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms); |
41 | struct list_head *perf_pmu__alias(struct perf_pmu *pmu, | 44 | struct list_head *perf_pmu__alias(struct perf_pmu *pmu, |
42 | struct list_head *head_terms); | 45 | struct list_head *head_terms); |
@@ -46,6 +49,7 @@ void perf_pmu_error(struct list_head *list, char *name, char const *msg); | |||
46 | int perf_pmu__new_format(struct list_head *list, char *name, | 49 | int perf_pmu__new_format(struct list_head *list, char *name, |
47 | int config, unsigned long *bits); | 50 | int config, unsigned long *bits); |
48 | void perf_pmu__set_format(unsigned long *bits, long from, long to); | 51 | void perf_pmu__set_format(unsigned long *bits, long from, long to); |
52 | int perf_pmu__format_parse(char *dir, struct list_head *head); | ||
49 | 53 | ||
50 | struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu); | 54 | struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu); |
51 | 55 | ||
diff --git a/tools/perf/util/pstack.c b/tools/perf/util/pstack.c index 13d36faf64eb..daa17aeb6c63 100644 --- a/tools/perf/util/pstack.c +++ b/tools/perf/util/pstack.c | |||
@@ -17,59 +17,59 @@ struct pstack { | |||
17 | 17 | ||
18 | struct pstack *pstack__new(unsigned short max_nr_entries) | 18 | struct pstack *pstack__new(unsigned short max_nr_entries) |
19 | { | 19 | { |
20 | struct pstack *self = zalloc((sizeof(*self) + | 20 | struct pstack *pstack = zalloc((sizeof(*pstack) + |
21 | max_nr_entries * sizeof(void *))); | 21 | max_nr_entries * sizeof(void *))); |
22 | if (self != NULL) | 22 | if (pstack != NULL) |
23 | self->max_nr_entries = max_nr_entries; | 23 | pstack->max_nr_entries = max_nr_entries; |
24 | return self; | 24 | return pstack; |
25 | } | 25 | } |
26 | 26 | ||
27 | void pstack__delete(struct pstack *self) | 27 | void pstack__delete(struct pstack *pstack) |
28 | { | 28 | { |
29 | free(self); | 29 | free(pstack); |
30 | } | 30 | } |
31 | 31 | ||
32 | bool pstack__empty(const struct pstack *self) | 32 | bool pstack__empty(const struct pstack *pstack) |
33 | { | 33 | { |
34 | return self->top == 0; | 34 | return pstack->top == 0; |
35 | } | 35 | } |
36 | 36 | ||
37 | void pstack__remove(struct pstack *self, void *key) | 37 | void pstack__remove(struct pstack *pstack, void *key) |
38 | { | 38 | { |
39 | unsigned short i = self->top, last_index = self->top - 1; | 39 | unsigned short i = pstack->top, last_index = pstack->top - 1; |
40 | 40 | ||
41 | while (i-- != 0) { | 41 | while (i-- != 0) { |
42 | if (self->entries[i] == key) { | 42 | if (pstack->entries[i] == key) { |
43 | if (i < last_index) | 43 | if (i < last_index) |
44 | memmove(self->entries + i, | 44 | memmove(pstack->entries + i, |
45 | self->entries + i + 1, | 45 | pstack->entries + i + 1, |
46 | (last_index - i) * sizeof(void *)); | 46 | (last_index - i) * sizeof(void *)); |
47 | --self->top; | 47 | --pstack->top; |
48 | return; | 48 | return; |
49 | } | 49 | } |
50 | } | 50 | } |
51 | pr_err("%s: %p not on the pstack!\n", __func__, key); | 51 | pr_err("%s: %p not on the pstack!\n", __func__, key); |
52 | } | 52 | } |
53 | 53 | ||
54 | void pstack__push(struct pstack *self, void *key) | 54 | void pstack__push(struct pstack *pstack, void *key) |
55 | { | 55 | { |
56 | if (self->top == self->max_nr_entries) { | 56 | if (pstack->top == pstack->max_nr_entries) { |
57 | pr_err("%s: top=%d, overflow!\n", __func__, self->top); | 57 | pr_err("%s: top=%d, overflow!\n", __func__, pstack->top); |
58 | return; | 58 | return; |
59 | } | 59 | } |
60 | self->entries[self->top++] = key; | 60 | pstack->entries[pstack->top++] = key; |
61 | } | 61 | } |
62 | 62 | ||
63 | void *pstack__pop(struct pstack *self) | 63 | void *pstack__pop(struct pstack *pstack) |
64 | { | 64 | { |
65 | void *ret; | 65 | void *ret; |
66 | 66 | ||
67 | if (self->top == 0) { | 67 | if (pstack->top == 0) { |
68 | pr_err("%s: underflow!\n", __func__); | 68 | pr_err("%s: underflow!\n", __func__); |
69 | return NULL; | 69 | return NULL; |
70 | } | 70 | } |
71 | 71 | ||
72 | ret = self->entries[--self->top]; | 72 | ret = pstack->entries[--pstack->top]; |
73 | self->entries[self->top] = NULL; | 73 | pstack->entries[pstack->top] = NULL; |
74 | return ret; | 74 | return ret; |
75 | } | 75 | } |
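The change above only renames the opaque "self" parameter to "pstack"; behaviour is unchanged. For context, a minimal, hypothetical usage sketch of this small LIFO helper, assuming it is built inside tools/perf with util/pstack.h on the include path; the two strings are arbitrary test payloads.

/* Hypothetical sketch: LIFO use of the pstack helpers renamed above. */
#include <stdio.h>
#include "util/pstack.h"

static int demo_pstack(void)
{
	struct pstack *ps = pstack__new(2);	/* room for two entries */
	char *a = "first", *b = "second";

	if (ps == NULL)
		return -1;

	pstack__push(ps, a);
	pstack__push(ps, b);

	/* entries come back in reverse order of insertion */
	printf("%s\n", (char *)pstack__pop(ps));	/* prints "second" */
	printf("%s\n", (char *)pstack__pop(ps));	/* prints "first" */

	pstack__delete(ps);
	return 0;
}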
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 15abe40dc702..ce6f51162386 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -1458,6 +1458,7 @@ more: | |||
1458 | session->ordered_samples.next_flush = ULLONG_MAX; | 1458 | session->ordered_samples.next_flush = ULLONG_MAX; |
1459 | err = flush_sample_queue(session, tool); | 1459 | err = flush_sample_queue(session, tool); |
1460 | out_err: | 1460 | out_err: |
1461 | ui_progress__finish(); | ||
1461 | perf_session__warn_about_errors(session, tool); | 1462 | perf_session__warn_about_errors(session, tool); |
1462 | perf_session_free_sample_buffers(session); | 1463 | perf_session_free_sample_buffers(session); |
1463 | return err; | 1464 | return err; |
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 04ccf2962080..de68f98b236d 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -224,7 +224,6 @@ size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); | |||
224 | size_t symbol__fprintf(struct symbol *sym, FILE *fp); | 224 | size_t symbol__fprintf(struct symbol *sym, FILE *fp); |
225 | bool symbol_type__is_a(char symbol_type, enum map_type map_type); | 225 | bool symbol_type__is_a(char symbol_type, enum map_type map_type); |
226 | 226 | ||
227 | int dso__test_data(void); | ||
228 | int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, | 227 | int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, |
229 | struct symsrc *runtime_ss, symbol_filter_t filter, | 228 | struct symsrc *runtime_ss, symbol_filter_t filter, |
230 | int kmodule); | 229 | int kmodule); |
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include index 96ce80a3743b..2964b96aa55f 100644 --- a/tools/scripts/Makefile.include +++ b/tools/scripts/Makefile.include | |||
@@ -1,8 +1,11 @@ | |||
1 | ifeq ("$(origin O)", "command line") | 1 | ifeq ($(origin O), command line) |
2 | dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),) | 2 | dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),) |
3 | ABSOLUTE_O := $(shell cd $(O) ; pwd) | 3 | ABSOLUTE_O := $(shell cd $(O) ; pwd) |
4 | OUTPUT := $(ABSOLUTE_O)/ | 4 | OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/) |
5 | COMMAND_O := O=$(ABSOLUTE_O) | 5 | COMMAND_O := O=$(ABSOLUTE_O) |
6 | ifeq ($(objtree),) | ||
7 | objtree := $(O) | ||
8 | endif | ||
6 | endif | 9 | endif |
7 | 10 | ||
8 | ifneq ($(OUTPUT),) | 11 | ifneq ($(OUTPUT),) |
@@ -41,7 +44,16 @@ else | |||
41 | NO_SUBDIR = : | 44 | NO_SUBDIR = : |
42 | endif | 45 | endif |
43 | 46 | ||
44 | QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir | 47 | # |
48 | # Define a callable command for descending to a new directory | ||
49 | # | ||
50 | # Call by doing: $(call descend,directory[,target]) | ||
51 | # | ||
52 | descend = \ | ||
53 | +mkdir -p $(OUTPUT)$(1) && \ | ||
54 | $(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2) | ||
55 | |||
56 | QUIET_SUBDIR0 = +$(MAKE) $(COMMAND_O) -C # space to separate -C and subdir | ||
45 | QUIET_SUBDIR1 = | 57 | QUIET_SUBDIR1 = |
46 | 58 | ||
47 | ifneq ($(findstring $(MAKEFLAGS),s),s) | 59 | ifneq ($(findstring $(MAKEFLAGS),s),s) |
@@ -56,5 +68,10 @@ ifndef V | |||
56 | $(MAKE) $(PRINT_DIR) -C $$subdir | 68 | $(MAKE) $(PRINT_DIR) -C $$subdir |
57 | QUIET_FLEX = @echo ' ' FLEX $@; | 69 | QUIET_FLEX = @echo ' ' FLEX $@; |
58 | QUIET_BISON = @echo ' ' BISON $@; | 70 | QUIET_BISON = @echo ' ' BISON $@; |
71 | |||
72 | descend = \ | ||
73 | @echo ' ' DESCEND $(1); \ | ||
74 | mkdir -p $(OUTPUT)$(1) && \ | ||
75 | $(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2) | ||
59 | endif | 76 | endif |
60 | endif | 77 | endif |
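As a hypothetical illustration of the new helper (the directory name "foo" is invented): with the definitions above, $(call descend,foo,install) expands to '+mkdir -p $(OUTPUT)foo && $(MAKE) $(COMMAND_O) subdir=foo $(PRINT_DIR) -C foo install', with subdir= growing to 'parent/foo' when invoked from an already-descended Makefile; the second, quiet definition additionally prints a ' DESCEND foo' line when V is not set.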