author     Ingo Molnar <mingo@kernel.org>    2012-09-13 11:11:19 -0400
committer  Ingo Molnar <mingo@kernel.org>    2012-09-13 11:11:19 -0400
commit     be267be8b191d5fac9f65a29e047470f364315eb (patch)
tree       80cdb3f7c1d53fc6209fac2d57ff9a4052e2750e
parent     d5cb2aef4fda355fbafe8db4f425b73ea94d2019 (diff)
parent     9ec3f4e437ede2f3b5087d412abe16a0219b3b99 (diff)
Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:
* Remove die()/exit() calls from several tools; a sketch of the conversion
  pattern follows this list.
* Add missing perf_regs.h file to MANIFEST
* Clean up and improve 'perf sched' performance by eliminating lots of
  needless calls to libtraceevent.
* More patches to make perf build on Android, from Irina Tirdea
* Resolve vdso callchains, from Jiri Olsa
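The die() removals all follow the same shape: a helper that used to print a
message and exit() now reports the failure and returns a negative value that
its caller propagates. A minimal sketch of that pattern, with hypothetical
function names and plain fprintf() standing in for perf's pr_err() helper
from util/debug.h (not code from this series):

    #include <stdio.h>

    /* Sketch only: read_kernel_max() is a made-up name for illustration. */
    static int read_kernel_max(int *max_cpu)
    {
            FILE *fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
            int err = -1;

            if (!fp) {
                    *max_cpu = 4096;   /* keep the old fallback default */
                    return 0;
            }

            if (fscanf(fp, "%d", max_cpu) < 1) {
                    /* was: die("Failed to read 'kernel_max' from sysfs") */
                    fprintf(stderr, "Failed to read 'kernel_max' from sysfs\n");
                    goto out_close;
            }

            err = 0;
    out_close:
            fclose(fp);
            return err;   /* caller checks this instead of being exit()ed */
    }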
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
92 files changed, 1518 insertions, 1231 deletions
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index f4190b5764de..2c54cdd8ae1b 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -1824,7 +1824,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
 }
 
 static enum event_type
-process_entry(struct event_format *event __unused, struct print_arg *arg,
+process_entry(struct event_format *event __maybe_unused, struct print_arg *arg,
               char **tok)
 {
         enum event_type type;
@@ -2458,7 +2458,8 @@ process_paren(struct event_format *event, struct print_arg *arg, char **tok)
 
 
 static enum event_type
-process_str(struct event_format *event __unused, struct print_arg *arg, char **tok)
+process_str(struct event_format *event __maybe_unused, struct print_arg *arg,
+            char **tok)
 {
         enum event_type type;
         char *token;
@@ -3653,7 +3654,8 @@ static void free_args(struct print_arg *args)
 }
 
 static char *
-get_bprint_format(void *data, int size __unused, struct event_format *event)
+get_bprint_format(void *data, int size __maybe_unused,
+                  struct event_format *event)
 {
         struct pevent *pevent = event->pevent;
         unsigned long long addr;
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 3318963f1c98..a4bbe2437925 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -24,8 +24,8 @@
 #include <stdarg.h>
 #include <regex.h>
 
-#ifndef __unused
-#define __unused __attribute__ ((unused))
+#ifndef __maybe_unused
+#define __maybe_unused __attribute__((unused))
 #endif
 
 /* ----------------------- trace_seq ----------------------- */
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index b4b572e8c100..051807990938 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -10,6 +10,7 @@ include/linux/stringify.h
 lib/rbtree.c
 include/linux/swab.h
 arch/*/include/asm/unistd*.h
+arch/*/include/asm/perf_regs.h
 arch/*/lib/memcpy*.S
 arch/*/lib/memset*.S
 include/linux/poison.h
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index e4b2e8f2606c..209774bcee2e 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -264,6 +264,7 @@ LIB_H += util/include/linux/ctype.h
 LIB_H += util/include/linux/kernel.h
 LIB_H += util/include/linux/list.h
 LIB_H += util/include/linux/export.h
+LIB_H += util/include/linux/magic.h
 LIB_H += util/include/linux/poison.h
 LIB_H += util/include/linux/prefetch.h
 LIB_H += util/include/linux/rbtree.h
@@ -336,6 +337,7 @@ LIB_H += util/intlist.h
 LIB_H += util/perf_regs.h
 LIB_H += util/unwind.h
 LIB_H += ui/helpline.h
+LIB_H += util/vdso.h
 
 LIB_OBJS += $(OUTPUT)util/abspath.o
 LIB_OBJS += $(OUTPUT)util/alias.o
@@ -403,6 +405,7 @@ LIB_OBJS += $(OUTPUT)util/cgroup.o
 LIB_OBJS += $(OUTPUT)util/target.o
 LIB_OBJS += $(OUTPUT)util/rblist.o
 LIB_OBJS += $(OUTPUT)util/intlist.o
+LIB_OBJS += $(OUTPUT)util/vdso.o
 
 LIB_OBJS += $(OUTPUT)ui/helpline.o
 LIB_OBJS += $(OUTPUT)ui/hist.o
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index a09bece6dad2..8f89998eeaf4 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -3,7 +3,8 @@
 
 extern int bench_sched_messaging(int argc, const char **argv, const char *prefix);
 extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
-extern int bench_mem_memcpy(int argc, const char **argv, const char *prefix __used);
+extern int bench_mem_memcpy(int argc, const char **argv,
+                            const char *prefix __maybe_unused);
 extern int bench_mem_memset(int argc, const char **argv, const char *prefix);
 
 #define BENCH_FORMAT_DEFAULT_STR "default"
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
index 02dad5d3359b..93c83e3cb4a7 100644
--- a/tools/perf/bench/mem-memcpy.c
+++ b/tools/perf/bench/mem-memcpy.c
@@ -177,7 +177,7 @@ static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault)
 } while (0)
 
 int bench_mem_memcpy(int argc, const char **argv,
-                     const char *prefix __used)
+                     const char *prefix __maybe_unused)
 {
         int i;
         size_t len;
diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c
index 350cc9557265..c6e4bc523492 100644
--- a/tools/perf/bench/mem-memset.c
+++ b/tools/perf/bench/mem-memset.c
@@ -171,7 +171,7 @@ static double do_memset_gettimeofday(memset_t fn, size_t len, bool prefault)
 } while (0)
 
 int bench_mem_memset(int argc, const char **argv,
-                     const char *prefix __used)
+                     const char *prefix __maybe_unused)
 {
         int i;
         size_t len;
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index d1d1b30f99c1..cc1190a0849b 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -267,7 +267,7 @@ static const char * const bench_sched_message_usage[] = {
 };
 
 int bench_sched_messaging(int argc, const char **argv,
-                          const char *prefix __used)
+                          const char *prefix __maybe_unused)
 {
         unsigned int i, total_children;
         struct timeval start, stop, diff;
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 15911e9c587a..69cfba8d4c6c 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -43,7 +43,7 @@ static const char * const bench_sched_pipe_usage[] = {
 };
 
 int bench_sched_pipe(int argc, const char **argv,
-                     const char *prefix __used)
+                     const char *prefix __maybe_unused)
 {
         int pipe_1[2], pipe_2[2];
         int m = 0, i;
@@ -55,8 +55,8 @@ int bench_sched_pipe(int argc, const char **argv,
          * discarding returned value of read(), write()
          * causes error in building environment for perf
          */
-        int __used ret, wait_stat;
-        pid_t pid, retpid __used;
+        int __maybe_unused ret, wait_stat;
+        pid_t pid, retpid __maybe_unused;
 
         argc = parse_options(argc, argv, options,
                              bench_sched_pipe_usage, 0);
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 2f3f0029c0f7..9ea38540b873 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -239,7 +239,7 @@ static const char * const annotate_usage[] = {
         NULL
 };
 
-int cmd_annotate(int argc, const char **argv, const char *prefix __used)
+int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
 {
         struct perf_annotate annotate = {
                 .tool = {
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index 1f3100216448..cae9a5fd2ecf 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -173,7 +173,7 @@ static void all_subsystem(void)
                 all_suite(&subsystems[i]);
 }
 
-int cmd_bench(int argc, const char **argv, const char *prefix __used)
+int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused)
 {
         int i, j, status = 0;
 
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 29ad20e67919..83654557e108 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -43,15 +43,16 @@ static int build_id_cache__add_file(const char *filename, const char *debugdir)
         }
 
         build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
-        err = build_id_cache__add_s(sbuild_id, debugdir, filename, false);
+        err = build_id_cache__add_s(sbuild_id, debugdir, filename,
+                                    false, false);
         if (verbose)
                 pr_info("Adding %s %s: %s\n", sbuild_id, filename,
                         err ? "FAIL" : "Ok");
         return err;
 }
 
-static int build_id_cache__remove_file(const char *filename __used,
-                                       const char *debugdir __used)
+static int build_id_cache__remove_file(const char *filename __maybe_unused,
+                                       const char *debugdir __maybe_unused)
 {
         u8 build_id[BUILD_ID_SIZE];
         char sbuild_id[BUILD_ID_SIZE * 2 + 1];
@@ -119,7 +120,8 @@ static int __cmd_buildid_cache(void)
         return 0;
 }
 
-int cmd_buildid_cache(int argc, const char **argv, const char *prefix __used)
+int cmd_buildid_cache(int argc, const char **argv,
+                      const char *prefix __maybe_unused)
 {
         argc = parse_options(argc, argv, buildid_cache_options,
                              buildid_cache_usage, 0);
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 7d6842826a0c..1159feeebb19 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -103,7 +103,8 @@ static int __cmd_buildid_list(void)
         return perf_session__list_build_ids();
 }
 
-int cmd_buildid_list(int argc, const char **argv, const char *prefix __used)
+int cmd_buildid_list(int argc, const char **argv,
+                     const char *prefix __maybe_unused)
 {
         argc = parse_options(argc, argv, options, buildid_list_usage, 0);
         setup_pager();
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index c4c6d76b70ea..761f4197a9e2 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -33,7 +33,7 @@ static int hists__add_entry(struct hists *self,
         return -ENOMEM;
 }
 
-static int diff__process_sample_event(struct perf_tool *tool __used,
+static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event,
                                       struct perf_sample *sample,
                                       struct perf_evsel *evsel,
@@ -242,7 +242,7 @@ static const struct option options[] = {
         OPT_END()
 };
 
-int cmd_diff(int argc, const char **argv, const char *prefix __used)
+int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
 {
         sort_order = diff__default_sort_order;
         argc = parse_options(argc, argv, options, diff_usage, 0);
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 0dd5a058f766..1fb164164fd0 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -113,7 +113,7 @@ static const char * const evlist_usage[] = {
         NULL
 };
 
-int cmd_evlist(int argc, const char **argv, const char *prefix __used)
+int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
 {
         struct perf_attr_details details = { .verbose = false, };
         const char *input_name = NULL;
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index f9daae5ac47a..25c8b942ff85 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -426,7 +426,7 @@ static int show_html_page(const char *perf_cmd)
         return 0;
 }
 
-int cmd_help(int argc, const char **argv, const char *prefix __used)
+int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused)
 {
         const char *alias;
         int rc = 0;
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 64d8ba2fb7bc..1eaa6617c814 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -17,9 +17,9 @@
 static char const *input_name = "-";
 static bool inject_build_ids;
 
-static int perf_event__repipe_synth(struct perf_tool *tool __used,
+static int perf_event__repipe_synth(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
-                                    struct machine *machine __used)
+                                    struct machine *machine __maybe_unused)
 {
         uint32_t size;
         void *buf = event;
@@ -40,7 +40,8 @@ static int perf_event__repipe_synth(struct perf_tool *tool __used,
 
 static int perf_event__repipe_op2_synth(struct perf_tool *tool,
                                         union perf_event *event,
-                                        struct perf_session *session __used)
+                                        struct perf_session *session
+                                        __maybe_unused)
 {
         return perf_event__repipe_synth(tool, event, NULL);
 }
@@ -52,13 +53,14 @@ static int perf_event__repipe_event_type_synth(struct perf_tool *tool,
 }
 
 static int perf_event__repipe_tracing_data_synth(union perf_event *event,
-                                                 struct perf_session *session __used)
+                                                 struct perf_session *session
+                                                 __maybe_unused)
 {
         return perf_event__repipe_synth(NULL, event, NULL);
 }
 
 static int perf_event__repipe_attr(union perf_event *event,
-                                   struct perf_evlist **pevlist __used)
+                                   struct perf_evlist **pevlist __maybe_unused)
 {
         int ret;
         ret = perf_event__process_attr(event, pevlist);
@@ -70,7 +72,7 @@ static int perf_event__repipe_attr(union perf_event *event,
 
 static int perf_event__repipe(struct perf_tool *tool,
                               union perf_event *event,
-                              struct perf_sample *sample __used,
+                              struct perf_sample *sample __maybe_unused,
                               struct machine *machine)
 {
         return perf_event__repipe_synth(tool, event, machine);
@@ -78,8 +80,8 @@ static int perf_event__repipe(struct perf_tool *tool,
 
 static int perf_event__repipe_sample(struct perf_tool *tool,
                                      union perf_event *event,
-                                     struct perf_sample *sample __used,
-                                     struct perf_evsel *evsel __used,
+                                     struct perf_sample *sample __maybe_unused,
+                                     struct perf_evsel *evsel __maybe_unused,
                                      struct machine *machine)
 {
         return perf_event__repipe_synth(tool, event, machine);
@@ -163,7 +165,7 @@ static int dso__inject_build_id(struct dso *self, struct perf_tool *tool,
 static int perf_event__inject_buildid(struct perf_tool *tool,
                                       union perf_event *event,
                                       struct perf_sample *sample,
-                                      struct perf_evsel *evsel __used,
+                                      struct perf_evsel *evsel __maybe_unused,
                                       struct machine *machine)
 {
         struct addr_location al;
@@ -224,7 +226,7 @@ struct perf_tool perf_inject = {
 
 extern volatile int session_done;
 
-static void sig_handler(int sig __attribute__((__unused__)))
+static void sig_handler(int sig __maybe_unused)
 {
         session_done = 1;
 }
@@ -267,7 +269,7 @@ static const struct option options[] = {
         OPT_END()
 };
 
-int cmd_inject(int argc, const char **argv, const char *prefix __used)
+int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
 {
         argc = parse_options(argc, argv, options, report_usage, 0);
 
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index fc6607b383f2..f5f8a6b745a3 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -58,41 +58,52 @@ static unsigned long nr_allocs, nr_cross_allocs;
 
 #define PATH_SYS_NODE "/sys/devices/system/node"
 
-static void init_cpunode_map(void)
+static int init_cpunode_map(void)
 {
         FILE *fp;
-        int i;
+        int i, err = -1;
 
         fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
         if (!fp) {
                 max_cpu_num = 4096;
-                return;
+                return 0;
+        }
+
+        if (fscanf(fp, "%d", &max_cpu_num) < 1) {
+                pr_err("Failed to read 'kernel_max' from sysfs");
+                goto out_close;
         }
 
-        if (fscanf(fp, "%d", &max_cpu_num) < 1)
-                die("Failed to read 'kernel_max' from sysfs");
         max_cpu_num++;
 
         cpunode_map = calloc(max_cpu_num, sizeof(int));
-        if (!cpunode_map)
-                die("calloc");
+        if (!cpunode_map) {
+                pr_err("%s: calloc failed\n", __func__);
+                goto out_close;
+        }
+
         for (i = 0; i < max_cpu_num; i++)
                 cpunode_map[i] = -1;
+
+        err = 0;
+out_close:
         fclose(fp);
+        return err;
 }
 
-static void setup_cpunode_map(void)
+static int setup_cpunode_map(void)
 {
         struct dirent *dent1, *dent2;
         DIR *dir1, *dir2;
         unsigned int cpu, mem;
         char buf[PATH_MAX];
 
-        init_cpunode_map();
+        if (init_cpunode_map())
+                return -1;
 
         dir1 = opendir(PATH_SYS_NODE);
         if (!dir1)
-                return;
+                return -1;
 
         while ((dent1 = readdir(dir1)) != NULL) {
                 if (dent1->d_type != DT_DIR ||
@@ -112,10 +123,11 @@ static void setup_cpunode_map(void)
                 closedir(dir2);
         }
         closedir(dir1);
+        return 0;
 }
 
-static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
+static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
                               int bytes_req, int bytes_alloc, int cpu)
 {
         struct rb_node **node = &root_alloc_stat.rb_node;
         struct rb_node *parent = NULL;
@@ -139,8 +151,10 @@ static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
                 data->bytes_alloc += bytes_alloc;
         } else {
                 data = malloc(sizeof(*data));
-                if (!data)
-                        die("malloc");
+                if (!data) {
+                        pr_err("%s: malloc failed\n", __func__);
+                        return -1;
+                }
                 data->ptr = ptr;
                 data->pingpong = 0;
                 data->hit = 1;
@@ -152,9 +166,10 @@ static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
         }
         data->call_site = call_site;
         data->alloc_cpu = cpu;
+        return 0;
 }
 
-static void insert_caller_stat(unsigned long call_site,
+static int insert_caller_stat(unsigned long call_site,
                               int bytes_req, int bytes_alloc)
 {
         struct rb_node **node = &root_caller_stat.rb_node;
@@ -179,8 +194,10 @@ static void insert_caller_stat(unsigned long call_site,
                 data->bytes_alloc += bytes_alloc;
         } else {
                 data = malloc(sizeof(*data));
-                if (!data)
-                        die("malloc");
+                if (!data) {
+                        pr_err("%s: malloc failed\n", __func__);
+                        return -1;
+                }
                 data->call_site = call_site;
                 data->pingpong = 0;
                 data->hit = 1;
@@ -190,11 +207,12 @@ static void insert_caller_stat(unsigned long call_site,
                 rb_link_node(&data->node, parent, node);
                 rb_insert_color(&data->node, &root_caller_stat);
         }
+
+        return 0;
 }
 
-static void perf_evsel__process_alloc_event(struct perf_evsel *evsel,
-                                            struct perf_sample *sample,
-                                            int node)
+static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
+                                           struct perf_sample *sample, int node)
 {
         struct event_format *event = evsel->tp_format;
         void *data = sample->raw_data;
@@ -209,8 +227,9 @@ static void perf_evsel__process_alloc_event(struct perf_evsel *evsel,
         bytes_req = raw_field_value(event, "bytes_req", data);
         bytes_alloc = raw_field_value(event, "bytes_alloc", data);
 
-        insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
-        insert_caller_stat(call_site, bytes_req, bytes_alloc);
+        if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu) ||
+            insert_caller_stat(call_site, bytes_req, bytes_alloc))
+                return -1;
 
         total_requested += bytes_req;
         total_allocated += bytes_alloc;
@@ -222,6 +241,7 @@ static void perf_evsel__process_alloc_event(struct perf_evsel *evsel,
                 nr_cross_allocs++;
         }
         nr_allocs++;
+        return 0;
 }
 
 static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
@@ -252,8 +272,8 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr,
         return NULL;
 }
 
-static void perf_evsel__process_free_event(struct perf_evsel *evsel,
-                                           struct perf_sample *sample)
+static int perf_evsel__process_free_event(struct perf_evsel *evsel,
+                                          struct perf_sample *sample)
 {
         unsigned long ptr = raw_field_value(evsel->tp_format, "ptr",
                                             sample->raw_data);
@@ -261,44 +281,46 @@ static void perf_evsel__process_free_event(struct perf_evsel *evsel,
 
         s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
         if (!s_alloc)
-                return;
+                return 0;
 
         if ((short)sample->cpu != s_alloc->alloc_cpu) {
                 s_alloc->pingpong++;
 
                 s_caller = search_alloc_stat(0, s_alloc->call_site,
                                              &root_caller_stat, callsite_cmp);
-                assert(s_caller);
+                if (!s_caller)
+                        return -1;
                 s_caller->pingpong++;
         }
         s_alloc->alloc_cpu = -1;
+
+        return 0;
 }
 
-static void perf_evsel__process_kmem_event(struct perf_evsel *evsel,
-                                           struct perf_sample *sample)
+static int perf_evsel__process_kmem_event(struct perf_evsel *evsel,
+                                          struct perf_sample *sample)
 {
         struct event_format *event = evsel->tp_format;
 
         if (!strcmp(event->name, "kmalloc") ||
             !strcmp(event->name, "kmem_cache_alloc")) {
-                perf_evsel__process_alloc_event(evsel, sample, 0);
-                return;
+                return perf_evsel__process_alloc_event(evsel, sample, 0);
         }
 
         if (!strcmp(event->name, "kmalloc_node") ||
             !strcmp(event->name, "kmem_cache_alloc_node")) {
-                perf_evsel__process_alloc_event(evsel, sample, 1);
-                return;
+                return perf_evsel__process_alloc_event(evsel, sample, 1);
         }
 
         if (!strcmp(event->name, "kfree") ||
             !strcmp(event->name, "kmem_cache_free")) {
-                perf_evsel__process_free_event(evsel, sample);
-                return;
+                return perf_evsel__process_free_event(evsel, sample);
         }
+
+        return 0;
 }
 
-static int process_sample_event(struct perf_tool *tool __used,
+static int process_sample_event(struct perf_tool *tool __maybe_unused,
                                 union perf_event *event,
                                 struct perf_sample *sample,
                                 struct perf_evsel *evsel,
@@ -314,8 +336,7 @@ static int process_sample_event(struct perf_tool *tool __used,
 
         dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
 
-        perf_evsel__process_kmem_event(evsel, sample);
-        return 0;
+        return perf_evsel__process_kmem_event(evsel, sample);
 }
 
 static struct perf_tool perf_kmem = {
@@ -613,8 +634,10 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
         for (i = 0; i < NUM_AVAIL_SORTS; i++) {
                 if (!strcmp(avail_sorts[i]->name, tok)) {
                         sort = malloc(sizeof(*sort));
-                        if (!sort)
-                                die("malloc");
+                        if (!sort) {
+                                pr_err("%s: malloc failed\n", __func__);
+                                return -1;
+                        }
                         memcpy(sort, avail_sorts[i], sizeof(*sort));
                         list_add_tail(&sort->list, list);
                         return 0;
@@ -629,8 +652,10 @@ static int setup_sorting(struct list_head *sort_list, const char *arg)
         char *tok;
         char *str = strdup(arg);
 
-        if (!str)
-                die("strdup");
+        if (!str) {
+                pr_err("%s: strdup failed\n", __func__);
+                return -1;
+        }
 
         while (true) {
                 tok = strsep(&str, ",");
@@ -647,8 +672,8 @@ static int setup_sorting(struct list_head *sort_list, const char *arg)
         return 0;
 }
 
-static int parse_sort_opt(const struct option *opt __used,
-                          const char *arg, int unset __used)
+static int parse_sort_opt(const struct option *opt __maybe_unused,
+                          const char *arg, int unset __maybe_unused)
 {
         if (!arg)
                 return -1;
@@ -661,22 +686,24 @@ static int parse_sort_opt(const struct option *opt __used,
         return 0;
 }
 
-static int parse_caller_opt(const struct option *opt __used,
-                            const char *arg __used, int unset __used)
+static int parse_caller_opt(const struct option *opt __maybe_unused,
+                            const char *arg __maybe_unused,
+                            int unset __maybe_unused)
 {
         caller_flag = (alloc_flag + 1);
         return 0;
 }
 
-static int parse_alloc_opt(const struct option *opt __used,
-                           const char *arg __used, int unset __used)
+static int parse_alloc_opt(const struct option *opt __maybe_unused,
+                           const char *arg __maybe_unused,
+                           int unset __maybe_unused)
 {
         alloc_flag = (caller_flag + 1);
         return 0;
 }
 
-static int parse_line_opt(const struct option *opt __used,
-                          const char *arg, int unset __used)
+static int parse_line_opt(const struct option *opt __maybe_unused,
+                          const char *arg, int unset __maybe_unused)
 {
         int lines;
 
@@ -746,7 +773,7 @@ static int __cmd_record(int argc, const char **argv)
         return cmd_record(i, rec_argv, NULL);
 }
 
-int cmd_kmem(int argc, const char **argv, const char *prefix __used)
+int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
 {
         argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);
 
@@ -758,7 +785,8 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __used)
         if (!strncmp(argv[0], "rec", 3)) {
                 return __cmd_record(argc, argv);
         } else if (!strcmp(argv[0], "stat")) {
-                setup_cpunode_map();
+                if (setup_cpunode_map())
+                        return -1;
 
                 if (list_empty(&caller_sort))
                         setup_sorting(&caller_sort, default_sort_order);
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 9fc6e0fa3dce..4d2aa2cbeca8 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -102,7 +102,7 @@ static int __cmd_buildid_list(int argc, const char **argv)
         return cmd_buildid_list(i, rec_argv, NULL);
 }
 
-int cmd_kvm(int argc, const char **argv, const char *prefix __used)
+int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
 {
         perf_host = 0;
         perf_guest = 1;
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index bdcff81b532a..1948eceb517a 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -14,7 +14,7 @@
 #include "util/parse-events.h"
 #include "util/cache.h"
 
-int cmd_list(int argc, const char **argv, const char *prefix __used)
+int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
 {
         setup_pager();
 
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 75153c87e650..a8035207a3dd 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -870,7 +870,7 @@ static int dump_info(void)
         return rc;
 }
 
-static int process_sample_event(struct perf_tool *tool __used,
+static int process_sample_event(struct perf_tool *tool __maybe_unused,
                                 union perf_event *event,
                                 struct perf_sample *sample,
                                 struct perf_evsel *evsel,
@@ -1020,7 +1020,7 @@ static int __cmd_record(int argc, const char **argv)
         return cmd_record(i, rec_argv, NULL);
 }
 
-int cmd_lock(int argc, const char **argv, const char *prefix __used)
+int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
 {
         unsigned int i;
         int rc = 0;
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index e215ae61b2ae..118aa8946573 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -143,8 +143,8 @@ static int parse_probe_event_argv(int argc, const char **argv)
         return ret;
 }
 
-static int opt_add_probe_event(const struct option *opt __used,
-                               const char *str, int unset __used)
+static int opt_add_probe_event(const struct option *opt __maybe_unused,
+                               const char *str, int unset __maybe_unused)
 {
         if (str) {
                 params.mod_events = true;
@@ -153,8 +153,8 @@ static int opt_add_probe_event(const struct option *opt __used,
         return 0;
 }
 
-static int opt_del_probe_event(const struct option *opt __used,
-                               const char *str, int unset __used)
+static int opt_del_probe_event(const struct option *opt __maybe_unused,
+                               const char *str, int unset __maybe_unused)
 {
         if (str) {
                 params.mod_events = true;
@@ -166,7 +166,7 @@ static int opt_del_probe_event(const struct option *opt __used,
 }
 
 static int opt_set_target(const struct option *opt, const char *str,
-                          int unset __used)
+                          int unset __maybe_unused)
 {
         int ret = -ENOENT;
 
@@ -188,8 +188,8 @@ static int opt_set_target(const struct option *opt, const char *str,
 }
 
 #ifdef DWARF_SUPPORT
-static int opt_show_lines(const struct option *opt __used,
-                          const char *str, int unset __used)
+static int opt_show_lines(const struct option *opt __maybe_unused,
+                          const char *str, int unset __maybe_unused)
 {
         int ret = 0;
 
@@ -209,8 +209,8 @@ static int opt_show_lines(const struct option *opt __used,
         return ret;
 }
 
-static int opt_show_vars(const struct option *opt __used,
-                         const char *str, int unset __used)
+static int opt_show_vars(const struct option *opt __maybe_unused,
+                         const char *str, int unset __maybe_unused)
 {
         struct perf_probe_event *pev = &params.events[params.nevents];
         int ret;
@@ -229,8 +229,8 @@ static int opt_show_vars(const struct option *opt __used,
 }
 #endif
 
-static int opt_set_filter(const struct option *opt __used,
-                          const char *str, int unset __used)
+static int opt_set_filter(const struct option *opt __maybe_unused,
+                          const char *str, int unset __maybe_unused)
 {
         const char *err;
 
@@ -327,7 +327,7 @@ static const struct option options[] = {
         OPT_END()
 };
 
-int cmd_probe(int argc, const char **argv, const char *prefix __used)
+int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
 {
         int ret;
 
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 7b8b891d4d56..c643ed669ef9 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -92,8 +92,8 @@ static int write_output(struct perf_record *rec, void *buf, size_t size)
 
 static int process_synthesized_event(struct perf_tool *tool,
                                      union perf_event *event,
-                                     struct perf_sample *sample __used,
-                                     struct machine *machine __used)
+                                     struct perf_sample *sample __maybe_unused,
+                                     struct machine *machine __maybe_unused)
 {
         struct perf_record *rec = container_of(tool, struct perf_record, tool);
         if (write_output(rec, event, event->header.size) < 0)
@@ -159,7 +159,7 @@ static void sig_handler(int sig)
         signr = sig;
 }
 
-static void perf_record__sig_exit(int exit_status __used, void *arg)
+static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
 {
         struct perf_record *rec = arg;
         int status;
@@ -827,7 +827,7 @@ static int get_stack_size(char *str, unsigned long *_size)
 #endif /* !NO_LIBUNWIND_SUPPORT */
 
 static int
-parse_callchain_opt(const struct option *opt __used, const char *arg,
+parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
                     int unset)
 {
         struct perf_record *rec = (struct perf_record *)opt->value;
@@ -1003,7 +1003,7 @@ const struct option record_options[] = {
         OPT_END()
 };
 
-int cmd_record(int argc, const char **argv, const char *prefix __used)
+int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
 {
         int err = -ENOMEM;
         struct perf_evsel *pos;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 1f8d11b4f7ff..97b2e6300f4c 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -223,9 +223,9 @@ static int process_sample_event(struct perf_tool *tool,
 
 static int process_read_event(struct perf_tool *tool,
                               union perf_event *event,
-                              struct perf_sample *sample __used,
+                              struct perf_sample *sample __maybe_unused,
                               struct perf_evsel *evsel,
-                              struct machine *machine __used)
+                              struct machine *machine __maybe_unused)
 {
         struct perf_report *rep = container_of(tool, struct perf_report, tool);
 
@@ -287,7 +287,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
 
 extern volatile int session_done;
 
-static void sig_handler(int sig __used)
+static void sig_handler(int sig __maybe_unused)
 {
         session_done = 1;
 }
@@ -533,13 +533,14 @@ setup:
 }
 
 static int
-parse_branch_mode(const struct option *opt __used, const char *str __used, int unset)
+parse_branch_mode(const struct option *opt __maybe_unused,
+                  const char *str __maybe_unused, int unset)
 {
         sort__branch_mode = !unset;
         return 0;
 }
 
-int cmd_report(int argc, const char **argv, const char *prefix __used)
+int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
 {
         struct perf_session *session;
         struct stat st;
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index a25a023965bb..af305f57bd22 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -23,26 +23,12 @@
 #include <pthread.h>
 #include <math.h>
 
-static const char *input_name;
-
-static char default_sort_order[] = "avg, max, switch, runtime";
-static const char *sort_order = default_sort_order;
-
-static int profile_cpu = -1;
-
 #define PR_SET_NAME 15 /* Set process name */
 #define MAX_CPUS 4096
-
-static u64 run_measurement_overhead;
-static u64 sleep_measurement_overhead;
-
 #define COMM_LEN 20
 #define SYM_LEN 129
-
 #define MAX_PID 65536
 
-static unsigned long nr_tasks;
-
 struct sched_atom;
 
 struct task_desc {
@@ -80,44 +66,6 @@ struct sched_atom {
         struct task_desc *wakee;
 };
 
-static struct task_desc *pid_to_task[MAX_PID];
-
-static struct task_desc **tasks;
-
-static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
-static u64 start_time;
-
-static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-static unsigned long nr_run_events;
-static unsigned long nr_sleep_events;
-static unsigned long nr_wakeup_events;
-
-static unsigned long nr_sleep_corrections;
-static unsigned long nr_run_events_optimized;
-
-static unsigned long targetless_wakeups;
-static unsigned long multitarget_wakeups;
-
-static u64 cpu_usage;
-static u64 runavg_cpu_usage;
-static u64 parent_cpu_usage;
-static u64 runavg_parent_cpu_usage;
-
-static unsigned long nr_runs;
-static u64 sum_runtime;
-static u64 sum_fluct;
-static u64 run_avg;
-
-static unsigned int replay_repeat = 10;
-static unsigned long nr_timestamps;
-static unsigned long nr_unordered_timestamps;
-static unsigned long nr_state_machine_bugs;
-static unsigned long nr_context_switch_bugs;
-static unsigned long nr_events;
-static unsigned long nr_lost_chunks;
-static unsigned long nr_lost_events;
-
 #define TASK_STATE_TO_CHAR_STR "RSDTtZX"
 
 enum thread_state {
@@ -149,11 +97,79 @@ struct work_atoms {
 
 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
 
-static struct rb_root atom_root, sorted_atom_root;
+struct perf_sched;
 
-static u64 all_runtime;
-static u64 all_count;
+struct trace_sched_handler {
+        int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+                            struct perf_sample *sample, struct machine *machine);
+
+        int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+                             struct perf_sample *sample, struct machine *machine);
+
+        int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+                            struct perf_sample *sample, struct machine *machine);
+
+        int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+                          struct perf_sample *sample);
 
+        int (*migrate_task_event)(struct perf_sched *sched,
+                                  struct perf_evsel *evsel,
+                                  struct perf_sample *sample,
+                                  struct machine *machine);
+};
+
+struct perf_sched {
+        struct perf_tool tool;
+        const char *input_name;
+        const char *sort_order;
+        unsigned long nr_tasks;
+        struct task_desc *pid_to_task[MAX_PID];
+        struct task_desc **tasks;
+        const struct trace_sched_handler *tp_handler;
+        pthread_mutex_t start_work_mutex;
+        pthread_mutex_t work_done_wait_mutex;
+        int profile_cpu;
+        /*
+         * Track the current task - that way we can know whether there's any
+         * weird events, such as a task being switched away that is not current.
+         */
+        int max_cpu;
+        u32 curr_pid[MAX_CPUS];
+        struct thread *curr_thread[MAX_CPUS];
+        char next_shortname1;
+        char next_shortname2;
+        unsigned int replay_repeat;
+        unsigned long nr_run_events;
+        unsigned long nr_sleep_events;
+        unsigned long nr_wakeup_events;
+        unsigned long nr_sleep_corrections;
+        unsigned long nr_run_events_optimized;
+        unsigned long targetless_wakeups;
+        unsigned long multitarget_wakeups;
+        unsigned long nr_runs;
+        unsigned long nr_timestamps;
+        unsigned long nr_unordered_timestamps;
+        unsigned long nr_state_machine_bugs;
+        unsigned long nr_context_switch_bugs;
+        unsigned long nr_events;
+        unsigned long nr_lost_chunks;
+        unsigned long nr_lost_events;
+        u64 run_measurement_overhead;
+        u64 sleep_measurement_overhead;
+        u64 start_time;
+        u64 cpu_usage;
+        u64 runavg_cpu_usage;
+        u64 parent_cpu_usage;
+        u64 runavg_parent_cpu_usage;
+        u64 sum_runtime;
+        u64 sum_fluct;
+        u64 run_avg;
+        u64 all_runtime;
+        u64 all_count;
+        u64 cpu_last_switched[MAX_CPUS];
+        struct rb_root atom_root, sorted_atom_root;
+        struct list_head sort_list, cmp_pid;
+};
 
 static u64 get_nsecs(void)
 {
@@ -164,13 +180,13 @@ static u64 get_nsecs(void)
         return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
 }
 
-static void burn_nsecs(u64 nsecs)
+static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
 {
         u64 T0 = get_nsecs(), T1;
 
         do {
                 T1 = get_nsecs();
-        } while (T1 + run_measurement_overhead < T0 + nsecs);
+        } while (T1 + sched->run_measurement_overhead < T0 + nsecs);
 }
 
 static void sleep_nsecs(u64 nsecs)
@@ -183,24 +199,24 @@ static void sleep_nsecs(u64 nsecs)
         nanosleep(&ts, NULL);
 }
 
-static void calibrate_run_measurement_overhead(void)
+static void calibrate_run_measurement_overhead(struct perf_sched *sched)
 {
         u64 T0, T1, delta, min_delta = 1000000000ULL;
         int i;
 
         for (i = 0; i < 10; i++) {
                 T0 = get_nsecs();
-                burn_nsecs(0);
+                burn_nsecs(sched, 0);
                 T1 = get_nsecs();
                 delta = T1-T0;
                 min_delta = min(min_delta, delta);
         }
-        run_measurement_overhead = min_delta;
+        sched->run_measurement_overhead = min_delta;
 
         printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
 }
 
-static void calibrate_sleep_measurement_overhead(void)
+static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
 {
         u64 T0, T1, delta, min_delta = 1000000000ULL;
         int i;
@@ -213,7 +229,7 @@ static void calibrate_sleep_measurement_overhead(void)
                 min_delta = min(min_delta, delta);
         }
         min_delta -= 10000;
-        sleep_measurement_overhead = min_delta;
+        sched->sleep_measurement_overhead = min_delta;
 
         printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
 }
@@ -246,8 +262,8 @@ static struct sched_atom *last_event(struct task_desc *task)
         return task->atoms[task->nr_events - 1];
 }
 
-static void
-add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
+static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
+                                u64 timestamp, u64 duration)
 {
         struct sched_atom *event, *curr_event = last_event(task);
 
@@ -256,7 +272,7 @@ add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
          * to it:
          */
         if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
-                nr_run_events_optimized++;
+                sched->nr_run_events_optimized++;
                 curr_event->duration += duration;
                 return;
         }
@@ -266,12 +282,11 @@ add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
         event->type = SCHED_EVENT_RUN;
         event->duration = duration;
 
-        nr_run_events++;
+        sched->nr_run_events++;
 }
 
-static void
-add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
-                       struct task_desc *wakee)
+static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
+                                   u64 timestamp, struct task_desc *wakee)
 {
         struct sched_atom *event, *wakee_event;
 
@@ -281,11 +296,11 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
 
         wakee_event = last_event(wakee);
         if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
-                targetless_wakeups++;
+                sched->targetless_wakeups++;
                 return;
         }
         if (wakee_event->wait_sem) {
-                multitarget_wakeups++;
+                sched->multitarget_wakeups++;
                 return;
         }
 
@@ -294,89 +309,89 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
         wakee_event->specific_wait = 1;
         event->wait_sem = wakee_event->wait_sem;
 
-        nr_wakeup_events++;
+        sched->nr_wakeup_events++;
 }
 
-static void
-add_sched_event_sleep(struct task_desc *task, u64 timestamp,
-                      u64 task_state __used)
+static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
+                                  u64 timestamp, u64 task_state __maybe_unused)
 {
         struct sched_atom *event = get_new_event(task, timestamp);
 
         event->type = SCHED_EVENT_SLEEP;
 
-        nr_sleep_events++;
+        sched->nr_sleep_events++;
 }
 
-static struct task_desc *register_pid(unsigned long pid, const char *comm)
+static struct task_desc *register_pid(struct perf_sched *sched,
326 | unsigned long pid, const char *comm) | ||
312 | { | 327 | { |
313 | struct task_desc *task; | 328 | struct task_desc *task; |
314 | 329 | ||
315 | BUG_ON(pid >= MAX_PID); | 330 | BUG_ON(pid >= MAX_PID); |
316 | 331 | ||
317 | task = pid_to_task[pid]; | 332 | task = sched->pid_to_task[pid]; |
318 | 333 | ||
319 | if (task) | 334 | if (task) |
320 | return task; | 335 | return task; |
321 | 336 | ||
322 | task = zalloc(sizeof(*task)); | 337 | task = zalloc(sizeof(*task)); |
323 | task->pid = pid; | 338 | task->pid = pid; |
324 | task->nr = nr_tasks; | 339 | task->nr = sched->nr_tasks; |
325 | strcpy(task->comm, comm); | 340 | strcpy(task->comm, comm); |
326 | /* | 341 | /* |
327 | * every task starts in sleeping state - this gets ignored | 342 | * every task starts in sleeping state - this gets ignored |
328 | * if there's no wakeup pointing to this sleep state: | 343 | * if there's no wakeup pointing to this sleep state: |
329 | */ | 344 | */ |
330 | add_sched_event_sleep(task, 0, 0); | 345 | add_sched_event_sleep(sched, task, 0, 0); |
331 | 346 | ||
332 | pid_to_task[pid] = task; | 347 | sched->pid_to_task[pid] = task; |
333 | nr_tasks++; | 348 | sched->nr_tasks++; |
334 | tasks = realloc(tasks, nr_tasks*sizeof(struct task_task *)); | 349 | sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_task *)); |
335 | BUG_ON(!tasks); | 350 | BUG_ON(!sched->tasks); |
336 | tasks[task->nr] = task; | 351 | sched->tasks[task->nr] = task; |
337 | 352 | ||
338 | if (verbose) | 353 | if (verbose) |
339 | printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm); | 354 | printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm); |
340 | 355 | ||
341 | return task; | 356 | return task; |
342 | } | 357 | } |
343 | 358 | ||
344 | 359 | ||
345 | static void print_task_traces(void) | 360 | static void print_task_traces(struct perf_sched *sched) |
346 | { | 361 | { |
347 | struct task_desc *task; | 362 | struct task_desc *task; |
348 | unsigned long i; | 363 | unsigned long i; |
349 | 364 | ||
350 | for (i = 0; i < nr_tasks; i++) { | 365 | for (i = 0; i < sched->nr_tasks; i++) { |
351 | task = tasks[i]; | 366 | task = sched->tasks[i]; |
352 | printf("task %6ld (%20s:%10ld), nr_events: %ld\n", | 367 | printf("task %6ld (%20s:%10ld), nr_events: %ld\n", |
353 | task->nr, task->comm, task->pid, task->nr_events); | 368 | task->nr, task->comm, task->pid, task->nr_events); |
354 | } | 369 | } |
355 | } | 370 | } |
356 | 371 | ||
357 | static void add_cross_task_wakeups(void) | 372 | static void add_cross_task_wakeups(struct perf_sched *sched) |
358 | { | 373 | { |
359 | struct task_desc *task1, *task2; | 374 | struct task_desc *task1, *task2; |
360 | unsigned long i, j; | 375 | unsigned long i, j; |
361 | 376 | ||
362 | for (i = 0; i < nr_tasks; i++) { | 377 | for (i = 0; i < sched->nr_tasks; i++) { |
363 | task1 = tasks[i]; | 378 | task1 = sched->tasks[i]; |
364 | j = i + 1; | 379 | j = i + 1; |
365 | if (j == nr_tasks) | 380 | if (j == sched->nr_tasks) |
366 | j = 0; | 381 | j = 0; |
367 | task2 = tasks[j]; | 382 | task2 = sched->tasks[j]; |
368 | add_sched_event_wakeup(task1, 0, task2); | 383 | add_sched_event_wakeup(sched, task1, 0, task2); |
369 | } | 384 | } |
370 | } | 385 | } |
371 | 386 | ||
372 | static void | 387 | static void perf_sched__process_event(struct perf_sched *sched, |
373 | process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom) | 388 | struct sched_atom *atom) |
374 | { | 389 | { |
375 | int ret = 0; | 390 | int ret = 0; |
376 | 391 | ||
377 | switch (atom->type) { | 392 | switch (atom->type) { |
378 | case SCHED_EVENT_RUN: | 393 | case SCHED_EVENT_RUN: |
379 | burn_nsecs(atom->duration); | 394 | burn_nsecs(sched, atom->duration); |
380 | break; | 395 | break; |
381 | case SCHED_EVENT_SLEEP: | 396 | case SCHED_EVENT_SLEEP: |
382 | if (atom->wait_sem) | 397 | if (atom->wait_sem) |
@@ -423,8 +438,8 @@ static int self_open_counters(void) | |||
423 | fd = sys_perf_event_open(&attr, 0, -1, -1, 0); | 438 | fd = sys_perf_event_open(&attr, 0, -1, -1, 0); |
424 | 439 | ||
425 | if (fd < 0) | 440 | if (fd < 0) |
426 | die("Error: sys_perf_event_open() syscall returned" | 441 | pr_debug("Error: sys_perf_event_open() syscall returned" |
427 | "with %d (%s)\n", fd, strerror(errno)); | 442 | "with %d (%s)\n", fd, strerror(errno)); |
428 | return fd; | 443 | return fd; |
429 | } | 444 | } |
430 | 445 | ||
@@ -439,31 +454,41 @@ static u64 get_cpu_usage_nsec_self(int fd) | |||
439 | return runtime; | 454 | return runtime; |
440 | } | 455 | } |
441 | 456 | ||
457 | struct sched_thread_parms { | ||
458 | struct task_desc *task; | ||
459 | struct perf_sched *sched; | ||
460 | }; | ||
461 | |||
442 | static void *thread_func(void *ctx) | 462 | static void *thread_func(void *ctx) |
443 | { | 463 | { |
444 | struct task_desc *this_task = ctx; | 464 | struct sched_thread_parms *parms = ctx; |
465 | struct task_desc *this_task = parms->task; | ||
466 | struct perf_sched *sched = parms->sched; | ||
445 | u64 cpu_usage_0, cpu_usage_1; | 467 | u64 cpu_usage_0, cpu_usage_1; |
446 | unsigned long i, ret; | 468 | unsigned long i, ret; |
447 | char comm2[22]; | 469 | char comm2[22]; |
448 | int fd; | 470 | int fd; |
449 | 471 | ||
472 | free(parms); | ||
473 | |||
450 | sprintf(comm2, ":%s", this_task->comm); | 474 | sprintf(comm2, ":%s", this_task->comm); |
451 | prctl(PR_SET_NAME, comm2); | 475 | prctl(PR_SET_NAME, comm2); |
452 | fd = self_open_counters(); | 476 | fd = self_open_counters(); |
453 | 477 | if (fd < 0) | |
478 | return NULL; | ||
454 | again: | 479 | again: |
455 | ret = sem_post(&this_task->ready_for_work); | 480 | ret = sem_post(&this_task->ready_for_work); |
456 | BUG_ON(ret); | 481 | BUG_ON(ret); |
457 | ret = pthread_mutex_lock(&start_work_mutex); | 482 | ret = pthread_mutex_lock(&sched->start_work_mutex); |
458 | BUG_ON(ret); | 483 | BUG_ON(ret); |
459 | ret = pthread_mutex_unlock(&start_work_mutex); | 484 | ret = pthread_mutex_unlock(&sched->start_work_mutex); |
460 | BUG_ON(ret); | 485 | BUG_ON(ret); |
461 | 486 | ||
462 | cpu_usage_0 = get_cpu_usage_nsec_self(fd); | 487 | cpu_usage_0 = get_cpu_usage_nsec_self(fd); |
463 | 488 | ||
464 | for (i = 0; i < this_task->nr_events; i++) { | 489 | for (i = 0; i < this_task->nr_events; i++) { |
465 | this_task->curr_event = i; | 490 | this_task->curr_event = i; |
466 | process_sched_event(this_task, this_task->atoms[i]); | 491 | perf_sched__process_event(sched, this_task->atoms[i]); |
467 | } | 492 | } |
468 | 493 | ||
469 | cpu_usage_1 = get_cpu_usage_nsec_self(fd); | 494 | cpu_usage_1 = get_cpu_usage_nsec_self(fd); |
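
Editor's note: thread_func() can no longer reach the tool state through globals, so the patch bundles the per-task pointer and the shared struct perf_sched into a heap-allocated struct sched_thread_parms that the thread frees once it has unpacked it. A small sketch of that pthread parameter-passing pattern, with invented names:

/*
 * Sketch only: because pthread_create() passes a single void *, the task
 * payload and the shared context are bundled in a heap-allocated struct
 * that the thread frees after unpacking it.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	int id;			/* stands in for the shared tool state */
};

struct thread_parms {
	struct ctx *ctx;	/* shared context */
	int task_nr;		/* per-thread payload */
};

static void *worker(void *arg)
{
	struct thread_parms *parms = arg;
	struct ctx *ctx = parms->ctx;
	int nr = parms->task_nr;

	free(parms);		/* ownership was handed to the thread */
	printf("task %d running with ctx %d\n", nr, ctx->id);
	return NULL;
}

int main(void)
{
	struct ctx ctx = { .id = 1 };
	pthread_t tid[4];

	for (int i = 0; i < 4; i++) {
		struct thread_parms *parms = malloc(sizeof(*parms));

		if (!parms)
			return 1;
		parms->ctx = &ctx;
		parms->task_nr = i;
		if (pthread_create(&tid[i], NULL, worker, parms))
			return 1;
	}
	for (int i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	return 0;
}
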
@@ -471,15 +496,15 @@ again: | |||
471 | ret = sem_post(&this_task->work_done_sem); | 496 | ret = sem_post(&this_task->work_done_sem); |
472 | BUG_ON(ret); | 497 | BUG_ON(ret); |
473 | 498 | ||
474 | ret = pthread_mutex_lock(&work_done_wait_mutex); | 499 | ret = pthread_mutex_lock(&sched->work_done_wait_mutex); |
475 | BUG_ON(ret); | 500 | BUG_ON(ret); |
476 | ret = pthread_mutex_unlock(&work_done_wait_mutex); | 501 | ret = pthread_mutex_unlock(&sched->work_done_wait_mutex); |
477 | BUG_ON(ret); | 502 | BUG_ON(ret); |
478 | 503 | ||
479 | goto again; | 504 | goto again; |
480 | } | 505 | } |
481 | 506 | ||
482 | static void create_tasks(void) | 507 | static void create_tasks(struct perf_sched *sched) |
483 | { | 508 | { |
484 | struct task_desc *task; | 509 | struct task_desc *task; |
485 | pthread_attr_t attr; | 510 | pthread_attr_t attr; |
@@ -491,128 +516,129 @@ static void create_tasks(void) | |||
491 | err = pthread_attr_setstacksize(&attr, | 516 | err = pthread_attr_setstacksize(&attr, |
492 | (size_t) max(16 * 1024, PTHREAD_STACK_MIN)); | 517 | (size_t) max(16 * 1024, PTHREAD_STACK_MIN)); |
493 | BUG_ON(err); | 518 | BUG_ON(err); |
494 | err = pthread_mutex_lock(&start_work_mutex); | 519 | err = pthread_mutex_lock(&sched->start_work_mutex); |
495 | BUG_ON(err); | 520 | BUG_ON(err); |
496 | err = pthread_mutex_lock(&work_done_wait_mutex); | 521 | err = pthread_mutex_lock(&sched->work_done_wait_mutex); |
497 | BUG_ON(err); | 522 | BUG_ON(err); |
498 | for (i = 0; i < nr_tasks; i++) { | 523 | for (i = 0; i < sched->nr_tasks; i++) { |
499 | task = tasks[i]; | 524 | struct sched_thread_parms *parms = malloc(sizeof(*parms)); |
525 | BUG_ON(parms == NULL); | ||
526 | parms->task = task = sched->tasks[i]; | ||
527 | parms->sched = sched; | ||
500 | sem_init(&task->sleep_sem, 0, 0); | 528 | sem_init(&task->sleep_sem, 0, 0); |
501 | sem_init(&task->ready_for_work, 0, 0); | 529 | sem_init(&task->ready_for_work, 0, 0); |
502 | sem_init(&task->work_done_sem, 0, 0); | 530 | sem_init(&task->work_done_sem, 0, 0); |
503 | task->curr_event = 0; | 531 | task->curr_event = 0; |
504 | err = pthread_create(&task->thread, &attr, thread_func, task); | 532 | err = pthread_create(&task->thread, &attr, thread_func, parms); |
505 | BUG_ON(err); | 533 | BUG_ON(err); |
506 | } | 534 | } |
507 | } | 535 | } |
508 | 536 | ||
509 | static void wait_for_tasks(void) | 537 | static void wait_for_tasks(struct perf_sched *sched) |
510 | { | 538 | { |
511 | u64 cpu_usage_0, cpu_usage_1; | 539 | u64 cpu_usage_0, cpu_usage_1; |
512 | struct task_desc *task; | 540 | struct task_desc *task; |
513 | unsigned long i, ret; | 541 | unsigned long i, ret; |
514 | 542 | ||
515 | start_time = get_nsecs(); | 543 | sched->start_time = get_nsecs(); |
516 | cpu_usage = 0; | 544 | sched->cpu_usage = 0; |
517 | pthread_mutex_unlock(&work_done_wait_mutex); | 545 | pthread_mutex_unlock(&sched->work_done_wait_mutex); |
518 | 546 | ||
519 | for (i = 0; i < nr_tasks; i++) { | 547 | for (i = 0; i < sched->nr_tasks; i++) { |
520 | task = tasks[i]; | 548 | task = sched->tasks[i]; |
521 | ret = sem_wait(&task->ready_for_work); | 549 | ret = sem_wait(&task->ready_for_work); |
522 | BUG_ON(ret); | 550 | BUG_ON(ret); |
523 | sem_init(&task->ready_for_work, 0, 0); | 551 | sem_init(&task->ready_for_work, 0, 0); |
524 | } | 552 | } |
525 | ret = pthread_mutex_lock(&work_done_wait_mutex); | 553 | ret = pthread_mutex_lock(&sched->work_done_wait_mutex); |
526 | BUG_ON(ret); | 554 | BUG_ON(ret); |
527 | 555 | ||
528 | cpu_usage_0 = get_cpu_usage_nsec_parent(); | 556 | cpu_usage_0 = get_cpu_usage_nsec_parent(); |
529 | 557 | ||
530 | pthread_mutex_unlock(&start_work_mutex); | 558 | pthread_mutex_unlock(&sched->start_work_mutex); |
531 | 559 | ||
532 | for (i = 0; i < nr_tasks; i++) { | 560 | for (i = 0; i < sched->nr_tasks; i++) { |
533 | task = tasks[i]; | 561 | task = sched->tasks[i]; |
534 | ret = sem_wait(&task->work_done_sem); | 562 | ret = sem_wait(&task->work_done_sem); |
535 | BUG_ON(ret); | 563 | BUG_ON(ret); |
536 | sem_init(&task->work_done_sem, 0, 0); | 564 | sem_init(&task->work_done_sem, 0, 0); |
537 | cpu_usage += task->cpu_usage; | 565 | sched->cpu_usage += task->cpu_usage; |
538 | task->cpu_usage = 0; | 566 | task->cpu_usage = 0; |
539 | } | 567 | } |
540 | 568 | ||
541 | cpu_usage_1 = get_cpu_usage_nsec_parent(); | 569 | cpu_usage_1 = get_cpu_usage_nsec_parent(); |
542 | if (!runavg_cpu_usage) | 570 | if (!sched->runavg_cpu_usage) |
543 | runavg_cpu_usage = cpu_usage; | 571 | sched->runavg_cpu_usage = sched->cpu_usage; |
544 | runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10; | 572 | sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10; |
545 | 573 | ||
546 | parent_cpu_usage = cpu_usage_1 - cpu_usage_0; | 574 | sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0; |
547 | if (!runavg_parent_cpu_usage) | 575 | if (!sched->runavg_parent_cpu_usage) |
548 | runavg_parent_cpu_usage = parent_cpu_usage; | 576 | sched->runavg_parent_cpu_usage = sched->parent_cpu_usage; |
549 | runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 + | 577 | sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 + |
550 | parent_cpu_usage)/10; | 578 | sched->parent_cpu_usage)/10; |
551 | 579 | ||
552 | ret = pthread_mutex_lock(&start_work_mutex); | 580 | ret = pthread_mutex_lock(&sched->start_work_mutex); |
553 | BUG_ON(ret); | 581 | BUG_ON(ret); |
554 | 582 | ||
555 | for (i = 0; i < nr_tasks; i++) { | 583 | for (i = 0; i < sched->nr_tasks; i++) { |
556 | task = tasks[i]; | 584 | task = sched->tasks[i]; |
557 | sem_init(&task->sleep_sem, 0, 0); | 585 | sem_init(&task->sleep_sem, 0, 0); |
558 | task->curr_event = 0; | 586 | task->curr_event = 0; |
559 | } | 587 | } |
560 | } | 588 | } |
561 | 589 | ||
562 | static void run_one_test(void) | 590 | static void run_one_test(struct perf_sched *sched) |
563 | { | 591 | { |
564 | u64 T0, T1, delta, avg_delta, fluct; | 592 | u64 T0, T1, delta, avg_delta, fluct; |
565 | 593 | ||
566 | T0 = get_nsecs(); | 594 | T0 = get_nsecs(); |
567 | wait_for_tasks(); | 595 | wait_for_tasks(sched); |
568 | T1 = get_nsecs(); | 596 | T1 = get_nsecs(); |
569 | 597 | ||
570 | delta = T1 - T0; | 598 | delta = T1 - T0; |
571 | sum_runtime += delta; | 599 | sched->sum_runtime += delta; |
572 | nr_runs++; | 600 | sched->nr_runs++; |
573 | 601 | ||
574 | avg_delta = sum_runtime / nr_runs; | 602 | avg_delta = sched->sum_runtime / sched->nr_runs; |
575 | if (delta < avg_delta) | 603 | if (delta < avg_delta) |
576 | fluct = avg_delta - delta; | 604 | fluct = avg_delta - delta; |
577 | else | 605 | else |
578 | fluct = delta - avg_delta; | 606 | fluct = delta - avg_delta; |
579 | sum_fluct += fluct; | 607 | sched->sum_fluct += fluct; |
580 | if (!run_avg) | 608 | if (!sched->run_avg) |
581 | run_avg = delta; | 609 | sched->run_avg = delta; |
582 | run_avg = (run_avg*9 + delta)/10; | 610 | sched->run_avg = (sched->run_avg * 9 + delta) / 10; |
583 | 611 | ||
584 | printf("#%-3ld: %0.3f, ", | 612 | printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0); |
585 | nr_runs, (double)delta/1000000.0); | ||
586 | 613 | ||
587 | printf("ravg: %0.2f, ", | 614 | printf("ravg: %0.2f, ", (double)sched->run_avg / 1e6); |
588 | (double)run_avg/1e6); | ||
589 | 615 | ||
590 | printf("cpu: %0.2f / %0.2f", | 616 | printf("cpu: %0.2f / %0.2f", |
591 | (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6); | 617 | (double)sched->cpu_usage / 1e6, (double)sched->runavg_cpu_usage / 1e6); |
592 | 618 | ||
593 | #if 0 | 619 | #if 0 |
594 | /* | 620 | /* |
595 | * rusage statistics done by the parent, these are less | 621 | * rusage statistics done by the parent, these are less |
596 | * accurate than the sum_exec_runtime based statistics: | 622 | * accurate than the sched->sum_exec_runtime based statistics: |
597 | */ | 623 | */ |
598 | printf(" [%0.2f / %0.2f]", | 624 | printf(" [%0.2f / %0.2f]", |
599 | (double)parent_cpu_usage/1e6, | 625 | (double)sched->parent_cpu_usage/1e6, |
600 | (double)runavg_parent_cpu_usage/1e6); | 626 | (double)sched->runavg_parent_cpu_usage/1e6); |
601 | #endif | 627 | #endif |
602 | 628 | ||
603 | printf("\n"); | 629 | printf("\n"); |
604 | 630 | ||
605 | if (nr_sleep_corrections) | 631 | if (sched->nr_sleep_corrections) |
606 | printf(" (%ld sleep corrections)\n", nr_sleep_corrections); | 632 | printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections); |
607 | nr_sleep_corrections = 0; | 633 | sched->nr_sleep_corrections = 0; |
608 | } | 634 | } |
609 | 635 | ||
610 | static void test_calibrations(void) | 636 | static void test_calibrations(struct perf_sched *sched) |
611 | { | 637 | { |
612 | u64 T0, T1; | 638 | u64 T0, T1; |
613 | 639 | ||
614 | T0 = get_nsecs(); | 640 | T0 = get_nsecs(); |
615 | burn_nsecs(1e6); | 641 | burn_nsecs(sched, 1e6); |
616 | T1 = get_nsecs(); | 642 | T1 = get_nsecs(); |
617 | 643 | ||
618 | printf("the run test took %" PRIu64 " nsecs\n", T1 - T0); | 644 | printf("the run test took %" PRIu64 " nsecs\n", T1 - T0); |
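
Editor's note: the runavg_cpu_usage, runavg_parent_cpu_usage and run_avg updates above all use the same smoothing rule, new = (old * 9 + sample) / 10, an exponentially weighted moving average that gives each new measurement one tenth of the weight and is seeded with the first sample. A toy illustration with made-up numbers:

/*
 * Toy illustration (made-up numbers) of the smoothing used above:
 * new_avg = (old_avg * 9 + sample) / 10, seeded with the first sample.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t samples[] = { 1000, 1200, 900, 1100 };
	uint64_t run_avg = 0;

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		if (!run_avg)			/* first run seeds the average */
			run_avg = samples[i];
		run_avg = (run_avg * 9 + samples[i]) / 10;
		printf("sample %llu -> avg %llu\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)run_avg);
	}
	return 0;
}
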
@@ -624,216 +650,92 @@ static void test_calibrations(void) | |||
624 | printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0); | 650 | printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0); |
625 | } | 651 | } |
626 | 652 | ||
627 | #define FILL_FIELD(ptr, field, event, data) \ | 653 | static int |
628 | ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data) | 654 | replay_wakeup_event(struct perf_sched *sched, |
629 | 655 | struct perf_evsel *evsel, struct perf_sample *sample, | |
630 | #define FILL_ARRAY(ptr, array, event, data) \ | 656 | struct machine *machine __maybe_unused) |
631 | do { \ | ||
632 | void *__array = raw_field_ptr(event, #array, data); \ | ||
633 | memcpy(ptr.array, __array, sizeof(ptr.array)); \ | ||
634 | } while(0) | ||
635 | |||
636 | #define FILL_COMMON_FIELDS(ptr, event, data) \ | ||
637 | do { \ | ||
638 | FILL_FIELD(ptr, common_type, event, data); \ | ||
639 | FILL_FIELD(ptr, common_flags, event, data); \ | ||
640 | FILL_FIELD(ptr, common_preempt_count, event, data); \ | ||
641 | FILL_FIELD(ptr, common_pid, event, data); \ | ||
642 | FILL_FIELD(ptr, common_tgid, event, data); \ | ||
643 | } while (0) | ||
644 | |||
645 | |||
646 | |||
647 | struct trace_switch_event { | ||
648 | u32 size; | ||
649 | |||
650 | u16 common_type; | ||
651 | u8 common_flags; | ||
652 | u8 common_preempt_count; | ||
653 | u32 common_pid; | ||
654 | u32 common_tgid; | ||
655 | |||
656 | char prev_comm[16]; | ||
657 | u32 prev_pid; | ||
658 | u32 prev_prio; | ||
659 | u64 prev_state; | ||
660 | char next_comm[16]; | ||
661 | u32 next_pid; | ||
662 | u32 next_prio; | ||
663 | }; | ||
664 | |||
665 | struct trace_runtime_event { | ||
666 | u32 size; | ||
667 | |||
668 | u16 common_type; | ||
669 | u8 common_flags; | ||
670 | u8 common_preempt_count; | ||
671 | u32 common_pid; | ||
672 | u32 common_tgid; | ||
673 | |||
674 | char comm[16]; | ||
675 | u32 pid; | ||
676 | u64 runtime; | ||
677 | u64 vruntime; | ||
678 | }; | ||
679 | |||
680 | struct trace_wakeup_event { | ||
681 | u32 size; | ||
682 | |||
683 | u16 common_type; | ||
684 | u8 common_flags; | ||
685 | u8 common_preempt_count; | ||
686 | u32 common_pid; | ||
687 | u32 common_tgid; | ||
688 | |||
689 | char comm[16]; | ||
690 | u32 pid; | ||
691 | |||
692 | u32 prio; | ||
693 | u32 success; | ||
694 | u32 cpu; | ||
695 | }; | ||
696 | |||
697 | struct trace_fork_event { | ||
698 | u32 size; | ||
699 | |||
700 | u16 common_type; | ||
701 | u8 common_flags; | ||
702 | u8 common_preempt_count; | ||
703 | u32 common_pid; | ||
704 | u32 common_tgid; | ||
705 | |||
706 | char parent_comm[16]; | ||
707 | u32 parent_pid; | ||
708 | char child_comm[16]; | ||
709 | u32 child_pid; | ||
710 | }; | ||
711 | |||
712 | struct trace_migrate_task_event { | ||
713 | u32 size; | ||
714 | |||
715 | u16 common_type; | ||
716 | u8 common_flags; | ||
717 | u8 common_preempt_count; | ||
718 | u32 common_pid; | ||
719 | u32 common_tgid; | ||
720 | |||
721 | char comm[16]; | ||
722 | u32 pid; | ||
723 | |||
724 | u32 prio; | ||
725 | u32 cpu; | ||
726 | }; | ||
727 | |||
728 | struct trace_sched_handler { | ||
729 | void (*switch_event)(struct trace_switch_event *, | ||
730 | struct machine *, | ||
731 | struct event_format *, | ||
732 | struct perf_sample *sample); | ||
733 | |||
734 | void (*runtime_event)(struct trace_runtime_event *, | ||
735 | struct machine *, | ||
736 | struct perf_sample *sample); | ||
737 | |||
738 | void (*wakeup_event)(struct trace_wakeup_event *, | ||
739 | struct machine *, | ||
740 | struct event_format *, | ||
741 | struct perf_sample *sample); | ||
742 | |||
743 | void (*fork_event)(struct trace_fork_event *, | ||
744 | struct event_format *event); | ||
745 | |||
746 | void (*migrate_task_event)(struct trace_migrate_task_event *, | ||
747 | struct machine *machine, | ||
748 | struct perf_sample *sample); | ||
749 | }; | ||
750 | |||
751 | |||
752 | static void | ||
753 | replay_wakeup_event(struct trace_wakeup_event *wakeup_event, | ||
754 | struct machine *machine __used, | ||
755 | struct event_format *event, struct perf_sample *sample) | ||
756 | { | 657 | { |
658 | const char *comm = perf_evsel__strval(evsel, sample, "comm"); | ||
659 | const u32 pid = perf_evsel__intval(evsel, sample, "pid"); | ||
757 | struct task_desc *waker, *wakee; | 660 | struct task_desc *waker, *wakee; |
758 | 661 | ||
759 | if (verbose) { | 662 | if (verbose) { |
760 | printf("sched_wakeup event %p\n", event); | 663 | printf("sched_wakeup event %p\n", evsel); |
761 | 664 | ||
762 | printf(" ... pid %d woke up %s/%d\n", | 665 | printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid); |
763 | wakeup_event->common_pid, | ||
764 | wakeup_event->comm, | ||
765 | wakeup_event->pid); | ||
766 | } | 666 | } |
767 | 667 | ||
768 | waker = register_pid(wakeup_event->common_pid, "<unknown>"); | 668 | waker = register_pid(sched, sample->tid, "<unknown>"); |
769 | wakee = register_pid(wakeup_event->pid, wakeup_event->comm); | 669 | wakee = register_pid(sched, pid, comm); |
770 | 670 | ||
771 | add_sched_event_wakeup(waker, sample->time, wakee); | 671 | add_sched_event_wakeup(sched, waker, sample->time, wakee); |
672 | return 0; | ||
772 | } | 673 | } |
773 | 674 | ||
774 | static u64 cpu_last_switched[MAX_CPUS]; | 675 | static int replay_switch_event(struct perf_sched *sched, |
775 | 676 | struct perf_evsel *evsel, | |
776 | static void | 677 | struct perf_sample *sample, |
777 | replay_switch_event(struct trace_switch_event *switch_event, | 678 | struct machine *machine __maybe_unused) |
778 | struct machine *machine __used, | ||
779 | struct event_format *event, | ||
780 | struct perf_sample *sample) | ||
781 | { | 679 | { |
782 | struct task_desc *prev, __used *next; | 680 | const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"), |
681 | *next_comm = perf_evsel__strval(evsel, sample, "next_comm"); | ||
682 | const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), | ||
683 | next_pid = perf_evsel__intval(evsel, sample, "next_pid"); | ||
684 | const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); | ||
685 | struct task_desc *prev, __maybe_unused *next; | ||
783 | u64 timestamp0, timestamp = sample->time; | 686 | u64 timestamp0, timestamp = sample->time; |
784 | int cpu = sample->cpu; | 687 | int cpu = sample->cpu; |
785 | s64 delta; | 688 | s64 delta; |
786 | 689 | ||
787 | if (verbose) | 690 | if (verbose) |
788 | printf("sched_switch event %p\n", event); | 691 | printf("sched_switch event %p\n", evsel); |
789 | 692 | ||
790 | if (cpu >= MAX_CPUS || cpu < 0) | 693 | if (cpu >= MAX_CPUS || cpu < 0) |
791 | return; | 694 | return 0; |
792 | 695 | ||
793 | timestamp0 = cpu_last_switched[cpu]; | 696 | timestamp0 = sched->cpu_last_switched[cpu]; |
794 | if (timestamp0) | 697 | if (timestamp0) |
795 | delta = timestamp - timestamp0; | 698 | delta = timestamp - timestamp0; |
796 | else | 699 | else |
797 | delta = 0; | 700 | delta = 0; |
798 | 701 | ||
799 | if (delta < 0) | 702 | if (delta < 0) { |
800 | die("hm, delta: %" PRIu64 " < 0 ?\n", delta); | 703 | pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta); |
801 | 704 | return -1; | |
802 | if (verbose) { | ||
803 | printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", | ||
804 | switch_event->prev_comm, switch_event->prev_pid, | ||
805 | switch_event->next_comm, switch_event->next_pid, | ||
806 | delta); | ||
807 | } | 705 | } |
808 | 706 | ||
809 | prev = register_pid(switch_event->prev_pid, switch_event->prev_comm); | 707 | pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", |
810 | next = register_pid(switch_event->next_pid, switch_event->next_comm); | 708 | prev_comm, prev_pid, next_comm, next_pid, delta); |
811 | 709 | ||
812 | cpu_last_switched[cpu] = timestamp; | 710 | prev = register_pid(sched, prev_pid, prev_comm); |
711 | next = register_pid(sched, next_pid, next_comm); | ||
813 | 712 | ||
814 | add_sched_event_run(prev, timestamp, delta); | 713 | sched->cpu_last_switched[cpu] = timestamp; |
815 | add_sched_event_sleep(prev, timestamp, switch_event->prev_state); | ||
816 | } | ||
817 | 714 | ||
715 | add_sched_event_run(sched, prev, timestamp, delta); | ||
716 | add_sched_event_sleep(sched, prev, timestamp, prev_state); | ||
818 | 717 | ||
819 | static void | 718 | return 0; |
820 | replay_fork_event(struct trace_fork_event *fork_event, | 719 | } |
821 | struct event_format *event) | 720 | |
721 | static int replay_fork_event(struct perf_sched *sched, struct perf_evsel *evsel, | ||
722 | struct perf_sample *sample) | ||
822 | { | 723 | { |
724 | const char *parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"), | ||
725 | *child_comm = perf_evsel__strval(evsel, sample, "child_comm"); | ||
726 | const u32 parent_pid = perf_evsel__intval(evsel, sample, "parent_pid"), | ||
727 | child_pid = perf_evsel__intval(evsel, sample, "child_pid"); | ||
728 | |||
823 | if (verbose) { | 729 | if (verbose) { |
824 | printf("sched_fork event %p\n", event); | 730 | printf("sched_fork event %p\n", evsel); |
825 | printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid); | 731 | printf("... parent: %s/%d\n", parent_comm, parent_pid); |
826 | printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid); | 732 | printf("... child: %s/%d\n", child_comm, child_pid); |
827 | } | 733 | } |
828 | register_pid(fork_event->parent_pid, fork_event->parent_comm); | ||
829 | register_pid(fork_event->child_pid, fork_event->child_comm); | ||
830 | } | ||
831 | 734 | ||
832 | static struct trace_sched_handler replay_ops = { | 735 | register_pid(sched, parent_pid, parent_comm); |
833 | .wakeup_event = replay_wakeup_event, | 736 | register_pid(sched, child_pid, child_comm); |
834 | .switch_event = replay_switch_event, | 737 | return 0; |
835 | .fork_event = replay_fork_event, | 738 | } |
836 | }; | ||
837 | 739 | ||
838 | struct sort_dimension { | 740 | struct sort_dimension { |
839 | const char *name; | 741 | const char *name; |
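
Editor's note: the large hunk above deletes the hand-maintained trace_switch_event, trace_wakeup_event and related mirror structs together with the FILL_FIELD/FILL_ARRAY machinery; the handlers instead pull individual tracepoint fields by name with perf_evsel__intval() and perf_evsel__strval(). The sketch below is not the perf implementation, only a toy name-to-offset lookup showing the shape of such by-name access; the types and sample layout are invented.

/*
 * Not the perf implementation, just a toy name-to-offset lookup showing
 * the shape of by-name field access that replaces fixed mirror structs.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct field_desc {
	const char *name;
	size_t offset;
};

/* pretend raw sample layout; perf derives this from the tracepoint format */
struct raw_sample {
	uint32_t pid;
	uint32_t prio;
};

static const struct field_desc fields[] = {
	{ "pid",  offsetof(struct raw_sample, pid)  },
	{ "prio", offsetof(struct raw_sample, prio) },
};

static uint32_t intval(const void *raw, const char *name)
{
	for (size_t i = 0; i < sizeof(fields) / sizeof(fields[0]); i++)
		if (!strcmp(fields[i].name, name))
			return *(const uint32_t *)((const char *)raw + fields[i].offset);
	return 0;	/* unknown field; real code would report an error */
}

int main(void)
{
	struct raw_sample s = { .pid = 1234, .prio = 120 };

	printf("pid=%u prio=%u\n", intval(&s, "pid"), intval(&s, "prio"));
	return 0;
}
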
@@ -841,8 +743,6 @@ struct sort_dimension { | |||
841 | struct list_head list; | 743 | struct list_head list; |
842 | }; | 744 | }; |
843 | 745 | ||
844 | static LIST_HEAD(cmp_pid); | ||
845 | |||
846 | static int | 746 | static int |
847 | thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r) | 747 | thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r) |
848 | { | 748 | { |
@@ -911,40 +811,45 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data, | |||
911 | rb_insert_color(&data->node, root); | 811 | rb_insert_color(&data->node, root); |
912 | } | 812 | } |
913 | 813 | ||
914 | static void thread_atoms_insert(struct thread *thread) | 814 | static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) |
915 | { | 815 | { |
916 | struct work_atoms *atoms = zalloc(sizeof(*atoms)); | 816 | struct work_atoms *atoms = zalloc(sizeof(*atoms)); |
917 | if (!atoms) | 817 | if (!atoms) { |
918 | die("No memory"); | 818 | pr_err("No memory at %s\n", __func__); |
819 | return -1; | ||
820 | } | ||
919 | 821 | ||
920 | atoms->thread = thread; | 822 | atoms->thread = thread; |
921 | INIT_LIST_HEAD(&atoms->work_list); | 823 | INIT_LIST_HEAD(&atoms->work_list); |
922 | __thread_latency_insert(&atom_root, atoms, &cmp_pid); | 824 | __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid); |
825 | return 0; | ||
923 | } | 826 | } |
924 | 827 | ||
925 | static void | 828 | static int latency_fork_event(struct perf_sched *sched __maybe_unused, |
926 | latency_fork_event(struct trace_fork_event *fork_event __used, | 829 | struct perf_evsel *evsel __maybe_unused, |
927 | struct event_format *event __used) | 830 | struct perf_sample *sample __maybe_unused) |
928 | { | 831 | { |
929 | /* should insert the newcomer */ | 832 | /* should insert the newcomer */ |
833 | return 0; | ||
930 | } | 834 | } |
931 | 835 | ||
932 | __used | 836 | static char sched_out_state(u64 prev_state) |
933 | static char sched_out_state(struct trace_switch_event *switch_event) | ||
934 | { | 837 | { |
935 | const char *str = TASK_STATE_TO_CHAR_STR; | 838 | const char *str = TASK_STATE_TO_CHAR_STR; |
936 | 839 | ||
937 | return str[switch_event->prev_state]; | 840 | return str[prev_state]; |
938 | } | 841 | } |
939 | 842 | ||
940 | static void | 843 | static int |
941 | add_sched_out_event(struct work_atoms *atoms, | 844 | add_sched_out_event(struct work_atoms *atoms, |
942 | char run_state, | 845 | char run_state, |
943 | u64 timestamp) | 846 | u64 timestamp) |
944 | { | 847 | { |
945 | struct work_atom *atom = zalloc(sizeof(*atom)); | 848 | struct work_atom *atom = zalloc(sizeof(*atom)); |
946 | if (!atom) | 849 | if (!atom) { |
947 | die("Non memory"); | 850 | pr_err("Non memory at %s", __func__); |
851 | return -1; | ||
852 | } | ||
948 | 853 | ||
949 | atom->sched_out_time = timestamp; | 854 | atom->sched_out_time = timestamp; |
950 | 855 | ||
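
Editor's note: this hunk is one instance of the series-wide die() removal: helpers such as thread_atoms_insert() now report the problem with pr_err() and return -1, and every caller propagates the failure instead of terminating the whole tool. A condensed sketch of the pattern, with placeholder names:

/*
 * Condensed sketch (placeholder names) of the die() removal: helpers
 * report the error and return -1, callers propagate instead of exiting.
 */
#include <stdio.h>
#include <stdlib.h>

struct atoms {
	int nb;
};

static int atoms_insert(struct atoms **out)
{
	struct atoms *a = calloc(1, sizeof(*a));

	if (!a) {
		fprintf(stderr, "No memory at %s\n", __func__);
		return -1;		/* was: die("No memory") */
	}
	*out = a;
	return 0;
}

static int handle_event(void)
{
	struct atoms *a;

	if (atoms_insert(&a))
		return -1;		/* propagate the failure upward */
	a->nb++;
	free(a);
	return 0;
}

int main(void)
{
	return handle_event() ? 1 : 0;
}
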
@@ -954,10 +859,12 @@ add_sched_out_event(struct work_atoms *atoms, | |||
954 | } | 859 | } |
955 | 860 | ||
956 | list_add_tail(&atom->list, &atoms->work_list); | 861 | list_add_tail(&atom->list, &atoms->work_list); |
862 | return 0; | ||
957 | } | 863 | } |
958 | 864 | ||
959 | static void | 865 | static void |
960 | add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used) | 866 | add_runtime_event(struct work_atoms *atoms, u64 delta, |
867 | u64 timestamp __maybe_unused) | ||
961 | { | 868 | { |
962 | struct work_atom *atom; | 869 | struct work_atom *atom; |
963 | 870 | ||
@@ -1000,12 +907,14 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp) | |||
1000 | atoms->nb_atoms++; | 907 | atoms->nb_atoms++; |
1001 | } | 908 | } |
1002 | 909 | ||
1003 | static void | 910 | static int latency_switch_event(struct perf_sched *sched, |
1004 | latency_switch_event(struct trace_switch_event *switch_event, | 911 | struct perf_evsel *evsel, |
1005 | struct machine *machine, | 912 | struct perf_sample *sample, |
1006 | struct event_format *event __used, | 913 | struct machine *machine) |
1007 | struct perf_sample *sample) | ||
1008 | { | 914 | { |
915 | const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), | ||
916 | next_pid = perf_evsel__intval(evsel, sample, "next_pid"); | ||
917 | const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); | ||
1009 | struct work_atoms *out_events, *in_events; | 918 | struct work_atoms *out_events, *in_events; |
1010 | struct thread *sched_out, *sched_in; | 919 | struct thread *sched_out, *sched_in; |
1011 | u64 timestamp0, timestamp = sample->time; | 920 | u64 timestamp0, timestamp = sample->time; |
@@ -1014,87 +923,112 @@ latency_switch_event(struct trace_switch_event *switch_event, | |||
1014 | 923 | ||
1015 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); | 924 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); |
1016 | 925 | ||
1017 | timestamp0 = cpu_last_switched[cpu]; | 926 | timestamp0 = sched->cpu_last_switched[cpu]; |
1018 | cpu_last_switched[cpu] = timestamp; | 927 | sched->cpu_last_switched[cpu] = timestamp; |
1019 | if (timestamp0) | 928 | if (timestamp0) |
1020 | delta = timestamp - timestamp0; | 929 | delta = timestamp - timestamp0; |
1021 | else | 930 | else |
1022 | delta = 0; | 931 | delta = 0; |
1023 | 932 | ||
1024 | if (delta < 0) | 933 | if (delta < 0) { |
1025 | die("hm, delta: %" PRIu64 " < 0 ?\n", delta); | 934 | pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); |
1026 | 935 | return -1; | |
936 | } | ||
1027 | 937 | ||
1028 | sched_out = machine__findnew_thread(machine, switch_event->prev_pid); | 938 | sched_out = machine__findnew_thread(machine, prev_pid); |
1029 | sched_in = machine__findnew_thread(machine, switch_event->next_pid); | 939 | sched_in = machine__findnew_thread(machine, next_pid); |
1030 | 940 | ||
1031 | out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); | 941 | out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); |
1032 | if (!out_events) { | 942 | if (!out_events) { |
1033 | thread_atoms_insert(sched_out); | 943 | if (thread_atoms_insert(sched, sched_out)) |
1034 | out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); | 944 | return -1; |
1035 | if (!out_events) | 945 | out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); |
1036 | die("out-event: Internal tree error"); | 946 | if (!out_events) { |
947 | pr_err("out-event: Internal tree error"); | ||
948 | return -1; | ||
949 | } | ||
1037 | } | 950 | } |
1038 | add_sched_out_event(out_events, sched_out_state(switch_event), timestamp); | 951 | if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp)) |
952 | return -1; | ||
1039 | 953 | ||
1040 | in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid); | 954 | in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); |
1041 | if (!in_events) { | 955 | if (!in_events) { |
1042 | thread_atoms_insert(sched_in); | 956 | if (thread_atoms_insert(sched, sched_in)) |
1043 | in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid); | 957 | return -1; |
1044 | if (!in_events) | 958 | in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); |
1045 | die("in-event: Internal tree error"); | 959 | if (!in_events) { |
960 | pr_err("in-event: Internal tree error"); | ||
961 | return -1; | ||
962 | } | ||
1046 | /* | 963 | /* |
1047 | * Take came in we have not heard about yet, | 964 | * Take came in we have not heard about yet, |
1048 | * add in an initial atom in runnable state: | 965 | * add in an initial atom in runnable state: |
1049 | */ | 966 | */ |
1050 | add_sched_out_event(in_events, 'R', timestamp); | 967 | if (add_sched_out_event(in_events, 'R', timestamp)) |
968 | return -1; | ||
1051 | } | 969 | } |
1052 | add_sched_in_event(in_events, timestamp); | 970 | add_sched_in_event(in_events, timestamp); |
971 | |||
972 | return 0; | ||
1053 | } | 973 | } |
1054 | 974 | ||
1055 | static void | 975 | static int latency_runtime_event(struct perf_sched *sched, |
1056 | latency_runtime_event(struct trace_runtime_event *runtime_event, | 976 | struct perf_evsel *evsel, |
1057 | struct machine *machine, struct perf_sample *sample) | 977 | struct perf_sample *sample, |
978 | struct machine *machine) | ||
1058 | { | 979 | { |
1059 | struct thread *thread = machine__findnew_thread(machine, runtime_event->pid); | 980 | const u32 pid = perf_evsel__intval(evsel, sample, "pid"); |
1060 | struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); | 981 | const u64 runtime = perf_evsel__intval(evsel, sample, "runtime"); |
982 | struct thread *thread = machine__findnew_thread(machine, pid); | ||
983 | struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); | ||
1061 | u64 timestamp = sample->time; | 984 | u64 timestamp = sample->time; |
1062 | int cpu = sample->cpu; | 985 | int cpu = sample->cpu; |
1063 | 986 | ||
1064 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); | 987 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); |
1065 | if (!atoms) { | 988 | if (!atoms) { |
1066 | thread_atoms_insert(thread); | 989 | if (thread_atoms_insert(sched, thread)) |
1067 | atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); | 990 | return -1; |
1068 | if (!atoms) | 991 | atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); |
1069 | die("in-event: Internal tree error"); | 992 | if (!atoms) { |
1070 | add_sched_out_event(atoms, 'R', timestamp); | 993 | pr_debug("in-event: Internal tree error"); |
994 | return -1; | ||
995 | } | ||
996 | if (add_sched_out_event(atoms, 'R', timestamp)) | ||
997 | return -1; | ||
1071 | } | 998 | } |
1072 | 999 | ||
1073 | add_runtime_event(atoms, runtime_event->runtime, timestamp); | 1000 | add_runtime_event(atoms, runtime, timestamp); |
1001 | return 0; | ||
1074 | } | 1002 | } |
1075 | 1003 | ||
1076 | static void | 1004 | static int latency_wakeup_event(struct perf_sched *sched, |
1077 | latency_wakeup_event(struct trace_wakeup_event *wakeup_event, | 1005 | struct perf_evsel *evsel, |
1078 | struct machine *machine, struct event_format *event __used, | 1006 | struct perf_sample *sample, |
1079 | struct perf_sample *sample) | 1007 | struct machine *machine) |
1080 | { | 1008 | { |
1009 | const u32 pid = perf_evsel__intval(evsel, sample, "pid"), | ||
1010 | success = perf_evsel__intval(evsel, sample, "success"); | ||
1081 | struct work_atoms *atoms; | 1011 | struct work_atoms *atoms; |
1082 | struct work_atom *atom; | 1012 | struct work_atom *atom; |
1083 | struct thread *wakee; | 1013 | struct thread *wakee; |
1084 | u64 timestamp = sample->time; | 1014 | u64 timestamp = sample->time; |
1085 | 1015 | ||
1086 | /* Note for later, it may be interesting to observe the failing cases */ | 1016 | /* Note for later, it may be interesting to observe the failing cases */ |
1087 | if (!wakeup_event->success) | 1017 | if (!success) |
1088 | return; | 1018 | return 0; |
1089 | 1019 | ||
1090 | wakee = machine__findnew_thread(machine, wakeup_event->pid); | 1020 | wakee = machine__findnew_thread(machine, pid); |
1091 | atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); | 1021 | atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); |
1092 | if (!atoms) { | 1022 | if (!atoms) { |
1093 | thread_atoms_insert(wakee); | 1023 | if (thread_atoms_insert(sched, wakee)) |
1094 | atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); | 1024 | return -1; |
1095 | if (!atoms) | 1025 | atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); |
1096 | die("wakeup-event: Internal tree error"); | 1026 | if (!atoms) { |
1097 | add_sched_out_event(atoms, 'S', timestamp); | 1027 | pr_debug("wakeup-event: Internal tree error"); |
1028 | return -1; | ||
1029 | } | ||
1030 | if (add_sched_out_event(atoms, 'S', timestamp)) | ||
1031 | return -1; | ||
1098 | } | 1032 | } |
1099 | 1033 | ||
1100 | BUG_ON(list_empty(&atoms->work_list)); | 1034 | BUG_ON(list_empty(&atoms->work_list)); |
@@ -1106,23 +1040,26 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, | |||
1106 | * one CPU, or are only looking at only one, so don't | 1040 | * one CPU, or are only looking at only one, so don't |
1107 | * make useless noise. | 1041 | * make useless noise. |
1108 | */ | 1042 | */ |
1109 | if (profile_cpu == -1 && atom->state != THREAD_SLEEPING) | 1043 | if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING) |
1110 | nr_state_machine_bugs++; | 1044 | sched->nr_state_machine_bugs++; |
1111 | 1045 | ||
1112 | nr_timestamps++; | 1046 | sched->nr_timestamps++; |
1113 | if (atom->sched_out_time > timestamp) { | 1047 | if (atom->sched_out_time > timestamp) { |
1114 | nr_unordered_timestamps++; | 1048 | sched->nr_unordered_timestamps++; |
1115 | return; | 1049 | return 0; |
1116 | } | 1050 | } |
1117 | 1051 | ||
1118 | atom->state = THREAD_WAIT_CPU; | 1052 | atom->state = THREAD_WAIT_CPU; |
1119 | atom->wake_up_time = timestamp; | 1053 | atom->wake_up_time = timestamp; |
1054 | return 0; | ||
1120 | } | 1055 | } |
1121 | 1056 | ||
1122 | static void | 1057 | static int latency_migrate_task_event(struct perf_sched *sched, |
1123 | latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, | 1058 | struct perf_evsel *evsel, |
1124 | struct machine *machine, struct perf_sample *sample) | 1059 | struct perf_sample *sample, |
1060 | struct machine *machine) | ||
1125 | { | 1061 | { |
1062 | const u32 pid = perf_evsel__intval(evsel, sample, "pid"); | ||
1126 | u64 timestamp = sample->time; | 1063 | u64 timestamp = sample->time; |
1127 | struct work_atoms *atoms; | 1064 | struct work_atoms *atoms; |
1128 | struct work_atom *atom; | 1065 | struct work_atom *atom; |
@@ -1131,18 +1068,22 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, | |||
1131 | /* | 1068 | /* |
1132 | * Only need to worry about migration when profiling one CPU. | 1069 | * Only need to worry about migration when profiling one CPU. |
1133 | */ | 1070 | */ |
1134 | if (profile_cpu == -1) | 1071 | if (sched->profile_cpu == -1) |
1135 | return; | 1072 | return 0; |
1136 | 1073 | ||
1137 | migrant = machine__findnew_thread(machine, migrate_task_event->pid); | 1074 | migrant = machine__findnew_thread(machine, pid); |
1138 | atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); | 1075 | atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); |
1139 | if (!atoms) { | 1076 | if (!atoms) { |
1140 | thread_atoms_insert(migrant); | 1077 | if (thread_atoms_insert(sched, migrant)) |
1141 | register_pid(migrant->pid, migrant->comm); | 1078 | return -1; |
1142 | atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); | 1079 | register_pid(sched, migrant->pid, migrant->comm); |
1143 | if (!atoms) | 1080 | atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); |
1144 | die("migration-event: Internal tree error"); | 1081 | if (!atoms) { |
1145 | add_sched_out_event(atoms, 'R', timestamp); | 1082 | pr_debug("migration-event: Internal tree error"); |
1083 | return -1; | ||
1084 | } | ||
1085 | if (add_sched_out_event(atoms, 'R', timestamp)) | ||
1086 | return -1; | ||
1146 | } | 1087 | } |
1147 | 1088 | ||
1148 | BUG_ON(list_empty(&atoms->work_list)); | 1089 | BUG_ON(list_empty(&atoms->work_list)); |
@@ -1150,21 +1091,15 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, | |||
1150 | atom = list_entry(atoms->work_list.prev, struct work_atom, list); | 1091 | atom = list_entry(atoms->work_list.prev, struct work_atom, list); |
1151 | atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; | 1092 | atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; |
1152 | 1093 | ||
1153 | nr_timestamps++; | 1094 | sched->nr_timestamps++; |
1154 | 1095 | ||
1155 | if (atom->sched_out_time > timestamp) | 1096 | if (atom->sched_out_time > timestamp) |
1156 | nr_unordered_timestamps++; | 1097 | sched->nr_unordered_timestamps++; |
1157 | } | ||
1158 | 1098 | ||
1159 | static struct trace_sched_handler lat_ops = { | 1099 | return 0; |
1160 | .wakeup_event = latency_wakeup_event, | 1100 | } |
1161 | .switch_event = latency_switch_event, | ||
1162 | .runtime_event = latency_runtime_event, | ||
1163 | .fork_event = latency_fork_event, | ||
1164 | .migrate_task_event = latency_migrate_task_event, | ||
1165 | }; | ||
1166 | 1101 | ||
1167 | static void output_lat_thread(struct work_atoms *work_list) | 1102 | static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list) |
1168 | { | 1103 | { |
1169 | int i; | 1104 | int i; |
1170 | int ret; | 1105 | int ret; |
@@ -1178,8 +1113,8 @@ static void output_lat_thread(struct work_atoms *work_list) | |||
1178 | if (!strcmp(work_list->thread->comm, "swapper")) | 1113 | if (!strcmp(work_list->thread->comm, "swapper")) |
1179 | return; | 1114 | return; |
1180 | 1115 | ||
1181 | all_runtime += work_list->total_runtime; | 1116 | sched->all_runtime += work_list->total_runtime; |
1182 | all_count += work_list->nb_atoms; | 1117 | sched->all_count += work_list->nb_atoms; |
1183 | 1118 | ||
1184 | ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid); | 1119 | ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid); |
1185 | 1120 | ||
@@ -1205,11 +1140,6 @@ static int pid_cmp(struct work_atoms *l, struct work_atoms *r) | |||
1205 | return 0; | 1140 | return 0; |
1206 | } | 1141 | } |
1207 | 1142 | ||
1208 | static struct sort_dimension pid_sort_dimension = { | ||
1209 | .name = "pid", | ||
1210 | .cmp = pid_cmp, | ||
1211 | }; | ||
1212 | |||
1213 | static int avg_cmp(struct work_atoms *l, struct work_atoms *r) | 1143 | static int avg_cmp(struct work_atoms *l, struct work_atoms *r) |
1214 | { | 1144 | { |
1215 | u64 avgl, avgr; | 1145 | u64 avgl, avgr; |
@@ -1231,11 +1161,6 @@ static int avg_cmp(struct work_atoms *l, struct work_atoms *r) | |||
1231 | return 0; | 1161 | return 0; |
1232 | } | 1162 | } |
1233 | 1163 | ||
1234 | static struct sort_dimension avg_sort_dimension = { | ||
1235 | .name = "avg", | ||
1236 | .cmp = avg_cmp, | ||
1237 | }; | ||
1238 | |||
1239 | static int max_cmp(struct work_atoms *l, struct work_atoms *r) | 1164 | static int max_cmp(struct work_atoms *l, struct work_atoms *r) |
1240 | { | 1165 | { |
1241 | if (l->max_lat < r->max_lat) | 1166 | if (l->max_lat < r->max_lat) |
@@ -1246,11 +1171,6 @@ static int max_cmp(struct work_atoms *l, struct work_atoms *r) | |||
1246 | return 0; | 1171 | return 0; |
1247 | } | 1172 | } |
1248 | 1173 | ||
1249 | static struct sort_dimension max_sort_dimension = { | ||
1250 | .name = "max", | ||
1251 | .cmp = max_cmp, | ||
1252 | }; | ||
1253 | |||
1254 | static int switch_cmp(struct work_atoms *l, struct work_atoms *r) | 1174 | static int switch_cmp(struct work_atoms *l, struct work_atoms *r) |
1255 | { | 1175 | { |
1256 | if (l->nb_atoms < r->nb_atoms) | 1176 | if (l->nb_atoms < r->nb_atoms) |
@@ -1261,11 +1181,6 @@ static int switch_cmp(struct work_atoms *l, struct work_atoms *r) | |||
1261 | return 0; | 1181 | return 0; |
1262 | } | 1182 | } |
1263 | 1183 | ||
1264 | static struct sort_dimension switch_sort_dimension = { | ||
1265 | .name = "switch", | ||
1266 | .cmp = switch_cmp, | ||
1267 | }; | ||
1268 | |||
1269 | static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) | 1184 | static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) |
1270 | { | 1185 | { |
1271 | if (l->total_runtime < r->total_runtime) | 1186 | if (l->total_runtime < r->total_runtime) |
@@ -1276,28 +1191,38 @@ static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) | |||
1276 | return 0; | 1191 | return 0; |
1277 | } | 1192 | } |
1278 | 1193 | ||
1279 | static struct sort_dimension runtime_sort_dimension = { | ||
1280 | .name = "runtime", | ||
1281 | .cmp = runtime_cmp, | ||
1282 | }; | ||
1283 | |||
1284 | static struct sort_dimension *available_sorts[] = { | ||
1285 | &pid_sort_dimension, | ||
1286 | &avg_sort_dimension, | ||
1287 | &max_sort_dimension, | ||
1288 | &switch_sort_dimension, | ||
1289 | &runtime_sort_dimension, | ||
1290 | }; | ||
1291 | |||
1292 | #define NB_AVAILABLE_SORTS (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *)) | ||
1293 | |||
1294 | static LIST_HEAD(sort_list); | ||
1295 | |||
1296 | static int sort_dimension__add(const char *tok, struct list_head *list) | 1194 | static int sort_dimension__add(const char *tok, struct list_head *list) |
1297 | { | 1195 | { |
1298 | int i; | 1196 | size_t i; |
1197 | static struct sort_dimension avg_sort_dimension = { | ||
1198 | .name = "avg", | ||
1199 | .cmp = avg_cmp, | ||
1200 | }; | ||
1201 | static struct sort_dimension max_sort_dimension = { | ||
1202 | .name = "max", | ||
1203 | .cmp = max_cmp, | ||
1204 | }; | ||
1205 | static struct sort_dimension pid_sort_dimension = { | ||
1206 | .name = "pid", | ||
1207 | .cmp = pid_cmp, | ||
1208 | }; | ||
1209 | static struct sort_dimension runtime_sort_dimension = { | ||
1210 | .name = "runtime", | ||
1211 | .cmp = runtime_cmp, | ||
1212 | }; | ||
1213 | static struct sort_dimension switch_sort_dimension = { | ||
1214 | .name = "switch", | ||
1215 | .cmp = switch_cmp, | ||
1216 | }; | ||
1217 | struct sort_dimension *available_sorts[] = { | ||
1218 | &pid_sort_dimension, | ||
1219 | &avg_sort_dimension, | ||
1220 | &max_sort_dimension, | ||
1221 | &switch_sort_dimension, | ||
1222 | &runtime_sort_dimension, | ||
1223 | }; | ||
1299 | 1224 | ||
1300 | for (i = 0; i < NB_AVAILABLE_SORTS; i++) { | 1225 | for (i = 0; i < ARRAY_SIZE(available_sorts); i++) { |
1301 | if (!strcmp(available_sorts[i]->name, tok)) { | 1226 | if (!strcmp(available_sorts[i]->name, tok)) { |
1302 | list_add_tail(&available_sorts[i]->list, list); | 1227 | list_add_tail(&available_sorts[i]->list, list); |
1303 | 1228 | ||
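
Editor's note: above, the five sort_dimension definitions move from file scope into sort_dimension__add() as static locals, and the hand-counted NB_AVAILABLE_SORTS macro gives way to ARRAY_SIZE over the table. A minimal sketch of that arrangement, with invented names:

/*
 * Minimal sketch (invented names): keep the table function-local and let
 * ARRAY_SIZE count it, so the table and its length cannot drift apart.
 */
#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct sort_dimension {
	const char *name;
};

static int sort_dimension__add(const char *tok)
{
	static struct sort_dimension pid = { .name = "pid" };
	static struct sort_dimension max = { .name = "max" };
	struct sort_dimension *available[] = { &pid, &max };

	for (size_t i = 0; i < ARRAY_SIZE(available); i++) {
		if (!strcmp(available[i]->name, tok)) {
			printf("using sort key '%s'\n", tok);
			return 0;
		}
	}
	return -1;	/* unknown key */
}

int main(void)
{
	return sort_dimension__add("max") ? 1 : 0;
}
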
@@ -1308,68 +1233,41 @@ static int sort_dimension__add(const char *tok, struct list_head *list) | |||
1308 | return -1; | 1233 | return -1; |
1309 | } | 1234 | } |
1310 | 1235 | ||
1311 | static void setup_sorting(void); | 1236 | static void perf_sched__sort_lat(struct perf_sched *sched) |
1312 | |||
1313 | static void sort_lat(void) | ||
1314 | { | 1237 | { |
1315 | struct rb_node *node; | 1238 | struct rb_node *node; |
1316 | 1239 | ||
1317 | for (;;) { | 1240 | for (;;) { |
1318 | struct work_atoms *data; | 1241 | struct work_atoms *data; |
1319 | node = rb_first(&atom_root); | 1242 | node = rb_first(&sched->atom_root); |
1320 | if (!node) | 1243 | if (!node) |
1321 | break; | 1244 | break; |
1322 | 1245 | ||
1323 | rb_erase(node, &atom_root); | 1246 | rb_erase(node, &sched->atom_root); |
1324 | data = rb_entry(node, struct work_atoms, node); | 1247 | data = rb_entry(node, struct work_atoms, node); |
1325 | __thread_latency_insert(&sorted_atom_root, data, &sort_list); | 1248 | __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list); |
1326 | } | 1249 | } |
1327 | } | 1250 | } |
1328 | 1251 | ||
1329 | static struct trace_sched_handler *trace_handler; | 1252 | static int process_sched_wakeup_event(struct perf_tool *tool, |
1330 | 1253 | struct perf_evsel *evsel, | |
1331 | static void | 1254 | struct perf_sample *sample, |
1332 | process_sched_wakeup_event(struct perf_tool *tool __used, | 1255 | struct machine *machine) |
1333 | struct event_format *event, | ||
1334 | struct perf_sample *sample, | ||
1335 | struct machine *machine, | ||
1336 | struct thread *thread __used) | ||
1337 | { | 1256 | { |
1338 | void *data = sample->raw_data; | 1257 | struct perf_sched *sched = container_of(tool, struct perf_sched, tool); |
1339 | struct trace_wakeup_event wakeup_event; | ||
1340 | 1258 | ||
1341 | FILL_COMMON_FIELDS(wakeup_event, event, data); | 1259 | if (sched->tp_handler->wakeup_event) |
1260 | return sched->tp_handler->wakeup_event(sched, evsel, sample, machine); | ||
1342 | 1261 | ||
1343 | FILL_ARRAY(wakeup_event, comm, event, data); | 1262 | return 0; |
1344 | FILL_FIELD(wakeup_event, pid, event, data); | ||
1345 | FILL_FIELD(wakeup_event, prio, event, data); | ||
1346 | FILL_FIELD(wakeup_event, success, event, data); | ||
1347 | FILL_FIELD(wakeup_event, cpu, event, data); | ||
1348 | |||
1349 | if (trace_handler->wakeup_event) | ||
1350 | trace_handler->wakeup_event(&wakeup_event, machine, event, sample); | ||
1351 | } | 1263 | } |
1352 | 1264 | ||
1353 | /* | 1265 | static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, |
1354 | * Track the current task - that way we can know whether there's any | 1266 | struct perf_sample *sample, struct machine *machine) |
1355 | * weird events, such as a task being switched away that is not current. | ||
1356 | */ | ||
1357 | static int max_cpu; | ||
1358 | |||
1359 | static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 }; | ||
1360 | |||
1361 | static struct thread *curr_thread[MAX_CPUS]; | ||
1362 | |||
1363 | static char next_shortname1 = 'A'; | ||
1364 | static char next_shortname2 = '0'; | ||
1365 | |||
1366 | static void | ||
1367 | map_switch_event(struct trace_switch_event *switch_event, | ||
1368 | struct machine *machine, | ||
1369 | struct event_format *event __used, | ||
1370 | struct perf_sample *sample) | ||
1371 | { | 1267 | { |
1372 | struct thread *sched_out __used, *sched_in; | 1268 | const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), |
1269 | next_pid = perf_evsel__intval(evsel, sample, "next_pid"); | ||
1270 | struct thread *sched_out __maybe_unused, *sched_in; | ||
1373 | int new_shortname; | 1271 | int new_shortname; |
1374 | u64 timestamp0, timestamp = sample->time; | 1272 | u64 timestamp0, timestamp = sample->time; |
1375 | s64 delta; | 1273 | s64 delta; |
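
Editor's note: the new event callbacks such as process_sched_wakeup_event() receive only the struct perf_tool pointer and recover the enclosing struct perf_sched with container_of(), which works because the tool is embedded in the sched struct rather than pointed to. A standalone sketch of the idiom with stand-in types:

/*
 * Standalone sketch of the container_of() idiom with stand-in types: the
 * callback gets a pointer to the embedded member and recovers the
 * enclosing object from it.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool {
	int (*event)(struct tool *tool, int sample);
};

struct sched {
	struct tool tool;	/* embedded, not a pointer, so the math works */
	unsigned long nr_events;
};

static int sched_event(struct tool *tool, int sample)
{
	struct sched *sched = container_of(tool, struct sched, tool);

	sched->nr_events++;
	printf("sample %d, total %lu\n", sample, sched->nr_events);
	return 0;
}

int main(void)
{
	struct sched s = { .tool = { .event = sched_event } };

	s.tool.event(&s.tool, 42);
	s.tool.event(&s.tool, 43);
	return 0;
}
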
@@ -1377,54 +1275,55 @@ map_switch_event(struct trace_switch_event *switch_event, | |||
1377 | 1275 | ||
1378 | BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); | 1276 | BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); |
1379 | 1277 | ||
1380 | if (this_cpu > max_cpu) | 1278 | if (this_cpu > sched->max_cpu) |
1381 | max_cpu = this_cpu; | 1279 | sched->max_cpu = this_cpu; |
1382 | 1280 | ||
1383 | timestamp0 = cpu_last_switched[this_cpu]; | 1281 | timestamp0 = sched->cpu_last_switched[this_cpu]; |
1384 | cpu_last_switched[this_cpu] = timestamp; | 1282 | sched->cpu_last_switched[this_cpu] = timestamp; |
1385 | if (timestamp0) | 1283 | if (timestamp0) |
1386 | delta = timestamp - timestamp0; | 1284 | delta = timestamp - timestamp0; |
1387 | else | 1285 | else |
1388 | delta = 0; | 1286 | delta = 0; |
1389 | 1287 | ||
1390 | if (delta < 0) | 1288 | if (delta < 0) { |
1391 | die("hm, delta: %" PRIu64 " < 0 ?\n", delta); | 1289 | pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta); |
1392 | 1290 | return -1; | |
1291 | } | ||
1393 | 1292 | ||
1394 | sched_out = machine__findnew_thread(machine, switch_event->prev_pid); | 1293 | sched_out = machine__findnew_thread(machine, prev_pid); |
1395 | sched_in = machine__findnew_thread(machine, switch_event->next_pid); | 1294 | sched_in = machine__findnew_thread(machine, next_pid); |
1396 | 1295 | ||
1397 | curr_thread[this_cpu] = sched_in; | 1296 | sched->curr_thread[this_cpu] = sched_in; |
1398 | 1297 | ||
1399 | printf(" "); | 1298 | printf(" "); |
1400 | 1299 | ||
1401 | new_shortname = 0; | 1300 | new_shortname = 0; |
1402 | if (!sched_in->shortname[0]) { | 1301 | if (!sched_in->shortname[0]) { |
1403 | sched_in->shortname[0] = next_shortname1; | 1302 | sched_in->shortname[0] = sched->next_shortname1; |
1404 | sched_in->shortname[1] = next_shortname2; | 1303 | sched_in->shortname[1] = sched->next_shortname2; |
1405 | 1304 | ||
1406 | if (next_shortname1 < 'Z') { | 1305 | if (sched->next_shortname1 < 'Z') { |
1407 | next_shortname1++; | 1306 | sched->next_shortname1++; |
1408 | } else { | 1307 | } else { |
1409 | next_shortname1='A'; | 1308 | sched->next_shortname1='A'; |
1410 | if (next_shortname2 < '9') { | 1309 | if (sched->next_shortname2 < '9') { |
1411 | next_shortname2++; | 1310 | sched->next_shortname2++; |
1412 | } else { | 1311 | } else { |
1413 | next_shortname2='0'; | 1312 | sched->next_shortname2='0'; |
1414 | } | 1313 | } |
1415 | } | 1314 | } |
1416 | new_shortname = 1; | 1315 | new_shortname = 1; |
1417 | } | 1316 | } |
1418 | 1317 | ||
1419 | for (cpu = 0; cpu <= max_cpu; cpu++) { | 1318 | for (cpu = 0; cpu <= sched->max_cpu; cpu++) { |
1420 | if (cpu != this_cpu) | 1319 | if (cpu != this_cpu) |
1421 | printf(" "); | 1320 | printf(" "); |
1422 | else | 1321 | else |
1423 | printf("*"); | 1322 | printf("*"); |
1424 | 1323 | ||
1425 | if (curr_thread[cpu]) { | 1324 | if (sched->curr_thread[cpu]) { |
1426 | if (curr_thread[cpu]->pid) | 1325 | if (sched->curr_thread[cpu]->pid) |
1427 | printf("%2s ", curr_thread[cpu]->shortname); | 1326 | printf("%2s ", sched->curr_thread[cpu]->shortname); |
1428 | else | 1327 | else |
1429 | printf(". "); | 1328 | printf(". "); |
1430 | } else | 1329 | } else |
@@ -1438,127 +1337,97 @@ map_switch_event(struct trace_switch_event *switch_event, | |||
1438 | } else { | 1337 | } else { |
1439 | printf("\n"); | 1338 | printf("\n"); |
1440 | } | 1339 | } |
1340 | |||
1341 | return 0; | ||
1441 | } | 1342 | } |
1442 | 1343 | ||
1443 | static void | 1344 | static int process_sched_switch_event(struct perf_tool *tool, |
1444 | process_sched_switch_event(struct perf_tool *tool __used, | 1345 | struct perf_evsel *evsel, |
1445 | struct event_format *event, | 1346 | struct perf_sample *sample, |
1446 | struct perf_sample *sample, | 1347 | struct machine *machine) |
1447 | struct machine *machine, | ||
1448 | struct thread *thread __used) | ||
1449 | { | 1348 | { |
1450 | int this_cpu = sample->cpu; | 1349 | struct perf_sched *sched = container_of(tool, struct perf_sched, tool); |
1451 | void *data = sample->raw_data; | 1350 | int this_cpu = sample->cpu, err = 0; |
1452 | struct trace_switch_event switch_event; | 1351 | u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), |
1453 | 1352 | next_pid = perf_evsel__intval(evsel, sample, "next_pid"); | |
1454 | FILL_COMMON_FIELDS(switch_event, event, data); | ||
1455 | 1353 | ||
1456 | FILL_ARRAY(switch_event, prev_comm, event, data); | 1354 | if (sched->curr_pid[this_cpu] != (u32)-1) { |
1457 | FILL_FIELD(switch_event, prev_pid, event, data); | ||
1458 | FILL_FIELD(switch_event, prev_prio, event, data); | ||
1459 | FILL_FIELD(switch_event, prev_state, event, data); | ||
1460 | FILL_ARRAY(switch_event, next_comm, event, data); | ||
1461 | FILL_FIELD(switch_event, next_pid, event, data); | ||
1462 | FILL_FIELD(switch_event, next_prio, event, data); | ||
1463 | |||
1464 | if (curr_pid[this_cpu] != (u32)-1) { | ||
1465 | /* | 1355 | /* |
1466 | * Are we trying to switch away a PID that is | 1356 | * Are we trying to switch away a PID that is |
1467 | * not current? | 1357 | * not current? |
1468 | */ | 1358 | */ |
1469 | if (curr_pid[this_cpu] != switch_event.prev_pid) | 1359 | if (sched->curr_pid[this_cpu] != prev_pid) |
1470 | nr_context_switch_bugs++; | 1360 | sched->nr_context_switch_bugs++; |
1471 | } | 1361 | } |
1472 | if (trace_handler->switch_event) | ||
1473 | trace_handler->switch_event(&switch_event, machine, event, sample); | ||
1474 | 1362 | ||
1475 | curr_pid[this_cpu] = switch_event.next_pid; | 1363 | if (sched->tp_handler->switch_event) |
1364 | err = sched->tp_handler->switch_event(sched, evsel, sample, machine); | ||
1365 | |||
1366 | sched->curr_pid[this_cpu] = next_pid; | ||
1367 | return err; | ||
1476 | } | 1368 | } |
1477 | 1369 | ||
1478 | static void | 1370 | static int process_sched_runtime_event(struct perf_tool *tool, |
1479 | process_sched_runtime_event(struct perf_tool *tool __used, | 1371 | struct perf_evsel *evsel, |
1480 | struct event_format *event, | 1372 | struct perf_sample *sample, |
1481 | struct perf_sample *sample, | 1373 | struct machine *machine) |
1482 | struct machine *machine, | ||
1483 | struct thread *thread __used) | ||
1484 | { | 1374 | { |
1485 | void *data = sample->raw_data; | 1375 | struct perf_sched *sched = container_of(tool, struct perf_sched, tool); |
1486 | struct trace_runtime_event runtime_event; | ||
1487 | 1376 | ||
1488 | FILL_ARRAY(runtime_event, comm, event, data); | 1377 | if (sched->tp_handler->runtime_event) |
1489 | FILL_FIELD(runtime_event, pid, event, data); | 1378 | return sched->tp_handler->runtime_event(sched, evsel, sample, machine); |
1490 | FILL_FIELD(runtime_event, runtime, event, data); | ||
1491 | FILL_FIELD(runtime_event, vruntime, event, data); | ||
1492 | 1379 | ||
1493 | if (trace_handler->runtime_event) | 1380 | return 0; |
1494 | trace_handler->runtime_event(&runtime_event, machine, sample); | ||
1495 | } | 1381 | } |
1496 | 1382 | ||
1497 | static void | 1383 | static int process_sched_fork_event(struct perf_tool *tool, |
1498 | process_sched_fork_event(struct perf_tool *tool __used, | 1384 | struct perf_evsel *evsel, |
1499 | struct event_format *event, | 1385 | struct perf_sample *sample, |
1500 | struct perf_sample *sample, | 1386 | struct machine *machine __maybe_unused) |
1501 | struct machine *machine __used, | ||
1502 | struct thread *thread __used) | ||
1503 | { | 1387 | { |
1504 | void *data = sample->raw_data; | 1388 | struct perf_sched *sched = container_of(tool, struct perf_sched, tool); |
1505 | struct trace_fork_event fork_event; | ||
1506 | |||
1507 | FILL_COMMON_FIELDS(fork_event, event, data); | ||
1508 | 1389 | ||
1509 | FILL_ARRAY(fork_event, parent_comm, event, data); | 1390 | if (sched->tp_handler->fork_event) |
1510 | FILL_FIELD(fork_event, parent_pid, event, data); | 1391 | return sched->tp_handler->fork_event(sched, evsel, sample); |
1511 | FILL_ARRAY(fork_event, child_comm, event, data); | ||
1512 | FILL_FIELD(fork_event, child_pid, event, data); | ||
1513 | 1392 | ||
1514 | if (trace_handler->fork_event) | 1393 | return 0; |
1515 | trace_handler->fork_event(&fork_event, event); | ||
1516 | } | 1394 | } |
1517 | 1395 | ||
1518 | static void | 1396 | static int process_sched_exit_event(struct perf_tool *tool __maybe_unused, |
1519 | process_sched_exit_event(struct perf_tool *tool __used, | 1397 | struct perf_evsel *evsel, |
1520 | struct event_format *event, | 1398 | struct perf_sample *sample __maybe_unused, |
1521 | struct perf_sample *sample __used, | 1399 | struct machine *machine __maybe_unused) |
1522 | struct machine *machine __used, | ||
1523 | struct thread *thread __used) | ||
1524 | { | 1400 | { |
1525 | if (verbose) | 1401 | pr_debug("sched_exit event %p\n", evsel); |
1526 | printf("sched_exit event %p\n", event); | 1402 | return 0; |
1527 | } | 1403 | } |
1528 | 1404 | ||
1529 | static void | 1405 | static int process_sched_migrate_task_event(struct perf_tool *tool, |
1530 | process_sched_migrate_task_event(struct perf_tool *tool __used, | 1406 | struct perf_evsel *evsel, |
1531 | struct event_format *event, | 1407 | struct perf_sample *sample, |
1532 | struct perf_sample *sample, | 1408 | struct machine *machine) |
1533 | struct machine *machine, | ||
1534 | struct thread *thread __used) | ||
1535 | { | 1409 | { |
1536 | void *data = sample->raw_data; | 1410 | struct perf_sched *sched = container_of(tool, struct perf_sched, tool); |
1537 | struct trace_migrate_task_event migrate_task_event; | ||
1538 | 1411 | ||
1539 | FILL_COMMON_FIELDS(migrate_task_event, event, data); | 1412 | if (sched->tp_handler->migrate_task_event) |
1413 | return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine); | ||
1540 | 1414 | ||
1541 | FILL_ARRAY(migrate_task_event, comm, event, data); | 1415 | return 0; |
1542 | FILL_FIELD(migrate_task_event, pid, event, data); | ||
1543 | FILL_FIELD(migrate_task_event, prio, event, data); | ||
1544 | FILL_FIELD(migrate_task_event, cpu, event, data); | ||
1545 | |||
1546 | if (trace_handler->migrate_task_event) | ||
1547 | trace_handler->migrate_task_event(&migrate_task_event, machine, sample); | ||
1548 | } | 1416 | } |
1549 | 1417 | ||
1550 | typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event, | 1418 | typedef int (*tracepoint_handler)(struct perf_tool *tool, |
1551 | struct perf_sample *sample, | 1419 | struct perf_evsel *evsel, |
1552 | struct machine *machine, | 1420 | struct perf_sample *sample, |
1553 | struct thread *thread); | 1421 | struct machine *machine); |
1554 | 1422 | ||
1555 | static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used, | 1423 | static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused, |
1556 | union perf_event *event __used, | 1424 | union perf_event *event __maybe_unused, |
1557 | struct perf_sample *sample, | 1425 | struct perf_sample *sample, |
1558 | struct perf_evsel *evsel, | 1426 | struct perf_evsel *evsel, |
1559 | struct machine *machine) | 1427 | struct machine *machine) |
1560 | { | 1428 | { |
1561 | struct thread *thread = machine__findnew_thread(machine, sample->pid); | 1429 | struct thread *thread = machine__findnew_thread(machine, sample->pid); |
1430 | int err = 0; | ||
1562 | 1431 | ||
1563 | if (thread == NULL) { | 1432 | if (thread == NULL) { |
1564 | pr_debug("problem processing %s event, skipping it.\n", | 1433 | pr_debug("problem processing %s event, skipping it.\n", |
@@ -1571,23 +1440,15 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used, | |||
1571 | 1440 | ||
1572 | if (evsel->handler.func != NULL) { | 1441 | if (evsel->handler.func != NULL) { |
1573 | tracepoint_handler f = evsel->handler.func; | 1442 | tracepoint_handler f = evsel->handler.func; |
1574 | f(tool, evsel->tp_format, sample, machine, thread); | 1443 | err = f(tool, evsel, sample, machine); |
1575 | } | 1444 | } |
1576 | 1445 | ||
1577 | return 0; | 1446 | return err; |
1578 | } | 1447 | } |
1579 | 1448 | ||
1580 | static struct perf_tool perf_sched = { | 1449 | static int perf_sched__read_events(struct perf_sched *sched, bool destroy, |
1581 | .sample = perf_sched__process_tracepoint_sample, | 1450 | struct perf_session **psession) |
1582 | .comm = perf_event__process_comm, | ||
1583 | .lost = perf_event__process_lost, | ||
1584 | .fork = perf_event__process_task, | ||
1585 | .ordered_samples = true, | ||
1586 | }; | ||
1587 | |||
1588 | static void read_events(bool destroy, struct perf_session **psession) | ||
1589 | { | 1451 | { |
1590 | int err = -EINVAL; | ||
1591 | const struct perf_evsel_str_handler handlers[] = { | 1452 | const struct perf_evsel_str_handler handlers[] = { |
1592 | { "sched:sched_switch", process_sched_switch_event, }, | 1453 | { "sched:sched_switch", process_sched_switch_event, }, |
1593 | { "sched:sched_stat_runtime", process_sched_runtime_event, }, | 1454 | { "sched:sched_stat_runtime", process_sched_runtime_event, }, |
@@ -1599,21 +1460,25 @@ static void read_events(bool destroy, struct perf_session **psession) | |||
1599 | }; | 1460 | }; |
1600 | struct perf_session *session; | 1461 | struct perf_session *session; |
1601 | 1462 | ||
1602 | session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_sched); | 1463 | session = perf_session__new(sched->input_name, O_RDONLY, 0, false, &sched->tool); |
1603 | if (session == NULL) | 1464 | if (session == NULL) { |
1604 | die("No Memory"); | 1465 | pr_debug("No Memory for session\n"); |
1466 | return -1; | ||
1467 | } | ||
1605 | 1468 | ||
1606 | err = perf_session__set_tracepoints_handlers(session, handlers); | 1469 | if (perf_session__set_tracepoints_handlers(session, handlers)) |
1607 | assert(err == 0); | 1470 | goto out_delete; |
1608 | 1471 | ||
1609 | if (perf_session__has_traces(session, "record -R")) { | 1472 | if (perf_session__has_traces(session, "record -R")) { |
1610 | err = perf_session__process_events(session, &perf_sched); | 1473 | int err = perf_session__process_events(session, &sched->tool); |
1611 | if (err) | 1474 | if (err) { |
1612 | die("Failed to process events, error %d", err); | 1475 | pr_err("Failed to process events, error %d", err); |
1476 | goto out_delete; | ||
1477 | } | ||
1613 | 1478 | ||
1614 | nr_events = session->hists.stats.nr_events[0]; | 1479 | sched->nr_events = session->hists.stats.nr_events[0]; |
1615 | nr_lost_events = session->hists.stats.total_lost; | 1480 | sched->nr_lost_events = session->hists.stats.total_lost; |
1616 | nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; | 1481 | sched->nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; |
1617 | } | 1482 | } |
1618 | 1483 | ||
1619 | if (destroy) | 1484 | if (destroy) |
@@ -1621,208 +1486,166 @@ static void read_events(bool destroy, struct perf_session **psession) | |||
1621 | 1486 | ||
1622 | if (psession) | 1487 | if (psession) |
1623 | *psession = session; | 1488 | *psession = session; |
1489 | |||
1490 | return 0; | ||
1491 | |||
1492 | out_delete: | ||
1493 | perf_session__delete(session); | ||
1494 | return -1; | ||
1624 | } | 1495 | } |
1625 | 1496 | ||
1626 | static void print_bad_events(void) | 1497 | static void print_bad_events(struct perf_sched *sched) |
1627 | { | 1498 | { |
1628 | if (nr_unordered_timestamps && nr_timestamps) { | 1499 | if (sched->nr_unordered_timestamps && sched->nr_timestamps) { |
1629 | printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n", | 1500 | printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n", |
1630 | (double)nr_unordered_timestamps/(double)nr_timestamps*100.0, | 1501 | (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0, |
1631 | nr_unordered_timestamps, nr_timestamps); | 1502 | sched->nr_unordered_timestamps, sched->nr_timestamps); |
1632 | } | 1503 | } |
1633 | if (nr_lost_events && nr_events) { | 1504 | if (sched->nr_lost_events && sched->nr_events) { |
1634 | printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n", | 1505 | printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n", |
1635 | (double)nr_lost_events/(double)nr_events*100.0, | 1506 | (double)sched->nr_lost_events/(double)sched->nr_events * 100.0, |
1636 | nr_lost_events, nr_events, nr_lost_chunks); | 1507 | sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks); |
1637 | } | 1508 | } |
1638 | if (nr_state_machine_bugs && nr_timestamps) { | 1509 | if (sched->nr_state_machine_bugs && sched->nr_timestamps) { |
1639 | printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)", | 1510 | printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)", |
1640 | (double)nr_state_machine_bugs/(double)nr_timestamps*100.0, | 1511 | (double)sched->nr_state_machine_bugs/(double)sched->nr_timestamps*100.0, |
1641 | nr_state_machine_bugs, nr_timestamps); | 1512 | sched->nr_state_machine_bugs, sched->nr_timestamps); |
1642 | if (nr_lost_events) | 1513 | if (sched->nr_lost_events) |
1643 | printf(" (due to lost events?)"); | 1514 | printf(" (due to lost events?)"); |
1644 | printf("\n"); | 1515 | printf("\n"); |
1645 | } | 1516 | } |
1646 | if (nr_context_switch_bugs && nr_timestamps) { | 1517 | if (sched->nr_context_switch_bugs && sched->nr_timestamps) { |
1647 | printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)", | 1518 | printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)", |
1648 | (double)nr_context_switch_bugs/(double)nr_timestamps*100.0, | 1519 | (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0, |
1649 | nr_context_switch_bugs, nr_timestamps); | 1520 | sched->nr_context_switch_bugs, sched->nr_timestamps); |
1650 | if (nr_lost_events) | 1521 | if (sched->nr_lost_events) |
1651 | printf(" (due to lost events?)"); | 1522 | printf(" (due to lost events?)"); |
1652 | printf("\n"); | 1523 | printf("\n"); |
1653 | } | 1524 | } |
1654 | } | 1525 | } |
1655 | 1526 | ||
1656 | static void __cmd_lat(void) | 1527 | static int perf_sched__lat(struct perf_sched *sched) |
1657 | { | 1528 | { |
1658 | struct rb_node *next; | 1529 | struct rb_node *next; |
1659 | struct perf_session *session; | 1530 | struct perf_session *session; |
1660 | 1531 | ||
1661 | setup_pager(); | 1532 | setup_pager(); |
1662 | read_events(false, &session); | 1533 | if (perf_sched__read_events(sched, false, &session)) |
1663 | sort_lat(); | 1534 | return -1; |
1535 | perf_sched__sort_lat(sched); | ||
1664 | 1536 | ||
1665 | printf("\n ---------------------------------------------------------------------------------------------------------------\n"); | 1537 | printf("\n ---------------------------------------------------------------------------------------------------------------\n"); |
1666 | printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n"); | 1538 | printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n"); |
1667 | printf(" ---------------------------------------------------------------------------------------------------------------\n"); | 1539 | printf(" ---------------------------------------------------------------------------------------------------------------\n"); |
1668 | 1540 | ||
1669 | next = rb_first(&sorted_atom_root); | 1541 | next = rb_first(&sched->sorted_atom_root); |
1670 | 1542 | ||
1671 | while (next) { | 1543 | while (next) { |
1672 | struct work_atoms *work_list; | 1544 | struct work_atoms *work_list; |
1673 | 1545 | ||
1674 | work_list = rb_entry(next, struct work_atoms, node); | 1546 | work_list = rb_entry(next, struct work_atoms, node); |
1675 | output_lat_thread(work_list); | 1547 | output_lat_thread(sched, work_list); |
1676 | next = rb_next(next); | 1548 | next = rb_next(next); |
1677 | } | 1549 | } |
1678 | 1550 | ||
1679 | printf(" -----------------------------------------------------------------------------------------\n"); | 1551 | printf(" -----------------------------------------------------------------------------------------\n"); |
1680 | printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n", | 1552 | printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n", |
1681 | (double)all_runtime/1e6, all_count); | 1553 | (double)sched->all_runtime / 1e6, sched->all_count); |
1682 | 1554 | ||
1683 | printf(" ---------------------------------------------------\n"); | 1555 | printf(" ---------------------------------------------------\n"); |
1684 | 1556 | ||
1685 | print_bad_events(); | 1557 | print_bad_events(sched); |
1686 | printf("\n"); | 1558 | printf("\n"); |
1687 | 1559 | ||
1688 | perf_session__delete(session); | 1560 | perf_session__delete(session); |
1561 | return 0; | ||
1689 | } | 1562 | } |
1690 | 1563 | ||
1691 | static struct trace_sched_handler map_ops = { | 1564 | static int perf_sched__map(struct perf_sched *sched) |
1692 | .wakeup_event = NULL, | ||
1693 | .switch_event = map_switch_event, | ||
1694 | .runtime_event = NULL, | ||
1695 | .fork_event = NULL, | ||
1696 | }; | ||
1697 | |||
1698 | static void __cmd_map(void) | ||
1699 | { | 1565 | { |
1700 | max_cpu = sysconf(_SC_NPROCESSORS_CONF); | 1566 | sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF); |
1701 | 1567 | ||
1702 | setup_pager(); | 1568 | setup_pager(); |
1703 | read_events(true, NULL); | 1569 | if (perf_sched__read_events(sched, true, NULL)) |
1704 | print_bad_events(); | 1570 | return -1; |
1571 | print_bad_events(sched); | ||
1572 | return 0; | ||
1705 | } | 1573 | } |
1706 | 1574 | ||
1707 | static void __cmd_replay(void) | 1575 | static int perf_sched__replay(struct perf_sched *sched) |
1708 | { | 1576 | { |
1709 | unsigned long i; | 1577 | unsigned long i; |
1710 | 1578 | ||
1711 | calibrate_run_measurement_overhead(); | 1579 | calibrate_run_measurement_overhead(sched); |
1712 | calibrate_sleep_measurement_overhead(); | 1580 | calibrate_sleep_measurement_overhead(sched); |
1713 | 1581 | ||
1714 | test_calibrations(); | 1582 | test_calibrations(sched); |
1715 | 1583 | ||
1716 | read_events(true, NULL); | 1584 | if (perf_sched__read_events(sched, true, NULL)) |
1585 | return -1; | ||
1717 | 1586 | ||
1718 | printf("nr_run_events: %ld\n", nr_run_events); | 1587 | printf("nr_run_events: %ld\n", sched->nr_run_events); |
1719 | printf("nr_sleep_events: %ld\n", nr_sleep_events); | 1588 | printf("nr_sleep_events: %ld\n", sched->nr_sleep_events); |
1720 | printf("nr_wakeup_events: %ld\n", nr_wakeup_events); | 1589 | printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events); |
1721 | 1590 | ||
1722 | if (targetless_wakeups) | 1591 | if (sched->targetless_wakeups) |
1723 | printf("target-less wakeups: %ld\n", targetless_wakeups); | 1592 | printf("target-less wakeups: %ld\n", sched->targetless_wakeups); |
1724 | if (multitarget_wakeups) | 1593 | if (sched->multitarget_wakeups) |
1725 | printf("multi-target wakeups: %ld\n", multitarget_wakeups); | 1594 | printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups); |
1726 | if (nr_run_events_optimized) | 1595 | if (sched->nr_run_events_optimized) |
1727 | printf("run atoms optimized: %ld\n", | 1596 | printf("run atoms optimized: %ld\n", |
1728 | nr_run_events_optimized); | 1597 | sched->nr_run_events_optimized); |
1729 | 1598 | ||
1730 | print_task_traces(); | 1599 | print_task_traces(sched); |
1731 | add_cross_task_wakeups(); | 1600 | add_cross_task_wakeups(sched); |
1732 | 1601 | ||
1733 | create_tasks(); | 1602 | create_tasks(sched); |
1734 | printf("------------------------------------------------------------\n"); | 1603 | printf("------------------------------------------------------------\n"); |
1735 | for (i = 0; i < replay_repeat; i++) | 1604 | for (i = 0; i < sched->replay_repeat; i++) |
1736 | run_one_test(); | 1605 | run_one_test(sched); |
1737 | } | ||
1738 | |||
1739 | 1606 | ||
1740 | static const char * const sched_usage[] = { | 1607 | return 0; |
1741 | "perf sched [<options>] {record|latency|map|replay|script}", | 1608 | } |
1742 | NULL | ||
1743 | }; | ||
1744 | |||
1745 | static const struct option sched_options[] = { | ||
1746 | OPT_STRING('i', "input", &input_name, "file", | ||
1747 | "input file name"), | ||
1748 | OPT_INCR('v', "verbose", &verbose, | ||
1749 | "be more verbose (show symbol address, etc)"), | ||
1750 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | ||
1751 | "dump raw trace in ASCII"), | ||
1752 | OPT_END() | ||
1753 | }; | ||
1754 | |||
1755 | static const char * const latency_usage[] = { | ||
1756 | "perf sched latency [<options>]", | ||
1757 | NULL | ||
1758 | }; | ||
1759 | |||
1760 | static const struct option latency_options[] = { | ||
1761 | OPT_STRING('s', "sort", &sort_order, "key[,key2...]", | ||
1762 | "sort by key(s): runtime, switch, avg, max"), | ||
1763 | OPT_INCR('v', "verbose", &verbose, | ||
1764 | "be more verbose (show symbol address, etc)"), | ||
1765 | OPT_INTEGER('C', "CPU", &profile_cpu, | ||
1766 | "CPU to profile on"), | ||
1767 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | ||
1768 | "dump raw trace in ASCII"), | ||
1769 | OPT_END() | ||
1770 | }; | ||
1771 | |||
1772 | static const char * const replay_usage[] = { | ||
1773 | "perf sched replay [<options>]", | ||
1774 | NULL | ||
1775 | }; | ||
1776 | |||
1777 | static const struct option replay_options[] = { | ||
1778 | OPT_UINTEGER('r', "repeat", &replay_repeat, | ||
1779 | "repeat the workload replay N times (-1: infinite)"), | ||
1780 | OPT_INCR('v', "verbose", &verbose, | ||
1781 | "be more verbose (show symbol address, etc)"), | ||
1782 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | ||
1783 | "dump raw trace in ASCII"), | ||
1784 | OPT_END() | ||
1785 | }; | ||
1786 | 1609 | ||
1787 | static void setup_sorting(void) | 1610 | static void setup_sorting(struct perf_sched *sched, const struct option *options, |
1611 | const char * const usage_msg[]) | ||
1788 | { | 1612 | { |
1789 | char *tmp, *tok, *str = strdup(sort_order); | 1613 | char *tmp, *tok, *str = strdup(sched->sort_order); |
1790 | 1614 | ||
1791 | for (tok = strtok_r(str, ", ", &tmp); | 1615 | for (tok = strtok_r(str, ", ", &tmp); |
1792 | tok; tok = strtok_r(NULL, ", ", &tmp)) { | 1616 | tok; tok = strtok_r(NULL, ", ", &tmp)) { |
1793 | if (sort_dimension__add(tok, &sort_list) < 0) { | 1617 | if (sort_dimension__add(tok, &sched->sort_list) < 0) { |
1794 | error("Unknown --sort key: `%s'", tok); | 1618 | error("Unknown --sort key: `%s'", tok); |
1795 | usage_with_options(latency_usage, latency_options); | 1619 | usage_with_options(usage_msg, options); |
1796 | } | 1620 | } |
1797 | } | 1621 | } |
1798 | 1622 | ||
1799 | free(str); | 1623 | free(str); |
1800 | 1624 | ||
1801 | sort_dimension__add("pid", &cmp_pid); | 1625 | sort_dimension__add("pid", &sched->cmp_pid); |
1802 | } | 1626 | } |
1803 | 1627 | ||
1804 | static const char *record_args[] = { | ||
1805 | "record", | ||
1806 | "-a", | ||
1807 | "-R", | ||
1808 | "-f", | ||
1809 | "-m", "1024", | ||
1810 | "-c", "1", | ||
1811 | "-e", "sched:sched_switch", | ||
1812 | "-e", "sched:sched_stat_wait", | ||
1813 | "-e", "sched:sched_stat_sleep", | ||
1814 | "-e", "sched:sched_stat_iowait", | ||
1815 | "-e", "sched:sched_stat_runtime", | ||
1816 | "-e", "sched:sched_process_exit", | ||
1817 | "-e", "sched:sched_process_fork", | ||
1818 | "-e", "sched:sched_wakeup", | ||
1819 | "-e", "sched:sched_migrate_task", | ||
1820 | }; | ||
1821 | |||
1822 | static int __cmd_record(int argc, const char **argv) | 1628 | static int __cmd_record(int argc, const char **argv) |
1823 | { | 1629 | { |
1824 | unsigned int rec_argc, i, j; | 1630 | unsigned int rec_argc, i, j; |
1825 | const char **rec_argv; | 1631 | const char **rec_argv; |
1632 | const char * const record_args[] = { | ||
1633 | "record", | ||
1634 | "-a", | ||
1635 | "-R", | ||
1636 | "-f", | ||
1637 | "-m", "1024", | ||
1638 | "-c", "1", | ||
1639 | "-e", "sched:sched_switch", | ||
1640 | "-e", "sched:sched_stat_wait", | ||
1641 | "-e", "sched:sched_stat_sleep", | ||
1642 | "-e", "sched:sched_stat_iowait", | ||
1643 | "-e", "sched:sched_stat_runtime", | ||
1644 | "-e", "sched:sched_process_exit", | ||
1645 | "-e", "sched:sched_process_fork", | ||
1646 | "-e", "sched:sched_wakeup", | ||
1647 | "-e", "sched:sched_migrate_task", | ||
1648 | }; | ||
1826 | 1649 | ||
1827 | rec_argc = ARRAY_SIZE(record_args) + argc - 1; | 1650 | rec_argc = ARRAY_SIZE(record_args) + argc - 1; |
1828 | rec_argv = calloc(rec_argc + 1, sizeof(char *)); | 1651 | rec_argv = calloc(rec_argc + 1, sizeof(char *)); |
@@ -1841,8 +1664,85 @@ static int __cmd_record(int argc, const char **argv) | |||
1841 | return cmd_record(i, rec_argv, NULL); | 1664 | return cmd_record(i, rec_argv, NULL); |
1842 | } | 1665 | } |
1843 | 1666 | ||
1844 | int cmd_sched(int argc, const char **argv, const char *prefix __used) | 1667 | int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused) |
1845 | { | 1668 | { |
1669 | const char default_sort_order[] = "avg, max, switch, runtime"; | ||
1670 | struct perf_sched sched = { | ||
1671 | .tool = { | ||
1672 | .sample = perf_sched__process_tracepoint_sample, | ||
1673 | .comm = perf_event__process_comm, | ||
1674 | .lost = perf_event__process_lost, | ||
1675 | .fork = perf_event__process_task, | ||
1676 | .ordered_samples = true, | ||
1677 | }, | ||
1678 | .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid), | ||
1679 | .sort_list = LIST_HEAD_INIT(sched.sort_list), | ||
1680 | .start_work_mutex = PTHREAD_MUTEX_INITIALIZER, | ||
1681 | .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER, | ||
1682 | .curr_pid = { [0 ... MAX_CPUS - 1] = -1 }, | ||
1683 | .sort_order = default_sort_order, | ||
1684 | .replay_repeat = 10, | ||
1685 | .profile_cpu = -1, | ||
1686 | .next_shortname1 = 'A', | ||
1687 | .next_shortname2 = '0', | ||
1688 | }; | ||
1689 | const struct option latency_options[] = { | ||
1690 | OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]", | ||
1691 | "sort by key(s): runtime, switch, avg, max"), | ||
1692 | OPT_INCR('v', "verbose", &verbose, | ||
1693 | "be more verbose (show symbol address, etc)"), | ||
1694 | OPT_INTEGER('C', "CPU", &sched.profile_cpu, | ||
1695 | "CPU to profile on"), | ||
1696 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | ||
1697 | "dump raw trace in ASCII"), | ||
1698 | OPT_END() | ||
1699 | }; | ||
1700 | const struct option replay_options[] = { | ||
1701 | OPT_UINTEGER('r', "repeat", &sched.replay_repeat, | ||
1702 | "repeat the workload replay N times (-1: infinite)"), | ||
1703 | OPT_INCR('v', "verbose", &verbose, | ||
1704 | "be more verbose (show symbol address, etc)"), | ||
1705 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | ||
1706 | "dump raw trace in ASCII"), | ||
1707 | OPT_END() | ||
1708 | }; | ||
1709 | const struct option sched_options[] = { | ||
1710 | OPT_STRING('i', "input", &sched.input_name, "file", | ||
1711 | "input file name"), | ||
1712 | OPT_INCR('v', "verbose", &verbose, | ||
1713 | "be more verbose (show symbol address, etc)"), | ||
1714 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | ||
1715 | "dump raw trace in ASCII"), | ||
1716 | OPT_END() | ||
1717 | }; | ||
1718 | const char * const latency_usage[] = { | ||
1719 | "perf sched latency [<options>]", | ||
1720 | NULL | ||
1721 | }; | ||
1722 | const char * const replay_usage[] = { | ||
1723 | "perf sched replay [<options>]", | ||
1724 | NULL | ||
1725 | }; | ||
1726 | const char * const sched_usage[] = { | ||
1727 | "perf sched [<options>] {record|latency|map|replay|script}", | ||
1728 | NULL | ||
1729 | }; | ||
1730 | struct trace_sched_handler lat_ops = { | ||
1731 | .wakeup_event = latency_wakeup_event, | ||
1732 | .switch_event = latency_switch_event, | ||
1733 | .runtime_event = latency_runtime_event, | ||
1734 | .fork_event = latency_fork_event, | ||
1735 | .migrate_task_event = latency_migrate_task_event, | ||
1736 | }; | ||
1737 | struct trace_sched_handler map_ops = { | ||
1738 | .switch_event = map_switch_event, | ||
1739 | }; | ||
1740 | struct trace_sched_handler replay_ops = { | ||
1741 | .wakeup_event = replay_wakeup_event, | ||
1742 | .switch_event = replay_switch_event, | ||
1743 | .fork_event = replay_fork_event, | ||
1744 | }; | ||
1745 | |||
1846 | argc = parse_options(argc, argv, sched_options, sched_usage, | 1746 | argc = parse_options(argc, argv, sched_options, sched_usage, |
1847 | PARSE_OPT_STOP_AT_NON_OPTION); | 1747 | PARSE_OPT_STOP_AT_NON_OPTION); |
1848 | if (!argc) | 1748 | if (!argc) |
@@ -1858,26 +1758,26 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used) | |||
1858 | if (!strncmp(argv[0], "rec", 3)) { | 1758 | if (!strncmp(argv[0], "rec", 3)) { |
1859 | return __cmd_record(argc, argv); | 1759 | return __cmd_record(argc, argv); |
1860 | } else if (!strncmp(argv[0], "lat", 3)) { | 1760 | } else if (!strncmp(argv[0], "lat", 3)) { |
1861 | trace_handler = &lat_ops; | 1761 | sched.tp_handler = &lat_ops; |
1862 | if (argc > 1) { | 1762 | if (argc > 1) { |
1863 | argc = parse_options(argc, argv, latency_options, latency_usage, 0); | 1763 | argc = parse_options(argc, argv, latency_options, latency_usage, 0); |
1864 | if (argc) | 1764 | if (argc) |
1865 | usage_with_options(latency_usage, latency_options); | 1765 | usage_with_options(latency_usage, latency_options); |
1866 | } | 1766 | } |
1867 | setup_sorting(); | 1767 | setup_sorting(&sched, latency_options, latency_usage); |
1868 | __cmd_lat(); | 1768 | return perf_sched__lat(&sched); |
1869 | } else if (!strcmp(argv[0], "map")) { | 1769 | } else if (!strcmp(argv[0], "map")) { |
1870 | trace_handler = &map_ops; | 1770 | sched.tp_handler = &map_ops; |
1871 | setup_sorting(); | 1771 | setup_sorting(&sched, latency_options, latency_usage); |
1872 | __cmd_map(); | 1772 | return perf_sched__map(&sched); |
1873 | } else if (!strncmp(argv[0], "rep", 3)) { | 1773 | } else if (!strncmp(argv[0], "rep", 3)) { |
1874 | trace_handler = &replay_ops; | 1774 | sched.tp_handler = &replay_ops; |
1875 | if (argc) { | 1775 | if (argc) { |
1876 | argc = parse_options(argc, argv, replay_options, replay_usage, 0); | 1776 | argc = parse_options(argc, argv, replay_options, replay_usage, 0); |
1877 | if (argc) | 1777 | if (argc) |
1878 | usage_with_options(replay_usage, replay_options); | 1778 | usage_with_options(replay_usage, replay_options); |
1879 | } | 1779 | } |
1880 | __cmd_replay(); | 1780 | return perf_sched__replay(&sched); |
1881 | } else { | 1781 | } else { |
1882 | usage_with_options(sched_usage, sched_options); | 1782 | usage_with_options(sched_usage, sched_options); |
1883 | } | 1783 | } |
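Note on the builtin-sched.c conversion above: the former file-scope state (max_cpu, curr_pid[], curr_thread[], the event counters, sort lists, option defaults) moves into a single struct perf_sched that embeds the struct perf_tool, and each tracepoint handler recovers that state with container_of(tool, struct perf_sched, tool). The following is a minimal, self-contained sketch of that dispatch pattern; apart from container_of itself, every name is illustrative and not the perf tree's own definition.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool {
	int (*sample)(struct tool *tool, int cpu);
};

struct sched_state {
	struct tool tool;         /* must be embedded, not pointed to */
	unsigned long nr_events;  /* per-run state, no globals needed */
};

static int handle_sample(struct tool *tool, int cpu)
{
	/* Recover the enclosing sched_state from its embedded tool member. */
	struct sched_state *sched = container_of(tool, struct sched_state, tool);

	sched->nr_events++;
	printf("cpu %d, events so far: %lu\n", cpu, sched->nr_events);
	return 0;
}

int main(void)
{
	struct sched_state sched = {
		.tool = { .sample = handle_sample },
		.nr_events = 0,
	};

	/* Generic code only ever sees &sched.tool, yet all per-run state is reachable. */
	sched.tool.sample(&sched.tool, 0);
	sched.tool.sample(&sched.tool, 1);
	return 0;
}

The offset arithmetic only works because the tool is embedded as a member of the state structure, which is why the new cmd_sched() initializes sched.tool in place rather than keeping a separate static perf_tool.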
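The same hunks also drop the FILL_FIELD/FILL_ARRAY copies of whole tracepoint records and instead read only the fields a handler needs, by name, e.g. perf_evsel__intval(evsel, sample, "prev_pid"). The sketch below shows the general by-name field lookup idea with made-up types and a toy layout; it is not the libtraceevent or perf implementation.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical field descriptor: name plus offset/size into the raw record. */
struct field_desc {
	const char *name;
	size_t offset;
	size_t size;
};

/* Toy "sched_switch" layout: prev_pid at offset 0, next_pid at offset 4. */
static const struct field_desc sched_switch_fields[] = {
	{ "prev_pid", 0, 4 },
	{ "next_pid", 4, 4 },
};

/* Look a field up by name and return it as an integer (0 if unknown). */
static uint64_t record_intval(const void *raw, const struct field_desc *fields,
			      size_t nr_fields, const char *name)
{
	for (size_t i = 0; i < nr_fields; i++) {
		if (strcmp(fields[i].name, name))
			continue;
		if (fields[i].size == 4) {
			uint32_t v;
			memcpy(&v, (const char *)raw + fields[i].offset, 4);
			return v;
		} else {
			uint64_t v;
			memcpy(&v, (const char *)raw + fields[i].offset, 8);
			return v;
		}
	}
	return 0;
}

int main(void)
{
	/* 1337 and 42 encoded little-endian in a fake raw record. */
	unsigned char raw[8] = { 0x39, 0x05, 0, 0, 0x2a, 0, 0, 0 };
	uint32_t prev = (uint32_t)record_intval(raw, sched_switch_fields, 2, "prev_pid");
	uint32_t next = (uint32_t)record_intval(raw, sched_switch_fields, 2, "next_pid");

	printf("prev_pid=%u next_pid=%u\n", (unsigned)prev, (unsigned)next);
	return 0;
}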
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index c350cfee3157..6d98a83d5a60 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c | |||
@@ -430,9 +430,9 @@ static void process_event(union perf_event *event, struct perf_sample *sample, | |||
430 | printf("\n"); | 430 | printf("\n"); |
431 | } | 431 | } |
432 | 432 | ||
433 | static int default_start_script(const char *script __unused, | 433 | static int default_start_script(const char *script __maybe_unused, |
434 | int argc __unused, | 434 | int argc __maybe_unused, |
435 | const char **argv __unused) | 435 | const char **argv __maybe_unused) |
436 | { | 436 | { |
437 | return 0; | 437 | return 0; |
438 | } | 438 | } |
@@ -442,8 +442,8 @@ static int default_stop_script(void) | |||
442 | return 0; | 442 | return 0; |
443 | } | 443 | } |
444 | 444 | ||
445 | static int default_generate_script(struct pevent *pevent __unused, | 445 | static int default_generate_script(struct pevent *pevent __maybe_unused, |
446 | const char *outfile __unused) | 446 | const char *outfile __maybe_unused) |
447 | { | 447 | { |
448 | return 0; | 448 | return 0; |
449 | } | 449 | } |
@@ -474,7 +474,7 @@ static int cleanup_scripting(void) | |||
474 | 474 | ||
475 | static const char *input_name; | 475 | static const char *input_name; |
476 | 476 | ||
477 | static int process_sample_event(struct perf_tool *tool __used, | 477 | static int process_sample_event(struct perf_tool *tool __maybe_unused, |
478 | union perf_event *event, | 478 | union perf_event *event, |
479 | struct perf_sample *sample, | 479 | struct perf_sample *sample, |
480 | struct perf_evsel *evsel, | 480 | struct perf_evsel *evsel, |
@@ -534,7 +534,7 @@ static struct perf_tool perf_script = { | |||
534 | 534 | ||
535 | extern volatile int session_done; | 535 | extern volatile int session_done; |
536 | 536 | ||
537 | static void sig_handler(int sig __unused) | 537 | static void sig_handler(int sig __maybe_unused) |
538 | { | 538 | { |
539 | session_done = 1; | 539 | session_done = 1; |
540 | } | 540 | } |
@@ -644,8 +644,8 @@ static void list_available_languages(void) | |||
644 | fprintf(stderr, "\n"); | 644 | fprintf(stderr, "\n"); |
645 | } | 645 | } |
646 | 646 | ||
647 | static int parse_scriptname(const struct option *opt __used, | 647 | static int parse_scriptname(const struct option *opt __maybe_unused, |
648 | const char *str, int unset __used) | 648 | const char *str, int unset __maybe_unused) |
649 | { | 649 | { |
650 | char spec[PATH_MAX]; | 650 | char spec[PATH_MAX]; |
651 | const char *script, *ext; | 651 | const char *script, *ext; |
@@ -690,8 +690,8 @@ static int parse_scriptname(const struct option *opt __used, | |||
690 | return 0; | 690 | return 0; |
691 | } | 691 | } |
692 | 692 | ||
693 | static int parse_output_fields(const struct option *opt __used, | 693 | static int parse_output_fields(const struct option *opt __maybe_unused, |
694 | const char *arg, int unset __used) | 694 | const char *arg, int unset __maybe_unused) |
695 | { | 695 | { |
696 | char *tok; | 696 | char *tok; |
697 | int i, imax = sizeof(all_output_options) / sizeof(struct output_option); | 697 | int i, imax = sizeof(all_output_options) / sizeof(struct output_option); |
@@ -982,8 +982,9 @@ static char *get_script_root(struct dirent *script_dirent, const char *suffix) | |||
982 | return script_root; | 982 | return script_root; |
983 | } | 983 | } |
984 | 984 | ||
985 | static int list_available_scripts(const struct option *opt __used, | 985 | static int list_available_scripts(const struct option *opt __maybe_unused, |
986 | const char *s __used, int unset __used) | 986 | const char *s __maybe_unused, |
987 | int unset __maybe_unused) | ||
987 | { | 988 | { |
988 | struct dirent *script_next, *lang_next, script_dirent, lang_dirent; | 989 | struct dirent *script_next, *lang_next, script_dirent, lang_dirent; |
989 | char scripts_path[MAXPATHLEN]; | 990 | char scripts_path[MAXPATHLEN]; |
@@ -1172,7 +1173,7 @@ static int have_cmd(int argc, const char **argv) | |||
1172 | return 0; | 1173 | return 0; |
1173 | } | 1174 | } |
1174 | 1175 | ||
1175 | int cmd_script(int argc, const char **argv, const char *prefix __used) | 1176 | int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) |
1176 | { | 1177 | { |
1177 | char *rec_script_path = NULL; | 1178 | char *rec_script_path = NULL; |
1178 | char *rep_script_path = NULL; | 1179 | char *rep_script_path = NULL; |
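The builtin-script.c hunks above, like most of the hunks that follow, are a mechanical rename of the parameter annotations from __used/__unused to __maybe_unused. On GCC and Clang such an annotation conventionally expands to __attribute__((unused)), which silences -Wunused-parameter for callback signatures that are fixed by the caller, without changing code generation. A minimal local illustration, with the macro written out under a stand-in name rather than quoting any perf header:

#include <stdio.h>

/* Local stand-in for the annotation used throughout the diff above. */
#define demo_maybe_unused __attribute__((unused))

/*
 * Callback-style function: the signature is dictated by the caller, so
 * some parameters are legitimately ignored.  The attribute keeps
 * -Wunused-parameter quiet for exactly those parameters.
 */
static int sample_cb(void *tool demo_maybe_unused, int cpu)
{
	printf("sample on cpu %d\n", cpu);
	return 0;
}

int main(void)
{
	return sample_cb(NULL, 0);
}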
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 02f49eba677f..dab347d7b010 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -417,7 +417,7 @@ static int read_counter(struct perf_evsel *counter) | |||
417 | return 0; | 417 | return 0; |
418 | } | 418 | } |
419 | 419 | ||
420 | static int run_perf_stat(int argc __used, const char **argv) | 420 | static int run_perf_stat(int argc __maybe_unused, const char **argv) |
421 | { | 421 | { |
422 | unsigned long long t0, t1; | 422 | unsigned long long t0, t1; |
423 | struct perf_evsel *counter, *first; | 423 | struct perf_evsel *counter, *first; |
@@ -634,7 +634,9 @@ static const char *get_ratio_color(enum grc_type type, double ratio) | |||
634 | return color; | 634 | return color; |
635 | } | 635 | } |
636 | 636 | ||
637 | static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg) | 637 | static void print_stalled_cycles_frontend(int cpu, |
638 | struct perf_evsel *evsel | ||
639 | __maybe_unused, double avg) | ||
638 | { | 640 | { |
639 | double total, ratio = 0.0; | 641 | double total, ratio = 0.0; |
640 | const char *color; | 642 | const char *color; |
@@ -651,7 +653,9 @@ static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __us | |||
651 | fprintf(output, " frontend cycles idle "); | 653 | fprintf(output, " frontend cycles idle "); |
652 | } | 654 | } |
653 | 655 | ||
654 | static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg) | 656 | static void print_stalled_cycles_backend(int cpu, |
657 | struct perf_evsel *evsel | ||
658 | __maybe_unused, double avg) | ||
655 | { | 659 | { |
656 | double total, ratio = 0.0; | 660 | double total, ratio = 0.0; |
657 | const char *color; | 661 | const char *color; |
@@ -668,7 +672,9 @@ static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __use | |||
668 | fprintf(output, " backend cycles idle "); | 672 | fprintf(output, " backend cycles idle "); |
669 | } | 673 | } |
670 | 674 | ||
671 | static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg) | 675 | static void print_branch_misses(int cpu, |
676 | struct perf_evsel *evsel __maybe_unused, | ||
677 | double avg) | ||
672 | { | 678 | { |
673 | double total, ratio = 0.0; | 679 | double total, ratio = 0.0; |
674 | const char *color; | 680 | const char *color; |
@@ -685,7 +691,9 @@ static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double | |||
685 | fprintf(output, " of all branches "); | 691 | fprintf(output, " of all branches "); |
686 | } | 692 | } |
687 | 693 | ||
688 | static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | 694 | static void print_l1_dcache_misses(int cpu, |
695 | struct perf_evsel *evsel __maybe_unused, | ||
696 | double avg) | ||
689 | { | 697 | { |
690 | double total, ratio = 0.0; | 698 | double total, ratio = 0.0; |
691 | const char *color; | 699 | const char *color; |
@@ -702,7 +710,9 @@ static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, dou | |||
702 | fprintf(output, " of all L1-dcache hits "); | 710 | fprintf(output, " of all L1-dcache hits "); |
703 | } | 711 | } |
704 | 712 | ||
705 | static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | 713 | static void print_l1_icache_misses(int cpu, |
714 | struct perf_evsel *evsel __maybe_unused, | ||
715 | double avg) | ||
706 | { | 716 | { |
707 | double total, ratio = 0.0; | 717 | double total, ratio = 0.0; |
708 | const char *color; | 718 | const char *color; |
@@ -719,7 +729,9 @@ static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, dou | |||
719 | fprintf(output, " of all L1-icache hits "); | 729 | fprintf(output, " of all L1-icache hits "); |
720 | } | 730 | } |
721 | 731 | ||
722 | static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | 732 | static void print_dtlb_cache_misses(int cpu, |
733 | struct perf_evsel *evsel __maybe_unused, | ||
734 | double avg) | ||
723 | { | 735 | { |
724 | double total, ratio = 0.0; | 736 | double total, ratio = 0.0; |
725 | const char *color; | 737 | const char *color; |
@@ -736,7 +748,9 @@ static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do | |||
736 | fprintf(output, " of all dTLB cache hits "); | 748 | fprintf(output, " of all dTLB cache hits "); |
737 | } | 749 | } |
738 | 750 | ||
739 | static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | 751 | static void print_itlb_cache_misses(int cpu, |
752 | struct perf_evsel *evsel __maybe_unused, | ||
753 | double avg) | ||
740 | { | 754 | { |
741 | double total, ratio = 0.0; | 755 | double total, ratio = 0.0; |
742 | const char *color; | 756 | const char *color; |
@@ -753,7 +767,9 @@ static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do | |||
753 | fprintf(output, " of all iTLB cache hits "); | 767 | fprintf(output, " of all iTLB cache hits "); |
754 | } | 768 | } |
755 | 769 | ||
756 | static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | 770 | static void print_ll_cache_misses(int cpu, |
771 | struct perf_evsel *evsel __maybe_unused, | ||
772 | double avg) | ||
757 | { | 773 | { |
758 | double total, ratio = 0.0; | 774 | double total, ratio = 0.0; |
759 | const char *color; | 775 | const char *color; |
@@ -1059,8 +1075,8 @@ static const char * const stat_usage[] = { | |||
1059 | NULL | 1075 | NULL |
1060 | }; | 1076 | }; |
1061 | 1077 | ||
1062 | static int stat__set_big_num(const struct option *opt __used, | 1078 | static int stat__set_big_num(const struct option *opt __maybe_unused, |
1063 | const char *s __used, int unset) | 1079 | const char *s __maybe_unused, int unset) |
1064 | { | 1080 | { |
1065 | big_num_opt = unset ? 0 : 1; | 1081 | big_num_opt = unset ? 0 : 1; |
1066 | return 0; | 1082 | return 0; |
@@ -1154,7 +1170,7 @@ static int add_default_attributes(void) | |||
1154 | return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); | 1170 | return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); |
1155 | } | 1171 | } |
1156 | 1172 | ||
1157 | int cmd_stat(int argc, const char **argv, const char *prefix __used) | 1173 | int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) |
1158 | { | 1174 | { |
1159 | struct perf_evsel *pos; | 1175 | struct perf_evsel *pos; |
1160 | int status = -ENOMEM; | 1176 | int status = -ENOMEM; |
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index cf33e5081c36..d33143efefce 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c | |||
@@ -18,7 +18,8 @@ | |||
18 | 18 | ||
19 | #include <sys/mman.h> | 19 | #include <sys/mman.h> |
20 | 20 | ||
21 | static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym) | 21 | static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused, |
22 | struct symbol *sym) | ||
22 | { | 23 | { |
23 | bool *visited = symbol__priv(sym); | 24 | bool *visited = symbol__priv(sym); |
24 | *visited = true; | 25 | *visited = true; |
@@ -996,7 +997,9 @@ static u64 mmap_read_self(void *addr) | |||
996 | /* | 997 | /* |
997 | * If the RDPMC instruction faults then signal this back to the test parent task: | 998 | * If the RDPMC instruction faults then signal this back to the test parent task: |
998 | */ | 999 | */ |
999 | static void segfault_handler(int sig __used, siginfo_t *info __used, void *uc __used) | 1000 | static void segfault_handler(int sig __maybe_unused, |
1001 | siginfo_t *info __maybe_unused, | ||
1002 | void *uc __maybe_unused) | ||
1000 | { | 1003 | { |
1001 | exit(-1); | 1004 | exit(-1); |
1002 | } | 1005 | } |
@@ -1023,14 +1026,16 @@ static int __test__rdpmc(void) | |||
1023 | 1026 | ||
1024 | fd = sys_perf_event_open(&attr, 0, -1, -1, 0); | 1027 | fd = sys_perf_event_open(&attr, 0, -1, -1, 0); |
1025 | if (fd < 0) { | 1028 | if (fd < 0) { |
1026 | die("Error: sys_perf_event_open() syscall returned " | 1029 | pr_debug("Error: sys_perf_event_open() syscall returned " |
1027 | "with %d (%s)\n", fd, strerror(errno)); | 1030 | "with %d (%s)\n", fd, strerror(errno)); |
1031 | return -1; | ||
1028 | } | 1032 | } |
1029 | 1033 | ||
1030 | addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); | 1034 | addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); |
1031 | if (addr == (void *)(-1)) { | 1035 | if (addr == (void *)(-1)) { |
1032 | die("Error: mmap() syscall returned " | 1036 | pr_debug("Error: mmap() syscall returned with (%s)\n", |
1033 | "with (%s)\n", strerror(errno)); | 1037 | strerror(errno)); |
1038 | goto out_close; | ||
1034 | } | 1039 | } |
1035 | 1040 | ||
1036 | for (n = 0; n < 6; n++) { | 1041 | for (n = 0; n < 6; n++) { |
@@ -1051,9 +1056,9 @@ static int __test__rdpmc(void) | |||
1051 | } | 1056 | } |
1052 | 1057 | ||
1053 | munmap(addr, page_size); | 1058 | munmap(addr, page_size); |
1054 | close(fd); | ||
1055 | |||
1056 | pr_debug(" "); | 1059 | pr_debug(" "); |
1060 | out_close: | ||
1061 | close(fd); | ||
1057 | 1062 | ||
1058 | if (!delta_sum) | 1063 | if (!delta_sum) |
1059 | return -1; | 1064 | return -1; |
@@ -1313,7 +1318,7 @@ static int perf_test__list(int argc, const char **argv) | |||
1313 | return 0; | 1318 | return 0; |
1314 | } | 1319 | } |
1315 | 1320 | ||
1316 | int cmd_test(int argc, const char **argv, const char *prefix __used) | 1321 | int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused) |
1317 | { | 1322 | { |
1318 | const char * const test_usage[] = { | 1323 | const char * const test_usage[] = { |
1319 | "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]", | 1324 | "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]", |
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 3b75b2e21ea5..55a3a6c6b9e7 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c | |||
@@ -275,28 +275,28 @@ static int cpus_cstate_state[MAX_CPUS]; | |||
275 | static u64 cpus_pstate_start_times[MAX_CPUS]; | 275 | static u64 cpus_pstate_start_times[MAX_CPUS]; |
276 | static u64 cpus_pstate_state[MAX_CPUS]; | 276 | static u64 cpus_pstate_state[MAX_CPUS]; |
277 | 277 | ||
278 | static int process_comm_event(struct perf_tool *tool __used, | 278 | static int process_comm_event(struct perf_tool *tool __maybe_unused, |
279 | union perf_event *event, | 279 | union perf_event *event, |
280 | struct perf_sample *sample __used, | 280 | struct perf_sample *sample __maybe_unused, |
281 | struct machine *machine __used) | 281 | struct machine *machine __maybe_unused) |
282 | { | 282 | { |
283 | pid_set_comm(event->comm.tid, event->comm.comm); | 283 | pid_set_comm(event->comm.tid, event->comm.comm); |
284 | return 0; | 284 | return 0; |
285 | } | 285 | } |
286 | 286 | ||
287 | static int process_fork_event(struct perf_tool *tool __used, | 287 | static int process_fork_event(struct perf_tool *tool __maybe_unused, |
288 | union perf_event *event, | 288 | union perf_event *event, |
289 | struct perf_sample *sample __used, | 289 | struct perf_sample *sample __maybe_unused, |
290 | struct machine *machine __used) | 290 | struct machine *machine __maybe_unused) |
291 | { | 291 | { |
292 | pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); | 292 | pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); |
293 | return 0; | 293 | return 0; |
294 | } | 294 | } |
295 | 295 | ||
296 | static int process_exit_event(struct perf_tool *tool __used, | 296 | static int process_exit_event(struct perf_tool *tool __maybe_unused, |
297 | union perf_event *event, | 297 | union perf_event *event, |
298 | struct perf_sample *sample __used, | 298 | struct perf_sample *sample __maybe_unused, |
299 | struct machine *machine __used) | 299 | struct machine *machine __maybe_unused) |
300 | { | 300 | { |
301 | pid_exit(event->fork.pid, event->fork.time); | 301 | pid_exit(event->fork.pid, event->fork.time); |
302 | return 0; | 302 | return 0; |
@@ -491,11 +491,11 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te) | |||
491 | } | 491 | } |
492 | 492 | ||
493 | 493 | ||
494 | static int process_sample_event(struct perf_tool *tool __used, | 494 | static int process_sample_event(struct perf_tool *tool __maybe_unused, |
495 | union perf_event *event __used, | 495 | union perf_event *event __maybe_unused, |
496 | struct perf_sample *sample, | 496 | struct perf_sample *sample, |
497 | struct perf_evsel *evsel, | 497 | struct perf_evsel *evsel, |
498 | struct machine *machine __used) | 498 | struct machine *machine __maybe_unused) |
499 | { | 499 | { |
500 | struct trace_entry *te; | 500 | struct trace_entry *te; |
501 | 501 | ||
@@ -1081,7 +1081,8 @@ static int __cmd_record(int argc, const char **argv) | |||
1081 | } | 1081 | } |
1082 | 1082 | ||
1083 | static int | 1083 | static int |
1084 | parse_process(const struct option *opt __used, const char *arg, int __used unset) | 1084 | parse_process(const struct option *opt __maybe_unused, const char *arg, |
1085 | int __maybe_unused unset) | ||
1085 | { | 1086 | { |
1086 | if (arg) | 1087 | if (arg) |
1087 | add_process_filter(arg); | 1088 | add_process_filter(arg); |
@@ -1106,7 +1107,8 @@ static const struct option options[] = { | |||
1106 | }; | 1107 | }; |
1107 | 1108 | ||
1108 | 1109 | ||
1109 | int cmd_timechart(int argc, const char **argv, const char *prefix __used) | 1110 | int cmd_timechart(int argc, const char **argv, |
1111 | const char *prefix __maybe_unused) | ||
1110 | { | 1112 | { |
1111 | argc = parse_options(argc, argv, options, timechart_usage, | 1113 | argc = parse_options(argc, argv, options, timechart_usage, |
1112 | PARSE_OPT_STOP_AT_NON_OPTION); | 1114 | PARSE_OPT_STOP_AT_NON_OPTION); |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 0513aaa659f9..5550754c05f2 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -95,7 +95,8 @@ static void perf_top__update_print_entries(struct perf_top *top) | |||
95 | top->print_entries -= 9; | 95 | top->print_entries -= 9; |
96 | } | 96 | } |
97 | 97 | ||
98 | static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg) | 98 | static void perf_top__sig_winch(int sig __maybe_unused, |
99 | siginfo_t *info __maybe_unused, void *arg) | ||
99 | { | 100 | { |
100 | struct perf_top *top = arg; | 101 | struct perf_top *top = arg; |
101 | 102 | ||
@@ -663,7 +664,7 @@ static const char *skip_symbols[] = { | |||
663 | NULL | 664 | NULL |
664 | }; | 665 | }; |
665 | 666 | ||
666 | static int symbol_filter(struct map *map __used, struct symbol *sym) | 667 | static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym) |
667 | { | 668 | { |
668 | const char *name = sym->name; | 669 | const char *name = sym->name; |
669 | int i; | 670 | int i; |
@@ -1163,7 +1164,7 @@ static const char * const top_usage[] = { | |||
1163 | NULL | 1164 | NULL |
1164 | }; | 1165 | }; |
1165 | 1166 | ||
1166 | int cmd_top(int argc, const char **argv, const char *prefix __used) | 1167 | int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) |
1167 | { | 1168 | { |
1168 | struct perf_evsel *pos; | 1169 | struct perf_evsel *pos; |
1169 | int status; | 1170 | int status; |
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c index 1818a531f1d3..4aeb7d5df939 100644 --- a/tools/perf/ui/browser.c +++ b/tools/perf/ui/browser.c | |||
@@ -269,7 +269,7 @@ int ui_browser__show(struct ui_browser *browser, const char *title, | |||
269 | return err ? 0 : -1; | 269 | return err ? 0 : -1; |
270 | } | 270 | } |
271 | 271 | ||
272 | void ui_browser__hide(struct ui_browser *browser __used) | 272 | void ui_browser__hide(struct ui_browser *browser __maybe_unused) |
273 | { | 273 | { |
274 | pthread_mutex_lock(&ui__lock); | 274 | pthread_mutex_lock(&ui__lock); |
275 | ui_helpline__pop(); | 275 | ui_helpline__pop(); |
@@ -518,7 +518,7 @@ static struct ui_browser__colorset { | |||
518 | 518 | ||
519 | 519 | ||
520 | static int ui_browser__color_config(const char *var, const char *value, | 520 | static int ui_browser__color_config(const char *var, const char *value, |
521 | void *data __used) | 521 | void *data __maybe_unused) |
522 | { | 522 | { |
523 | char *fg = NULL, *bg; | 523 | char *fg = NULL, *bg; |
524 | int i; | 524 | int i; |
@@ -602,7 +602,8 @@ void __ui_browser__vline(struct ui_browser *browser, unsigned int column, | |||
602 | SLsmg_set_char_set(0); | 602 | SLsmg_set_char_set(0); |
603 | } | 603 | } |
604 | 604 | ||
605 | void ui_browser__write_graph(struct ui_browser *browser __used, int graph) | 605 | void ui_browser__write_graph(struct ui_browser *browser __maybe_unused, |
606 | int graph) | ||
606 | { | 607 | { |
607 | SLsmg_set_char_set(1); | 608 | SLsmg_set_char_set(1); |
608 | SLsmg_write_char(graph); | 609 | SLsmg_write_char(graph); |
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 67a2703e666a..8f8cd2d73b3b 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c | |||
@@ -54,7 +54,8 @@ static inline struct browser_disasm_line *disasm_line__browser(struct disasm_lin | |||
54 | return (struct browser_disasm_line *)(dl + 1); | 54 | return (struct browser_disasm_line *)(dl + 1); |
55 | } | 55 | } |
56 | 56 | ||
57 | static bool disasm_line__filter(struct ui_browser *browser __used, void *entry) | 57 | static bool disasm_line__filter(struct ui_browser *browser __maybe_unused, |
58 | void *entry) | ||
58 | { | 59 | { |
59 | if (annotate_browser__opts.hide_src_code) { | 60 | if (annotate_browser__opts.hide_src_code) { |
60 | struct disasm_line *dl = list_entry(entry, struct disasm_line, node); | 61 | struct disasm_line *dl = list_entry(entry, struct disasm_line, node); |
@@ -928,7 +929,8 @@ static int annotate_config__cmp(const void *name, const void *cfgp) | |||
928 | return strcmp(name, cfg->name); | 929 | return strcmp(name, cfg->name); |
929 | } | 930 | } |
930 | 931 | ||
931 | static int annotate__config(const char *var, const char *value, void *data __used) | 932 | static int annotate__config(const char *var, const char *value, |
933 | void *data __maybe_unused) | ||
932 | { | 934 | { |
933 | struct annotate__config *cfg; | 935 | struct annotate__config *cfg; |
934 | const char *name; | 936 | const char *name; |
diff --git a/tools/perf/ui/gtk/browser.c b/tools/perf/ui/gtk/browser.c index 3c16ab50e0f8..55acba6e0df4 100644 --- a/tools/perf/ui/gtk/browser.c +++ b/tools/perf/ui/gtk/browser.c | |||
@@ -237,8 +237,9 @@ static GtkWidget *perf_gtk__setup_statusbar(void) | |||
237 | 237 | ||
238 | int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, | 238 | int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, |
239 | const char *help, | 239 | const char *help, |
240 | void (*timer) (void *arg)__used, | 240 | void (*timer) (void *arg)__maybe_unused, |
241 | void *arg __used, int delay_secs __used) | 241 | void *arg __maybe_unused, |
242 | int delay_secs __maybe_unused) | ||
242 | { | 243 | { |
243 | struct perf_evsel *pos; | 244 | struct perf_evsel *pos; |
244 | GtkWidget *vbox; | 245 | GtkWidget *vbox; |
diff --git a/tools/perf/ui/gtk/setup.c b/tools/perf/ui/gtk/setup.c index 26429437e19e..3c4c6ef78283 100644 --- a/tools/perf/ui/gtk/setup.c +++ b/tools/perf/ui/gtk/setup.c | |||
@@ -12,7 +12,7 @@ int perf_gtk__init(void) | |||
12 | return gtk_init_check(NULL, NULL) ? 0 : -1; | 12 | return gtk_init_check(NULL, NULL) ? 0 : -1; |
13 | } | 13 | } |
14 | 14 | ||
15 | void perf_gtk__exit(bool wait_for_ok __used) | 15 | void perf_gtk__exit(bool wait_for_ok __maybe_unused) |
16 | { | 16 | { |
17 | if (!perf_gtk__is_active_context(pgctx)) | 17 | if (!perf_gtk__is_active_context(pgctx)) |
18 | return; | 18 | return; |
diff --git a/tools/perf/ui/gtk/util.c b/tools/perf/ui/gtk/util.c index b8efb966f94c..8aada5b3c04c 100644 --- a/tools/perf/ui/gtk/util.c +++ b/tools/perf/ui/gtk/util.c | |||
@@ -117,8 +117,8 @@ struct perf_error_ops perf_gtk_eops = { | |||
117 | * For now, just add stubs for NO_NEWT=1 build. | 117 | * For now, just add stubs for NO_NEWT=1 build. |
118 | */ | 118 | */ |
119 | #ifdef NO_NEWT_SUPPORT | 119 | #ifdef NO_NEWT_SUPPORT |
120 | void ui_progress__update(u64 curr __used, u64 total __used, | 120 | void ui_progress__update(u64 curr __maybe_unused, u64 total __maybe_unused, |
121 | const char *title __used) | 121 | const char *title __maybe_unused) |
122 | { | 122 | { |
123 | } | 123 | } |
124 | #endif | 124 | #endif |
diff --git a/tools/perf/ui/helpline.c b/tools/perf/ui/helpline.c index 78ba28ac7a2c..a49bcf3c190b 100644 --- a/tools/perf/ui/helpline.c +++ b/tools/perf/ui/helpline.c | |||
@@ -12,7 +12,7 @@ static void nop_helpline__pop(void) | |||
12 | { | 12 | { |
13 | } | 13 | } |
14 | 14 | ||
15 | static void nop_helpline__push(const char *msg __used) | 15 | static void nop_helpline__push(const char *msg __maybe_unused) |
16 | { | 16 | { |
17 | } | 17 | } |
18 | 18 | ||
diff --git a/tools/perf/ui/helpline.h b/tools/perf/ui/helpline.h index a2487f93aa48..2b667ee454c3 100644 --- a/tools/perf/ui/helpline.h +++ b/tools/perf/ui/helpline.h | |||
@@ -24,8 +24,8 @@ void ui_helpline__puts(const char *msg); | |||
24 | extern char ui_helpline__current[512]; | 24 | extern char ui_helpline__current[512]; |
25 | 25 | ||
26 | #ifdef NO_NEWT_SUPPORT | 26 | #ifdef NO_NEWT_SUPPORT |
27 | static inline int ui_helpline__show_help(const char *format __used, | 27 | static inline int ui_helpline__show_help(const char *format __maybe_unused, |
28 | va_list ap __used) | 28 | va_list ap __maybe_unused) |
29 | { | 29 | { |
30 | return 0; | 30 | return 0; |
31 | } | 31 | } |
@@ -35,8 +35,8 @@ int ui_helpline__show_help(const char *format, va_list ap); | |||
35 | #endif /* NO_NEWT_SUPPORT */ | 35 | #endif /* NO_NEWT_SUPPORT */ |
36 | 36 | ||
37 | #ifdef NO_GTK2_SUPPORT | 37 | #ifdef NO_GTK2_SUPPORT |
38 | static inline int perf_gtk__show_helpline(const char *format __used, | 38 | static inline int perf_gtk__show_helpline(const char *format __maybe_unused, |
39 | va_list ap __used) | 39 | va_list ap __maybe_unused) |
40 | { | 40 | { |
41 | return 0; | 41 | return 0; |
42 | } | 42 | } |
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 031b349a3f84..407e855cccb8 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c | |||
@@ -13,7 +13,7 @@ static int hpp__header_overhead(struct perf_hpp *hpp) | |||
13 | return scnprintf(hpp->buf, hpp->size, fmt); | 13 | return scnprintf(hpp->buf, hpp->size, fmt); |
14 | } | 14 | } |
15 | 15 | ||
16 | static int hpp__width_overhead(struct perf_hpp *hpp __used) | 16 | static int hpp__width_overhead(struct perf_hpp *hpp __maybe_unused) |
17 | { | 17 | { |
18 | return 8; | 18 | return 8; |
19 | } | 19 | } |
@@ -62,7 +62,7 @@ static int hpp__header_overhead_sys(struct perf_hpp *hpp) | |||
62 | return scnprintf(hpp->buf, hpp->size, fmt, "sys"); | 62 | return scnprintf(hpp->buf, hpp->size, fmt, "sys"); |
63 | } | 63 | } |
64 | 64 | ||
65 | static int hpp__width_overhead_sys(struct perf_hpp *hpp __used) | 65 | static int hpp__width_overhead_sys(struct perf_hpp *hpp __maybe_unused) |
66 | { | 66 | { |
67 | return 6; | 67 | return 6; |
68 | } | 68 | } |
@@ -88,7 +88,7 @@ static int hpp__header_overhead_us(struct perf_hpp *hpp) | |||
88 | return scnprintf(hpp->buf, hpp->size, fmt, "user"); | 88 | return scnprintf(hpp->buf, hpp->size, fmt, "user"); |
89 | } | 89 | } |
90 | 90 | ||
91 | static int hpp__width_overhead_us(struct perf_hpp *hpp __used) | 91 | static int hpp__width_overhead_us(struct perf_hpp *hpp __maybe_unused) |
92 | { | 92 | { |
93 | return 6; | 93 | return 6; |
94 | } | 94 | } |
@@ -112,7 +112,7 @@ static int hpp__header_overhead_guest_sys(struct perf_hpp *hpp) | |||
112 | return scnprintf(hpp->buf, hpp->size, "guest sys"); | 112 | return scnprintf(hpp->buf, hpp->size, "guest sys"); |
113 | } | 113 | } |
114 | 114 | ||
115 | static int hpp__width_overhead_guest_sys(struct perf_hpp *hpp __used) | 115 | static int hpp__width_overhead_guest_sys(struct perf_hpp *hpp __maybe_unused) |
116 | { | 116 | { |
117 | return 9; | 117 | return 9; |
118 | } | 118 | } |
@@ -138,7 +138,7 @@ static int hpp__header_overhead_guest_us(struct perf_hpp *hpp) | |||
138 | return scnprintf(hpp->buf, hpp->size, "guest usr"); | 138 | return scnprintf(hpp->buf, hpp->size, "guest usr"); |
139 | } | 139 | } |
140 | 140 | ||
141 | static int hpp__width_overhead_guest_us(struct perf_hpp *hpp __used) | 141 | static int hpp__width_overhead_guest_us(struct perf_hpp *hpp __maybe_unused) |
142 | { | 142 | { |
143 | return 9; | 143 | return 9; |
144 | } | 144 | } |
@@ -166,7 +166,7 @@ static int hpp__header_samples(struct perf_hpp *hpp) | |||
166 | return scnprintf(hpp->buf, hpp->size, fmt, "Samples"); | 166 | return scnprintf(hpp->buf, hpp->size, fmt, "Samples"); |
167 | } | 167 | } |
168 | 168 | ||
169 | static int hpp__width_samples(struct perf_hpp *hpp __used) | 169 | static int hpp__width_samples(struct perf_hpp *hpp __maybe_unused) |
170 | { | 170 | { |
171 | return 11; | 171 | return 11; |
172 | } | 172 | } |
@@ -185,7 +185,7 @@ static int hpp__header_period(struct perf_hpp *hpp) | |||
185 | return scnprintf(hpp->buf, hpp->size, fmt, "Period"); | 185 | return scnprintf(hpp->buf, hpp->size, fmt, "Period"); |
186 | } | 186 | } |
187 | 187 | ||
188 | static int hpp__width_period(struct perf_hpp *hpp __used) | 188 | static int hpp__width_period(struct perf_hpp *hpp __maybe_unused) |
189 | { | 189 | { |
190 | return 12; | 190 | return 12; |
191 | } | 191 | } |
@@ -204,7 +204,7 @@ static int hpp__header_delta(struct perf_hpp *hpp) | |||
204 | return scnprintf(hpp->buf, hpp->size, fmt, "Delta"); | 204 | return scnprintf(hpp->buf, hpp->size, fmt, "Delta"); |
205 | } | 205 | } |
206 | 206 | ||
207 | static int hpp__width_delta(struct perf_hpp *hpp __used) | 207 | static int hpp__width_delta(struct perf_hpp *hpp __maybe_unused) |
208 | { | 208 | { |
209 | return 7; | 209 | return 7; |
210 | } | 210 | } |
@@ -238,12 +238,13 @@ static int hpp__header_displ(struct perf_hpp *hpp) | |||
238 | return scnprintf(hpp->buf, hpp->size, "Displ."); | 238 | return scnprintf(hpp->buf, hpp->size, "Displ."); |
239 | } | 239 | } |
240 | 240 | ||
241 | static int hpp__width_displ(struct perf_hpp *hpp __used) | 241 | static int hpp__width_displ(struct perf_hpp *hpp __maybe_unused) |
242 | { | 242 | { |
243 | return 6; | 243 | return 6; |
244 | } | 244 | } |
245 | 245 | ||
246 | static int hpp__entry_displ(struct perf_hpp *hpp, struct hist_entry *he __used) | 246 | static int hpp__entry_displ(struct perf_hpp *hpp, |
247 | struct hist_entry *he __maybe_unused) | ||
247 | { | 248 | { |
248 | const char *fmt = symbol_conf.field_sep ? "%s" : "%6.6s"; | 249 | const char *fmt = symbol_conf.field_sep ? "%s" : "%6.6s"; |
249 | char buf[32] = " "; | 250 | char buf[32] = " "; |
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c index 4dc0887c04f1..60debb81537a 100644 --- a/tools/perf/ui/tui/setup.c +++ b/tools/perf/ui/tui/setup.c | |||
@@ -28,7 +28,7 @@ void ui__refresh_dimensions(bool force) | |||
28 | } | 28 | } |
29 | } | 29 | } |
30 | 30 | ||
31 | static void ui__sigwinch(int sig __used) | 31 | static void ui__sigwinch(int sig __maybe_unused) |
32 | { | 32 | { |
33 | ui__need_resize = 1; | 33 | ui__need_resize = 1; |
34 | } | 34 | } |
@@ -88,7 +88,7 @@ int ui__getch(int delay_secs) | |||
88 | return SLkp_getkey(); | 88 | return SLkp_getkey(); |
89 | } | 89 | } |
90 | 90 | ||
91 | static void newt_suspend(void *d __used) | 91 | static void newt_suspend(void *d __maybe_unused) |
92 | { | 92 | { |
93 | newtSuspend(); | 93 | newtSuspend(); |
94 | raise(SIGTSTP); | 94 | raise(SIGTSTP); |
diff --git a/tools/perf/util/alias.c b/tools/perf/util/alias.c index b8144e80bb1e..e6d134773d0a 100644 --- a/tools/perf/util/alias.c +++ b/tools/perf/util/alias.c | |||
@@ -3,7 +3,8 @@ | |||
3 | static const char *alias_key; | 3 | static const char *alias_key; |
4 | static char *alias_val; | 4 | static char *alias_val; |
5 | 5 | ||
6 | static int alias_lookup_cb(const char *k, const char *v, void *cb __used) | 6 | static int alias_lookup_cb(const char *k, const char *v, |
7 | void *cb __maybe_unused) | ||
7 | { | 8 | { |
8 | if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) { | 9 | if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) { |
9 | if (!v) | 10 | if (!v) |
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 04eafd3939df..f0a910371377 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c | |||
@@ -313,8 +313,8 @@ static struct ins_ops dec_ops = { | |||
313 | .scnprintf = dec__scnprintf, | 313 | .scnprintf = dec__scnprintf, |
314 | }; | 314 | }; |
315 | 315 | ||
316 | static int nop__scnprintf(struct ins *ins __used, char *bf, size_t size, | 316 | static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size, |
317 | struct ins_operands *ops __used) | 317 | struct ins_operands *ops __maybe_unused) |
318 | { | 318 | { |
319 | return scnprintf(bf, size, "%-6.6s", "nop"); | 319 | return scnprintf(bf, size, "%-6.6s", "nop"); |
320 | } | 320 | } |
@@ -416,7 +416,7 @@ static struct ins *ins__find(const char *name) | |||
416 | return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp); | 416 | return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp); |
417 | } | 417 | } |
418 | 418 | ||
419 | int symbol__annotate_init(struct map *map __used, struct symbol *sym) | 419 | int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym) |
420 | { | 420 | { |
421 | struct annotation *notes = symbol__annotation(sym); | 421 | struct annotation *notes = symbol__annotation(sym); |
422 | pthread_mutex_init(¬es->lock, NULL); | 422 | pthread_mutex_init(¬es->lock, NULL); |
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 62a6e7a7365d..9b5b21e7b032 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h | |||
@@ -126,7 +126,7 @@ int symbol__alloc_hist(struct symbol *sym); | |||
126 | void symbol__annotate_zero_histograms(struct symbol *sym); | 126 | void symbol__annotate_zero_histograms(struct symbol *sym); |
127 | 127 | ||
128 | int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize); | 128 | int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize); |
129 | int symbol__annotate_init(struct map *map __used, struct symbol *sym); | 129 | int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym); |
130 | int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, | 130 | int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, |
131 | bool full_paths, int min_pcnt, int max_lines, | 131 | bool full_paths, int min_pcnt, int max_lines, |
132 | int context); | 132 | int context); |
@@ -139,11 +139,12 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, | |||
139 | int max_lines); | 139 | int max_lines); |
140 | 140 | ||
141 | #ifdef NO_NEWT_SUPPORT | 141 | #ifdef NO_NEWT_SUPPORT |
142 | static inline int symbol__tui_annotate(struct symbol *sym __used, | 142 | static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused, |
143 | struct map *map __used, | 143 | struct map *map __maybe_unused, |
144 | int evidx __used, | 144 | int evidx __maybe_unused, |
145 | void(*timer)(void *arg) __used, | 145 | void(*timer)(void *arg) __maybe_unused, |
146 | void *arg __used, int delay_secs __used) | 146 | void *arg __maybe_unused, |
147 | int delay_secs __maybe_unused) | ||
147 | { | 148 | { |
148 | return 0; | 149 | return 0; |
149 | } | 150 | } |
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index fd9a5944b627..8e3a740ddbd4 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c | |||
@@ -16,10 +16,10 @@ | |||
16 | #include "session.h" | 16 | #include "session.h" |
17 | #include "tool.h" | 17 | #include "tool.h" |
18 | 18 | ||
19 | static int build_id__mark_dso_hit(struct perf_tool *tool __used, | 19 | static int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused, |
20 | union perf_event *event, | 20 | union perf_event *event, |
21 | struct perf_sample *sample __used, | 21 | struct perf_sample *sample __maybe_unused, |
22 | struct perf_evsel *evsel __used, | 22 | struct perf_evsel *evsel __maybe_unused, |
23 | struct machine *machine) | 23 | struct machine *machine) |
24 | { | 24 | { |
25 | struct addr_location al; | 25 | struct addr_location al; |
@@ -41,9 +41,10 @@ static int build_id__mark_dso_hit(struct perf_tool *tool __used, | |||
41 | return 0; | 41 | return 0; |
42 | } | 42 | } |
43 | 43 | ||
44 | static int perf_event__exit_del_thread(struct perf_tool *tool __used, | 44 | static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused, |
45 | union perf_event *event, | 45 | union perf_event *event, |
46 | struct perf_sample *sample __used, | 46 | struct perf_sample *sample |
47 | __maybe_unused, | ||
47 | struct machine *machine) | 48 | struct machine *machine) |
48 | { | 49 | { |
49 | struct thread *thread = machine__findnew_thread(machine, event->fork.tid); | 50 | struct thread *thread = machine__findnew_thread(machine, event->fork.tid); |
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h index cff18c617d13..ab1769426541 100644 --- a/tools/perf/util/cache.h +++ b/tools/perf/util/cache.h | |||
@@ -39,7 +39,7 @@ static inline void setup_browser(bool fallback_to_pager) | |||
39 | if (fallback_to_pager) | 39 | if (fallback_to_pager) |
40 | setup_pager(); | 40 | setup_pager(); |
41 | } | 41 | } |
42 | static inline void exit_browser(bool wait_for_ok __used) {} | 42 | static inline void exit_browser(bool wait_for_ok __maybe_unused) {} |
43 | #else | 43 | #else |
44 | void setup_browser(bool fallback_to_pager); | 44 | void setup_browser(bool fallback_to_pager); |
45 | void exit_browser(bool wait_for_ok); | 45 | void exit_browser(bool wait_for_ok); |
@@ -49,7 +49,7 @@ static inline int ui__init(void) | |||
49 | { | 49 | { |
50 | return -1; | 50 | return -1; |
51 | } | 51 | } |
52 | static inline void ui__exit(bool wait_for_ok __used) {} | 52 | static inline void ui__exit(bool wait_for_ok __maybe_unused) {} |
53 | #else | 53 | #else |
54 | int ui__init(void); | 54 | int ui__init(void); |
55 | void ui__exit(bool wait_for_ok); | 55 | void ui__exit(bool wait_for_ok); |
@@ -60,7 +60,7 @@ static inline int perf_gtk__init(void) | |||
60 | { | 60 | { |
61 | return -1; | 61 | return -1; |
62 | } | 62 | } |
63 | static inline void perf_gtk__exit(bool wait_for_ok __used) {} | 63 | static inline void perf_gtk__exit(bool wait_for_ok __maybe_unused) {} |
64 | #else | 64 | #else |
65 | int perf_gtk__init(void); | 65 | int perf_gtk__init(void); |
66 | void perf_gtk__exit(bool wait_for_ok); | 66 | void perf_gtk__exit(bool wait_for_ok); |
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 3a6bff47614f..d3b3f5d82137 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c | |||
@@ -93,7 +93,7 @@ __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node, | |||
93 | */ | 93 | */ |
94 | static void | 94 | static void |
95 | sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root, | 95 | sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root, |
96 | u64 min_hit, struct callchain_param *param __used) | 96 | u64 min_hit, struct callchain_param *param __maybe_unused) |
97 | { | 97 | { |
98 | __sort_chain_flat(rb_root, &root->node, min_hit); | 98 | __sort_chain_flat(rb_root, &root->node, min_hit); |
99 | } | 99 | } |
@@ -115,7 +115,7 @@ static void __sort_chain_graph_abs(struct callchain_node *node, | |||
115 | 115 | ||
116 | static void | 116 | static void |
117 | sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root, | 117 | sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root, |
118 | u64 min_hit, struct callchain_param *param __used) | 118 | u64 min_hit, struct callchain_param *param __maybe_unused) |
119 | { | 119 | { |
120 | __sort_chain_graph_abs(&chain_root->node, min_hit); | 120 | __sort_chain_graph_abs(&chain_root->node, min_hit); |
121 | rb_root->rb_node = chain_root->node.rb_root.rb_node; | 121 | rb_root->rb_node = chain_root->node.rb_root.rb_node; |
@@ -140,7 +140,7 @@ static void __sort_chain_graph_rel(struct callchain_node *node, | |||
140 | 140 | ||
141 | static void | 141 | static void |
142 | sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root, | 142 | sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root, |
143 | u64 min_hit __used, struct callchain_param *param) | 143 | u64 min_hit __maybe_unused, struct callchain_param *param) |
144 | { | 144 | { |
145 | __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0); | 145 | __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0); |
146 | rb_root->rb_node = chain_root->node.rb_root.rb_node; | 146 | rb_root->rb_node = chain_root->node.rb_root.rb_node; |
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c index dbe2f16b1a1a..96bbda1ddb83 100644 --- a/tools/perf/util/cgroup.c +++ b/tools/perf/util/cgroup.c | |||
@@ -138,8 +138,8 @@ void close_cgroup(struct cgroup_sel *cgrp) | |||
138 | } | 138 | } |
139 | } | 139 | } |
140 | 140 | ||
141 | int parse_cgroups(const struct option *opt __used, const char *str, | 141 | int parse_cgroups(const struct option *opt __maybe_unused, const char *str, |
142 | int unset __used) | 142 | int unset __maybe_unused) |
143 | { | 143 | { |
144 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; | 144 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; |
145 | const char *p, *e, *eos = str + strlen(str); | 145 | const char *p, *e, *eos = str + strlen(str); |
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c index 6faa3a18bfbd..3e0fdd369ccb 100644 --- a/tools/perf/util/config.c +++ b/tools/perf/util/config.c | |||
@@ -342,13 +342,15 @@ const char *perf_config_dirname(const char *name, const char *value) | |||
342 | return value; | 342 | return value; |
343 | } | 343 | } |
344 | 344 | ||
345 | static int perf_default_core_config(const char *var __used, const char *value __used) | 345 | static int perf_default_core_config(const char *var __maybe_unused, |
346 | const char *value __maybe_unused) | ||
346 | { | 347 | { |
347 | /* Add other config variables here. */ | 348 | /* Add other config variables here. */ |
348 | return 0; | 349 | return 0; |
349 | } | 350 | } |
350 | 351 | ||
351 | int perf_default_config(const char *var, const char *value, void *dummy __used) | 352 | int perf_default_config(const char *var, const char *value, |
353 | void *dummy __maybe_unused) | ||
352 | { | 354 | { |
353 | if (!prefixcmp(var, "core.")) | 355 | if (!prefixcmp(var, "core.")) |
354 | return perf_default_core_config(var, value); | 356 | return perf_default_core_config(var, value); |
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 05e660cbf7e2..bb2e7d1007ab 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h | |||
@@ -16,19 +16,20 @@ struct ui_progress; | |||
16 | struct perf_error_ops; | 16 | struct perf_error_ops; |
17 | 17 | ||
18 | #if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT) | 18 | #if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT) |
19 | static inline void ui_progress__update(u64 curr __used, u64 total __used, | 19 | static inline void ui_progress__update(u64 curr __maybe_unused, |
20 | const char *title __used) {} | 20 | u64 total __maybe_unused, |
21 | const char *title __maybe_unused) {} | ||
21 | 22 | ||
22 | #define ui__error(format, arg...) ui__warning(format, ##arg) | 23 | #define ui__error(format, arg...) ui__warning(format, ##arg) |
23 | 24 | ||
24 | static inline int | 25 | static inline int |
25 | perf_error__register(struct perf_error_ops *eops __used) | 26 | perf_error__register(struct perf_error_ops *eops __maybe_unused) |
26 | { | 27 | { |
27 | return 0; | 28 | return 0; |
28 | } | 29 | } |
29 | 30 | ||
30 | static inline int | 31 | static inline int |
31 | perf_error__unregister(struct perf_error_ops *eops __used) | 32 | perf_error__unregister(struct perf_error_ops *eops __maybe_unused) |
32 | { | 33 | { |
33 | return 0; | 34 | return 0; |
34 | } | 35 | } |
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 84ff6f160cd0..8202f5ca0483 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c | |||
@@ -112,7 +112,7 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool, | |||
112 | event->comm.header.type = PERF_RECORD_COMM; | 112 | event->comm.header.type = PERF_RECORD_COMM; |
113 | 113 | ||
114 | size = strlen(event->comm.comm) + 1; | 114 | size = strlen(event->comm.comm) + 1; |
115 | size = ALIGN(size, sizeof(u64)); | 115 | size = PERF_ALIGN(size, sizeof(u64)); |
116 | memset(event->comm.comm + size, 0, machine->id_hdr_size); | 116 | memset(event->comm.comm + size, 0, machine->id_hdr_size); |
117 | event->comm.header.size = (sizeof(event->comm) - | 117 | event->comm.header.size = (sizeof(event->comm) - |
118 | (sizeof(event->comm.comm) - size) + | 118 | (sizeof(event->comm.comm) - size) + |
@@ -145,7 +145,7 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool, | |||
145 | sizeof(event->comm.comm)); | 145 | sizeof(event->comm.comm)); |
146 | 146 | ||
147 | size = strlen(event->comm.comm) + 1; | 147 | size = strlen(event->comm.comm) + 1; |
148 | size = ALIGN(size, sizeof(u64)); | 148 | size = PERF_ALIGN(size, sizeof(u64)); |
149 | memset(event->comm.comm + size, 0, machine->id_hdr_size); | 149 | memset(event->comm.comm + size, 0, machine->id_hdr_size); |
150 | event->comm.header.size = (sizeof(event->comm) - | 150 | event->comm.header.size = (sizeof(event->comm) - |
151 | (sizeof(event->comm.comm) - size) + | 151 | (sizeof(event->comm.comm) - size) + |
@@ -228,7 +228,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, | |||
228 | size = strlen(execname); | 228 | size = strlen(execname); |
229 | execname[size - 1] = '\0'; /* Remove \n */ | 229 | execname[size - 1] = '\0'; /* Remove \n */ |
230 | memcpy(event->mmap.filename, execname, size); | 230 | memcpy(event->mmap.filename, execname, size); |
231 | size = ALIGN(size, sizeof(u64)); | 231 | size = PERF_ALIGN(size, sizeof(u64)); |
232 | event->mmap.len -= event->mmap.start; | 232 | event->mmap.len -= event->mmap.start; |
233 | event->mmap.header.size = (sizeof(event->mmap) - | 233 | event->mmap.header.size = (sizeof(event->mmap) - |
234 | (sizeof(event->mmap.filename) - size)); | 234 | (sizeof(event->mmap.filename) - size)); |
@@ -282,7 +282,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool, | |||
282 | if (pos->dso->kernel) | 282 | if (pos->dso->kernel) |
283 | continue; | 283 | continue; |
284 | 284 | ||
285 | size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); | 285 | size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); |
286 | event->mmap.header.type = PERF_RECORD_MMAP; | 286 | event->mmap.header.type = PERF_RECORD_MMAP; |
287 | event->mmap.header.size = (sizeof(event->mmap) - | 287 | event->mmap.header.size = (sizeof(event->mmap) - |
288 | (sizeof(event->mmap.filename) - size)); | 288 | (sizeof(event->mmap.filename) - size)); |
@@ -494,7 +494,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, | |||
494 | map = machine->vmlinux_maps[MAP__FUNCTION]; | 494 | map = machine->vmlinux_maps[MAP__FUNCTION]; |
495 | size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), | 495 | size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), |
496 | "%s%s", mmap_name, symbol_name) + 1; | 496 | "%s%s", mmap_name, symbol_name) + 1; |
497 | size = ALIGN(size, sizeof(u64)); | 497 | size = PERF_ALIGN(size, sizeof(u64)); |
498 | event->mmap.header.type = PERF_RECORD_MMAP; | 498 | event->mmap.header.type = PERF_RECORD_MMAP; |
499 | event->mmap.header.size = (sizeof(event->mmap) - | 499 | event->mmap.header.size = (sizeof(event->mmap) - |
500 | (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); | 500 | (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); |
@@ -514,9 +514,9 @@ size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp) | |||
514 | return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid); | 514 | return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid); |
515 | } | 515 | } |
516 | 516 | ||
517 | int perf_event__process_comm(struct perf_tool *tool __used, | 517 | int perf_event__process_comm(struct perf_tool *tool __maybe_unused, |
518 | union perf_event *event, | 518 | union perf_event *event, |
519 | struct perf_sample *sample __used, | 519 | struct perf_sample *sample __maybe_unused, |
520 | struct machine *machine) | 520 | struct machine *machine) |
521 | { | 521 | { |
522 | struct thread *thread = machine__findnew_thread(machine, event->comm.tid); | 522 | struct thread *thread = machine__findnew_thread(machine, event->comm.tid); |
@@ -532,10 +532,10 @@ int perf_event__process_comm(struct perf_tool *tool __used, | |||
532 | return 0; | 532 | return 0; |
533 | } | 533 | } |
534 | 534 | ||
535 | int perf_event__process_lost(struct perf_tool *tool __used, | 535 | int perf_event__process_lost(struct perf_tool *tool __maybe_unused, |
536 | union perf_event *event, | 536 | union perf_event *event, |
537 | struct perf_sample *sample __used, | 537 | struct perf_sample *sample __maybe_unused, |
538 | struct machine *machine __used) | 538 | struct machine *machine __maybe_unused) |
539 | { | 539 | { |
540 | dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", | 540 | dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", |
541 | event->lost.id, event->lost.lost); | 541 | event->lost.id, event->lost.lost); |
@@ -555,7 +555,8 @@ static void perf_event__set_kernel_mmap_len(union perf_event *event, | |||
555 | maps[MAP__FUNCTION]->end = ~0ULL; | 555 | maps[MAP__FUNCTION]->end = ~0ULL; |
556 | } | 556 | } |
557 | 557 | ||
558 | static int perf_event__process_kernel_mmap(struct perf_tool *tool __used, | 558 | static int perf_event__process_kernel_mmap(struct perf_tool *tool |
559 | __maybe_unused, | ||
559 | union perf_event *event, | 560 | union perf_event *event, |
560 | struct machine *machine) | 561 | struct machine *machine) |
561 | { | 562 | { |
@@ -657,7 +658,7 @@ size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) | |||
657 | 658 | ||
658 | int perf_event__process_mmap(struct perf_tool *tool, | 659 | int perf_event__process_mmap(struct perf_tool *tool, |
659 | union perf_event *event, | 660 | union perf_event *event, |
660 | struct perf_sample *sample __used, | 661 | struct perf_sample *sample __maybe_unused, |
661 | struct machine *machine) | 662 | struct machine *machine) |
662 | { | 663 | { |
663 | struct thread *thread; | 664 | struct thread *thread; |
@@ -701,9 +702,9 @@ size_t perf_event__fprintf_task(union perf_event *event, FILE *fp) | |||
701 | event->fork.ppid, event->fork.ptid); | 702 | event->fork.ppid, event->fork.ptid); |
702 | } | 703 | } |
703 | 704 | ||
704 | int perf_event__process_task(struct perf_tool *tool __used, | 705 | int perf_event__process_task(struct perf_tool *tool __maybe_unused, |
705 | union perf_event *event, | 706 | union perf_event *event, |
706 | struct perf_sample *sample __used, | 707 | struct perf_sample *sample __maybe_unused, |
707 | struct machine *machine) | 708 | struct machine *machine) |
708 | { | 709 | { |
709 | struct thread *thread = machine__findnew_thread(machine, event->fork.tid); | 710 | struct thread *thread = machine__findnew_thread(machine, event->fork.tid); |
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 0e088d046e56..21b99e741a87 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h | |||
@@ -101,7 +101,7 @@ struct perf_sample { | |||
101 | struct build_id_event { | 101 | struct build_id_event { |
102 | struct perf_event_header header; | 102 | struct perf_event_header header; |
103 | pid_t pid; | 103 | pid_t pid; |
104 | u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; | 104 | u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; |
105 | char filename[]; | 105 | char filename[]; |
106 | }; | 106 | }; |
107 | 107 | ||
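The ALIGN -> PERF_ALIGN change in the hunks above is a namespacing rename for the size-rounding helper used when sizing records such as build_id_event; the rounding behaviour itself does not change. A hedged sketch of the equivalent arithmetic (the real macro lives in the perf utility headers, outside this diff):

	#include <stddef.h>

	/* Illustrative only: round len up to the next multiple of align,
	 * where align is a power of two -- the semantics PERF_ALIGN is
	 * assumed to keep after the rename. */
	static inline size_t align_up(size_t len, size_t align)
	{
		return (len + align - 1) & ~(align - 1);
	}

	/* e.g. align_up(5, 8) == 8, align_up(16, 8) == 16 */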
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 06f76441547a..1506ba0453f1 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <byteswap.h> | 10 | #include <byteswap.h> |
11 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
12 | #include "asm/bug.h" | 12 | #include "asm/bug.h" |
13 | #include "event-parse.h" | ||
13 | #include "evsel.h" | 14 | #include "evsel.h" |
14 | #include "evlist.h" | 15 | #include "evlist.h" |
15 | #include "util.h" | 16 | #include "util.h" |
@@ -1000,3 +1001,37 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, | |||
1000 | 1001 | ||
1001 | return 0; | 1002 | return 0; |
1002 | } | 1003 | } |
1004 | |||
1005 | char *perf_evsel__strval(struct perf_evsel *evsel, struct perf_sample *sample, | ||
1006 | const char *name) | ||
1007 | { | ||
1008 | struct format_field *field = pevent_find_field(evsel->tp_format, name); | ||
1009 | int offset; | ||
1010 | |||
1011 | if (!field) | ||
1012 | return NULL; | ||
1013 | |||
1014 | offset = field->offset; | ||
1015 | |||
1016 | if (field->flags & FIELD_IS_DYNAMIC) { | ||
1017 | offset = *(int *)(sample->raw_data + field->offset); | ||
1018 | offset &= 0xffff; | ||
1019 | } | ||
1020 | |||
1021 | return sample->raw_data + offset; | ||
1022 | } | ||
1023 | |||
1024 | u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, | ||
1025 | const char *name) | ||
1026 | { | ||
1027 | struct format_field *field = pevent_find_field(evsel->tp_format, name); | ||
1028 | u64 val; | ||
1029 | |||
1030 | if (!field) | ||
1031 | return 0; | ||
1032 | |||
1033 | val = pevent_read_number(evsel->tp_format->pevent, | ||
1034 | sample->raw_data + field->offset, field->size); | ||
1035 | return val; | ||
1036 | |||
1037 | } | ||
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 390690eb8781..dc40fe32210b 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h | |||
@@ -120,6 +120,13 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | |||
120 | struct thread_map *threads); | 120 | struct thread_map *threads); |
121 | void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads); | 121 | void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads); |
122 | 122 | ||
123 | struct perf_sample; | ||
124 | |||
125 | char *perf_evsel__strval(struct perf_evsel *evsel, struct perf_sample *sample, | ||
126 | const char *name); | ||
127 | u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, | ||
128 | const char *name); | ||
129 | |||
123 | #define perf_evsel__match(evsel, t, c) \ | 130 | #define perf_evsel__match(evsel, t, c) \ |
124 | (evsel->attr.type == PERF_TYPE_##t && \ | 131 | (evsel->attr.type == PERF_TYPE_##t && \ |
125 | evsel->attr.config == PERF_COUNT_##c) | 132 | evsel->attr.config == PERF_COUNT_##c) |
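The perf_evsel__strval() and perf_evsel__intval() helpers added in the two hunks above let a caller read individual tracepoint fields straight from a sample's raw data, using the format description already attached to the evsel (evsel->tp_format). A small usage sketch; the field names are hypothetical and must match whatever the tracepoint's format actually defines:

	#include <stdio.h>
	#include <inttypes.h>
	#include "evsel.h"

	/* Hypothetical handler for a scheduling tracepoint sample. */
	static void print_prev_task(struct perf_evsel *evsel,
				    struct perf_sample *sample)
	{
		char *comm = perf_evsel__strval(evsel, sample, "prev_comm");
		u64 pid = perf_evsel__intval(evsel, sample, "prev_pid");

		if (comm != NULL)
			printf("prev task: %s (pid %" PRIu64 ")\n", comm, pid);
	}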
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index d07bc134e562..acbf6336199e 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include "debug.h" | 21 | #include "debug.h" |
22 | #include "cpumap.h" | 22 | #include "cpumap.h" |
23 | #include "pmu.h" | 23 | #include "pmu.h" |
24 | #include "vdso.h" | ||
24 | 25 | ||
25 | static bool no_buildid_cache = false; | 26 | static bool no_buildid_cache = false; |
26 | 27 | ||
@@ -129,7 +130,7 @@ static int do_write_string(int fd, const char *str) | |||
129 | int ret; | 130 | int ret; |
130 | 131 | ||
131 | olen = strlen(str) + 1; | 132 | olen = strlen(str) + 1; |
132 | len = ALIGN(olen, NAME_ALIGN); | 133 | len = PERF_ALIGN(olen, NAME_ALIGN); |
133 | 134 | ||
134 | /* write len, incl. \0 */ | 135 | /* write len, incl. \0 */ |
135 | ret = do_write(fd, &len, sizeof(len)); | 136 | ret = do_write(fd, &len, sizeof(len)); |
@@ -207,6 +208,29 @@ perf_header__set_cmdline(int argc, const char **argv) | |||
207 | continue; \ | 208 | continue; \ |
208 | else | 209 | else |
209 | 210 | ||
211 | static int write_buildid(char *name, size_t name_len, u8 *build_id, | ||
212 | pid_t pid, u16 misc, int fd) | ||
213 | { | ||
214 | int err; | ||
215 | struct build_id_event b; | ||
216 | size_t len; | ||
217 | |||
218 | len = name_len + 1; | ||
219 | len = PERF_ALIGN(len, NAME_ALIGN); | ||
220 | |||
221 | memset(&b, 0, sizeof(b)); | ||
222 | memcpy(&b.build_id, build_id, BUILD_ID_SIZE); | ||
223 | b.pid = pid; | ||
224 | b.header.misc = misc; | ||
225 | b.header.size = sizeof(b) + len; | ||
226 | |||
227 | err = do_write(fd, &b, sizeof(b)); | ||
228 | if (err < 0) | ||
229 | return err; | ||
230 | |||
231 | return write_padded(fd, name, name_len + 1, len); | ||
232 | } | ||
233 | |||
210 | static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, | 234 | static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, |
211 | u16 misc, int fd) | 235 | u16 misc, int fd) |
212 | { | 236 | { |
@@ -214,24 +238,23 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, | |||
214 | 238 | ||
215 | dsos__for_each_with_build_id(pos, head) { | 239 | dsos__for_each_with_build_id(pos, head) { |
216 | int err; | 240 | int err; |
217 | struct build_id_event b; | 241 | char *name; |
218 | size_t len; | 242 | size_t name_len; |
219 | 243 | ||
220 | if (!pos->hit) | 244 | if (!pos->hit) |
221 | continue; | 245 | continue; |
222 | len = pos->long_name_len + 1; | 246 | |
223 | len = ALIGN(len, NAME_ALIGN); | 247 | if (is_vdso_map(pos->short_name)) { |
224 | memset(&b, 0, sizeof(b)); | 248 | name = (char *) VDSO__MAP_NAME; |
225 | memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); | 249 | name_len = sizeof(VDSO__MAP_NAME) + 1; |
226 | b.pid = pid; | 250 | } else { |
227 | b.header.misc = misc; | 251 | name = pos->long_name; |
228 | b.header.size = sizeof(b) + len; | 252 | name_len = pos->long_name_len + 1; |
229 | err = do_write(fd, &b, sizeof(b)); | 253 | } |
230 | if (err < 0) | 254 | |
231 | return err; | 255 | err = write_buildid(name, name_len, pos->build_id, |
232 | err = write_padded(fd, pos->long_name, | 256 | pid, misc, fd); |
233 | pos->long_name_len + 1, len); | 257 | if (err) |
234 | if (err < 0) | ||
235 | return err; | 258 | return err; |
236 | } | 259 | } |
237 | 260 | ||
@@ -277,19 +300,20 @@ static int dsos__write_buildid_table(struct perf_header *header, int fd) | |||
277 | } | 300 | } |
278 | 301 | ||
279 | int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | 302 | int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, |
280 | const char *name, bool is_kallsyms) | 303 | const char *name, bool is_kallsyms, bool is_vdso) |
281 | { | 304 | { |
282 | const size_t size = PATH_MAX; | 305 | const size_t size = PATH_MAX; |
283 | char *realname, *filename = zalloc(size), | 306 | char *realname, *filename = zalloc(size), |
284 | *linkname = zalloc(size), *targetname; | 307 | *linkname = zalloc(size), *targetname; |
285 | int len, err = -1; | 308 | int len, err = -1; |
309 | bool slash = is_kallsyms || is_vdso; | ||
286 | 310 | ||
287 | if (is_kallsyms) { | 311 | if (is_kallsyms) { |
288 | if (symbol_conf.kptr_restrict) { | 312 | if (symbol_conf.kptr_restrict) { |
289 | pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); | 313 | pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); |
290 | return 0; | 314 | return 0; |
291 | } | 315 | } |
292 | realname = (char *)name; | 316 | realname = (char *) name; |
293 | } else | 317 | } else |
294 | realname = realpath(name, NULL); | 318 | realname = realpath(name, NULL); |
295 | 319 | ||
@@ -297,7 +321,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | |||
297 | goto out_free; | 321 | goto out_free; |
298 | 322 | ||
299 | len = scnprintf(filename, size, "%s%s%s", | 323 | len = scnprintf(filename, size, "%s%s%s", |
300 | debugdir, is_kallsyms ? "/" : "", realname); | 324 | debugdir, slash ? "/" : "", |
325 | is_vdso ? VDSO__MAP_NAME : realname); | ||
301 | if (mkdir_p(filename, 0755)) | 326 | if (mkdir_p(filename, 0755)) |
302 | goto out_free; | 327 | goto out_free; |
303 | 328 | ||
@@ -333,13 +358,14 @@ out_free: | |||
333 | 358 | ||
334 | static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, | 359 | static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, |
335 | const char *name, const char *debugdir, | 360 | const char *name, const char *debugdir, |
336 | bool is_kallsyms) | 361 | bool is_kallsyms, bool is_vdso) |
337 | { | 362 | { |
338 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; | 363 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; |
339 | 364 | ||
340 | build_id__sprintf(build_id, build_id_size, sbuild_id); | 365 | build_id__sprintf(build_id, build_id_size, sbuild_id); |
341 | 366 | ||
342 | return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms); | 367 | return build_id_cache__add_s(sbuild_id, debugdir, name, |
368 | is_kallsyms, is_vdso); | ||
343 | } | 369 | } |
344 | 370 | ||
345 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) | 371 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) |
@@ -383,9 +409,11 @@ out_free: | |||
383 | static int dso__cache_build_id(struct dso *dso, const char *debugdir) | 409 | static int dso__cache_build_id(struct dso *dso, const char *debugdir) |
384 | { | 410 | { |
385 | bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; | 411 | bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; |
412 | bool is_vdso = is_vdso_map(dso->short_name); | ||
386 | 413 | ||
387 | return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), | 414 | return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), |
388 | dso->long_name, debugdir, is_kallsyms); | 415 | dso->long_name, debugdir, |
416 | is_kallsyms, is_vdso); | ||
389 | } | 417 | } |
390 | 418 | ||
391 | static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) | 419 | static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) |
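Net effect of the build-id changes above: a DSO whose short name identifies it as the vDSO map is written to the buildid table and cached under the synthetic VDSO__MAP_NAME entry instead of a filesystem path. A rough sketch of the cache filename this yields; the "~/.debug" debugdir and the assumption that VDSO__MAP_NAME expands to the literal "[vdso]" string are illustrative:

	#include <stdio.h>
	#include <stdbool.h>

	/* Mirrors the filename construction in build_id_cache__add_s()
	 * above; values are examples only. */
	static void show_cache_path(const char *realname, bool is_kallsyms,
				    bool is_vdso)
	{
		char filename[4096];
		bool slash = is_kallsyms || is_vdso;

		snprintf(filename, sizeof(filename), "%s%s%s",
			 "~/.debug", slash ? "/" : "",
			 is_vdso ? "[vdso]" : realname);
		printf("%s\n", filename);
		/* vDSO:       "~/.debug/[vdso]"
		 * normal DSO: "~/.debug" + absolute realname, e.g.
		 *             "~/.debug/usr/lib/libc-2.15.so"          */
	}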
@@ -447,7 +475,7 @@ static bool perf_session__read_build_ids(struct perf_session *session, bool with | |||
447 | return ret; | 475 | return ret; |
448 | } | 476 | } |
449 | 477 | ||
450 | static int write_tracing_data(int fd, struct perf_header *h __used, | 478 | static int write_tracing_data(int fd, struct perf_header *h __maybe_unused, |
451 | struct perf_evlist *evlist) | 479 | struct perf_evlist *evlist) |
452 | { | 480 | { |
453 | return read_tracing_data(fd, &evlist->entries); | 481 | return read_tracing_data(fd, &evlist->entries); |
@@ -455,7 +483,7 @@ static int write_tracing_data(int fd, struct perf_header *h __used, | |||
455 | 483 | ||
456 | 484 | ||
457 | static int write_build_id(int fd, struct perf_header *h, | 485 | static int write_build_id(int fd, struct perf_header *h, |
458 | struct perf_evlist *evlist __used) | 486 | struct perf_evlist *evlist __maybe_unused) |
459 | { | 487 | { |
460 | struct perf_session *session; | 488 | struct perf_session *session; |
461 | int err; | 489 | int err; |
@@ -476,8 +504,8 @@ static int write_build_id(int fd, struct perf_header *h, | |||
476 | return 0; | 504 | return 0; |
477 | } | 505 | } |
478 | 506 | ||
479 | static int write_hostname(int fd, struct perf_header *h __used, | 507 | static int write_hostname(int fd, struct perf_header *h __maybe_unused, |
480 | struct perf_evlist *evlist __used) | 508 | struct perf_evlist *evlist __maybe_unused) |
481 | { | 509 | { |
482 | struct utsname uts; | 510 | struct utsname uts; |
483 | int ret; | 511 | int ret; |
@@ -489,8 +517,8 @@ static int write_hostname(int fd, struct perf_header *h __used, | |||
489 | return do_write_string(fd, uts.nodename); | 517 | return do_write_string(fd, uts.nodename); |
490 | } | 518 | } |
491 | 519 | ||
492 | static int write_osrelease(int fd, struct perf_header *h __used, | 520 | static int write_osrelease(int fd, struct perf_header *h __maybe_unused, |
493 | struct perf_evlist *evlist __used) | 521 | struct perf_evlist *evlist __maybe_unused) |
494 | { | 522 | { |
495 | struct utsname uts; | 523 | struct utsname uts; |
496 | int ret; | 524 | int ret; |
@@ -502,8 +530,8 @@ static int write_osrelease(int fd, struct perf_header *h __used, | |||
502 | return do_write_string(fd, uts.release); | 530 | return do_write_string(fd, uts.release); |
503 | } | 531 | } |
504 | 532 | ||
505 | static int write_arch(int fd, struct perf_header *h __used, | 533 | static int write_arch(int fd, struct perf_header *h __maybe_unused, |
506 | struct perf_evlist *evlist __used) | 534 | struct perf_evlist *evlist __maybe_unused) |
507 | { | 535 | { |
508 | struct utsname uts; | 536 | struct utsname uts; |
509 | int ret; | 537 | int ret; |
@@ -515,14 +543,14 @@ static int write_arch(int fd, struct perf_header *h __used, | |||
515 | return do_write_string(fd, uts.machine); | 543 | return do_write_string(fd, uts.machine); |
516 | } | 544 | } |
517 | 545 | ||
518 | static int write_version(int fd, struct perf_header *h __used, | 546 | static int write_version(int fd, struct perf_header *h __maybe_unused, |
519 | struct perf_evlist *evlist __used) | 547 | struct perf_evlist *evlist __maybe_unused) |
520 | { | 548 | { |
521 | return do_write_string(fd, perf_version_string); | 549 | return do_write_string(fd, perf_version_string); |
522 | } | 550 | } |
523 | 551 | ||
524 | static int write_cpudesc(int fd, struct perf_header *h __used, | 552 | static int write_cpudesc(int fd, struct perf_header *h __maybe_unused, |
525 | struct perf_evlist *evlist __used) | 553 | struct perf_evlist *evlist __maybe_unused) |
526 | { | 554 | { |
527 | #ifndef CPUINFO_PROC | 555 | #ifndef CPUINFO_PROC |
528 | #define CPUINFO_PROC NULL | 556 | #define CPUINFO_PROC NULL |
@@ -580,8 +608,8 @@ done: | |||
580 | return ret; | 608 | return ret; |
581 | } | 609 | } |
582 | 610 | ||
583 | static int write_nrcpus(int fd, struct perf_header *h __used, | 611 | static int write_nrcpus(int fd, struct perf_header *h __maybe_unused, |
584 | struct perf_evlist *evlist __used) | 612 | struct perf_evlist *evlist __maybe_unused) |
585 | { | 613 | { |
586 | long nr; | 614 | long nr; |
587 | u32 nrc, nra; | 615 | u32 nrc, nra; |
@@ -606,7 +634,7 @@ static int write_nrcpus(int fd, struct perf_header *h __used, | |||
606 | return do_write(fd, &nra, sizeof(nra)); | 634 | return do_write(fd, &nra, sizeof(nra)); |
607 | } | 635 | } |
608 | 636 | ||
609 | static int write_event_desc(int fd, struct perf_header *h __used, | 637 | static int write_event_desc(int fd, struct perf_header *h __maybe_unused, |
610 | struct perf_evlist *evlist) | 638 | struct perf_evlist *evlist) |
611 | { | 639 | { |
612 | struct perf_evsel *evsel; | 640 | struct perf_evsel *evsel; |
@@ -663,8 +691,8 @@ static int write_event_desc(int fd, struct perf_header *h __used, | |||
663 | return 0; | 691 | return 0; |
664 | } | 692 | } |
665 | 693 | ||
666 | static int write_cmdline(int fd, struct perf_header *h __used, | 694 | static int write_cmdline(int fd, struct perf_header *h __maybe_unused, |
667 | struct perf_evlist *evlist __used) | 695 | struct perf_evlist *evlist __maybe_unused) |
668 | { | 696 | { |
669 | char buf[MAXPATHLEN]; | 697 | char buf[MAXPATHLEN]; |
670 | char proc[32]; | 698 | char proc[32]; |
@@ -832,8 +860,8 @@ static struct cpu_topo *build_cpu_topology(void) | |||
832 | return tp; | 860 | return tp; |
833 | } | 861 | } |
834 | 862 | ||
835 | static int write_cpu_topology(int fd, struct perf_header *h __used, | 863 | static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused, |
836 | struct perf_evlist *evlist __used) | 864 | struct perf_evlist *evlist __maybe_unused) |
837 | { | 865 | { |
838 | struct cpu_topo *tp; | 866 | struct cpu_topo *tp; |
839 | u32 i; | 867 | u32 i; |
@@ -868,8 +896,8 @@ done: | |||
868 | 896 | ||
869 | 897 | ||
870 | 898 | ||
871 | static int write_total_mem(int fd, struct perf_header *h __used, | 899 | static int write_total_mem(int fd, struct perf_header *h __maybe_unused, |
872 | struct perf_evlist *evlist __used) | 900 | struct perf_evlist *evlist __maybe_unused) |
873 | { | 901 | { |
874 | char *buf = NULL; | 902 | char *buf = NULL; |
875 | FILE *fp; | 903 | FILE *fp; |
@@ -954,8 +982,8 @@ done: | |||
954 | return ret; | 982 | return ret; |
955 | } | 983 | } |
956 | 984 | ||
957 | static int write_numa_topology(int fd, struct perf_header *h __used, | 985 | static int write_numa_topology(int fd, struct perf_header *h __maybe_unused, |
958 | struct perf_evlist *evlist __used) | 986 | struct perf_evlist *evlist __maybe_unused) |
959 | { | 987 | { |
960 | char *buf = NULL; | 988 | char *buf = NULL; |
961 | size_t len = 0; | 989 | size_t len = 0; |
@@ -1015,8 +1043,8 @@ done: | |||
1015 | * }; | 1043 | * }; |
1016 | */ | 1044 | */ |
1017 | 1045 | ||
1018 | static int write_pmu_mappings(int fd, struct perf_header *h __used, | 1046 | static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused, |
1019 | struct perf_evlist *evlist __used) | 1047 | struct perf_evlist *evlist __maybe_unused) |
1020 | { | 1048 | { |
1021 | struct perf_pmu *pmu = NULL; | 1049 | struct perf_pmu *pmu = NULL; |
1022 | off_t offset = lseek(fd, 0, SEEK_CUR); | 1050 | off_t offset = lseek(fd, 0, SEEK_CUR); |
@@ -1046,13 +1074,14 @@ static int write_pmu_mappings(int fd, struct perf_header *h __used, | |||
1046 | * default get_cpuid(): nothing gets recorded | 1074 | * default get_cpuid(): nothing gets recorded |
1047 | * actual implementation must be in arch/$(ARCH)/util/header.c | 1075 | * actual implementation must be in arch/$(ARCH)/util/header.c |
1048 | */ | 1076 | */ |
1049 | int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used) | 1077 | int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused, |
1078 | size_t sz __maybe_unused) | ||
1050 | { | 1079 | { |
1051 | return -1; | 1080 | return -1; |
1052 | } | 1081 | } |
1053 | 1082 | ||
1054 | static int write_cpuid(int fd, struct perf_header *h __used, | 1083 | static int write_cpuid(int fd, struct perf_header *h __maybe_unused, |
1055 | struct perf_evlist *evlist __used) | 1084 | struct perf_evlist *evlist __maybe_unused) |
1056 | { | 1085 | { |
1057 | char buffer[64]; | 1086 | char buffer[64]; |
1058 | int ret; | 1087 | int ret; |
@@ -1066,8 +1095,9 @@ write_it: | |||
1066 | return do_write_string(fd, buffer); | 1095 | return do_write_string(fd, buffer); |
1067 | } | 1096 | } |
1068 | 1097 | ||
1069 | static int write_branch_stack(int fd __used, struct perf_header *h __used, | 1098 | static int write_branch_stack(int fd __maybe_unused, |
1070 | struct perf_evlist *evlist __used) | 1099 | struct perf_header *h __maybe_unused, |
1100 | struct perf_evlist *evlist __maybe_unused) | ||
1071 | { | 1101 | { |
1072 | return 0; | 1102 | return 0; |
1073 | } | 1103 | } |
@@ -1344,7 +1374,8 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) | |||
1344 | free_event_desc(events); | 1374 | free_event_desc(events); |
1345 | } | 1375 | } |
1346 | 1376 | ||
1347 | static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp) | 1377 | static void print_total_mem(struct perf_header *h __maybe_unused, int fd, |
1378 | FILE *fp) | ||
1348 | { | 1379 | { |
1349 | uint64_t mem; | 1380 | uint64_t mem; |
1350 | ssize_t ret; | 1381 | ssize_t ret; |
@@ -1362,7 +1393,8 @@ error: | |||
1362 | fprintf(fp, "# total memory : unknown\n"); | 1393 | fprintf(fp, "# total memory : unknown\n"); |
1363 | } | 1394 | } |
1364 | 1395 | ||
1365 | static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp) | 1396 | static void print_numa_topology(struct perf_header *h __maybe_unused, int fd, |
1397 | FILE *fp) | ||
1366 | { | 1398 | { |
1367 | ssize_t ret; | 1399 | ssize_t ret; |
1368 | u32 nr, c, i; | 1400 | u32 nr, c, i; |
@@ -1422,7 +1454,8 @@ static void print_cpuid(struct perf_header *ph, int fd, FILE *fp) | |||
1422 | free(str); | 1454 | free(str); |
1423 | } | 1455 | } |
1424 | 1456 | ||
1425 | static void print_branch_stack(struct perf_header *ph __used, int fd __used, | 1457 | static void print_branch_stack(struct perf_header *ph __maybe_unused, |
1458 | int fd __maybe_unused, | ||
1426 | FILE *fp) | 1459 | FILE *fp) |
1427 | { | 1460 | { |
1428 | fprintf(fp, "# contains samples with branch stack\n"); | 1461 | fprintf(fp, "# contains samples with branch stack\n"); |
@@ -1532,7 +1565,7 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, | |||
1532 | struct perf_session *session = container_of(header, struct perf_session, header); | 1565 | struct perf_session *session = container_of(header, struct perf_session, header); |
1533 | struct { | 1566 | struct { |
1534 | struct perf_event_header header; | 1567 | struct perf_event_header header; |
1535 | u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; | 1568 | u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; |
1536 | char filename[0]; | 1569 | char filename[0]; |
1537 | } old_bev; | 1570 | } old_bev; |
1538 | struct build_id_event bev; | 1571 | struct build_id_event bev; |
@@ -1621,9 +1654,10 @@ out: | |||
1621 | return err; | 1654 | return err; |
1622 | } | 1655 | } |
1623 | 1656 | ||
1624 | static int process_tracing_data(struct perf_file_section *section __unused, | 1657 | static int process_tracing_data(struct perf_file_section *section |
1625 | struct perf_header *ph __unused, | 1658 | __maybe_unused, |
1626 | int feat __unused, int fd, void *data) | 1659 | struct perf_header *ph __maybe_unused, |
1660 | int feat __maybe_unused, int fd, void *data) | ||
1627 | { | 1661 | { |
1628 | trace_report(fd, data, false); | 1662 | trace_report(fd, data, false); |
1629 | return 0; | 1663 | return 0; |
@@ -1631,7 +1665,8 @@ static int process_tracing_data(struct perf_file_section *section __unused, | |||
1631 | 1665 | ||
1632 | static int process_build_id(struct perf_file_section *section, | 1666 | static int process_build_id(struct perf_file_section *section, |
1633 | struct perf_header *ph, | 1667 | struct perf_header *ph, |
1634 | int feat __unused, int fd, void *data __used) | 1668 | int feat __maybe_unused, int fd, |
1669 | void *data __maybe_unused) | ||
1635 | { | 1670 | { |
1636 | if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) | 1671 | if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) |
1637 | pr_debug("Failed to read buildids, continuing...\n"); | 1672 | pr_debug("Failed to read buildids, continuing...\n"); |
@@ -1670,9 +1705,9 @@ perf_evlist__set_event_name(struct perf_evlist *evlist, struct perf_evsel *event | |||
1670 | } | 1705 | } |
1671 | 1706 | ||
1672 | static int | 1707 | static int |
1673 | process_event_desc(struct perf_file_section *section __unused, | 1708 | process_event_desc(struct perf_file_section *section __maybe_unused, |
1674 | struct perf_header *header, int feat __unused, int fd, | 1709 | struct perf_header *header, int feat __maybe_unused, int fd, |
1675 | void *data __used) | 1710 | void *data __maybe_unused) |
1676 | { | 1711 | { |
1677 | struct perf_session *session = container_of(header, struct perf_session, header); | 1712 | struct perf_session *session = container_of(header, struct perf_session, header); |
1678 | struct perf_evsel *evsel, *events = read_event_desc(header, fd); | 1713 | struct perf_evsel *evsel, *events = read_event_desc(header, fd); |
@@ -2439,7 +2474,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool, | |||
2439 | int err; | 2474 | int err; |
2440 | 2475 | ||
2441 | size = sizeof(struct perf_event_attr); | 2476 | size = sizeof(struct perf_event_attr); |
2442 | size = ALIGN(size, sizeof(u64)); | 2477 | size = PERF_ALIGN(size, sizeof(u64)); |
2443 | size += sizeof(struct perf_event_header); | 2478 | size += sizeof(struct perf_event_header); |
2444 | size += ids * sizeof(u64); | 2479 | size += ids * sizeof(u64); |
2445 | 2480 | ||
@@ -2537,7 +2572,7 @@ int perf_event__synthesize_event_type(struct perf_tool *tool, | |||
2537 | 2572 | ||
2538 | ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; | 2573 | ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; |
2539 | size = strlen(ev.event_type.event_type.name); | 2574 | size = strlen(ev.event_type.event_type.name); |
2540 | size = ALIGN(size, sizeof(u64)); | 2575 | size = PERF_ALIGN(size, sizeof(u64)); |
2541 | ev.event_type.header.size = sizeof(ev.event_type) - | 2576 | ev.event_type.header.size = sizeof(ev.event_type) - |
2542 | (sizeof(ev.event_type.event_type.name) - size); | 2577 | (sizeof(ev.event_type.event_type.name) - size); |
2543 | 2578 | ||
@@ -2568,7 +2603,7 @@ int perf_event__synthesize_event_types(struct perf_tool *tool, | |||
2568 | return err; | 2603 | return err; |
2569 | } | 2604 | } |
2570 | 2605 | ||
2571 | int perf_event__process_event_type(struct perf_tool *tool __unused, | 2606 | int perf_event__process_event_type(struct perf_tool *tool __maybe_unused, |
2572 | union perf_event *event) | 2607 | union perf_event *event) |
2573 | { | 2608 | { |
2574 | if (perf_header__push_event(event->event_type.event_type.event_id, | 2609 | if (perf_header__push_event(event->event_type.event_type.event_id, |
@@ -2585,7 +2620,7 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, | |||
2585 | union perf_event ev; | 2620 | union perf_event ev; |
2586 | struct tracing_data *tdata; | 2621 | struct tracing_data *tdata; |
2587 | ssize_t size = 0, aligned_size = 0, padding; | 2622 | ssize_t size = 0, aligned_size = 0, padding; |
2588 | int err __used = 0; | 2623 | int err __maybe_unused = 0; |
2589 | 2624 | ||
2590 | /* | 2625 | /* |
2591 | * We are going to store the size of the data followed | 2626 | * We are going to store the size of the data followed |
@@ -2606,7 +2641,7 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, | |||
2606 | 2641 | ||
2607 | ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; | 2642 | ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; |
2608 | size = tdata->size; | 2643 | size = tdata->size; |
2609 | aligned_size = ALIGN(size, sizeof(u64)); | 2644 | aligned_size = PERF_ALIGN(size, sizeof(u64)); |
2610 | padding = aligned_size - size; | 2645 | padding = aligned_size - size; |
2611 | ev.tracing_data.header.size = sizeof(ev.tracing_data); | 2646 | ev.tracing_data.header.size = sizeof(ev.tracing_data); |
2612 | ev.tracing_data.size = aligned_size; | 2647 | ev.tracing_data.size = aligned_size; |
@@ -2637,7 +2672,7 @@ int perf_event__process_tracing_data(union perf_event *event, | |||
2637 | 2672 | ||
2638 | size_read = trace_report(session->fd, &session->pevent, | 2673 | size_read = trace_report(session->fd, &session->pevent, |
2639 | session->repipe); | 2674 | session->repipe); |
2640 | padding = ALIGN(size_read, sizeof(u64)) - size_read; | 2675 | padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; |
2641 | 2676 | ||
2642 | if (read(session->fd, buf, padding) < 0) | 2677 | if (read(session->fd, buf, padding) < 0) |
2643 | die("reading input file"); | 2678 | die("reading input file"); |
@@ -2671,7 +2706,7 @@ int perf_event__synthesize_build_id(struct perf_tool *tool, | |||
2671 | memset(&ev, 0, sizeof(ev)); | 2706 | memset(&ev, 0, sizeof(ev)); |
2672 | 2707 | ||
2673 | len = pos->long_name_len + 1; | 2708 | len = pos->long_name_len + 1; |
2674 | len = ALIGN(len, NAME_ALIGN); | 2709 | len = PERF_ALIGN(len, NAME_ALIGN); |
2675 | memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); | 2710 | memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); |
2676 | ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; | 2711 | ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; |
2677 | ev.build_id.header.misc = misc; | 2712 | ev.build_id.header.misc = misc; |
@@ -2684,7 +2719,7 @@ int perf_event__synthesize_build_id(struct perf_tool *tool, | |||
2684 | return err; | 2719 | return err; |
2685 | } | 2720 | } |
2686 | 2721 | ||
2687 | int perf_event__process_build_id(struct perf_tool *tool __used, | 2722 | int perf_event__process_build_id(struct perf_tool *tool __maybe_unused, |
2688 | union perf_event *event, | 2723 | union perf_event *event, |
2689 | struct perf_session *session) | 2724 | struct perf_session *session) |
2690 | { | 2725 | { |
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 9d5eedceda72..209dad4fee2b 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h | |||
@@ -96,7 +96,7 @@ int perf_header__process_sections(struct perf_header *header, int fd, | |||
96 | int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full); | 96 | int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full); |
97 | 97 | ||
98 | int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | 98 | int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, |
99 | const char *name, bool is_kallsyms); | 99 | const char *name, bool is_kallsyms, bool is_vdso); |
100 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir); | 100 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir); |
101 | 101 | ||
102 | int perf_event__synthesize_attr(struct perf_tool *tool, | 102 | int perf_event__synthesize_attr(struct perf_tool *tool, |
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c index 4fa764d8f7d7..8b1f6e891b8a 100644 --- a/tools/perf/util/help.c +++ b/tools/perf/util/help.c | |||
@@ -332,7 +332,8 @@ const char *help_unknown_cmd(const char *cmd) | |||
332 | exit(1); | 332 | exit(1); |
333 | } | 333 | } |
334 | 334 | ||
335 | int cmd_version(int argc __used, const char **argv __used, const char *prefix __used) | 335 | int cmd_version(int argc __maybe_unused, const char **argv __maybe_unused, |
336 | const char *prefix __maybe_unused) | ||
336 | { | 337 | { |
337 | printf("perf version %s\n", perf_version_string); | 338 | printf("perf version %s\n", perf_version_string); |
338 | return 0; | 339 | return 0; |
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 0ba65ad07cd1..6ec5398de89d 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -394,7 +394,7 @@ void hist_entry__free(struct hist_entry *he) | |||
394 | * collapse the histogram | 394 | * collapse the histogram |
395 | */ | 395 | */ |
396 | 396 | ||
397 | static bool hists__collapse_insert_entry(struct hists *hists __used, | 397 | static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, |
398 | struct rb_root *root, | 398 | struct rb_root *root, |
399 | struct hist_entry *he) | 399 | struct hist_entry *he) |
400 | { | 400 | { |
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 4146f51124f0..f011ad4756e8 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h | |||
@@ -156,20 +156,22 @@ struct perf_evlist; | |||
156 | 156 | ||
157 | #ifdef NO_NEWT_SUPPORT | 157 | #ifdef NO_NEWT_SUPPORT |
158 | static inline | 158 | static inline |
159 | int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used, | 159 | int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused, |
160 | const char *help __used, | 160 | const char *help __maybe_unused, |
161 | void(*timer)(void *arg) __used, | 161 | void(*timer)(void *arg) __maybe_unused, |
162 | void *arg __used, | 162 | void *arg __maybe_unused, |
163 | int refresh __used) | 163 | int refresh __maybe_unused) |
164 | { | 164 | { |
165 | return 0; | 165 | return 0; |
166 | } | 166 | } |
167 | 167 | ||
168 | static inline int hist_entry__tui_annotate(struct hist_entry *self __used, | 168 | static inline int hist_entry__tui_annotate(struct hist_entry *self |
169 | int evidx __used, | 169 | __maybe_unused, |
170 | void(*timer)(void *arg) __used, | 170 | int evidx __maybe_unused, |
171 | void *arg __used, | 171 | void(*timer)(void *arg) |
172 | int delay_secs __used) | 172 | __maybe_unused, |
173 | void *arg __maybe_unused, | ||
174 | int delay_secs __maybe_unused) | ||
173 | { | 175 | { |
174 | return 0; | 176 | return 0; |
175 | } | 177 | } |
@@ -187,11 +189,11 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, | |||
187 | 189 | ||
188 | #ifdef NO_GTK2_SUPPORT | 190 | #ifdef NO_GTK2_SUPPORT |
189 | static inline | 191 | static inline |
190 | int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __used, | 192 | int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused, |
191 | const char *help __used, | 193 | const char *help __maybe_unused, |
192 | void(*timer)(void *arg) __used, | 194 | void(*timer)(void *arg) __maybe_unused, |
193 | void *arg __used, | 195 | void *arg __maybe_unused, |
194 | int refresh __used) | 196 | int refresh __maybe_unused) |
195 | { | 197 | { |
196 | return 0; | 198 | return 0; |
197 | } | 199 | } |
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h index 587a230d2075..a55d8cf083c9 100644 --- a/tools/perf/util/include/linux/bitops.h +++ b/tools/perf/util/include/linux/bitops.h | |||
@@ -5,6 +5,10 @@ | |||
5 | #include <linux/compiler.h> | 5 | #include <linux/compiler.h> |
6 | #include <asm/hweight.h> | 6 | #include <asm/hweight.h> |
7 | 7 | ||
8 | #ifndef __WORDSIZE | ||
9 | #define __WORDSIZE (__SIZEOF_LONG__ * 8) | ||
10 | #endif | ||
11 | |||
8 | #define BITS_PER_LONG __WORDSIZE | 12 | #define BITS_PER_LONG __WORDSIZE |
9 | #define BITS_PER_BYTE 8 | 13 | #define BITS_PER_BYTE 8 |
10 | #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) | 14 | #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) |
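The fallback above matters for builds against C libraries that do not export __WORDSIZE (e.g. the Android work mentioned in this merge); it derives the word size from the compiler instead. A minimal stand-alone sketch, not part of the patch:

/* Sketch: BITS_PER_LONG falls back to the compiler's __SIZEOF_LONG__
 * when libc does not provide __WORDSIZE. */
#include <stdio.h>

#ifndef __WORDSIZE
#define __WORDSIZE (__SIZEOF_LONG__ * 8)
#endif

#define BITS_PER_LONG __WORDSIZE

int main(void)
{
	printf("BITS_PER_LONG = %d\n", BITS_PER_LONG); /* 64 on LP64, 32 on ILP32 */
	return 0;
}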
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h index 2dc867128e46..96b919dae11c 100644 --- a/tools/perf/util/include/linux/compiler.h +++ b/tools/perf/util/include/linux/compiler.h | |||
@@ -9,7 +9,13 @@ | |||
9 | #define __attribute_const__ | 9 | #define __attribute_const__ |
10 | #endif | 10 | #endif |
11 | 11 | ||
12 | #define __used __attribute__((__unused__)) | 12 | #ifndef __maybe_unused |
13 | #define __maybe_unused __attribute__((unused)) | ||
14 | #endif | ||
13 | #define __packed __attribute__((__packed__)) | 15 | #define __packed __attribute__((__packed__)) |
14 | 16 | ||
17 | #ifndef __force | ||
18 | #define __force | ||
19 | #endif | ||
20 | |||
15 | #endif | 21 | #endif |
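The __used to __maybe_unused rename throughout this series relies on the fallback defined above; it keeps stub callbacks warning-free under -Wunused-parameter without changing their signatures. A small hypothetical example of the attribute in use (function names are illustrative only):

/* Sketch: __maybe_unused marks parameters a stub intentionally ignores. */
#include <stdio.h>

#ifndef __maybe_unused
#define __maybe_unused __attribute__((unused))
#endif

static int process_event_stub(void *event __maybe_unused,
			      void *sample __maybe_unused)
{
	return 0;	/* stub: intentionally ignores its arguments */
}

int main(void)
{
	printf("stub returned %d\n", process_event_stub(NULL, NULL));
	return 0;
}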
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h index 4af9a10cc2d2..d8c927c868ee 100644 --- a/tools/perf/util/include/linux/kernel.h +++ b/tools/perf/util/include/linux/kernel.h | |||
@@ -8,8 +8,8 @@ | |||
8 | 8 | ||
9 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) | 9 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) |
10 | 10 | ||
11 | #define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) | 11 | #define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1) |
12 | #define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) | 12 | #define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) |
13 | 13 | ||
14 | #ifndef offsetof | 14 | #ifndef offsetof |
15 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) | 15 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) |
@@ -46,6 +46,15 @@ | |||
46 | _min1 < _min2 ? _min1 : _min2; }) | 46 | _min1 < _min2 ? _min1 : _min2; }) |
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | #ifndef roundup | ||
50 | #define roundup(x, y) ( \ | ||
51 | { \ | ||
52 | const typeof(y) __y = y; \ | ||
53 | (((x) + (__y - 1)) / __y) * __y; \ | ||
54 | } \ | ||
55 | ) | ||
56 | #endif | ||
57 | |||
49 | #ifndef BUG_ON | 58 | #ifndef BUG_ON |
50 | #ifdef NDEBUG | 59 | #ifdef NDEBUG |
51 | #define BUG_ON(cond) do { if (cond) {} } while (0) | 60 | #define BUG_ON(cond) do { if (cond) {} } while (0) |
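The ALIGN to PERF_ALIGN rename avoids colliding with definitions pulled in from system headers, and roundup gains a fallback for hosts that lack it. A stand-alone sketch of what both compute; PERF_ALIGN assumes a power-of-two alignment, roundup does not:

/* Sketch: both round 13 up to the next multiple of 8, i.e. 16. */
#include <stdio.h>
#include <stdint.h>

#define PERF_ALIGN(x, a)	__PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
#define __PERF_ALIGN_MASK(x, mask)	(((x)+(mask))&~(mask))

#ifndef roundup
#define roundup(x, y) (				\
{						\
	const typeof(y) __y = y;		\
	(((x) + (__y - 1)) / __y) * __y;	\
}						\
)
#endif

int main(void)
{
	size_t len = 13;

	printf("%zu %zu\n",
	       (size_t)PERF_ALIGN(len, sizeof(uint64_t)),
	       (size_t)roundup(len, sizeof(uint64_t)));
	return 0;
}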
diff --git a/tools/perf/util/include/linux/magic.h b/tools/perf/util/include/linux/magic.h new file mode 100644 index 000000000000..58b64ed4da12 --- /dev/null +++ b/tools/perf/util/include/linux/magic.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _PERF_LINUX_MAGIC_H_ | ||
2 | #define _PERF_LINUX_MAGIC_H_ | ||
3 | |||
4 | #ifndef DEBUGFS_MAGIC | ||
5 | #define DEBUGFS_MAGIC 0x64626720 | ||
6 | #endif | ||
7 | |||
8 | #ifndef SYSFS_MAGIC | ||
9 | #define SYSFS_MAGIC 0x62656572 | ||
10 | #endif | ||
11 | |||
12 | #endif | ||
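The new linux/magic.h fallback carries the debugfs and sysfs filesystem magic numbers, which perf compares against statfs() results when probing mount points. A hedged illustration; the helper name here is hypothetical and not from this patch:

/* Sketch: checking a mount point's filesystem type against DEBUGFS_MAGIC. */
#include <stdio.h>
#include <sys/vfs.h>

#ifndef DEBUGFS_MAGIC
#define DEBUGFS_MAGIC 0x64626720
#endif

static int is_debugfs(const char *path)
{
	struct statfs fs;

	if (statfs(path, &fs) < 0)
		return 0;
	return fs.f_type == DEBUGFS_MAGIC;
}

int main(void)
{
	printf("%s\n", is_debugfs("/sys/kernel/debug") ? "debugfs" : "not debugfs");
	return 0;
}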
diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h index 3b2f5900276f..6f19c548ecc0 100644 --- a/tools/perf/util/include/linux/string.h +++ b/tools/perf/util/include/linux/string.h | |||
@@ -1 +1,3 @@ | |||
1 | #include <string.h> | 1 | #include <string.h> |
2 | |||
3 | void *memdup(const void *src, size_t len); | ||
diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h index 12de3b8112f9..eb464786c084 100644 --- a/tools/perf/util/include/linux/types.h +++ b/tools/perf/util/include/linux/types.h | |||
@@ -3,6 +3,14 @@ | |||
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | 5 | ||
6 | #ifndef __bitwise | ||
7 | #define __bitwise | ||
8 | #endif | ||
9 | |||
10 | #ifndef __le32 | ||
11 | typedef __u32 __bitwise __le32; | ||
12 | #endif | ||
13 | |||
6 | #define DECLARE_BITMAP(name,bits) \ | 14 | #define DECLARE_BITMAP(name,bits) \ |
7 | unsigned long name[BITS_TO_LONGS(bits)] | 15 | unsigned long name[BITS_TO_LONGS(bits)] |
8 | 16 | ||
diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c index 77c504ff0088..9d0740024ba8 100644 --- a/tools/perf/util/intlist.c +++ b/tools/perf/util/intlist.c | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | #include "intlist.h" | 12 | #include "intlist.h" |
13 | 13 | ||
14 | static struct rb_node *intlist__node_new(struct rblist *rblist __used, | 14 | static struct rb_node *intlist__node_new(struct rblist *rblist __maybe_unused, |
15 | const void *entry) | 15 | const void *entry) |
16 | { | 16 | { |
17 | int i = (int)((long)entry); | 17 | int i = (int)((long)entry); |
@@ -31,7 +31,7 @@ static void int_node__delete(struct int_node *ilist) | |||
31 | free(ilist); | 31 | free(ilist); |
32 | } | 32 | } |
33 | 33 | ||
34 | static void intlist__node_delete(struct rblist *rblist __used, | 34 | static void intlist__node_delete(struct rblist *rblist __maybe_unused, |
35 | struct rb_node *rb_node) | 35 | struct rb_node *rb_node) |
36 | { | 36 | { |
37 | struct int_node *node = container_of(rb_node, struct int_node, rb_node); | 37 | struct int_node *node = container_of(rb_node, struct int_node, rb_node); |
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 7d37159c1e99..b442ee49452b 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include "map.h" | 9 | #include "map.h" |
10 | #include "thread.h" | 10 | #include "thread.h" |
11 | #include "strlist.h" | 11 | #include "strlist.h" |
12 | #include "vdso.h" | ||
12 | 13 | ||
13 | const char *map_type__name[MAP__NR_TYPES] = { | 14 | const char *map_type__name[MAP__NR_TYPES] = { |
14 | [MAP__FUNCTION] = "Functions", | 15 | [MAP__FUNCTION] = "Functions", |
@@ -23,7 +24,6 @@ static inline int is_anon_memory(const char *filename) | |||
23 | static inline int is_no_dso_memory(const char *filename) | 24 | static inline int is_no_dso_memory(const char *filename) |
24 | { | 25 | { |
25 | return !strcmp(filename, "[stack]") || | 26 | return !strcmp(filename, "[stack]") || |
26 | !strcmp(filename, "[vdso]") || | ||
27 | !strcmp(filename, "[heap]"); | 27 | !strcmp(filename, "[heap]"); |
28 | } | 28 | } |
29 | 29 | ||
@@ -52,9 +52,10 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, | |||
52 | if (self != NULL) { | 52 | if (self != NULL) { |
53 | char newfilename[PATH_MAX]; | 53 | char newfilename[PATH_MAX]; |
54 | struct dso *dso; | 54 | struct dso *dso; |
55 | int anon, no_dso; | 55 | int anon, no_dso, vdso; |
56 | 56 | ||
57 | anon = is_anon_memory(filename); | 57 | anon = is_anon_memory(filename); |
58 | vdso = is_vdso_map(filename); | ||
58 | no_dso = is_no_dso_memory(filename); | 59 | no_dso = is_no_dso_memory(filename); |
59 | 60 | ||
60 | if (anon) { | 61 | if (anon) { |
@@ -62,7 +63,12 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, | |||
62 | filename = newfilename; | 63 | filename = newfilename; |
63 | } | 64 | } |
64 | 65 | ||
65 | dso = __dsos__findnew(dsos__list, filename); | 66 | if (vdso) { |
67 | pgoff = 0; | ||
68 | dso = vdso__dso_findnew(dsos__list); | ||
69 | } else | ||
70 | dso = __dsos__findnew(dsos__list, filename); | ||
71 | |||
66 | if (dso == NULL) | 72 | if (dso == NULL) |
67 | goto out_delete; | 73 | goto out_delete; |
68 | 74 | ||
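map__new now detects "[vdso]" maps and resolves them through vdso__dso_findnew() rather than the regular DSO lookup, with pgoff forced to zero. is_vdso_map() itself is not shown in this hunk; judging by the VDSO__MAP_NAME usage in the new vdso.c further down, it is presumably just a name comparison, roughly:

/* Assumption: is_vdso_map() likely compares the map name against
 * VDSO__MAP_NAME, the string used by vdso.c below. */
#include <stdbool.h>
#include <string.h>

#define VDSO__MAP_NAME "[vdso]"

static inline bool is_vdso_map(const char *filename)
{
	return !strcmp(filename, VDSO__MAP_NAME);
}

int main(void)
{
	return is_vdso_map("[vdso]") ? 0 : 1;
}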
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 25ab4cdbc446..d2250fc97e25 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h | |||
@@ -96,7 +96,7 @@ static inline u64 map__unmap_ip(struct map *map, u64 ip) | |||
96 | return ip + map->start - map->pgoff; | 96 | return ip + map->start - map->pgoff; |
97 | } | 97 | } |
98 | 98 | ||
99 | static inline u64 identity__map_ip(struct map *map __used, u64 ip) | 99 | static inline u64 identity__map_ip(struct map *map __maybe_unused, u64 ip) |
100 | { | 100 | { |
101 | return ip; | 101 | return ip; |
102 | } | 102 | } |
diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c index bc8b65130ae0..d7244e553670 100644 --- a/tools/perf/util/parse-events-test.c +++ b/tools/perf/util/parse-events-test.c | |||
@@ -569,7 +569,7 @@ static int test__group2(struct perf_evlist *evlist) | |||
569 | return 0; | 569 | return 0; |
570 | } | 570 | } |
571 | 571 | ||
572 | static int test__group3(struct perf_evlist *evlist __used) | 572 | static int test__group3(struct perf_evlist *evlist __maybe_unused) |
573 | { | 573 | { |
574 | struct perf_evsel *evsel, *leader; | 574 | struct perf_evsel *evsel, *leader; |
575 | 575 | ||
@@ -648,7 +648,7 @@ static int test__group3(struct perf_evlist *evlist __used) | |||
648 | return 0; | 648 | return 0; |
649 | } | 649 | } |
650 | 650 | ||
651 | static int test__group4(struct perf_evlist *evlist __used) | 651 | static int test__group4(struct perf_evlist *evlist __maybe_unused) |
652 | { | 652 | { |
653 | struct perf_evsel *evsel, *leader; | 653 | struct perf_evsel *evsel, *leader; |
654 | 654 | ||
@@ -684,7 +684,7 @@ static int test__group4(struct perf_evlist *evlist __used) | |||
684 | return 0; | 684 | return 0; |
685 | } | 685 | } |
686 | 686 | ||
687 | static int test__group5(struct perf_evlist *evlist __used) | 687 | static int test__group5(struct perf_evlist *evlist __maybe_unused) |
688 | { | 688 | { |
689 | struct perf_evsel *evsel, *leader; | 689 | struct perf_evsel *evsel, *leader; |
690 | 690 | ||
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index a031ee1f54f6..44afcf40f796 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -807,7 +807,8 @@ int parse_events_terms(struct list_head *terms, const char *str) | |||
807 | return ret; | 807 | return ret; |
808 | } | 808 | } |
809 | 809 | ||
810 | int parse_events(struct perf_evlist *evlist, const char *str, int unset __used) | 810 | int parse_events(struct perf_evlist *evlist, const char *str, |
811 | int unset __maybe_unused) | ||
811 | { | 812 | { |
812 | struct parse_events_data__events data = { | 813 | struct parse_events_data__events data = { |
813 | .list = LIST_HEAD_INIT(data.list), | 814 | .list = LIST_HEAD_INIT(data.list), |
@@ -833,14 +834,14 @@ int parse_events(struct perf_evlist *evlist, const char *str, int unset __used) | |||
833 | } | 834 | } |
834 | 835 | ||
835 | int parse_events_option(const struct option *opt, const char *str, | 836 | int parse_events_option(const struct option *opt, const char *str, |
836 | int unset __used) | 837 | int unset __maybe_unused) |
837 | { | 838 | { |
838 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; | 839 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; |
839 | return parse_events(evlist, str, unset); | 840 | return parse_events(evlist, str, unset); |
840 | } | 841 | } |
841 | 842 | ||
842 | int parse_filter(const struct option *opt, const char *str, | 843 | int parse_filter(const struct option *opt, const char *str, |
843 | int unset __used) | 844 | int unset __maybe_unused) |
844 | { | 845 | { |
845 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; | 846 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; |
846 | struct perf_evsel *last = NULL; | 847 | struct perf_evsel *last = NULL; |
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index f5e28dc68270..c87efc12579d 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l | |||
@@ -207,7 +207,7 @@ r{num_raw_hex} { return raw(yyscanner); } | |||
207 | 207 | ||
208 | %% | 208 | %% |
209 | 209 | ||
210 | int parse_events_wrap(void *scanner __used) | 210 | int parse_events_wrap(void *scanner __maybe_unused) |
211 | { | 211 | { |
212 | return 1; | 212 | return 1; |
213 | } | 213 | } |
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 42d9a17b83b1..cd88209e3c58 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y | |||
@@ -391,7 +391,7 @@ sep_slash_dc: '/' | ':' | | |||
391 | 391 | ||
392 | %% | 392 | %% |
393 | 393 | ||
394 | void parse_events_error(void *data __used, void *scanner __used, | 394 | void parse_events_error(void *data __maybe_unused, void *scanner __maybe_unused, |
395 | char const *msg __used) | 395 | char const *msg __maybe_unused) |
396 | { | 396 | { |
397 | } | 397 | } |
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c index 594f8fad5ecd..443fc116512b 100644 --- a/tools/perf/util/parse-options.c +++ b/tools/perf/util/parse-options.c | |||
@@ -557,7 +557,8 @@ int parse_options_usage(const char * const *usagestr, | |||
557 | } | 557 | } |
558 | 558 | ||
559 | 559 | ||
560 | int parse_opt_verbosity_cb(const struct option *opt, const char *arg __used, | 560 | int parse_opt_verbosity_cb(const struct option *opt, |
561 | const char *arg __maybe_unused, | ||
561 | int unset) | 562 | int unset) |
562 | { | 563 | { |
563 | int *target = opt->value; | 564 | int *target = opt->value; |
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h index 9bd6c4e069c8..316dbe7f86ed 100644 --- a/tools/perf/util/perf_regs.h +++ b/tools/perf/util/perf_regs.h | |||
@@ -6,7 +6,7 @@ | |||
6 | #else | 6 | #else |
7 | #define PERF_REGS_MASK 0 | 7 | #define PERF_REGS_MASK 0 |
8 | 8 | ||
9 | static inline const char *perf_reg_name(int id __used) | 9 | static inline const char *perf_reg_name(int id __maybe_unused) |
10 | { | 10 | { |
11 | return NULL; | 11 | return NULL; |
12 | } | 12 | } |
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y index 20ea77e93169..ec898047ebb9 100644 --- a/tools/perf/util/pmu.y +++ b/tools/perf/util/pmu.y | |||
@@ -86,8 +86,8 @@ PP_VALUE | |||
86 | 86 | ||
87 | %% | 87 | %% |
88 | 88 | ||
89 | void perf_pmu_error(struct list_head *list __used, | 89 | void perf_pmu_error(struct list_head *list __maybe_unused, |
90 | char *name __used, | 90 | char *name __maybe_unused, |
91 | char const *msg __used) | 91 | char const *msg __maybe_unused) |
92 | { | 92 | { |
93 | } | 93 | } |
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index e8c72de0f70c..4ce04c2281d3 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -41,7 +41,7 @@ | |||
41 | #include "symbol.h" | 41 | #include "symbol.h" |
42 | #include "thread.h" | 42 | #include "thread.h" |
43 | #include "debugfs.h" | 43 | #include "debugfs.h" |
44 | #include "trace-event.h" /* For __unused */ | 44 | #include "trace-event.h" /* For __maybe_unused */ |
45 | #include "probe-event.h" | 45 | #include "probe-event.h" |
46 | #include "probe-finder.h" | 46 | #include "probe-finder.h" |
47 | #include "session.h" | 47 | #include "session.h" |
@@ -647,8 +647,8 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, | |||
647 | } | 647 | } |
648 | 648 | ||
649 | static int try_to_find_probe_trace_events(struct perf_probe_event *pev, | 649 | static int try_to_find_probe_trace_events(struct perf_probe_event *pev, |
650 | struct probe_trace_event **tevs __unused, | 650 | struct probe_trace_event **tevs __maybe_unused, |
651 | int max_tevs __unused, const char *target) | 651 | int max_tevs __maybe_unused, const char *target) |
652 | { | 652 | { |
653 | if (perf_probe_event_need_dwarf(pev)) { | 653 | if (perf_probe_event_need_dwarf(pev)) { |
654 | pr_warning("Debuginfo-analysis is not supported.\n"); | 654 | pr_warning("Debuginfo-analysis is not supported.\n"); |
@@ -661,17 +661,18 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, | |||
661 | return 0; | 661 | return 0; |
662 | } | 662 | } |
663 | 663 | ||
664 | int show_line_range(struct line_range *lr __unused, const char *module __unused) | 664 | int show_line_range(struct line_range *lr __maybe_unused, |
665 | const char *module __maybe_unused) | ||
665 | { | 666 | { |
666 | pr_warning("Debuginfo-analysis is not supported.\n"); | 667 | pr_warning("Debuginfo-analysis is not supported.\n"); |
667 | return -ENOSYS; | 668 | return -ENOSYS; |
668 | } | 669 | } |
669 | 670 | ||
670 | int show_available_vars(struct perf_probe_event *pevs __unused, | 671 | int show_available_vars(struct perf_probe_event *pevs __maybe_unused, |
671 | int npevs __unused, int max_vls __unused, | 672 | int npevs __maybe_unused, int max_vls __maybe_unused, |
672 | const char *module __unused, | 673 | const char *module __maybe_unused, |
673 | struct strfilter *filter __unused, | 674 | struct strfilter *filter __maybe_unused, |
674 | bool externs __unused) | 675 | bool externs __maybe_unused) |
675 | { | 676 | { |
676 | pr_warning("Debuginfo-analysis is not supported.\n"); | 677 | pr_warning("Debuginfo-analysis is not supported.\n"); |
677 | return -ENOSYS; | 678 | return -ENOSYS; |
@@ -2183,7 +2184,7 @@ static struct strfilter *available_func_filter; | |||
2183 | * If a symbol corresponds to a function with global binding and | 2184 | * If a symbol corresponds to a function with global binding and |
2184 | * matches filter return 0. For all others return 1. | 2185 | * matches filter return 0. For all others return 1. |
2185 | */ | 2186 | */ |
2186 | static int filter_available_functions(struct map *map __unused, | 2187 | static int filter_available_functions(struct map *map __maybe_unused, |
2187 | struct symbol *sym) | 2188 | struct symbol *sym) |
2188 | { | 2189 | { |
2189 | if (sym->binding == STB_GLOBAL && | 2190 | if (sym->binding == STB_GLOBAL && |
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index d448984ed789..526ba56e720b 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
@@ -207,7 +207,7 @@ static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self, | |||
207 | #else | 207 | #else |
208 | /* With older elfutils, this just support kernel module... */ | 208 | /* With older elfutils, this just support kernel module... */ |
209 | static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self, | 209 | static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self, |
210 | Dwarf_Addr addr __used) | 210 | Dwarf_Addr addr __maybe_unused) |
211 | { | 211 | { |
212 | const char *path = kernel_get_module_path("kernel"); | 212 | const char *path = kernel_get_module_path("kernel"); |
213 | 213 | ||
@@ -1419,7 +1419,7 @@ static int line_range_add_line(const char *src, unsigned int lineno, | |||
1419 | } | 1419 | } |
1420 | 1420 | ||
1421 | static int line_range_walk_cb(const char *fname, int lineno, | 1421 | static int line_range_walk_cb(const char *fname, int lineno, |
1422 | Dwarf_Addr addr __used, | 1422 | Dwarf_Addr addr __maybe_unused, |
1423 | void *data) | 1423 | void *data) |
1424 | { | 1424 | { |
1425 | struct line_finder *lf = data; | 1425 | struct line_finder *lf = data; |
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 27187f0b71f0..ca85444bcfbf 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c | |||
@@ -672,7 +672,7 @@ struct pyrf_evlist { | |||
672 | }; | 672 | }; |
673 | 673 | ||
674 | static int pyrf_evlist__init(struct pyrf_evlist *pevlist, | 674 | static int pyrf_evlist__init(struct pyrf_evlist *pevlist, |
675 | PyObject *args, PyObject *kwargs __used) | 675 | PyObject *args, PyObject *kwargs __maybe_unused) |
676 | { | 676 | { |
677 | PyObject *pcpus = NULL, *pthreads = NULL; | 677 | PyObject *pcpus = NULL, *pthreads = NULL; |
678 | struct cpu_map *cpus; | 678 | struct cpu_map *cpus; |
@@ -733,7 +733,8 @@ static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist, | |||
733 | } | 733 | } |
734 | 734 | ||
735 | static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, | 735 | static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, |
736 | PyObject *args __used, PyObject *kwargs __used) | 736 | PyObject *args __maybe_unused, |
737 | PyObject *kwargs __maybe_unused) | ||
737 | { | 738 | { |
738 | struct perf_evlist *evlist = &pevlist->evlist; | 739 | struct perf_evlist *evlist = &pevlist->evlist; |
739 | PyObject *list = PyList_New(0); | 740 | PyObject *list = PyList_New(0); |
@@ -765,7 +766,8 @@ free_list: | |||
765 | 766 | ||
766 | 767 | ||
767 | static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist, | 768 | static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist, |
768 | PyObject *args, PyObject *kwargs __used) | 769 | PyObject *args, |
770 | PyObject *kwargs __maybe_unused) | ||
769 | { | 771 | { |
770 | struct perf_evlist *evlist = &pevlist->evlist; | 772 | struct perf_evlist *evlist = &pevlist->evlist; |
771 | PyObject *pevsel; | 773 | PyObject *pevsel; |
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index 94e673643bcb..ffde3e4e34aa 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c | |||
@@ -257,10 +257,10 @@ static inline struct event_format *find_cache_event(struct perf_evsel *evsel) | |||
257 | return event; | 257 | return event; |
258 | } | 258 | } |
259 | 259 | ||
260 | static void perl_process_tracepoint(union perf_event *perf_event __unused, | 260 | static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, |
261 | struct perf_sample *sample, | 261 | struct perf_sample *sample, |
262 | struct perf_evsel *evsel, | 262 | struct perf_evsel *evsel, |
263 | struct machine *machine __unused, | 263 | struct machine *machine __maybe_unused, |
264 | struct addr_location *al) | 264 | struct addr_location *al) |
265 | { | 265 | { |
266 | struct format_field *field; | 266 | struct format_field *field; |
@@ -349,8 +349,8 @@ static void perl_process_tracepoint(union perf_event *perf_event __unused, | |||
349 | static void perl_process_event_generic(union perf_event *event, | 349 | static void perl_process_event_generic(union perf_event *event, |
350 | struct perf_sample *sample, | 350 | struct perf_sample *sample, |
351 | struct perf_evsel *evsel, | 351 | struct perf_evsel *evsel, |
352 | struct machine *machine __unused, | 352 | struct machine *machine __maybe_unused, |
353 | struct addr_location *al __unused) | 353 | struct addr_location *al __maybe_unused) |
354 | { | 354 | { |
355 | dSP; | 355 | dSP; |
356 | 356 | ||
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index afba09729183..730c6630cba5 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c | |||
@@ -221,10 +221,11 @@ static inline struct event_format *find_cache_event(struct perf_evsel *evsel) | |||
221 | return event; | 221 | return event; |
222 | } | 222 | } |
223 | 223 | ||
224 | static void python_process_tracepoint(union perf_event *perf_event __unused, | 224 | static void python_process_tracepoint(union perf_event *perf_event |
225 | __maybe_unused, | ||
225 | struct perf_sample *sample, | 226 | struct perf_sample *sample, |
226 | struct perf_evsel *evsel, | 227 | struct perf_evsel *evsel, |
227 | struct machine *machine __unused, | 228 | struct machine *machine __maybe_unused, |
228 | struct addr_location *al) | 229 | struct addr_location *al) |
229 | { | 230 | { |
230 | PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; | 231 | PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; |
@@ -339,10 +340,11 @@ static void python_process_tracepoint(union perf_event *perf_event __unused, | |||
339 | Py_DECREF(t); | 340 | Py_DECREF(t); |
340 | } | 341 | } |
341 | 342 | ||
342 | static void python_process_general_event(union perf_event *perf_event __unused, | 343 | static void python_process_general_event(union perf_event *perf_event |
344 | __maybe_unused, | ||
343 | struct perf_sample *sample, | 345 | struct perf_sample *sample, |
344 | struct perf_evsel *evsel, | 346 | struct perf_evsel *evsel, |
345 | struct machine *machine __unused, | 347 | struct machine *machine __maybe_unused, |
346 | struct addr_location *al) | 348 | struct addr_location *al) |
347 | { | 349 | { |
348 | PyObject *handler, *retval, *t, *dict; | 350 | PyObject *handler, *retval, *t, *dict; |
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 945375897c2a..3049b0ae7003 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include "event-parse.h" | 17 | #include "event-parse.h" |
18 | #include "perf_regs.h" | 18 | #include "perf_regs.h" |
19 | #include "unwind.h" | 19 | #include "unwind.h" |
20 | #include "vdso.h" | ||
20 | 21 | ||
21 | static int perf_session__open(struct perf_session *self, bool force) | 22 | static int perf_session__open(struct perf_session *self, bool force) |
22 | { | 23 | { |
@@ -211,6 +212,7 @@ void perf_session__delete(struct perf_session *self) | |||
211 | machine__exit(&self->host_machine); | 212 | machine__exit(&self->host_machine); |
212 | close(self->fd); | 213 | close(self->fd); |
213 | free(self); | 214 | free(self); |
215 | vdso__exit(); | ||
214 | } | 216 | } |
215 | 217 | ||
216 | void machine__remove_thread(struct machine *self, struct thread *th) | 218 | void machine__remove_thread(struct machine *self, struct thread *th) |
@@ -388,55 +390,64 @@ int machine__resolve_callchain(struct machine *machine, | |||
388 | (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) | 390 | (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) |
389 | return 0; | 391 | return 0; |
390 | 392 | ||
393 | /* Bail out if nothing was captured. */ | ||
394 | if ((!sample->user_regs.regs) || | ||
395 | (!sample->user_stack.size)) | ||
396 | return 0; | ||
397 | |||
391 | return unwind__get_entries(unwind_entry, &callchain_cursor, machine, | 398 | return unwind__get_entries(unwind_entry, &callchain_cursor, machine, |
392 | thread, evsel->attr.sample_regs_user, | 399 | thread, evsel->attr.sample_regs_user, |
393 | sample); | 400 | sample); |
394 | 401 | ||
395 | } | 402 | } |
396 | 403 | ||
397 | static int process_event_synth_tracing_data_stub(union perf_event *event __used, | 404 | static int process_event_synth_tracing_data_stub(union perf_event *event |
398 | struct perf_session *session __used) | 405 | __maybe_unused, |
406 | struct perf_session *session | ||
407 | __maybe_unused) | ||
399 | { | 408 | { |
400 | dump_printf(": unhandled!\n"); | 409 | dump_printf(": unhandled!\n"); |
401 | return 0; | 410 | return 0; |
402 | } | 411 | } |
403 | 412 | ||
404 | static int process_event_synth_attr_stub(union perf_event *event __used, | 413 | static int process_event_synth_attr_stub(union perf_event *event __maybe_unused, |
405 | struct perf_evlist **pevlist __used) | 414 | struct perf_evlist **pevlist |
415 | __maybe_unused) | ||
406 | { | 416 | { |
407 | dump_printf(": unhandled!\n"); | 417 | dump_printf(": unhandled!\n"); |
408 | return 0; | 418 | return 0; |
409 | } | 419 | } |
410 | 420 | ||
411 | static int process_event_sample_stub(struct perf_tool *tool __used, | 421 | static int process_event_sample_stub(struct perf_tool *tool __maybe_unused, |
412 | union perf_event *event __used, | 422 | union perf_event *event __maybe_unused, |
413 | struct perf_sample *sample __used, | 423 | struct perf_sample *sample __maybe_unused, |
414 | struct perf_evsel *evsel __used, | 424 | struct perf_evsel *evsel __maybe_unused, |
415 | struct machine *machine __used) | 425 | struct machine *machine __maybe_unused) |
416 | { | 426 | { |
417 | dump_printf(": unhandled!\n"); | 427 | dump_printf(": unhandled!\n"); |
418 | return 0; | 428 | return 0; |
419 | } | 429 | } |
420 | 430 | ||
421 | static int process_event_stub(struct perf_tool *tool __used, | 431 | static int process_event_stub(struct perf_tool *tool __maybe_unused, |
422 | union perf_event *event __used, | 432 | union perf_event *event __maybe_unused, |
423 | struct perf_sample *sample __used, | 433 | struct perf_sample *sample __maybe_unused, |
424 | struct machine *machine __used) | 434 | struct machine *machine __maybe_unused) |
425 | { | 435 | { |
426 | dump_printf(": unhandled!\n"); | 436 | dump_printf(": unhandled!\n"); |
427 | return 0; | 437 | return 0; |
428 | } | 438 | } |
429 | 439 | ||
430 | static int process_finished_round_stub(struct perf_tool *tool __used, | 440 | static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, |
431 | union perf_event *event __used, | 441 | union perf_event *event __maybe_unused, |
432 | struct perf_session *perf_session __used) | 442 | struct perf_session *perf_session |
443 | __maybe_unused) | ||
433 | { | 444 | { |
434 | dump_printf(": unhandled!\n"); | 445 | dump_printf(": unhandled!\n"); |
435 | return 0; | 446 | return 0; |
436 | } | 447 | } |
437 | 448 | ||
438 | static int process_event_type_stub(struct perf_tool *tool __used, | 449 | static int process_event_type_stub(struct perf_tool *tool __maybe_unused, |
439 | union perf_event *event __used) | 450 | union perf_event *event __maybe_unused) |
440 | { | 451 | { |
441 | dump_printf(": unhandled!\n"); | 452 | dump_printf(": unhandled!\n"); |
442 | return 0; | 453 | return 0; |
@@ -513,7 +524,7 @@ static void swap_sample_id_all(union perf_event *event, void *data) | |||
513 | } | 524 | } |
514 | 525 | ||
515 | static void perf_event__all64_swap(union perf_event *event, | 526 | static void perf_event__all64_swap(union perf_event *event, |
516 | bool sample_id_all __used) | 527 | bool sample_id_all __maybe_unused) |
517 | { | 528 | { |
518 | struct perf_event_header *hdr = &event->header; | 529 | struct perf_event_header *hdr = &event->header; |
519 | mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); | 530 | mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); |
@@ -527,7 +538,7 @@ static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) | |||
527 | if (sample_id_all) { | 538 | if (sample_id_all) { |
528 | void *data = &event->comm.comm; | 539 | void *data = &event->comm.comm; |
529 | 540 | ||
530 | data += ALIGN(strlen(data) + 1, sizeof(u64)); | 541 | data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); |
531 | swap_sample_id_all(event, data); | 542 | swap_sample_id_all(event, data); |
532 | } | 543 | } |
533 | } | 544 | } |
@@ -544,7 +555,7 @@ static void perf_event__mmap_swap(union perf_event *event, | |||
544 | if (sample_id_all) { | 555 | if (sample_id_all) { |
545 | void *data = &event->mmap.filename; | 556 | void *data = &event->mmap.filename; |
546 | 557 | ||
547 | data += ALIGN(strlen(data) + 1, sizeof(u64)); | 558 | data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); |
548 | swap_sample_id_all(event, data); | 559 | swap_sample_id_all(event, data); |
549 | } | 560 | } |
550 | } | 561 | } |
@@ -624,7 +635,7 @@ void perf_event__attr_swap(struct perf_event_attr *attr) | |||
624 | } | 635 | } |
625 | 636 | ||
626 | static void perf_event__hdr_attr_swap(union perf_event *event, | 637 | static void perf_event__hdr_attr_swap(union perf_event *event, |
627 | bool sample_id_all __used) | 638 | bool sample_id_all __maybe_unused) |
628 | { | 639 | { |
629 | size_t size; | 640 | size_t size; |
630 | 641 | ||
@@ -636,14 +647,14 @@ static void perf_event__hdr_attr_swap(union perf_event *event, | |||
636 | } | 647 | } |
637 | 648 | ||
638 | static void perf_event__event_type_swap(union perf_event *event, | 649 | static void perf_event__event_type_swap(union perf_event *event, |
639 | bool sample_id_all __used) | 650 | bool sample_id_all __maybe_unused) |
640 | { | 651 | { |
641 | event->event_type.event_type.event_id = | 652 | event->event_type.event_type.event_id = |
642 | bswap_64(event->event_type.event_type.event_id); | 653 | bswap_64(event->event_type.event_type.event_id); |
643 | } | 654 | } |
644 | 655 | ||
645 | static void perf_event__tracing_data_swap(union perf_event *event, | 656 | static void perf_event__tracing_data_swap(union perf_event *event, |
646 | bool sample_id_all __used) | 657 | bool sample_id_all __maybe_unused) |
647 | { | 658 | { |
648 | event->tracing_data.size = bswap_32(event->tracing_data.size); | 659 | event->tracing_data.size = bswap_32(event->tracing_data.size); |
649 | } | 660 | } |
@@ -784,7 +795,7 @@ static int flush_sample_queue(struct perf_session *s, | |||
784 | * etc... | 795 | * etc... |
785 | */ | 796 | */ |
786 | static int process_finished_round(struct perf_tool *tool, | 797 | static int process_finished_round(struct perf_tool *tool, |
787 | union perf_event *event __used, | 798 | union perf_event *event __maybe_unused, |
788 | struct perf_session *session) | 799 | struct perf_session *session) |
789 | { | 800 | { |
790 | int ret = flush_sample_queue(session, tool); | 801 | int ret = flush_sample_queue(session, tool); |
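In the swap helpers above, PERF_ALIGN steps past the u64-aligned, nul-terminated string (the comm or mmap filename) to reach the trailing sample_id data. A simplified sketch of that arithmetic, using a non-typeof variant of the macro for illustration only:

/* Sketch: a 5-byte string ("perf" plus nul) occupies 8 aligned bytes. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PERF_ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	char comm[] = "perf";
	size_t skip = PERF_ALIGN(strlen(comm) + 1, sizeof(uint64_t));

	printf("string occupies %zu aligned bytes\n", skip); /* prints 8 */
	return 0;
}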
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 7a2fbd8855b7..0981bc7a2917 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c | |||
@@ -170,7 +170,7 @@ static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, | |||
170 | 170 | ||
171 | static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, | 171 | static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, |
172 | u64 ip, char level, char *bf, size_t size, | 172 | u64 ip, char level, char *bf, size_t size, |
173 | unsigned int width __used) | 173 | unsigned int width __maybe_unused) |
174 | { | 174 | { |
175 | size_t ret = 0; | 175 | size_t ret = 0; |
176 | 176 | ||
@@ -205,7 +205,8 @@ struct sort_entry sort_dso = { | |||
205 | }; | 205 | }; |
206 | 206 | ||
207 | static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, | 207 | static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, |
208 | size_t size, unsigned int width __used) | 208 | size_t size, |
209 | unsigned int width __maybe_unused) | ||
209 | { | 210 | { |
210 | return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, | 211 | return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, |
211 | self->level, bf, size, width); | 212 | self->level, bf, size, width); |
@@ -248,7 +249,8 @@ sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) | |||
248 | } | 249 | } |
249 | 250 | ||
250 | static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf, | 251 | static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf, |
251 | size_t size, unsigned int width __used) | 252 | size_t size, |
253 | unsigned int width __maybe_unused) | ||
252 | { | 254 | { |
253 | FILE *fp; | 255 | FILE *fp; |
254 | char cmd[PATH_MAX + 2], *path = self->srcline, *nl; | 256 | char cmd[PATH_MAX + 2], *path = self->srcline, *nl; |
@@ -397,7 +399,8 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) | |||
397 | } | 399 | } |
398 | 400 | ||
399 | static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, | 401 | static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, |
400 | size_t size, unsigned int width __used) | 402 | size_t size, |
403 | unsigned int width __maybe_unused) | ||
401 | { | 404 | { |
402 | struct addr_map_symbol *from = &self->branch_info->from; | 405 | struct addr_map_symbol *from = &self->branch_info->from; |
403 | return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, | 406 | return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, |
@@ -406,7 +409,8 @@ static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, | |||
406 | } | 409 | } |
407 | 410 | ||
408 | static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, | 411 | static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, |
409 | size_t size, unsigned int width __used) | 412 | size_t size, |
413 | unsigned int width __maybe_unused) | ||
410 | { | 414 | { |
411 | struct addr_map_symbol *to = &self->branch_info->to; | 415 | struct addr_map_symbol *to = &self->branch_info->to; |
412 | return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, | 416 | return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, |
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index 199bc4d8905d..32170590892d 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c | |||
@@ -1,5 +1,5 @@ | |||
1 | #include "util.h" | 1 | #include "util.h" |
2 | #include "string.h" | 2 | #include "linux/string.h" |
3 | 3 | ||
4 | #define K 1024LL | 4 | #define K 1024LL |
5 | /* | 5 | /* |
@@ -335,3 +335,19 @@ char *rtrim(char *s) | |||
335 | 335 | ||
336 | return s; | 336 | return s; |
337 | } | 337 | } |
338 | |||
339 | /** | ||
340 | * memdup - duplicate region of memory | ||
341 | * @src: memory region to duplicate | ||
342 | * @len: memory region length | ||
343 | */ | ||
344 | void *memdup(const void *src, size_t len) | ||
345 | { | ||
346 | void *p; | ||
347 | |||
348 | p = malloc(len); | ||
349 | if (p) | ||
350 | memcpy(p, src, len); | ||
351 | |||
352 | return p; | ||
353 | } | ||
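memdup() added above is the usual malloc-plus-memcpy helper; the new vdso code below uses it to copy the mapped vDSO image before writing it to a temporary file. A small hypothetical caller, with the definition repeated so the sketch is self-contained:

/* Sketch: duplicating a buffer with memdup() and freeing the copy. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char buf[] = "vdso image bytes";
	char *copy = memdup(buf, sizeof(buf));

	if (!copy)
		return 1;
	printf("%s\n", copy);
	free(copy);
	return 0;
}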
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c index 6738ea128c90..259f8f2ea9c9 100644 --- a/tools/perf/util/symbol-minimal.c +++ b/tools/perf/util/symbol-minimal.c | |||
@@ -69,8 +69,9 @@ static int read_build_id(void *note_data, size_t note_len, void *bf, | |||
69 | return -1; | 69 | return -1; |
70 | } | 70 | } |
71 | 71 | ||
72 | int filename__read_debuglink(const char *filename __used, | 72 | int filename__read_debuglink(const char *filename __maybe_unused, |
73 | char *debuglink __used, size_t size __used) | 73 | char *debuglink __maybe_unused, |
74 | size_t size __maybe_unused) | ||
74 | { | 75 | { |
75 | return -1; | 76 | return -1; |
76 | } | 77 | } |
@@ -241,7 +242,8 @@ out: | |||
241 | return ret; | 242 | return ret; |
242 | } | 243 | } |
243 | 244 | ||
244 | int symsrc__init(struct symsrc *ss, struct dso *dso __used, const char *name, | 245 | int symsrc__init(struct symsrc *ss, struct dso *dso __maybe_unused, |
246 | const char *name, | ||
245 | enum dso_binary_type type) | 247 | enum dso_binary_type type) |
246 | { | 248 | { |
247 | int fd = open(name, O_RDONLY); | 249 | int fd = open(name, O_RDONLY); |
@@ -260,13 +262,13 @@ out_close: | |||
260 | return -1; | 262 | return -1; |
261 | } | 263 | } |
262 | 264 | ||
263 | bool symsrc__possibly_runtime(struct symsrc *ss __used) | 265 | bool symsrc__possibly_runtime(struct symsrc *ss __maybe_unused) |
264 | { | 266 | { |
265 | /* Assume all sym sources could be a runtime image. */ | 267 | /* Assume all sym sources could be a runtime image. */ |
266 | return true; | 268 | return true; |
267 | } | 269 | } |
268 | 270 | ||
269 | bool symsrc__has_symtab(struct symsrc *ss __used) | 271 | bool symsrc__has_symtab(struct symsrc *ss __maybe_unused) |
270 | { | 272 | { |
271 | return false; | 273 | return false; |
272 | } | 274 | } |
@@ -277,17 +279,19 @@ void symsrc__destroy(struct symsrc *ss) | |||
277 | close(ss->fd); | 279 | close(ss->fd); |
278 | } | 280 | } |
279 | 281 | ||
280 | int dso__synthesize_plt_symbols(struct dso *dso __used, | 282 | int dso__synthesize_plt_symbols(struct dso *dso __maybe_unused, |
281 | struct symsrc *ss __used, | 283 | struct symsrc *ss __maybe_unused, |
282 | struct map *map __used, | 284 | struct map *map __maybe_unused, |
283 | symbol_filter_t filter __used) | 285 | symbol_filter_t filter __maybe_unused) |
284 | { | 286 | { |
285 | return 0; | 287 | return 0; |
286 | } | 288 | } |
287 | 289 | ||
288 | int dso__load_sym(struct dso *dso, struct map *map __used, struct symsrc *ss, | 290 | int dso__load_sym(struct dso *dso, struct map *map __maybe_unused, |
289 | struct symsrc *runtime_ss __used, | 291 | struct symsrc *ss, |
290 | symbol_filter_t filter __used, int kmodule __used) | 292 | struct symsrc *runtime_ss __maybe_unused, |
293 | symbol_filter_t filter __maybe_unused, | ||
294 | int kmodule __maybe_unused) | ||
291 | { | 295 | { |
292 | unsigned char *build_id[BUILD_ID_SIZE]; | 296 | unsigned char *build_id[BUILD_ID_SIZE]; |
293 | 297 | ||
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 753699a20bc8..e2e8c697cffe 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -1596,7 +1596,7 @@ void dsos__add(struct list_head *head, struct dso *dso) | |||
1596 | list_add_tail(&dso->node, head); | 1596 | list_add_tail(&dso->node, head); |
1597 | } | 1597 | } |
1598 | 1598 | ||
1599 | static struct dso *dsos__find(struct list_head *head, const char *name) | 1599 | struct dso *dsos__find(struct list_head *head, const char *name) |
1600 | { | 1600 | { |
1601 | struct dso *pos; | 1601 | struct dso *pos; |
1602 | 1602 | ||
@@ -1755,7 +1755,7 @@ struct process_args { | |||
1755 | }; | 1755 | }; |
1756 | 1756 | ||
1757 | static int symbol__in_kernel(void *arg, const char *name, | 1757 | static int symbol__in_kernel(void *arg, const char *name, |
1758 | char type __used, u64 start) | 1758 | char type __maybe_unused, u64 start) |
1759 | { | 1759 | { |
1760 | struct process_args *args = arg; | 1760 | struct process_args *args = arg; |
1761 | 1761 | ||
@@ -1991,7 +1991,7 @@ int symbol__init(void) | |||
1991 | if (symbol_conf.initialized) | 1991 | if (symbol_conf.initialized) |
1992 | return 0; | 1992 | return 0; |
1993 | 1993 | ||
1994 | symbol_conf.priv_size = ALIGN(symbol_conf.priv_size, sizeof(u64)); | 1994 | symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64)); |
1995 | 1995 | ||
1996 | symbol__elf_init(); | 1996 | symbol__elf_init(); |
1997 | 1997 | ||
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 41a15dac4120..4ff45e30c726 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -21,14 +21,15 @@ | |||
21 | #ifdef HAVE_CPLUS_DEMANGLE | 21 | #ifdef HAVE_CPLUS_DEMANGLE |
22 | extern char *cplus_demangle(const char *, int); | 22 | extern char *cplus_demangle(const char *, int); |
23 | 23 | ||
24 | static inline char *bfd_demangle(void __used *v, const char *c, int i) | 24 | static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i) |
25 | { | 25 | { |
26 | return cplus_demangle(c, i); | 26 | return cplus_demangle(c, i); |
27 | } | 27 | } |
28 | #else | 28 | #else |
29 | #ifdef NO_DEMANGLE | 29 | #ifdef NO_DEMANGLE |
30 | static inline char *bfd_demangle(void __used *v, const char __used *c, | 30 | static inline char *bfd_demangle(void __maybe_unused *v, |
31 | int __used i) | 31 | const char __maybe_unused *c, |
32 | int __maybe_unused i) | ||
32 | { | 33 | { |
33 | return NULL; | 34 | return NULL; |
34 | } | 35 | } |
@@ -294,6 +295,7 @@ static inline void dso__set_loaded(struct dso *dso, enum map_type type) | |||
294 | void dso__sort_by_name(struct dso *dso, enum map_type type); | 295 | void dso__sort_by_name(struct dso *dso, enum map_type type); |
295 | 296 | ||
296 | void dsos__add(struct list_head *head, struct dso *dso); | 297 | void dsos__add(struct list_head *head, struct dso *dso); |
298 | struct dso *dsos__find(struct list_head *head, const char *name); | ||
297 | struct dso *__dsos__findnew(struct list_head *head, const char *name); | 299 | struct dso *__dsos__findnew(struct list_head *head, const char *name); |
298 | 300 | ||
299 | int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter); | 301 | int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter); |
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index a5a554efeb50..aa4c860a21d1 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c | |||
@@ -221,7 +221,7 @@ void print_event(struct pevent *pevent, int cpu, void *data, int size, | |||
221 | } | 221 | } |
222 | 222 | ||
223 | void parse_proc_kallsyms(struct pevent *pevent, | 223 | void parse_proc_kallsyms(struct pevent *pevent, |
224 | char *file, unsigned int size __unused) | 224 | char *file, unsigned int size __maybe_unused) |
225 | { | 225 | { |
226 | unsigned long long addr; | 226 | unsigned long long addr; |
227 | char *func; | 227 | char *func; |
@@ -253,7 +253,7 @@ void parse_proc_kallsyms(struct pevent *pevent, | |||
253 | } | 253 | } |
254 | 254 | ||
255 | void parse_ftrace_printk(struct pevent *pevent, | 255 | void parse_ftrace_printk(struct pevent *pevent, |
256 | char *file, unsigned int size __unused) | 256 | char *file, unsigned int size __maybe_unused) |
257 | { | 257 | { |
258 | unsigned long long addr; | 258 | unsigned long long addr; |
259 | char *printk; | 259 | char *printk; |
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index 302ff262494c..8715a1006d00 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c | |||
@@ -35,11 +35,11 @@ static int stop_script_unsupported(void) | |||
35 | return 0; | 35 | return 0; |
36 | } | 36 | } |
37 | 37 | ||
38 | static void process_event_unsupported(union perf_event *event __unused, | 38 | static void process_event_unsupported(union perf_event *event __maybe_unused, |
39 | struct perf_sample *sample __unused, | 39 | struct perf_sample *sample __maybe_unused, |
40 | struct perf_evsel *evsel __unused, | 40 | struct perf_evsel *evsel __maybe_unused, |
41 | struct machine *machine __unused, | 41 | struct machine *machine __maybe_unused, |
42 | struct addr_location *al __unused) | 42 | struct addr_location *al __maybe_unused) |
43 | { | 43 | { |
44 | } | 44 | } |
45 | 45 | ||
@@ -52,17 +52,19 @@ static void print_python_unsupported_msg(void) | |||
52 | "\n etc.\n"); | 52 | "\n etc.\n"); |
53 | } | 53 | } |
54 | 54 | ||
55 | static int python_start_script_unsupported(const char *script __unused, | 55 | static int python_start_script_unsupported(const char *script __maybe_unused, |
56 | int argc __unused, | 56 | int argc __maybe_unused, |
57 | const char **argv __unused) | 57 | const char **argv __maybe_unused) |
58 | { | 58 | { |
59 | print_python_unsupported_msg(); | 59 | print_python_unsupported_msg(); |
60 | 60 | ||
61 | return -1; | 61 | return -1; |
62 | } | 62 | } |
63 | 63 | ||
64 | static int python_generate_script_unsupported(struct pevent *pevent __unused, | 64 | static int python_generate_script_unsupported(struct pevent *pevent |
65 | const char *outfile __unused) | 65 | __maybe_unused, |
66 | const char *outfile | ||
67 | __maybe_unused) | ||
66 | { | 68 | { |
67 | print_python_unsupported_msg(); | 69 | print_python_unsupported_msg(); |
68 | 70 | ||
@@ -114,17 +116,18 @@ static void print_perl_unsupported_msg(void) | |||
114 | "\n etc.\n"); | 116 | "\n etc.\n"); |
115 | } | 117 | } |
116 | 118 | ||
117 | static int perl_start_script_unsupported(const char *script __unused, | 119 | static int perl_start_script_unsupported(const char *script __maybe_unused, |
118 | int argc __unused, | 120 | int argc __maybe_unused, |
119 | const char **argv __unused) | 121 | const char **argv __maybe_unused) |
120 | { | 122 | { |
121 | print_perl_unsupported_msg(); | 123 | print_perl_unsupported_msg(); |
122 | 124 | ||
123 | return -1; | 125 | return -1; |
124 | } | 126 | } |
125 | 127 | ||
126 | static int perl_generate_script_unsupported(struct pevent *pevent __unused, | 128 | static int perl_generate_script_unsupported(struct pevent *pevent |
127 | const char *outfile __unused) | 129 | __maybe_unused, |
130 | const char *outfile __maybe_unused) | ||
128 | { | 131 | { |
129 | print_perl_unsupported_msg(); | 132 | print_perl_unsupported_msg(); |
130 | 133 | ||
diff --git a/tools/perf/util/unwind.c b/tools/perf/util/unwind.c index 00a42aa8d5c1..958723ba3d2e 100644 --- a/tools/perf/util/unwind.c +++ b/tools/perf/util/unwind.c | |||
@@ -307,32 +307,36 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, | |||
307 | need_unwind_info, arg); | 307 | need_unwind_info, arg); |
308 | } | 308 | } |
309 | 309 | ||
310 | static int access_fpreg(unw_addr_space_t __used as, unw_regnum_t __used num, | 310 | static int access_fpreg(unw_addr_space_t __maybe_unused as, |
311 | unw_fpreg_t __used *val, int __used __write, | 311 | unw_regnum_t __maybe_unused num, |
312 | void __used *arg) | 312 | unw_fpreg_t __maybe_unused *val, |
313 | int __maybe_unused __write, | ||
314 | void __maybe_unused *arg) | ||
313 | { | 315 | { |
314 | pr_err("unwind: access_fpreg unsupported\n"); | 316 | pr_err("unwind: access_fpreg unsupported\n"); |
315 | return -UNW_EINVAL; | 317 | return -UNW_EINVAL; |
316 | } | 318 | } |
317 | 319 | ||
318 | static int get_dyn_info_list_addr(unw_addr_space_t __used as, | 320 | static int get_dyn_info_list_addr(unw_addr_space_t __maybe_unused as, |
319 | unw_word_t __used *dil_addr, | 321 | unw_word_t __maybe_unused *dil_addr, |
320 | void __used *arg) | 322 | void __maybe_unused *arg) |
321 | { | 323 | { |
322 | return -UNW_ENOINFO; | 324 | return -UNW_ENOINFO; |
323 | } | 325 | } |
324 | 326 | ||
325 | static int resume(unw_addr_space_t __used as, unw_cursor_t __used *cu, | 327 | static int resume(unw_addr_space_t __maybe_unused as, |
326 | void __used *arg) | 328 | unw_cursor_t __maybe_unused *cu, |
329 | void __maybe_unused *arg) | ||
327 | { | 330 | { |
328 | pr_err("unwind: resume unsupported\n"); | 331 | pr_err("unwind: resume unsupported\n"); |
329 | return -UNW_EINVAL; | 332 | return -UNW_EINVAL; |
330 | } | 333 | } |
331 | 334 | ||
332 | static int | 335 | static int |
333 | get_proc_name(unw_addr_space_t __used as, unw_word_t __used addr, | 336 | get_proc_name(unw_addr_space_t __maybe_unused as, |
334 | char __used *bufp, size_t __used buf_len, | 337 | unw_word_t __maybe_unused addr, |
335 | unw_word_t __used *offp, void __used *arg) | 338 | char __maybe_unused *bufp, size_t __maybe_unused buf_len, |
339 | unw_word_t __maybe_unused *offp, void __maybe_unused *arg) | ||
336 | { | 340 | { |
337 | pr_err("unwind: get_proc_name unsupported\n"); | 341 | pr_err("unwind: get_proc_name unsupported\n"); |
338 | return -UNW_EINVAL; | 342 | return -UNW_EINVAL; |
@@ -377,7 +381,7 @@ static int reg_value(unw_word_t *valp, struct regs_dump *regs, int id, | |||
377 | return 0; | 381 | return 0; |
378 | } | 382 | } |
379 | 383 | ||
380 | static int access_mem(unw_addr_space_t __used as, | 384 | static int access_mem(unw_addr_space_t __maybe_unused as, |
381 | unw_word_t addr, unw_word_t *valp, | 385 | unw_word_t addr, unw_word_t *valp, |
382 | int __write, void *arg) | 386 | int __write, void *arg) |
383 | { | 387 | { |
@@ -422,7 +426,7 @@ static int access_mem(unw_addr_space_t __used as, | |||
422 | return 0; | 426 | return 0; |
423 | } | 427 | } |
424 | 428 | ||
425 | static int access_reg(unw_addr_space_t __used as, | 429 | static int access_reg(unw_addr_space_t __maybe_unused as, |
426 | unw_regnum_t regnum, unw_word_t *valp, | 430 | unw_regnum_t regnum, unw_word_t *valp, |
427 | int __write, void *arg) | 431 | int __write, void *arg) |
428 | { | 432 | { |
@@ -454,9 +458,9 @@ static int access_reg(unw_addr_space_t __used as, | |||
454 | return 0; | 458 | return 0; |
455 | } | 459 | } |
456 | 460 | ||
457 | static void put_unwind_info(unw_addr_space_t __used as, | 461 | static void put_unwind_info(unw_addr_space_t __maybe_unused as, |
458 | unw_proc_info_t *pi __used, | 462 | unw_proc_info_t *pi __maybe_unused, |
459 | void *arg __used) | 463 | void *arg __maybe_unused) |
460 | { | 464 | { |
461 | pr_debug("unwind: put_unwind_info called\n"); | 465 | pr_debug("unwind: put_unwind_info called\n"); |
462 | } | 466 | } |
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h index 919bd6ad8501..a78c8b303bb5 100644 --- a/tools/perf/util/unwind.h +++ b/tools/perf/util/unwind.h | |||
@@ -22,11 +22,12 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg, | |||
22 | int unwind__arch_reg_id(int regnum); | 22 | int unwind__arch_reg_id(int regnum); |
23 | #else | 23 | #else |
24 | static inline int | 24 | static inline int |
25 | unwind__get_entries(unwind_entry_cb_t cb __used, void *arg __used, | 25 | unwind__get_entries(unwind_entry_cb_t cb __maybe_unused, |
26 | struct machine *machine __used, | 26 | void *arg __maybe_unused, |
27 | struct thread *thread __used, | 27 | struct machine *machine __maybe_unused, |
28 | u64 sample_uregs __used, | 28 | struct thread *thread __maybe_unused, |
29 | struct perf_sample *data __used) | 29 | u64 sample_uregs __maybe_unused, |
30 | struct perf_sample *data __maybe_unused) | ||
30 | { | 31 | { |
31 | return 0; | 32 | return 0; |
32 | } | 33 | } |
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 67a371355c75..70fa70b535b2 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h | |||
@@ -70,7 +70,7 @@ | |||
70 | #include <sys/socket.h> | 70 | #include <sys/socket.h> |
71 | #include <sys/ioctl.h> | 71 | #include <sys/ioctl.h> |
72 | #include <inttypes.h> | 72 | #include <inttypes.h> |
73 | #include "../../../include/linux/magic.h" | 73 | #include <linux/magic.h> |
74 | #include "types.h" | 74 | #include "types.h" |
75 | #include <sys/ttydefaults.h> | 75 | #include <sys/ttydefaults.h> |
76 | 76 | ||
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c new file mode 100644 index 000000000000..e60951fcdb12 --- /dev/null +++ b/tools/perf/util/vdso.c | |||
@@ -0,0 +1,111 @@ | |||
1 | |||
2 | #include <unistd.h> | ||
3 | #include <stdio.h> | ||
4 | #include <string.h> | ||
5 | #include <sys/types.h> | ||
6 | #include <sys/stat.h> | ||
7 | #include <fcntl.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <linux/kernel.h> | ||
10 | |||
11 | #include "vdso.h" | ||
12 | #include "util.h" | ||
13 | #include "symbol.h" | ||
14 | #include "linux/string.h" | ||
15 | |||
16 | static bool vdso_found; | ||
17 | static char vdso_file[] = "/tmp/perf-vdso.so-XXXXXX"; | ||
18 | |||
19 | static int find_vdso_map(void **start, void **end) | ||
20 | { | ||
21 | FILE *maps; | ||
22 | char line[128]; | ||
23 | int found = 0; | ||
24 | |||
25 | maps = fopen("/proc/self/maps", "r"); | ||
26 | if (!maps) { | ||
27 | pr_err("vdso: cannot open maps\n"); | ||
28 | return -1; | ||
29 | } | ||
30 | |||
31 | while (!found && fgets(line, sizeof(line), maps)) { | ||
32 | int m = -1; | ||
33 | |||
34 | /* We care only about private r-x mappings. */ | ||
35 | if (2 != sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n", | ||
36 | start, end, &m)) | ||
37 | continue; | ||
38 | if (m < 0) | ||
39 | continue; | ||
40 | |||
41 | if (!strncmp(&line[m], VDSO__MAP_NAME, | ||
42 | sizeof(VDSO__MAP_NAME) - 1)) | ||
43 | found = 1; | ||
44 | } | ||
45 | |||
46 | fclose(maps); | ||
47 | return !found; | ||
48 | } | ||
49 | |||
50 | static char *get_file(void) | ||
51 | { | ||
52 | char *vdso = NULL; | ||
53 | char *buf = NULL; | ||
54 | void *start, *end; | ||
55 | size_t size; | ||
56 | int fd; | ||
57 | |||
58 | if (vdso_found) | ||
59 | return vdso_file; | ||
60 | |||
61 | if (find_vdso_map(&start, &end)) | ||
62 | return NULL; | ||
63 | |||
64 | size = end - start; | ||
65 | |||
66 | buf = memdup(start, size); | ||
67 | if (!buf) | ||
68 | return NULL; | ||
69 | |||
70 | fd = mkstemp(vdso_file); | ||
71 | if (fd < 0) | ||
72 | goto out; | ||
73 | |||
74 | if (size == (size_t) write(fd, buf, size)) | ||
75 | vdso = vdso_file; | ||
76 | |||
77 | close(fd); | ||
78 | |||
79 | out: | ||
80 | free(buf); | ||
81 | |||
82 | vdso_found = (vdso != NULL); | ||
83 | return vdso; | ||
84 | } | ||
85 | |||
86 | void vdso__exit(void) | ||
87 | { | ||
88 | if (vdso_found) | ||
89 | unlink(vdso_file); | ||
90 | } | ||
91 | |||
92 | struct dso *vdso__dso_findnew(struct list_head *head) | ||
93 | { | ||
94 | struct dso *dso = dsos__find(head, VDSO__MAP_NAME); | ||
95 | |||
96 | if (!dso) { | ||
97 | char *file; | ||
98 | |||
99 | file = get_file(); | ||
100 | if (!file) | ||
101 | return NULL; | ||
102 | |||
103 | dso = dso__new(VDSO__MAP_NAME); | ||
104 | if (dso != NULL) { | ||
105 | dsos__add(head, dso); | ||
106 | dso__set_long_name(dso, file); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | return dso; | ||
111 | } | ||
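The new vdso.c above deals with the fact that the vDSO has no backing file on disk: it locates the "[vdso]" segment in the tool's own /proc/self/maps, copies the mapped bytes into a temporary file, and records that file as the DSO's long name so the ordinary symbol-loading path can read it like any other ELF object. A self-contained sketch of the same idea follows, assuming Linux and glibc; the output template, variable names and error handling are illustrative and not perf's:

/* Standalone illustration of the vDSO-copy technique used by vdso.c. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	FILE *maps = fopen("/proc/self/maps", "r");
	char line[256];
	void *start = NULL, *end = NULL;
	char out[] = "/tmp/vdso-copy-XXXXXX";	/* hypothetical template */
	int fd, found = 0;

	if (!maps)
		return 1;

	while (!found && fgets(line, sizeof(line), maps)) {
		int m = -1;

		/* Same pattern as find_vdso_map(): private r-x mappings only. */
		if (sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n",
			   &start, &end, &m) != 2 || m < 0)
			continue;
		if (!strncmp(&line[m], "[vdso]", 6))
			found = 1;
	}
	fclose(maps);

	if (!found)
		return 1;

	fd = mkstemp(out);
	if (fd < 0)
		return 1;

	/* The vDSO is already mapped into this process, so writing the
	 * raw range out is enough to get an ELF image on disk. */
	if (write(fd, start, (size_t)((char *)end - (char *)start)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);

	printf("vdso copied to %s\n", out);
	return 0;
}

The real code additionally caches the result in vdso_found/vdso_file and removes the temporary file in vdso__exit().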
diff --git a/tools/perf/util/vdso.h b/tools/perf/util/vdso.h new file mode 100644 index 000000000000..0f76e7caf6f8 --- /dev/null +++ b/tools/perf/util/vdso.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef __PERF_VDSO__ | ||
2 | #define __PERF_VDSO__ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <string.h> | ||
6 | #include <stdbool.h> | ||
7 | |||
8 | #define VDSO__MAP_NAME "[vdso]" | ||
9 | |||
10 | static inline bool is_vdso_map(const char *filename) | ||
11 | { | ||
12 | return !strcmp(filename, VDSO__MAP_NAME); | ||
13 | } | ||
14 | |||
15 | struct dso *vdso__dso_findnew(struct list_head *head); | ||
16 | void vdso__exit(void); | ||
17 | |||
18 | #endif /* __PERF_VDSO__ */ | ||
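vdso.h exposes just enough for callers: a predicate that recognizes the synthetic map name and a find-or-create accessor for the DSO. The hypothetical, self-contained snippet below shows how a consumer might branch on a map's filename; the real callers elsewhere in perf are not part of this hunk, and is_vdso_map() is copied locally only so the example compiles on its own:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define VDSO__MAP_NAME "[vdso]"

/* Local copy of the header's predicate, for a standalone build. */
static bool is_vdso_map(const char *filename)
{
	return !strcmp(filename, VDSO__MAP_NAME);
}

int main(void)
{
	const char *names[] = { "/usr/lib/libc-2.15.so", "[vdso]", "[stack]" };

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-24s -> %s\n", names[i],
		       is_vdso_map(names[i]) ? "use vdso__dso_findnew()"
					     : "resolve from filesystem");
	return 0;
}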
diff --git a/tools/perf/util/wrapper.c b/tools/perf/util/wrapper.c index 73e900edb5a2..19f15b650703 100644 --- a/tools/perf/util/wrapper.c +++ b/tools/perf/util/wrapper.c | |||
@@ -7,7 +7,8 @@ | |||
7 | * There's no pack memory to release - but stay close to the Git | 7 | * There's no pack memory to release - but stay close to the Git |
8 | * version so wrap this away: | 8 | * version so wrap this away: |
9 | */ | 9 | */ |
10 | static inline void release_pack_memory(size_t size __used, int flag __used) | 10 | static inline void release_pack_memory(size_t size __maybe_unused, |
11 | int flag __maybe_unused) | ||
11 | { | 12 | { |
12 | } | 13 | } |
13 | 14 | ||