author     Linus Torvalds <torvalds@linux-foundation.org>  2013-11-14 02:56:32 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-11-14 02:56:32 -0500
commit     fcd7476f9e03a36e709e0807198d47a826cc4e3a
tree       1a9017988a864fae9ec62fd9e08e18cdc42d06cf /tools
parent     d320e203bad4cfcef3613e83a52f8c70a77e8a60
parent     d969135aae1434547f41853f0e8eaa622e8b8816
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"A number of fixes:
- Fix segfault on perf trace -i perf.data, from Namhyung Kim.
- Fix segfault with --no-mmap-pages, from David Ahern.
- Don't force a refresh during progress update in the TUI, greatly
reducing startup costs, fix from Patrick Palka.
- Fix the sw clock event period test: it did not check whether the
requested sample frequency exceeds max_sample_freq.
- Handle throttle events in 'object code reading' test, fix from
Adrian Hunter.
- Prevent condition that all sort keys are elided, fix from Namhyung
Kim.
- Round mmap pages to a power of 2, from David Ahern.
And a number of late arrival changes:
- Add summary only option to 'perf trace', suppressing the decoding
of events, from David Ahern
- 'perf trace --summary' formatting simplifications, from Pekka
Enberg.
- Beautify fifth argument of mmap() as fd, in 'perf trace', from
Namhyung Kim.
- Add direct access to dynamic arrays in libtraceevent, from Steven
Rostedt.
- Synthesize non-exec MMAP records when --data used, allowing the
resolution of data addresses to symbols (global variables, etc), by
Arnaldo Carvalho de Melo.
- Code cleanups by David Ahern and Adrian Hunter"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
tools lib traceevent: Add direct access to dynamic arrays
perf target: Shorten perf_target__ to target__
perf tests: Handle throttle events in 'object code reading' test
perf evlist: Refactor mmap_pages parsing
perf evlist: Round mmap pages to power 2 - v2
perf record: Fix segfault with --no-mmap-pages
perf trace: Add summary only option
perf trace: Simplify '--summary' output
perf trace: Change syscall summary duration order
perf tests: Compensate lower sample freq with longer test loop
perf trace: Fix segfault on perf trace -i perf.data
perf trace: Separate tp syscall field caching into init routine to be reused
perf trace: Beautify fifth argument of mmap() as fd
perf tests: Use lower sample_freq in sw clock event period test
perf tests: Check return of perf_evlist__open sw clock event period test
perf record: Move existing write_output into helper function
perf record: Use correct return type for write()
perf tools: Prevent condition that all sort keys are elided
perf machine: Simplify synthesize_threads method
perf machine: Introduce synthesize_threads method out of open coded equivalent
...
Diffstat (limited to 'tools')
31 files changed, 363 insertions, 264 deletions
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 8f450adaa9c2..0362d575de7d 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -3435,6 +3435,19 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg | |||
3435 | goto out_warning_op; | 3435 | goto out_warning_op; |
3436 | } | 3436 | } |
3437 | break; | 3437 | break; |
3438 | case PRINT_DYNAMIC_ARRAY: | ||
3439 | /* Without [], we pass the address to the dynamic data */ | ||
3440 | offset = pevent_read_number(pevent, | ||
3441 | data + arg->dynarray.field->offset, | ||
3442 | arg->dynarray.field->size); | ||
3443 | /* | ||
3444 | * The actual length of the dynamic array is stored | ||
3445 | * in the top half of the field, and the offset | ||
3446 | * is in the bottom half of the 32 bit field. | ||
3447 | */ | ||
3448 | offset &= 0xffff; | ||
3449 | val = (unsigned long long)(data + offset); | ||
3450 | break; | ||
3438 | default: /* not sure what to do there */ | 3451 | default: /* not sure what to do there */ |
3439 | return 0; | 3452 | return 0; |
3440 | } | 3453 | } |
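The comment in the new PRINT_DYNAMIC_ARRAY case explains the encoding of the 32-bit descriptor that pevent_read_number() returns. A minimal sketch of that unpacking, not part of the patch (names are illustrative only):

```c
#include <stdint.h>

/* Illustrative only: the kernel packs the payload offset into the low 16
 * bits of the descriptor and the payload length into the high 16 bits. */
struct dynarray_ref {
	uint16_t offset;	/* byte offset of the data within the record */
	uint16_t len;		/* length of the dynamic array in bytes */
};

static inline struct dynarray_ref dynarray_unpack(uint32_t desc)
{
	struct dynarray_ref ref = {
		.offset = desc & 0xffff,
		.len    = desc >> 16,
	};

	return ref;
}
```

Without `[]`, eval_num_arg() then evaluates to `(unsigned long long)(data + offset)`, i.e. the address of the payload itself rather than one of its elements.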
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 7b0497f95a75..fae38d9a44a4 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -93,9 +93,15 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs. | |||
93 | --comm:: | 93 | --comm:: |
94 | Show process COMM right beside its ID, on by default, disable with --no-comm. | 94 | Show process COMM right beside its ID, on by default, disable with --no-comm. |
95 | 95 | ||
96 | -s:: | ||
96 | --summary:: | 97 | --summary:: |
97 | Show a summary of syscalls by thread with min, max, and average times (in | 98 | Show only a summary of syscalls by thread with min, max, and average times |
98 | msec) and relative stddev. | 99 | (in msec) and relative stddev. |
100 | |||
101 | -S:: | ||
102 | --with-summary:: | ||
103 | Show all syscalls followed by a summary by thread with min, max, and | ||
104 | average times (in msec) and relative stddev. | ||
99 | 105 | ||
100 | --tool_stats:: | 106 | --tool_stats:: |
101 | Show tool stats such as number of times fd->pathname was discovered thru | 107 | Show tool stats such as number of times fd->pathname was discovered thru |
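Hypothetical invocations of the two modes documented above (the `sleep 1` workload is only an example):

```
perf trace -s -- sleep 1    # per-event output suppressed, summary only
perf trace -S -- sleep 1    # every syscall printed, summary appended
```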
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index cd9f92078aba..f8bf5f244d77 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1510,13 +1510,13 @@ static int kvm_events_live(struct perf_kvm_stat *kvm, | |||
1510 | /* | 1510 | /* |
1511 | * target related setups | 1511 | * target related setups |
1512 | */ | 1512 | */ |
1513 | err = perf_target__validate(&kvm->opts.target); | 1513 | err = target__validate(&kvm->opts.target); |
1514 | if (err) { | 1514 | if (err) { |
1515 | perf_target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ); | 1515 | target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ); |
1516 | ui__warning("%s", errbuf); | 1516 | ui__warning("%s", errbuf); |
1517 | } | 1517 | } |
1518 | 1518 | ||
1519 | if (perf_target__none(&kvm->opts.target)) | 1519 | if (target__none(&kvm->opts.target)) |
1520 | kvm->opts.target.system_wide = true; | 1520 | kvm->opts.target.system_wide = true; |
1521 | 1521 | ||
1522 | 1522 | ||
@@ -1544,18 +1544,8 @@ static int kvm_events_live(struct perf_kvm_stat *kvm, | |||
1544 | } | 1544 | } |
1545 | kvm->session->evlist = kvm->evlist; | 1545 | kvm->session->evlist = kvm->evlist; |
1546 | perf_session__set_id_hdr_size(kvm->session); | 1546 | perf_session__set_id_hdr_size(kvm->session); |
1547 | 1547 | machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target, | |
1548 | 1548 | kvm->evlist->threads, false); | |
1549 | if (perf_target__has_task(&kvm->opts.target)) | ||
1550 | perf_event__synthesize_thread_map(&kvm->tool, | ||
1551 | kvm->evlist->threads, | ||
1552 | perf_event__process, | ||
1553 | &kvm->session->machines.host); | ||
1554 | else | ||
1555 | perf_event__synthesize_threads(&kvm->tool, perf_event__process, | ||
1556 | &kvm->session->machines.host); | ||
1557 | |||
1558 | |||
1559 | err = kvm_live_open_events(kvm); | 1549 | err = kvm_live_open_events(kvm); |
1560 | if (err) | 1550 | if (err) |
1561 | goto out; | 1551 | goto out; |
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 15280b5e5574..4d644fe2d5b7 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -76,12 +76,12 @@ struct perf_record { | |||
76 | long samples; | 76 | long samples; |
77 | }; | 77 | }; |
78 | 78 | ||
79 | static int write_output(struct perf_record *rec, void *buf, size_t size) | 79 | static int do_write_output(struct perf_record *rec, void *buf, size_t size) |
80 | { | 80 | { |
81 | struct perf_data_file *file = &rec->file; | 81 | struct perf_data_file *file = &rec->file; |
82 | 82 | ||
83 | while (size) { | 83 | while (size) { |
84 | int ret = write(file->fd, buf, size); | 84 | ssize_t ret = write(file->fd, buf, size); |
85 | 85 | ||
86 | if (ret < 0) { | 86 | if (ret < 0) { |
87 | pr_err("failed to write perf data, error: %m\n"); | 87 | pr_err("failed to write perf data, error: %m\n"); |
@@ -97,6 +97,11 @@ static int write_output(struct perf_record *rec, void *buf, size_t size) | |||
97 | return 0; | 97 | return 0; |
98 | } | 98 | } |
99 | 99 | ||
100 | static int write_output(struct perf_record *rec, void *buf, size_t size) | ||
101 | { | ||
102 | return do_write_output(rec, buf, size); | ||
103 | } | ||
104 | |||
100 | static int process_synthesized_event(struct perf_tool *tool, | 105 | static int process_synthesized_event(struct perf_tool *tool, |
101 | union perf_event *event, | 106 | union perf_event *event, |
102 | struct perf_sample *sample __maybe_unused, | 107 | struct perf_sample *sample __maybe_unused, |
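The type change above matters because write(2) returns ssize_t, not int; a sketch of the short-write loop do_write_output() implements, with an assumed helper name:

```c
#include <unistd.h>

/* Sketch only: loop until the whole buffer is written, keeping write(2)'s
 * ssize_t return value un-truncated and handling short writes. */
static int write_all(int fd, const void *buf, size_t size)
{
	const char *p = buf;

	while (size) {
		ssize_t ret = write(fd, p, size);

		if (ret < 0)
			return -1;	/* caller reports the error */
		p += ret;
		size -= ret;
	}

	return 0;
}
```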
@@ -480,16 +485,8 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | |||
480 | perf_event__synthesize_guest_os, tool); | 485 | perf_event__synthesize_guest_os, tool); |
481 | } | 486 | } |
482 | 487 | ||
483 | if (perf_target__has_task(&opts->target)) | 488 | err = __machine__synthesize_threads(machine, tool, &opts->target, evsel_list->threads, |
484 | err = perf_event__synthesize_thread_map(tool, evsel_list->threads, | 489 | process_synthesized_event, opts->sample_address); |
485 | process_synthesized_event, | ||
486 | machine); | ||
487 | else if (perf_target__has_cpu(&opts->target)) | ||
488 | err = perf_event__synthesize_threads(tool, process_synthesized_event, | ||
489 | machine); | ||
490 | else /* command specified */ | ||
491 | err = 0; | ||
492 | |||
493 | if (err != 0) | 490 | if (err != 0) |
494 | goto out_delete_session; | 491 | goto out_delete_session; |
495 | 492 | ||
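The new __machine__synthesize_threads() helper is introduced in util/machine.c, which is not shown in this excerpt. A hedged reconstruction of what it plausibly wraps, inferred from the call site above and the open-coded branches it replaces:

```c
/* Reconstruction, not the actual util/machine.c code. */
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process,
							 machine, data_mmap);

	if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process,
						      machine, data_mmap);

	/* command specified, nothing to synthesize up front */
	return 0;
}
```

The simpler machine__synthesize_threads() used by builtin-kvm.c and builtin-top.c presumably forwards to this with the tool and process callback defaulted.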
@@ -509,7 +506,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | |||
509 | * (apart from group members) have enable_on_exec=1 set, | 506 | * (apart from group members) have enable_on_exec=1 set, |
510 | * so don't spoil it by prematurely enabling them. | 507 | * so don't spoil it by prematurely enabling them. |
511 | */ | 508 | */ |
512 | if (!perf_target__none(&opts->target)) | 509 | if (!target__none(&opts->target)) |
513 | perf_evlist__enable(evsel_list); | 510 | perf_evlist__enable(evsel_list); |
514 | 511 | ||
515 | /* | 512 | /* |
@@ -538,7 +535,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | |||
538 | * die with the process and we wait for that. Thus no need to | 535 | * die with the process and we wait for that. Thus no need to |
539 | * disable events in this case. | 536 | * disable events in this case. |
540 | */ | 537 | */ |
541 | if (done && !disabled && !perf_target__none(&opts->target)) { | 538 | if (done && !disabled && !target__none(&opts->target)) { |
542 | perf_evlist__disable(evsel_list); | 539 | perf_evlist__disable(evsel_list); |
543 | disabled = true; | 540 | disabled = true; |
544 | } | 541 | } |
@@ -909,7 +906,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused) | |||
909 | 906 | ||
910 | argc = parse_options(argc, argv, record_options, record_usage, | 907 | argc = parse_options(argc, argv, record_options, record_usage, |
911 | PARSE_OPT_STOP_AT_NON_OPTION); | 908 | PARSE_OPT_STOP_AT_NON_OPTION); |
912 | if (!argc && perf_target__none(&rec->opts.target)) | 909 | if (!argc && target__none(&rec->opts.target)) |
913 | usage_with_options(record_usage, record_options); | 910 | usage_with_options(record_usage, record_options); |
914 | 911 | ||
915 | if (nr_cgroups && !rec->opts.target.system_wide) { | 912 | if (nr_cgroups && !rec->opts.target.system_wide) { |
@@ -939,17 +936,17 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused) | |||
939 | goto out_symbol_exit; | 936 | goto out_symbol_exit; |
940 | } | 937 | } |
941 | 938 | ||
942 | err = perf_target__validate(&rec->opts.target); | 939 | err = target__validate(&rec->opts.target); |
943 | if (err) { | 940 | if (err) { |
944 | perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); | 941 | target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); |
945 | ui__warning("%s", errbuf); | 942 | ui__warning("%s", errbuf); |
946 | } | 943 | } |
947 | 944 | ||
948 | err = perf_target__parse_uid(&rec->opts.target); | 945 | err = target__parse_uid(&rec->opts.target); |
949 | if (err) { | 946 | if (err) { |
950 | int saved_errno = errno; | 947 | int saved_errno = errno; |
951 | 948 | ||
952 | perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); | 949 | target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); |
953 | ui__error("%s", errbuf); | 950 | ui__error("%s", errbuf); |
954 | 951 | ||
955 | err = -saved_errno; | 952 | err = -saved_errno; |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 0fc1c941a73c..ee0d565f83e3 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -108,7 +108,7 @@ enum { | |||
108 | 108 | ||
109 | static struct perf_evlist *evsel_list; | 109 | static struct perf_evlist *evsel_list; |
110 | 110 | ||
111 | static struct perf_target target = { | 111 | static struct target target = { |
112 | .uid = UINT_MAX, | 112 | .uid = UINT_MAX, |
113 | }; | 113 | }; |
114 | 114 | ||
@@ -294,11 +294,10 @@ static int create_perf_stat_counter(struct perf_evsel *evsel) | |||
294 | 294 | ||
295 | attr->inherit = !no_inherit; | 295 | attr->inherit = !no_inherit; |
296 | 296 | ||
297 | if (perf_target__has_cpu(&target)) | 297 | if (target__has_cpu(&target)) |
298 | return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); | 298 | return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); |
299 | 299 | ||
300 | if (!perf_target__has_task(&target) && | 300 | if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) { |
301 | perf_evsel__is_group_leader(evsel)) { | ||
302 | attr->disabled = 1; | 301 | attr->disabled = 1; |
303 | if (!initial_delay) | 302 | if (!initial_delay) |
304 | attr->enable_on_exec = 1; | 303 | attr->enable_on_exec = 1; |
@@ -1236,7 +1235,7 @@ static void print_stat(int argc, const char **argv) | |||
1236 | fprintf(output, "\'system wide"); | 1235 | fprintf(output, "\'system wide"); |
1237 | else if (target.cpu_list) | 1236 | else if (target.cpu_list) |
1238 | fprintf(output, "\'CPU(s) %s", target.cpu_list); | 1237 | fprintf(output, "\'CPU(s) %s", target.cpu_list); |
1239 | else if (!perf_target__has_task(&target)) { | 1238 | else if (!target__has_task(&target)) { |
1240 | fprintf(output, "\'%s", argv[0]); | 1239 | fprintf(output, "\'%s", argv[0]); |
1241 | for (i = 1; i < argc; i++) | 1240 | for (i = 1; i < argc; i++) |
1242 | fprintf(output, " %s", argv[i]); | 1241 | fprintf(output, " %s", argv[i]); |
@@ -1667,7 +1666,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) | |||
1667 | } else if (big_num_opt == 0) /* User passed --no-big-num */ | 1666 | } else if (big_num_opt == 0) /* User passed --no-big-num */ |
1668 | big_num = false; | 1667 | big_num = false; |
1669 | 1668 | ||
1670 | if (!argc && perf_target__none(&target)) | 1669 | if (!argc && target__none(&target)) |
1671 | usage_with_options(stat_usage, options); | 1670 | usage_with_options(stat_usage, options); |
1672 | 1671 | ||
1673 | if (run_count < 0) { | 1672 | if (run_count < 0) { |
@@ -1680,8 +1679,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) | |||
1680 | } | 1679 | } |
1681 | 1680 | ||
1682 | /* no_aggr, cgroup are for system-wide only */ | 1681 | /* no_aggr, cgroup are for system-wide only */ |
1683 | if ((aggr_mode != AGGR_GLOBAL || nr_cgroups) | 1682 | if ((aggr_mode != AGGR_GLOBAL || nr_cgroups) && |
1684 | && !perf_target__has_cpu(&target)) { | 1683 | !target__has_cpu(&target)) { |
1685 | fprintf(stderr, "both cgroup and no-aggregation " | 1684 | fprintf(stderr, "both cgroup and no-aggregation " |
1686 | "modes only available in system-wide mode\n"); | 1685 | "modes only available in system-wide mode\n"); |
1687 | 1686 | ||
@@ -1694,14 +1693,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) | |||
1694 | if (add_default_attributes()) | 1693 | if (add_default_attributes()) |
1695 | goto out; | 1694 | goto out; |
1696 | 1695 | ||
1697 | perf_target__validate(&target); | 1696 | target__validate(&target); |
1698 | 1697 | ||
1699 | if (perf_evlist__create_maps(evsel_list, &target) < 0) { | 1698 | if (perf_evlist__create_maps(evsel_list, &target) < 0) { |
1700 | if (perf_target__has_task(&target)) { | 1699 | if (target__has_task(&target)) { |
1701 | pr_err("Problems finding threads of monitor\n"); | 1700 | pr_err("Problems finding threads of monitor\n"); |
1702 | parse_options_usage(stat_usage, options, "p", 1); | 1701 | parse_options_usage(stat_usage, options, "p", 1); |
1703 | parse_options_usage(NULL, options, "t", 1); | 1702 | parse_options_usage(NULL, options, "t", 1); |
1704 | } else if (perf_target__has_cpu(&target)) { | 1703 | } else if (target__has_cpu(&target)) { |
1705 | perror("failed to parse CPUs map"); | 1704 | perror("failed to parse CPUs map"); |
1706 | parse_options_usage(stat_usage, options, "C", 1); | 1705 | parse_options_usage(stat_usage, options, "C", 1); |
1707 | parse_options_usage(NULL, options, "a", 1); | 1706 | parse_options_usage(NULL, options, "a", 1); |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 9acca8856ccb..b8f8e29db332 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -950,14 +950,8 @@ static int __cmd_top(struct perf_top *top) | |||
950 | if (ret) | 950 | if (ret) |
951 | goto out_delete; | 951 | goto out_delete; |
952 | 952 | ||
953 | if (perf_target__has_task(&opts->target)) | 953 | machine__synthesize_threads(&top->session->machines.host, &opts->target, |
954 | perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, | 954 | top->evlist->threads, false); |
955 | perf_event__process, | ||
956 | &top->session->machines.host); | ||
957 | else | ||
958 | perf_event__synthesize_threads(&top->tool, perf_event__process, | ||
959 | &top->session->machines.host); | ||
960 | |||
961 | ret = perf_top__start_counters(top); | 955 | ret = perf_top__start_counters(top); |
962 | if (ret) | 956 | if (ret) |
963 | goto out_delete; | 957 | goto out_delete; |
@@ -973,7 +967,7 @@ static int __cmd_top(struct perf_top *top) | |||
973 | * XXX 'top' still doesn't start workloads like record, trace, but should, | 967 | * XXX 'top' still doesn't start workloads like record, trace, but should, |
974 | * so leave the check here. | 968 | * so leave the check here. |
975 | */ | 969 | */ |
976 | if (!perf_target__none(&opts->target)) | 970 | if (!target__none(&opts->target)) |
977 | perf_evlist__enable(top->evlist); | 971 | perf_evlist__enable(top->evlist); |
978 | 972 | ||
979 | /* Wait for a minimal set of events before starting the snapshot */ | 973 | /* Wait for a minimal set of events before starting the snapshot */ |
@@ -1059,7 +1053,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) | |||
1059 | .sym_pcnt_filter = 5, | 1053 | .sym_pcnt_filter = 5, |
1060 | }; | 1054 | }; |
1061 | struct perf_record_opts *opts = &top.record_opts; | 1055 | struct perf_record_opts *opts = &top.record_opts; |
1062 | struct perf_target *target = &opts->target; | 1056 | struct target *target = &opts->target; |
1063 | const struct option options[] = { | 1057 | const struct option options[] = { |
1064 | OPT_CALLBACK('e', "event", &top.evlist, "event", | 1058 | OPT_CALLBACK('e', "event", &top.evlist, "event", |
1065 | "event selector. use 'perf list' to list available events", | 1059 | "event selector. use 'perf list' to list available events", |
@@ -1175,24 +1169,24 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) | |||
1175 | 1169 | ||
1176 | setup_browser(false); | 1170 | setup_browser(false); |
1177 | 1171 | ||
1178 | status = perf_target__validate(target); | 1172 | status = target__validate(target); |
1179 | if (status) { | 1173 | if (status) { |
1180 | perf_target__strerror(target, status, errbuf, BUFSIZ); | 1174 | target__strerror(target, status, errbuf, BUFSIZ); |
1181 | ui__warning("%s", errbuf); | 1175 | ui__warning("%s", errbuf); |
1182 | } | 1176 | } |
1183 | 1177 | ||
1184 | status = perf_target__parse_uid(target); | 1178 | status = target__parse_uid(target); |
1185 | if (status) { | 1179 | if (status) { |
1186 | int saved_errno = errno; | 1180 | int saved_errno = errno; |
1187 | 1181 | ||
1188 | perf_target__strerror(target, status, errbuf, BUFSIZ); | 1182 | target__strerror(target, status, errbuf, BUFSIZ); |
1189 | ui__error("%s", errbuf); | 1183 | ui__error("%s", errbuf); |
1190 | 1184 | ||
1191 | status = -saved_errno; | 1185 | status = -saved_errno; |
1192 | goto out_delete_evlist; | 1186 | goto out_delete_evlist; |
1193 | } | 1187 | } |
1194 | 1188 | ||
1195 | if (perf_target__none(target)) | 1189 | if (target__none(target)) |
1196 | target->system_wide = true; | 1190 | target->system_wide = true; |
1197 | 1191 | ||
1198 | if (perf_evlist__create_maps(top.evlist, target) < 0) | 1192 | if (perf_evlist__create_maps(top.evlist, target) < 0) |
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 329b7832b5da..6b230af940e2 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -149,21 +149,32 @@ static void perf_evsel__delete_priv(struct perf_evsel *evsel) | |||
149 | perf_evsel__delete(evsel); | 149 | perf_evsel__delete(evsel); |
150 | } | 150 | } |
151 | 151 | ||
152 | static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, | 152 | static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler) |
153 | void *handler, int idx) | ||
154 | { | 153 | { |
155 | struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction, idx); | 154 | evsel->priv = malloc(sizeof(struct syscall_tp)); |
156 | 155 | if (evsel->priv != NULL) { | |
157 | if (evsel) { | ||
158 | evsel->priv = malloc(sizeof(struct syscall_tp)); | ||
159 | |||
160 | if (evsel->priv == NULL) | ||
161 | goto out_delete; | ||
162 | |||
163 | if (perf_evsel__init_sc_tp_uint_field(evsel, id)) | 156 | if (perf_evsel__init_sc_tp_uint_field(evsel, id)) |
164 | goto out_delete; | 157 | goto out_delete; |
165 | 158 | ||
166 | evsel->handler = handler; | 159 | evsel->handler = handler; |
160 | return 0; | ||
161 | } | ||
162 | |||
163 | return -ENOMEM; | ||
164 | |||
165 | out_delete: | ||
166 | free(evsel->priv); | ||
167 | evsel->priv = NULL; | ||
168 | return -ENOENT; | ||
169 | } | ||
170 | |||
171 | static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler) | ||
172 | { | ||
173 | struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction); | ||
174 | |||
175 | if (evsel) { | ||
176 | if (perf_evsel__init_syscall_tp(evsel, handler)) | ||
177 | goto out_delete; | ||
167 | } | 178 | } |
168 | 179 | ||
169 | return evsel; | 180 | return evsel; |
@@ -186,17 +197,16 @@ static int perf_evlist__add_syscall_newtp(struct perf_evlist *evlist, | |||
186 | void *sys_exit_handler) | 197 | void *sys_exit_handler) |
187 | { | 198 | { |
188 | int ret = -1; | 199 | int ret = -1; |
189 | int idx = evlist->nr_entries; | ||
190 | struct perf_evsel *sys_enter, *sys_exit; | 200 | struct perf_evsel *sys_enter, *sys_exit; |
191 | 201 | ||
192 | sys_enter = perf_evsel__syscall_newtp("sys_enter", sys_enter_handler, idx++); | 202 | sys_enter = perf_evsel__syscall_newtp("sys_enter", sys_enter_handler); |
193 | if (sys_enter == NULL) | 203 | if (sys_enter == NULL) |
194 | goto out; | 204 | goto out; |
195 | 205 | ||
196 | if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args)) | 206 | if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args)) |
197 | goto out_delete_sys_enter; | 207 | goto out_delete_sys_enter; |
198 | 208 | ||
199 | sys_exit = perf_evsel__syscall_newtp("sys_exit", sys_exit_handler, idx++); | 209 | sys_exit = perf_evsel__syscall_newtp("sys_exit", sys_exit_handler); |
200 | if (sys_exit == NULL) | 210 | if (sys_exit == NULL) |
201 | goto out_delete_sys_enter; | 211 | goto out_delete_sys_enter; |
202 | 212 | ||
@@ -953,7 +963,8 @@ static struct syscall_fmt { | |||
953 | { .name = "mmap", .hexret = true, | 963 | { .name = "mmap", .hexret = true, |
954 | .arg_scnprintf = { [0] = SCA_HEX, /* addr */ | 964 | .arg_scnprintf = { [0] = SCA_HEX, /* addr */ |
955 | [2] = SCA_MMAP_PROT, /* prot */ | 965 | [2] = SCA_MMAP_PROT, /* prot */ |
956 | [3] = SCA_MMAP_FLAGS, /* flags */ }, }, | 966 | [3] = SCA_MMAP_FLAGS, /* flags */ |
967 | [4] = SCA_FD, /* fd */ }, }, | ||
957 | { .name = "mprotect", .errmsg = true, | 968 | { .name = "mprotect", .errmsg = true, |
958 | .arg_scnprintf = { [0] = SCA_HEX, /* start */ | 969 | .arg_scnprintf = { [0] = SCA_HEX, /* start */ |
959 | [2] = SCA_MMAP_PROT, /* prot */ }, }, | 970 | [2] = SCA_MMAP_PROT, /* prot */ }, }, |
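For reference, the slot being beautified: in the mmap(2) prototype the file descriptor is the fifth argument, i.e. zero-based index 4 in the arg_scnprintf table above.

```c
#include <sys/mman.h>

/* fd is argument index 4 (zero-based), hence the new [4] = SCA_FD entry */
void *mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset);
```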
@@ -1157,6 +1168,7 @@ struct trace { | |||
1157 | bool sched; | 1168 | bool sched; |
1158 | bool multiple_threads; | 1169 | bool multiple_threads; |
1159 | bool summary; | 1170 | bool summary; |
1171 | bool summary_only; | ||
1160 | bool show_comm; | 1172 | bool show_comm; |
1161 | bool show_tool_stats; | 1173 | bool show_tool_stats; |
1162 | double duration_filter; | 1174 | double duration_filter; |
@@ -1342,15 +1354,8 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist) | |||
1342 | if (trace->host == NULL) | 1354 | if (trace->host == NULL) |
1343 | return -ENOMEM; | 1355 | return -ENOMEM; |
1344 | 1356 | ||
1345 | if (perf_target__has_task(&trace->opts.target)) { | 1357 | err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, |
1346 | err = perf_event__synthesize_thread_map(&trace->tool, evlist->threads, | 1358 | evlist->threads, trace__tool_process, false); |
1347 | trace__tool_process, | ||
1348 | trace->host); | ||
1349 | } else { | ||
1350 | err = perf_event__synthesize_threads(&trace->tool, trace__tool_process, | ||
1351 | trace->host); | ||
1352 | } | ||
1353 | |||
1354 | if (err) | 1359 | if (err) |
1355 | symbol__exit(); | 1360 | symbol__exit(); |
1356 | 1361 | ||
@@ -1607,7 +1612,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel, | |||
1607 | args, trace, thread); | 1612 | args, trace, thread); |
1608 | 1613 | ||
1609 | if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) { | 1614 | if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) { |
1610 | if (!trace->duration_filter) { | 1615 | if (!trace->duration_filter && !trace->summary_only) { |
1611 | trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output); | 1616 | trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output); |
1612 | fprintf(trace->output, "%-70s\n", ttrace->entry_str); | 1617 | fprintf(trace->output, "%-70s\n", ttrace->entry_str); |
1613 | } | 1618 | } |
@@ -1660,6 +1665,9 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel, | |||
1660 | } else if (trace->duration_filter) | 1665 | } else if (trace->duration_filter) |
1661 | goto out; | 1666 | goto out; |
1662 | 1667 | ||
1668 | if (trace->summary_only) | ||
1669 | goto out; | ||
1670 | |||
1663 | trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output); | 1671 | trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output); |
1664 | 1672 | ||
1665 | if (ttrace->entry_pending) { | 1673 | if (ttrace->entry_pending) { |
@@ -1762,16 +1770,6 @@ static int trace__process_sample(struct perf_tool *tool, | |||
1762 | return err; | 1770 | return err; |
1763 | } | 1771 | } |
1764 | 1772 | ||
1765 | static bool | ||
1766 | perf_session__has_tp(struct perf_session *session, const char *name) | ||
1767 | { | ||
1768 | struct perf_evsel *evsel; | ||
1769 | |||
1770 | evsel = perf_evlist__find_tracepoint_by_name(session->evlist, name); | ||
1771 | |||
1772 | return evsel != NULL; | ||
1773 | } | ||
1774 | |||
1775 | static int parse_target_str(struct trace *trace) | 1773 | static int parse_target_str(struct trace *trace) |
1776 | { | 1774 | { |
1777 | if (trace->opts.target.pid) { | 1775 | if (trace->opts.target.pid) { |
@@ -1824,8 +1822,7 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp); | |||
1824 | 1822 | ||
1825 | static void perf_evlist__add_vfs_getname(struct perf_evlist *evlist) | 1823 | static void perf_evlist__add_vfs_getname(struct perf_evlist *evlist) |
1826 | { | 1824 | { |
1827 | struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname", | 1825 | struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname"); |
1828 | evlist->nr_entries); | ||
1829 | if (evsel == NULL) | 1826 | if (evsel == NULL) |
1830 | return; | 1827 | return; |
1831 | 1828 | ||
@@ -2009,8 +2006,6 @@ out_error: | |||
2009 | static int trace__replay(struct trace *trace) | 2006 | static int trace__replay(struct trace *trace) |
2010 | { | 2007 | { |
2011 | const struct perf_evsel_str_handler handlers[] = { | 2008 | const struct perf_evsel_str_handler handlers[] = { |
2012 | { "raw_syscalls:sys_enter", trace__sys_enter, }, | ||
2013 | { "raw_syscalls:sys_exit", trace__sys_exit, }, | ||
2014 | { "probe:vfs_getname", trace__vfs_getname, }, | 2009 | { "probe:vfs_getname", trace__vfs_getname, }, |
2015 | }; | 2010 | }; |
2016 | struct perf_data_file file = { | 2011 | struct perf_data_file file = { |
@@ -2018,6 +2013,7 @@ static int trace__replay(struct trace *trace) | |||
2018 | .mode = PERF_DATA_MODE_READ, | 2013 | .mode = PERF_DATA_MODE_READ, |
2019 | }; | 2014 | }; |
2020 | struct perf_session *session; | 2015 | struct perf_session *session; |
2016 | struct perf_evsel *evsel; | ||
2021 | int err = -1; | 2017 | int err = -1; |
2022 | 2018 | ||
2023 | trace->tool.sample = trace__process_sample; | 2019 | trace->tool.sample = trace__process_sample; |
@@ -2049,13 +2045,29 @@ static int trace__replay(struct trace *trace) | |||
2049 | if (err) | 2045 | if (err) |
2050 | goto out; | 2046 | goto out; |
2051 | 2047 | ||
2052 | if (!perf_session__has_tp(session, "raw_syscalls:sys_enter")) { | 2048 | evsel = perf_evlist__find_tracepoint_by_name(session->evlist, |
2053 | pr_err("Data file does not have raw_syscalls:sys_enter events\n"); | 2049 | "raw_syscalls:sys_enter"); |
2050 | if (evsel == NULL) { | ||
2051 | pr_err("Data file does not have raw_syscalls:sys_enter event\n"); | ||
2054 | goto out; | 2052 | goto out; |
2055 | } | 2053 | } |
2056 | 2054 | ||
2057 | if (!perf_session__has_tp(session, "raw_syscalls:sys_exit")) { | 2055 | if (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 || |
2058 | pr_err("Data file does not have raw_syscalls:sys_exit events\n"); | 2056 | perf_evsel__init_sc_tp_ptr_field(evsel, args)) { |
2057 | pr_err("Error during initialize raw_syscalls:sys_enter event\n"); | ||
2058 | goto out; | ||
2059 | } | ||
2060 | |||
2061 | evsel = perf_evlist__find_tracepoint_by_name(session->evlist, | ||
2062 | "raw_syscalls:sys_exit"); | ||
2063 | if (evsel == NULL) { | ||
2064 | pr_err("Data file does not have raw_syscalls:sys_exit event\n"); | ||
2065 | goto out; | ||
2066 | } | ||
2067 | |||
2068 | if (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 || | ||
2069 | perf_evsel__init_sc_tp_uint_field(evsel, ret)) { | ||
2070 | pr_err("Error during initialize raw_syscalls:sys_exit event\n"); | ||
2059 | goto out; | 2071 | goto out; |
2060 | } | 2072 | } |
2061 | 2073 | ||
@@ -2082,12 +2094,7 @@ static size_t trace__fprintf_threads_header(FILE *fp) | |||
2082 | { | 2094 | { |
2083 | size_t printed; | 2095 | size_t printed; |
2084 | 2096 | ||
2085 | printed = fprintf(fp, "\n _____________________________________________________________________________\n"); | 2097 | printed = fprintf(fp, "\n Summary of events:\n\n"); |
2086 | printed += fprintf(fp, " __) Summary of events (__\n\n"); | ||
2087 | printed += fprintf(fp, " [ task - pid ] [ events ] [ ratio ] [ runtime ]\n"); | ||
2088 | printed += fprintf(fp, " syscall count min max avg stddev\n"); | ||
2089 | printed += fprintf(fp, " msec msec msec %%\n"); | ||
2090 | printed += fprintf(fp, " _____________________________________________________________________________\n\n"); | ||
2091 | 2098 | ||
2092 | return printed; | 2099 | return printed; |
2093 | } | 2100 | } |
@@ -2105,6 +2112,10 @@ static size_t thread__dump_stats(struct thread_trace *ttrace, | |||
2105 | 2112 | ||
2106 | printed += fprintf(fp, "\n"); | 2113 | printed += fprintf(fp, "\n"); |
2107 | 2114 | ||
2115 | printed += fprintf(fp, " msec/call\n"); | ||
2116 | printed += fprintf(fp, " syscall calls min avg max stddev\n"); | ||
2117 | printed += fprintf(fp, " --------------- -------- -------- -------- -------- ------\n"); | ||
2118 | |||
2108 | /* each int_node is a syscall */ | 2119 | /* each int_node is a syscall */ |
2109 | while (inode) { | 2120 | while (inode) { |
2110 | stats = inode->priv; | 2121 | stats = inode->priv; |
@@ -2119,10 +2130,10 @@ static size_t thread__dump_stats(struct thread_trace *ttrace, | |||
2119 | avg /= NSEC_PER_MSEC; | 2130 | avg /= NSEC_PER_MSEC; |
2120 | 2131 | ||
2121 | sc = &trace->syscalls.table[inode->i]; | 2132 | sc = &trace->syscalls.table[inode->i]; |
2122 | printed += fprintf(fp, "%24s %14s : ", "", sc->name); | 2133 | printed += fprintf(fp, " %-15s", sc->name); |
2123 | printed += fprintf(fp, "%5" PRIu64 " %8.3f %8.3f", | 2134 | printed += fprintf(fp, " %8" PRIu64 " %8.3f %8.3f", |
2124 | n, min, max); | 2135 | n, min, avg); |
2125 | printed += fprintf(fp, " %8.3f %6.2f\n", avg, pct); | 2136 | printed += fprintf(fp, " %8.3f %6.2f\n", max, pct); |
2126 | } | 2137 | } |
2127 | 2138 | ||
2128 | inode = intlist__next(inode); | 2139 | inode = intlist__next(inode); |
@@ -2163,10 +2174,10 @@ static int trace__fprintf_one_thread(struct thread *thread, void *priv) | |||
2163 | else if (ratio > 5.0) | 2174 | else if (ratio > 5.0) |
2164 | color = PERF_COLOR_YELLOW; | 2175 | color = PERF_COLOR_YELLOW; |
2165 | 2176 | ||
2166 | printed += color_fprintf(fp, color, "%20s", thread__comm_str(thread)); | 2177 | printed += color_fprintf(fp, color, " %s (%d), ", thread__comm_str(thread), thread->tid); |
2167 | printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events); | 2178 | printed += fprintf(fp, "%lu events, ", ttrace->nr_events); |
2168 | printed += color_fprintf(fp, color, "%5.1f%%", ratio); | 2179 | printed += color_fprintf(fp, color, "%.1f%%", ratio); |
2169 | printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms); | 2180 | printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms); |
2170 | printed += thread__dump_stats(ttrace, trace, fp); | 2181 | printed += thread__dump_stats(ttrace, trace, fp); |
2171 | 2182 | ||
2172 | data->printed += printed; | 2183 | data->printed += printed; |
@@ -2275,8 +2286,10 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused) | |||
2275 | OPT_INCR('v', "verbose", &verbose, "be more verbose"), | 2286 | OPT_INCR('v', "verbose", &verbose, "be more verbose"), |
2276 | OPT_BOOLEAN('T', "time", &trace.full_time, | 2287 | OPT_BOOLEAN('T', "time", &trace.full_time, |
2277 | "Show full timestamp, not time relative to first start"), | 2288 | "Show full timestamp, not time relative to first start"), |
2278 | OPT_BOOLEAN(0, "summary", &trace.summary, | 2289 | OPT_BOOLEAN('s', "summary", &trace.summary_only, |
2279 | "Show syscall summary with statistics"), | 2290 | "Show only syscall summary with statistics"), |
2291 | OPT_BOOLEAN('S', "with-summary", &trace.summary, | ||
2292 | "Show all syscalls and summary with statistics"), | ||
2280 | OPT_END() | 2293 | OPT_END() |
2281 | }; | 2294 | }; |
2282 | int err; | 2295 | int err; |
@@ -2287,6 +2300,10 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused) | |||
2287 | 2300 | ||
2288 | argc = parse_options(argc, argv, trace_options, trace_usage, 0); | 2301 | argc = parse_options(argc, argv, trace_options, trace_usage, 0); |
2289 | 2302 | ||
2303 | /* summary_only implies summary option, but don't overwrite summary if set */ | ||
2304 | if (trace.summary_only) | ||
2305 | trace.summary = trace.summary_only; | ||
2306 | |||
2290 | if (output_name != NULL) { | 2307 | if (output_name != NULL) { |
2291 | err = trace__open_output(&trace, output_name); | 2308 | err = trace__open_output(&trace, output_name); |
2292 | if (err < 0) { | 2309 | if (err < 0) { |
@@ -2310,21 +2327,21 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused) | |||
2310 | } | 2327 | } |
2311 | } | 2328 | } |
2312 | 2329 | ||
2313 | err = perf_target__validate(&trace.opts.target); | 2330 | err = target__validate(&trace.opts.target); |
2314 | if (err) { | 2331 | if (err) { |
2315 | perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf)); | 2332 | target__strerror(&trace.opts.target, err, bf, sizeof(bf)); |
2316 | fprintf(trace.output, "%s", bf); | 2333 | fprintf(trace.output, "%s", bf); |
2317 | goto out_close; | 2334 | goto out_close; |
2318 | } | 2335 | } |
2319 | 2336 | ||
2320 | err = perf_target__parse_uid(&trace.opts.target); | 2337 | err = target__parse_uid(&trace.opts.target); |
2321 | if (err) { | 2338 | if (err) { |
2322 | perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf)); | 2339 | target__strerror(&trace.opts.target, err, bf, sizeof(bf)); |
2323 | fprintf(trace.output, "%s", bf); | 2340 | fprintf(trace.output, "%s", bf); |
2324 | goto out_close; | 2341 | goto out_close; |
2325 | } | 2342 | } |
2326 | 2343 | ||
2327 | if (!argc && perf_target__none(&trace.opts.target)) | 2344 | if (!argc && target__none(&trace.opts.target)) |
2328 | trace.opts.target.system_wide = true; | 2345 | trace.opts.target.system_wide = true; |
2329 | 2346 | ||
2330 | if (input_name) | 2347 | if (input_name) |
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 6a587e84fdfe..b079304bd53d 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -248,7 +248,7 @@ enum perf_call_graph_mode { | |||
248 | }; | 248 | }; |
249 | 249 | ||
250 | struct perf_record_opts { | 250 | struct perf_record_opts { |
251 | struct perf_target target; | 251 | struct target target; |
252 | int call_graph; | 252 | int call_graph; |
253 | bool group; | 253 | bool group; |
254 | bool inherit_stat; | 254 | bool inherit_stat; |
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 49ccc3b2995e..85d4919dd623 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -275,8 +275,19 @@ static int process_event(struct machine *machine, struct perf_evlist *evlist, | |||
275 | if (event->header.type == PERF_RECORD_SAMPLE) | 275 | if (event->header.type == PERF_RECORD_SAMPLE) |
276 | return process_sample_event(machine, evlist, event, state); | 276 | return process_sample_event(machine, evlist, event, state); |
277 | 277 | ||
278 | if (event->header.type < PERF_RECORD_MAX) | 278 | if (event->header.type == PERF_RECORD_THROTTLE || |
279 | return machine__process_event(machine, event, NULL); | 279 | event->header.type == PERF_RECORD_UNTHROTTLE) |
280 | return 0; | ||
281 | |||
282 | if (event->header.type < PERF_RECORD_MAX) { | ||
283 | int ret; | ||
284 | |||
285 | ret = machine__process_event(machine, event, NULL); | ||
286 | if (ret < 0) | ||
287 | pr_debug("machine__process_event failed, event type %u\n", | ||
288 | event->header.type); | ||
289 | return ret; | ||
290 | } | ||
280 | 291 | ||
281 | return 0; | 292 | return 0; |
282 | } | 293 | } |
@@ -441,7 +452,7 @@ static int do_test_code_reading(bool try_kcore) | |||
441 | } | 452 | } |
442 | 453 | ||
443 | ret = perf_event__synthesize_thread_map(NULL, threads, | 454 | ret = perf_event__synthesize_thread_map(NULL, threads, |
444 | perf_event__process, machine); | 455 | perf_event__process, machine, false); |
445 | if (ret < 0) { | 456 | if (ret < 0) { |
446 | pr_debug("perf_event__synthesize_thread_map failed\n"); | 457 | pr_debug("perf_event__synthesize_thread_map failed\n"); |
447 | goto out_err; | 458 | goto out_err; |
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 9b98c1554833..4774f7fbb758 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -32,7 +32,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, | |||
32 | 32 | ||
33 | int test__perf_evsel__tp_sched_test(void) | 33 | int test__perf_evsel__tp_sched_test(void) |
34 | { | 34 | { |
35 | struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0); | 35 | struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch"); |
36 | int ret = 0; | 36 | int ret = 0; |
37 | 37 | ||
38 | if (evsel == NULL) { | 38 | if (evsel == NULL) { |
@@ -63,7 +63,7 @@ int test__perf_evsel__tp_sched_test(void) | |||
63 | 63 | ||
64 | perf_evsel__delete(evsel); | 64 | perf_evsel__delete(evsel); |
65 | 65 | ||
66 | evsel = perf_evsel__newtp("sched", "sched_wakeup", 0); | 66 | evsel = perf_evsel__newtp("sched", "sched_wakeup"); |
67 | 67 | ||
68 | if (perf_evsel__test_field(evsel, "comm", 16, true)) | 68 | if (perf_evsel__test_field(evsel, "comm", 16, true)) |
69 | ret = -1; | 69 | ret = -1; |
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index a7232c204eb9..d64ab79c6d35 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -65,7 +65,7 @@ int test__basic_mmap(void) | |||
65 | char name[64]; | 65 | char name[64]; |
66 | 66 | ||
67 | snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); | 67 | snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); |
68 | evsels[i] = perf_evsel__newtp("syscalls", name, i); | 68 | evsels[i] = perf_evsel__newtp("syscalls", name); |
69 | if (evsels[i] == NULL) { | 69 | if (evsels[i] == NULL) { |
70 | pr_debug("perf_evsel__new\n"); | 70 | pr_debug("perf_evsel__new\n"); |
71 | goto out_free_evlist; | 71 | goto out_free_evlist; |
diff --git a/tools/perf/tests/open-syscall-all-cpus.c b/tools/perf/tests/open-syscall-all-cpus.c
index b0657a9ccda6..5fecdbd2f5f7 100644
--- a/tools/perf/tests/open-syscall-all-cpus.c
+++ b/tools/perf/tests/open-syscall-all-cpus.c
@@ -26,7 +26,7 @@ int test__open_syscall_event_on_all_cpus(void) | |||
26 | 26 | ||
27 | CPU_ZERO(&cpu_set); | 27 | CPU_ZERO(&cpu_set); |
28 | 28 | ||
29 | evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); | 29 | evsel = perf_evsel__newtp("syscalls", "sys_enter_open"); |
30 | if (evsel == NULL) { | 30 | if (evsel == NULL) { |
31 | pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); | 31 | pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); |
32 | goto out_thread_map_delete; | 32 | goto out_thread_map_delete; |
diff --git a/tools/perf/tests/open-syscall-tp-fields.c b/tools/perf/tests/open-syscall-tp-fields.c
index 524b221b829b..41cc0badb74b 100644
--- a/tools/perf/tests/open-syscall-tp-fields.c
+++ b/tools/perf/tests/open-syscall-tp-fields.c
@@ -27,7 +27,7 @@ int test__syscall_open_tp_fields(void) | |||
27 | goto out; | 27 | goto out; |
28 | } | 28 | } |
29 | 29 | ||
30 | evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); | 30 | evsel = perf_evsel__newtp("syscalls", "sys_enter_open"); |
31 | if (evsel == NULL) { | 31 | if (evsel == NULL) { |
32 | pr_debug("%s: perf_evsel__newtp\n", __func__); | 32 | pr_debug("%s: perf_evsel__newtp\n", __func__); |
33 | goto out_delete_evlist; | 33 | goto out_delete_evlist; |
diff --git a/tools/perf/tests/open-syscall.c b/tools/perf/tests/open-syscall.c
index befc0671f95d..c1dc7d25f38c 100644
--- a/tools/perf/tests/open-syscall.c
+++ b/tools/perf/tests/open-syscall.c
@@ -15,7 +15,7 @@ int test__open_syscall_event(void) | |||
15 | return -1; | 15 | return -1; |
16 | } | 16 | } |
17 | 17 | ||
18 | evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); | 18 | evsel = perf_evsel__newtp("syscalls", "sys_enter_open"); |
19 | if (evsel == NULL) { | 19 | if (evsel == NULL) { |
20 | pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); | 20 | pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); |
21 | goto out_thread_map_delete; | 21 | goto out_thread_map_delete; |
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 6e2b44ec0749..6664a7cd828c 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -9,7 +9,7 @@ | |||
9 | #include "util/cpumap.h" | 9 | #include "util/cpumap.h" |
10 | #include "util/thread_map.h" | 10 | #include "util/thread_map.h" |
11 | 11 | ||
12 | #define NR_LOOPS 1000000 | 12 | #define NR_LOOPS 10000000 |
13 | 13 | ||
14 | /* | 14 | /* |
15 | * This test will open software clock events (cpu-clock, task-clock) | 15 | * This test will open software clock events (cpu-clock, task-clock) |
@@ -34,7 +34,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) | |||
34 | .freq = 1, | 34 | .freq = 1, |
35 | }; | 35 | }; |
36 | 36 | ||
37 | attr.sample_freq = 10000; | 37 | attr.sample_freq = 500; |
38 | 38 | ||
39 | evlist = perf_evlist__new(); | 39 | evlist = perf_evlist__new(); |
40 | if (evlist == NULL) { | 40 | if (evlist == NULL) { |
@@ -42,7 +42,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) | |||
42 | return -1; | 42 | return -1; |
43 | } | 43 | } |
44 | 44 | ||
45 | evsel = perf_evsel__new(&attr, 0); | 45 | evsel = perf_evsel__new(&attr); |
46 | if (evsel == NULL) { | 46 | if (evsel == NULL) { |
47 | pr_debug("perf_evsel__new\n"); | 47 | pr_debug("perf_evsel__new\n"); |
48 | goto out_free_evlist; | 48 | goto out_free_evlist; |
@@ -57,7 +57,14 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) | |||
57 | goto out_delete_maps; | 57 | goto out_delete_maps; |
58 | } | 58 | } |
59 | 59 | ||
60 | perf_evlist__open(evlist); | 60 | if (perf_evlist__open(evlist)) { |
61 | const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate"; | ||
62 | |||
63 | err = -errno; | ||
64 | pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n", | ||
65 | strerror(errno), knob, (u64)attr.sample_freq); | ||
66 | goto out_delete_maps; | ||
67 | } | ||
61 | 68 | ||
62 | err = perf_evlist__mmap(evlist, 128, true); | 69 | err = perf_evlist__mmap(evlist, 128, true); |
63 | if (err < 0) { | 70 | if (err < 0) { |
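The hint added above points the user at the perf_event_max_sample_rate sysctl; a small sketch (assumed helper, not from the patch) of reading that knob programmatically:

```c
#include <stdio.h>

/* Returns the kernel's current sample-rate limit, or 0 if unreadable. */
static unsigned long long read_max_sample_rate(void)
{
	const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";
	unsigned long long max = 0;
	FILE *fp = fopen(knob, "r");

	if (fp) {
		if (fscanf(fp, "%llu", &max) != 1)
			max = 0;
		fclose(fp);
	}

	return max;
}
```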
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index c33d95f9559a..d09ab579119e 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -28,7 +28,7 @@ int test__task_exit(void) | |||
28 | union perf_event *event; | 28 | union perf_event *event; |
29 | struct perf_evsel *evsel; | 29 | struct perf_evsel *evsel; |
30 | struct perf_evlist *evlist; | 30 | struct perf_evlist *evlist; |
31 | struct perf_target target = { | 31 | struct target target = { |
32 | .uid = UINT_MAX, | 32 | .uid = UINT_MAX, |
33 | .uses_mmap = true, | 33 | .uses_mmap = true, |
34 | }; | 34 | }; |
diff --git a/tools/perf/ui/tui/progress.c b/tools/perf/ui/tui/progress.c
index 3e2d936d7443..c61d14b101e0 100644
--- a/tools/perf/ui/tui/progress.c
+++ b/tools/perf/ui/tui/progress.c
@@ -18,13 +18,14 @@ static void tui_progress__update(struct ui_progress *p) | |||
18 | if (p->total == 0) | 18 | if (p->total == 0) |
19 | return; | 19 | return; |
20 | 20 | ||
21 | ui__refresh_dimensions(true); | 21 | ui__refresh_dimensions(false); |
22 | pthread_mutex_lock(&ui__lock); | 22 | pthread_mutex_lock(&ui__lock); |
23 | y = SLtt_Screen_Rows / 2 - 2; | 23 | y = SLtt_Screen_Rows / 2 - 2; |
24 | SLsmg_set_color(0); | 24 | SLsmg_set_color(0); |
25 | SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols); | 25 | SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols); |
26 | SLsmg_gotorc(y++, 1); | 26 | SLsmg_gotorc(y++, 1); |
27 | SLsmg_write_string((char *)p->title); | 27 | SLsmg_write_string((char *)p->title); |
28 | SLsmg_fill_region(y, 1, 1, SLtt_Screen_Cols - 2, ' '); | ||
28 | SLsmg_set_color(HE_COLORSET_SELECTED); | 29 | SLsmg_set_color(HE_COLORSET_SELECTED); |
29 | bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total; | 30 | bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total; |
30 | SLsmg_fill_region(y, 1, 1, bar, ' '); | 31 | SLsmg_fill_region(y, 1, 1, bar, ' '); |
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index ec9ae1114ed4..6e3a846aed0e 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -170,7 +170,8 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, | |||
170 | union perf_event *event, | 170 | union perf_event *event, |
171 | pid_t pid, pid_t tgid, | 171 | pid_t pid, pid_t tgid, |
172 | perf_event__handler_t process, | 172 | perf_event__handler_t process, |
173 | struct machine *machine) | 173 | struct machine *machine, |
174 | bool mmap_data) | ||
174 | { | 175 | { |
175 | char filename[PATH_MAX]; | 176 | char filename[PATH_MAX]; |
176 | FILE *fp; | 177 | FILE *fp; |
@@ -188,10 +189,6 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, | |||
188 | } | 189 | } |
189 | 190 | ||
190 | event->header.type = PERF_RECORD_MMAP; | 191 | event->header.type = PERF_RECORD_MMAP; |
191 | /* | ||
192 | * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c | ||
193 | */ | ||
194 | event->header.misc = PERF_RECORD_MISC_USER; | ||
195 | 192 | ||
196 | while (1) { | 193 | while (1) { |
197 | char bf[BUFSIZ]; | 194 | char bf[BUFSIZ]; |
@@ -215,9 +212,17 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, | |||
215 | 212 | ||
216 | if (n != 5) | 213 | if (n != 5) |
217 | continue; | 214 | continue; |
215 | /* | ||
216 | * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c | ||
217 | */ | ||
218 | event->header.misc = PERF_RECORD_MISC_USER; | ||
218 | 219 | ||
219 | if (prot[2] != 'x') | 220 | if (prot[2] != 'x') { |
220 | continue; | 221 | if (!mmap_data || prot[0] != 'r') |
222 | continue; | ||
223 | |||
224 | event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; | ||
225 | } | ||
221 | 226 | ||
222 | if (!strcmp(execname, "")) | 227 | if (!strcmp(execname, "")) |
223 | strcpy(execname, anonstr); | 228 | strcpy(execname, anonstr); |
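The prot checks above read the permission column of /proc/<pid>/maps (e.g. "r-xp" or "rw-p"); a condensed sketch (assumed helper name) of the decision they implement:

```c
#include <stdbool.h>

/* Should a maps entry be synthesized as a PERF_RECORD_MMAP, and does it
 * carry PERF_RECORD_MISC_MMAP_DATA? Sketch of the logic above. */
static bool keep_map(const char *prot, bool mmap_data, bool *is_data)
{
	*is_data = false;

	if (prot[2] == 'x')			/* executable mapping: always kept */
		return true;

	if (mmap_data && prot[0] == 'r') {	/* readable data: kept only with --data */
		*is_data = true;
		return true;
	}

	return false;				/* skipped, as before the patch */
}
```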
@@ -304,20 +309,21 @@ static int __event__synthesize_thread(union perf_event *comm_event, | |||
304 | pid_t pid, int full, | 309 | pid_t pid, int full, |
305 | perf_event__handler_t process, | 310 | perf_event__handler_t process, |
306 | struct perf_tool *tool, | 311 | struct perf_tool *tool, |
307 | struct machine *machine) | 312 | struct machine *machine, bool mmap_data) |
308 | { | 313 | { |
309 | pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full, | 314 | pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full, |
310 | process, machine); | 315 | process, machine); |
311 | if (tgid == -1) | 316 | if (tgid == -1) |
312 | return -1; | 317 | return -1; |
313 | return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, | 318 | return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, |
314 | process, machine); | 319 | process, machine, mmap_data); |
315 | } | 320 | } |
316 | 321 | ||
317 | int perf_event__synthesize_thread_map(struct perf_tool *tool, | 322 | int perf_event__synthesize_thread_map(struct perf_tool *tool, |
318 | struct thread_map *threads, | 323 | struct thread_map *threads, |
319 | perf_event__handler_t process, | 324 | perf_event__handler_t process, |
320 | struct machine *machine) | 325 | struct machine *machine, |
326 | bool mmap_data) | ||
321 | { | 327 | { |
322 | union perf_event *comm_event, *mmap_event; | 328 | union perf_event *comm_event, *mmap_event; |
323 | int err = -1, thread, j; | 329 | int err = -1, thread, j; |
@@ -334,7 +340,8 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, | |||
334 | for (thread = 0; thread < threads->nr; ++thread) { | 340 | for (thread = 0; thread < threads->nr; ++thread) { |
335 | if (__event__synthesize_thread(comm_event, mmap_event, | 341 | if (__event__synthesize_thread(comm_event, mmap_event, |
336 | threads->map[thread], 0, | 342 | threads->map[thread], 0, |
337 | process, tool, machine)) { | 343 | process, tool, machine, |
344 | mmap_data)) { | ||
338 | err = -1; | 345 | err = -1; |
339 | break; | 346 | break; |
340 | } | 347 | } |
@@ -356,10 +363,10 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, | |||
356 | 363 | ||
357 | /* if not, generate events for it */ | 364 | /* if not, generate events for it */ |
358 | if (need_leader && | 365 | if (need_leader && |
359 | __event__synthesize_thread(comm_event, | 366 | __event__synthesize_thread(comm_event, mmap_event, |
360 | mmap_event, | 367 | comm_event->comm.pid, 0, |
361 | comm_event->comm.pid, 0, | 368 | process, tool, machine, |
362 | process, tool, machine)) { | 369 | mmap_data)) { |
363 | err = -1; | 370 | err = -1; |
364 | break; | 371 | break; |
365 | } | 372 | } |
@@ -374,7 +381,7 @@ out: | |||
374 | 381 | ||
375 | int perf_event__synthesize_threads(struct perf_tool *tool, | 382 | int perf_event__synthesize_threads(struct perf_tool *tool, |
376 | perf_event__handler_t process, | 383 | perf_event__handler_t process, |
377 | struct machine *machine) | 384 | struct machine *machine, bool mmap_data) |
378 | { | 385 | { |
379 | DIR *proc; | 386 | DIR *proc; |
380 | struct dirent dirent, *next; | 387 | struct dirent dirent, *next; |
@@ -404,7 +411,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool, | |||
404 | * one thread couldn't be synthesized. | 411 | * one thread couldn't be synthesized. |
405 | */ | 412 | */ |
406 | __event__synthesize_thread(comm_event, mmap_event, pid, 1, | 413 | __event__synthesize_thread(comm_event, mmap_event, pid, 1, |
407 | process, tool, machine); | 414 | process, tool, machine, mmap_data); |
408 | } | 415 | } |
409 | 416 | ||
410 | err = 0; | 417 | err = 0; |
@@ -528,19 +535,22 @@ int perf_event__process_lost(struct perf_tool *tool __maybe_unused, | |||
528 | 535 | ||
529 | size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) | 536 | size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) |
530 | { | 537 | { |
531 | return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n", | 538 | return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n", |
532 | event->mmap.pid, event->mmap.tid, event->mmap.start, | 539 | event->mmap.pid, event->mmap.tid, event->mmap.start, |
533 | event->mmap.len, event->mmap.pgoff, event->mmap.filename); | 540 | event->mmap.len, event->mmap.pgoff, |
541 | (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x', | ||
542 | event->mmap.filename); | ||
534 | } | 543 | } |
535 | 544 | ||
536 | size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp) | 545 | size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp) |
537 | { | 546 | { |
538 | return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 | 547 | return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 |
539 | " %02x:%02x %"PRIu64" %"PRIu64"]: %s\n", | 548 | " %02x:%02x %"PRIu64" %"PRIu64"]: %c %s\n", |
540 | event->mmap2.pid, event->mmap2.tid, event->mmap2.start, | 549 | event->mmap2.pid, event->mmap2.tid, event->mmap2.start, |
541 | event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj, | 550 | event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj, |
542 | event->mmap2.min, event->mmap2.ino, | 551 | event->mmap2.min, event->mmap2.ino, |
543 | event->mmap2.ino_generation, | 552 | event->mmap2.ino_generation, |
553 | (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x', | ||
544 | event->mmap2.filename); | 554 | event->mmap2.filename); |
545 | } | 555 | } |
546 | 556 | ||
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index f8d70f3003ab..30fec9901e44 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -208,10 +208,10 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool, | |||
208 | int perf_event__synthesize_thread_map(struct perf_tool *tool, | 208 | int perf_event__synthesize_thread_map(struct perf_tool *tool, |
209 | struct thread_map *threads, | 209 | struct thread_map *threads, |
210 | perf_event__handler_t process, | 210 | perf_event__handler_t process, |
211 | struct machine *machine); | 211 | struct machine *machine, bool mmap_data); |
212 | int perf_event__synthesize_threads(struct perf_tool *tool, | 212 | int perf_event__synthesize_threads(struct perf_tool *tool, |
213 | perf_event__handler_t process, | 213 | perf_event__handler_t process, |
214 | struct machine *machine); | 214 | struct machine *machine, bool mmap_data); |
215 | int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, | 215 | int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, |
216 | perf_event__handler_t process, | 216 | perf_event__handler_t process, |
217 | struct machine *machine, | 217 | struct machine *machine, |
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index b939221efd8d..dc6fa3fbb180 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
@@ -117,6 +117,8 @@ void perf_evlist__delete(struct perf_evlist *evlist) | |||
117 | void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) | 117 | void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) |
118 | { | 118 | { |
119 | list_add_tail(&entry->node, &evlist->entries); | 119 | list_add_tail(&entry->node, &evlist->entries); |
120 | entry->idx = evlist->nr_entries; | ||
121 | |||
120 | if (!evlist->nr_entries++) | 122 | if (!evlist->nr_entries++) |
121 | perf_evlist__set_id_pos(evlist); | 123 | perf_evlist__set_id_pos(evlist); |
122 | } | 124 | } |
@@ -165,7 +167,7 @@ int perf_evlist__add_default(struct perf_evlist *evlist) | |||
165 | 167 | ||
166 | event_attr_init(&attr); | 168 | event_attr_init(&attr); |
167 | 169 | ||
168 | evsel = perf_evsel__new(&attr, 0); | 170 | evsel = perf_evsel__new(&attr); |
169 | if (evsel == NULL) | 171 | if (evsel == NULL) |
170 | goto error; | 172 | goto error; |
171 | 173 | ||
@@ -190,7 +192,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist, | |||
190 | size_t i; | 192 | size_t i; |
191 | 193 | ||
192 | for (i = 0; i < nr_attrs; i++) { | 194 | for (i = 0; i < nr_attrs; i++) { |
193 | evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i); | 195 | evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i); |
194 | if (evsel == NULL) | 196 | if (evsel == NULL) |
195 | goto out_delete_partial_list; | 197 | goto out_delete_partial_list; |
196 | list_add_tail(&evsel->node, &head); | 198 | list_add_tail(&evsel->node, &head); |
@@ -249,9 +251,8 @@ perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, | |||
249 | int perf_evlist__add_newtp(struct perf_evlist *evlist, | 251 | int perf_evlist__add_newtp(struct perf_evlist *evlist, |
250 | const char *sys, const char *name, void *handler) | 252 | const char *sys, const char *name, void *handler) |
251 | { | 253 | { |
252 | struct perf_evsel *evsel; | 254 | struct perf_evsel *evsel = perf_evsel__newtp(sys, name); |
253 | 255 | ||
254 | evsel = perf_evsel__newtp(sys, name, evlist->nr_entries); | ||
255 | if (evsel == NULL) | 256 | if (evsel == NULL) |
256 | return -1; | 257 | return -1; |
257 | 258 | ||
@@ -704,12 +705,10 @@ static size_t perf_evlist__mmap_size(unsigned long pages) | |||
704 | return (pages + 1) * page_size; | 705 | return (pages + 1) * page_size; |
705 | } | 706 | } |
706 | 707 | ||
707 | int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, | 708 | static long parse_pages_arg(const char *str, unsigned long min, |
708 | int unset __maybe_unused) | 709 | unsigned long max) |
709 | { | 710 | { |
710 | unsigned int *mmap_pages = opt->value; | ||
711 | unsigned long pages, val; | 711 | unsigned long pages, val; |
712 | size_t size; | ||
713 | static struct parse_tag tags[] = { | 712 | static struct parse_tag tags[] = { |
714 | { .tag = 'B', .mult = 1 }, | 713 | { .tag = 'B', .mult = 1 }, |
715 | { .tag = 'K', .mult = 1 << 10 }, | 714 | { .tag = 'K', .mult = 1 << 10 }, |
@@ -718,33 +717,49 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, | |||
718 | { .tag = 0 }, | 717 | { .tag = 0 }, |
719 | }; | 718 | }; |
720 | 719 | ||
720 | if (str == NULL) | ||
721 | return -EINVAL; | ||
722 | |||
721 | val = parse_tag_value(str, tags); | 723 | val = parse_tag_value(str, tags); |
722 | if (val != (unsigned long) -1) { | 724 | if (val != (unsigned long) -1) { |
723 | /* we got file size value */ | 725 | /* we got file size value */ |
724 | pages = PERF_ALIGN(val, page_size) / page_size; | 726 | pages = PERF_ALIGN(val, page_size) / page_size; |
725 | if (pages < (1UL << 31) && !is_power_of_2(pages)) { | ||
726 | pages = next_pow2(pages); | ||
727 | pr_info("rounding mmap pages size to %lu (%lu pages)\n", | ||
728 | pages * page_size, pages); | ||
729 | } | ||
730 | } else { | 727 | } else { |
731 | /* we got pages count value */ | 728 | /* we got pages count value */ |
732 | char *eptr; | 729 | char *eptr; |
733 | pages = strtoul(str, &eptr, 10); | 730 | pages = strtoul(str, &eptr, 10); |
734 | if (*eptr != '\0') { | 731 | if (*eptr != '\0') |
735 | pr_err("failed to parse --mmap_pages/-m value\n"); | 732 | return -EINVAL; |
736 | return -1; | ||
737 | } | ||
738 | } | 733 | } |
739 | 734 | ||
740 | if (pages > UINT_MAX || pages > SIZE_MAX / page_size) { | 735 | if ((pages == 0) && (min == 0)) { |
741 | pr_err("--mmap_pages/-m value too big\n"); | 736 | /* leave number of pages at 0 */ |
742 | return -1; | 737 | } else if (pages < (1UL << 31) && !is_power_of_2(pages)) { |
738 | /* round pages up to next power of 2 */ | ||
739 | pages = next_pow2(pages); | ||
740 | pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n", | ||
741 | pages * page_size, pages); | ||
743 | } | 742 | } |
744 | 743 | ||
745 | size = perf_evlist__mmap_size(pages); | 744 | if (pages > max) |
746 | if (!size) { | 745 | return -EINVAL; |
747 | pr_err("--mmap_pages/-m value must be a power of two."); | 746 | |
747 | return pages; | ||
748 | } | ||
749 | |||
750 | int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, | ||
751 | int unset __maybe_unused) | ||
752 | { | ||
753 | unsigned int *mmap_pages = opt->value; | ||
754 | unsigned long max = UINT_MAX; | ||
755 | long pages; | ||
756 | |||
757 | if (max < SIZE_MAX / page_size) | ||
758 | max = SIZE_MAX / page_size; | ||
759 | |||
760 | pages = parse_pages_arg(str, 1, max); | ||
761 | if (pages < 0) { | ||
762 | pr_err("Invalid argument for --mmap_pages/-m\n"); | ||
748 | return -1; | 763 | return -1; |
749 | } | 764 | } |
750 | 765 | ||
@@ -796,8 +811,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, | |||
796 | return perf_evlist__mmap_per_cpu(evlist, prot, mask); | 811 | return perf_evlist__mmap_per_cpu(evlist, prot, mask); |
797 | } | 812 | } |
798 | 813 | ||
799 | int perf_evlist__create_maps(struct perf_evlist *evlist, | 814 | int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) |
800 | struct perf_target *target) | ||
801 | { | 815 | { |
802 | evlist->threads = thread_map__new_str(target->pid, target->tid, | 816 | evlist->threads = thread_map__new_str(target->pid, target->tid, |
803 | target->uid); | 817 | target->uid); |
@@ -805,9 +819,9 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, | |||
805 | if (evlist->threads == NULL) | 819 | if (evlist->threads == NULL) |
806 | return -1; | 820 | return -1; |
807 | 821 | ||
808 | if (perf_target__has_task(target)) | 822 | if (target__has_task(target)) |
809 | evlist->cpus = cpu_map__dummy_new(); | 823 | evlist->cpus = cpu_map__dummy_new(); |
810 | else if (!perf_target__has_cpu(target) && !target->uses_mmap) | 824 | else if (!target__has_cpu(target) && !target->uses_mmap) |
811 | evlist->cpus = cpu_map__dummy_new(); | 825 | evlist->cpus = cpu_map__dummy_new(); |
812 | else | 826 | else |
813 | evlist->cpus = cpu_map__new(target->cpu_list); | 827 | evlist->cpus = cpu_map__new(target->cpu_list); |
@@ -1016,8 +1030,7 @@ out_err: | |||
1016 | return err; | 1030 | return err; |
1017 | } | 1031 | } |
1018 | 1032 | ||
1019 | int perf_evlist__prepare_workload(struct perf_evlist *evlist, | 1033 | int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target, |
1020 | struct perf_target *target, | ||
1021 | const char *argv[], bool pipe_output, | 1034 | const char *argv[], bool pipe_output, |
1022 | bool want_signal) | 1035 | bool want_signal) |
1023 | { | 1036 | { |
@@ -1069,7 +1082,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, | |||
1069 | exit(-1); | 1082 | exit(-1); |
1070 | } | 1083 | } |
1071 | 1084 | ||
1072 | if (perf_target__none(target)) | 1085 | if (target__none(target)) |
1073 | evlist->threads->map[0] = evlist->workload.pid; | 1086 | evlist->threads->map[0] = evlist->workload.pid; |
1074 | 1087 | ||
1075 | close(child_ready_pipe[1]); | 1088 | close(child_ready_pipe[1]); |
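The mmap-pages rework above moves the option parsing into a reusable parse_pages_arg() and rounds non-power-of-two requests up instead of rejecting them. The sketch below restates that rule in isolation; is_power_of_2(), next_pow2() and the demo values are local stand-ins, not the helpers perf actually links against.

#include <errno.h>
#include <stdio.h>

static int is_power_of_2(unsigned long x)
{
        return x != 0 && (x & (x - 1)) == 0;
}

static unsigned long next_pow2(unsigned long x)
{
        unsigned long p = 1;

        while (p < x)
                p <<= 1;
        return p;
}

/*
 * Illustrative restatement of what parse_pages_arg() enforces: 0 is only
 * accepted when the caller allows it (min == 0), anything else is rounded
 * up to the next power of two and checked against max. A sketch, not the
 * perf implementation.
 */
static long round_mmap_pages(unsigned long pages, unsigned long min,
                             unsigned long max)
{
        if (pages == 0 && min == 0)
                return 0;
        if (pages < (1UL << 31) && !is_power_of_2(pages)) {
                pages = next_pow2(pages);
                fprintf(stderr, "rounding to %lu pages\n", pages);
        }
        if (pages > max)
                return -EINVAL;
        return (long)pages;
}

int main(void)
{
        printf("%ld\n", round_mmap_pages(3, 1, 1UL << 20));   /* -> 4   */
        printf("%ld\n", round_mmap_pages(129, 1, 1UL << 20)); /* -> 256 */
        printf("%ld\n", round_mmap_pages(0, 0, 1UL << 20));   /* -> 0   */
        return 0;
}

Under these rules a request like '-m 3' is treated as '-m 4', and an oversized value comes back as -EINVAL for the option callback to report.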
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index ecaa582f40e2..649d6ea98a84 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h | |||
@@ -102,7 +102,7 @@ void perf_evlist__config(struct perf_evlist *evlist, | |||
102 | int perf_record_opts__config(struct perf_record_opts *opts); | 102 | int perf_record_opts__config(struct perf_record_opts *opts); |
103 | 103 | ||
104 | int perf_evlist__prepare_workload(struct perf_evlist *evlist, | 104 | int perf_evlist__prepare_workload(struct perf_evlist *evlist, |
105 | struct perf_target *target, | 105 | struct target *target, |
106 | const char *argv[], bool pipe_output, | 106 | const char *argv[], bool pipe_output, |
107 | bool want_signal); | 107 | bool want_signal); |
108 | int perf_evlist__start_workload(struct perf_evlist *evlist); | 108 | int perf_evlist__start_workload(struct perf_evlist *evlist); |
@@ -134,8 +134,7 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist, | |||
134 | evlist->threads = threads; | 134 | evlist->threads = threads; |
135 | } | 135 | } |
136 | 136 | ||
137 | int perf_evlist__create_maps(struct perf_evlist *evlist, | 137 | int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target); |
138 | struct perf_target *target); | ||
139 | void perf_evlist__delete_maps(struct perf_evlist *evlist); | 138 | void perf_evlist__delete_maps(struct perf_evlist *evlist); |
140 | int perf_evlist__apply_filters(struct perf_evlist *evlist); | 139 | int perf_evlist__apply_filters(struct perf_evlist *evlist); |
141 | 140 | ||
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 5280820ed389..18f7c188ff63 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -168,7 +168,7 @@ void perf_evsel__init(struct perf_evsel *evsel, | |||
168 | perf_evsel__calc_id_pos(evsel); | 168 | perf_evsel__calc_id_pos(evsel); |
169 | } | 169 | } |
170 | 170 | ||
171 | struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) | 171 | struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx) |
172 | { | 172 | { |
173 | struct perf_evsel *evsel = zalloc(sizeof(*evsel)); | 173 | struct perf_evsel *evsel = zalloc(sizeof(*evsel)); |
174 | 174 | ||
@@ -219,7 +219,7 @@ out: | |||
219 | return format; | 219 | return format; |
220 | } | 220 | } |
221 | 221 | ||
222 | struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx) | 222 | struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx) |
223 | { | 223 | { |
224 | struct perf_evsel *evsel = zalloc(sizeof(*evsel)); | 224 | struct perf_evsel *evsel = zalloc(sizeof(*evsel)); |
225 | 225 | ||
@@ -645,7 +645,7 @@ void perf_evsel__config(struct perf_evsel *evsel, | |||
645 | } | 645 | } |
646 | } | 646 | } |
647 | 647 | ||
648 | if (perf_target__has_cpu(&opts->target)) | 648 | if (target__has_cpu(&opts->target)) |
649 | perf_evsel__set_sample_bit(evsel, CPU); | 649 | perf_evsel__set_sample_bit(evsel, CPU); |
650 | 650 | ||
651 | if (opts->period) | 651 | if (opts->period) |
@@ -653,7 +653,7 @@ void perf_evsel__config(struct perf_evsel *evsel, | |||
653 | 653 | ||
654 | if (!perf_missing_features.sample_id_all && | 654 | if (!perf_missing_features.sample_id_all && |
655 | (opts->sample_time || !opts->no_inherit || | 655 | (opts->sample_time || !opts->no_inherit || |
656 | perf_target__has_cpu(&opts->target))) | 656 | target__has_cpu(&opts->target))) |
657 | perf_evsel__set_sample_bit(evsel, TIME); | 657 | perf_evsel__set_sample_bit(evsel, TIME); |
658 | 658 | ||
659 | if (opts->raw_samples) { | 659 | if (opts->raw_samples) { |
@@ -696,7 +696,7 @@ void perf_evsel__config(struct perf_evsel *evsel, | |||
696 | * Setting enable_on_exec for independent events and | 696 | * Setting enable_on_exec for independent events and |
697 | * group leaders for traced executed by perf. | 697 | * group leaders for traced executed by perf. |
698 | */ | 698 | */ |
699 | if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel)) | 699 | if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel)) |
700 | attr->enable_on_exec = 1; | 700 | attr->enable_on_exec = 1; |
701 | } | 701 | } |
702 | 702 | ||
@@ -2006,8 +2006,7 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err, | |||
2006 | return false; | 2006 | return false; |
2007 | } | 2007 | } |
2008 | 2008 | ||
2009 | int perf_evsel__open_strerror(struct perf_evsel *evsel, | 2009 | int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, |
2010 | struct perf_target *target, | ||
2011 | int err, char *msg, size_t size) | 2010 | int err, char *msg, size_t size) |
2012 | { | 2011 | { |
2013 | switch (err) { | 2012 | switch (err) { |
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 64ec8e1a7a28..f5029653dcd7 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h | |||
@@ -96,8 +96,19 @@ struct thread_map; | |||
96 | struct perf_evlist; | 96 | struct perf_evlist; |
97 | struct perf_record_opts; | 97 | struct perf_record_opts; |
98 | 98 | ||
99 | struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx); | 99 | struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx); |
100 | struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx); | 100 | |
101 | static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr) | ||
102 | { | ||
103 | return perf_evsel__new_idx(attr, 0); | ||
104 | } | ||
105 | |||
106 | struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx); | ||
107 | |||
108 | static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name) | ||
109 | { | ||
110 | return perf_evsel__newtp_idx(sys, name, 0); | ||
111 | } | ||
101 | 112 | ||
102 | struct event_format *event_format__new(const char *sys, const char *name); | 113 | struct event_format *event_format__new(const char *sys, const char *name); |
103 | 114 | ||
@@ -307,8 +318,7 @@ int perf_evsel__fprintf(struct perf_evsel *evsel, | |||
307 | 318 | ||
308 | bool perf_evsel__fallback(struct perf_evsel *evsel, int err, | 319 | bool perf_evsel__fallback(struct perf_evsel *evsel, int err, |
309 | char *msg, size_t msgsize); | 320 | char *msg, size_t msgsize); |
310 | int perf_evsel__open_strerror(struct perf_evsel *evsel, | 321 | int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, |
311 | struct perf_target *target, | ||
312 | int err, char *msg, size_t size); | 322 | int err, char *msg, size_t size); |
313 | 323 | ||
314 | static inline int perf_evsel__group_idx(struct perf_evsel *evsel) | 324 | static inline int perf_evsel__group_idx(struct perf_evsel *evsel) |
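The evsel.h change above is the usual default-argument idiom in C: the constructor that needs an explicit index keeps the long name, and a static inline wrapper supplies 0 for the common case, with perf_evlist__add() now fixing the index up when the evsel is inserted into a list. A toy version of the same pattern, with invented names and a single field:

#include <stdlib.h>
#include <stdio.h>

/* Toy stand-in for struct perf_evsel: only the field the idiom touches. */
struct evsel {
        int idx;
};

/* Full constructor: the caller picks the position explicitly. */
static struct evsel *evsel_new_idx(int idx)
{
        struct evsel *e = calloc(1, sizeof(*e));

        if (e)
                e->idx = idx;
        return e;
}

/* Convenience wrapper, as in the patch: most callers take index 0 and
 * rely on the list-add path to renumber the entry later. */
static inline struct evsel *evsel_new(void)
{
        return evsel_new_idx(0);
}

int main(void)
{
        struct evsel *a = evsel_new();          /* common case          */
        struct evsel *b = evsel_new_idx(3);     /* parse-events style   */

        printf("%d %d\n", a ? a->idx : -1, b ? b->idx : -1);
        free(a);
        free(b);
        return 0;
}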
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 26d9520a0c1b..369c03648f88 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -2797,7 +2797,7 @@ int perf_session__read_header(struct perf_session *session) | |||
2797 | perf_event__attr_swap(&f_attr.attr); | 2797 | perf_event__attr_swap(&f_attr.attr); |
2798 | 2798 | ||
2799 | tmp = lseek(fd, 0, SEEK_CUR); | 2799 | tmp = lseek(fd, 0, SEEK_CUR); |
2800 | evsel = perf_evsel__new(&f_attr.attr, i); | 2800 | evsel = perf_evsel__new(&f_attr.attr); |
2801 | 2801 | ||
2802 | if (evsel == NULL) | 2802 | if (evsel == NULL) |
2803 | goto out_delete_evlist; | 2803 | goto out_delete_evlist; |
@@ -2916,7 +2916,7 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused, | |||
2916 | return -ENOMEM; | 2916 | return -ENOMEM; |
2917 | } | 2917 | } |
2918 | 2918 | ||
2919 | evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries); | 2919 | evsel = perf_evsel__new(&event->attr.attr); |
2920 | if (evsel == NULL) | 2920 | if (evsel == NULL) |
2921 | return -ENOMEM; | 2921 | return -ENOMEM; |
2922 | 2922 | ||
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index ce034c183a7e..0393912d8033 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c | |||
@@ -1394,3 +1394,15 @@ int machine__for_each_thread(struct machine *machine, | |||
1394 | } | 1394 | } |
1395 | return rc; | 1395 | return rc; |
1396 | } | 1396 | } |
1397 | |||
1398 | int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, | ||
1399 | struct target *target, struct thread_map *threads, | ||
1400 | perf_event__handler_t process, bool data_mmap) | ||
1401 | { | ||
1402 | if (target__has_task(target)) | ||
1403 | return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap); | ||
1404 | else if (target__has_cpu(target)) | ||
1405 | return perf_event__synthesize_threads(tool, process, machine, data_mmap); | ||
1406 | /* command specified */ | ||
1407 | return 0; | ||
1408 | } | ||
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index 2389ba81fafe..477133015440 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <sys/types.h> | 4 | #include <sys/types.h> |
5 | #include <linux/rbtree.h> | 5 | #include <linux/rbtree.h> |
6 | #include "map.h" | 6 | #include "map.h" |
7 | #include "event.h" | ||
7 | 8 | ||
8 | struct addr_location; | 9 | struct addr_location; |
9 | struct branch_stack; | 10 | struct branch_stack; |
@@ -178,4 +179,15 @@ int machine__for_each_thread(struct machine *machine, | |||
178 | int (*fn)(struct thread *thread, void *p), | 179 | int (*fn)(struct thread *thread, void *p), |
179 | void *priv); | 180 | void *priv); |
180 | 181 | ||
182 | int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, | ||
183 | struct target *target, struct thread_map *threads, | ||
184 | perf_event__handler_t process, bool data_mmap); | ||
185 | static inline | ||
186 | int machine__synthesize_threads(struct machine *machine, struct target *target, | ||
187 | struct thread_map *threads, bool data_mmap) | ||
188 | { | ||
189 | return __machine__synthesize_threads(machine, NULL, target, threads, | ||
190 | perf_event__process, data_mmap); | ||
191 | } | ||
192 | |||
181 | #endif /* __PERF_MACHINE_H */ | 193 | #endif /* __PERF_MACHINE_H */ |
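machine__synthesize_threads() wraps a three-way decision that several tools previously open-coded: task-oriented targets synthesize from the thread map, CPU/system-wide targets walk all of /proc, and a plain workload needs nothing because perf forks it itself. A toy model of that dispatch, using the target fields visible in the patch but otherwise simplified:

#include <stdio.h>

/* Toy model of the checks behind __machine__synthesize_threads(); field
 * names follow struct target from the patch, everything else is invented. */
struct toy_target {
        const char *pid;
        const char *tid;
        const char *uid_str;
        const char *cpu_list;
        int system_wide;
};

static int has_task(const struct toy_target *t)
{
        return t->tid || t->pid || t->uid_str;
}

static int has_cpu(const struct toy_target *t)
{
        return t->system_wide || t->cpu_list;
}

static const char *synthesize_choice(const struct toy_target *t)
{
        if (has_task(t))
                return "synthesize from the thread map";
        if (has_cpu(t))
                return "synthesize all threads from /proc";
        return "nothing to do: perf starts the workload itself";
}

int main(void)
{
        struct toy_target by_pid   = { .pid = "1234" };
        struct toy_target sys_wide = { .system_wide = 1 };
        struct toy_target workload = { 0 };

        printf("%s\n", synthesize_choice(&by_pid));
        printf("%s\n", synthesize_choice(&sys_wide));
        printf("%s\n", synthesize_choice(&workload));
        return 0;
}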
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index c90e55cf7e82..6de6f89c2a61 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -277,7 +277,7 @@ static int __add_event(struct list_head *list, int *idx, | |||
277 | 277 | ||
278 | event_attr_init(attr); | 278 | event_attr_init(attr); |
279 | 279 | ||
280 | evsel = perf_evsel__new(attr, (*idx)++); | 280 | evsel = perf_evsel__new_idx(attr, (*idx)++); |
281 | if (!evsel) | 281 | if (!evsel) |
282 | return -ENOMEM; | 282 | return -ENOMEM; |
283 | 283 | ||
@@ -378,7 +378,7 @@ static int add_tracepoint(struct list_head *list, int *idx, | |||
378 | { | 378 | { |
379 | struct perf_evsel *evsel; | 379 | struct perf_evsel *evsel; |
380 | 380 | ||
381 | evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++); | 381 | evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++); |
382 | if (!evsel) | 382 | if (!evsel) |
383 | return -ENOMEM; | 383 | return -ENOMEM; |
384 | 384 | ||
@@ -1097,7 +1097,7 @@ static bool is_event_supported(u8 type, unsigned config) | |||
1097 | .threads = { 0 }, | 1097 | .threads = { 0 }, |
1098 | }; | 1098 | }; |
1099 | 1099 | ||
1100 | evsel = perf_evsel__new(&attr, 0); | 1100 | evsel = perf_evsel__new(&attr); |
1101 | if (evsel) { | 1101 | if (evsel) { |
1102 | ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0; | 1102 | ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0; |
1103 | perf_evsel__delete(evsel); | 1103 | perf_evsel__delete(evsel); |
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 3c1b75c8b9a6..8b0bb1f4494a 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c | |||
@@ -1137,6 +1137,8 @@ static void sort_entry__setup_elide(struct sort_entry *se, | |||
1137 | 1137 | ||
1138 | void sort__setup_elide(FILE *output) | 1138 | void sort__setup_elide(FILE *output) |
1139 | { | 1139 | { |
1140 | struct sort_entry *se; | ||
1141 | |||
1140 | sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, | 1142 | sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, |
1141 | "dso", output); | 1143 | "dso", output); |
1142 | sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, | 1144 | sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, |
@@ -1172,4 +1174,15 @@ void sort__setup_elide(FILE *output) | |||
1172 | "snoop", output); | 1174 | "snoop", output); |
1173 | } | 1175 | } |
1174 | 1176 | ||
1177 | /* | ||
1178 | * It makes no sense to elide all of sort entries. | ||
1179 | * Just revert them to show up again. | ||
1180 | */ | ||
1181 | list_for_each_entry(se, &hist_entry__sort_list, list) { | ||
1182 | if (!se->elide) | ||
1183 | return; | ||
1184 | } | ||
1185 | |||
1186 | list_for_each_entry(se, &hist_entry__sort_list, list) | ||
1187 | se->elide = false; | ||
1175 | } | 1188 | } |
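The sort.c addition guards against a configuration where every sort key ends up elided and nothing would be printed. The same two-pass safeguard over a plain array, purely as an illustration:

#include <stdbool.h>
#include <stdio.h>

/* Toy version of the check at the end of sort__setup_elide(): if every
 * column was elided, un-elide them all so something is still displayed.
 * An array stands in for the kernel-style list. */
static void fixup_elide(bool *elide, int n)
{
        int i;

        for (i = 0; i < n; i++)
                if (!elide[i])
                        return;         /* at least one column survives */

        for (i = 0; i < n; i++)
                elide[i] = false;       /* revert: show everything again */
}

int main(void)
{
        bool elide[3] = { true, true, true };

        fixup_elide(elide, 3);
        printf("%d %d %d\n", elide[0], elide[1], elide[2]);   /* 0 0 0 */
        return 0;
}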
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c index 065528b7563e..3c778a07b7cc 100644 --- a/tools/perf/util/target.c +++ b/tools/perf/util/target.c | |||
@@ -13,9 +13,9 @@ | |||
13 | #include <string.h> | 13 | #include <string.h> |
14 | 14 | ||
15 | 15 | ||
16 | enum perf_target_errno perf_target__validate(struct perf_target *target) | 16 | enum target_errno target__validate(struct target *target) |
17 | { | 17 | { |
18 | enum perf_target_errno ret = PERF_ERRNO_TARGET__SUCCESS; | 18 | enum target_errno ret = TARGET_ERRNO__SUCCESS; |
19 | 19 | ||
20 | if (target->pid) | 20 | if (target->pid) |
21 | target->tid = target->pid; | 21 | target->tid = target->pid; |
@@ -23,42 +23,42 @@ enum perf_target_errno perf_target__validate(struct perf_target *target) | |||
23 | /* CPU and PID are mutually exclusive */ | 23 | /* CPU and PID are mutually exclusive */ |
24 | if (target->tid && target->cpu_list) { | 24 | if (target->tid && target->cpu_list) { |
25 | target->cpu_list = NULL; | 25 | target->cpu_list = NULL; |
26 | if (ret == PERF_ERRNO_TARGET__SUCCESS) | 26 | if (ret == TARGET_ERRNO__SUCCESS) |
27 | ret = PERF_ERRNO_TARGET__PID_OVERRIDE_CPU; | 27 | ret = TARGET_ERRNO__PID_OVERRIDE_CPU; |
28 | } | 28 | } |
29 | 29 | ||
30 | /* UID and PID are mutually exclusive */ | 30 | /* UID and PID are mutually exclusive */ |
31 | if (target->tid && target->uid_str) { | 31 | if (target->tid && target->uid_str) { |
32 | target->uid_str = NULL; | 32 | target->uid_str = NULL; |
33 | if (ret == PERF_ERRNO_TARGET__SUCCESS) | 33 | if (ret == TARGET_ERRNO__SUCCESS) |
34 | ret = PERF_ERRNO_TARGET__PID_OVERRIDE_UID; | 34 | ret = TARGET_ERRNO__PID_OVERRIDE_UID; |
35 | } | 35 | } |
36 | 36 | ||
37 | /* UID and CPU are mutually exclusive */ | 37 | /* UID and CPU are mutually exclusive */ |
38 | if (target->uid_str && target->cpu_list) { | 38 | if (target->uid_str && target->cpu_list) { |
39 | target->cpu_list = NULL; | 39 | target->cpu_list = NULL; |
40 | if (ret == PERF_ERRNO_TARGET__SUCCESS) | 40 | if (ret == TARGET_ERRNO__SUCCESS) |
41 | ret = PERF_ERRNO_TARGET__UID_OVERRIDE_CPU; | 41 | ret = TARGET_ERRNO__UID_OVERRIDE_CPU; |
42 | } | 42 | } |
43 | 43 | ||
44 | /* PID and SYSTEM are mutually exclusive */ | 44 | /* PID and SYSTEM are mutually exclusive */ |
45 | if (target->tid && target->system_wide) { | 45 | if (target->tid && target->system_wide) { |
46 | target->system_wide = false; | 46 | target->system_wide = false; |
47 | if (ret == PERF_ERRNO_TARGET__SUCCESS) | 47 | if (ret == TARGET_ERRNO__SUCCESS) |
48 | ret = PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM; | 48 | ret = TARGET_ERRNO__PID_OVERRIDE_SYSTEM; |
49 | } | 49 | } |
50 | 50 | ||
51 | /* UID and SYSTEM are mutually exclusive */ | 51 | /* UID and SYSTEM are mutually exclusive */ |
52 | if (target->uid_str && target->system_wide) { | 52 | if (target->uid_str && target->system_wide) { |
53 | target->system_wide = false; | 53 | target->system_wide = false; |
54 | if (ret == PERF_ERRNO_TARGET__SUCCESS) | 54 | if (ret == TARGET_ERRNO__SUCCESS) |
55 | ret = PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM; | 55 | ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM; |
56 | } | 56 | } |
57 | 57 | ||
58 | return ret; | 58 | return ret; |
59 | } | 59 | } |
60 | 60 | ||
61 | enum perf_target_errno perf_target__parse_uid(struct perf_target *target) | 61 | enum target_errno target__parse_uid(struct target *target) |
62 | { | 62 | { |
63 | struct passwd pwd, *result; | 63 | struct passwd pwd, *result; |
64 | char buf[1024]; | 64 | char buf[1024]; |
@@ -66,7 +66,7 @@ enum perf_target_errno perf_target__parse_uid(struct perf_target *target) | |||
66 | 66 | ||
67 | target->uid = UINT_MAX; | 67 | target->uid = UINT_MAX; |
68 | if (str == NULL) | 68 | if (str == NULL) |
69 | return PERF_ERRNO_TARGET__SUCCESS; | 69 | return TARGET_ERRNO__SUCCESS; |
70 | 70 | ||
71 | /* Try user name first */ | 71 | /* Try user name first */ |
72 | getpwnam_r(str, &pwd, buf, sizeof(buf), &result); | 72 | getpwnam_r(str, &pwd, buf, sizeof(buf), &result); |
@@ -79,22 +79,22 @@ enum perf_target_errno perf_target__parse_uid(struct perf_target *target) | |||
79 | int uid = strtol(str, &endptr, 10); | 79 | int uid = strtol(str, &endptr, 10); |
80 | 80 | ||
81 | if (*endptr != '\0') | 81 | if (*endptr != '\0') |
82 | return PERF_ERRNO_TARGET__INVALID_UID; | 82 | return TARGET_ERRNO__INVALID_UID; |
83 | 83 | ||
84 | getpwuid_r(uid, &pwd, buf, sizeof(buf), &result); | 84 | getpwuid_r(uid, &pwd, buf, sizeof(buf), &result); |
85 | 85 | ||
86 | if (result == NULL) | 86 | if (result == NULL) |
87 | return PERF_ERRNO_TARGET__USER_NOT_FOUND; | 87 | return TARGET_ERRNO__USER_NOT_FOUND; |
88 | } | 88 | } |
89 | 89 | ||
90 | target->uid = result->pw_uid; | 90 | target->uid = result->pw_uid; |
91 | return PERF_ERRNO_TARGET__SUCCESS; | 91 | return TARGET_ERRNO__SUCCESS; |
92 | } | 92 | } |
93 | 93 | ||
94 | /* | 94 | /* |
95 | * This must have a same ordering as the enum perf_target_errno. | 95 | * This must have a same ordering as the enum target_errno. |
96 | */ | 96 | */ |
97 | static const char *perf_target__error_str[] = { | 97 | static const char *target__error_str[] = { |
98 | "PID/TID switch overriding CPU", | 98 | "PID/TID switch overriding CPU", |
99 | "PID/TID switch overriding UID", | 99 | "PID/TID switch overriding UID", |
100 | "UID switch overriding CPU", | 100 | "UID switch overriding CPU", |
@@ -104,7 +104,7 @@ static const char *perf_target__error_str[] = { | |||
104 | "Problems obtaining information for user %s", | 104 | "Problems obtaining information for user %s", |
105 | }; | 105 | }; |
106 | 106 | ||
107 | int perf_target__strerror(struct perf_target *target, int errnum, | 107 | int target__strerror(struct target *target, int errnum, |
108 | char *buf, size_t buflen) | 108 | char *buf, size_t buflen) |
109 | { | 109 | { |
110 | int idx; | 110 | int idx; |
@@ -124,21 +124,19 @@ int perf_target__strerror(struct perf_target *target, int errnum, | |||
124 | return 0; | 124 | return 0; |
125 | } | 125 | } |
126 | 126 | ||
127 | if (errnum < __PERF_ERRNO_TARGET__START || | 127 | if (errnum < __TARGET_ERRNO__START || errnum >= __TARGET_ERRNO__END) |
128 | errnum >= __PERF_ERRNO_TARGET__END) | ||
129 | return -1; | 128 | return -1; |
130 | 129 | ||
131 | idx = errnum - __PERF_ERRNO_TARGET__START; | 130 | idx = errnum - __TARGET_ERRNO__START; |
132 | msg = perf_target__error_str[idx]; | 131 | msg = target__error_str[idx]; |
133 | 132 | ||
134 | switch (errnum) { | 133 | switch (errnum) { |
135 | case PERF_ERRNO_TARGET__PID_OVERRIDE_CPU | 134 | case TARGET_ERRNO__PID_OVERRIDE_CPU ... TARGET_ERRNO__UID_OVERRIDE_SYSTEM: |
136 | ... PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM: | ||
137 | snprintf(buf, buflen, "%s", msg); | 135 | snprintf(buf, buflen, "%s", msg); |
138 | break; | 136 | break; |
139 | 137 | ||
140 | case PERF_ERRNO_TARGET__INVALID_UID: | 138 | case TARGET_ERRNO__INVALID_UID: |
141 | case PERF_ERRNO_TARGET__USER_NOT_FOUND: | 139 | case TARGET_ERRNO__USER_NOT_FOUND: |
142 | snprintf(buf, buflen, msg, target->uid_str); | 140 | snprintf(buf, buflen, msg, target->uid_str); |
143 | break; | 141 | break; |
144 | 142 | ||
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h index a4be8575fda5..89bab7129de4 100644 --- a/tools/perf/util/target.h +++ b/tools/perf/util/target.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <stdbool.h> | 4 | #include <stdbool.h> |
5 | #include <sys/types.h> | 5 | #include <sys/types.h> |
6 | 6 | ||
7 | struct perf_target { | 7 | struct target { |
8 | const char *pid; | 8 | const char *pid; |
9 | const char *tid; | 9 | const char *tid; |
10 | const char *cpu_list; | 10 | const char *cpu_list; |
@@ -14,8 +14,8 @@ struct perf_target { | |||
14 | bool uses_mmap; | 14 | bool uses_mmap; |
15 | }; | 15 | }; |
16 | 16 | ||
17 | enum perf_target_errno { | 17 | enum target_errno { |
18 | PERF_ERRNO_TARGET__SUCCESS = 0, | 18 | TARGET_ERRNO__SUCCESS = 0, |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * Choose an arbitrary negative big number not to clash with standard | 21 | * Choose an arbitrary negative big number not to clash with standard |
@@ -24,42 +24,40 @@ enum perf_target_errno { | |||
24 | * | 24 | * |
25 | * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html | 25 | * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html |
26 | */ | 26 | */ |
27 | __PERF_ERRNO_TARGET__START = -10000, | 27 | __TARGET_ERRNO__START = -10000, |
28 | 28 | ||
29 | /* for target__validate() */ | ||
30 | TARGET_ERRNO__PID_OVERRIDE_CPU = __TARGET_ERRNO__START, | ||
31 | TARGET_ERRNO__PID_OVERRIDE_UID, | ||
32 | TARGET_ERRNO__UID_OVERRIDE_CPU, | ||
33 | TARGET_ERRNO__PID_OVERRIDE_SYSTEM, | ||
34 | TARGET_ERRNO__UID_OVERRIDE_SYSTEM, | ||
29 | 35 | ||
30 | /* for perf_target__validate() */ | 36 | /* for target__parse_uid() */ |
31 | PERF_ERRNO_TARGET__PID_OVERRIDE_CPU = __PERF_ERRNO_TARGET__START, | 37 | TARGET_ERRNO__INVALID_UID, |
32 | PERF_ERRNO_TARGET__PID_OVERRIDE_UID, | 38 | TARGET_ERRNO__USER_NOT_FOUND, |
33 | PERF_ERRNO_TARGET__UID_OVERRIDE_CPU, | ||
34 | PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM, | ||
35 | PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM, | ||
36 | 39 | ||
37 | /* for perf_target__parse_uid() */ | 40 | __TARGET_ERRNO__END, |
38 | PERF_ERRNO_TARGET__INVALID_UID, | ||
39 | PERF_ERRNO_TARGET__USER_NOT_FOUND, | ||
40 | |||
41 | __PERF_ERRNO_TARGET__END, | ||
42 | }; | 41 | }; |
43 | 42 | ||
44 | enum perf_target_errno perf_target__validate(struct perf_target *target); | 43 | enum target_errno target__validate(struct target *target); |
45 | enum perf_target_errno perf_target__parse_uid(struct perf_target *target); | 44 | enum target_errno target__parse_uid(struct target *target); |
46 | 45 | ||
47 | int perf_target__strerror(struct perf_target *target, int errnum, char *buf, | 46 | int target__strerror(struct target *target, int errnum, char *buf, size_t buflen); |
48 | size_t buflen); | ||
49 | 47 | ||
50 | static inline bool perf_target__has_task(struct perf_target *target) | 48 | static inline bool target__has_task(struct target *target) |
51 | { | 49 | { |
52 | return target->tid || target->pid || target->uid_str; | 50 | return target->tid || target->pid || target->uid_str; |
53 | } | 51 | } |
54 | 52 | ||
55 | static inline bool perf_target__has_cpu(struct perf_target *target) | 53 | static inline bool target__has_cpu(struct target *target) |
56 | { | 54 | { |
57 | return target->system_wide || target->cpu_list; | 55 | return target->system_wide || target->cpu_list; |
58 | } | 56 | } |
59 | 57 | ||
60 | static inline bool perf_target__none(struct perf_target *target) | 58 | static inline bool target__none(struct target *target) |
61 | { | 59 | { |
62 | return !perf_target__has_task(target) && !perf_target__has_cpu(target); | 60 | return !target__has_task(target) && !target__has_cpu(target); |
63 | } | 61 | } |
64 | 62 | ||
65 | #endif /* _PERF_TARGET_H */ | 63 | #endif /* _PERF_TARGET_H */ |
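target.h keeps the errno-style convention under the shorter names: codes start at a large negative base so they cannot collide with real errno values, and target__strerror() indexes a message table by the offset from that base, which is why the table and the enum must stay in the same order. A compact stand-alone sketch of that scheme, with toy names in place of the perf definitions:

#include <stdio.h>

enum toy_errno {
        TOY_ERRNO__START = -10000,
        TOY_ERRNO__PID_OVERRIDE_CPU = TOY_ERRNO__START,
        TOY_ERRNO__PID_OVERRIDE_UID,
        TOY_ERRNO__INVALID_UID,
        TOY_ERRNO__END,
};

/* Must stay in the same order as the enum above. */
static const char *toy_error_str[] = {
        "PID/TID switch overriding CPU",
        "PID/TID switch overriding UID",
        "Invalid UID %s",
};

static int toy_strerror(int errnum, const char *uid_str,
                        char *buf, size_t buflen)
{
        if (errnum < TOY_ERRNO__START || errnum >= TOY_ERRNO__END)
                return -1;

        /* The offset from the base picks the message. */
        snprintf(buf, buflen, toy_error_str[errnum - TOY_ERRNO__START],
                 uid_str);
        return 0;
}

int main(void)
{
        char buf[128];

        if (!toy_strerror(TOY_ERRNO__INVALID_UID, "nobody", buf, sizeof(buf)))
                puts(buf);
        return 0;
}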
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c index f857b51b6bde..ce793c7dd23c 100644 --- a/tools/perf/util/top.c +++ b/tools/perf/util/top.c | |||
@@ -27,7 +27,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) | |||
27 | float ksamples_per_sec; | 27 | float ksamples_per_sec; |
28 | float esamples_percent; | 28 | float esamples_percent; |
29 | struct perf_record_opts *opts = &top->record_opts; | 29 | struct perf_record_opts *opts = &top->record_opts; |
30 | struct perf_target *target = &opts->target; | 30 | struct target *target = &opts->target; |
31 | size_t ret = 0; | 31 | size_t ret = 0; |
32 | 32 | ||
33 | if (top->samples) { | 33 | if (top->samples) { |