author    Linus Torvalds <torvalds@linux-foundation.org>  2013-11-14 02:56:32 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-11-14 02:56:32 -0500
commit    fcd7476f9e03a36e709e0807198d47a826cc4e3a (patch)
tree      1a9017988a864fae9ec62fd9e08e18cdc42d06cf /tools/perf/util
parent    d320e203bad4cfcef3613e83a52f8c70a77e8a60 (diff)
parent    d969135aae1434547f41853f0e8eaa622e8b8816 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
 "A number of fixes:

   - Fix segfault on perf trace -i perf.data, from Namhyung Kim.

   - Fix segfault with --no-mmap-pages, from David Ahern.

   - Don't force a refresh during progress update in the TUI, greatly
     reducing startup costs, fix from Patrick Palka.

   - Fix sw clock event period test wrt not checking if using > max_sample_freq.

   - Handle throttle events in 'object code reading' test, fix from Adrian Hunter.

   - Prevent condition that all sort keys are elided, fix from Namhyung Kim.

   - Round mmap pages to power 2, from David Ahern.

  And a number of late arrival changes:

   - Add summary only option to 'perf trace', suppressing the decoding
     of events, from David Ahern.

   - 'perf trace --summary' formatting simplifications, from Pekka Enberg.

   - Beautify fifth argument of mmap() as fd, in 'perf trace', from Namhyung Kim.

   - Add direct access to dynamic arrays in libtraceevent, from Steven Rostedt.

   - Synthesize non-exec MMAP records when --data used, allowing the
     resolution of data addresses to symbols (global variables, etc),
     by Arnaldo Carvalho de Melo.

   - Code cleanups by David Ahern and Adrian Hunter"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  tools lib traceevent: Add direct access to dynamic arrays
  perf target: Shorten perf_target__ to target__
  perf tests: Handle throttle events in 'object code reading' test
  perf evlist: Refactor mmap_pages parsing
  perf evlist: Round mmap pages to power 2 - v2
  perf record: Fix segfault with --no-mmap-pages
  perf trace: Add summary only option
  perf trace: Simplify '--summary' output
  perf trace: Change syscall summary duration order
  perf tests: Compensate lower sample freq with longer test loop
  perf trace: Fix segfault on perf trace -i perf.data
  perf trace: Separate tp syscall field caching into init routine to be reused
  perf trace: Beautify fifth argument of mmap() as fd
  perf tests: Use lower sample_freq in sw clock event period test
  perf tests: Check return of perf_evlist__open sw clock event period test
  perf record: Move existing write_output into helper function
  perf record: Use correct return type for write()
  perf tools: Prevent condition that all sort keys are elided
  perf machine: Simplify synthesize_threads method
  perf machine: Introduce synthesize_threads method out of open coded equivalent
  ...
Diffstat (limited to 'tools/perf/util')
 -rw-r--r--  tools/perf/util/event.c        |  50
 -rw-r--r--  tools/perf/util/event.h        |   4
 -rw-r--r--  tools/perf/util/evlist.c       |  73
 -rw-r--r--  tools/perf/util/evlist.h       |   5
 -rw-r--r--  tools/perf/util/evsel.c        |  13
 -rw-r--r--  tools/perf/util/evsel.h        |  18
 -rw-r--r--  tools/perf/util/header.c       |   4
 -rw-r--r--  tools/perf/util/machine.c      |  12
 -rw-r--r--  tools/perf/util/machine.h      |  12
 -rw-r--r--  tools/perf/util/parse-events.c |   6
 -rw-r--r--  tools/perf/util/sort.c         |  13
 -rw-r--r--  tools/perf/util/target.c       |  54
 -rw-r--r--  tools/perf/util/target.h       |  44
 -rw-r--r--  tools/perf/util/top.c          |   2
 14 files changed, 187 insertions(+), 123 deletions(-)
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index ec9ae1114ed4..6e3a846aed0e 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -170,7 +170,8 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                               union perf_event *event,
                                               pid_t pid, pid_t tgid,
                                               perf_event__handler_t process,
-                                              struct machine *machine)
+                                              struct machine *machine,
+                                              bool mmap_data)
 {
         char filename[PATH_MAX];
         FILE *fp;
@@ -188,10 +189,6 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
         }
 
         event->header.type = PERF_RECORD_MMAP;
-        /*
-         * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
-         */
-        event->header.misc = PERF_RECORD_MISC_USER;
 
         while (1) {
                 char bf[BUFSIZ];
@@ -215,9 +212,17 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
 
                 if (n != 5)
                         continue;
+                /*
+                 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
+                 */
+                event->header.misc = PERF_RECORD_MISC_USER;
 
-                if (prot[2] != 'x')
-                        continue;
+                if (prot[2] != 'x') {
+                        if (!mmap_data || prot[0] != 'r')
+                                continue;
+
+                        event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
+                }
 
                 if (!strcmp(execname, ""))
                         strcpy(execname, anonstr);
@@ -304,20 +309,21 @@ static int __event__synthesize_thread(union perf_event *comm_event,
                                       pid_t pid, int full,
                                       perf_event__handler_t process,
                                       struct perf_tool *tool,
-                                      struct machine *machine)
+                                      struct machine *machine, bool mmap_data)
 {
         pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
                                                  process, machine);
         if (tgid == -1)
                 return -1;
         return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
-                                                  process, machine);
+                                                  process, machine, mmap_data);
 }
 
 int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                       struct thread_map *threads,
                                       perf_event__handler_t process,
-                                      struct machine *machine)
+                                      struct machine *machine,
+                                      bool mmap_data)
 {
         union perf_event *comm_event, *mmap_event;
         int err = -1, thread, j;
@@ -334,7 +340,8 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
         for (thread = 0; thread < threads->nr; ++thread) {
                 if (__event__synthesize_thread(comm_event, mmap_event,
                                                threads->map[thread], 0,
-                                               process, tool, machine)) {
+                                               process, tool, machine,
+                                               mmap_data)) {
                         err = -1;
                         break;
                 }
@@ -356,10 +363,10 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
 
                 /* if not, generate events for it */
                 if (need_leader &&
-                    __event__synthesize_thread(comm_event,
-                                               mmap_event,
-                                               comm_event->comm.pid, 0,
-                                               process, tool, machine)) {
+                    __event__synthesize_thread(comm_event, mmap_event,
+                                               comm_event->comm.pid, 0,
+                                               process, tool, machine,
+                                               mmap_data)) {
                         err = -1;
                         break;
                 }
@@ -374,7 +381,7 @@ out:
 
 int perf_event__synthesize_threads(struct perf_tool *tool,
                                    perf_event__handler_t process,
-                                   struct machine *machine)
+                                   struct machine *machine, bool mmap_data)
 {
         DIR *proc;
         struct dirent dirent, *next;
@@ -404,7 +411,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
                  * one thread couldn't be synthesized.
                  */
                 __event__synthesize_thread(comm_event, mmap_event, pid, 1,
-                                           process, tool, machine);
+                                           process, tool, machine, mmap_data);
         }
 
         err = 0;
@@ -528,19 +535,22 @@ int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
 
 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
 {
-        return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
+        return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
                        event->mmap.pid, event->mmap.tid, event->mmap.start,
-                       event->mmap.len, event->mmap.pgoff, event->mmap.filename);
+                       event->mmap.len, event->mmap.pgoff,
+                       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
+                       event->mmap.filename);
 }
 
 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
 {
         return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
-                           " %02x:%02x %"PRIu64" %"PRIu64"]: %s\n",
+                           " %02x:%02x %"PRIu64" %"PRIu64"]: %c %s\n",
                        event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
                        event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
                        event->mmap2.min, event->mmap2.ino,
                        event->mmap2.ino_generation,
+                       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
                        event->mmap2.filename);
 }
 
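The hunks above change which /proc/<pid>/maps entries get synthesized: executable mappings are always emitted, and, when the new mmap_data flag is set, readable non-executable mappings are emitted as well and tagged with PERF_RECORD_MISC_MMAP_DATA. Below is a minimal standalone sketch of that filtering rule only, reading /proc/self/maps directly rather than going through perf's tool/event plumbing; it is illustrative, not perf code.

/* Illustrative sketch: mirrors the prot[] filter added above,
 * outside of perf's own synthesizer. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        bool mmap_data = true;  /* corresponds to passing mmap_data = true above */
        char bf[BUFSIZ];
        FILE *fp = fopen("/proc/self/maps", "r");

        if (fp == NULL)
                return 1;

        while (fgets(bf, sizeof(bf), fp)) {
                unsigned long start, end, pgoff;
                char prot[5], execname[4096];
                int n = sscanf(bf, "%lx-%lx %4s %lx %*x:%*x %*u %4095s",
                               &start, &end, prot, &pgoff, execname);
                bool data = false;

                if (n != 5)             /* anonymous mappings lack a name; skip here */
                        continue;

                if (prot[2] != 'x') {                   /* not executable */
                        if (!mmap_data || prot[0] != 'r')
                                continue;               /* dropped, as before the patch */
                        data = true;    /* would get PERF_RECORD_MISC_MMAP_DATA */
                }

                printf("%c %lx-%lx %s\n", data ? 'r' : 'x', start, end, execname);
        }

        fclose(fp);
        return 0;
}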
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index f8d70f3003ab..30fec9901e44 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -208,10 +208,10 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool,
 int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                       struct thread_map *threads,
                                       perf_event__handler_t process,
-                                      struct machine *machine);
+                                      struct machine *machine, bool mmap_data);
 int perf_event__synthesize_threads(struct perf_tool *tool,
                                    perf_event__handler_t process,
-                                   struct machine *machine);
+                                   struct machine *machine, bool mmap_data);
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                        perf_event__handler_t process,
                                        struct machine *machine,
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index b939221efd8d..dc6fa3fbb180 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -117,6 +117,8 @@ void perf_evlist__delete(struct perf_evlist *evlist)
 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 {
         list_add_tail(&entry->node, &evlist->entries);
+        entry->idx = evlist->nr_entries;
+
         if (!evlist->nr_entries++)
                 perf_evlist__set_id_pos(evlist);
 }
@@ -165,7 +167,7 @@ int perf_evlist__add_default(struct perf_evlist *evlist)
 
         event_attr_init(&attr);
 
-        evsel = perf_evsel__new(&attr, 0);
+        evsel = perf_evsel__new(&attr);
         if (evsel == NULL)
                 goto error;
 
@@ -190,7 +192,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist,
         size_t i;
 
         for (i = 0; i < nr_attrs; i++) {
-                evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
+                evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
                 if (evsel == NULL)
                         goto out_delete_partial_list;
                 list_add_tail(&evsel->node, &head);
@@ -249,9 +251,8 @@ perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
 int perf_evlist__add_newtp(struct perf_evlist *evlist,
                            const char *sys, const char *name, void *handler)
 {
-        struct perf_evsel *evsel;
+        struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
 
-        evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
         if (evsel == NULL)
                 return -1;
 
@@ -704,12 +705,10 @@ static size_t perf_evlist__mmap_size(unsigned long pages)
         return (pages + 1) * page_size;
 }
 
-int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
-                                  int unset __maybe_unused)
+static long parse_pages_arg(const char *str, unsigned long min,
+                            unsigned long max)
 {
-        unsigned int *mmap_pages = opt->value;
         unsigned long pages, val;
-        size_t size;
         static struct parse_tag tags[] = {
                 { .tag = 'B', .mult = 1 },
                 { .tag = 'K', .mult = 1 << 10 },
@@ -718,33 +717,49 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
                 { .tag = 0 },
         };
 
+        if (str == NULL)
+                return -EINVAL;
+
         val = parse_tag_value(str, tags);
         if (val != (unsigned long) -1) {
                 /* we got file size value */
                 pages = PERF_ALIGN(val, page_size) / page_size;
-                if (pages < (1UL << 31) && !is_power_of_2(pages)) {
-                        pages = next_pow2(pages);
-                        pr_info("rounding mmap pages size to %lu (%lu pages)\n",
-                                pages * page_size, pages);
-                }
         } else {
                 /* we got pages count value */
                 char *eptr;
                 pages = strtoul(str, &eptr, 10);
-                if (*eptr != '\0') {
-                        pr_err("failed to parse --mmap_pages/-m value\n");
-                        return -1;
-                }
+                if (*eptr != '\0')
+                        return -EINVAL;
         }
 
-        if (pages > UINT_MAX || pages > SIZE_MAX / page_size) {
-                pr_err("--mmap_pages/-m value too big\n");
-                return -1;
+        if ((pages == 0) && (min == 0)) {
+                /* leave number of pages at 0 */
+        } else if (pages < (1UL << 31) && !is_power_of_2(pages)) {
+                /* round pages up to next power of 2 */
+                pages = next_pow2(pages);
+                pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
+                        pages * page_size, pages);
         }
 
-        size = perf_evlist__mmap_size(pages);
-        if (!size) {
-                pr_err("--mmap_pages/-m value must be a power of two.");
+        if (pages > max)
+                return -EINVAL;
+
+        return pages;
+}
+
+int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
+                                  int unset __maybe_unused)
+{
+        unsigned int *mmap_pages = opt->value;
+        unsigned long max = UINT_MAX;
+        long pages;
+
+        if (max < SIZE_MAX / page_size)
+                max = SIZE_MAX / page_size;
+
+        pages = parse_pages_arg(str, 1, max);
+        if (pages < 0) {
+                pr_err("Invalid argument for --mmap_pages/-m\n");
                 return -1;
         }
 
@@ -796,8 +811,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
         return perf_evlist__mmap_per_cpu(evlist, prot, mask);
 }
 
-int perf_evlist__create_maps(struct perf_evlist *evlist,
-                             struct perf_target *target)
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
 {
         evlist->threads = thread_map__new_str(target->pid, target->tid,
                                               target->uid);
@@ -805,9 +819,9 @@ int perf_evlist__create_maps(struct perf_evlist *evlist,
         if (evlist->threads == NULL)
                 return -1;
 
-        if (perf_target__has_task(target))
+        if (target__has_task(target))
                 evlist->cpus = cpu_map__dummy_new();
-        else if (!perf_target__has_cpu(target) && !target->uses_mmap)
+        else if (!target__has_cpu(target) && !target->uses_mmap)
                 evlist->cpus = cpu_map__dummy_new();
         else
                 evlist->cpus = cpu_map__new(target->cpu_list);
@@ -1016,8 +1030,7 @@ out_err:
         return err;
 }
 
-int perf_evlist__prepare_workload(struct perf_evlist *evlist,
-                                  struct perf_target *target,
+int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
                                   const char *argv[], bool pipe_output,
                                   bool want_signal)
 {
@@ -1069,7 +1082,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
                 exit(-1);
         }
 
-        if (perf_target__none(target))
+        if (target__none(target))
                 evlist->threads->map[0] = evlist->workload.pid;
 
         close(child_ready_pipe[1]);
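The refactored parse_pages_arg() above accepts either a size (with a B/K/M/G suffix, via parse_tag_value()) or a plain page count, rounds non-power-of-two values up with next_pow2(), and range-checks the result. The following is a small self-contained sketch of that behaviour with the suffix parsing and rounding reimplemented locally; the helper names are local stand-ins, not perf's own.

/* Illustrative sketch of the --mmap-pages parsing and rounding above. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned long next_pow2(unsigned long x)
{
        unsigned long v = 1;

        while (v < x)
                v <<= 1;
        return v;
}

/* str is either a byte size with a B/K/M/G suffix or a plain page count */
static long parse_pages_arg(const char *str, unsigned long page_size)
{
        char *end;
        unsigned long pages;
        unsigned long val = strtoul(str, &end, 10);

        if (end == str)
                return -EINVAL;

        if (*end == '\0') {                     /* page count */
                pages = val;
        } else {                                /* byte size */
                unsigned long mult;

                switch (*end) {
                case 'B': mult = 1;         break;
                case 'K': mult = 1UL << 10; break;
                case 'M': mult = 1UL << 20; break;
                case 'G': mult = 1UL << 30; break;
                default:  return -EINVAL;
                }
                val *= mult;
                pages = (val + page_size - 1) / page_size;
        }

        if (pages != 0 && (pages & (pages - 1)))        /* not a power of two */
                pages = next_pow2(pages);

        return (long)pages;
}

int main(int argc, char **argv)
{
        unsigned long page_size = 4096;
        const char *arg = argc > 1 ? argv[1] : "129";
        long pages = parse_pages_arg(arg, page_size);

        if (pages < 0) {
                fprintf(stderr, "Invalid argument for --mmap_pages/-m\n");
                return 1;
        }
        /* "129" rounds up to 256 pages; "1M" also maps to 256 pages here. */
        printf("%s -> %ld pages (%lu bytes)\n", arg, pages, pages * page_size);
        return 0;
}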
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index ecaa582f40e2..649d6ea98a84 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -102,7 +102,7 @@ void perf_evlist__config(struct perf_evlist *evlist,
 int perf_record_opts__config(struct perf_record_opts *opts);
 
 int perf_evlist__prepare_workload(struct perf_evlist *evlist,
-                                  struct perf_target *target,
+                                  struct target *target,
                                   const char *argv[], bool pipe_output,
                                   bool want_signal);
 int perf_evlist__start_workload(struct perf_evlist *evlist);
@@ -134,8 +134,7 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
         evlist->threads = threads;
 }
 
-int perf_evlist__create_maps(struct perf_evlist *evlist,
-                             struct perf_target *target);
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
 void perf_evlist__delete_maps(struct perf_evlist *evlist);
 int perf_evlist__apply_filters(struct perf_evlist *evlist);
 
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 5280820ed389..18f7c188ff63 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -168,7 +168,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
         perf_evsel__calc_id_pos(evsel);
 }
 
-struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
+struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
 {
         struct perf_evsel *evsel = zalloc(sizeof(*evsel));
 
@@ -219,7 +219,7 @@ out:
         return format;
 }
 
-struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
+struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
 {
         struct perf_evsel *evsel = zalloc(sizeof(*evsel));
 
@@ -645,7 +645,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
                 }
         }
 
-        if (perf_target__has_cpu(&opts->target))
+        if (target__has_cpu(&opts->target))
                 perf_evsel__set_sample_bit(evsel, CPU);
 
         if (opts->period)
@@ -653,7 +653,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
 
         if (!perf_missing_features.sample_id_all &&
             (opts->sample_time || !opts->no_inherit ||
-             perf_target__has_cpu(&opts->target)))
+             target__has_cpu(&opts->target)))
                 perf_evsel__set_sample_bit(evsel, TIME);
 
         if (opts->raw_samples) {
@@ -696,7 +696,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
          * Setting enable_on_exec for independent events and
          * group leaders for traced executed by perf.
          */
-        if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
+        if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
                 attr->enable_on_exec = 1;
 }
 
@@ -2006,8 +2006,7 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
         return false;
 }
 
-int perf_evsel__open_strerror(struct perf_evsel *evsel,
-                              struct perf_target *target,
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
                               int err, char *msg, size_t size)
 {
         switch (err) {
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 64ec8e1a7a28..f5029653dcd7 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -96,8 +96,19 @@ struct thread_map;
 struct perf_evlist;
 struct perf_record_opts;
 
-struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
-struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx);
+struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);
+
+static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
+{
+        return perf_evsel__new_idx(attr, 0);
+}
+
+struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);
+
+static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
+{
+        return perf_evsel__newtp_idx(sys, name, 0);
+}
 
 struct event_format *event_format__new(const char *sys, const char *name);
 
@@ -307,8 +318,7 @@ int perf_evsel__fprintf(struct perf_evsel *evsel,
 
 bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                           char *msg, size_t msgsize);
-int perf_evsel__open_strerror(struct perf_evsel *evsel,
-                              struct perf_target *target,
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
                               int err, char *msg, size_t size);
 
 static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
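With the constructors split above, most call sites no longer pass an index explicitly. A hypothetical caller inside the perf tree might now look like the snippet below; the helper name is made up for illustration and is not a call site from this diff.

#include "util/evsel.h" /* struct perf_evsel, perf_event_attr definitions */

/* Hypothetical helper: build a cycles evsel without caring about its index. */
static struct perf_evsel *make_cycles_evsel(void)
{
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };

        /*
         * perf_evsel__new() now defaults idx to 0; code that appends to an
         * evlist in order (see parse-events.c below) keeps using the _idx
         * variants.
         */
        return perf_evsel__new(&attr);
}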
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 26d9520a0c1b..369c03648f88 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2797,7 +2797,7 @@ int perf_session__read_header(struct perf_session *session)
                         perf_event__attr_swap(&f_attr.attr);
 
                 tmp = lseek(fd, 0, SEEK_CUR);
-                evsel = perf_evsel__new(&f_attr.attr, i);
+                evsel = perf_evsel__new(&f_attr.attr);
 
                 if (evsel == NULL)
                         goto out_delete_evlist;
@@ -2916,7 +2916,7 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
                 return -ENOMEM;
         }
 
-        evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries);
+        evsel = perf_evsel__new(&event->attr.attr);
         if (evsel == NULL)
                 return -ENOMEM;
 
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index ce034c183a7e..0393912d8033 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1394,3 +1394,15 @@ int machine__for_each_thread(struct machine *machine,
         }
         return rc;
 }
+
+int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+                                  struct target *target, struct thread_map *threads,
+                                  perf_event__handler_t process, bool data_mmap)
+{
+        if (target__has_task(target))
+                return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
+        else if (target__has_cpu(target))
+                return perf_event__synthesize_threads(tool, process, machine, data_mmap);
+        /* command specified */
+        return 0;
+}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 2389ba81fafe..477133015440 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -4,6 +4,7 @@
 #include <sys/types.h>
 #include <linux/rbtree.h>
 #include "map.h"
+#include "event.h"
 
 struct addr_location;
 struct branch_stack;
@@ -178,4 +179,15 @@ int machine__for_each_thread(struct machine *machine,
                              int (*fn)(struct thread *thread, void *p),
                              void *priv);
 
+int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+                                  struct target *target, struct thread_map *threads,
+                                  perf_event__handler_t process, bool data_mmap);
+static inline
+int machine__synthesize_threads(struct machine *machine, struct target *target,
+                                struct thread_map *threads, bool data_mmap)
+{
+        return __machine__synthesize_threads(machine, NULL, target, threads,
+                                             perf_event__process, data_mmap);
+}
+
 #endif /* __PERF_MACHINE_H */
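The new helpers above fold the open-coded "synthesize for a task list or for all threads" decision into one place. A hypothetical call from a perf tool (not a call site included in this diff) could look like this, with the wrapper name invented for illustration:

#include "util/machine.h"
#include "util/target.h"
#include "util/thread_map.h"

/* Hypothetical wrapper: synthesize COMM/MMAP events for existing threads. */
static int synth_existing_threads(struct machine *machine, struct target *target,
                                  struct thread_map *threads, bool sample_address)
{
        /*
         * With sample_address true (perf record --data), non-exec data
         * mappings are synthesized as well, so data addresses can later
         * be resolved to symbols.
         */
        return machine__synthesize_threads(machine, target, threads,
                                           sample_address);
}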
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index c90e55cf7e82..6de6f89c2a61 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -277,7 +277,7 @@ static int __add_event(struct list_head *list, int *idx,
 
         event_attr_init(attr);
 
-        evsel = perf_evsel__new(attr, (*idx)++);
+        evsel = perf_evsel__new_idx(attr, (*idx)++);
         if (!evsel)
                 return -ENOMEM;
 
@@ -378,7 +378,7 @@ static int add_tracepoint(struct list_head *list, int *idx,
 {
         struct perf_evsel *evsel;
 
-        evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++);
+        evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++);
         if (!evsel)
                 return -ENOMEM;
 
@@ -1097,7 +1097,7 @@ static bool is_event_supported(u8 type, unsigned config)
                 .threads = { 0 },
         };
 
-        evsel = perf_evsel__new(&attr, 0);
+        evsel = perf_evsel__new(&attr);
         if (evsel) {
                 ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
                 perf_evsel__delete(evsel);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 3c1b75c8b9a6..8b0bb1f4494a 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1137,6 +1137,8 @@ static void sort_entry__setup_elide(struct sort_entry *se,
 
 void sort__setup_elide(FILE *output)
 {
+        struct sort_entry *se;
+
         sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list,
                                 "dso", output);
         sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list,
@@ -1172,4 +1174,15 @@ void sort__setup_elide(FILE *output)
                                         "snoop", output);
         }
 
+        /*
+         * It makes no sense to elide all of sort entries.
+         * Just revert them to show up again.
+         */
+        list_for_each_entry(se, &hist_entry__sort_list, list) {
+                if (!se->elide)
+                        return;
+        }
+
+        list_for_each_entry(se, &hist_entry__sort_list, list)
+                se->elide = false;
 }
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index 065528b7563e..3c778a07b7cc 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -13,9 +13,9 @@
 #include <string.h>
 
 
-enum perf_target_errno perf_target__validate(struct perf_target *target)
+enum target_errno target__validate(struct target *target)
 {
-        enum perf_target_errno ret = PERF_ERRNO_TARGET__SUCCESS;
+        enum target_errno ret = TARGET_ERRNO__SUCCESS;
 
         if (target->pid)
                 target->tid = target->pid;
@@ -23,42 +23,42 @@ enum perf_target_errno perf_target__validate(struct perf_target *target)
         /* CPU and PID are mutually exclusive */
         if (target->tid && target->cpu_list) {
                 target->cpu_list = NULL;
-                if (ret == PERF_ERRNO_TARGET__SUCCESS)
-                        ret = PERF_ERRNO_TARGET__PID_OVERRIDE_CPU;
+                if (ret == TARGET_ERRNO__SUCCESS)
+                        ret = TARGET_ERRNO__PID_OVERRIDE_CPU;
         }
 
         /* UID and PID are mutually exclusive */
         if (target->tid && target->uid_str) {
                 target->uid_str = NULL;
-                if (ret == PERF_ERRNO_TARGET__SUCCESS)
-                        ret = PERF_ERRNO_TARGET__PID_OVERRIDE_UID;
+                if (ret == TARGET_ERRNO__SUCCESS)
+                        ret = TARGET_ERRNO__PID_OVERRIDE_UID;
         }
 
         /* UID and CPU are mutually exclusive */
         if (target->uid_str && target->cpu_list) {
                 target->cpu_list = NULL;
-                if (ret == PERF_ERRNO_TARGET__SUCCESS)
-                        ret = PERF_ERRNO_TARGET__UID_OVERRIDE_CPU;
+                if (ret == TARGET_ERRNO__SUCCESS)
+                        ret = TARGET_ERRNO__UID_OVERRIDE_CPU;
         }
 
         /* PID and SYSTEM are mutually exclusive */
         if (target->tid && target->system_wide) {
                 target->system_wide = false;
-                if (ret == PERF_ERRNO_TARGET__SUCCESS)
-                        ret = PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM;
+                if (ret == TARGET_ERRNO__SUCCESS)
+                        ret = TARGET_ERRNO__PID_OVERRIDE_SYSTEM;
         }
 
         /* UID and SYSTEM are mutually exclusive */
         if (target->uid_str && target->system_wide) {
                 target->system_wide = false;
-                if (ret == PERF_ERRNO_TARGET__SUCCESS)
-                        ret = PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM;
+                if (ret == TARGET_ERRNO__SUCCESS)
+                        ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM;
         }
 
         return ret;
 }
 
-enum perf_target_errno perf_target__parse_uid(struct perf_target *target)
+enum target_errno target__parse_uid(struct target *target)
 {
         struct passwd pwd, *result;
         char buf[1024];
@@ -66,7 +66,7 @@ enum perf_target_errno perf_target__parse_uid(struct perf_target *target)
 
         target->uid = UINT_MAX;
         if (str == NULL)
-                return PERF_ERRNO_TARGET__SUCCESS;
+                return TARGET_ERRNO__SUCCESS;
 
         /* Try user name first */
         getpwnam_r(str, &pwd, buf, sizeof(buf), &result);
@@ -79,22 +79,22 @@ enum perf_target_errno perf_target__parse_uid(struct perf_target *target)
                 int uid = strtol(str, &endptr, 10);
 
                 if (*endptr != '\0')
-                        return PERF_ERRNO_TARGET__INVALID_UID;
+                        return TARGET_ERRNO__INVALID_UID;
 
                 getpwuid_r(uid, &pwd, buf, sizeof(buf), &result);
 
                 if (result == NULL)
-                        return PERF_ERRNO_TARGET__USER_NOT_FOUND;
+                        return TARGET_ERRNO__USER_NOT_FOUND;
         }
 
         target->uid = result->pw_uid;
-        return PERF_ERRNO_TARGET__SUCCESS;
+        return TARGET_ERRNO__SUCCESS;
 }
 
 /*
- * This must have a same ordering as the enum perf_target_errno.
+ * This must have a same ordering as the enum target_errno.
  */
-static const char *perf_target__error_str[] = {
+static const char *target__error_str[] = {
         "PID/TID switch overriding CPU",
         "PID/TID switch overriding UID",
         "UID switch overriding CPU",
@@ -104,7 +104,7 @@ static const char *perf_target__error_str[] = {
         "Problems obtaining information for user %s",
 };
 
-int perf_target__strerror(struct perf_target *target, int errnum,
+int target__strerror(struct target *target, int errnum,
                           char *buf, size_t buflen)
 {
         int idx;
@@ -124,21 +124,19 @@ int perf_target__strerror(struct perf_target *target, int errnum,
                 return 0;
         }
 
-        if (errnum < __PERF_ERRNO_TARGET__START ||
-            errnum >= __PERF_ERRNO_TARGET__END)
+        if (errnum < __TARGET_ERRNO__START || errnum >= __TARGET_ERRNO__END)
                 return -1;
 
-        idx = errnum - __PERF_ERRNO_TARGET__START;
-        msg = perf_target__error_str[idx];
+        idx = errnum - __TARGET_ERRNO__START;
+        msg = target__error_str[idx];
 
         switch (errnum) {
-        case PERF_ERRNO_TARGET__PID_OVERRIDE_CPU
-        ... PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM:
+        case TARGET_ERRNO__PID_OVERRIDE_CPU ... TARGET_ERRNO__UID_OVERRIDE_SYSTEM:
                 snprintf(buf, buflen, "%s", msg);
                 break;
 
-        case PERF_ERRNO_TARGET__INVALID_UID:
-        case PERF_ERRNO_TARGET__USER_NOT_FOUND:
+        case TARGET_ERRNO__INVALID_UID:
+        case TARGET_ERRNO__USER_NOT_FOUND:
                 snprintf(buf, buflen, msg, target->uid_str);
                 break;
 
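The renamed API keeps its errno-style convention: target__validate() and target__parse_uid() return a target_errno code, and target__strerror() formats it into a message. A hypothetical consumer in a perf builtin (not a call site included in this diff) might do:

#include <stdio.h>
#include "util/target.h"

/* Hypothetical option check using the renamed target__ API. */
static void check_target(struct target *target)
{
        char msg[512];
        enum target_errno err = target__validate(target);

        if (err != TARGET_ERRNO__SUCCESS) {
                /* e.g. "PID/TID switch overriding CPU" */
                target__strerror(target, err, msg, sizeof(msg));
                fprintf(stderr, "warning: %s\n", msg);
        }

        err = target__parse_uid(target);
        if (err != TARGET_ERRNO__SUCCESS) {
                target__strerror(target, err, msg, sizeof(msg));
                fprintf(stderr, "error: %s\n", msg);
        }
}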
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
index a4be8575fda5..89bab7129de4 100644
--- a/tools/perf/util/target.h
+++ b/tools/perf/util/target.h
@@ -4,7 +4,7 @@
 #include <stdbool.h>
 #include <sys/types.h>
 
-struct perf_target {
+struct target {
         const char *pid;
         const char *tid;
         const char *cpu_list;
@@ -14,8 +14,8 @@ struct perf_target {
         bool uses_mmap;
 };
 
-enum perf_target_errno {
-        PERF_ERRNO_TARGET__SUCCESS = 0,
+enum target_errno {
+        TARGET_ERRNO__SUCCESS = 0,
 
         /*
          * Choose an arbitrary negative big number not to clash with standard
@@ -24,42 +24,40 @@ enum perf_target_errno {
          *
          * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
          */
-        __PERF_ERRNO_TARGET__START = -10000,
+        __TARGET_ERRNO__START = -10000,
 
+        /* for target__validate() */
+        TARGET_ERRNO__PID_OVERRIDE_CPU = __TARGET_ERRNO__START,
+        TARGET_ERRNO__PID_OVERRIDE_UID,
+        TARGET_ERRNO__UID_OVERRIDE_CPU,
+        TARGET_ERRNO__PID_OVERRIDE_SYSTEM,
+        TARGET_ERRNO__UID_OVERRIDE_SYSTEM,
 
-        /* for perf_target__validate() */
-        PERF_ERRNO_TARGET__PID_OVERRIDE_CPU = __PERF_ERRNO_TARGET__START,
-        PERF_ERRNO_TARGET__PID_OVERRIDE_UID,
-        PERF_ERRNO_TARGET__UID_OVERRIDE_CPU,
-        PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM,
-        PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM,
+        /* for target__parse_uid() */
+        TARGET_ERRNO__INVALID_UID,
+        TARGET_ERRNO__USER_NOT_FOUND,
 
-        /* for perf_target__parse_uid() */
-        PERF_ERRNO_TARGET__INVALID_UID,
-        PERF_ERRNO_TARGET__USER_NOT_FOUND,
-
-        __PERF_ERRNO_TARGET__END,
+        __TARGET_ERRNO__END,
 };
 
-enum perf_target_errno perf_target__validate(struct perf_target *target);
-enum perf_target_errno perf_target__parse_uid(struct perf_target *target);
+enum target_errno target__validate(struct target *target);
+enum target_errno target__parse_uid(struct target *target);
 
-int perf_target__strerror(struct perf_target *target, int errnum, char *buf,
-                          size_t buflen);
+int target__strerror(struct target *target, int errnum, char *buf, size_t buflen);
 
-static inline bool perf_target__has_task(struct perf_target *target)
+static inline bool target__has_task(struct target *target)
 {
         return target->tid || target->pid || target->uid_str;
 }
 
-static inline bool perf_target__has_cpu(struct perf_target *target)
+static inline bool target__has_cpu(struct target *target)
 {
         return target->system_wide || target->cpu_list;
 }
 
-static inline bool perf_target__none(struct perf_target *target)
+static inline bool target__none(struct target *target)
 {
-        return !perf_target__has_task(target) && !perf_target__has_cpu(target);
+        return !target__has_task(target) && !target__has_cpu(target);
 }
 
 #endif /* _PERF_TARGET_H */
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index f857b51b6bde..ce793c7dd23c 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -27,7 +27,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
         float ksamples_per_sec;
         float esamples_percent;
         struct perf_record_opts *opts = &top->record_opts;
-        struct perf_target *target = &opts->target;
+        struct target *target = &opts->target;
         size_t ret = 0;
 
         if (top->samples) {