path: root/tools/perf/util
Diffstat (limited to 'tools/perf/util')
-rwxr-xr-x  tools/perf/util/PERF-VERSION-GEN  4
-rw-r--r--  tools/perf/util/annotate.c  264
-rw-r--r--  tools/perf/util/annotate.h  63
-rw-r--r--  tools/perf/util/callchain.c  2
-rw-r--r--  tools/perf/util/callchain.h  5
-rw-r--r--  tools/perf/util/cpumap.c  116
-rw-r--r--  tools/perf/util/cpumap.h  21
-rw-r--r--  tools/perf/util/debug.c  28
-rw-r--r--  tools/perf/util/debug.h  34
-rw-r--r--  tools/perf/util/debugfs.c  114
-rw-r--r--  tools/perf/util/debugfs.h  12
-rw-r--r--  tools/perf/util/dso.c  6
-rw-r--r--  tools/perf/util/dso.h  2
-rw-r--r--  tools/perf/util/event.c  4
-rw-r--r--  tools/perf/util/event.h  9
-rw-r--r--  tools/perf/util/evlist.c  107
-rw-r--r--  tools/perf/util/evlist.h  43
-rw-r--r--  tools/perf/util/evsel.c  402
-rw-r--r--  tools/perf/util/evsel.h  75
-rw-r--r--  tools/perf/util/header.c  281
-rw-r--r--  tools/perf/util/header.h  2
-rw-r--r--  tools/perf/util/hist.c  252
-rw-r--r--  tools/perf/util/hist.h  59
-rw-r--r--  tools/perf/util/include/linux/bitops.h  1
-rw-r--r--  tools/perf/util/intlist.c  36
-rw-r--r--  tools/perf/util/intlist.h  2
-rw-r--r--  tools/perf/util/machine.c  826
-rw-r--r--  tools/perf/util/machine.h  45
-rw-r--r--  tools/perf/util/map.c  121
-rw-r--r--  tools/perf/util/map.h  24
-rw-r--r--  tools/perf/util/parse-events.c  98
-rw-r--r--  tools/perf/util/parse-events.h  22
-rw-r--r--  tools/perf/util/parse-events.y  75
-rw-r--r--  tools/perf/util/pmu.c  46
-rw-r--r--  tools/perf/util/pmu.h  15
-rw-r--r--  tools/perf/util/pmu.y  1
-rw-r--r--  tools/perf/util/probe-event.c  2
-rw-r--r--  tools/perf/util/probe-finder.c  10
-rw-r--r--  tools/perf/util/python-ext-sources  2
-rw-r--r--  tools/perf/util/python.c  9
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c  1
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c  1
-rw-r--r--  tools/perf/util/session.c  341
-rw-r--r--  tools/perf/util/session.h  36
-rw-r--r--  tools/perf/util/setup.py  3
-rw-r--r--  tools/perf/util/sort.c  653
-rw-r--r--  tools/perf/util/sort.h  27
-rw-r--r--  tools/perf/util/string.c  18
-rw-r--r--  tools/perf/util/strlist.c  54
-rw-r--r--  tools/perf/util/strlist.h  42
-rw-r--r--  tools/perf/util/symbol-elf.c  23
-rw-r--r--  tools/perf/util/symbol-minimal.c  1
-rw-r--r--  tools/perf/util/symbol.c  537
-rw-r--r--  tools/perf/util/symbol.h  16
-rw-r--r--  tools/perf/util/sysfs.c  2
-rw-r--r--  tools/perf/util/thread.c  20
-rw-r--r--  tools/perf/util/thread.h  1
-rw-r--r--  tools/perf/util/thread_map.h  5
-rw-r--r--  tools/perf/util/top.c  22
-rw-r--r--  tools/perf/util/top.h  10
-rw-r--r--  tools/perf/util/trace-event-info.c  380
-rw-r--r--  tools/perf/util/trace-event-parse.c  37
-rw-r--r--  tools/perf/util/trace-event-read.c  473
-rw-r--r--  tools/perf/util/trace-event.h  6
-rw-r--r--  tools/perf/util/util.c  51
-rw-r--r--  tools/perf/util/util.h  11
66 files changed, 3821 insertions, 2190 deletions
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index 6aa34e5afdcf..055fef34b6f6 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -26,13 +26,13 @@ VN=$(expr "$VN" : v*'\(.*\)')
26 26
27if test -r $GVF 27if test -r $GVF
28then 28then
29 VC=$(sed -e 's/^PERF_VERSION = //' <$GVF) 29 VC=$(sed -e 's/^#define PERF_VERSION "\(.*\)"/\1/' <$GVF)
30else 30else
31 VC=unset 31 VC=unset
32fi 32fi
33test "$VN" = "$VC" || { 33test "$VN" = "$VC" || {
34 echo >&2 "PERF_VERSION = $VN" 34 echo >&2 "PERF_VERSION = $VN"
35 echo "PERF_VERSION = $VN" >$GVF 35 echo "#define PERF_VERSION \"$VN\"" >$GVF
36} 36}
37 37
38 38
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 07aaeea60000..d102716c43a1 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -14,6 +14,7 @@
14#include "symbol.h" 14#include "symbol.h"
15#include "debug.h" 15#include "debug.h"
16#include "annotate.h" 16#include "annotate.h"
17#include "evsel.h"
17#include <pthread.h> 18#include <pthread.h>
18#include <linux/bitops.h> 19#include <linux/bitops.h>
19 20
@@ -602,8 +603,42 @@ struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disa
602 return NULL; 603 return NULL;
603} 604}
604 605
606double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
607 s64 end, const char **path)
608{
609 struct source_line *src_line = notes->src->lines;
610 double percent = 0.0;
611
612 if (src_line) {
613 size_t sizeof_src_line = sizeof(*src_line) +
614 sizeof(src_line->p) * (src_line->nr_pcnt - 1);
615
616 while (offset < end) {
617 src_line = (void *)notes->src->lines +
618 (sizeof_src_line * offset);
619
620 if (*path == NULL)
621 *path = src_line->path;
622
623 percent += src_line->p[evidx].percent;
624 offset++;
625 }
626 } else {
627 struct sym_hist *h = annotation__histogram(notes, evidx);
628 unsigned int hits = 0;
629
630 while (offset < end)
631 hits += h->addr[offset++];
632
633 if (h->sum)
634 percent = 100.0 * hits / h->sum;
635 }
636
637 return percent;
638}
639
605static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start, 640static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
606 int evidx, u64 len, int min_pcnt, int printed, 641 struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
607 int max_lines, struct disasm_line *queue) 642 int max_lines, struct disasm_line *queue)
608{ 643{
609 static const char *prev_line; 644 static const char *prev_line;
@@ -611,34 +646,37 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
611 646
612 if (dl->offset != -1) { 647 if (dl->offset != -1) {
613 const char *path = NULL; 648 const char *path = NULL;
614 unsigned int hits = 0; 649 double percent, max_percent = 0.0;
615 double percent = 0.0; 650 double *ppercents = &percent;
651 int i, nr_percent = 1;
616 const char *color; 652 const char *color;
617 struct annotation *notes = symbol__annotation(sym); 653 struct annotation *notes = symbol__annotation(sym);
618 struct source_line *src_line = notes->src->lines;
619 struct sym_hist *h = annotation__histogram(notes, evidx);
620 s64 offset = dl->offset; 654 s64 offset = dl->offset;
621 const u64 addr = start + offset; 655 const u64 addr = start + offset;
622 struct disasm_line *next; 656 struct disasm_line *next;
623 657
624 next = disasm__get_next_ip_line(&notes->src->source, dl); 658 next = disasm__get_next_ip_line(&notes->src->source, dl);
625 659
626 while (offset < (s64)len && 660 if (perf_evsel__is_group_event(evsel)) {
627 (next == NULL || offset < next->offset)) { 661 nr_percent = evsel->nr_members;
628 if (src_line) { 662 ppercents = calloc(nr_percent, sizeof(double));
629 if (path == NULL) 663 if (ppercents == NULL)
630 path = src_line[offset].path; 664 return -1;
631 percent += src_line[offset].percent;
632 } else
633 hits += h->addr[offset];
634
635 ++offset;
636 } 665 }
637 666
638 if (src_line == NULL && h->sum) 667 for (i = 0; i < nr_percent; i++) {
639 percent = 100.0 * hits / h->sum; 668 percent = disasm__calc_percent(notes,
669 notes->src->lines ? i : evsel->idx + i,
670 offset,
671 next ? next->offset : (s64) len,
672 &path);
673
674 ppercents[i] = percent;
675 if (percent > max_percent)
676 max_percent = percent;
677 }
640 678
641 if (percent < min_pcnt) 679 if (max_percent < min_pcnt)
642 return -1; 680 return -1;
643 681
644 if (max_lines && printed >= max_lines) 682 if (max_lines && printed >= max_lines)
@@ -648,12 +686,12 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
648 list_for_each_entry_from(queue, &notes->src->source, node) { 686 list_for_each_entry_from(queue, &notes->src->source, node) {
649 if (queue == dl) 687 if (queue == dl)
650 break; 688 break;
651 disasm_line__print(queue, sym, start, evidx, len, 689 disasm_line__print(queue, sym, start, evsel, len,
652 0, 0, 1, NULL); 690 0, 0, 1, NULL);
653 } 691 }
654 } 692 }
655 693
656 color = get_percent_color(percent); 694 color = get_percent_color(max_percent);
657 695
658 /* 696 /*
659 * Also color the filename and line if needed, with 697 * Also color the filename and line if needed, with
@@ -669,25 +707,59 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
669 } 707 }
670 } 708 }
671 709
672 color_fprintf(stdout, color, " %7.2f", percent); 710 for (i = 0; i < nr_percent; i++) {
711 percent = ppercents[i];
712 color = get_percent_color(percent);
713 color_fprintf(stdout, color, " %7.2f", percent);
714 }
715
673 printf(" : "); 716 printf(" : ");
674 color_fprintf(stdout, PERF_COLOR_MAGENTA, " %" PRIx64 ":", addr); 717 color_fprintf(stdout, PERF_COLOR_MAGENTA, " %" PRIx64 ":", addr);
675 color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", dl->line); 718 color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", dl->line);
719
720 if (ppercents != &percent)
721 free(ppercents);
722
676 } else if (max_lines && printed >= max_lines) 723 } else if (max_lines && printed >= max_lines)
677 return 1; 724 return 1;
678 else { 725 else {
726 int width = 8;
727
679 if (queue) 728 if (queue)
680 return -1; 729 return -1;
681 730
731 if (perf_evsel__is_group_event(evsel))
732 width *= evsel->nr_members;
733
682 if (!*dl->line) 734 if (!*dl->line)
683 printf(" :\n"); 735 printf(" %*s:\n", width, " ");
684 else 736 else
685 printf(" : %s\n", dl->line); 737 printf(" %*s: %s\n", width, " ", dl->line);
686 } 738 }
687 739
688 return 0; 740 return 0;
689} 741}
690 742
743/*
744 * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
745 * which looks like following
746 *
747 * 0000000000415500 <_init>:
748 * 415500: sub $0x8,%rsp
749 * 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8>
750 * 41550b: test %rax,%rax
751 * 41550e: je 415515 <_init+0x15>
752 * 415510: callq 416e70 <__gmon_start__@plt>
753 * 415515: add $0x8,%rsp
754 * 415519: retq
755 *
756 * it will be parsed and saved into struct disasm_line as
757 * <offset> <name> <ops.raw>
758 *
759 * The offset will be a relative offset from the start of the symbol and -1
760 * means that it's not a disassembly line so should be treated differently.
761 * The ops.raw part will be parsed further according to type of the instruction.
762 */
691static int symbol__parse_objdump_line(struct symbol *sym, struct map *map, 763static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
692 FILE *file, size_t privsize) 764 FILE *file, size_t privsize)
693{ 765{
@@ -809,7 +881,7 @@ fallback:
809 pr_err("Can't annotate %s:\n\n" 881 pr_err("Can't annotate %s:\n\n"
810 "No vmlinux file%s\nwas found in the path.\n\n" 882 "No vmlinux file%s\nwas found in the path.\n\n"
811 "Please use:\n\n" 883 "Please use:\n\n"
812 " perf buildid-cache -av vmlinux\n\n" 884 " perf buildid-cache -vu vmlinux\n\n"
813 "or:\n\n" 885 "or:\n\n"
814 " --vmlinux vmlinux\n", 886 " --vmlinux vmlinux\n",
815 sym->name, build_id_msg ?: ""); 887 sym->name, build_id_msg ?: "");
@@ -858,7 +930,7 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin
858 struct source_line *iter; 930 struct source_line *iter;
859 struct rb_node **p = &root->rb_node; 931 struct rb_node **p = &root->rb_node;
860 struct rb_node *parent = NULL; 932 struct rb_node *parent = NULL;
861 int ret; 933 int i, ret;
862 934
863 while (*p != NULL) { 935 while (*p != NULL) {
864 parent = *p; 936 parent = *p;
@@ -866,7 +938,8 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin
866 938
867 ret = strcmp(iter->path, src_line->path); 939 ret = strcmp(iter->path, src_line->path);
868 if (ret == 0) { 940 if (ret == 0) {
869 iter->percent_sum += src_line->percent; 941 for (i = 0; i < src_line->nr_pcnt; i++)
942 iter->p[i].percent_sum += src_line->p[i].percent;
870 return; 943 return;
871 } 944 }
872 945
@@ -876,12 +949,26 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin
876 p = &(*p)->rb_right; 949 p = &(*p)->rb_right;
877 } 950 }
878 951
879 src_line->percent_sum = src_line->percent; 952 for (i = 0; i < src_line->nr_pcnt; i++)
953 src_line->p[i].percent_sum = src_line->p[i].percent;
880 954
881 rb_link_node(&src_line->node, parent, p); 955 rb_link_node(&src_line->node, parent, p);
882 rb_insert_color(&src_line->node, root); 956 rb_insert_color(&src_line->node, root);
883} 957}
884 958
959static int cmp_source_line(struct source_line *a, struct source_line *b)
960{
961 int i;
962
963 for (i = 0; i < a->nr_pcnt; i++) {
964 if (a->p[i].percent_sum == b->p[i].percent_sum)
965 continue;
966 return a->p[i].percent_sum > b->p[i].percent_sum;
967 }
968
969 return 0;
970}
971
885static void __resort_source_line(struct rb_root *root, struct source_line *src_line) 972static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
886{ 973{
887 struct source_line *iter; 974 struct source_line *iter;
@@ -892,7 +979,7 @@ static void __resort_source_line(struct rb_root *root, struct source_line *src_l
892 parent = *p; 979 parent = *p;
893 iter = rb_entry(parent, struct source_line, node); 980 iter = rb_entry(parent, struct source_line, node);
894 981
895 if (src_line->percent_sum > iter->percent_sum) 982 if (cmp_source_line(src_line, iter))
896 p = &(*p)->rb_left; 983 p = &(*p)->rb_left;
897 else 984 else
898 p = &(*p)->rb_right; 985 p = &(*p)->rb_right;
@@ -924,32 +1011,52 @@ static void symbol__free_source_line(struct symbol *sym, int len)
924{ 1011{
925 struct annotation *notes = symbol__annotation(sym); 1012 struct annotation *notes = symbol__annotation(sym);
926 struct source_line *src_line = notes->src->lines; 1013 struct source_line *src_line = notes->src->lines;
1014 size_t sizeof_src_line;
927 int i; 1015 int i;
928 1016
929 for (i = 0; i < len; i++) 1017 sizeof_src_line = sizeof(*src_line) +
930 free(src_line[i].path); 1018 (sizeof(src_line->p) * (src_line->nr_pcnt - 1));
1019
1020 for (i = 0; i < len; i++) {
1021 free(src_line->path);
1022 src_line = (void *)src_line + sizeof_src_line;
1023 }
931 1024
932 free(src_line); 1025 free(notes->src->lines);
933 notes->src->lines = NULL; 1026 notes->src->lines = NULL;
934} 1027}
935 1028
936/* Get the filename:line for the colored entries */ 1029/* Get the filename:line for the colored entries */
937static int symbol__get_source_line(struct symbol *sym, struct map *map, 1030static int symbol__get_source_line(struct symbol *sym, struct map *map,
938 int evidx, struct rb_root *root, int len, 1031 struct perf_evsel *evsel,
1032 struct rb_root *root, int len,
939 const char *filename) 1033 const char *filename)
940{ 1034{
941 u64 start; 1035 u64 start;
942 int i; 1036 int i, k;
1037 int evidx = evsel->idx;
943 char cmd[PATH_MAX * 2]; 1038 char cmd[PATH_MAX * 2];
944 struct source_line *src_line; 1039 struct source_line *src_line;
945 struct annotation *notes = symbol__annotation(sym); 1040 struct annotation *notes = symbol__annotation(sym);
946 struct sym_hist *h = annotation__histogram(notes, evidx); 1041 struct sym_hist *h = annotation__histogram(notes, evidx);
947 struct rb_root tmp_root = RB_ROOT; 1042 struct rb_root tmp_root = RB_ROOT;
1043 int nr_pcnt = 1;
1044 u64 h_sum = h->sum;
1045 size_t sizeof_src_line = sizeof(struct source_line);
1046
1047 if (perf_evsel__is_group_event(evsel)) {
1048 for (i = 1; i < evsel->nr_members; i++) {
1049 h = annotation__histogram(notes, evidx + i);
1050 h_sum += h->sum;
1051 }
1052 nr_pcnt = evsel->nr_members;
1053 sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->p);
1054 }
948 1055
949 if (!h->sum) 1056 if (!h_sum)
950 return 0; 1057 return 0;
951 1058
952 src_line = notes->src->lines = calloc(len, sizeof(struct source_line)); 1059 src_line = notes->src->lines = calloc(len, sizeof_src_line);
953 if (!notes->src->lines) 1060 if (!notes->src->lines)
954 return -1; 1061 return -1;
955 1062
@@ -960,29 +1067,41 @@ static int symbol__get_source_line(struct symbol *sym, struct map *map,
960 size_t line_len; 1067 size_t line_len;
961 u64 offset; 1068 u64 offset;
962 FILE *fp; 1069 FILE *fp;
1070 double percent_max = 0.0;
963 1071
964 src_line[i].percent = 100.0 * h->addr[i] / h->sum; 1072 src_line->nr_pcnt = nr_pcnt;
965 if (src_line[i].percent <= 0.5) 1073
966 continue; 1074 for (k = 0; k < nr_pcnt; k++) {
1075 h = annotation__histogram(notes, evidx + k);
1076 src_line->p[k].percent = 100.0 * h->addr[i] / h->sum;
1077
1078 if (src_line->p[k].percent > percent_max)
1079 percent_max = src_line->p[k].percent;
1080 }
1081
1082 if (percent_max <= 0.5)
1083 goto next;
967 1084
968 offset = start + i; 1085 offset = start + i;
969 sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset); 1086 sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset);
970 fp = popen(cmd, "r"); 1087 fp = popen(cmd, "r");
971 if (!fp) 1088 if (!fp)
972 continue; 1089 goto next;
973 1090
974 if (getline(&path, &line_len, fp) < 0 || !line_len) 1091 if (getline(&path, &line_len, fp) < 0 || !line_len)
975 goto next; 1092 goto next_close;
976 1093
977 src_line[i].path = malloc(sizeof(char) * line_len + 1); 1094 src_line->path = malloc(sizeof(char) * line_len + 1);
978 if (!src_line[i].path) 1095 if (!src_line->path)
979 goto next; 1096 goto next_close;
980 1097
981 strcpy(src_line[i].path, path); 1098 strcpy(src_line->path, path);
982 insert_source_line(&tmp_root, &src_line[i]); 1099 insert_source_line(&tmp_root, src_line);
983 1100
984 next: 1101 next_close:
985 pclose(fp); 1102 pclose(fp);
1103 next:
1104 src_line = (void *)src_line + sizeof_src_line;
986 } 1105 }
987 1106
988 resort_source_line(root, &tmp_root); 1107 resort_source_line(root, &tmp_root);
@@ -1004,24 +1123,33 @@ static void print_summary(struct rb_root *root, const char *filename)
1004 1123
1005 node = rb_first(root); 1124 node = rb_first(root);
1006 while (node) { 1125 while (node) {
1007 double percent; 1126 double percent, percent_max = 0.0;
1008 const char *color; 1127 const char *color;
1009 char *path; 1128 char *path;
1129 int i;
1010 1130
1011 src_line = rb_entry(node, struct source_line, node); 1131 src_line = rb_entry(node, struct source_line, node);
1012 percent = src_line->percent_sum; 1132 for (i = 0; i < src_line->nr_pcnt; i++) {
1013 color = get_percent_color(percent); 1133 percent = src_line->p[i].percent_sum;
1134 color = get_percent_color(percent);
1135 color_fprintf(stdout, color, " %7.2f", percent);
1136
1137 if (percent > percent_max)
1138 percent_max = percent;
1139 }
1140
1014 path = src_line->path; 1141 path = src_line->path;
1142 color = get_percent_color(percent_max);
1143 color_fprintf(stdout, color, " %s", path);
1015 1144
1016 color_fprintf(stdout, color, " %7.2f %s", percent, path);
1017 node = rb_next(node); 1145 node = rb_next(node);
1018 } 1146 }
1019} 1147}
1020 1148
1021static void symbol__annotate_hits(struct symbol *sym, int evidx) 1149static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
1022{ 1150{
1023 struct annotation *notes = symbol__annotation(sym); 1151 struct annotation *notes = symbol__annotation(sym);
1024 struct sym_hist *h = annotation__histogram(notes, evidx); 1152 struct sym_hist *h = annotation__histogram(notes, evsel->idx);
1025 u64 len = symbol__size(sym), offset; 1153 u64 len = symbol__size(sym), offset;
1026 1154
1027 for (offset = 0; offset < len; ++offset) 1155 for (offset = 0; offset < len; ++offset)
@@ -1031,9 +1159,9 @@ static void symbol__annotate_hits(struct symbol *sym, int evidx)
1031 printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum); 1159 printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
1032} 1160}
1033 1161
1034int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, 1162int symbol__annotate_printf(struct symbol *sym, struct map *map,
1035 bool full_paths, int min_pcnt, int max_lines, 1163 struct perf_evsel *evsel, bool full_paths,
1036 int context) 1164 int min_pcnt, int max_lines, int context)
1037{ 1165{
1038 struct dso *dso = map->dso; 1166 struct dso *dso = map->dso;
1039 char *filename; 1167 char *filename;
@@ -1044,6 +1172,8 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
1044 int printed = 2, queue_len = 0; 1172 int printed = 2, queue_len = 0;
1045 int more = 0; 1173 int more = 0;
1046 u64 len; 1174 u64 len;
1175 int width = 8;
1176 int namelen;
1047 1177
1048 filename = strdup(dso->long_name); 1178 filename = strdup(dso->long_name);
1049 if (!filename) 1179 if (!filename)
@@ -1055,12 +1185,18 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
1055 d_filename = basename(filename); 1185 d_filename = basename(filename);
1056 1186
1057 len = symbol__size(sym); 1187 len = symbol__size(sym);
1188 namelen = strlen(d_filename);
1189
1190 if (perf_evsel__is_group_event(evsel))
1191 width *= evsel->nr_members;
1058 1192
1059 printf(" Percent | Source code & Disassembly of %s\n", d_filename); 1193 printf(" %-*.*s| Source code & Disassembly of %s\n",
1060 printf("------------------------------------------------\n"); 1194 width, width, "Percent", d_filename);
1195 printf("-%-*.*s-------------------------------------\n",
1196 width+namelen, width+namelen, graph_dotted_line);
1061 1197
1062 if (verbose) 1198 if (verbose)
1063 symbol__annotate_hits(sym, evidx); 1199 symbol__annotate_hits(sym, evsel);
1064 1200
1065 list_for_each_entry(pos, &notes->src->source, node) { 1201 list_for_each_entry(pos, &notes->src->source, node) {
1066 if (context && queue == NULL) { 1202 if (context && queue == NULL) {
@@ -1068,7 +1204,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
1068 queue_len = 0; 1204 queue_len = 0;
1069 } 1205 }
1070 1206
1071 switch (disasm_line__print(pos, sym, start, evidx, len, 1207 switch (disasm_line__print(pos, sym, start, evsel, len,
1072 min_pcnt, printed, max_lines, 1208 min_pcnt, printed, max_lines,
1073 queue)) { 1209 queue)) {
1074 case 0: 1210 case 0:
@@ -1163,9 +1299,9 @@ size_t disasm__fprintf(struct list_head *head, FILE *fp)
1163 return printed; 1299 return printed;
1164} 1300}
1165 1301
1166int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, 1302int symbol__tty_annotate(struct symbol *sym, struct map *map,
1167 bool print_lines, bool full_paths, int min_pcnt, 1303 struct perf_evsel *evsel, bool print_lines,
1168 int max_lines) 1304 bool full_paths, int min_pcnt, int max_lines)
1169{ 1305{
1170 struct dso *dso = map->dso; 1306 struct dso *dso = map->dso;
1171 const char *filename = dso->long_name; 1307 const char *filename = dso->long_name;
@@ -1178,12 +1314,12 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
1178 len = symbol__size(sym); 1314 len = symbol__size(sym);
1179 1315
1180 if (print_lines) { 1316 if (print_lines) {
1181 symbol__get_source_line(sym, map, evidx, &source_line, 1317 symbol__get_source_line(sym, map, evsel, &source_line,
1182 len, filename); 1318 len, filename);
1183 print_summary(&source_line, filename); 1319 print_summary(&source_line, filename);
1184 } 1320 }
1185 1321
1186 symbol__annotate_printf(sym, map, evidx, full_paths, 1322 symbol__annotate_printf(sym, map, evsel, full_paths,
1187 min_pcnt, max_lines, 0); 1323 min_pcnt, max_lines, 0);
1188 if (print_lines) 1324 if (print_lines)
1189 symbol__free_source_line(sym, len); 1325 symbol__free_source_line(sym, len);
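
Note: disasm__calc_percent() above hides an indexing subtlety: when per-source-line data (notes->src->lines) exists, the percentages are stored group-locally, so callers pass indices 0..nr_members-1; without it, the raw per-evsel histograms are indexed via evsel->idx + i. A minimal caller sketch follows; the helper name is made up and not part of this patch, it just mirrors what disasm_line__print() does per group member.

#include <stdio.h>
#include "annotate.h"
#include "evsel.h"

/* Hypothetical helper, not part of this patch: print one percent column
 * per group member for a single disassembly line. */
static void print_line_percents(struct annotation *notes, struct perf_evsel *evsel,
				struct disasm_line *dl, s64 end)
{
	int i, nr = perf_evsel__is_group_event(evsel) ? evsel->nr_members : 1;

	for (i = 0; i < nr; i++) {
		const char *path = NULL;
		/* group-local index when source-line data exists,
		 * raw histogram index (evsel->idx + i) otherwise */
		double percent = disasm__calc_percent(notes,
				notes->src->lines ? i : evsel->idx + i,
				dl->offset, end, &path);

		printf(" %7.2f", percent);
	}
}
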
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 8eec94358a4a..af755156d278 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -6,6 +6,7 @@
6#include "types.h" 6#include "types.h"
7#include "symbol.h" 7#include "symbol.h"
8#include "hist.h" 8#include "hist.h"
9#include "sort.h"
9#include <linux/list.h> 10#include <linux/list.h>
10#include <linux/rbtree.h> 11#include <linux/rbtree.h>
11#include <pthread.h> 12#include <pthread.h>
@@ -49,6 +50,8 @@ bool ins__is_jump(const struct ins *ins);
49bool ins__is_call(const struct ins *ins); 50bool ins__is_call(const struct ins *ins);
50int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops); 51int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops);
51 52
53struct annotation;
54
52struct disasm_line { 55struct disasm_line {
53 struct list_head node; 56 struct list_head node;
54 s64 offset; 57 s64 offset;
@@ -67,17 +70,24 @@ void disasm_line__free(struct disasm_line *dl);
67struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos); 70struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos);
68int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw); 71int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
69size_t disasm__fprintf(struct list_head *head, FILE *fp); 72size_t disasm__fprintf(struct list_head *head, FILE *fp);
73double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
74 s64 end, const char **path);
70 75
71struct sym_hist { 76struct sym_hist {
72 u64 sum; 77 u64 sum;
73 u64 addr[0]; 78 u64 addr[0];
74}; 79};
75 80
76struct source_line { 81struct source_line_percent {
77 struct rb_node node;
78 double percent; 82 double percent;
79 double percent_sum; 83 double percent_sum;
84};
85
86struct source_line {
87 struct rb_node node;
80 char *path; 88 char *path;
89 int nr_pcnt;
90 struct source_line_percent p[1];
81}; 91};
82 92
83/** struct annotated_source - symbols with hits have this attached as in sannotation 93/** struct annotated_source - symbols with hits have this attached as in sannotation
@@ -129,31 +139,56 @@ void symbol__annotate_zero_histograms(struct symbol *sym);
129 139
130int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize); 140int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
131int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym); 141int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym);
132int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, 142int symbol__annotate_printf(struct symbol *sym, struct map *map,
133 bool full_paths, int min_pcnt, int max_lines, 143 struct perf_evsel *evsel, bool full_paths,
134 int context); 144 int min_pcnt, int max_lines, int context);
135void symbol__annotate_zero_histogram(struct symbol *sym, int evidx); 145void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
136void symbol__annotate_decay_histogram(struct symbol *sym, int evidx); 146void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
137void disasm__purge(struct list_head *head); 147void disasm__purge(struct list_head *head);
138 148
139int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, 149int symbol__tty_annotate(struct symbol *sym, struct map *map,
140 bool print_lines, bool full_paths, int min_pcnt, 150 struct perf_evsel *evsel, bool print_lines,
141 int max_lines); 151 bool full_paths, int min_pcnt, int max_lines);
142 152
143#ifdef NEWT_SUPPORT 153#ifdef SLANG_SUPPORT
144int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, 154int symbol__tui_annotate(struct symbol *sym, struct map *map,
155 struct perf_evsel *evsel,
145 struct hist_browser_timer *hbt); 156 struct hist_browser_timer *hbt);
146#else 157#else
147static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused, 158static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused,
148 struct map *map __maybe_unused, 159 struct map *map __maybe_unused,
149 int evidx __maybe_unused, 160 struct perf_evsel *evsel __maybe_unused,
150 struct hist_browser_timer *hbt 161 struct hist_browser_timer *hbt
151 __maybe_unused) 162 __maybe_unused)
152{ 163{
153 return 0; 164 return 0;
154} 165}
155#endif 166#endif
156 167
168#ifdef GTK2_SUPPORT
169int symbol__gtk_annotate(struct symbol *sym, struct map *map,
170 struct perf_evsel *evsel,
171 struct hist_browser_timer *hbt);
172
173static inline int hist_entry__gtk_annotate(struct hist_entry *he,
174 struct perf_evsel *evsel,
175 struct hist_browser_timer *hbt)
176{
177 return symbol__gtk_annotate(he->ms.sym, he->ms.map, evsel, hbt);
178}
179
180void perf_gtk__show_annotations(void);
181#else
182static inline int hist_entry__gtk_annotate(struct hist_entry *he __maybe_unused,
183 struct perf_evsel *evsel __maybe_unused,
184 struct hist_browser_timer *hbt __maybe_unused)
185{
186 return 0;
187}
188
189static inline void perf_gtk__show_annotations(void) {}
190#endif
191
157extern const char *disassembler_style; 192extern const char *disassembler_style;
158 193
159#endif /* __PERF_ANNOTATE_H */ 194#endif /* __PERF_ANNOTATE_H */
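
Note: struct source_line now ends in a one-element source_line_percent array, so an entry for a group of N counters occupies sizeof(struct source_line) + (N - 1) * sizeof(struct source_line_percent) bytes, and the table of entries must be walked by byte stride rather than by ordinary array indexing. A minimal sketch of that access pattern; the helper name is illustrative, the perf code open-codes the same arithmetic in symbol__get_source_line(), symbol__free_source_line() and disasm__calc_percent().

#include <stdlib.h>
#include "annotate.h"

/* Hypothetical helper: return the idx-th variable-sized source_line entry,
 * assuming the table was allocated with calloc(len, stride). */
static struct source_line *source_line__entry(struct source_line *lines,
					      int nr_pcnt, int idx)
{
	size_t stride = sizeof(*lines) + (nr_pcnt - 1) * sizeof(lines->p[0]);

	return (void *)lines + stride * idx;
}
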
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index d3b3f5d82137..42b6a632fe7b 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -444,7 +444,7 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
444 struct callchain_cursor_node *node = *cursor->last; 444 struct callchain_cursor_node *node = *cursor->last;
445 445
446 if (!node) { 446 if (!node) {
447 node = calloc(sizeof(*node), 1); 447 node = calloc(1, sizeof(*node));
448 if (!node) 448 if (!node)
449 return -ENOMEM; 449 return -ENOMEM;
450 450
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index eb340571e7d6..3ee9f67d5af0 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -143,4 +143,9 @@ static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
143 cursor->curr = cursor->curr->next; 143 cursor->curr = cursor->curr->next;
144 cursor->pos++; 144 cursor->pos++;
145} 145}
146
147struct option;
148
149int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
150extern const char record_callchain_help[];
146#endif /* __PERF_CALLCHAIN_H */ 151#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 2b32ffa9ebdb..beb8cf9f9976 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -1,8 +1,10 @@
1#include "util.h" 1#include "util.h"
2#include "sysfs.h"
2#include "../perf.h" 3#include "../perf.h"
3#include "cpumap.h" 4#include "cpumap.h"
4#include <assert.h> 5#include <assert.h>
5#include <stdio.h> 6#include <stdio.h>
7#include <stdlib.h>
6 8
7static struct cpu_map *cpu_map__default_new(void) 9static struct cpu_map *cpu_map__default_new(void)
8{ 10{
@@ -201,3 +203,117 @@ void cpu_map__delete(struct cpu_map *map)
201{ 203{
202 free(map); 204 free(map);
203} 205}
206
207int cpu_map__get_socket(struct cpu_map *map, int idx)
208{
209 FILE *fp;
210 const char *mnt;
211 char path[PATH_MAX];
212 int cpu, ret;
213
214 if (idx > map->nr)
215 return -1;
216
217 cpu = map->map[idx];
218
219 mnt = sysfs_find_mountpoint();
220 if (!mnt)
221 return -1;
222
223 snprintf(path, PATH_MAX,
224 "%s/devices/system/cpu/cpu%d/topology/physical_package_id",
225 mnt, cpu);
226
227 fp = fopen(path, "r");
228 if (!fp)
229 return -1;
230 ret = fscanf(fp, "%d", &cpu);
231 fclose(fp);
232 return ret == 1 ? cpu : -1;
233}
234
235static int cmp_ids(const void *a, const void *b)
236{
237 return *(int *)a - *(int *)b;
238}
239
240static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
241 int (*f)(struct cpu_map *map, int cpu))
242{
243 struct cpu_map *c;
244 int nr = cpus->nr;
245 int cpu, s1, s2;
246
247 /* allocate as much as possible */
248 c = calloc(1, sizeof(*c) + nr * sizeof(int));
249 if (!c)
250 return -1;
251
252 for (cpu = 0; cpu < nr; cpu++) {
253 s1 = f(cpus, cpu);
254 for (s2 = 0; s2 < c->nr; s2++) {
255 if (s1 == c->map[s2])
256 break;
257 }
258 if (s2 == c->nr) {
259 c->map[c->nr] = s1;
260 c->nr++;
261 }
262 }
263 /* ensure we process id in increasing order */
264 qsort(c->map, c->nr, sizeof(int), cmp_ids);
265
266 *res = c;
267 return 0;
268}
269
270int cpu_map__get_core(struct cpu_map *map, int idx)
271{
272 FILE *fp;
273 const char *mnt;
274 char path[PATH_MAX];
275 int cpu, ret, s;
276
277 if (idx > map->nr)
278 return -1;
279
280 cpu = map->map[idx];
281
282 mnt = sysfs_find_mountpoint();
283 if (!mnt)
284 return -1;
285
286 snprintf(path, PATH_MAX,
287 "%s/devices/system/cpu/cpu%d/topology/core_id",
288 mnt, cpu);
289
290 fp = fopen(path, "r");
291 if (!fp)
292 return -1;
293 ret = fscanf(fp, "%d", &cpu);
294 fclose(fp);
295 if (ret != 1)
296 return -1;
297
298 s = cpu_map__get_socket(map, idx);
299 if (s == -1)
300 return -1;
301
302 /*
303 * encode socket in upper 16 bits
304 * core_id is relative to socket, and
305 * we need a global id. So we combine
306 * socket+ core id
307 */
308 return (s << 16) | (cpu & 0xffff);
309}
310
311int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
312{
313 return cpu_map__build_map(cpus, sockp, cpu_map__get_socket);
314}
315
316int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
317{
318 return cpu_map__build_map(cpus, corep, cpu_map__get_core);
319}
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 2f68a3b8c285..9bed02e5fb3d 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -14,6 +14,27 @@ struct cpu_map *cpu_map__dummy_new(void);
14void cpu_map__delete(struct cpu_map *map); 14void cpu_map__delete(struct cpu_map *map);
15struct cpu_map *cpu_map__read(FILE *file); 15struct cpu_map *cpu_map__read(FILE *file);
16size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp); 16size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
17int cpu_map__get_socket(struct cpu_map *map, int idx);
18int cpu_map__get_core(struct cpu_map *map, int idx);
19int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
20int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep);
21
22static inline int cpu_map__socket(struct cpu_map *sock, int s)
23{
24 if (!sock || s > sock->nr || s < 0)
25 return 0;
26 return sock->map[s];
27}
28
29static inline int cpu_map__id_to_socket(int id)
30{
31 return id >> 16;
32}
33
34static inline int cpu_map__id_to_cpu(int id)
35{
36 return id & 0xffff;
37}
17 38
18static inline int cpu_map__nr(const struct cpu_map *map) 39static inline int cpu_map__nr(const struct cpu_map *map)
19{ 40{
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 03f830b48148..399e74c34c1a 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -23,10 +23,8 @@ int eprintf(int level, const char *fmt, ...)
23 23
24 if (verbose >= level) { 24 if (verbose >= level) {
25 va_start(args, fmt); 25 va_start(args, fmt);
26 if (use_browser == 1) 26 if (use_browser >= 1)
27 ret = ui_helpline__show_help(fmt, args); 27 ui_helpline__vshow(fmt, args);
28 else if (use_browser == 2)
29 ret = perf_gtk__show_helpline(fmt, args);
30 else 28 else
31 ret = vfprintf(stderr, fmt, args); 29 ret = vfprintf(stderr, fmt, args);
32 va_end(args); 30 va_end(args);
@@ -49,28 +47,6 @@ int dump_printf(const char *fmt, ...)
49 return ret; 47 return ret;
50} 48}
51 49
52#if !defined(NEWT_SUPPORT) && !defined(GTK2_SUPPORT)
53int ui__warning(const char *format, ...)
54{
55 va_list args;
56
57 va_start(args, format);
58 vfprintf(stderr, format, args);
59 va_end(args);
60 return 0;
61}
62#endif
63
64int ui__error_paranoid(void)
65{
66 return ui__error("Permission error - are you root?\n"
67 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
68 " -1 - Not paranoid at all\n"
69 " 0 - Disallow raw tracepoint access for unpriv\n"
70 " 1 - Disallow cpu events for unpriv\n"
71 " 2 - Disallow kernel profiling for unpriv\n");
72}
73
74void trace_event(union perf_event *event) 50void trace_event(union perf_event *event)
75{ 51{
76 unsigned char *raw_event = (void *)event; 52 unsigned char *raw_event = (void *)event;
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 83e8d234af6b..efbd98805ad0 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -5,6 +5,8 @@
5#include <stdbool.h> 5#include <stdbool.h>
6#include "event.h" 6#include "event.h"
7#include "../ui/helpline.h" 7#include "../ui/helpline.h"
8#include "../ui/progress.h"
9#include "../ui/util.h"
8 10
9extern int verbose; 11extern int verbose;
10extern bool quiet, dump_trace; 12extern bool quiet, dump_trace;
@@ -12,39 +14,7 @@ extern bool quiet, dump_trace;
12int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); 14int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
13void trace_event(union perf_event *event); 15void trace_event(union perf_event *event);
14 16
15struct ui_progress;
16struct perf_error_ops;
17
18#if defined(NEWT_SUPPORT) || defined(GTK2_SUPPORT)
19
20#include "../ui/progress.h"
21int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2))); 17int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
22#include "../ui/util.h"
23
24#else
25
26static inline void ui_progress__update(u64 curr __maybe_unused,
27 u64 total __maybe_unused,
28 const char *title __maybe_unused) {}
29static inline void ui_progress__finish(void) {}
30
31#define ui__error(format, arg...) ui__warning(format, ##arg)
32
33static inline int
34perf_error__register(struct perf_error_ops *eops __maybe_unused)
35{
36 return 0;
37}
38
39static inline int
40perf_error__unregister(struct perf_error_ops *eops __maybe_unused)
41{
42 return 0;
43}
44
45#endif /* NEWT_SUPPORT || GTK2_SUPPORT */
46
47int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2))); 18int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
48int ui__error_paranoid(void);
49 19
50#endif /* __PERF_DEBUG_H */ 20#endif /* __PERF_DEBUG_H */
diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c
deleted file mode 100644
index dd8b19319c03..000000000000
--- a/tools/perf/util/debugfs.c
+++ /dev/null
@@ -1,114 +0,0 @@
1#include "util.h"
2#include "debugfs.h"
3#include "cache.h"
4
5#include <linux/kernel.h>
6#include <sys/mount.h>
7
8static int debugfs_premounted;
9char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
10char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
11
12static const char *debugfs_known_mountpoints[] = {
13 "/sys/kernel/debug/",
14 "/debug/",
15 0,
16};
17
18static int debugfs_found;
19
20/* find the path to the mounted debugfs */
21const char *debugfs_find_mountpoint(void)
22{
23 const char **ptr;
24 char type[100];
25 FILE *fp;
26
27 if (debugfs_found)
28 return (const char *) debugfs_mountpoint;
29
30 ptr = debugfs_known_mountpoints;
31 while (*ptr) {
32 if (debugfs_valid_mountpoint(*ptr) == 0) {
33 debugfs_found = 1;
34 strcpy(debugfs_mountpoint, *ptr);
35 return debugfs_mountpoint;
36 }
37 ptr++;
38 }
39
40 /* give up and parse /proc/mounts */
41 fp = fopen("/proc/mounts", "r");
42 if (fp == NULL)
43 return NULL;
44
45 while (fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n",
46 debugfs_mountpoint, type) == 2) {
47 if (strcmp(type, "debugfs") == 0)
48 break;
49 }
50 fclose(fp);
51
52 if (strcmp(type, "debugfs") != 0)
53 return NULL;
54
55 debugfs_found = 1;
56
57 return debugfs_mountpoint;
58}
59
60/* verify that a mountpoint is actually a debugfs instance */
61
62int debugfs_valid_mountpoint(const char *debugfs)
63{
64 struct statfs st_fs;
65
66 if (statfs(debugfs, &st_fs) < 0)
67 return -ENOENT;
68 else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
69 return -ENOENT;
70
71 return 0;
72}
73
74static void debugfs_set_tracing_events_path(const char *mountpoint)
75{
76 snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s",
77 mountpoint, "tracing/events");
78}
79
80/* mount the debugfs somewhere if it's not mounted */
81
82char *debugfs_mount(const char *mountpoint)
83{
84 /* see if it's already mounted */
85 if (debugfs_find_mountpoint()) {
86 debugfs_premounted = 1;
87 goto out;
88 }
89
90 /* if not mounted and no argument */
91 if (mountpoint == NULL) {
92 /* see if environment variable set */
93 mountpoint = getenv(PERF_DEBUGFS_ENVIRONMENT);
94 /* if no environment variable, use default */
95 if (mountpoint == NULL)
96 mountpoint = "/sys/kernel/debug";
97 }
98
99 if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0)
100 return NULL;
101
102 /* save the mountpoint */
103 debugfs_found = 1;
104 strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
105out:
106 debugfs_set_tracing_events_path(debugfs_mountpoint);
107 return debugfs_mountpoint;
108}
109
110void debugfs_set_path(const char *mountpoint)
111{
112 snprintf(debugfs_mountpoint, sizeof(debugfs_mountpoint), "%s", mountpoint);
113 debugfs_set_tracing_events_path(mountpoint);
114}
diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h
deleted file mode 100644
index 68f3e87ec57f..000000000000
--- a/tools/perf/util/debugfs.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef __DEBUGFS_H__
2#define __DEBUGFS_H__
3
4const char *debugfs_find_mountpoint(void);
5int debugfs_valid_mountpoint(const char *debugfs);
6char *debugfs_mount(const char *mountpoint);
7void debugfs_set_path(const char *mountpoint);
8
9extern char debugfs_mountpoint[];
10extern char tracing_events_path[];
11
12#endif /* __DEBUGFS_H__ */
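
Note: debugfs.[ch] are removed from util/ here; the evlist.c hunk below switches to including <lk/debugfs.h> for the same helpers. For reference, the mountpoint validation being dropped boils down to a statfs() filesystem-type check, as in this minimal standalone sketch (assuming DEBUGFS_MAGIC comes from <linux/magic.h>).

#include <sys/vfs.h>		/* statfs() */
#include <linux/magic.h>	/* DEBUGFS_MAGIC */
#include <errno.h>

/* Minimal sketch of the check done by debugfs_valid_mountpoint(). */
static int debugfs_mountpoint_ok(const char *path)
{
	struct statfs st_fs;

	if (statfs(path, &st_fs) < 0)
		return -ENOENT;
	if (st_fs.f_type != (long)DEBUGFS_MAGIC)
		return -ENOENT;
	return 0;
}
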
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index d6d9a465acdb..6f7d5a9d6b05 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -539,13 +539,13 @@ struct dso *__dsos__findnew(struct list_head *head, const char *name)
539} 539}
540 540
541size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, 541size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
542 bool with_hits) 542 bool (skip)(struct dso *dso, int parm), int parm)
543{ 543{
544 struct dso *pos; 544 struct dso *pos;
545 size_t ret = 0; 545 size_t ret = 0;
546 546
547 list_for_each_entry(pos, head, node) { 547 list_for_each_entry(pos, head, node) {
548 if (with_hits && !pos->hit) 548 if (skip && skip(pos, parm))
549 continue; 549 continue;
550 ret += dso__fprintf_buildid(pos, fp); 550 ret += dso__fprintf_buildid(pos, fp);
551 ret += fprintf(fp, " %s\n", pos->long_name); 551 ret += fprintf(fp, " %s\n", pos->long_name);
@@ -583,7 +583,7 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
583 if (dso->short_name != dso->long_name) 583 if (dso->short_name != dso->long_name)
584 ret += fprintf(fp, "%s, ", dso->long_name); 584 ret += fprintf(fp, "%s, ", dso->long_name);
585 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], 585 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
586 dso->loaded ? "" : "NOT "); 586 dso__loaded(dso, type) ? "" : "NOT ");
587 ret += dso__fprintf_buildid(dso, fp); 587 ret += dso__fprintf_buildid(dso, fp);
588 ret += fprintf(fp, ")\n"); 588 ret += fprintf(fp, ")\n");
589 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { 589 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index e03276940b99..450199ab51b5 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -138,7 +138,7 @@ struct dso *__dsos__findnew(struct list_head *head, const char *name);
138bool __dsos__read_build_ids(struct list_head *head, bool with_hits); 138bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
139 139
140size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, 140size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
141 bool with_hits); 141 bool (skip)(struct dso *dso, int parm), int parm);
142size_t __dsos__fprintf(struct list_head *head, FILE *fp); 142size_t __dsos__fprintf(struct list_head *head, FILE *fp);
143 143
144size_t dso__fprintf_buildid(struct dso *dso, FILE *fp); 144size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 3cf2c3e0605f..5cd13d768cec 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -476,8 +476,10 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
476 } 476 }
477 } 477 }
478 478
479 if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) 479 if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
480 free(event);
480 return -ENOENT; 481 return -ENOENT;
482 }
481 483
482 map = machine->vmlinux_maps[MAP__FUNCTION]; 484 map = machine->vmlinux_maps[MAP__FUNCTION];
483 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), 485 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 0d573ff4771a..181389535c0c 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -88,8 +88,10 @@ struct perf_sample {
88 u64 id; 88 u64 id;
89 u64 stream_id; 89 u64 stream_id;
90 u64 period; 90 u64 period;
91 u64 weight;
91 u32 cpu; 92 u32 cpu;
92 u32 raw_size; 93 u32 raw_size;
94 u64 data_src;
93 void *raw_data; 95 void *raw_data;
94 struct ip_callchain *callchain; 96 struct ip_callchain *callchain;
95 struct branch_stack *branch_stack; 97 struct branch_stack *branch_stack;
@@ -97,6 +99,13 @@ struct perf_sample {
97 struct stack_dump user_stack; 99 struct stack_dump user_stack;
98}; 100};
99 101
102#define PERF_MEM_DATA_SRC_NONE \
103 (PERF_MEM_S(OP, NA) |\
104 PERF_MEM_S(LVL, NA) |\
105 PERF_MEM_S(SNOOP, NA) |\
106 PERF_MEM_S(LOCK, NA) |\
107 PERF_MEM_S(TLB, NA))
108
100struct build_id_event { 109struct build_id_event {
101 struct perf_event_header header; 110 struct perf_event_header header;
102 pid_t pid; 111 pid_t pid;
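
Note: PERF_MEM_DATA_SRC_NONE marks every PERF_MEM_* field as "not available", which makes it a sensible default for the new perf_sample.data_src field when an event was not opened with PERF_SAMPLE_DATA_SRC. A minimal sketch; the helper is hypothetical, and PERF_MEM_S() is assumed to come from the uapi perf_event.h header pulled in by event.h.

#include "event.h"

/* Hypothetical helper: default the memory-profiling fields of a sample. */
static void perf_sample__init_mem_fields(struct perf_sample *sample)
{
	sample->weight   = 0;
	sample->data_src = PERF_MEM_DATA_SRC_NONE;	/* OP/LVL/SNOOP/LOCK/TLB all NA */
}
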
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 705293489e3c..f7c727801aab 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -7,7 +7,7 @@
7 * Released under the GPL v2. (and only v2, not any later version) 7 * Released under the GPL v2. (and only v2, not any later version)
8 */ 8 */
9#include "util.h" 9#include "util.h"
10#include "debugfs.h" 10#include <lk/debugfs.h>
11#include <poll.h> 11#include <poll.h>
12#include "cpumap.h" 12#include "cpumap.h"
13#include "thread_map.h" 13#include "thread_map.h"
@@ -38,21 +38,26 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
38 evlist->workload.pid = -1; 38 evlist->workload.pid = -1;
39} 39}
40 40
41struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, 41struct perf_evlist *perf_evlist__new(void)
42 struct thread_map *threads)
43{ 42{
44 struct perf_evlist *evlist = zalloc(sizeof(*evlist)); 43 struct perf_evlist *evlist = zalloc(sizeof(*evlist));
45 44
46 if (evlist != NULL) 45 if (evlist != NULL)
47 perf_evlist__init(evlist, cpus, threads); 46 perf_evlist__init(evlist, NULL, NULL);
48 47
49 return evlist; 48 return evlist;
50} 49}
51 50
52void perf_evlist__config_attrs(struct perf_evlist *evlist, 51void perf_evlist__config(struct perf_evlist *evlist,
53 struct perf_record_opts *opts) 52 struct perf_record_opts *opts)
54{ 53{
55 struct perf_evsel *evsel; 54 struct perf_evsel *evsel;
55 /*
56 * Set the evsel leader links before we configure attributes,
57 * since some might depend on this info.
58 */
59 if (opts->group)
60 perf_evlist__set_leader(evlist);
56 61
57 if (evlist->cpus->map[0] < 0) 62 if (evlist->cpus->map[0] < 0)
58 opts->no_inherit = true; 63 opts->no_inherit = true;
@@ -61,7 +66,7 @@ void perf_evlist__config_attrs(struct perf_evlist *evlist,
61 perf_evsel__config(evsel, opts); 66 perf_evsel__config(evsel, opts);
62 67
63 if (evlist->nr_entries > 1) 68 if (evlist->nr_entries > 1)
64 evsel->attr.sample_type |= PERF_SAMPLE_ID; 69 perf_evsel__set_sample_id(evsel);
65 } 70 }
66} 71}
67 72
@@ -111,18 +116,21 @@ void __perf_evlist__set_leader(struct list_head *list)
111 struct perf_evsel *evsel, *leader; 116 struct perf_evsel *evsel, *leader;
112 117
113 leader = list_entry(list->next, struct perf_evsel, node); 118 leader = list_entry(list->next, struct perf_evsel, node);
114 leader->leader = NULL; 119 evsel = list_entry(list->prev, struct perf_evsel, node);
120
121 leader->nr_members = evsel->idx - leader->idx + 1;
115 122
116 list_for_each_entry(evsel, list, node) { 123 list_for_each_entry(evsel, list, node) {
117 if (evsel != leader) 124 evsel->leader = leader;
118 evsel->leader = leader;
119 } 125 }
120} 126}
121 127
122void perf_evlist__set_leader(struct perf_evlist *evlist) 128void perf_evlist__set_leader(struct perf_evlist *evlist)
123{ 129{
124 if (evlist->nr_entries) 130 if (evlist->nr_entries) {
131 evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
125 __perf_evlist__set_leader(&evlist->entries); 132 __perf_evlist__set_leader(&evlist->entries);
133 }
126} 134}
127 135
128int perf_evlist__add_default(struct perf_evlist *evlist) 136int perf_evlist__add_default(struct perf_evlist *evlist)
@@ -219,12 +227,14 @@ void perf_evlist__disable(struct perf_evlist *evlist)
219{ 227{
220 int cpu, thread; 228 int cpu, thread;
221 struct perf_evsel *pos; 229 struct perf_evsel *pos;
230 int nr_cpus = cpu_map__nr(evlist->cpus);
231 int nr_threads = thread_map__nr(evlist->threads);
222 232
223 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 233 for (cpu = 0; cpu < nr_cpus; cpu++) {
224 list_for_each_entry(pos, &evlist->entries, node) { 234 list_for_each_entry(pos, &evlist->entries, node) {
225 if (perf_evsel__is_group_member(pos)) 235 if (!perf_evsel__is_group_leader(pos))
226 continue; 236 continue;
227 for (thread = 0; thread < evlist->threads->nr; thread++) 237 for (thread = 0; thread < nr_threads; thread++)
228 ioctl(FD(pos, cpu, thread), 238 ioctl(FD(pos, cpu, thread),
229 PERF_EVENT_IOC_DISABLE, 0); 239 PERF_EVENT_IOC_DISABLE, 0);
230 } 240 }
@@ -235,12 +245,14 @@ void perf_evlist__enable(struct perf_evlist *evlist)
235{ 245{
236 int cpu, thread; 246 int cpu, thread;
237 struct perf_evsel *pos; 247 struct perf_evsel *pos;
248 int nr_cpus = cpu_map__nr(evlist->cpus);
249 int nr_threads = thread_map__nr(evlist->threads);
238 250
239 for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) { 251 for (cpu = 0; cpu < nr_cpus; cpu++) {
240 list_for_each_entry(pos, &evlist->entries, node) { 252 list_for_each_entry(pos, &evlist->entries, node) {
241 if (perf_evsel__is_group_member(pos)) 253 if (!perf_evsel__is_group_leader(pos))
242 continue; 254 continue;
243 for (thread = 0; thread < evlist->threads->nr; thread++) 255 for (thread = 0; thread < nr_threads; thread++)
244 ioctl(FD(pos, cpu, thread), 256 ioctl(FD(pos, cpu, thread),
245 PERF_EVENT_IOC_ENABLE, 0); 257 PERF_EVENT_IOC_ENABLE, 0);
246 } 258 }
@@ -249,7 +261,9 @@ void perf_evlist__enable(struct perf_evlist *evlist)
249 261
250static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) 262static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
251{ 263{
252 int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries; 264 int nr_cpus = cpu_map__nr(evlist->cpus);
265 int nr_threads = thread_map__nr(evlist->threads);
266 int nfds = nr_cpus * nr_threads * evlist->nr_entries;
253 evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); 267 evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
254 return evlist->pollfd != NULL ? 0 : -ENOMEM; 268 return evlist->pollfd != NULL ? 0 : -ENOMEM;
255} 269}
@@ -305,7 +319,6 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
305struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) 319struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
306{ 320{
307 struct hlist_head *head; 321 struct hlist_head *head;
308 struct hlist_node *pos;
309 struct perf_sample_id *sid; 322 struct perf_sample_id *sid;
310 int hash; 323 int hash;
311 324
@@ -315,7 +328,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
315 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 328 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
316 head = &evlist->heads[hash]; 329 head = &evlist->heads[hash];
317 330
318 hlist_for_each_entry(sid, pos, head, node) 331 hlist_for_each_entry(sid, head, node)
319 if (sid->id == id) 332 if (sid->id == id)
320 return sid->evsel; 333 return sid->evsel;
321 334
@@ -366,7 +379,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
366 if ((old & md->mask) + size != ((old + size) & md->mask)) { 379 if ((old & md->mask) + size != ((old + size) & md->mask)) {
367 unsigned int offset = old; 380 unsigned int offset = old;
368 unsigned int len = min(sizeof(*event), size), cpy; 381 unsigned int len = min(sizeof(*event), size), cpy;
369 void *dst = &evlist->event_copy; 382 void *dst = &md->event_copy;
370 383
371 do { 384 do {
372 cpy = min(md->mask + 1 - (offset & md->mask), len); 385 cpy = min(md->mask + 1 - (offset & md->mask), len);
@@ -376,7 +389,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
376 len -= cpy; 389 len -= cpy;
377 } while (len); 390 } while (len);
378 391
379 event = &evlist->event_copy; 392 event = &md->event_copy;
380 } 393 }
381 394
382 old += size; 395 old += size;
@@ -409,7 +422,7 @@ static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
409{ 422{
410 evlist->nr_mmaps = cpu_map__nr(evlist->cpus); 423 evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
411 if (cpu_map__all(evlist->cpus)) 424 if (cpu_map__all(evlist->cpus))
412 evlist->nr_mmaps = evlist->threads->nr; 425 evlist->nr_mmaps = thread_map__nr(evlist->threads);
413 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); 426 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
414 return evlist->mmap != NULL ? 0 : -ENOMEM; 427 return evlist->mmap != NULL ? 0 : -ENOMEM;
415} 428}
@@ -434,11 +447,13 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
434{ 447{
435 struct perf_evsel *evsel; 448 struct perf_evsel *evsel;
436 int cpu, thread; 449 int cpu, thread;
450 int nr_cpus = cpu_map__nr(evlist->cpus);
451 int nr_threads = thread_map__nr(evlist->threads);
437 452
438 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 453 for (cpu = 0; cpu < nr_cpus; cpu++) {
439 int output = -1; 454 int output = -1;
440 455
441 for (thread = 0; thread < evlist->threads->nr; thread++) { 456 for (thread = 0; thread < nr_threads; thread++) {
442 list_for_each_entry(evsel, &evlist->entries, node) { 457 list_for_each_entry(evsel, &evlist->entries, node) {
443 int fd = FD(evsel, cpu, thread); 458 int fd = FD(evsel, cpu, thread);
444 459
@@ -462,7 +477,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
462 return 0; 477 return 0;
463 478
464out_unmap: 479out_unmap:
465 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 480 for (cpu = 0; cpu < nr_cpus; cpu++) {
466 if (evlist->mmap[cpu].base != NULL) { 481 if (evlist->mmap[cpu].base != NULL) {
467 munmap(evlist->mmap[cpu].base, evlist->mmap_len); 482 munmap(evlist->mmap[cpu].base, evlist->mmap_len);
468 evlist->mmap[cpu].base = NULL; 483 evlist->mmap[cpu].base = NULL;
@@ -475,8 +490,9 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
475{ 490{
476 struct perf_evsel *evsel; 491 struct perf_evsel *evsel;
477 int thread; 492 int thread;
493 int nr_threads = thread_map__nr(evlist->threads);
478 494
479 for (thread = 0; thread < evlist->threads->nr; thread++) { 495 for (thread = 0; thread < nr_threads; thread++) {
480 int output = -1; 496 int output = -1;
481 497
482 list_for_each_entry(evsel, &evlist->entries, node) { 498 list_for_each_entry(evsel, &evlist->entries, node) {
@@ -501,7 +517,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
501 return 0; 517 return 0;
502 518
503out_unmap: 519out_unmap:
504 for (thread = 0; thread < evlist->threads->nr; thread++) { 520 for (thread = 0; thread < nr_threads; thread++) {
505 if (evlist->mmap[thread].base != NULL) { 521 if (evlist->mmap[thread].base != NULL) {
506 munmap(evlist->mmap[thread].base, evlist->mmap_len); 522 munmap(evlist->mmap[thread].base, evlist->mmap_len);
507 evlist->mmap[thread].base = NULL; 523 evlist->mmap[thread].base = NULL;
@@ -602,7 +618,7 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist)
602 struct perf_evsel *evsel; 618 struct perf_evsel *evsel;
603 int err = 0; 619 int err = 0;
604 const int ncpus = cpu_map__nr(evlist->cpus), 620 const int ncpus = cpu_map__nr(evlist->cpus),
605 nthreads = evlist->threads->nr; 621 nthreads = thread_map__nr(evlist->threads);
606 622
607 list_for_each_entry(evsel, &evlist->entries, node) { 623 list_for_each_entry(evsel, &evlist->entries, node) {
608 if (evsel->filter == NULL) 624 if (evsel->filter == NULL)
@@ -621,7 +637,7 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
621 struct perf_evsel *evsel; 637 struct perf_evsel *evsel;
622 int err = 0; 638 int err = 0;
623 const int ncpus = cpu_map__nr(evlist->cpus), 639 const int ncpus = cpu_map__nr(evlist->cpus),
624 nthreads = evlist->threads->nr; 640 nthreads = thread_map__nr(evlist->threads);
625 641
626 list_for_each_entry(evsel, &evlist->entries, node) { 642 list_for_each_entry(evsel, &evlist->entries, node) {
627 err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter); 643 err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
@@ -704,10 +720,20 @@ void perf_evlist__set_selected(struct perf_evlist *evlist,
704 evlist->selected = evsel; 720 evlist->selected = evsel;
705} 721}
706 722
723void perf_evlist__close(struct perf_evlist *evlist)
724{
725 struct perf_evsel *evsel;
726 int ncpus = cpu_map__nr(evlist->cpus);
727 int nthreads = thread_map__nr(evlist->threads);
728
729 list_for_each_entry_reverse(evsel, &evlist->entries, node)
730 perf_evsel__close(evsel, ncpus, nthreads);
731}
732
707int perf_evlist__open(struct perf_evlist *evlist) 733int perf_evlist__open(struct perf_evlist *evlist)
708{ 734{
709 struct perf_evsel *evsel; 735 struct perf_evsel *evsel;
710 int err, ncpus, nthreads; 736 int err;
711 737
712 list_for_each_entry(evsel, &evlist->entries, node) { 738 list_for_each_entry(evsel, &evlist->entries, node) {
713 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads); 739 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
@@ -717,19 +743,15 @@ int perf_evlist__open(struct perf_evlist *evlist)
717 743
718 return 0; 744 return 0;
719out_err: 745out_err:
720 ncpus = evlist->cpus ? evlist->cpus->nr : 1; 746 perf_evlist__close(evlist);
721 nthreads = evlist->threads ? evlist->threads->nr : 1;
722
723 list_for_each_entry_reverse(evsel, &evlist->entries, node)
724 perf_evsel__close(evsel, ncpus, nthreads);
725
726 errno = -err; 747 errno = -err;
727 return err; 748 return err;
728} 749}
729 750
730int perf_evlist__prepare_workload(struct perf_evlist *evlist, 751int perf_evlist__prepare_workload(struct perf_evlist *evlist,
731 struct perf_record_opts *opts, 752 struct perf_target *target,
732 const char *argv[]) 753 const char *argv[], bool pipe_output,
754 bool want_signal)
733{ 755{
734 int child_ready_pipe[2], go_pipe[2]; 756 int child_ready_pipe[2], go_pipe[2];
735 char bf; 757 char bf;
@@ -751,7 +773,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
751 } 773 }
752 774
753 if (!evlist->workload.pid) { 775 if (!evlist->workload.pid) {
754 if (opts->pipe_output) 776 if (pipe_output)
755 dup2(2, 1); 777 dup2(2, 1);
756 778
757 close(child_ready_pipe[0]); 779 close(child_ready_pipe[0]);
@@ -779,11 +801,12 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
779 execvp(argv[0], (char **)argv); 801 execvp(argv[0], (char **)argv);
780 802
781 perror(argv[0]); 803 perror(argv[0]);
782 kill(getppid(), SIGUSR1); 804 if (want_signal)
805 kill(getppid(), SIGUSR1);
783 exit(-1); 806 exit(-1);
784 } 807 }
785 808
786 if (perf_target__none(&opts->target)) 809 if (perf_target__none(target))
787 evlist->threads->map[0] = evlist->workload.pid; 810 evlist->threads->map[0] = evlist->workload.pid;
788 811
789 close(child_ready_pipe[1]); 812 close(child_ready_pipe[1]);
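
The evlist.c hunks above decouple workload preparation from struct perf_record_opts: callers now pass the perf_target, the argv vector, and the pipe_output/want_signal policy directly, and the error path reuses the new perf_evlist__close(). A minimal caller sketch under those new signatures; include paths are indicative and error handling is trimmed:

/* Sketch: fork the workload now, let it run only after counters are set up. */
#include <stdbool.h>
#include "util/evlist.h"
#include "util/target.h"

static int run_workload(struct perf_evlist *evlist, struct perf_target *target,
			const char *argv[])
{
	int err;

	/* no perf_record_opts any more; the two policy flags are explicit */
	err = perf_evlist__prepare_workload(evlist, target, argv,
					    false /* pipe_output */,
					    true  /* want_signal: SIGUSR1 to parent if exec fails */);
	if (err)
		return err;

	/* ... perf_evlist__open() / perf_evlist__mmap() would go here ... */

	return perf_evlist__start_workload(evlist);	/* releases the forked child */
}
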
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 56003f779e60..0583d36252be 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -17,10 +17,18 @@ struct perf_record_opts;
17#define PERF_EVLIST__HLIST_BITS 8 17#define PERF_EVLIST__HLIST_BITS 8
18#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) 18#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
19 19
20struct perf_mmap {
21 void *base;
22 int mask;
23 unsigned int prev;
24 union perf_event event_copy;
25};
26
20struct perf_evlist { 27struct perf_evlist {
21 struct list_head entries; 28 struct list_head entries;
22 struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; 29 struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
23 int nr_entries; 30 int nr_entries;
31 int nr_groups;
24 int nr_fds; 32 int nr_fds;
25 int nr_mmaps; 33 int nr_mmaps;
26 int mmap_len; 34 int mmap_len;
@@ -29,7 +37,6 @@ struct perf_evlist {
29 pid_t pid; 37 pid_t pid;
30 } workload; 38 } workload;
31 bool overwrite; 39 bool overwrite;
32 union perf_event event_copy;
33 struct perf_mmap *mmap; 40 struct perf_mmap *mmap;
34 struct pollfd *pollfd; 41 struct pollfd *pollfd;
35 struct thread_map *threads; 42 struct thread_map *threads;
@@ -42,8 +49,7 @@ struct perf_evsel_str_handler {
42 void *handler; 49 void *handler;
43}; 50};
44 51
45struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, 52struct perf_evlist *perf_evlist__new(void);
46 struct thread_map *threads);
47void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, 53void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
48 struct thread_map *threads); 54 struct thread_map *threads);
49void perf_evlist__exit(struct perf_evlist *evlist); 55void perf_evlist__exit(struct perf_evlist *evlist);
@@ -75,13 +81,15 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
75union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); 81union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
76 82
77int perf_evlist__open(struct perf_evlist *evlist); 83int perf_evlist__open(struct perf_evlist *evlist);
84void perf_evlist__close(struct perf_evlist *evlist);
78 85
79void perf_evlist__config_attrs(struct perf_evlist *evlist, 86void perf_evlist__config(struct perf_evlist *evlist,
80 struct perf_record_opts *opts); 87 struct perf_record_opts *opts);
81 88
82int perf_evlist__prepare_workload(struct perf_evlist *evlist, 89int perf_evlist__prepare_workload(struct perf_evlist *evlist,
83 struct perf_record_opts *opts, 90 struct perf_target *target,
84 const char *argv[]); 91 const char *argv[], bool pipe_output,
92 bool want_signal);
85int perf_evlist__start_workload(struct perf_evlist *evlist); 93int perf_evlist__start_workload(struct perf_evlist *evlist);
86 94
87int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, 95int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
@@ -135,4 +143,25 @@ static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
135} 143}
136 144
137size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp); 145size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
146
147static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
148{
149 struct perf_event_mmap_page *pc = mm->base;
150 int head = pc->data_head;
151 rmb();
152 return head;
153}
154
155static inline void perf_mmap__write_tail(struct perf_mmap *md,
156 unsigned long tail)
157{
158 struct perf_event_mmap_page *pc = md->base;
159
160 /*
161 * ensure all reads are done before we write the tail out.
162 */
163 /* mb(); */
164 pc->data_tail = tail;
165}
166
138#endif /* __PERF_EVLIST_H */ 167#endif /* __PERF_EVLIST_H */
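
struct perf_mmap now bundles the per-ring state (base, mask, prev and the event_copy staging buffer) that previously sat as a single copy in struct perf_evlist, and the head/tail accessors keep the reader-side rmb() in one place. A rough consumer loop over the rings, assuming the evlist was already opened and mmapped; include paths are indicative:

/* Sketch: drain every mmapped ring once. */
#include <stddef.h>
#include "util/event.h"
#include "util/evlist.h"

static void drain_rings(struct perf_evlist *evlist)
{
	union perf_event *event;
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		/* returns the next event in ring i, or NULL once it is drained */
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			/* dispatch on event->header.type here */
		}
	}
}

Moving event_copy into struct perf_mmap also means an event that wraps the end of one ring is reassembled in that ring's own staging area rather than in a single evlist-wide buffer.
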
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 1b16dd1edc8e..07b1a3ad3e24 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -10,7 +10,7 @@
10#include <byteswap.h> 10#include <byteswap.h>
11#include <linux/bitops.h> 11#include <linux/bitops.h>
12#include "asm/bug.h" 12#include "asm/bug.h"
13#include "debugfs.h" 13#include <lk/debugfs.h>
14#include "event-parse.h" 14#include "event-parse.h"
15#include "evsel.h" 15#include "evsel.h"
16#include "evlist.h" 16#include "evlist.h"
@@ -22,6 +22,11 @@
22#include <linux/perf_event.h> 22#include <linux/perf_event.h>
23#include "perf_regs.h" 23#include "perf_regs.h"
24 24
25static struct {
26 bool sample_id_all;
27 bool exclude_guest;
28} perf_missing_features;
29
25#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 30#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
26 31
27static int __perf_evsel__sample_size(u64 sample_type) 32static int __perf_evsel__sample_size(u64 sample_type)
@@ -50,11 +55,36 @@ void hists__init(struct hists *hists)
50 pthread_mutex_init(&hists->lock, NULL); 55 pthread_mutex_init(&hists->lock, NULL);
51} 56}
52 57
58void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
59 enum perf_event_sample_format bit)
60{
61 if (!(evsel->attr.sample_type & bit)) {
62 evsel->attr.sample_type |= bit;
63 evsel->sample_size += sizeof(u64);
64 }
65}
66
67void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
68 enum perf_event_sample_format bit)
69{
70 if (evsel->attr.sample_type & bit) {
71 evsel->attr.sample_type &= ~bit;
72 evsel->sample_size -= sizeof(u64);
73 }
74}
75
76void perf_evsel__set_sample_id(struct perf_evsel *evsel)
77{
78 perf_evsel__set_sample_bit(evsel, ID);
79 evsel->attr.read_format |= PERF_FORMAT_ID;
80}
81
53void perf_evsel__init(struct perf_evsel *evsel, 82void perf_evsel__init(struct perf_evsel *evsel,
54 struct perf_event_attr *attr, int idx) 83 struct perf_event_attr *attr, int idx)
55{ 84{
56 evsel->idx = idx; 85 evsel->idx = idx;
57 evsel->attr = *attr; 86 evsel->attr = *attr;
87 evsel->leader = evsel;
58 INIT_LIST_HEAD(&evsel->node); 88 INIT_LIST_HEAD(&evsel->node);
59 hists__init(&evsel->hists); 89 hists__init(&evsel->hists);
60 evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); 90 evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
@@ -404,6 +434,31 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
404 return evsel->name ?: "unknown"; 434 return evsel->name ?: "unknown";
405} 435}
406 436
437const char *perf_evsel__group_name(struct perf_evsel *evsel)
438{
439 return evsel->group_name ?: "anon group";
440}
441
442int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
443{
444 int ret;
445 struct perf_evsel *pos;
446 const char *group_name = perf_evsel__group_name(evsel);
447
448 ret = scnprintf(buf, size, "%s", group_name);
449
450 ret += scnprintf(buf + ret, size - ret, " { %s",
451 perf_evsel__name(evsel));
452
453 for_each_group_member(pos, evsel)
454 ret += scnprintf(buf + ret, size - ret, ", %s",
455 perf_evsel__name(pos));
456
457 ret += scnprintf(buf + ret, size - ret, " }");
458
459 return ret;
460}
461
407/* 462/*
408 * The enable_on_exec/disabled value strategy: 463 * The enable_on_exec/disabled value strategy:
409 * 464 *
@@ -438,13 +493,11 @@ void perf_evsel__config(struct perf_evsel *evsel,
438 struct perf_event_attr *attr = &evsel->attr; 493 struct perf_event_attr *attr = &evsel->attr;
439 int track = !evsel->idx; /* only the first counter needs these */ 494 int track = !evsel->idx; /* only the first counter needs these */
440 495
441 attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; 496 attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
442 attr->inherit = !opts->no_inherit; 497 attr->inherit = !opts->no_inherit;
443 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
444 PERF_FORMAT_TOTAL_TIME_RUNNING |
445 PERF_FORMAT_ID;
446 498
447 attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; 499 perf_evsel__set_sample_bit(evsel, IP);
500 perf_evsel__set_sample_bit(evsel, TID);
448 501
449 /* 502 /*
450 * We default some events to a 1 default interval. But keep 503 * We default some events to a 1 default interval. But keep
@@ -453,7 +506,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
453 if (!attr->sample_period || (opts->user_freq != UINT_MAX && 506 if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
454 opts->user_interval != ULLONG_MAX)) { 507 opts->user_interval != ULLONG_MAX)) {
455 if (opts->freq) { 508 if (opts->freq) {
456 attr->sample_type |= PERF_SAMPLE_PERIOD; 509 perf_evsel__set_sample_bit(evsel, PERIOD);
457 attr->freq = 1; 510 attr->freq = 1;
458 attr->sample_freq = opts->freq; 511 attr->sample_freq = opts->freq;
459 } else { 512 } else {
@@ -468,16 +521,16 @@ void perf_evsel__config(struct perf_evsel *evsel,
468 attr->inherit_stat = 1; 521 attr->inherit_stat = 1;
469 522
470 if (opts->sample_address) { 523 if (opts->sample_address) {
471 attr->sample_type |= PERF_SAMPLE_ADDR; 524 perf_evsel__set_sample_bit(evsel, ADDR);
472 attr->mmap_data = track; 525 attr->mmap_data = track;
473 } 526 }
474 527
475 if (opts->call_graph) { 528 if (opts->call_graph) {
476 attr->sample_type |= PERF_SAMPLE_CALLCHAIN; 529 perf_evsel__set_sample_bit(evsel, CALLCHAIN);
477 530
478 if (opts->call_graph == CALLCHAIN_DWARF) { 531 if (opts->call_graph == CALLCHAIN_DWARF) {
479 attr->sample_type |= PERF_SAMPLE_REGS_USER | 532 perf_evsel__set_sample_bit(evsel, REGS_USER);
480 PERF_SAMPLE_STACK_USER; 533 perf_evsel__set_sample_bit(evsel, STACK_USER);
481 attr->sample_regs_user = PERF_REGS_MASK; 534 attr->sample_regs_user = PERF_REGS_MASK;
482 attr->sample_stack_user = opts->stack_dump_size; 535 attr->sample_stack_user = opts->stack_dump_size;
483 attr->exclude_callchain_user = 1; 536 attr->exclude_callchain_user = 1;
@@ -485,31 +538,37 @@ void perf_evsel__config(struct perf_evsel *evsel,
485 } 538 }
486 539
487 if (perf_target__has_cpu(&opts->target)) 540 if (perf_target__has_cpu(&opts->target))
488 attr->sample_type |= PERF_SAMPLE_CPU; 541 perf_evsel__set_sample_bit(evsel, CPU);
489 542
490 if (opts->period) 543 if (opts->period)
491 attr->sample_type |= PERF_SAMPLE_PERIOD; 544 perf_evsel__set_sample_bit(evsel, PERIOD);
492 545
493 if (!opts->sample_id_all_missing && 546 if (!perf_missing_features.sample_id_all &&
494 (opts->sample_time || !opts->no_inherit || 547 (opts->sample_time || !opts->no_inherit ||
495 perf_target__has_cpu(&opts->target))) 548 perf_target__has_cpu(&opts->target)))
496 attr->sample_type |= PERF_SAMPLE_TIME; 549 perf_evsel__set_sample_bit(evsel, TIME);
497 550
498 if (opts->raw_samples) { 551 if (opts->raw_samples) {
499 attr->sample_type |= PERF_SAMPLE_TIME; 552 perf_evsel__set_sample_bit(evsel, TIME);
500 attr->sample_type |= PERF_SAMPLE_RAW; 553 perf_evsel__set_sample_bit(evsel, RAW);
501 attr->sample_type |= PERF_SAMPLE_CPU; 554 perf_evsel__set_sample_bit(evsel, CPU);
502 } 555 }
503 556
557 if (opts->sample_address)
558 attr->sample_type |= PERF_SAMPLE_DATA_SRC;
559
504 if (opts->no_delay) { 560 if (opts->no_delay) {
505 attr->watermark = 0; 561 attr->watermark = 0;
506 attr->wakeup_events = 1; 562 attr->wakeup_events = 1;
507 } 563 }
508 if (opts->branch_stack) { 564 if (opts->branch_stack) {
509 attr->sample_type |= PERF_SAMPLE_BRANCH_STACK; 565 perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
510 attr->branch_sample_type = opts->branch_stack; 566 attr->branch_sample_type = opts->branch_stack;
511 } 567 }
512 568
569 if (opts->sample_weight)
570 attr->sample_type |= PERF_SAMPLE_WEIGHT;
571
513 attr->mmap = track; 572 attr->mmap = track;
514 attr->comm = track; 573 attr->comm = track;
515 574
@@ -519,14 +578,14 @@ void perf_evsel__config(struct perf_evsel *evsel,
519 * Disabling only independent events or group leaders, 578 * Disabling only independent events or group leaders,
520 * keeping group members enabled. 579 * keeping group members enabled.
521 */ 580 */
522 if (!perf_evsel__is_group_member(evsel)) 581 if (perf_evsel__is_group_leader(evsel))
523 attr->disabled = 1; 582 attr->disabled = 1;
524 583
525 /* 584 /*
526 * Setting enable_on_exec for independent events and 585 * Setting enable_on_exec for independent events and
527 * group leaders for traced executed by perf. 586 * group leaders for traced executed by perf.
528 */ 587 */
529 if (perf_target__none(&opts->target) && !perf_evsel__is_group_member(evsel)) 588 if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
530 attr->enable_on_exec = 1; 589 attr->enable_on_exec = 1;
531} 590}
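
Routing every sample_type change through perf_evsel__set_sample_bit()/perf_evsel__reset_sample_bit() keeps evsel->sample_size in step with the bits: each newly set bit adds sizeof(u64), each cleared bit removes it, and setting an already-set bit is a no-op. A tiny illustration (include paths indicative):

/* Sketch: sample_size tracks the sample_type bits automatically. */
#include <stdio.h>
#include "util/evsel.h"

static void show_sample_layout(struct perf_evsel *evsel)
{
	/* expands to __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_CPU) */
	perf_evsel__set_sample_bit(evsel, CPU);
	perf_evsel__set_sample_bit(evsel, CPU);		/* idempotent: size grows once */

	printf("sample_type=%#llx sample_size=%lu\n",
	       (unsigned long long)evsel->attr.sample_type,
	       (unsigned long)evsel->sample_size);

	perf_evsel__reset_sample_bit(evsel, CPU);	/* gives the sizeof(u64) back */
}
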
532 591
@@ -580,6 +639,12 @@ int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
580 return 0; 639 return 0;
581} 640}
582 641
642void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
643{
644 memset(evsel->counts, 0, (sizeof(*evsel->counts) +
645 (ncpus * sizeof(struct perf_counts_values))));
646}
647
583int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus) 648int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
584{ 649{
585 evsel->counts = zalloc((sizeof(*evsel->counts) + 650 evsel->counts = zalloc((sizeof(*evsel->counts) +
@@ -612,12 +677,16 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
612 } 677 }
613} 678}
614 679
680void perf_evsel__free_counts(struct perf_evsel *evsel)
681{
682 free(evsel->counts);
683}
684
615void perf_evsel__exit(struct perf_evsel *evsel) 685void perf_evsel__exit(struct perf_evsel *evsel)
616{ 686{
617 assert(list_empty(&evsel->node)); 687 assert(list_empty(&evsel->node));
618 xyarray__delete(evsel->fd); 688 perf_evsel__free_fd(evsel);
619 xyarray__delete(evsel->sample_id); 689 perf_evsel__free_id(evsel);
620 free(evsel->id);
621} 690}
622 691
623void perf_evsel__delete(struct perf_evsel *evsel) 692void perf_evsel__delete(struct perf_evsel *evsel)
@@ -631,6 +700,28 @@ void perf_evsel__delete(struct perf_evsel *evsel)
631 free(evsel); 700 free(evsel);
632} 701}
633 702
703static inline void compute_deltas(struct perf_evsel *evsel,
704 int cpu,
705 struct perf_counts_values *count)
706{
707 struct perf_counts_values tmp;
708
709 if (!evsel->prev_raw_counts)
710 return;
711
712 if (cpu == -1) {
713 tmp = evsel->prev_raw_counts->aggr;
714 evsel->prev_raw_counts->aggr = *count;
715 } else {
716 tmp = evsel->prev_raw_counts->cpu[cpu];
717 evsel->prev_raw_counts->cpu[cpu] = *count;
718 }
719
720 count->val = count->val - tmp.val;
721 count->ena = count->ena - tmp.ena;
722 count->run = count->run - tmp.run;
723}
724
634int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, 725int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
635 int cpu, int thread, bool scale) 726 int cpu, int thread, bool scale)
636{ 727{
@@ -646,6 +737,8 @@ int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
646 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0) 737 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
647 return -errno; 738 return -errno;
648 739
740 compute_deltas(evsel, cpu, &count);
741
649 if (scale) { 742 if (scale) {
650 if (count.run == 0) 743 if (count.run == 0)
651 count.val = 0; 744 count.val = 0;
@@ -684,6 +777,8 @@ int __perf_evsel__read(struct perf_evsel *evsel,
684 } 777 }
685 } 778 }
686 779
780 compute_deltas(evsel, -1, aggr);
781
687 evsel->counts->scaled = 0; 782 evsel->counts->scaled = 0;
688 if (scale) { 783 if (scale) {
689 if (aggr->run == 0) { 784 if (aggr->run == 0) {
@@ -707,7 +802,7 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
707 struct perf_evsel *leader = evsel->leader; 802 struct perf_evsel *leader = evsel->leader;
708 int fd; 803 int fd;
709 804
710 if (!perf_evsel__is_group_member(evsel)) 805 if (perf_evsel__is_group_leader(evsel))
711 return -1; 806 return -1;
712 807
713 /* 808 /*
@@ -738,6 +833,13 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
738 pid = evsel->cgrp->fd; 833 pid = evsel->cgrp->fd;
739 } 834 }
740 835
836fallback_missing_features:
837 if (perf_missing_features.exclude_guest)
838 evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
839retry_sample_id:
840 if (perf_missing_features.sample_id_all)
841 evsel->attr.sample_id_all = 0;
842
741 for (cpu = 0; cpu < cpus->nr; cpu++) { 843 for (cpu = 0; cpu < cpus->nr; cpu++) {
742 844
743 for (thread = 0; thread < threads->nr; thread++) { 845 for (thread = 0; thread < threads->nr; thread++) {
@@ -754,13 +856,26 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
754 group_fd, flags); 856 group_fd, flags);
755 if (FD(evsel, cpu, thread) < 0) { 857 if (FD(evsel, cpu, thread) < 0) {
756 err = -errno; 858 err = -errno;
757 goto out_close; 859 goto try_fallback;
758 } 860 }
759 } 861 }
760 } 862 }
761 863
762 return 0; 864 return 0;
763 865
866try_fallback:
867 if (err != -EINVAL || cpu > 0 || thread > 0)
868 goto out_close;
869
870 if (!perf_missing_features.exclude_guest &&
871 (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
872 perf_missing_features.exclude_guest = true;
873 goto fallback_missing_features;
874 } else if (!perf_missing_features.sample_id_all) {
875 perf_missing_features.sample_id_all = true;
876 goto retry_sample_id;
877 }
878
764out_close: 879out_close:
765 do { 880 do {
766 while (--thread >= 0) { 881 while (--thread >= 0) {
@@ -908,6 +1023,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
908 data->cpu = data->pid = data->tid = -1; 1023 data->cpu = data->pid = data->tid = -1;
909 data->stream_id = data->id = data->time = -1ULL; 1024 data->stream_id = data->id = data->time = -1ULL;
910 data->period = 1; 1025 data->period = 1;
1026 data->weight = 0;
911 1027
912 if (event->header.type != PERF_RECORD_SAMPLE) { 1028 if (event->header.type != PERF_RECORD_SAMPLE) {
913 if (!evsel->attr.sample_id_all) 1029 if (!evsel->attr.sample_id_all)
@@ -1058,6 +1174,18 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
1058 } 1174 }
1059 } 1175 }
1060 1176
1177 data->weight = 0;
1178 if (type & PERF_SAMPLE_WEIGHT) {
1179 data->weight = *array;
1180 array++;
1181 }
1182
1183 data->data_src = PERF_MEM_DATA_SRC_NONE;
1184 if (type & PERF_SAMPLE_DATA_SRC) {
1185 data->data_src = *array;
1186 array++;
1187 }
1188
1061 return 0; 1189 return 0;
1062} 1190}
1063 1191
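
After the two blocks above, perf_evsel__parse_sample() also fills the new weight and data_src members of struct perf_sample, defaulting them to 0 and PERF_MEM_DATA_SRC_NONE when the corresponding sample_type bits were not requested. A hedged consumer sketch; only the two field names come from the code above, the rest is illustrative:

/* Sketch: inspect the new fields once a sample has been parsed. */
#include <inttypes.h>
#include <stdio.h>
#include "util/event.h"
#include "util/evsel.h"

static int dump_weight(struct perf_evsel *evsel, union perf_event *event)
{
	struct perf_sample sample;
	int err = perf_evsel__parse_sample(evsel, event, &sample);

	if (err)
		return err;

	/* 0 / PERF_MEM_DATA_SRC_NONE simply mean the bits were not sampled */
	printf("weight=%" PRIu64 " data_src=%#" PRIx64 "\n",
	       sample.weight, sample.data_src);
	return 0;
}
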
@@ -1205,3 +1333,225 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
1205 1333
1206 return 0; 1334 return 0;
1207} 1335}
1336
1337static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
1338{
1339 va_list args;
1340 int ret = 0;
1341
1342 if (!*first) {
1343 ret += fprintf(fp, ",");
1344 } else {
1345 ret += fprintf(fp, ":");
1346 *first = false;
1347 }
1348
1349 va_start(args, fmt);
1350 ret += vfprintf(fp, fmt, args);
1351 va_end(args);
1352 return ret;
1353}
1354
1355static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
1356{
1357 if (value == 0)
1358 return 0;
1359
1360 return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
1361}
1362
1363#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)
1364
1365struct bit_names {
1366 int bit;
1367 const char *name;
1368};
1369
1370static int bits__fprintf(FILE *fp, const char *field, u64 value,
1371 struct bit_names *bits, bool *first)
1372{
1373 int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
1374 bool first_bit = true;
1375
1376 do {
1377 if (value & bits[i].bit) {
1378 printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
1379 first_bit = false;
1380 }
1381 } while (bits[++i].name != NULL);
1382
1383 return printed;
1384}
1385
1386static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
1387{
1388#define bit_name(n) { PERF_SAMPLE_##n, #n }
1389 struct bit_names bits[] = {
1390 bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
1391 bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
1392 bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
1393 bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
1394 { .name = NULL, }
1395 };
1396#undef bit_name
1397 return bits__fprintf(fp, "sample_type", value, bits, first);
1398}
1399
1400static int read_format__fprintf(FILE *fp, bool *first, u64 value)
1401{
1402#define bit_name(n) { PERF_FORMAT_##n, #n }
1403 struct bit_names bits[] = {
1404 bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
1405 bit_name(ID), bit_name(GROUP),
1406 { .name = NULL, }
1407 };
1408#undef bit_name
1409 return bits__fprintf(fp, "read_format", value, bits, first);
1410}
1411
1412int perf_evsel__fprintf(struct perf_evsel *evsel,
1413 struct perf_attr_details *details, FILE *fp)
1414{
1415 bool first = true;
1416 int printed = 0;
1417
1418 if (details->event_group) {
1419 struct perf_evsel *pos;
1420
1421 if (!perf_evsel__is_group_leader(evsel))
1422 return 0;
1423
1424 if (evsel->nr_members > 1)
1425 printed += fprintf(fp, "%s{", evsel->group_name ?: "");
1426
1427 printed += fprintf(fp, "%s", perf_evsel__name(evsel));
1428 for_each_group_member(pos, evsel)
1429 printed += fprintf(fp, ",%s", perf_evsel__name(pos));
1430
1431 if (evsel->nr_members > 1)
1432 printed += fprintf(fp, "}");
1433 goto out;
1434 }
1435
1436 printed += fprintf(fp, "%s", perf_evsel__name(evsel));
1437
1438 if (details->verbose || details->freq) {
1439 printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
1440 (u64)evsel->attr.sample_freq);
1441 }
1442
1443 if (details->verbose) {
1444 if_print(type);
1445 if_print(config);
1446 if_print(config1);
1447 if_print(config2);
1448 if_print(size);
1449 printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
1450 if (evsel->attr.read_format)
1451 printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
1452 if_print(disabled);
1453 if_print(inherit);
1454 if_print(pinned);
1455 if_print(exclusive);
1456 if_print(exclude_user);
1457 if_print(exclude_kernel);
1458 if_print(exclude_hv);
1459 if_print(exclude_idle);
1460 if_print(mmap);
1461 if_print(comm);
1462 if_print(freq);
1463 if_print(inherit_stat);
1464 if_print(enable_on_exec);
1465 if_print(task);
1466 if_print(watermark);
1467 if_print(precise_ip);
1468 if_print(mmap_data);
1469 if_print(sample_id_all);
1470 if_print(exclude_host);
1471 if_print(exclude_guest);
1472 if_print(__reserved_1);
1473 if_print(wakeup_events);
1474 if_print(bp_type);
1475 if_print(branch_sample_type);
1476 }
1477out:
1478 fputc('\n', fp);
1479 return ++printed;
1480}
1481
1482bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
1483 char *msg, size_t msgsize)
1484{
1485 if ((err == ENOENT || err == ENXIO) &&
1486 evsel->attr.type == PERF_TYPE_HARDWARE &&
1487 evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
1488 /*
1489 * If it's cycles then fall back to hrtimer based
1490 * cpu-clock-tick sw counter, which is always available even if
1491 * no PMU support.
1492 *
1493 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
1494 * b0a873e).
1495 */
1496 scnprintf(msg, msgsize, "%s",
1497"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
1498
1499 evsel->attr.type = PERF_TYPE_SOFTWARE;
1500 evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
1501
1502 free(evsel->name);
1503 evsel->name = NULL;
1504 return true;
1505 }
1506
1507 return false;
1508}
1509
1510int perf_evsel__open_strerror(struct perf_evsel *evsel,
1511 struct perf_target *target,
1512 int err, char *msg, size_t size)
1513{
1514 switch (err) {
1515 case EPERM:
1516 case EACCES:
1517 return scnprintf(msg, size, "%s",
1518 "You may not have permission to collect %sstats.\n"
1519 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
1520 " -1 - Not paranoid at all\n"
1521 " 0 - Disallow raw tracepoint access for unpriv\n"
1522 " 1 - Disallow cpu events for unpriv\n"
1523 " 2 - Disallow kernel profiling for unpriv",
1524 target->system_wide ? "system-wide " : "");
1525 case ENOENT:
1526 return scnprintf(msg, size, "The %s event is not supported.",
1527 perf_evsel__name(evsel));
1528 case EMFILE:
1529 return scnprintf(msg, size, "%s",
1530 "Too many events are opened.\n"
1531 "Try again after reducing the number of events.");
1532 case ENODEV:
1533 if (target->cpu_list)
1534 return scnprintf(msg, size, "%s",
1535 "No such device - did you specify an out-of-range profile CPU?\n");
1536 break;
1537 case EOPNOTSUPP:
1538 if (evsel->attr.precise_ip)
1539 return scnprintf(msg, size, "%s",
1540 "\'precise\' request may not be supported. Try removing 'p' modifier.");
1541#if defined(__i386__) || defined(__x86_64__)
1542 if (evsel->attr.type == PERF_TYPE_HARDWARE)
1543 return scnprintf(msg, size, "%s",
1544 "No hardware sampling interrupt available.\n"
1545 "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
1546#endif
1547 break;
1548 default:
1549 break;
1550 }
1551
1552 return scnprintf(msg, size,
1553 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s). \n"
1554 "/bin/dmesg may provide additional information.\n"
1555 "No CONFIG_PERF_EVENTS=y kernel support configured?\n",
1556 err, strerror(err), perf_evsel__name(evsel));
1557}
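
perf_evsel__fallback() and perf_evsel__open_strerror() centralize the open-error policy that individual tools used to duplicate: retry once as cpu-clock when hardware cycles are unavailable, otherwise translate the errno into an actionable message. A minimal sketch of how a caller can compose them (include paths indicative, stderr reporting only for illustration):

/* Sketch: open one evsel, sharing the common fallback and error text. */
#include <stdio.h>
#include "util/evsel.h"
#include "util/target.h"

static int open_counter(struct perf_evsel *evsel, struct perf_target *target,
			struct cpu_map *cpus, struct thread_map *threads)
{
	char msg[512];
	int err;

	while ((err = perf_evsel__open(evsel, cpus, threads)) < 0) {
		/* e.g. cycles -> cpu-clock when there is no hardware PMU */
		if (perf_evsel__fallback(evsel, -err, msg, sizeof(msg))) {
			fprintf(stderr, "%s\n", msg);
			continue;
		}

		perf_evsel__open_strerror(evsel, target, -err, msg, sizeof(msg));
		fprintf(stderr, "%s\n", msg);
		return err;
	}

	return 0;
}
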
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 3d2b8017438c..3f156ccc1acb 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -9,6 +9,7 @@
9#include "xyarray.h" 9#include "xyarray.h"
10#include "cgroup.h" 10#include "cgroup.h"
11#include "hist.h" 11#include "hist.h"
12#include "symbol.h"
12 13
13struct perf_counts_values { 14struct perf_counts_values {
14 union { 15 union {
@@ -53,6 +54,7 @@ struct perf_evsel {
53 struct xyarray *sample_id; 54 struct xyarray *sample_id;
54 u64 *id; 55 u64 *id;
55 struct perf_counts *counts; 56 struct perf_counts *counts;
57 struct perf_counts *prev_raw_counts;
56 int idx; 58 int idx;
57 u32 ids; 59 u32 ids;
58 struct hists hists; 60 struct hists hists;
@@ -73,10 +75,13 @@ struct perf_evsel {
73 bool needs_swap; 75 bool needs_swap;
74 /* parse modifier helper */ 76 /* parse modifier helper */
75 int exclude_GH; 77 int exclude_GH;
78 int nr_members;
76 struct perf_evsel *leader; 79 struct perf_evsel *leader;
77 char *group_name; 80 char *group_name;
78}; 81};
79 82
83#define hists_to_evsel(h) container_of(h, struct perf_evsel, hists)
84
80struct cpu_map; 85struct cpu_map;
81struct thread_map; 86struct thread_map;
82struct perf_evlist; 87struct perf_evlist;
@@ -110,14 +115,31 @@ extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
110int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, 115int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
111 char *bf, size_t size); 116 char *bf, size_t size);
112const char *perf_evsel__name(struct perf_evsel *evsel); 117const char *perf_evsel__name(struct perf_evsel *evsel);
118const char *perf_evsel__group_name(struct perf_evsel *evsel);
119int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);
113 120
114int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); 121int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
115int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); 122int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
116int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus); 123int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
124void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus);
117void perf_evsel__free_fd(struct perf_evsel *evsel); 125void perf_evsel__free_fd(struct perf_evsel *evsel);
118void perf_evsel__free_id(struct perf_evsel *evsel); 126void perf_evsel__free_id(struct perf_evsel *evsel);
127void perf_evsel__free_counts(struct perf_evsel *evsel);
119void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); 128void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
120 129
130void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
131 enum perf_event_sample_format bit);
132void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
133 enum perf_event_sample_format bit);
134
135#define perf_evsel__set_sample_bit(evsel, bit) \
136 __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)
137
138#define perf_evsel__reset_sample_bit(evsel, bit) \
139 __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
140
141void perf_evsel__set_sample_id(struct perf_evsel *evsel);
142
121int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, 143int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
122 const char *filter); 144 const char *filter);
123 145
@@ -226,8 +248,57 @@ static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
226 return list_entry(evsel->node.next, struct perf_evsel, node); 248 return list_entry(evsel->node.next, struct perf_evsel, node);
227} 249}
228 250
229static inline bool perf_evsel__is_group_member(const struct perf_evsel *evsel) 251/**
252 * perf_evsel__is_group_leader - Return whether given evsel is a leader event
253 *
254 * @evsel - evsel selector to be tested
255 *
256 * Return %true if @evsel is a group leader or a stand-alone event
257 */
258static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
259{
260 return evsel->leader == evsel;
261}
262
263/**
264 * perf_evsel__is_group_event - Return whether given evsel is a group event
265 *
266 * @evsel - evsel selector to be tested
267 *
 268 * Return %true iff event group view is enabled and @evsel is an actual group
269 * leader which has other members in the group
270 */
271static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
272{
273 if (!symbol_conf.event_group)
274 return false;
275
276 return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
277}
278
279struct perf_attr_details {
280 bool freq;
281 bool verbose;
282 bool event_group;
283};
284
285int perf_evsel__fprintf(struct perf_evsel *evsel,
286 struct perf_attr_details *details, FILE *fp);
287
288bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
289 char *msg, size_t msgsize);
290int perf_evsel__open_strerror(struct perf_evsel *evsel,
291 struct perf_target *target,
292 int err, char *msg, size_t size);
293
294static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
230{ 295{
231 return evsel->leader != NULL; 296 return evsel->idx - evsel->leader->idx;
232} 297}
298
299#define for_each_group_member(_evsel, _leader) \
300for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \
301 (_evsel) && (_evsel)->leader == (_leader); \
302 (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
303
233#endif /* __PERF_EVSEL_H */ 304#endif /* __PERF_EVSEL_H */
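
The leader convention flips here: perf_evsel__init() now points an event at itself, so "is a leader" becomes evsel->leader == evsel instead of leader == NULL, and nr_members on a leader counts the whole group including the leader. for_each_group_member() then walks the members, which by construction sit right after their leader on the evlist. A short, hedged sketch of walking every group (include paths indicative):

/* Sketch: print each group leader followed by its members. */
#include <stdio.h>
#include <linux/list.h>
#include "util/evlist.h"
#include "util/evsel.h"

static void print_groups(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel, *pos;

	list_for_each_entry(evsel, &evlist->entries, node) {
		/* skip members and stand-alone events */
		if (!perf_evsel__is_group_leader(evsel) || evsel->nr_members <= 1)
			continue;

		printf("%s (%d events)\n",
		       perf_evsel__group_name(evsel), evsel->nr_members);

		for_each_group_member(pos, evsel)
			printf("  [%d] %s\n",
			       perf_evsel__group_idx(pos), perf_evsel__name(pos));
	}
}
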
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index b7da4634a047..326068a593a5 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1,5 +1,3 @@
1#define _FILE_OFFSET_BITS 64
2
3#include "util.h" 1#include "util.h"
4#include <sys/types.h> 2#include <sys/types.h>
5#include <byteswap.h> 3#include <byteswap.h>
@@ -148,7 +146,7 @@ static char *do_read_string(int fd, struct perf_header *ph)
148 u32 len; 146 u32 len;
149 char *buf; 147 char *buf;
150 148
151 sz = read(fd, &len, sizeof(len)); 149 sz = readn(fd, &len, sizeof(len));
152 if (sz < (ssize_t)sizeof(len)) 150 if (sz < (ssize_t)sizeof(len))
153 return NULL; 151 return NULL;
154 152
@@ -159,7 +157,7 @@ static char *do_read_string(int fd, struct perf_header *ph)
159 if (!buf) 157 if (!buf)
160 return NULL; 158 return NULL;
161 159
162 ret = read(fd, buf, len); 160 ret = readn(fd, buf, len);
163 if (ret == (ssize_t)len) { 161 if (ret == (ssize_t)len) {
164 /* 162 /*
165 * strings are padded by zeroes 163 * strings are padded by zeroes
@@ -287,12 +285,12 @@ static int dsos__write_buildid_table(struct perf_header *header, int fd)
287 struct perf_session *session = container_of(header, 285 struct perf_session *session = container_of(header,
288 struct perf_session, header); 286 struct perf_session, header);
289 struct rb_node *nd; 287 struct rb_node *nd;
290 int err = machine__write_buildid_table(&session->host_machine, fd); 288 int err = machine__write_buildid_table(&session->machines.host, fd);
291 289
292 if (err) 290 if (err)
293 return err; 291 return err;
294 292
295 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { 293 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
296 struct machine *pos = rb_entry(nd, struct machine, rb_node); 294 struct machine *pos = rb_entry(nd, struct machine, rb_node);
297 err = machine__write_buildid_table(pos, fd); 295 err = machine__write_buildid_table(pos, fd);
298 if (err) 296 if (err)
@@ -313,7 +311,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
313 if (is_kallsyms) { 311 if (is_kallsyms) {
314 if (symbol_conf.kptr_restrict) { 312 if (symbol_conf.kptr_restrict) {
315 pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); 313 pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
316 return 0; 314 err = 0;
315 goto out_free;
317 } 316 }
318 realname = (char *) name; 317 realname = (char *) name;
319 } else 318 } else
@@ -448,9 +447,9 @@ static int perf_session__cache_build_ids(struct perf_session *session)
448 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) 447 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
449 return -1; 448 return -1;
450 449
451 ret = machine__cache_build_ids(&session->host_machine, debugdir); 450 ret = machine__cache_build_ids(&session->machines.host, debugdir);
452 451
453 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { 452 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
454 struct machine *pos = rb_entry(nd, struct machine, rb_node); 453 struct machine *pos = rb_entry(nd, struct machine, rb_node);
455 ret |= machine__cache_build_ids(pos, debugdir); 454 ret |= machine__cache_build_ids(pos, debugdir);
456 } 455 }
@@ -467,9 +466,9 @@ static bool machine__read_build_ids(struct machine *machine, bool with_hits)
467static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) 466static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
468{ 467{
469 struct rb_node *nd; 468 struct rb_node *nd;
470 bool ret = machine__read_build_ids(&session->host_machine, with_hits); 469 bool ret = machine__read_build_ids(&session->machines.host, with_hits);
471 470
472 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { 471 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
473 struct machine *pos = rb_entry(nd, struct machine, rb_node); 472 struct machine *pos = rb_entry(nd, struct machine, rb_node);
474 ret |= machine__read_build_ids(pos, with_hits); 473 ret |= machine__read_build_ids(pos, with_hits);
475 } 474 }
@@ -954,6 +953,7 @@ static int write_topo_node(int fd, int node)
954 } 953 }
955 954
956 fclose(fp); 955 fclose(fp);
956 fp = NULL;
957 957
958 ret = do_write(fd, &mem_total, sizeof(u64)); 958 ret = do_write(fd, &mem_total, sizeof(u64));
959 if (ret) 959 if (ret)
@@ -980,7 +980,8 @@ static int write_topo_node(int fd, int node)
980 ret = do_write_string(fd, buf); 980 ret = do_write_string(fd, buf);
981done: 981done:
982 free(buf); 982 free(buf);
983 fclose(fp); 983 if (fp)
984 fclose(fp);
984 return ret; 985 return ret;
985} 986}
986 987
@@ -1051,16 +1052,25 @@ static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
1051 struct perf_pmu *pmu = NULL; 1052 struct perf_pmu *pmu = NULL;
1052 off_t offset = lseek(fd, 0, SEEK_CUR); 1053 off_t offset = lseek(fd, 0, SEEK_CUR);
1053 __u32 pmu_num = 0; 1054 __u32 pmu_num = 0;
1055 int ret;
1054 1056
1055 /* write real pmu_num later */ 1057 /* write real pmu_num later */
1056 do_write(fd, &pmu_num, sizeof(pmu_num)); 1058 ret = do_write(fd, &pmu_num, sizeof(pmu_num));
1059 if (ret < 0)
1060 return ret;
1057 1061
1058 while ((pmu = perf_pmu__scan(pmu))) { 1062 while ((pmu = perf_pmu__scan(pmu))) {
1059 if (!pmu->name) 1063 if (!pmu->name)
1060 continue; 1064 continue;
1061 pmu_num++; 1065 pmu_num++;
1062 do_write(fd, &pmu->type, sizeof(pmu->type)); 1066
1063 do_write_string(fd, pmu->name); 1067 ret = do_write(fd, &pmu->type, sizeof(pmu->type));
1068 if (ret < 0)
1069 return ret;
1070
1071 ret = do_write_string(fd, pmu->name);
1072 if (ret < 0)
1073 return ret;
1064 } 1074 }
1065 1075
1066 if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) { 1076 if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
@@ -1073,6 +1083,52 @@ static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
1073} 1083}
1074 1084
1075/* 1085/*
1086 * File format:
1087 *
1088 * struct group_descs {
1089 * u32 nr_groups;
1090 * struct group_desc {
1091 * char name[];
1092 * u32 leader_idx;
1093 * u32 nr_members;
1094 * }[nr_groups];
1095 * };
1096 */
1097static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
1098 struct perf_evlist *evlist)
1099{
1100 u32 nr_groups = evlist->nr_groups;
1101 struct perf_evsel *evsel;
1102 int ret;
1103
1104 ret = do_write(fd, &nr_groups, sizeof(nr_groups));
1105 if (ret < 0)
1106 return ret;
1107
1108 list_for_each_entry(evsel, &evlist->entries, node) {
1109 if (perf_evsel__is_group_leader(evsel) &&
1110 evsel->nr_members > 1) {
1111 const char *name = evsel->group_name ?: "{anon_group}";
1112 u32 leader_idx = evsel->idx;
1113 u32 nr_members = evsel->nr_members;
1114
1115 ret = do_write_string(fd, name);
1116 if (ret < 0)
1117 return ret;
1118
1119 ret = do_write(fd, &leader_idx, sizeof(leader_idx));
1120 if (ret < 0)
1121 return ret;
1122
1123 ret = do_write(fd, &nr_members, sizeof(nr_members));
1124 if (ret < 0)
1125 return ret;
1126 }
1127 }
1128 return 0;
1129}
1130
1131/*
1076 * default get_cpuid(): nothing gets recorded 1132 * default get_cpuid(): nothing gets recorded
1077 * actual implementation must be in arch/$(ARCH)/util/header.c 1133 * actual implementation must be in arch/$(ARCH)/util/header.c
1078 */ 1134 */
@@ -1209,14 +1265,14 @@ read_event_desc(struct perf_header *ph, int fd)
1209 size_t msz; 1265 size_t msz;
1210 1266
1211 /* number of events */ 1267 /* number of events */
1212 ret = read(fd, &nre, sizeof(nre)); 1268 ret = readn(fd, &nre, sizeof(nre));
1213 if (ret != (ssize_t)sizeof(nre)) 1269 if (ret != (ssize_t)sizeof(nre))
1214 goto error; 1270 goto error;
1215 1271
1216 if (ph->needs_swap) 1272 if (ph->needs_swap)
1217 nre = bswap_32(nre); 1273 nre = bswap_32(nre);
1218 1274
1219 ret = read(fd, &sz, sizeof(sz)); 1275 ret = readn(fd, &sz, sizeof(sz));
1220 if (ret != (ssize_t)sizeof(sz)) 1276 if (ret != (ssize_t)sizeof(sz))
1221 goto error; 1277 goto error;
1222 1278
@@ -1244,7 +1300,7 @@ read_event_desc(struct perf_header *ph, int fd)
1244 * must read entire on-file attr struct to 1300 * must read entire on-file attr struct to
1245 * sync up with layout. 1301 * sync up with layout.
1246 */ 1302 */
1247 ret = read(fd, buf, sz); 1303 ret = readn(fd, buf, sz);
1248 if (ret != (ssize_t)sz) 1304 if (ret != (ssize_t)sz)
1249 goto error; 1305 goto error;
1250 1306
@@ -1253,7 +1309,7 @@ read_event_desc(struct perf_header *ph, int fd)
1253 1309
1254 memcpy(&evsel->attr, buf, msz); 1310 memcpy(&evsel->attr, buf, msz);
1255 1311
1256 ret = read(fd, &nr, sizeof(nr)); 1312 ret = readn(fd, &nr, sizeof(nr));
1257 if (ret != (ssize_t)sizeof(nr)) 1313 if (ret != (ssize_t)sizeof(nr))
1258 goto error; 1314 goto error;
1259 1315
@@ -1274,7 +1330,7 @@ read_event_desc(struct perf_header *ph, int fd)
1274 evsel->id = id; 1330 evsel->id = id;
1275 1331
1276 for (j = 0 ; j < nr; j++) { 1332 for (j = 0 ; j < nr; j++) {
1277 ret = read(fd, id, sizeof(*id)); 1333 ret = readn(fd, id, sizeof(*id));
1278 if (ret != (ssize_t)sizeof(*id)) 1334 if (ret != (ssize_t)sizeof(*id))
1279 goto error; 1335 goto error;
1280 if (ph->needs_swap) 1336 if (ph->needs_swap)
@@ -1435,6 +1491,31 @@ error:
1435 fprintf(fp, "# pmu mappings: unable to read\n"); 1491 fprintf(fp, "# pmu mappings: unable to read\n");
1436} 1492}
1437 1493
1494static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1495 FILE *fp)
1496{
1497 struct perf_session *session;
1498 struct perf_evsel *evsel;
1499 u32 nr = 0;
1500
1501 session = container_of(ph, struct perf_session, header);
1502
1503 list_for_each_entry(evsel, &session->evlist->entries, node) {
1504 if (perf_evsel__is_group_leader(evsel) &&
1505 evsel->nr_members > 1) {
1506 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1507 perf_evsel__name(evsel));
1508
1509 nr = evsel->nr_members - 1;
1510 } else if (nr) {
1511 fprintf(fp, ",%s", perf_evsel__name(evsel));
1512
1513 if (--nr == 0)
1514 fprintf(fp, "}\n");
1515 }
1516 }
1517}
1518
1438static int __event_process_build_id(struct build_id_event *bev, 1519static int __event_process_build_id(struct build_id_event *bev,
1439 char *filename, 1520 char *filename,
1440 struct perf_session *session) 1521 struct perf_session *session)
@@ -1506,14 +1587,14 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1506 while (offset < limit) { 1587 while (offset < limit) {
1507 ssize_t len; 1588 ssize_t len;
1508 1589
1509 if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) 1590 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1510 return -1; 1591 return -1;
1511 1592
1512 if (header->needs_swap) 1593 if (header->needs_swap)
1513 perf_event_header__bswap(&old_bev.header); 1594 perf_event_header__bswap(&old_bev.header);
1514 1595
1515 len = old_bev.header.size - sizeof(old_bev); 1596 len = old_bev.header.size - sizeof(old_bev);
1516 if (read(input, filename, len) != len) 1597 if (readn(input, filename, len) != len)
1517 return -1; 1598 return -1;
1518 1599
1519 bev.header = old_bev.header; 1600 bev.header = old_bev.header;
@@ -1548,14 +1629,14 @@ static int perf_header__read_build_ids(struct perf_header *header,
1548 while (offset < limit) { 1629 while (offset < limit) {
1549 ssize_t len; 1630 ssize_t len;
1550 1631
1551 if (read(input, &bev, sizeof(bev)) != sizeof(bev)) 1632 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1552 goto out; 1633 goto out;
1553 1634
1554 if (header->needs_swap) 1635 if (header->needs_swap)
1555 perf_event_header__bswap(&bev.header); 1636 perf_event_header__bswap(&bev.header);
1556 1637
1557 len = bev.header.size - sizeof(bev); 1638 len = bev.header.size - sizeof(bev);
1558 if (read(input, filename, len) != len) 1639 if (readn(input, filename, len) != len)
1559 goto out; 1640 goto out;
1560 /* 1641 /*
1561 * The a1645ce1 changeset: 1642 * The a1645ce1 changeset:
@@ -1589,8 +1670,8 @@ static int process_tracing_data(struct perf_file_section *section __maybe_unused
1589 struct perf_header *ph __maybe_unused, 1670 struct perf_header *ph __maybe_unused,
1590 int fd, void *data) 1671 int fd, void *data)
1591{ 1672{
1592 trace_report(fd, data, false); 1673 ssize_t ret = trace_report(fd, data, false);
1593 return 0; 1674 return ret < 0 ? -1 : 0;
1594} 1675}
1595 1676
1596static int process_build_id(struct perf_file_section *section, 1677static int process_build_id(struct perf_file_section *section,
@@ -1641,7 +1722,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1641 size_t ret; 1722 size_t ret;
1642 u32 nr; 1723 u32 nr;
1643 1724
1644 ret = read(fd, &nr, sizeof(nr)); 1725 ret = readn(fd, &nr, sizeof(nr));
1645 if (ret != sizeof(nr)) 1726 if (ret != sizeof(nr))
1646 return -1; 1727 return -1;
1647 1728
@@ -1650,7 +1731,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1650 1731
1651 ph->env.nr_cpus_online = nr; 1732 ph->env.nr_cpus_online = nr;
1652 1733
1653 ret = read(fd, &nr, sizeof(nr)); 1734 ret = readn(fd, &nr, sizeof(nr));
1654 if (ret != sizeof(nr)) 1735 if (ret != sizeof(nr))
1655 return -1; 1736 return -1;
1656 1737
@@ -1684,7 +1765,7 @@ static int process_total_mem(struct perf_file_section *section __maybe_unused,
1684 uint64_t mem; 1765 uint64_t mem;
1685 size_t ret; 1766 size_t ret;
1686 1767
1687 ret = read(fd, &mem, sizeof(mem)); 1768 ret = readn(fd, &mem, sizeof(mem));
1688 if (ret != sizeof(mem)) 1769 if (ret != sizeof(mem))
1689 return -1; 1770 return -1;
1690 1771
@@ -1756,7 +1837,7 @@ static int process_cmdline(struct perf_file_section *section __maybe_unused,
1756 u32 nr, i; 1837 u32 nr, i;
1757 struct strbuf sb; 1838 struct strbuf sb;
1758 1839
1759 ret = read(fd, &nr, sizeof(nr)); 1840 ret = readn(fd, &nr, sizeof(nr));
1760 if (ret != sizeof(nr)) 1841 if (ret != sizeof(nr))
1761 return -1; 1842 return -1;
1762 1843
@@ -1792,7 +1873,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
1792 char *str; 1873 char *str;
1793 struct strbuf sb; 1874 struct strbuf sb;
1794 1875
1795 ret = read(fd, &nr, sizeof(nr)); 1876 ret = readn(fd, &nr, sizeof(nr));
1796 if (ret != sizeof(nr)) 1877 if (ret != sizeof(nr))
1797 return -1; 1878 return -1;
1798 1879
@@ -1813,7 +1894,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
1813 } 1894 }
1814 ph->env.sibling_cores = strbuf_detach(&sb, NULL); 1895 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1815 1896
1816 ret = read(fd, &nr, sizeof(nr)); 1897 ret = readn(fd, &nr, sizeof(nr));
1817 if (ret != sizeof(nr)) 1898 if (ret != sizeof(nr))
1818 return -1; 1899 return -1;
1819 1900
@@ -1850,7 +1931,7 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
1850 struct strbuf sb; 1931 struct strbuf sb;
1851 1932
1852 /* nr nodes */ 1933 /* nr nodes */
1853 ret = read(fd, &nr, sizeof(nr)); 1934 ret = readn(fd, &nr, sizeof(nr));
1854 if (ret != sizeof(nr)) 1935 if (ret != sizeof(nr))
1855 goto error; 1936 goto error;
1856 1937
@@ -1862,15 +1943,15 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
1862 1943
1863 for (i = 0; i < nr; i++) { 1944 for (i = 0; i < nr; i++) {
1864 /* node number */ 1945 /* node number */
1865 ret = read(fd, &node, sizeof(node)); 1946 ret = readn(fd, &node, sizeof(node));
1866 if (ret != sizeof(node)) 1947 if (ret != sizeof(node))
1867 goto error; 1948 goto error;
1868 1949
1869 ret = read(fd, &mem_total, sizeof(u64)); 1950 ret = readn(fd, &mem_total, sizeof(u64));
1870 if (ret != sizeof(u64)) 1951 if (ret != sizeof(u64))
1871 goto error; 1952 goto error;
1872 1953
1873 ret = read(fd, &mem_free, sizeof(u64)); 1954 ret = readn(fd, &mem_free, sizeof(u64));
1874 if (ret != sizeof(u64)) 1955 if (ret != sizeof(u64))
1875 goto error; 1956 goto error;
1876 1957
@@ -1909,7 +1990,7 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused
1909 u32 type; 1990 u32 type;
1910 struct strbuf sb; 1991 struct strbuf sb;
1911 1992
1912 ret = read(fd, &pmu_num, sizeof(pmu_num)); 1993 ret = readn(fd, &pmu_num, sizeof(pmu_num));
1913 if (ret != sizeof(pmu_num)) 1994 if (ret != sizeof(pmu_num))
1914 return -1; 1995 return -1;
1915 1996
@@ -1925,7 +2006,7 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused
1925 strbuf_init(&sb, 128); 2006 strbuf_init(&sb, 128);
1926 2007
1927 while (pmu_num) { 2008 while (pmu_num) {
1928 if (read(fd, &type, sizeof(type)) != sizeof(type)) 2009 if (readn(fd, &type, sizeof(type)) != sizeof(type))
1929 goto error; 2010 goto error;
1930 if (ph->needs_swap) 2011 if (ph->needs_swap)
1931 type = bswap_32(type); 2012 type = bswap_32(type);
@@ -1949,6 +2030,98 @@ error:
1949 return -1; 2030 return -1;
1950} 2031}
1951 2032
2033static int process_group_desc(struct perf_file_section *section __maybe_unused,
2034 struct perf_header *ph, int fd,
2035 void *data __maybe_unused)
2036{
2037 size_t ret = -1;
2038 u32 i, nr, nr_groups;
2039 struct perf_session *session;
2040 struct perf_evsel *evsel, *leader = NULL;
2041 struct group_desc {
2042 char *name;
2043 u32 leader_idx;
2044 u32 nr_members;
2045 } *desc;
2046
2047 if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2048 return -1;
2049
2050 if (ph->needs_swap)
2051 nr_groups = bswap_32(nr_groups);
2052
2053 ph->env.nr_groups = nr_groups;
2054 if (!nr_groups) {
2055 pr_debug("group desc not available\n");
2056 return 0;
2057 }
2058
2059 desc = calloc(nr_groups, sizeof(*desc));
2060 if (!desc)
2061 return -1;
2062
2063 for (i = 0; i < nr_groups; i++) {
2064 desc[i].name = do_read_string(fd, ph);
2065 if (!desc[i].name)
2066 goto out_free;
2067
2068 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2069 goto out_free;
2070
2071 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2072 goto out_free;
2073
2074 if (ph->needs_swap) {
2075 desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2076 desc[i].nr_members = bswap_32(desc[i].nr_members);
2077 }
2078 }
2079
2080 /*
2081 * Rebuild group relationship based on the group_desc
2082 */
2083 session = container_of(ph, struct perf_session, header);
2084 session->evlist->nr_groups = nr_groups;
2085
2086 i = nr = 0;
2087 list_for_each_entry(evsel, &session->evlist->entries, node) {
2088 if (evsel->idx == (int) desc[i].leader_idx) {
2089 evsel->leader = evsel;
2090 /* {anon_group} is a dummy name */
2091 if (strcmp(desc[i].name, "{anon_group}"))
2092 evsel->group_name = desc[i].name;
2093 evsel->nr_members = desc[i].nr_members;
2094
2095 if (i >= nr_groups || nr > 0) {
2096 pr_debug("invalid group desc\n");
2097 goto out_free;
2098 }
2099
2100 leader = evsel;
2101 nr = evsel->nr_members - 1;
2102 i++;
2103 } else if (nr) {
2104 /* This is a group member */
2105 evsel->leader = leader;
2106
2107 nr--;
2108 }
2109 }
2110
2111 if (i != nr_groups || nr != 0) {
2112 pr_debug("invalid group desc\n");
2113 goto out_free;
2114 }
2115
2116 ret = 0;
2117out_free:
2118 while ((int) --i >= 0)
2119 free(desc[i].name);
2120 free(desc);
2121
2122 return ret;
2123}
2124
1952struct feature_ops { 2125struct feature_ops {
1953 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); 2126 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
1954 void (*print)(struct perf_header *h, int fd, FILE *fp); 2127 void (*print)(struct perf_header *h, int fd, FILE *fp);
@@ -1988,6 +2161,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
1988 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), 2161 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
1989 FEAT_OPA(HEADER_BRANCH_STACK, branch_stack), 2162 FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
1990 FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings), 2163 FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
2164 FEAT_OPP(HEADER_GROUP_DESC, group_desc),
1991}; 2165};
1992 2166
1993struct header_print_data { 2167struct header_print_data {
@@ -2077,7 +2251,7 @@ static int perf_header__adds_write(struct perf_header *header,
2077 if (!nr_sections) 2251 if (!nr_sections)
2078 return 0; 2252 return 0;
2079 2253
2080 feat_sec = p = calloc(sizeof(*feat_sec), nr_sections); 2254 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2081 if (feat_sec == NULL) 2255 if (feat_sec == NULL)
2082 return -ENOMEM; 2256 return -ENOMEM;
2083 2257
@@ -2249,7 +2423,7 @@ int perf_header__process_sections(struct perf_header *header, int fd,
2249 if (!nr_sections) 2423 if (!nr_sections)
2250 return 0; 2424 return 0;
2251 2425
2252 feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections); 2426 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2253 if (!feat_sec) 2427 if (!feat_sec)
2254 return -1; 2428 return -1;
2255 2429
@@ -2576,6 +2750,11 @@ static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2576 if (evsel->tp_format) 2750 if (evsel->tp_format)
2577 return 0; 2751 return 0;
2578 2752
2753 if (pevent == NULL) {
2754 pr_debug("broken or missing trace data\n");
2755 return -1;
2756 }
2757
2579 event = pevent_find_event(pevent, evsel->attr.config); 2758 event = pevent_find_event(pevent, evsel->attr.config);
2580 if (event == NULL) 2759 if (event == NULL)
2581 return -1; 2760 return -1;
@@ -2613,7 +2792,7 @@ int perf_session__read_header(struct perf_session *session, int fd)
2613 u64 f_id; 2792 u64 f_id;
2614 int nr_attrs, nr_ids, i, j; 2793 int nr_attrs, nr_ids, i, j;
2615 2794
2616 session->evlist = perf_evlist__new(NULL, NULL); 2795 session->evlist = perf_evlist__new();
2617 if (session->evlist == NULL) 2796 if (session->evlist == NULL)
2618 return -ENOMEM; 2797 return -ENOMEM;
2619 2798
@@ -2764,7 +2943,7 @@ int perf_event__process_attr(union perf_event *event,
2764 struct perf_evlist *evlist = *pevlist; 2943 struct perf_evlist *evlist = *pevlist;
2765 2944
2766 if (evlist == NULL) { 2945 if (evlist == NULL) {
2767 *pevlist = evlist = perf_evlist__new(NULL, NULL); 2946 *pevlist = evlist = perf_evlist__new();
2768 if (evlist == NULL) 2947 if (evlist == NULL)
2769 return -ENOMEM; 2948 return -ENOMEM;
2770 } 2949 }
@@ -2912,16 +3091,22 @@ int perf_event__process_tracing_data(union perf_event *event,
2912 session->repipe); 3091 session->repipe);
2913 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; 3092 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
2914 3093
2915 if (read(session->fd, buf, padding) < 0) 3094 if (readn(session->fd, buf, padding) < 0) {
2916 die("reading input file"); 3095 pr_err("%s: reading input file", __func__);
3096 return -1;
3097 }
2917 if (session->repipe) { 3098 if (session->repipe) {
2918 int retw = write(STDOUT_FILENO, buf, padding); 3099 int retw = write(STDOUT_FILENO, buf, padding);
2919 if (retw <= 0 || retw != padding) 3100 if (retw <= 0 || retw != padding) {
2920 die("repiping tracing data padding"); 3101 pr_err("%s: repiping tracing data padding", __func__);
3102 return -1;
3103 }
2921 } 3104 }
2922 3105
2923 if (size_read + padding != size) 3106 if (size_read + padding != size) {
2924 die("tracing data size mismatch"); 3107 pr_err("%s: tracing data size mismatch", __func__);
3108 return -1;
3109 }
2925 3110
2926 perf_evlist__prepare_tracepoint_events(session->evlist, 3111 perf_evlist__prepare_tracepoint_events(session->evlist,
2927 session->pevent); 3112 session->pevent);
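
Most of the header.c churn above swaps bare read() calls for readn(): a single read() is not guaranteed to return everything that was asked for (pipes, signal interruption), and the old `ret != sizeof(...)` checks turned any short read into a hard failure. The behaviour readn() is relied on to provide is roughly the following; read_exact() is a hypothetical stand-in written for illustration, not the perf helper itself:

/* Sketch of "read exactly n bytes or fail": retry on short reads. */
#include <errno.h>
#include <unistd.h>

static ssize_t read_exact(int fd, void *buf, size_t n)	/* hypothetical */
{
	char *p = buf;
	size_t left = n;

	while (left) {
		ssize_t ret = read(fd, p, left);

		if (ret < 0 && errno == EINTR)
			continue;		/* interrupted: retry */
		if (ret <= 0)
			return ret;		/* error, or EOF before n bytes */

		p += ret;
		left -= ret;
	}

	return n;
}
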
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 20f0344accb1..c9fc55cada6d 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -29,6 +29,7 @@ enum {
29 HEADER_NUMA_TOPOLOGY, 29 HEADER_NUMA_TOPOLOGY,
30 HEADER_BRANCH_STACK, 30 HEADER_BRANCH_STACK,
31 HEADER_PMU_MAPPINGS, 31 HEADER_PMU_MAPPINGS,
32 HEADER_GROUP_DESC,
32 HEADER_LAST_FEATURE, 33 HEADER_LAST_FEATURE,
33 HEADER_FEAT_BITS = 256, 34 HEADER_FEAT_BITS = 256,
34}; 35};
@@ -79,6 +80,7 @@ struct perf_session_env {
79 char *numa_nodes; 80 char *numa_nodes;
80 int nr_pmu_mappings; 81 int nr_pmu_mappings;
81 char *pmu_mappings; 82 char *pmu_mappings;
83 int nr_groups;
82}; 84};
83 85
84struct perf_header { 86struct perf_header {
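HEADER_GROUP_DESC is added just before HEADER_LAST_FEATURE so existing feature bits keep their positions, and perf_session_env records the group count. A sketch of how the new bit would typically be consulted, assuming the existing perf_header__has_feat() helper:

        /* illustrative only: report how many event groups the file describes */
        if (perf_header__has_feat(&session->header, HEADER_GROUP_DESC))
                pr_debug("perf.data describes %d event group(s)\n",
                         session->header.env.nr_groups);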
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index cb17e2a8c6ed..6b32721f829a 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -4,6 +4,7 @@
4#include "hist.h" 4#include "hist.h"
5#include "session.h" 5#include "session.h"
6#include "sort.h" 6#include "sort.h"
7#include "evsel.h"
7#include <math.h> 8#include <math.h>
8 9
9static bool hists__filter_entry_by_dso(struct hists *hists, 10static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -66,12 +67,16 @@ static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
66void hists__calc_col_len(struct hists *hists, struct hist_entry *h) 67void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
67{ 68{
68 const unsigned int unresolved_col_width = BITS_PER_LONG / 4; 69 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
70 int symlen;
69 u16 len; 71 u16 len;
70 72
71 if (h->ms.sym) 73 if (h->ms.sym)
72 hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4); 74 hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
73 else 75 else {
76 symlen = unresolved_col_width + 4 + 2;
77 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
74 hists__set_unres_dso_col_len(hists, HISTC_DSO); 78 hists__set_unres_dso_col_len(hists, HISTC_DSO);
79 }
75 80
76 len = thread__comm_len(h->thread); 81 len = thread__comm_len(h->thread);
77 if (hists__new_col_len(hists, HISTC_COMM, len)) 82 if (hists__new_col_len(hists, HISTC_COMM, len))
@@ -82,8 +87,10 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
82 hists__new_col_len(hists, HISTC_DSO, len); 87 hists__new_col_len(hists, HISTC_DSO, len);
83 } 88 }
84 89
90 if (h->parent)
91 hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
92
85 if (h->branch_info) { 93 if (h->branch_info) {
86 int symlen;
87 /* 94 /*
88 * +4 accounts for '[x] ' priv level info 95 * +4 accounts for '[x] ' priv level info
89 * +2 account of 0x prefix on raw addresses 96 * +2 account of 0x prefix on raw addresses
@@ -112,6 +119,42 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
112 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); 119 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
113 } 120 }
114 } 121 }
122
123 if (h->mem_info) {
124 /*
125 * +4 accounts for '[x] ' priv level info
126 * +2 account of 0x prefix on raw addresses
127 */
128 if (h->mem_info->daddr.sym) {
129 symlen = (int)h->mem_info->daddr.sym->namelen + 4
130 + unresolved_col_width + 2;
131 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
132 symlen);
133 } else {
134 symlen = unresolved_col_width + 4 + 2;
135 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
136 symlen);
137 }
138 if (h->mem_info->daddr.map) {
139 symlen = dso__name_len(h->mem_info->daddr.map->dso);
140 hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
141 symlen);
142 } else {
143 symlen = unresolved_col_width + 4 + 2;
144 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
145 }
146 } else {
147 symlen = unresolved_col_width + 4 + 2;
148 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
149 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
150 }
151
152 hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
153 hists__new_col_len(hists, HISTC_MEM_TLB, 22);
154 hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
155 hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
156 hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
157 hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
115} 158}
116 159
117void hists__output_recalc_col_len(struct hists *hists, int max_rows) 160void hists__output_recalc_col_len(struct hists *hists, int max_rows)
@@ -151,9 +194,12 @@ static void hist_entry__add_cpumode_period(struct hist_entry *he,
151 } 194 }
152} 195}
153 196
154static void he_stat__add_period(struct he_stat *he_stat, u64 period) 197static void he_stat__add_period(struct he_stat *he_stat, u64 period,
198 u64 weight)
155{ 199{
200
156 he_stat->period += period; 201 he_stat->period += period;
202 he_stat->weight += weight;
157 he_stat->nr_events += 1; 203 he_stat->nr_events += 1;
158} 204}
159 205
@@ -165,12 +211,14 @@ static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
165 dest->period_guest_sys += src->period_guest_sys; 211 dest->period_guest_sys += src->period_guest_sys;
166 dest->period_guest_us += src->period_guest_us; 212 dest->period_guest_us += src->period_guest_us;
167 dest->nr_events += src->nr_events; 213 dest->nr_events += src->nr_events;
214 dest->weight += src->weight;
168} 215}
169 216
170static void hist_entry__decay(struct hist_entry *he) 217static void hist_entry__decay(struct hist_entry *he)
171{ 218{
172 he->stat.period = (he->stat.period * 7) / 8; 219 he->stat.period = (he->stat.period * 7) / 8;
173 he->stat.nr_events = (he->stat.nr_events * 7) / 8; 220 he->stat.nr_events = (he->stat.nr_events * 7) / 8;
221 /* XXX need decay for weight too? */
174} 222}
175 223
176static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) 224static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
@@ -235,13 +283,28 @@ void hists__decay_entries_threaded(struct hists *hists,
235static struct hist_entry *hist_entry__new(struct hist_entry *template) 283static struct hist_entry *hist_entry__new(struct hist_entry *template)
236{ 284{
237 size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0; 285 size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
238 struct hist_entry *he = malloc(sizeof(*he) + callchain_size); 286 struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);
239 287
240 if (he != NULL) { 288 if (he != NULL) {
241 *he = *template; 289 *he = *template;
242 290
243 if (he->ms.map) 291 if (he->ms.map)
244 he->ms.map->referenced = true; 292 he->ms.map->referenced = true;
293
294 if (he->branch_info) {
295 if (he->branch_info->from.map)
296 he->branch_info->from.map->referenced = true;
297 if (he->branch_info->to.map)
298 he->branch_info->to.map->referenced = true;
299 }
300
301 if (he->mem_info) {
302 if (he->mem_info->iaddr.map)
303 he->mem_info->iaddr.map->referenced = true;
304 if (he->mem_info->daddr.map)
305 he->mem_info->daddr.map->referenced = true;
306 }
307
245 if (symbol_conf.use_callchain) 308 if (symbol_conf.use_callchain)
246 callchain_init(he->callchain); 309 callchain_init(he->callchain);
247 310
@@ -251,7 +314,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template)
251 return he; 314 return he;
252} 315}
253 316
254static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h) 317void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
255{ 318{
256 if (!h->filtered) { 319 if (!h->filtered) {
257 hists__calc_col_len(hists, h); 320 hists__calc_col_len(hists, h);
@@ -270,7 +333,8 @@ static u8 symbol__parent_filter(const struct symbol *parent)
270static struct hist_entry *add_hist_entry(struct hists *hists, 333static struct hist_entry *add_hist_entry(struct hists *hists,
271 struct hist_entry *entry, 334 struct hist_entry *entry,
272 struct addr_location *al, 335 struct addr_location *al,
273 u64 period) 336 u64 period,
337 u64 weight)
274{ 338{
275 struct rb_node **p; 339 struct rb_node **p;
276 struct rb_node *parent = NULL; 340 struct rb_node *parent = NULL;
@@ -285,10 +349,16 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
285 parent = *p; 349 parent = *p;
286 he = rb_entry(parent, struct hist_entry, rb_node_in); 350 he = rb_entry(parent, struct hist_entry, rb_node_in);
287 351
288 cmp = hist_entry__cmp(entry, he); 352 /*
 353 * Make sure that it receives arguments in the same order as
 354 * hist_entry__collapse() so that we can use an appropriate
 355 * function when searching an entry regardless of which sort
356 * keys were used.
357 */
358 cmp = hist_entry__cmp(he, entry);
289 359
290 if (!cmp) { 360 if (!cmp) {
291 he_stat__add_period(&he->stat, period); 361 he_stat__add_period(&he->stat, period, weight);
292 362
293 /* If the map of an existing hist_entry has 363 /* If the map of an existing hist_entry has
294 * become out-of-date due to an exec() or 364 * become out-of-date due to an exec() or
@@ -323,11 +393,42 @@ out_unlock:
323 return he; 393 return he;
324} 394}
325 395
396struct hist_entry *__hists__add_mem_entry(struct hists *self,
397 struct addr_location *al,
398 struct symbol *sym_parent,
399 struct mem_info *mi,
400 u64 period,
401 u64 weight)
402{
403 struct hist_entry entry = {
404 .thread = al->thread,
405 .ms = {
406 .map = al->map,
407 .sym = al->sym,
408 },
409 .stat = {
410 .period = period,
411 .weight = weight,
412 .nr_events = 1,
413 },
414 .cpu = al->cpu,
415 .ip = al->addr,
416 .level = al->level,
417 .parent = sym_parent,
418 .filtered = symbol__parent_filter(sym_parent),
419 .hists = self,
420 .mem_info = mi,
421 .branch_info = NULL,
422 };
423 return add_hist_entry(self, &entry, al, period, weight);
424}
425
326struct hist_entry *__hists__add_branch_entry(struct hists *self, 426struct hist_entry *__hists__add_branch_entry(struct hists *self,
327 struct addr_location *al, 427 struct addr_location *al,
328 struct symbol *sym_parent, 428 struct symbol *sym_parent,
329 struct branch_info *bi, 429 struct branch_info *bi,
330 u64 period) 430 u64 period,
431 u64 weight)
331{ 432{
332 struct hist_entry entry = { 433 struct hist_entry entry = {
333 .thread = al->thread, 434 .thread = al->thread,
@@ -341,19 +442,22 @@ struct hist_entry *__hists__add_branch_entry(struct hists *self,
341 .stat = { 442 .stat = {
342 .period = period, 443 .period = period,
343 .nr_events = 1, 444 .nr_events = 1,
445 .weight = weight,
344 }, 446 },
345 .parent = sym_parent, 447 .parent = sym_parent,
346 .filtered = symbol__parent_filter(sym_parent), 448 .filtered = symbol__parent_filter(sym_parent),
347 .branch_info = bi, 449 .branch_info = bi,
348 .hists = self, 450 .hists = self,
451 .mem_info = NULL,
349 }; 452 };
350 453
351 return add_hist_entry(self, &entry, al, period); 454 return add_hist_entry(self, &entry, al, period, weight);
352} 455}
353 456
354struct hist_entry *__hists__add_entry(struct hists *self, 457struct hist_entry *__hists__add_entry(struct hists *self,
355 struct addr_location *al, 458 struct addr_location *al,
356 struct symbol *sym_parent, u64 period) 459 struct symbol *sym_parent, u64 period,
460 u64 weight)
357{ 461{
358 struct hist_entry entry = { 462 struct hist_entry entry = {
359 .thread = al->thread, 463 .thread = al->thread,
@@ -367,13 +471,16 @@ struct hist_entry *__hists__add_entry(struct hists *self,
367 .stat = { 471 .stat = {
368 .period = period, 472 .period = period,
369 .nr_events = 1, 473 .nr_events = 1,
474 .weight = weight,
370 }, 475 },
371 .parent = sym_parent, 476 .parent = sym_parent,
372 .filtered = symbol__parent_filter(sym_parent), 477 .filtered = symbol__parent_filter(sym_parent),
373 .hists = self, 478 .hists = self,
479 .branch_info = NULL,
480 .mem_info = NULL,
374 }; 481 };
375 482
376 return add_hist_entry(self, &entry, al, period); 483 return add_hist_entry(self, &entry, al, period, weight);
377} 484}
378 485
379int64_t 486int64_t
@@ -413,6 +520,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
413void hist_entry__free(struct hist_entry *he) 520void hist_entry__free(struct hist_entry *he)
414{ 521{
415 free(he->branch_info); 522 free(he->branch_info);
523 free(he->mem_info);
416 free(he); 524 free(he);
417} 525}
418 526
@@ -523,6 +631,62 @@ void hists__collapse_resort_threaded(struct hists *hists)
523 * reverse the map, sort on period. 631 * reverse the map, sort on period.
524 */ 632 */
525 633
634static int period_cmp(u64 period_a, u64 period_b)
635{
636 if (period_a > period_b)
637 return 1;
638 if (period_a < period_b)
639 return -1;
640 return 0;
641}
642
643static int hist_entry__sort_on_period(struct hist_entry *a,
644 struct hist_entry *b)
645{
646 int ret;
647 int i, nr_members;
648 struct perf_evsel *evsel;
649 struct hist_entry *pair;
650 u64 *periods_a, *periods_b;
651
652 ret = period_cmp(a->stat.period, b->stat.period);
653 if (ret || !symbol_conf.event_group)
654 return ret;
655
656 evsel = hists_to_evsel(a->hists);
657 nr_members = evsel->nr_members;
658 if (nr_members <= 1)
659 return ret;
660
 661 periods_a = zalloc(sizeof(*periods_a) * nr_members);
 662 periods_b = zalloc(sizeof(*periods_b) * nr_members);
663
664 if (!periods_a || !periods_b)
665 goto out;
666
667 list_for_each_entry(pair, &a->pairs.head, pairs.node) {
668 evsel = hists_to_evsel(pair->hists);
669 periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
670 }
671
672 list_for_each_entry(pair, &b->pairs.head, pairs.node) {
673 evsel = hists_to_evsel(pair->hists);
674 periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
675 }
676
677 for (i = 1; i < nr_members; i++) {
678 ret = period_cmp(periods_a[i], periods_b[i]);
679 if (ret)
680 break;
681 }
682
683out:
684 free(periods_a);
685 free(periods_b);
686
687 return ret;
688}
689
526static void __hists__insert_output_entry(struct rb_root *entries, 690static void __hists__insert_output_entry(struct rb_root *entries,
527 struct hist_entry *he, 691 struct hist_entry *he,
528 u64 min_callchain_hits) 692 u64 min_callchain_hits)
@@ -539,7 +703,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
539 parent = *p; 703 parent = *p;
540 iter = rb_entry(parent, struct hist_entry, rb_node); 704 iter = rb_entry(parent, struct hist_entry, rb_node);
541 705
542 if (he->stat.period > iter->stat.period) 706 if (hist_entry__sort_on_period(he, iter) > 0)
543 p = &(*p)->rb_left; 707 p = &(*p)->rb_left;
544 else 708 else
545 p = &(*p)->rb_right; 709 p = &(*p)->rb_right;
@@ -711,25 +875,38 @@ int hist_entry__annotate(struct hist_entry *he, size_t privsize)
711 return symbol__annotate(he->ms.sym, he->ms.map, privsize); 875 return symbol__annotate(he->ms.sym, he->ms.map, privsize);
712} 876}
713 877
878void events_stats__inc(struct events_stats *stats, u32 type)
879{
880 ++stats->nr_events[0];
881 ++stats->nr_events[type];
882}
883
714void hists__inc_nr_events(struct hists *hists, u32 type) 884void hists__inc_nr_events(struct hists *hists, u32 type)
715{ 885{
716 ++hists->stats.nr_events[0]; 886 events_stats__inc(&hists->stats, type);
717 ++hists->stats.nr_events[type];
718} 887}
719 888
720static struct hist_entry *hists__add_dummy_entry(struct hists *hists, 889static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
721 struct hist_entry *pair) 890 struct hist_entry *pair)
722{ 891{
723 struct rb_node **p = &hists->entries.rb_node; 892 struct rb_root *root;
893 struct rb_node **p;
724 struct rb_node *parent = NULL; 894 struct rb_node *parent = NULL;
725 struct hist_entry *he; 895 struct hist_entry *he;
726 int cmp; 896 int cmp;
727 897
898 if (sort__need_collapse)
899 root = &hists->entries_collapsed;
900 else
901 root = hists->entries_in;
902
903 p = &root->rb_node;
904
728 while (*p != NULL) { 905 while (*p != NULL) {
729 parent = *p; 906 parent = *p;
730 he = rb_entry(parent, struct hist_entry, rb_node); 907 he = rb_entry(parent, struct hist_entry, rb_node_in);
731 908
732 cmp = hist_entry__cmp(pair, he); 909 cmp = hist_entry__collapse(he, pair);
733 910
734 if (!cmp) 911 if (!cmp)
735 goto out; 912 goto out;
@@ -744,8 +921,8 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
744 if (he) { 921 if (he) {
745 memset(&he->stat, 0, sizeof(he->stat)); 922 memset(&he->stat, 0, sizeof(he->stat));
746 he->hists = hists; 923 he->hists = hists;
747 rb_link_node(&he->rb_node, parent, p); 924 rb_link_node(&he->rb_node_in, parent, p);
748 rb_insert_color(&he->rb_node, &hists->entries); 925 rb_insert_color(&he->rb_node_in, root);
749 hists__inc_nr_entries(hists, he); 926 hists__inc_nr_entries(hists, he);
750 } 927 }
751out: 928out:
@@ -755,11 +932,16 @@ out:
755static struct hist_entry *hists__find_entry(struct hists *hists, 932static struct hist_entry *hists__find_entry(struct hists *hists,
756 struct hist_entry *he) 933 struct hist_entry *he)
757{ 934{
758 struct rb_node *n = hists->entries.rb_node; 935 struct rb_node *n;
936
937 if (sort__need_collapse)
938 n = hists->entries_collapsed.rb_node;
939 else
940 n = hists->entries_in->rb_node;
759 941
760 while (n) { 942 while (n) {
761 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node); 943 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
762 int64_t cmp = hist_entry__cmp(he, iter); 944 int64_t cmp = hist_entry__collapse(iter, he);
763 945
764 if (cmp < 0) 946 if (cmp < 0)
765 n = n->rb_left; 947 n = n->rb_left;
@@ -777,15 +959,21 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
777 */ 959 */
778void hists__match(struct hists *leader, struct hists *other) 960void hists__match(struct hists *leader, struct hists *other)
779{ 961{
962 struct rb_root *root;
780 struct rb_node *nd; 963 struct rb_node *nd;
781 struct hist_entry *pos, *pair; 964 struct hist_entry *pos, *pair;
782 965
783 for (nd = rb_first(&leader->entries); nd; nd = rb_next(nd)) { 966 if (sort__need_collapse)
784 pos = rb_entry(nd, struct hist_entry, rb_node); 967 root = &leader->entries_collapsed;
968 else
969 root = leader->entries_in;
970
971 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
972 pos = rb_entry(nd, struct hist_entry, rb_node_in);
785 pair = hists__find_entry(other, pos); 973 pair = hists__find_entry(other, pos);
786 974
787 if (pair) 975 if (pair)
788 hist__entry_add_pair(pos, pair); 976 hist_entry__add_pair(pair, pos);
789 } 977 }
790} 978}
791 979
@@ -796,17 +984,23 @@ void hists__match(struct hists *leader, struct hists *other)
796 */ 984 */
797int hists__link(struct hists *leader, struct hists *other) 985int hists__link(struct hists *leader, struct hists *other)
798{ 986{
987 struct rb_root *root;
799 struct rb_node *nd; 988 struct rb_node *nd;
800 struct hist_entry *pos, *pair; 989 struct hist_entry *pos, *pair;
801 990
802 for (nd = rb_first(&other->entries); nd; nd = rb_next(nd)) { 991 if (sort__need_collapse)
803 pos = rb_entry(nd, struct hist_entry, rb_node); 992 root = &other->entries_collapsed;
993 else
994 root = other->entries_in;
995
996 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
997 pos = rb_entry(nd, struct hist_entry, rb_node_in);
804 998
805 if (!hist_entry__has_pairs(pos)) { 999 if (!hist_entry__has_pairs(pos)) {
806 pair = hists__add_dummy_entry(leader, pos); 1000 pair = hists__add_dummy_entry(leader, pos);
807 if (pair == NULL) 1001 if (pair == NULL)
808 return -1; 1002 return -1;
809 hist__entry_add_pair(pair, pos); 1003 hist_entry__add_pair(pos, pair);
810 } 1004 }
811 } 1005 }
812 1006
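Two themes run through the hist.c hunks: a per-sample weight now travels with the period into he_stat, and the matching/linking code walks the collapsed (rb_node_in) tree so it works for any sort key. A hedged sketch of the extended entry point, with local names chosen for illustration:

        struct hist_entry *he;

        /* period and weight both come from the decoded sample */
        he = __hists__add_entry(&evsel->hists, &al, parent,
                                sample->period, sample->weight);
        if (he == NULL)
                return -ENOMEM;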
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 8b091a51e4a2..14c2fe20aa62 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -49,6 +49,14 @@ enum hist_column {
49 HISTC_DSO_FROM, 49 HISTC_DSO_FROM,
50 HISTC_DSO_TO, 50 HISTC_DSO_TO,
51 HISTC_SRCLINE, 51 HISTC_SRCLINE,
52 HISTC_LOCAL_WEIGHT,
53 HISTC_GLOBAL_WEIGHT,
54 HISTC_MEM_DADDR_SYMBOL,
55 HISTC_MEM_DADDR_DSO,
56 HISTC_MEM_LOCKED,
57 HISTC_MEM_TLB,
58 HISTC_MEM_LVL,
59 HISTC_MEM_SNOOP,
52 HISTC_NR_COLS, /* Last entry */ 60 HISTC_NR_COLS, /* Last entry */
53}; 61};
54 62
@@ -73,7 +81,8 @@ struct hists {
73 81
74struct hist_entry *__hists__add_entry(struct hists *self, 82struct hist_entry *__hists__add_entry(struct hists *self,
75 struct addr_location *al, 83 struct addr_location *al,
76 struct symbol *parent, u64 period); 84 struct symbol *parent, u64 period,
85 u64 weight);
77int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right); 86int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
78int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right); 87int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
79int hist_entry__sort_snprintf(struct hist_entry *self, char *bf, size_t size, 88int hist_entry__sort_snprintf(struct hist_entry *self, char *bf, size_t size,
@@ -84,7 +93,15 @@ struct hist_entry *__hists__add_branch_entry(struct hists *self,
84 struct addr_location *al, 93 struct addr_location *al,
85 struct symbol *sym_parent, 94 struct symbol *sym_parent,
86 struct branch_info *bi, 95 struct branch_info *bi,
87 u64 period); 96 u64 period,
97 u64 weight);
98
99struct hist_entry *__hists__add_mem_entry(struct hists *self,
100 struct addr_location *al,
101 struct symbol *sym_parent,
102 struct mem_info *mi,
103 u64 period,
104 u64 weight);
88 105
89void hists__output_resort(struct hists *self); 106void hists__output_resort(struct hists *self);
90void hists__output_resort_threaded(struct hists *hists); 107void hists__output_resort_threaded(struct hists *hists);
@@ -96,8 +113,10 @@ void hists__decay_entries_threaded(struct hists *hists, bool zap_user,
96 bool zap_kernel); 113 bool zap_kernel);
97void hists__output_recalc_col_len(struct hists *hists, int max_rows); 114void hists__output_recalc_col_len(struct hists *hists, int max_rows);
98 115
116void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h);
99void hists__inc_nr_events(struct hists *self, u32 type); 117void hists__inc_nr_events(struct hists *self, u32 type);
100size_t hists__fprintf_nr_events(struct hists *self, FILE *fp); 118void events_stats__inc(struct events_stats *stats, u32 type);
119size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
101 120
102size_t hists__fprintf(struct hists *self, bool show_header, int max_rows, 121size_t hists__fprintf(struct hists *self, bool show_header, int max_rows,
103 int max_cols, FILE *fp); 122 int max_cols, FILE *fp);
@@ -126,13 +145,19 @@ struct perf_hpp {
126}; 145};
127 146
128struct perf_hpp_fmt { 147struct perf_hpp_fmt {
129 bool cond;
130 int (*header)(struct perf_hpp *hpp); 148 int (*header)(struct perf_hpp *hpp);
131 int (*width)(struct perf_hpp *hpp); 149 int (*width)(struct perf_hpp *hpp);
132 int (*color)(struct perf_hpp *hpp, struct hist_entry *he); 150 int (*color)(struct perf_hpp *hpp, struct hist_entry *he);
133 int (*entry)(struct perf_hpp *hpp, struct hist_entry *he); 151 int (*entry)(struct perf_hpp *hpp, struct hist_entry *he);
152
153 struct list_head list;
134}; 154};
135 155
156extern struct list_head perf_hpp__list;
157
158#define perf_hpp__for_each_format(format) \
159 list_for_each_entry(format, &perf_hpp__list, list)
160
136extern struct perf_hpp_fmt perf_hpp__format[]; 161extern struct perf_hpp_fmt perf_hpp__format[];
137 162
138enum { 163enum {
@@ -148,14 +173,14 @@ enum {
148 PERF_HPP__DELTA, 173 PERF_HPP__DELTA,
149 PERF_HPP__RATIO, 174 PERF_HPP__RATIO,
150 PERF_HPP__WEIGHTED_DIFF, 175 PERF_HPP__WEIGHTED_DIFF,
151 PERF_HPP__DISPL,
152 PERF_HPP__FORMULA, 176 PERF_HPP__FORMULA,
153 177
154 PERF_HPP__MAX_INDEX 178 PERF_HPP__MAX_INDEX
155}; 179};
156 180
157void perf_hpp__init(void); 181void perf_hpp__init(void);
158void perf_hpp__column_enable(unsigned col, bool enable); 182void perf_hpp__column_register(struct perf_hpp_fmt *format);
183void perf_hpp__column_enable(unsigned col);
159int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, 184int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
160 bool color); 185 bool color);
161 186
@@ -167,9 +192,9 @@ struct hist_browser_timer {
167 int refresh; 192 int refresh;
168}; 193};
169 194
170#ifdef NEWT_SUPPORT 195#ifdef SLANG_SUPPORT
171#include "../ui/keysyms.h" 196#include "../ui/keysyms.h"
172int hist_entry__tui_annotate(struct hist_entry *he, int evidx, 197int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
173 struct hist_browser_timer *hbt); 198 struct hist_browser_timer *hbt);
174 199
175int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, 200int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
@@ -188,7 +213,8 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
188 213
189static inline int hist_entry__tui_annotate(struct hist_entry *self 214static inline int hist_entry__tui_annotate(struct hist_entry *self
190 __maybe_unused, 215 __maybe_unused,
191 int evidx __maybe_unused, 216 struct perf_evsel *evsel
217 __maybe_unused,
192 struct hist_browser_timer *hbt 218 struct hist_browser_timer *hbt
193 __maybe_unused) 219 __maybe_unused)
194{ 220{
@@ -200,8 +226,9 @@ static inline int script_browse(const char *script_opt __maybe_unused)
200 return 0; 226 return 0;
201} 227}
202 228
203#define K_LEFT -1 229#define K_LEFT -1000
204#define K_RIGHT -2 230#define K_RIGHT -2000
231#define K_SWITCH_INPUT_DATA -3000
205#endif 232#endif
206 233
207#ifdef GTK2_SUPPORT 234#ifdef GTK2_SUPPORT
@@ -219,8 +246,10 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused,
219 246
220unsigned int hists__sort_list_width(struct hists *self); 247unsigned int hists__sort_list_width(struct hists *self);
221 248
222double perf_diff__compute_delta(struct hist_entry *he); 249double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair);
223double perf_diff__compute_ratio(struct hist_entry *he); 250double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair);
224s64 perf_diff__compute_wdiff(struct hist_entry *he); 251s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair);
225int perf_diff__formula(char *buf, size_t size, struct hist_entry *he); 252int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair,
253 char *buf, size_t size);
254double perf_diff__period_percent(struct hist_entry *he, u64 period);
226#endif /* __PERF_HIST_H */ 255#endif /* __PERF_HIST_H */
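perf_hpp_fmt entries are now linked into perf_hpp__list, so column handling iterates the registered formats rather than testing a per-format flag. A minimal sketch using the macro declared above:

        struct perf_hpp_fmt *fmt;

        perf_hpp__for_each_format(fmt) {
                if (fmt->header)
                        fmt->header(&hpp);      /* hpp prepared by the caller */
        }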
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index a55d8cf083c9..45cf10a562bd 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -14,6 +14,7 @@
14#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) 14#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
15#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64)) 15#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
16#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32)) 16#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
17#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE)
17 18
18#define for_each_set_bit(bit, addr, size) \ 19#define for_each_set_bit(bit, addr, size) \
19 for ((bit) = find_first_bit((addr), (size)); \ 20 for ((bit) = find_first_bit((addr), (size)); \
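BITS_TO_BYTES() rounds a bit count up to whole bytes, mirroring the existing BITS_TO_U64()/BITS_TO_U32() helpers. For instance:

        /* 1..8 bits need 1 byte, 9..16 need 2, and so on (nr_bits is illustrative) */
        size_t bytes = BITS_TO_BYTES(nr_bits);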
diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c
index 9d0740024ba8..11a8d86f7fea 100644
--- a/tools/perf/util/intlist.c
+++ b/tools/perf/util/intlist.c
@@ -59,16 +59,40 @@ void intlist__remove(struct intlist *ilist, struct int_node *node)
59 59
60struct int_node *intlist__find(struct intlist *ilist, int i) 60struct int_node *intlist__find(struct intlist *ilist, int i)
61{ 61{
62 struct int_node *node = NULL; 62 struct int_node *node;
63 struct rb_node *rb_node = rblist__find(&ilist->rblist, (void *)((long)i)); 63 struct rb_node *rb_node;
64 64
65 if (ilist == NULL)
66 return NULL;
67
68 node = NULL;
69 rb_node = rblist__find(&ilist->rblist, (void *)((long)i));
65 if (rb_node) 70 if (rb_node)
66 node = container_of(rb_node, struct int_node, rb_node); 71 node = container_of(rb_node, struct int_node, rb_node);
67 72
68 return node; 73 return node;
69} 74}
70 75
71struct intlist *intlist__new(void) 76static int intlist__parse_list(struct intlist *ilist, const char *s)
77{
78 char *sep;
79 int err;
80
81 do {
82 long value = strtol(s, &sep, 10);
83 err = -EINVAL;
84 if (*sep != ',' && *sep != '\0')
85 break;
86 err = intlist__add(ilist, value);
87 if (err)
88 break;
89 s = sep + 1;
90 } while (*sep != '\0');
91
92 return err;
93}
94
95struct intlist *intlist__new(const char *slist)
72{ 96{
73 struct intlist *ilist = malloc(sizeof(*ilist)); 97 struct intlist *ilist = malloc(sizeof(*ilist));
74 98
@@ -77,9 +101,15 @@ struct intlist *intlist__new(void)
77 ilist->rblist.node_cmp = intlist__node_cmp; 101 ilist->rblist.node_cmp = intlist__node_cmp;
78 ilist->rblist.node_new = intlist__node_new; 102 ilist->rblist.node_new = intlist__node_new;
79 ilist->rblist.node_delete = intlist__node_delete; 103 ilist->rblist.node_delete = intlist__node_delete;
104
105 if (slist && intlist__parse_list(ilist, slist))
106 goto out_delete;
80 } 107 }
81 108
82 return ilist; 109 return ilist;
110out_delete:
111 intlist__delete(ilist);
112 return NULL;
83} 113}
84 114
85void intlist__delete(struct intlist *ilist) 115void intlist__delete(struct intlist *ilist)
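intlist__new() now optionally seeds the list from a comma-separated string and returns NULL if parsing or allocation fails, while intlist__find() tolerates a NULL list. A hedged usage sketch:

        struct intlist *skip = intlist__new("10,20,30");  /* a NULL argument gives an empty list */

        if (skip == NULL)
                return -EINVAL;
        if (intlist__find(skip, 20))
                pr_debug("20 is in the list\n");
        intlist__delete(skip);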
diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h
index 6d63ab90db50..62351dad848f 100644
--- a/tools/perf/util/intlist.h
+++ b/tools/perf/util/intlist.h
@@ -15,7 +15,7 @@ struct intlist {
15 struct rblist rblist; 15 struct rblist rblist;
16}; 16};
17 17
18struct intlist *intlist__new(void); 18struct intlist *intlist__new(const char *slist);
19void intlist__delete(struct intlist *ilist); 19void intlist__delete(struct intlist *ilist);
20 20
21void intlist__remove(struct intlist *ilist, struct int_node *in); 21void intlist__remove(struct intlist *ilist, struct int_node *in);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 1f09d0581e6b..b2ecad6ec46b 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1,10 +1,15 @@
1#include "callchain.h"
1#include "debug.h" 2#include "debug.h"
2#include "event.h" 3#include "event.h"
4#include "evsel.h"
5#include "hist.h"
3#include "machine.h" 6#include "machine.h"
4#include "map.h" 7#include "map.h"
8#include "sort.h"
5#include "strlist.h" 9#include "strlist.h"
6#include "thread.h" 10#include "thread.h"
7#include <stdbool.h> 11#include <stdbool.h>
12#include "unwind.h"
8 13
9int machine__init(struct machine *machine, const char *root_dir, pid_t pid) 14int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
10{ 15{
@@ -48,6 +53,29 @@ static void dsos__delete(struct list_head *dsos)
48 } 53 }
49} 54}
50 55
56void machine__delete_dead_threads(struct machine *machine)
57{
58 struct thread *n, *t;
59
60 list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
61 list_del(&t->node);
62 thread__delete(t);
63 }
64}
65
66void machine__delete_threads(struct machine *machine)
67{
68 struct rb_node *nd = rb_first(&machine->threads);
69
70 while (nd) {
71 struct thread *t = rb_entry(nd, struct thread, rb_node);
72
73 rb_erase(&t->rb_node, &machine->threads);
74 nd = rb_next(nd);
75 thread__delete(t);
76 }
77}
78
51void machine__exit(struct machine *machine) 79void machine__exit(struct machine *machine)
52{ 80{
53 map_groups__exit(&machine->kmaps); 81 map_groups__exit(&machine->kmaps);
@@ -63,10 +91,22 @@ void machine__delete(struct machine *machine)
63 free(machine); 91 free(machine);
64} 92}
65 93
66struct machine *machines__add(struct rb_root *machines, pid_t pid, 94void machines__init(struct machines *machines)
95{
96 machine__init(&machines->host, "", HOST_KERNEL_ID);
97 machines->guests = RB_ROOT;
98}
99
100void machines__exit(struct machines *machines)
101{
102 machine__exit(&machines->host);
103 /* XXX exit guest */
104}
105
106struct machine *machines__add(struct machines *machines, pid_t pid,
67 const char *root_dir) 107 const char *root_dir)
68{ 108{
69 struct rb_node **p = &machines->rb_node; 109 struct rb_node **p = &machines->guests.rb_node;
70 struct rb_node *parent = NULL; 110 struct rb_node *parent = NULL;
71 struct machine *pos, *machine = malloc(sizeof(*machine)); 111 struct machine *pos, *machine = malloc(sizeof(*machine));
72 112
@@ -88,18 +128,21 @@ struct machine *machines__add(struct rb_root *machines, pid_t pid,
88 } 128 }
89 129
90 rb_link_node(&machine->rb_node, parent, p); 130 rb_link_node(&machine->rb_node, parent, p);
91 rb_insert_color(&machine->rb_node, machines); 131 rb_insert_color(&machine->rb_node, &machines->guests);
92 132
93 return machine; 133 return machine;
94} 134}
95 135
96struct machine *machines__find(struct rb_root *machines, pid_t pid) 136struct machine *machines__find(struct machines *machines, pid_t pid)
97{ 137{
98 struct rb_node **p = &machines->rb_node; 138 struct rb_node **p = &machines->guests.rb_node;
99 struct rb_node *parent = NULL; 139 struct rb_node *parent = NULL;
100 struct machine *machine; 140 struct machine *machine;
101 struct machine *default_machine = NULL; 141 struct machine *default_machine = NULL;
102 142
143 if (pid == HOST_KERNEL_ID)
144 return &machines->host;
145
103 while (*p != NULL) { 146 while (*p != NULL) {
104 parent = *p; 147 parent = *p;
105 machine = rb_entry(parent, struct machine, rb_node); 148 machine = rb_entry(parent, struct machine, rb_node);
@@ -116,7 +159,7 @@ struct machine *machines__find(struct rb_root *machines, pid_t pid)
116 return default_machine; 159 return default_machine;
117} 160}
118 161
119struct machine *machines__findnew(struct rb_root *machines, pid_t pid) 162struct machine *machines__findnew(struct machines *machines, pid_t pid)
120{ 163{
121 char path[PATH_MAX]; 164 char path[PATH_MAX];
122 const char *root_dir = ""; 165 const char *root_dir = "";
@@ -150,12 +193,12 @@ out:
150 return machine; 193 return machine;
151} 194}
152 195
153void machines__process(struct rb_root *machines, 196void machines__process_guests(struct machines *machines,
154 machine__process_t process, void *data) 197 machine__process_t process, void *data)
155{ 198{
156 struct rb_node *nd; 199 struct rb_node *nd;
157 200
158 for (nd = rb_first(machines); nd; nd = rb_next(nd)) { 201 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
159 struct machine *pos = rb_entry(nd, struct machine, rb_node); 202 struct machine *pos = rb_entry(nd, struct machine, rb_node);
160 process(pos, data); 203 process(pos, data);
161 } 204 }
@@ -175,12 +218,14 @@ char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
175 return bf; 218 return bf;
176} 219}
177 220
178void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size) 221void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
179{ 222{
180 struct rb_node *node; 223 struct rb_node *node;
181 struct machine *machine; 224 struct machine *machine;
182 225
183 for (node = rb_first(machines); node; node = rb_next(node)) { 226 machines->host.id_hdr_size = id_hdr_size;
227
228 for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
184 machine = rb_entry(node, struct machine, rb_node); 229 machine = rb_entry(node, struct machine, rb_node);
185 machine->id_hdr_size = id_hdr_size; 230 machine->id_hdr_size = id_hdr_size;
186 } 231 }
@@ -264,6 +309,537 @@ int machine__process_lost_event(struct machine *machine __maybe_unused,
264 return 0; 309 return 0;
265} 310}
266 311
312struct map *machine__new_module(struct machine *machine, u64 start,
313 const char *filename)
314{
315 struct map *map;
316 struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
317
318 if (dso == NULL)
319 return NULL;
320
321 map = map__new2(start, dso, MAP__FUNCTION);
322 if (map == NULL)
323 return NULL;
324
325 if (machine__is_host(machine))
326 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
327 else
328 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
329 map_groups__insert(&machine->kmaps, map);
330 return map;
331}
332
333size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
334{
335 struct rb_node *nd;
336 size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
337 __dsos__fprintf(&machines->host.user_dsos, fp);
338
339 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
340 struct machine *pos = rb_entry(nd, struct machine, rb_node);
341 ret += __dsos__fprintf(&pos->kernel_dsos, fp);
342 ret += __dsos__fprintf(&pos->user_dsos, fp);
343 }
344
345 return ret;
346}
347
348size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
349 bool (skip)(struct dso *dso, int parm), int parm)
350{
351 return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
352 __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
353}
354
355size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
356 bool (skip)(struct dso *dso, int parm), int parm)
357{
358 struct rb_node *nd;
359 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
360
361 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
362 struct machine *pos = rb_entry(nd, struct machine, rb_node);
363 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
364 }
365 return ret;
366}
367
368size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
369{
370 int i;
371 size_t printed = 0;
372 struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
373
374 if (kdso->has_build_id) {
375 char filename[PATH_MAX];
376 if (dso__build_id_filename(kdso, filename, sizeof(filename)))
377 printed += fprintf(fp, "[0] %s\n", filename);
378 }
379
380 for (i = 0; i < vmlinux_path__nr_entries; ++i)
381 printed += fprintf(fp, "[%d] %s\n",
382 i + kdso->has_build_id, vmlinux_path[i]);
383
384 return printed;
385}
386
387size_t machine__fprintf(struct machine *machine, FILE *fp)
388{
389 size_t ret = 0;
390 struct rb_node *nd;
391
392 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
393 struct thread *pos = rb_entry(nd, struct thread, rb_node);
394
395 ret += thread__fprintf(pos, fp);
396 }
397
398 return ret;
399}
400
401static struct dso *machine__get_kernel(struct machine *machine)
402{
403 const char *vmlinux_name = NULL;
404 struct dso *kernel;
405
406 if (machine__is_host(machine)) {
407 vmlinux_name = symbol_conf.vmlinux_name;
408 if (!vmlinux_name)
409 vmlinux_name = "[kernel.kallsyms]";
410
411 kernel = dso__kernel_findnew(machine, vmlinux_name,
412 "[kernel]",
413 DSO_TYPE_KERNEL);
414 } else {
415 char bf[PATH_MAX];
416
417 if (machine__is_default_guest(machine))
418 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
419 if (!vmlinux_name)
420 vmlinux_name = machine__mmap_name(machine, bf,
421 sizeof(bf));
422
423 kernel = dso__kernel_findnew(machine, vmlinux_name,
424 "[guest.kernel]",
425 DSO_TYPE_GUEST_KERNEL);
426 }
427
428 if (kernel != NULL && (!kernel->has_build_id))
429 dso__read_running_kernel_build_id(kernel, machine);
430
431 return kernel;
432}
433
434struct process_args {
435 u64 start;
436};
437
438static int symbol__in_kernel(void *arg, const char *name,
439 char type __maybe_unused, u64 start)
440{
441 struct process_args *args = arg;
442
443 if (strchr(name, '['))
444 return 0;
445
446 args->start = start;
447 return 1;
448}
449
450/* Figure out the start address of kernel map from /proc/kallsyms */
451static u64 machine__get_kernel_start_addr(struct machine *machine)
452{
453 const char *filename;
454 char path[PATH_MAX];
455 struct process_args args;
456
457 if (machine__is_host(machine)) {
458 filename = "/proc/kallsyms";
459 } else {
460 if (machine__is_default_guest(machine))
461 filename = (char *)symbol_conf.default_guest_kallsyms;
462 else {
463 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
464 filename = path;
465 }
466 }
467
468 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
469 return 0;
470
471 if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
472 return 0;
473
474 return args.start;
475}
476
477int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
478{
479 enum map_type type;
480 u64 start = machine__get_kernel_start_addr(machine);
481
482 for (type = 0; type < MAP__NR_TYPES; ++type) {
483 struct kmap *kmap;
484
485 machine->vmlinux_maps[type] = map__new2(start, kernel, type);
486 if (machine->vmlinux_maps[type] == NULL)
487 return -1;
488
489 machine->vmlinux_maps[type]->map_ip =
490 machine->vmlinux_maps[type]->unmap_ip =
491 identity__map_ip;
492 kmap = map__kmap(machine->vmlinux_maps[type]);
493 kmap->kmaps = &machine->kmaps;
494 map_groups__insert(&machine->kmaps,
495 machine->vmlinux_maps[type]);
496 }
497
498 return 0;
499}
500
501void machine__destroy_kernel_maps(struct machine *machine)
502{
503 enum map_type type;
504
505 for (type = 0; type < MAP__NR_TYPES; ++type) {
506 struct kmap *kmap;
507
508 if (machine->vmlinux_maps[type] == NULL)
509 continue;
510
511 kmap = map__kmap(machine->vmlinux_maps[type]);
512 map_groups__remove(&machine->kmaps,
513 machine->vmlinux_maps[type]);
514 if (kmap->ref_reloc_sym) {
515 /*
516 * ref_reloc_sym is shared among all maps, so free just
517 * on one of them.
518 */
519 if (type == MAP__FUNCTION) {
520 free((char *)kmap->ref_reloc_sym->name);
521 kmap->ref_reloc_sym->name = NULL;
522 free(kmap->ref_reloc_sym);
523 }
524 kmap->ref_reloc_sym = NULL;
525 }
526
527 map__delete(machine->vmlinux_maps[type]);
528 machine->vmlinux_maps[type] = NULL;
529 }
530}
531
532int machines__create_guest_kernel_maps(struct machines *machines)
533{
534 int ret = 0;
535 struct dirent **namelist = NULL;
536 int i, items = 0;
537 char path[PATH_MAX];
538 pid_t pid;
539 char *endp;
540
541 if (symbol_conf.default_guest_vmlinux_name ||
542 symbol_conf.default_guest_modules ||
543 symbol_conf.default_guest_kallsyms) {
544 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
545 }
546
547 if (symbol_conf.guestmount) {
548 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
549 if (items <= 0)
550 return -ENOENT;
551 for (i = 0; i < items; i++) {
552 if (!isdigit(namelist[i]->d_name[0])) {
553 /* Filter out . and .. */
554 continue;
555 }
556 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
557 if ((*endp != '\0') ||
558 (endp == namelist[i]->d_name) ||
559 (errno == ERANGE)) {
560 pr_debug("invalid directory (%s). Skipping.\n",
561 namelist[i]->d_name);
562 continue;
563 }
564 sprintf(path, "%s/%s/proc/kallsyms",
565 symbol_conf.guestmount,
566 namelist[i]->d_name);
567 ret = access(path, R_OK);
568 if (ret) {
569 pr_debug("Can't access file %s\n", path);
570 goto failure;
571 }
572 machines__create_kernel_maps(machines, pid);
573 }
574failure:
575 free(namelist);
576 }
577
578 return ret;
579}
580
581void machines__destroy_kernel_maps(struct machines *machines)
582{
583 struct rb_node *next = rb_first(&machines->guests);
584
585 machine__destroy_kernel_maps(&machines->host);
586
587 while (next) {
588 struct machine *pos = rb_entry(next, struct machine, rb_node);
589
590 next = rb_next(&pos->rb_node);
591 rb_erase(&pos->rb_node, &machines->guests);
592 machine__delete(pos);
593 }
594}
595
596int machines__create_kernel_maps(struct machines *machines, pid_t pid)
597{
598 struct machine *machine = machines__findnew(machines, pid);
599
600 if (machine == NULL)
601 return -1;
602
603 return machine__create_kernel_maps(machine);
604}
605
606int machine__load_kallsyms(struct machine *machine, const char *filename,
607 enum map_type type, symbol_filter_t filter)
608{
609 struct map *map = machine->vmlinux_maps[type];
610 int ret = dso__load_kallsyms(map->dso, filename, map, filter);
611
612 if (ret > 0) {
613 dso__set_loaded(map->dso, type);
614 /*
615 * Since /proc/kallsyms will have multiple sessions for the
616 * kernel, with modules between them, fixup the end of all
617 * sections.
618 */
619 __map_groups__fixup_end(&machine->kmaps, type);
620 }
621
622 return ret;
623}
624
625int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
626 symbol_filter_t filter)
627{
628 struct map *map = machine->vmlinux_maps[type];
629 int ret = dso__load_vmlinux_path(map->dso, map, filter);
630
631 if (ret > 0) {
632 dso__set_loaded(map->dso, type);
633 map__reloc_vmlinux(map);
634 }
635
636 return ret;
637}
638
639static void map_groups__fixup_end(struct map_groups *mg)
640{
641 int i;
642 for (i = 0; i < MAP__NR_TYPES; ++i)
643 __map_groups__fixup_end(mg, i);
644}
645
646static char *get_kernel_version(const char *root_dir)
647{
648 char version[PATH_MAX];
649 FILE *file;
650 char *name, *tmp;
651 const char *prefix = "Linux version ";
652
653 sprintf(version, "%s/proc/version", root_dir);
654 file = fopen(version, "r");
655 if (!file)
656 return NULL;
657
658 version[0] = '\0';
659 tmp = fgets(version, sizeof(version), file);
660 fclose(file);
661
662 name = strstr(version, prefix);
663 if (!name)
664 return NULL;
665 name += strlen(prefix);
666 tmp = strchr(name, ' ');
667 if (tmp)
668 *tmp = '\0';
669
670 return strdup(name);
671}
672
673static int map_groups__set_modules_path_dir(struct map_groups *mg,
674 const char *dir_name)
675{
676 struct dirent *dent;
677 DIR *dir = opendir(dir_name);
678 int ret = 0;
679
680 if (!dir) {
681 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
682 return -1;
683 }
684
685 while ((dent = readdir(dir)) != NULL) {
686 char path[PATH_MAX];
687 struct stat st;
688
 689 /* sshfs might return bad dent->d_type, so we have to stat */
690 snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
691 if (stat(path, &st))
692 continue;
693
694 if (S_ISDIR(st.st_mode)) {
695 if (!strcmp(dent->d_name, ".") ||
696 !strcmp(dent->d_name, ".."))
697 continue;
698
699 ret = map_groups__set_modules_path_dir(mg, path);
700 if (ret < 0)
701 goto out;
702 } else {
703 char *dot = strrchr(dent->d_name, '.'),
704 dso_name[PATH_MAX];
705 struct map *map;
706 char *long_name;
707
708 if (dot == NULL || strcmp(dot, ".ko"))
709 continue;
710 snprintf(dso_name, sizeof(dso_name), "[%.*s]",
711 (int)(dot - dent->d_name), dent->d_name);
712
713 strxfrchar(dso_name, '-', '_');
714 map = map_groups__find_by_name(mg, MAP__FUNCTION,
715 dso_name);
716 if (map == NULL)
717 continue;
718
719 long_name = strdup(path);
720 if (long_name == NULL) {
721 ret = -1;
722 goto out;
723 }
724 dso__set_long_name(map->dso, long_name);
725 map->dso->lname_alloc = 1;
726 dso__kernel_module_get_build_id(map->dso, "");
727 }
728 }
729
730out:
731 closedir(dir);
732 return ret;
733}
734
735static int machine__set_modules_path(struct machine *machine)
736{
737 char *version;
738 char modules_path[PATH_MAX];
739
740 version = get_kernel_version(machine->root_dir);
741 if (!version)
742 return -1;
743
744 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
745 machine->root_dir, version);
746 free(version);
747
748 return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
749}
750
751static int machine__create_modules(struct machine *machine)
752{
753 char *line = NULL;
754 size_t n;
755 FILE *file;
756 struct map *map;
757 const char *modules;
758 char path[PATH_MAX];
759
760 if (machine__is_default_guest(machine))
761 modules = symbol_conf.default_guest_modules;
762 else {
763 sprintf(path, "%s/proc/modules", machine->root_dir);
764 modules = path;
765 }
766
767 if (symbol__restricted_filename(path, "/proc/modules"))
768 return -1;
769
770 file = fopen(modules, "r");
771 if (file == NULL)
772 return -1;
773
774 while (!feof(file)) {
775 char name[PATH_MAX];
776 u64 start;
777 char *sep;
778 int line_len;
779
780 line_len = getline(&line, &n, file);
781 if (line_len < 0)
782 break;
783
784 if (!line)
785 goto out_failure;
786
787 line[--line_len] = '\0'; /* \n */
788
789 sep = strrchr(line, 'x');
790 if (sep == NULL)
791 continue;
792
793 hex2u64(sep + 1, &start);
794
795 sep = strchr(line, ' ');
796 if (sep == NULL)
797 continue;
798
799 *sep = '\0';
800
801 snprintf(name, sizeof(name), "[%s]", line);
802 map = machine__new_module(machine, start, name);
803 if (map == NULL)
804 goto out_delete_line;
805 dso__kernel_module_get_build_id(map->dso, machine->root_dir);
806 }
807
808 free(line);
809 fclose(file);
810
811 return machine__set_modules_path(machine);
812
813out_delete_line:
814 free(line);
815out_failure:
816 return -1;
817}
818
819int machine__create_kernel_maps(struct machine *machine)
820{
821 struct dso *kernel = machine__get_kernel(machine);
822
823 if (kernel == NULL ||
824 __machine__create_kernel_maps(machine, kernel) < 0)
825 return -1;
826
827 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
828 if (machine__is_host(machine))
829 pr_debug("Problems creating module maps, "
830 "continuing anyway...\n");
831 else
832 pr_debug("Problems creating module maps for guest %d, "
833 "continuing anyway...\n", machine->pid);
834 }
835
836 /*
837 * Now that we have all the maps created, just set the ->end of them:
838 */
839 map_groups__fixup_end(&machine->kmaps);
840 return 0;
841}
842
267static void machine__set_kernel_mmap_len(struct machine *machine, 843static void machine__set_kernel_mmap_len(struct machine *machine,
268 union perf_event *event) 844 union perf_event *event)
269{ 845{
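The large hunk above pulls kernel, guest and module map construction into machine.c: machine__new_module(), machine__create_kernel_maps(), machines__create_guest_kernel_maps() and friends. A hedged sketch of the host bring-up path, assuming a struct machines initialized elsewhere:

        /* illustrative only: module-map failures are reported but not fatal */
        if (machine__create_kernel_maps(&machines.host) < 0)
                pr_err("problems creating kernel maps for the host\n");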
@@ -379,6 +955,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
379 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 955 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
380 struct thread *thread; 956 struct thread *thread;
381 struct map *map; 957 struct map *map;
958 enum map_type type;
382 int ret = 0; 959 int ret = 0;
383 960
384 if (dump_trace) 961 if (dump_trace)
@@ -395,10 +972,17 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
395 thread = machine__findnew_thread(machine, event->mmap.pid); 972 thread = machine__findnew_thread(machine, event->mmap.pid);
396 if (thread == NULL) 973 if (thread == NULL)
397 goto out_problem; 974 goto out_problem;
975
976 if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
977 type = MAP__VARIABLE;
978 else
979 type = MAP__FUNCTION;
980
398 map = map__new(&machine->user_dsos, event->mmap.start, 981 map = map__new(&machine->user_dsos, event->mmap.start,
399 event->mmap.len, event->mmap.pgoff, 982 event->mmap.len, event->mmap.pgoff,
400 event->mmap.pid, event->mmap.filename, 983 event->mmap.pid, event->mmap.filename,
401 MAP__FUNCTION); 984 type);
985
402 if (map == NULL) 986 if (map == NULL)
403 goto out_problem; 987 goto out_problem;
404 988
@@ -427,6 +1011,17 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
427 return 0; 1011 return 0;
428} 1012}
429 1013
1014static void machine__remove_thread(struct machine *machine, struct thread *th)
1015{
1016 machine->last_match = NULL;
1017 rb_erase(&th->rb_node, &machine->threads);
1018 /*
1019 * We may have references to this thread, for instance in some hist_entry
1020 * instances, so just move them to a separate list.
1021 */
1022 list_add_tail(&th->node, &machine->dead_threads);
1023}
1024
430int machine__process_exit_event(struct machine *machine, union perf_event *event) 1025int machine__process_exit_event(struct machine *machine, union perf_event *event)
431{ 1026{
432 struct thread *thread = machine__find_thread(machine, event->fork.tid); 1027 struct thread *thread = machine__find_thread(machine, event->fork.tid);
@@ -462,3 +1057,210 @@ int machine__process_event(struct machine *machine, union perf_event *event)
462 1057
463 return ret; 1058 return ret;
464} 1059}
1060
1061static bool symbol__match_parent_regex(struct symbol *sym)
1062{
1063 if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
1064 return 1;
1065
1066 return 0;
1067}
1068
1069static const u8 cpumodes[] = {
1070 PERF_RECORD_MISC_USER,
1071 PERF_RECORD_MISC_KERNEL,
1072 PERF_RECORD_MISC_GUEST_USER,
1073 PERF_RECORD_MISC_GUEST_KERNEL
1074};
1075#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
1076
1077static void ip__resolve_ams(struct machine *machine, struct thread *thread,
1078 struct addr_map_symbol *ams,
1079 u64 ip)
1080{
1081 struct addr_location al;
1082 size_t i;
1083 u8 m;
1084
1085 memset(&al, 0, sizeof(al));
1086
1087 for (i = 0; i < NCPUMODES; i++) {
1088 m = cpumodes[i];
1089 /*
1090 * We cannot use the header.misc hint to determine whether a
1091 * branch stack address is user, kernel, guest, hypervisor.
1092 * Branches may straddle the kernel/user/hypervisor boundaries.
1093 * Thus, we have to try consecutively until we find a match
1094 * or else, the symbol is unknown
1095 */
1096 thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
1097 ip, &al, NULL);
1098 if (al.sym)
1099 goto found;
1100 }
1101found:
1102 ams->addr = ip;
1103 ams->al_addr = al.addr;
1104 ams->sym = al.sym;
1105 ams->map = al.map;
1106}
1107
1108static void ip__resolve_data(struct machine *machine, struct thread *thread,
1109 u8 m, struct addr_map_symbol *ams, u64 addr)
1110{
1111 struct addr_location al;
1112
1113 memset(&al, 0, sizeof(al));
1114
1115 thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr, &al,
1116 NULL);
1117 ams->addr = addr;
1118 ams->al_addr = al.addr;
1119 ams->sym = al.sym;
1120 ams->map = al.map;
1121}
1122
1123struct mem_info *machine__resolve_mem(struct machine *machine,
1124 struct thread *thr,
1125 struct perf_sample *sample,
1126 u8 cpumode)
1127{
1128 struct mem_info *mi = zalloc(sizeof(*mi));
1129
1130 if (!mi)
1131 return NULL;
1132
1133 ip__resolve_ams(machine, thr, &mi->iaddr, sample->ip);
1134 ip__resolve_data(machine, thr, cpumode, &mi->daddr, sample->addr);
1135 mi->data_src.val = sample->data_src;
1136
1137 return mi;
1138}
1139
1140struct branch_info *machine__resolve_bstack(struct machine *machine,
1141 struct thread *thr,
1142 struct branch_stack *bs)
1143{
1144 struct branch_info *bi;
1145 unsigned int i;
1146
1147 bi = calloc(bs->nr, sizeof(struct branch_info));
1148 if (!bi)
1149 return NULL;
1150
1151 for (i = 0; i < bs->nr; i++) {
1152 ip__resolve_ams(machine, thr, &bi[i].to, bs->entries[i].to);
1153 ip__resolve_ams(machine, thr, &bi[i].from, bs->entries[i].from);
1154 bi[i].flags = bs->entries[i].flags;
1155 }
1156 return bi;
1157}
1158
1159static int machine__resolve_callchain_sample(struct machine *machine,
1160 struct thread *thread,
1161 struct ip_callchain *chain,
1162 struct symbol **parent)
1163
1164{
1165 u8 cpumode = PERF_RECORD_MISC_USER;
1166 unsigned int i;
1167 int err;
1168
1169 callchain_cursor_reset(&callchain_cursor);
1170
1171 if (chain->nr > PERF_MAX_STACK_DEPTH) {
1172 pr_warning("corrupted callchain. skipping...\n");
1173 return 0;
1174 }
1175
1176 for (i = 0; i < chain->nr; i++) {
1177 u64 ip;
1178 struct addr_location al;
1179
1180 if (callchain_param.order == ORDER_CALLEE)
1181 ip = chain->ips[i];
1182 else
1183 ip = chain->ips[chain->nr - i - 1];
1184
1185 if (ip >= PERF_CONTEXT_MAX) {
1186 switch (ip) {
1187 case PERF_CONTEXT_HV:
1188 cpumode = PERF_RECORD_MISC_HYPERVISOR;
1189 break;
1190 case PERF_CONTEXT_KERNEL:
1191 cpumode = PERF_RECORD_MISC_KERNEL;
1192 break;
1193 case PERF_CONTEXT_USER:
1194 cpumode = PERF_RECORD_MISC_USER;
1195 break;
1196 default:
1197 pr_debug("invalid callchain context: "
1198 "%"PRId64"\n", (s64) ip);
1199 /*
1200 * It seems the callchain is corrupted.
1201 * Discard all.
1202 */
1203 callchain_cursor_reset(&callchain_cursor);
1204 return 0;
1205 }
1206 continue;
1207 }
1208
1209 al.filtered = false;
1210 thread__find_addr_location(thread, machine, cpumode,
1211 MAP__FUNCTION, ip, &al, NULL);
1212 if (al.sym != NULL) {
1213 if (sort__has_parent && !*parent &&
1214 symbol__match_parent_regex(al.sym))
1215 *parent = al.sym;
1216 if (!symbol_conf.use_callchain)
1217 break;
1218 }
1219
1220 err = callchain_cursor_append(&callchain_cursor,
1221 ip, al.map, al.sym);
1222 if (err)
1223 return err;
1224 }
1225
1226 return 0;
1227}
1228
1229static int unwind_entry(struct unwind_entry *entry, void *arg)
1230{
1231 struct callchain_cursor *cursor = arg;
1232 return callchain_cursor_append(cursor, entry->ip,
1233 entry->map, entry->sym);
1234}
1235
1236int machine__resolve_callchain(struct machine *machine,
1237 struct perf_evsel *evsel,
1238 struct thread *thread,
1239 struct perf_sample *sample,
1240 struct symbol **parent)
1241
1242{
1243 int ret;
1244
1245 callchain_cursor_reset(&callchain_cursor);
1246
1247 ret = machine__resolve_callchain_sample(machine, thread,
1248 sample->callchain, parent);
1249 if (ret)
1250 return ret;
1251
1252 /* Can we do dwarf post unwind? */
1253 if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
1254 (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
1255 return 0;
1256
1257 /* Bail out if nothing was captured. */
1258 if ((!sample->user_regs.regs) ||
1259 (!sample->user_stack.size))
1260 return 0;
1261
1262 return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
1263 thread, evsel->attr.sample_regs_user,
1264 sample);
1265
1266}
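machine.c also gains machine__resolve_mem() next to the branch-stack and callchain resolvers, all hanging off the struct machines container declared in machine.h below. A minimal lifecycle sketch, with id_hdr_size standing in for the value taken from the evlist:

        static void sketch_machines_lifecycle(u16 id_hdr_size)
        {
                struct machines machines;

                machines__init(&machines);              /* host plus an empty guest tree */
                machines__set_id_hdr_size(&machines, id_hdr_size);
                /* ... resolve samples via machines__find()/machines__findnew() ... */
                machines__exit(&machines);              /* tears down the host; guests are still XXX */
        }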
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index b7cde7467d55..77940680f1fc 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -47,26 +47,38 @@ int machine__process_event(struct machine *machine, union perf_event *event);
47 47
48typedef void (*machine__process_t)(struct machine *machine, void *data); 48typedef void (*machine__process_t)(struct machine *machine, void *data);
49 49
50void machines__process(struct rb_root *machines, 50struct machines {
51 machine__process_t process, void *data); 51 struct machine host;
52 struct rb_root guests;
53};
54
55void machines__init(struct machines *machines);
56void machines__exit(struct machines *machines);
52 57
53struct machine *machines__add(struct rb_root *machines, pid_t pid, 58void machines__process_guests(struct machines *machines,
59 machine__process_t process, void *data);
60
61struct machine *machines__add(struct machines *machines, pid_t pid,
54 const char *root_dir); 62 const char *root_dir);
55struct machine *machines__find_host(struct rb_root *machines); 63struct machine *machines__find_host(struct machines *machines);
56struct machine *machines__find(struct rb_root *machines, pid_t pid); 64struct machine *machines__find(struct machines *machines, pid_t pid);
57struct machine *machines__findnew(struct rb_root *machines, pid_t pid); 65struct machine *machines__findnew(struct machines *machines, pid_t pid);
58 66
59void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size); 67void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size);
60char *machine__mmap_name(struct machine *machine, char *bf, size_t size); 68char *machine__mmap_name(struct machine *machine, char *bf, size_t size);
61 69
62int machine__init(struct machine *machine, const char *root_dir, pid_t pid); 70int machine__init(struct machine *machine, const char *root_dir, pid_t pid);
63void machine__exit(struct machine *machine); 71void machine__exit(struct machine *machine);
72void machine__delete_dead_threads(struct machine *machine);
73void machine__delete_threads(struct machine *machine);
64void machine__delete(struct machine *machine); 74void machine__delete(struct machine *machine);
65 75
66
67struct branch_info *machine__resolve_bstack(struct machine *machine, 76struct branch_info *machine__resolve_bstack(struct machine *machine,
68 struct thread *thread, 77 struct thread *thread,
69 struct branch_stack *bs); 78 struct branch_stack *bs);
79struct mem_info *machine__resolve_mem(struct machine *machine,
80 struct thread *thread,
81 struct perf_sample *sample, u8 cpumode);
70int machine__resolve_callchain(struct machine *machine, 82int machine__resolve_callchain(struct machine *machine,
71 struct perf_evsel *evsel, 83 struct perf_evsel *evsel,
72 struct thread *thread, 84 struct thread *thread,
@@ -88,7 +100,6 @@ static inline bool machine__is_host(struct machine *machine)
88} 100}
89 101
90struct thread *machine__findnew_thread(struct machine *machine, pid_t pid); 102struct thread *machine__findnew_thread(struct machine *machine, pid_t pid);
91void machine__remove_thread(struct machine *machine, struct thread *th);
92 103
93size_t machine__fprintf(struct machine *machine, FILE *fp); 104size_t machine__fprintf(struct machine *machine, FILE *fp);
94 105
@@ -129,19 +140,19 @@ int machine__load_kallsyms(struct machine *machine, const char *filename,
129int machine__load_vmlinux_path(struct machine *machine, enum map_type type, 140int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
130 symbol_filter_t filter); 141 symbol_filter_t filter);
131 142
132size_t machine__fprintf_dsos_buildid(struct machine *machine, 143size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
133 FILE *fp, bool with_hits); 144 bool (skip)(struct dso *dso, int parm), int parm);
134size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); 145size_t machines__fprintf_dsos(struct machines *machines, FILE *fp);
135size_t machines__fprintf_dsos_buildid(struct rb_root *machines, 146size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
136 FILE *fp, bool with_hits); 147 bool (skip)(struct dso *dso, int parm), int parm);
137 148
138void machine__destroy_kernel_maps(struct machine *machine); 149void machine__destroy_kernel_maps(struct machine *machine);
139int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); 150int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel);
140int machine__create_kernel_maps(struct machine *machine); 151int machine__create_kernel_maps(struct machine *machine);
141 152
142int machines__create_kernel_maps(struct rb_root *machines, pid_t pid); 153int machines__create_kernel_maps(struct machines *machines, pid_t pid);
143int machines__create_guest_kernel_maps(struct rb_root *machines); 154int machines__create_guest_kernel_maps(struct machines *machines);
144void machines__destroy_guest_kernel_maps(struct rb_root *machines); 155void machines__destroy_kernel_maps(struct machines *machines);
145 156
146size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); 157size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp);
147 158
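
A rough usage sketch for the new 'struct machines' container declared above, which replaces the bare rb_root plus separate host machine. It assumes compilation inside the perf tree (so "machine.h" is on the include path); machines_example() is a hypothetical caller and the guest pid is made up.

#include "machine.h"

static void machines_example(void)
{
	struct machines machines;
	struct machine *host, *guest;

	machines__init(&machines);		/* host machine + empty guest rb tree */

	host  = machines__find_host(&machines);
	guest = machines__findnew(&machines, 1234);	/* hypothetical guest pid */

	if (host && guest)
		machines__set_id_hdr_size(&machines, 0);

	machines__exit(&machines);		/* counterpart of machines__init() */
}
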
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 0328d45c4f2a..6fcb9de62340 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -11,6 +11,7 @@
11#include "strlist.h" 11#include "strlist.h"
12#include "vdso.h" 12#include "vdso.h"
13#include "build-id.h" 13#include "build-id.h"
14#include <linux/string.h>
14 15
15const char *map_type__name[MAP__NR_TYPES] = { 16const char *map_type__name[MAP__NR_TYPES] = {
16 [MAP__FUNCTION] = "Functions", 17 [MAP__FUNCTION] = "Functions",
@@ -19,7 +20,8 @@ const char *map_type__name[MAP__NR_TYPES] = {
19 20
20static inline int is_anon_memory(const char *filename) 21static inline int is_anon_memory(const char *filename)
21{ 22{
22 return strcmp(filename, "//anon") == 0; 23 return !strcmp(filename, "//anon") ||
24 !strcmp(filename, "/anon_hugepage (deleted)");
23} 25}
24 26
25static inline int is_no_dso_memory(const char *filename) 27static inline int is_no_dso_memory(const char *filename)
@@ -28,29 +30,29 @@ static inline int is_no_dso_memory(const char *filename)
28 !strcmp(filename, "[heap]"); 30 !strcmp(filename, "[heap]");
29} 31}
30 32
31void map__init(struct map *self, enum map_type type, 33void map__init(struct map *map, enum map_type type,
32 u64 start, u64 end, u64 pgoff, struct dso *dso) 34 u64 start, u64 end, u64 pgoff, struct dso *dso)
33{ 35{
34 self->type = type; 36 map->type = type;
35 self->start = start; 37 map->start = start;
36 self->end = end; 38 map->end = end;
37 self->pgoff = pgoff; 39 map->pgoff = pgoff;
38 self->dso = dso; 40 map->dso = dso;
39 self->map_ip = map__map_ip; 41 map->map_ip = map__map_ip;
40 self->unmap_ip = map__unmap_ip; 42 map->unmap_ip = map__unmap_ip;
41 RB_CLEAR_NODE(&self->rb_node); 43 RB_CLEAR_NODE(&map->rb_node);
42 self->groups = NULL; 44 map->groups = NULL;
43 self->referenced = false; 45 map->referenced = false;
44 self->erange_warned = false; 46 map->erange_warned = false;
45} 47}
46 48
47struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, 49struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
48 u64 pgoff, u32 pid, char *filename, 50 u64 pgoff, u32 pid, char *filename,
49 enum map_type type) 51 enum map_type type)
50{ 52{
51 struct map *self = malloc(sizeof(*self)); 53 struct map *map = malloc(sizeof(*map));
52 54
53 if (self != NULL) { 55 if (map != NULL) {
54 char newfilename[PATH_MAX]; 56 char newfilename[PATH_MAX];
55 struct dso *dso; 57 struct dso *dso;
56 int anon, no_dso, vdso; 58 int anon, no_dso, vdso;
@@ -73,10 +75,10 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
73 if (dso == NULL) 75 if (dso == NULL)
74 goto out_delete; 76 goto out_delete;
75 77
76 map__init(self, type, start, start + len, pgoff, dso); 78 map__init(map, type, start, start + len, pgoff, dso);
77 79
78 if (anon || no_dso) { 80 if (anon || no_dso) {
79 self->map_ip = self->unmap_ip = identity__map_ip; 81 map->map_ip = map->unmap_ip = identity__map_ip;
80 82
81 /* 83 /*
82 * Set memory without DSO as loaded. All map__find_* 84 * Set memory without DSO as loaded. All map__find_*
@@ -84,12 +86,12 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
84 * unnecessary map__load warning. 86 * unnecessary map__load warning.
85 */ 87 */
86 if (no_dso) 88 if (no_dso)
87 dso__set_loaded(dso, self->type); 89 dso__set_loaded(dso, map->type);
88 } 90 }
89 } 91 }
90 return self; 92 return map;
91out_delete: 93out_delete:
92 free(self); 94 free(map);
93 return NULL; 95 return NULL;
94} 96}
95 97
@@ -112,48 +114,48 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
112 return map; 114 return map;
113} 115}
114 116
115void map__delete(struct map *self) 117void map__delete(struct map *map)
116{ 118{
117 free(self); 119 free(map);
118} 120}
119 121
120void map__fixup_start(struct map *self) 122void map__fixup_start(struct map *map)
121{ 123{
122 struct rb_root *symbols = &self->dso->symbols[self->type]; 124 struct rb_root *symbols = &map->dso->symbols[map->type];
123 struct rb_node *nd = rb_first(symbols); 125 struct rb_node *nd = rb_first(symbols);
124 if (nd != NULL) { 126 if (nd != NULL) {
125 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); 127 struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
126 self->start = sym->start; 128 map->start = sym->start;
127 } 129 }
128} 130}
129 131
130void map__fixup_end(struct map *self) 132void map__fixup_end(struct map *map)
131{ 133{
132 struct rb_root *symbols = &self->dso->symbols[self->type]; 134 struct rb_root *symbols = &map->dso->symbols[map->type];
133 struct rb_node *nd = rb_last(symbols); 135 struct rb_node *nd = rb_last(symbols);
134 if (nd != NULL) { 136 if (nd != NULL) {
135 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); 137 struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
136 self->end = sym->end; 138 map->end = sym->end;
137 } 139 }
138} 140}
139 141
140#define DSO__DELETED "(deleted)" 142#define DSO__DELETED "(deleted)"
141 143
142int map__load(struct map *self, symbol_filter_t filter) 144int map__load(struct map *map, symbol_filter_t filter)
143{ 145{
144 const char *name = self->dso->long_name; 146 const char *name = map->dso->long_name;
145 int nr; 147 int nr;
146 148
147 if (dso__loaded(self->dso, self->type)) 149 if (dso__loaded(map->dso, map->type))
148 return 0; 150 return 0;
149 151
150 nr = dso__load(self->dso, self, filter); 152 nr = dso__load(map->dso, map, filter);
151 if (nr < 0) { 153 if (nr < 0) {
152 if (self->dso->has_build_id) { 154 if (map->dso->has_build_id) {
153 char sbuild_id[BUILD_ID_SIZE * 2 + 1]; 155 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
154 156
155 build_id__sprintf(self->dso->build_id, 157 build_id__sprintf(map->dso->build_id,
156 sizeof(self->dso->build_id), 158 sizeof(map->dso->build_id),
157 sbuild_id); 159 sbuild_id);
158 pr_warning("%s with build id %s not found", 160 pr_warning("%s with build id %s not found",
159 name, sbuild_id); 161 name, sbuild_id);
@@ -183,43 +185,36 @@ int map__load(struct map *self, symbol_filter_t filter)
183 * Only applies to the kernel, as its symtabs aren't relative like the 185 * Only applies to the kernel, as its symtabs aren't relative like the
184 * module ones. 186 * module ones.
185 */ 187 */
186 if (self->dso->kernel) 188 if (map->dso->kernel)
187 map__reloc_vmlinux(self); 189 map__reloc_vmlinux(map);
188 190
189 return 0; 191 return 0;
190} 192}
191 193
192struct symbol *map__find_symbol(struct map *self, u64 addr, 194struct symbol *map__find_symbol(struct map *map, u64 addr,
193 symbol_filter_t filter) 195 symbol_filter_t filter)
194{ 196{
195 if (map__load(self, filter) < 0) 197 if (map__load(map, filter) < 0)
196 return NULL; 198 return NULL;
197 199
198 return dso__find_symbol(self->dso, self->type, addr); 200 return dso__find_symbol(map->dso, map->type, addr);
199} 201}
200 202
201struct symbol *map__find_symbol_by_name(struct map *self, const char *name, 203struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
202 symbol_filter_t filter) 204 symbol_filter_t filter)
203{ 205{
204 if (map__load(self, filter) < 0) 206 if (map__load(map, filter) < 0)
205 return NULL; 207 return NULL;
206 208
207 if (!dso__sorted_by_name(self->dso, self->type)) 209 if (!dso__sorted_by_name(map->dso, map->type))
208 dso__sort_by_name(self->dso, self->type); 210 dso__sort_by_name(map->dso, map->type);
209 211
210 return dso__find_symbol_by_name(self->dso, self->type, name); 212 return dso__find_symbol_by_name(map->dso, map->type, name);
211} 213}
212 214
213struct map *map__clone(struct map *self) 215struct map *map__clone(struct map *map)
214{ 216{
215 struct map *map = malloc(sizeof(*self)); 217 return memdup(map, sizeof(*map));
216
217 if (!map)
218 return NULL;
219
220 memcpy(map, self, sizeof(*self));
221
222 return map;
223} 218}
224 219
225int map__overlap(struct map *l, struct map *r) 220int map__overlap(struct map *l, struct map *r)
@@ -236,10 +231,10 @@ int map__overlap(struct map *l, struct map *r)
236 return 0; 231 return 0;
237} 232}
238 233
239size_t map__fprintf(struct map *self, FILE *fp) 234size_t map__fprintf(struct map *map, FILE *fp)
240{ 235{
241 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", 236 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
242 self->start, self->end, self->pgoff, self->dso->name); 237 map->start, map->end, map->pgoff, map->dso->name);
243} 238}
244 239
245size_t map__fprintf_dsoname(struct map *map, FILE *fp) 240size_t map__fprintf_dsoname(struct map *map, FILE *fp)
@@ -527,9 +522,9 @@ static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
527 return ip - (s64)map->pgoff; 522 return ip - (s64)map->pgoff;
528} 523}
529 524
530void map__reloc_vmlinux(struct map *self) 525void map__reloc_vmlinux(struct map *map)
531{ 526{
532 struct kmap *kmap = map__kmap(self); 527 struct kmap *kmap = map__kmap(map);
533 s64 reloc; 528 s64 reloc;
534 529
535 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr) 530 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
@@ -541,9 +536,9 @@ void map__reloc_vmlinux(struct map *self)
541 if (!reloc) 536 if (!reloc)
542 return; 537 return;
543 538
544 self->map_ip = map__reloc_map_ip; 539 map->map_ip = map__reloc_map_ip;
545 self->unmap_ip = map__reloc_unmap_ip; 540 map->unmap_ip = map__reloc_unmap_ip;
546 self->pgoff = reloc; 541 map->pgoff = reloc;
547} 542}
548 543
549void maps__insert(struct rb_root *maps, struct map *map) 544void maps__insert(struct rb_root *maps, struct map *map)
@@ -566,9 +561,9 @@ void maps__insert(struct rb_root *maps, struct map *map)
566 rb_insert_color(&map->rb_node, maps); 561 rb_insert_color(&map->rb_node, maps);
567} 562}
568 563
569void maps__remove(struct rb_root *self, struct map *map) 564void maps__remove(struct rb_root *maps, struct map *map)
570{ 565{
571 rb_erase(&map->rb_node, self); 566 rb_erase(&map->rb_node, maps);
572} 567}
573 568
574struct map *maps__find(struct rb_root *maps, u64 ip) 569struct map *maps__find(struct rb_root *maps, u64 ip)
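
The map__clone() rewrite above replaces the open-coded malloc-plus-memcpy with memdup() from the tools' <linux/string.h> (hence the new include at the top of map.c). A standalone equivalent with a made-up struct, for illustration only:

#include <stdlib.h>
#include <string.h>

static void *memdup_sketch(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);	/* same shallow copy the old open-coded clone did */
	return p;
}

struct demo_map {			/* hypothetical stand-in for struct map */
	unsigned long start, end, pgoff;
};

static struct demo_map *demo_map__clone(const struct demo_map *map)
{
	return memdup_sketch(map, sizeof(*map));
}
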
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index bcb39e2a6965..a887f2c9dfbb 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -57,9 +57,9 @@ struct map_groups {
57 struct machine *machine; 57 struct machine *machine;
58}; 58};
59 59
60static inline struct kmap *map__kmap(struct map *self) 60static inline struct kmap *map__kmap(struct map *map)
61{ 61{
62 return (struct kmap *)(self + 1); 62 return (struct kmap *)(map + 1);
63} 63}
64 64
65static inline u64 map__map_ip(struct map *map, u64 ip) 65static inline u64 map__map_ip(struct map *map, u64 ip)
@@ -85,27 +85,27 @@ struct symbol;
85 85
86typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); 86typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
87 87
88void map__init(struct map *self, enum map_type type, 88void map__init(struct map *map, enum map_type type,
89 u64 start, u64 end, u64 pgoff, struct dso *dso); 89 u64 start, u64 end, u64 pgoff, struct dso *dso);
90struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, 90struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
91 u64 pgoff, u32 pid, char *filename, 91 u64 pgoff, u32 pid, char *filename,
92 enum map_type type); 92 enum map_type type);
93struct map *map__new2(u64 start, struct dso *dso, enum map_type type); 93struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
94void map__delete(struct map *self); 94void map__delete(struct map *map);
95struct map *map__clone(struct map *self); 95struct map *map__clone(struct map *map);
96int map__overlap(struct map *l, struct map *r); 96int map__overlap(struct map *l, struct map *r);
97size_t map__fprintf(struct map *self, FILE *fp); 97size_t map__fprintf(struct map *map, FILE *fp);
98size_t map__fprintf_dsoname(struct map *map, FILE *fp); 98size_t map__fprintf_dsoname(struct map *map, FILE *fp);
99 99
100int map__load(struct map *self, symbol_filter_t filter); 100int map__load(struct map *map, symbol_filter_t filter);
101struct symbol *map__find_symbol(struct map *self, 101struct symbol *map__find_symbol(struct map *map,
102 u64 addr, symbol_filter_t filter); 102 u64 addr, symbol_filter_t filter);
103struct symbol *map__find_symbol_by_name(struct map *self, const char *name, 103struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
104 symbol_filter_t filter); 104 symbol_filter_t filter);
105void map__fixup_start(struct map *self); 105void map__fixup_start(struct map *map);
106void map__fixup_end(struct map *self); 106void map__fixup_end(struct map *map);
107 107
108void map__reloc_vmlinux(struct map *self); 108void map__reloc_vmlinux(struct map *map);
109 109
110size_t __map_groups__fprintf_maps(struct map_groups *mg, 110size_t __map_groups__fprintf_maps(struct map_groups *mg,
111 enum map_type type, int verbose, FILE *fp); 111 enum map_type type, int verbose, FILE *fp);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 2d8d53bec17e..6c8bb0fb189b 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -10,7 +10,7 @@
10#include "symbol.h" 10#include "symbol.h"
11#include "cache.h" 11#include "cache.h"
12#include "header.h" 12#include "header.h"
13#include "debugfs.h" 13#include <lk/debugfs.h>
14#include "parse-events-bison.h" 14#include "parse-events-bison.h"
15#define YY_EXTRA_TYPE int 15#define YY_EXTRA_TYPE int
16#include "parse-events-flex.h" 16#include "parse-events-flex.h"
@@ -380,8 +380,8 @@ static int add_tracepoint(struct list_head **listp, int *idx,
380 return 0; 380 return 0;
381} 381}
382 382
383static int add_tracepoint_multi(struct list_head **list, int *idx, 383static int add_tracepoint_multi_event(struct list_head **list, int *idx,
384 char *sys_name, char *evt_name) 384 char *sys_name, char *evt_name)
385{ 385{
386 char evt_path[MAXPATHLEN]; 386 char evt_path[MAXPATHLEN];
387 struct dirent *evt_ent; 387 struct dirent *evt_ent;
@@ -408,6 +408,47 @@ static int add_tracepoint_multi(struct list_head **list, int *idx,
408 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name); 408 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
409 } 409 }
410 410
411 closedir(evt_dir);
412 return ret;
413}
414
415static int add_tracepoint_event(struct list_head **list, int *idx,
416 char *sys_name, char *evt_name)
417{
418 return strpbrk(evt_name, "*?") ?
419 add_tracepoint_multi_event(list, idx, sys_name, evt_name) :
420 add_tracepoint(list, idx, sys_name, evt_name);
421}
422
423static int add_tracepoint_multi_sys(struct list_head **list, int *idx,
424 char *sys_name, char *evt_name)
425{
426 struct dirent *events_ent;
427 DIR *events_dir;
428 int ret = 0;
429
430 events_dir = opendir(tracing_events_path);
431 if (!events_dir) {
432 perror("Can't open event dir");
433 return -1;
434 }
435
436 while (!ret && (events_ent = readdir(events_dir))) {
437 if (!strcmp(events_ent->d_name, ".")
438 || !strcmp(events_ent->d_name, "..")
439 || !strcmp(events_ent->d_name, "enable")
440 || !strcmp(events_ent->d_name, "header_event")
441 || !strcmp(events_ent->d_name, "header_page"))
442 continue;
443
444 if (!strglobmatch(events_ent->d_name, sys_name))
445 continue;
446
447 ret = add_tracepoint_event(list, idx, events_ent->d_name,
448 evt_name);
449 }
450
451 closedir(events_dir);
411 return ret; 452 return ret;
412} 453}
413 454
@@ -420,9 +461,10 @@ int parse_events_add_tracepoint(struct list_head **list, int *idx,
420 if (ret) 461 if (ret)
421 return ret; 462 return ret;
422 463
423 return strpbrk(event, "*?") ? 464 if (strpbrk(sys, "*?"))
424 add_tracepoint_multi(list, idx, sys, event) : 465 return add_tracepoint_multi_sys(list, idx, sys, event);
425 add_tracepoint(list, idx, sys, event); 466 else
467 return add_tracepoint_event(list, idx, sys, event);
426} 468}
427 469
428static int 470static int
@@ -492,7 +534,7 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx,
492} 534}
493 535
494static int config_term(struct perf_event_attr *attr, 536static int config_term(struct perf_event_attr *attr,
495 struct parse_events__term *term) 537 struct parse_events_term *term)
496{ 538{
497#define CHECK_TYPE_VAL(type) \ 539#define CHECK_TYPE_VAL(type) \
498do { \ 540do { \
@@ -537,7 +579,7 @@ do { \
537static int config_attr(struct perf_event_attr *attr, 579static int config_attr(struct perf_event_attr *attr,
538 struct list_head *head, int fail) 580 struct list_head *head, int fail)
539{ 581{
540 struct parse_events__term *term; 582 struct parse_events_term *term;
541 583
542 list_for_each_entry(term, head, list) 584 list_for_each_entry(term, head, list)
543 if (config_term(attr, term) && fail) 585 if (config_term(attr, term) && fail)
@@ -563,14 +605,14 @@ int parse_events_add_numeric(struct list_head **list, int *idx,
563 return add_event(list, idx, &attr, NULL); 605 return add_event(list, idx, &attr, NULL);
564} 606}
565 607
566static int parse_events__is_name_term(struct parse_events__term *term) 608static int parse_events__is_name_term(struct parse_events_term *term)
567{ 609{
568 return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME; 610 return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
569} 611}
570 612
571static char *pmu_event_name(struct list_head *head_terms) 613static char *pmu_event_name(struct list_head *head_terms)
572{ 614{
573 struct parse_events__term *term; 615 struct parse_events_term *term;
574 616
575 list_for_each_entry(term, head_terms, list) 617 list_for_each_entry(term, head_terms, list)
576 if (parse_events__is_name_term(term)) 618 if (parse_events__is_name_term(term))
@@ -657,14 +699,6 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
657 int exclude = eu | ek | eh; 699 int exclude = eu | ek | eh;
658 int exclude_GH = evsel ? evsel->exclude_GH : 0; 700 int exclude_GH = evsel ? evsel->exclude_GH : 0;
659 701
660 /*
661 * We are here for group and 'GH' was not set as event
662 * modifier and whatever event/group modifier override
663 * default 'GH' setup.
664 */
665 if (evsel && !exclude_GH)
666 eH = eG = 0;
667
668 memset(mod, 0, sizeof(*mod)); 702 memset(mod, 0, sizeof(*mod));
669 703
670 while (*str) { 704 while (*str) {
@@ -814,7 +848,7 @@ static int parse_events__scanner(const char *str, void *data, int start_token)
814 */ 848 */
815int parse_events_terms(struct list_head *terms, const char *str) 849int parse_events_terms(struct list_head *terms, const char *str)
816{ 850{
817 struct parse_events_data__terms data = { 851 struct parse_events_terms data = {
818 .terms = NULL, 852 .terms = NULL,
819 }; 853 };
820 int ret; 854 int ret;
@@ -830,10 +864,9 @@ int parse_events_terms(struct list_head *terms, const char *str)
830 return ret; 864 return ret;
831} 865}
832 866
833int parse_events(struct perf_evlist *evlist, const char *str, 867int parse_events(struct perf_evlist *evlist, const char *str)
834 int unset __maybe_unused)
835{ 868{
836 struct parse_events_data__events data = { 869 struct parse_events_evlist data = {
837 .list = LIST_HEAD_INIT(data.list), 870 .list = LIST_HEAD_INIT(data.list),
838 .idx = evlist->nr_entries, 871 .idx = evlist->nr_entries,
839 }; 872 };
@@ -843,6 +876,7 @@ int parse_events(struct perf_evlist *evlist, const char *str,
843 if (!ret) { 876 if (!ret) {
844 int entries = data.idx - evlist->nr_entries; 877 int entries = data.idx - evlist->nr_entries;
845 perf_evlist__splice_list_tail(evlist, &data.list, entries); 878 perf_evlist__splice_list_tail(evlist, &data.list, entries);
879 evlist->nr_groups += data.nr_groups;
846 return 0; 880 return 0;
847 } 881 }
848 882
@@ -858,7 +892,7 @@ int parse_events_option(const struct option *opt, const char *str,
858 int unset __maybe_unused) 892 int unset __maybe_unused)
859{ 893{
860 struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; 894 struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
861 int ret = parse_events(evlist, str, unset); 895 int ret = parse_events(evlist, str);
862 896
863 if (ret) { 897 if (ret) {
864 fprintf(stderr, "invalid or unsupported event: '%s'\n", str); 898 fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
@@ -1121,16 +1155,16 @@ void print_events(const char *event_glob, bool name_only)
1121 print_tracepoint_events(NULL, NULL, name_only); 1155 print_tracepoint_events(NULL, NULL, name_only);
1122} 1156}
1123 1157
1124int parse_events__is_hardcoded_term(struct parse_events__term *term) 1158int parse_events__is_hardcoded_term(struct parse_events_term *term)
1125{ 1159{
1126 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 1160 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
1127} 1161}
1128 1162
1129static int new_term(struct parse_events__term **_term, int type_val, 1163static int new_term(struct parse_events_term **_term, int type_val,
1130 int type_term, char *config, 1164 int type_term, char *config,
1131 char *str, u64 num) 1165 char *str, u64 num)
1132{ 1166{
1133 struct parse_events__term *term; 1167 struct parse_events_term *term;
1134 1168
1135 term = zalloc(sizeof(*term)); 1169 term = zalloc(sizeof(*term));
1136 if (!term) 1170 if (!term)
@@ -1156,21 +1190,21 @@ static int new_term(struct parse_events__term **_term, int type_val,
1156 return 0; 1190 return 0;
1157} 1191}
1158 1192
1159int parse_events__term_num(struct parse_events__term **term, 1193int parse_events_term__num(struct parse_events_term **term,
1160 int type_term, char *config, u64 num) 1194 int type_term, char *config, u64 num)
1161{ 1195{
1162 return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term, 1196 return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
1163 config, NULL, num); 1197 config, NULL, num);
1164} 1198}
1165 1199
1166int parse_events__term_str(struct parse_events__term **term, 1200int parse_events_term__str(struct parse_events_term **term,
1167 int type_term, char *config, char *str) 1201 int type_term, char *config, char *str)
1168{ 1202{
1169 return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term, 1203 return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
1170 config, str, 0); 1204 config, str, 0);
1171} 1205}
1172 1206
1173int parse_events__term_sym_hw(struct parse_events__term **term, 1207int parse_events_term__sym_hw(struct parse_events_term **term,
1174 char *config, unsigned idx) 1208 char *config, unsigned idx)
1175{ 1209{
1176 struct event_symbol *sym; 1210 struct event_symbol *sym;
@@ -1188,8 +1222,8 @@ int parse_events__term_sym_hw(struct parse_events__term **term,
1188 (char *) "event", (char *) sym->symbol, 0); 1222 (char *) "event", (char *) sym->symbol, 0);
1189} 1223}
1190 1224
1191int parse_events__term_clone(struct parse_events__term **new, 1225int parse_events_term__clone(struct parse_events_term **new,
1192 struct parse_events__term *term) 1226 struct parse_events_term *term)
1193{ 1227{
1194 return new_term(new, term->type_val, term->type_term, term->config, 1228 return new_term(new, term->type_val, term->type_term, term->config,
1195 term->val.str, term->val.num); 1229 term->val.str, term->val.num);
@@ -1197,7 +1231,7 @@ int parse_events__term_clone(struct parse_events__term **new,
1197 1231
1198void parse_events__free_terms(struct list_head *terms) 1232void parse_events__free_terms(struct list_head *terms)
1199{ 1233{
1200 struct parse_events__term *term, *h; 1234 struct parse_events_term *term, *h;
1201 1235
1202 list_for_each_entry_safe(term, h, terms, list) 1236 list_for_each_entry_safe(term, h, terms, list)
1203 free(term); 1237 free(term);
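
The wildcard handling added above can be restated standalone: a glob in the subsystem name now fans out over every matching event directory, while a glob only in the event name keeps the old per-subsystem path. route_tracepoint() below is a hypothetical helper that mirrors the dispatch order in parse_events_add_tracepoint(); strpbrk() is standard C.

#include <string.h>

enum tp_route { TP_MULTI_SYS, TP_MULTI_EVENT, TP_SINGLE };

static enum tp_route route_tracepoint(const char *sys, const char *evt)
{
	if (strpbrk(sys, "*?"))
		return TP_MULTI_SYS;	/* e.g. "sched*:sched_switch" */
	if (strpbrk(evt, "*?"))
		return TP_MULTI_EVENT;	/* e.g. "sched:sched_*"       */
	return TP_SINGLE;		/* exact "sys:event"          */
}
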
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index b7af80b8bdda..8a4859315fd9 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -29,8 +29,7 @@ const char *event_type(int type);
29 29
30extern int parse_events_option(const struct option *opt, const char *str, 30extern int parse_events_option(const struct option *opt, const char *str,
31 int unset); 31 int unset);
32extern int parse_events(struct perf_evlist *evlist, const char *str, 32extern int parse_events(struct perf_evlist *evlist, const char *str);
33 int unset);
34extern int parse_events_terms(struct list_head *terms, const char *str); 33extern int parse_events_terms(struct list_head *terms, const char *str);
35extern int parse_filter(const struct option *opt, const char *str, int unset); 34extern int parse_filter(const struct option *opt, const char *str, int unset);
36 35
@@ -51,7 +50,7 @@ enum {
51 PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE, 50 PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
52}; 51};
53 52
54struct parse_events__term { 53struct parse_events_term {
55 char *config; 54 char *config;
56 union { 55 union {
57 char *str; 56 char *str;
@@ -62,24 +61,25 @@ struct parse_events__term {
62 struct list_head list; 61 struct list_head list;
63}; 62};
64 63
65struct parse_events_data__events { 64struct parse_events_evlist {
66 struct list_head list; 65 struct list_head list;
67 int idx; 66 int idx;
67 int nr_groups;
68}; 68};
69 69
70struct parse_events_data__terms { 70struct parse_events_terms {
71 struct list_head *terms; 71 struct list_head *terms;
72}; 72};
73 73
74int parse_events__is_hardcoded_term(struct parse_events__term *term); 74int parse_events__is_hardcoded_term(struct parse_events_term *term);
75int parse_events__term_num(struct parse_events__term **_term, 75int parse_events_term__num(struct parse_events_term **_term,
76 int type_term, char *config, u64 num); 76 int type_term, char *config, u64 num);
77int parse_events__term_str(struct parse_events__term **_term, 77int parse_events_term__str(struct parse_events_term **_term,
78 int type_term, char *config, char *str); 78 int type_term, char *config, char *str);
79int parse_events__term_sym_hw(struct parse_events__term **term, 79int parse_events_term__sym_hw(struct parse_events_term **term,
80 char *config, unsigned idx); 80 char *config, unsigned idx);
81int parse_events__term_clone(struct parse_events__term **new, 81int parse_events_term__clone(struct parse_events_term **new,
82 struct parse_events__term *term); 82 struct parse_events_term *term);
83void parse_events__free_terms(struct list_head *terms); 83void parse_events__free_terms(struct list_head *terms);
84int parse_events__modifier_event(struct list_head *list, char *str, bool add); 84int parse_events__modifier_event(struct list_head *list, char *str, bool add);
85int parse_events__modifier_group(struct list_head *list, char *event_mod); 85int parse_events__modifier_group(struct list_head *list, char *event_mod);
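
A sketch of the renamed term constructors declared above (parse_events__term_* becomes parse_events_term__*). It builds a one-entry term list roughly equivalent to "period=100000"; build_period_term() is a hypothetical caller, PARSE_EVENTS__TERM_TYPE_USER is the user-term type used by the grammar, and the code is assumed to be compiled inside the perf tree.

#include <linux/list.h>
#include "parse-events.h"

static int build_period_term(struct list_head *terms)
{
	struct parse_events_term *term;
	int err;

	err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
				     (char *)"period", 100000);
	if (err)
		return err;

	INIT_LIST_HEAD(terms);
	list_add_tail(&term->list, terms);	/* released later via parse_events__free_terms() */
	return 0;
}
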
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 0f9914ae6bac..afc44c18dfe1 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -1,5 +1,4 @@
1%pure-parser 1%pure-parser
2%name-prefix "parse_events_"
3%parse-param {void *_data} 2%parse-param {void *_data}
4%parse-param {void *scanner} 3%parse-param {void *scanner}
5%lex-param {void* scanner} 4%lex-param {void* scanner}
@@ -23,6 +22,14 @@ do { \
23 YYABORT; \ 22 YYABORT; \
24} while (0) 23} while (0)
25 24
25static inc_group_count(struct list_head *list,
26 struct parse_events_evlist *data)
27{
28 /* Count groups only have more than 1 members */
29 if (!list_is_last(list->next, list))
30 data->nr_groups++;
31}
32
26%} 33%}
27 34
28%token PE_START_EVENTS PE_START_TERMS 35%token PE_START_EVENTS PE_START_TERMS
@@ -68,7 +75,7 @@ do { \
68 char *str; 75 char *str;
69 u64 num; 76 u64 num;
70 struct list_head *head; 77 struct list_head *head;
71 struct parse_events__term *term; 78 struct parse_events_term *term;
72} 79}
73%% 80%%
74 81
@@ -79,7 +86,7 @@ PE_START_TERMS start_terms
79 86
80start_events: groups 87start_events: groups
81{ 88{
82 struct parse_events_data__events *data = _data; 89 struct parse_events_evlist *data = _data;
83 90
84 parse_events_update_lists($1, &data->list); 91 parse_events_update_lists($1, &data->list);
85} 92}
@@ -123,6 +130,7 @@ PE_NAME '{' events '}'
123{ 130{
124 struct list_head *list = $3; 131 struct list_head *list = $3;
125 132
133 inc_group_count(list, _data);
126 parse_events__set_leader($1, list); 134 parse_events__set_leader($1, list);
127 $$ = list; 135 $$ = list;
128} 136}
@@ -131,6 +139,7 @@ PE_NAME '{' events '}'
131{ 139{
132 struct list_head *list = $2; 140 struct list_head *list = $2;
133 141
142 inc_group_count(list, _data);
134 parse_events__set_leader(NULL, list); 143 parse_events__set_leader(NULL, list);
135 $$ = list; 144 $$ = list;
136} 145}
@@ -186,7 +195,7 @@ event_def: event_pmu |
186event_pmu: 195event_pmu:
187PE_NAME '/' event_config '/' 196PE_NAME '/' event_config '/'
188{ 197{
189 struct parse_events_data__events *data = _data; 198 struct parse_events_evlist *data = _data;
190 struct list_head *list = NULL; 199 struct list_head *list = NULL;
191 200
192 ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3)); 201 ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3));
@@ -202,7 +211,7 @@ PE_VALUE_SYM_SW
202event_legacy_symbol: 211event_legacy_symbol:
203value_sym '/' event_config '/' 212value_sym '/' event_config '/'
204{ 213{
205 struct parse_events_data__events *data = _data; 214 struct parse_events_evlist *data = _data;
206 struct list_head *list = NULL; 215 struct list_head *list = NULL;
207 int type = $1 >> 16; 216 int type = $1 >> 16;
208 int config = $1 & 255; 217 int config = $1 & 255;
@@ -215,7 +224,7 @@ value_sym '/' event_config '/'
215| 224|
216value_sym sep_slash_dc 225value_sym sep_slash_dc
217{ 226{
218 struct parse_events_data__events *data = _data; 227 struct parse_events_evlist *data = _data;
219 struct list_head *list = NULL; 228 struct list_head *list = NULL;
220 int type = $1 >> 16; 229 int type = $1 >> 16;
221 int config = $1 & 255; 230 int config = $1 & 255;
@@ -228,7 +237,7 @@ value_sym sep_slash_dc
228event_legacy_cache: 237event_legacy_cache:
229PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT 238PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
230{ 239{
231 struct parse_events_data__events *data = _data; 240 struct parse_events_evlist *data = _data;
232 struct list_head *list = NULL; 241 struct list_head *list = NULL;
233 242
234 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5)); 243 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5));
@@ -237,7 +246,7 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
237| 246|
238PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT 247PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
239{ 248{
240 struct parse_events_data__events *data = _data; 249 struct parse_events_evlist *data = _data;
241 struct list_head *list = NULL; 250 struct list_head *list = NULL;
242 251
243 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL)); 252 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL));
@@ -246,7 +255,7 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
246| 255|
247PE_NAME_CACHE_TYPE 256PE_NAME_CACHE_TYPE
248{ 257{
249 struct parse_events_data__events *data = _data; 258 struct parse_events_evlist *data = _data;
250 struct list_head *list = NULL; 259 struct list_head *list = NULL;
251 260
252 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL)); 261 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL));
@@ -256,7 +265,7 @@ PE_NAME_CACHE_TYPE
256event_legacy_mem: 265event_legacy_mem:
257PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc 266PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
258{ 267{
259 struct parse_events_data__events *data = _data; 268 struct parse_events_evlist *data = _data;
260 struct list_head *list = NULL; 269 struct list_head *list = NULL;
261 270
262 ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, 271 ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
@@ -266,7 +275,7 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
266| 275|
267PE_PREFIX_MEM PE_VALUE sep_dc 276PE_PREFIX_MEM PE_VALUE sep_dc
268{ 277{
269 struct parse_events_data__events *data = _data; 278 struct parse_events_evlist *data = _data;
270 struct list_head *list = NULL; 279 struct list_head *list = NULL;
271 280
272 ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, 281 ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
@@ -277,7 +286,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc
277event_legacy_tracepoint: 286event_legacy_tracepoint:
278PE_NAME ':' PE_NAME 287PE_NAME ':' PE_NAME
279{ 288{
280 struct parse_events_data__events *data = _data; 289 struct parse_events_evlist *data = _data;
281 struct list_head *list = NULL; 290 struct list_head *list = NULL;
282 291
283 ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3)); 292 ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3));
@@ -287,7 +296,7 @@ PE_NAME ':' PE_NAME
287event_legacy_numeric: 296event_legacy_numeric:
288PE_VALUE ':' PE_VALUE 297PE_VALUE ':' PE_VALUE
289{ 298{
290 struct parse_events_data__events *data = _data; 299 struct parse_events_evlist *data = _data;
291 struct list_head *list = NULL; 300 struct list_head *list = NULL;
292 301
293 ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL)); 302 ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL));
@@ -297,7 +306,7 @@ PE_VALUE ':' PE_VALUE
297event_legacy_raw: 306event_legacy_raw:
298PE_RAW 307PE_RAW
299{ 308{
300 struct parse_events_data__events *data = _data; 309 struct parse_events_evlist *data = _data;
301 struct list_head *list = NULL; 310 struct list_head *list = NULL;
302 311
303 ABORT_ON(parse_events_add_numeric(&list, &data->idx, 312 ABORT_ON(parse_events_add_numeric(&list, &data->idx,
@@ -307,7 +316,7 @@ PE_RAW
307 316
308start_terms: event_config 317start_terms: event_config
309{ 318{
310 struct parse_events_data__terms *data = _data; 319 struct parse_events_terms *data = _data;
311 data->terms = $1; 320 data->terms = $1;
312} 321}
313 322
@@ -315,7 +324,7 @@ event_config:
315event_config ',' event_term 324event_config ',' event_term
316{ 325{
317 struct list_head *head = $1; 326 struct list_head *head = $1;
318 struct parse_events__term *term = $3; 327 struct parse_events_term *term = $3;
319 328
320 ABORT_ON(!head); 329 ABORT_ON(!head);
321 list_add_tail(&term->list, head); 330 list_add_tail(&term->list, head);
@@ -325,7 +334,7 @@ event_config ',' event_term
325event_term 334event_term
326{ 335{
327 struct list_head *head = malloc(sizeof(*head)); 336 struct list_head *head = malloc(sizeof(*head));
328 struct parse_events__term *term = $1; 337 struct parse_events_term *term = $1;
329 338
330 ABORT_ON(!head); 339 ABORT_ON(!head);
331 INIT_LIST_HEAD(head); 340 INIT_LIST_HEAD(head);
@@ -336,70 +345,70 @@ event_term
336event_term: 345event_term:
337PE_NAME '=' PE_NAME 346PE_NAME '=' PE_NAME
338{ 347{
339 struct parse_events__term *term; 348 struct parse_events_term *term;
340 349
341 ABORT_ON(parse_events__term_str(&term, PARSE_EVENTS__TERM_TYPE_USER, 350 ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
342 $1, $3)); 351 $1, $3));
343 $$ = term; 352 $$ = term;
344} 353}
345| 354|
346PE_NAME '=' PE_VALUE 355PE_NAME '=' PE_VALUE
347{ 356{
348 struct parse_events__term *term; 357 struct parse_events_term *term;
349 358
350 ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER, 359 ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
351 $1, $3)); 360 $1, $3));
352 $$ = term; 361 $$ = term;
353} 362}
354| 363|
355PE_NAME '=' PE_VALUE_SYM_HW 364PE_NAME '=' PE_VALUE_SYM_HW
356{ 365{
357 struct parse_events__term *term; 366 struct parse_events_term *term;
358 int config = $3 & 255; 367 int config = $3 & 255;
359 368
360 ABORT_ON(parse_events__term_sym_hw(&term, $1, config)); 369 ABORT_ON(parse_events_term__sym_hw(&term, $1, config));
361 $$ = term; 370 $$ = term;
362} 371}
363| 372|
364PE_NAME 373PE_NAME
365{ 374{
366 struct parse_events__term *term; 375 struct parse_events_term *term;
367 376
368 ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER, 377 ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
369 $1, 1)); 378 $1, 1));
370 $$ = term; 379 $$ = term;
371} 380}
372| 381|
373PE_VALUE_SYM_HW 382PE_VALUE_SYM_HW
374{ 383{
375 struct parse_events__term *term; 384 struct parse_events_term *term;
376 int config = $1 & 255; 385 int config = $1 & 255;
377 386
378 ABORT_ON(parse_events__term_sym_hw(&term, NULL, config)); 387 ABORT_ON(parse_events_term__sym_hw(&term, NULL, config));
379 $$ = term; 388 $$ = term;
380} 389}
381| 390|
382PE_TERM '=' PE_NAME 391PE_TERM '=' PE_NAME
383{ 392{
384 struct parse_events__term *term; 393 struct parse_events_term *term;
385 394
386 ABORT_ON(parse_events__term_str(&term, (int)$1, NULL, $3)); 395 ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3));
387 $$ = term; 396 $$ = term;
388} 397}
389| 398|
390PE_TERM '=' PE_VALUE 399PE_TERM '=' PE_VALUE
391{ 400{
392 struct parse_events__term *term; 401 struct parse_events_term *term;
393 402
394 ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, $3)); 403 ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3));
395 $$ = term; 404 $$ = term;
396} 405}
397| 406|
398PE_TERM 407PE_TERM
399{ 408{
400 struct parse_events__term *term; 409 struct parse_events_term *term;
401 410
402 ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, 1)); 411 ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1));
403 $$ = term; 412 $$ = term;
404} 413}
405 414
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 9bdc60c6f138..4c6f9c490a8d 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -1,4 +1,3 @@
1
2#include <linux/list.h> 1#include <linux/list.h>
3#include <sys/types.h> 2#include <sys/types.h>
4#include <sys/stat.h> 3#include <sys/stat.h>
@@ -11,6 +10,19 @@
11#include "parse-events.h" 10#include "parse-events.h"
12#include "cpumap.h" 11#include "cpumap.h"
13 12
13struct perf_pmu_alias {
14 char *name;
15 struct list_head terms;
16 struct list_head list;
17};
18
19struct perf_pmu_format {
20 char *name;
21 int value;
22 DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
23 struct list_head list;
24};
25
14#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/" 26#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
15 27
16int perf_pmu_parse(struct list_head *list, char *name); 28int perf_pmu_parse(struct list_head *list, char *name);
@@ -85,7 +97,7 @@ static int pmu_format(char *name, struct list_head *format)
85 97
86static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file) 98static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file)
87{ 99{
88 struct perf_pmu__alias *alias; 100 struct perf_pmu_alias *alias;
89 char buf[256]; 101 char buf[256];
90 int ret; 102 int ret;
91 103
@@ -172,15 +184,15 @@ static int pmu_aliases(char *name, struct list_head *head)
172 return 0; 184 return 0;
173} 185}
174 186
175static int pmu_alias_terms(struct perf_pmu__alias *alias, 187static int pmu_alias_terms(struct perf_pmu_alias *alias,
176 struct list_head *terms) 188 struct list_head *terms)
177{ 189{
178 struct parse_events__term *term, *clone; 190 struct parse_events_term *term, *clone;
179 LIST_HEAD(list); 191 LIST_HEAD(list);
180 int ret; 192 int ret;
181 193
182 list_for_each_entry(term, &alias->terms, list) { 194 list_for_each_entry(term, &alias->terms, list) {
183 ret = parse_events__term_clone(&clone, term); 195 ret = parse_events_term__clone(&clone, term);
184 if (ret) { 196 if (ret) {
185 parse_events__free_terms(&list); 197 parse_events__free_terms(&list);
186 return ret; 198 return ret;
@@ -360,10 +372,10 @@ struct perf_pmu *perf_pmu__find(char *name)
360 return pmu_lookup(name); 372 return pmu_lookup(name);
361} 373}
362 374
363static struct perf_pmu__format* 375static struct perf_pmu_format *
364pmu_find_format(struct list_head *formats, char *name) 376pmu_find_format(struct list_head *formats, char *name)
365{ 377{
366 struct perf_pmu__format *format; 378 struct perf_pmu_format *format;
367 379
368 list_for_each_entry(format, formats, list) 380 list_for_each_entry(format, formats, list)
369 if (!strcmp(format->name, name)) 381 if (!strcmp(format->name, name))
@@ -403,9 +415,9 @@ static __u64 pmu_format_value(unsigned long *format, __u64 value)
403 */ 415 */
404static int pmu_config_term(struct list_head *formats, 416static int pmu_config_term(struct list_head *formats,
405 struct perf_event_attr *attr, 417 struct perf_event_attr *attr,
406 struct parse_events__term *term) 418 struct parse_events_term *term)
407{ 419{
408 struct perf_pmu__format *format; 420 struct perf_pmu_format *format;
409 __u64 *vp; 421 __u64 *vp;
410 422
411 /* 423 /*
@@ -450,7 +462,7 @@ int perf_pmu__config_terms(struct list_head *formats,
450 struct perf_event_attr *attr, 462 struct perf_event_attr *attr,
451 struct list_head *head_terms) 463 struct list_head *head_terms)
452{ 464{
453 struct parse_events__term *term; 465 struct parse_events_term *term;
454 466
455 list_for_each_entry(term, head_terms, list) 467 list_for_each_entry(term, head_terms, list)
456 if (pmu_config_term(formats, attr, term)) 468 if (pmu_config_term(formats, attr, term))
@@ -471,10 +483,10 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
471 return perf_pmu__config_terms(&pmu->format, attr, head_terms); 483 return perf_pmu__config_terms(&pmu->format, attr, head_terms);
472} 484}
473 485
474static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu, 486static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
475 struct parse_events__term *term) 487 struct parse_events_term *term)
476{ 488{
477 struct perf_pmu__alias *alias; 489 struct perf_pmu_alias *alias;
478 char *name; 490 char *name;
479 491
480 if (parse_events__is_hardcoded_term(term)) 492 if (parse_events__is_hardcoded_term(term))
@@ -507,8 +519,8 @@ static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu,
507 */ 519 */
508int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms) 520int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms)
509{ 521{
510 struct parse_events__term *term, *h; 522 struct parse_events_term *term, *h;
511 struct perf_pmu__alias *alias; 523 struct perf_pmu_alias *alias;
512 int ret; 524 int ret;
513 525
514 list_for_each_entry_safe(term, h, head_terms, list) { 526 list_for_each_entry_safe(term, h, head_terms, list) {
@@ -527,7 +539,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms)
527int perf_pmu__new_format(struct list_head *list, char *name, 539int perf_pmu__new_format(struct list_head *list, char *name,
528 int config, unsigned long *bits) 540 int config, unsigned long *bits)
529{ 541{
530 struct perf_pmu__format *format; 542 struct perf_pmu_format *format;
531 543
532 format = zalloc(sizeof(*format)); 544 format = zalloc(sizeof(*format));
533 if (!format) 545 if (!format)
@@ -548,7 +560,7 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
548 if (!to) 560 if (!to)
549 to = from; 561 to = from;
550 562
551 memset(bits, 0, BITS_TO_LONGS(PERF_PMU_FORMAT_BITS)); 563 memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS));
552 for (b = from; b <= to; b++) 564 for (b = from; b <= to; b++)
553 set_bit(b, bits); 565 set_bit(b, bits);
554} 566}
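
Why the memset() length in perf_pmu__set_format() changed: BITS_TO_LONGS() counts longs, not bytes, so the old code cleared only one byte of the 64-bit format bitmap on a 64-bit build. A standalone illustration; the macros are redefined here so the example compiles on its own and are meant to mirror the perf-tree definitions.

#include <limits.h>
#include <stdio.h>

#define PERF_PMU_FORMAT_BITS	64
#define BITS_PER_LONG		(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define BITS_TO_BYTES(nr)	(((nr) + CHAR_BIT - 1) / CHAR_BIT)

int main(void)
{
	printf("old memset length: %zu byte(s)\n",
	       (size_t)BITS_TO_LONGS(PERF_PMU_FORMAT_BITS));	/* 1 on 64-bit */
	printf("new memset length: %zu byte(s)\n",
	       (size_t)BITS_TO_BYTES(PERF_PMU_FORMAT_BITS));	/* always 8    */
	return 0;
}
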
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index a313ed76a49a..32fe55b659fa 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -12,19 +12,6 @@ enum {
12 12
13#define PERF_PMU_FORMAT_BITS 64 13#define PERF_PMU_FORMAT_BITS 64
14 14
15struct perf_pmu__format {
16 char *name;
17 int value;
18 DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
19 struct list_head list;
20};
21
22struct perf_pmu__alias {
23 char *name;
24 struct list_head terms;
25 struct list_head list;
26};
27
28struct perf_pmu { 15struct perf_pmu {
29 char *name; 16 char *name;
30 __u32 type; 17 __u32 type;
@@ -42,7 +29,7 @@ int perf_pmu__config_terms(struct list_head *formats,
42 struct list_head *head_terms); 29 struct list_head *head_terms);
43int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms); 30int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms);
44struct list_head *perf_pmu__alias(struct perf_pmu *pmu, 31struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
45 struct list_head *head_terms); 32 struct list_head *head_terms);
46int perf_pmu_wrap(void); 33int perf_pmu_wrap(void);
47void perf_pmu_error(struct list_head *list, char *name, char const *msg); 34void perf_pmu_error(struct list_head *list, char *name, char const *msg);
48 35
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
index ec898047ebb9..bfd7e8509869 100644
--- a/tools/perf/util/pmu.y
+++ b/tools/perf/util/pmu.y
@@ -1,5 +1,4 @@
1 1
2%name-prefix "perf_pmu_"
3%parse-param {struct list_head *format} 2%parse-param {struct list_head *format}
4%parse-param {char *name} 3%parse-param {char *name}
5 4
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 49a256e6e0a2..aa04bf9c9ad7 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -40,7 +40,7 @@
40#include "color.h" 40#include "color.h"
41#include "symbol.h" 41#include "symbol.h"
42#include "thread.h" 42#include "thread.h"
43#include "debugfs.h" 43#include <lk/debugfs.h>
44#include "trace-event.h" /* For __maybe_unused */ 44#include "trace-event.h" /* For __maybe_unused */
45#include "probe-event.h" 45#include "probe-event.h"
46#include "probe-finder.h" 46#include "probe-finder.h"
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 1daf5c14e751..be0329394d56 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -413,12 +413,12 @@ static int convert_variable_type(Dwarf_Die *vr_die,
413 dwarf_diename(vr_die), dwarf_diename(&type)); 413 dwarf_diename(vr_die), dwarf_diename(&type));
414 return -EINVAL; 414 return -EINVAL;
415 } 415 }
416 if (die_get_real_type(&type, &type) == NULL) {
417 pr_warning("Failed to get a type"
418 " information.\n");
419 return -ENOENT;
420 }
416 if (ret == DW_TAG_pointer_type) { 421 if (ret == DW_TAG_pointer_type) {
417 if (die_get_real_type(&type, &type) == NULL) {
418 pr_warning("Failed to get a type"
419 " information.\n");
420 return -ENOENT;
421 }
422 while (*ref_ptr) 422 while (*ref_ptr)
423 ref_ptr = &(*ref_ptr)->next; 423 ref_ptr = &(*ref_ptr)->next;
424 /* Add new reference with offset +0 */ 424 /* Add new reference with offset +0 */
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index c40c2d33199e..f75ae1b9900c 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -15,7 +15,7 @@ util/thread_map.c
15util/util.c 15util/util.c
16util/xyarray.c 16util/xyarray.c
17util/cgroup.c 17util/cgroup.c
18util/debugfs.c
19util/rblist.c 18util/rblist.c
20util/strlist.c 19util/strlist.c
20util/sysfs.c
21../../lib/rbtree.c 21../../lib/rbtree.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index a2657fd96837..925e0c3e6d91 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -1045,3 +1045,12 @@ error:
1045 if (PyErr_Occurred()) 1045 if (PyErr_Occurred())
1046 PyErr_SetString(PyExc_ImportError, "perf: Init failed!"); 1046 PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
1047} 1047}
1048
1049/*
1050 * Dummy, to avoid dragging all the test_attr infrastructure in the python
1051 * binding.
1052 */
1053void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
1054 int fd, int group_fd, unsigned long flags)
1055{
1056}
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index f80605eb1855..eacec859f299 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -292,6 +292,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
292 ns = nsecs - s * NSECS_PER_SEC; 292 ns = nsecs - s * NSECS_PER_SEC;
293 293
294 scripting_context->event_data = data; 294 scripting_context->event_data = data;
295 scripting_context->pevent = evsel->tp_format->pevent;
295 296
296 ENTER; 297 ENTER;
297 SAVETMPS; 298 SAVETMPS;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 14683dfca2ee..e87aa5d9696b 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -265,6 +265,7 @@ static void python_process_tracepoint(union perf_event *perf_event
265 ns = nsecs - s * NSECS_PER_SEC; 265 ns = nsecs - s * NSECS_PER_SEC;
266 266
267 scripting_context->event_data = data; 267 scripting_context->event_data = data;
268 scripting_context->pevent = evsel->tp_format->pevent;
268 269
269 context = PyCObject_FromVoidPtr(scripting_context, NULL); 270 context = PyCObject_FromVoidPtr(scripting_context, NULL);
270 271
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index ce6f51162386..cf1fe01b7e89 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1,5 +1,3 @@
1#define _FILE_OFFSET_BITS 64
2
3#include <linux/kernel.h> 1#include <linux/kernel.h>
4 2
5#include <byteswap.h> 3#include <byteswap.h>
@@ -16,7 +14,6 @@
16#include "cpumap.h" 14#include "cpumap.h"
17#include "event-parse.h" 15#include "event-parse.h"
18#include "perf_regs.h" 16#include "perf_regs.h"
19#include "unwind.h"
20#include "vdso.h" 17#include "vdso.h"
21 18
22static int perf_session__open(struct perf_session *self, bool force) 19static int perf_session__open(struct perf_session *self, bool force)
@@ -87,13 +84,12 @@ void perf_session__set_id_hdr_size(struct perf_session *session)
87{ 84{
88 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); 85 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
89 86
90 session->host_machine.id_hdr_size = id_hdr_size;
91 machines__set_id_hdr_size(&session->machines, id_hdr_size); 87 machines__set_id_hdr_size(&session->machines, id_hdr_size);
92} 88}
93 89
94int perf_session__create_kernel_maps(struct perf_session *self) 90int perf_session__create_kernel_maps(struct perf_session *self)
95{ 91{
96 int ret = machine__create_kernel_maps(&self->host_machine); 92 int ret = machine__create_kernel_maps(&self->machines.host);
97 93
98 if (ret >= 0) 94 if (ret >= 0)
99 ret = machines__create_guest_kernel_maps(&self->machines); 95 ret = machines__create_guest_kernel_maps(&self->machines);
@@ -102,8 +98,7 @@ int perf_session__create_kernel_maps(struct perf_session *self)
102 98
103static void perf_session__destroy_kernel_maps(struct perf_session *self) 99static void perf_session__destroy_kernel_maps(struct perf_session *self)
104{ 100{
105 machine__destroy_kernel_maps(&self->host_machine); 101 machines__destroy_kernel_maps(&self->machines);
106 machines__destroy_guest_kernel_maps(&self->machines);
107} 102}
108 103
109struct perf_session *perf_session__new(const char *filename, int mode, 104struct perf_session *perf_session__new(const char *filename, int mode,
@@ -128,22 +123,11 @@ struct perf_session *perf_session__new(const char *filename, int mode,
128 goto out; 123 goto out;
129 124
130 memcpy(self->filename, filename, len); 125 memcpy(self->filename, filename, len);
131 /*
132 * On 64bit we can mmap the data file in one go. No need for tiny mmap
133 * slices. On 32bit we use 32MB.
134 */
135#if BITS_PER_LONG == 64
136 self->mmap_window = ULLONG_MAX;
137#else
138 self->mmap_window = 32 * 1024 * 1024ULL;
139#endif
140 self->machines = RB_ROOT;
141 self->repipe = repipe; 126 self->repipe = repipe;
142 INIT_LIST_HEAD(&self->ordered_samples.samples); 127 INIT_LIST_HEAD(&self->ordered_samples.samples);
143 INIT_LIST_HEAD(&self->ordered_samples.sample_cache); 128 INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
144 INIT_LIST_HEAD(&self->ordered_samples.to_free); 129 INIT_LIST_HEAD(&self->ordered_samples.to_free);
145 machine__init(&self->host_machine, "", HOST_KERNEL_ID); 130 machines__init(&self->machines);
146 hists__init(&self->hists);
147 131
148 if (mode == O_RDONLY) { 132 if (mode == O_RDONLY) {
149 if (perf_session__open(self, force) < 0) 133 if (perf_session__open(self, force) < 0)
@@ -171,37 +155,30 @@ out_delete:
171 return NULL; 155 return NULL;
172} 156}
173 157
174static void machine__delete_dead_threads(struct machine *machine)
175{
176 struct thread *n, *t;
177
178 list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
179 list_del(&t->node);
180 thread__delete(t);
181 }
182}
183
184static void perf_session__delete_dead_threads(struct perf_session *session) 158static void perf_session__delete_dead_threads(struct perf_session *session)
185{ 159{
186 machine__delete_dead_threads(&session->host_machine); 160 machine__delete_dead_threads(&session->machines.host);
187} 161}
188 162
189static void machine__delete_threads(struct machine *self) 163static void perf_session__delete_threads(struct perf_session *session)
190{ 164{
191 struct rb_node *nd = rb_first(&self->threads); 165 machine__delete_threads(&session->machines.host);
192
193 while (nd) {
194 struct thread *t = rb_entry(nd, struct thread, rb_node);
195
196 rb_erase(&t->rb_node, &self->threads);
197 nd = rb_next(nd);
198 thread__delete(t);
199 }
200} 166}
201 167
202static void perf_session__delete_threads(struct perf_session *session) 168static void perf_session_env__delete(struct perf_session_env *env)
203{ 169{
204 machine__delete_threads(&session->host_machine); 170 free(env->hostname);
171 free(env->os_release);
172 free(env->version);
173 free(env->arch);
174 free(env->cpu_desc);
175 free(env->cpuid);
176
177 free(env->cmdline);
178 free(env->sibling_cores);
179 free(env->sibling_threads);
180 free(env->numa_nodes);
181 free(env->pmu_mappings);
205} 182}
206 183
207void perf_session__delete(struct perf_session *self) 184void perf_session__delete(struct perf_session *self)
@@ -209,198 +186,13 @@ void perf_session__delete(struct perf_session *self)
209 perf_session__destroy_kernel_maps(self); 186 perf_session__destroy_kernel_maps(self);
210 perf_session__delete_dead_threads(self); 187 perf_session__delete_dead_threads(self);
211 perf_session__delete_threads(self); 188 perf_session__delete_threads(self);
212 machine__exit(&self->host_machine); 189 perf_session_env__delete(&self->header.env);
190 machines__exit(&self->machines);
213 close(self->fd); 191 close(self->fd);
214 free(self); 192 free(self);
215 vdso__exit(); 193 vdso__exit();
216} 194}
217 195
218void machine__remove_thread(struct machine *self, struct thread *th)
219{
220 self->last_match = NULL;
221 rb_erase(&th->rb_node, &self->threads);
222 /*
223 * We may have references to this thread, for instance in some hist_entry
224 * instances, so just move them to a separate list.
225 */
226 list_add_tail(&th->node, &self->dead_threads);
227}
228
229static bool symbol__match_parent_regex(struct symbol *sym)
230{
231 if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
232 return 1;
233
234 return 0;
235}
236
237static const u8 cpumodes[] = {
238 PERF_RECORD_MISC_USER,
239 PERF_RECORD_MISC_KERNEL,
240 PERF_RECORD_MISC_GUEST_USER,
241 PERF_RECORD_MISC_GUEST_KERNEL
242};
243#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
244
245static void ip__resolve_ams(struct machine *self, struct thread *thread,
246 struct addr_map_symbol *ams,
247 u64 ip)
248{
249 struct addr_location al;
250 size_t i;
251 u8 m;
252
253 memset(&al, 0, sizeof(al));
254
255 for (i = 0; i < NCPUMODES; i++) {
256 m = cpumodes[i];
257 /*
258 * We cannot use the header.misc hint to determine whether a
259 * branch stack address is user, kernel, guest, hypervisor.
260 * Branches may straddle the kernel/user/hypervisor boundaries.
261 * Thus, we have to try consecutively until we find a match
262 * or else, the symbol is unknown
263 */
264 thread__find_addr_location(thread, self, m, MAP__FUNCTION,
265 ip, &al, NULL);
266 if (al.sym)
267 goto found;
268 }
269found:
270 ams->addr = ip;
271 ams->al_addr = al.addr;
272 ams->sym = al.sym;
273 ams->map = al.map;
274}
275
276struct branch_info *machine__resolve_bstack(struct machine *self,
277 struct thread *thr,
278 struct branch_stack *bs)
279{
280 struct branch_info *bi;
281 unsigned int i;
282
283 bi = calloc(bs->nr, sizeof(struct branch_info));
284 if (!bi)
285 return NULL;
286
287 for (i = 0; i < bs->nr; i++) {
288 ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
289 ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
290 bi[i].flags = bs->entries[i].flags;
291 }
292 return bi;
293}
294
295static int machine__resolve_callchain_sample(struct machine *machine,
296 struct thread *thread,
297 struct ip_callchain *chain,
298 struct symbol **parent)
299
300{
301 u8 cpumode = PERF_RECORD_MISC_USER;
302 unsigned int i;
303 int err;
304
305 callchain_cursor_reset(&callchain_cursor);
306
307 if (chain->nr > PERF_MAX_STACK_DEPTH) {
308 pr_warning("corrupted callchain. skipping...\n");
309 return 0;
310 }
311
312 for (i = 0; i < chain->nr; i++) {
313 u64 ip;
314 struct addr_location al;
315
316 if (callchain_param.order == ORDER_CALLEE)
317 ip = chain->ips[i];
318 else
319 ip = chain->ips[chain->nr - i - 1];
320
321 if (ip >= PERF_CONTEXT_MAX) {
322 switch (ip) {
323 case PERF_CONTEXT_HV:
324 cpumode = PERF_RECORD_MISC_HYPERVISOR;
325 break;
326 case PERF_CONTEXT_KERNEL:
327 cpumode = PERF_RECORD_MISC_KERNEL;
328 break;
329 case PERF_CONTEXT_USER:
330 cpumode = PERF_RECORD_MISC_USER;
331 break;
332 default:
333 pr_debug("invalid callchain context: "
334 "%"PRId64"\n", (s64) ip);
335 /*
336 * It seems the callchain is corrupted.
337 * Discard all.
338 */
339 callchain_cursor_reset(&callchain_cursor);
340 return 0;
341 }
342 continue;
343 }
344
345 al.filtered = false;
346 thread__find_addr_location(thread, machine, cpumode,
347 MAP__FUNCTION, ip, &al, NULL);
348 if (al.sym != NULL) {
349 if (sort__has_parent && !*parent &&
350 symbol__match_parent_regex(al.sym))
351 *parent = al.sym;
352 if (!symbol_conf.use_callchain)
353 break;
354 }
355
356 err = callchain_cursor_append(&callchain_cursor,
357 ip, al.map, al.sym);
358 if (err)
359 return err;
360 }
361
362 return 0;
363}
364
365static int unwind_entry(struct unwind_entry *entry, void *arg)
366{
367 struct callchain_cursor *cursor = arg;
368 return callchain_cursor_append(cursor, entry->ip,
369 entry->map, entry->sym);
370}
371
372int machine__resolve_callchain(struct machine *machine,
373 struct perf_evsel *evsel,
374 struct thread *thread,
375 struct perf_sample *sample,
376 struct symbol **parent)
377
378{
379 int ret;
380
381 callchain_cursor_reset(&callchain_cursor);
382
383 ret = machine__resolve_callchain_sample(machine, thread,
384 sample->callchain, parent);
385 if (ret)
386 return ret;
387
388 /* Can we do dwarf post unwind? */
389 if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
390 (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
391 return 0;
392
393 /* Bail out if nothing was captured. */
394 if ((!sample->user_regs.regs) ||
395 (!sample->user_stack.size))
396 return 0;
397
398 return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
399 thread, evsel->attr.sample_regs_user,
400 sample);
401
402}
403
404static int process_event_synth_tracing_data_stub(union perf_event *event 196static int process_event_synth_tracing_data_stub(union perf_event *event
405 __maybe_unused, 197 __maybe_unused,
406 struct perf_session *session 198 struct perf_session *session
@@ -1006,6 +798,12 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
1006 798
1007 if (sample_type & PERF_SAMPLE_STACK_USER) 799 if (sample_type & PERF_SAMPLE_STACK_USER)
1008 stack_user__printf(&sample->user_stack); 800 stack_user__printf(&sample->user_stack);
801
802 if (sample_type & PERF_SAMPLE_WEIGHT)
803 printf("... weight: %" PRIu64 "\n", sample->weight);
804
805 if (sample_type & PERF_SAMPLE_DATA_SRC)
806 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
1009} 807}
1010 808
1011static struct machine * 809static struct machine *
@@ -1027,7 +825,7 @@ static struct machine *
1027 return perf_session__findnew_machine(session, pid); 825 return perf_session__findnew_machine(session, pid);
1028 } 826 }
1029 827
1030 return perf_session__find_host_machine(session); 828 return &session->machines.host;
1031} 829}
1032 830
1033static int perf_session_deliver_event(struct perf_session *session, 831static int perf_session_deliver_event(struct perf_session *session,
@@ -1065,11 +863,11 @@ static int perf_session_deliver_event(struct perf_session *session,
1065 case PERF_RECORD_SAMPLE: 863 case PERF_RECORD_SAMPLE:
1066 dump_sample(evsel, event, sample); 864 dump_sample(evsel, event, sample);
1067 if (evsel == NULL) { 865 if (evsel == NULL) {
1068 ++session->hists.stats.nr_unknown_id; 866 ++session->stats.nr_unknown_id;
1069 return 0; 867 return 0;
1070 } 868 }
1071 if (machine == NULL) { 869 if (machine == NULL) {
1072 ++session->hists.stats.nr_unprocessable_samples; 870 ++session->stats.nr_unprocessable_samples;
1073 return 0; 871 return 0;
1074 } 872 }
1075 return tool->sample(tool, event, sample, evsel, machine); 873 return tool->sample(tool, event, sample, evsel, machine);
@@ -1083,7 +881,7 @@ static int perf_session_deliver_event(struct perf_session *session,
1083 return tool->exit(tool, event, sample, machine); 881 return tool->exit(tool, event, sample, machine);
1084 case PERF_RECORD_LOST: 882 case PERF_RECORD_LOST:
1085 if (tool->lost == perf_event__process_lost) 883 if (tool->lost == perf_event__process_lost)
1086 session->hists.stats.total_lost += event->lost.lost; 884 session->stats.total_lost += event->lost.lost;
1087 return tool->lost(tool, event, sample, machine); 885 return tool->lost(tool, event, sample, machine);
1088 case PERF_RECORD_READ: 886 case PERF_RECORD_READ:
1089 return tool->read(tool, event, sample, evsel, machine); 887 return tool->read(tool, event, sample, evsel, machine);
@@ -1092,7 +890,7 @@ static int perf_session_deliver_event(struct perf_session *session,
1092 case PERF_RECORD_UNTHROTTLE: 890 case PERF_RECORD_UNTHROTTLE:
1093 return tool->unthrottle(tool, event, sample, machine); 891 return tool->unthrottle(tool, event, sample, machine);
1094 default: 892 default:
1095 ++session->hists.stats.nr_unknown_events; 893 ++session->stats.nr_unknown_events;
1096 return -1; 894 return -1;
1097 } 895 }
1098} 896}
@@ -1106,8 +904,8 @@ static int perf_session__preprocess_sample(struct perf_session *session,
1106 904
1107 if (!ip_callchain__valid(sample->callchain, event)) { 905 if (!ip_callchain__valid(sample->callchain, event)) {
1108 pr_debug("call-chain problem with event, skipping it.\n"); 906 pr_debug("call-chain problem with event, skipping it.\n");
1109 ++session->hists.stats.nr_invalid_chains; 907 ++session->stats.nr_invalid_chains;
1110 session->hists.stats.total_invalid_chains += sample->period; 908 session->stats.total_invalid_chains += sample->period;
1111 return -EINVAL; 909 return -EINVAL;
1112 } 910 }
1113 return 0; 911 return 0;
@@ -1165,7 +963,7 @@ static int perf_session__process_event(struct perf_session *session,
1165 if (event->header.type >= PERF_RECORD_HEADER_MAX) 963 if (event->header.type >= PERF_RECORD_HEADER_MAX)
1166 return -EINVAL; 964 return -EINVAL;
1167 965
1168 hists__inc_nr_events(&session->hists, event->header.type); 966 events_stats__inc(&session->stats, event->header.type);
1169 967
1170 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 968 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1171 return perf_session__process_user_event(session, event, tool, file_offset); 969 return perf_session__process_user_event(session, event, tool, file_offset);
@@ -1201,7 +999,7 @@ void perf_event_header__bswap(struct perf_event_header *self)
1201 999
1202struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) 1000struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1203{ 1001{
1204 return machine__findnew_thread(&session->host_machine, pid); 1002 return machine__findnew_thread(&session->machines.host, pid);
1205} 1003}
1206 1004
1207static struct thread *perf_session__register_idle_thread(struct perf_session *self) 1005static struct thread *perf_session__register_idle_thread(struct perf_session *self)
@@ -1220,39 +1018,39 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
1220 const struct perf_tool *tool) 1018 const struct perf_tool *tool)
1221{ 1019{
1222 if (tool->lost == perf_event__process_lost && 1020 if (tool->lost == perf_event__process_lost &&
1223 session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) { 1021 session->stats.nr_events[PERF_RECORD_LOST] != 0) {
1224 ui__warning("Processed %d events and lost %d chunks!\n\n" 1022 ui__warning("Processed %d events and lost %d chunks!\n\n"
1225 "Check IO/CPU overload!\n\n", 1023 "Check IO/CPU overload!\n\n",
1226 session->hists.stats.nr_events[0], 1024 session->stats.nr_events[0],
1227 session->hists.stats.nr_events[PERF_RECORD_LOST]); 1025 session->stats.nr_events[PERF_RECORD_LOST]);
1228 } 1026 }
1229 1027
1230 if (session->hists.stats.nr_unknown_events != 0) { 1028 if (session->stats.nr_unknown_events != 0) {
1231 ui__warning("Found %u unknown events!\n\n" 1029 ui__warning("Found %u unknown events!\n\n"
1232 "Is this an older tool processing a perf.data " 1030 "Is this an older tool processing a perf.data "
1233 "file generated by a more recent tool?\n\n" 1031 "file generated by a more recent tool?\n\n"
1234 "If that is not the case, consider " 1032 "If that is not the case, consider "
1235 "reporting to linux-kernel@vger.kernel.org.\n\n", 1033 "reporting to linux-kernel@vger.kernel.org.\n\n",
1236 session->hists.stats.nr_unknown_events); 1034 session->stats.nr_unknown_events);
1237 } 1035 }
1238 1036
1239 if (session->hists.stats.nr_unknown_id != 0) { 1037 if (session->stats.nr_unknown_id != 0) {
1240 ui__warning("%u samples with id not present in the header\n", 1038 ui__warning("%u samples with id not present in the header\n",
1241 session->hists.stats.nr_unknown_id); 1039 session->stats.nr_unknown_id);
1242 } 1040 }
1243 1041
1244 if (session->hists.stats.nr_invalid_chains != 0) { 1042 if (session->stats.nr_invalid_chains != 0) {
1245 ui__warning("Found invalid callchains!\n\n" 1043 ui__warning("Found invalid callchains!\n\n"
1246 "%u out of %u events were discarded for this reason.\n\n" 1044 "%u out of %u events were discarded for this reason.\n\n"
1247 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 1045 "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1248 session->hists.stats.nr_invalid_chains, 1046 session->stats.nr_invalid_chains,
1249 session->hists.stats.nr_events[PERF_RECORD_SAMPLE]); 1047 session->stats.nr_events[PERF_RECORD_SAMPLE]);
1250 } 1048 }
1251 1049
1252 if (session->hists.stats.nr_unprocessable_samples != 0) { 1050 if (session->stats.nr_unprocessable_samples != 0) {
1253 ui__warning("%u unprocessable samples recorded.\n" 1051 ui__warning("%u unprocessable samples recorded.\n"
1254 "Do you have a KVM guest running and not using 'perf kvm'?\n", 1052 "Do you have a KVM guest running and not using 'perf kvm'?\n",
1255 session->hists.stats.nr_unprocessable_samples); 1053 session->stats.nr_unprocessable_samples);
1256 } 1054 }
1257} 1055}
1258 1056
@@ -1369,6 +1167,18 @@ fetch_mmaped_event(struct perf_session *session,
1369 return event; 1167 return event;
1370} 1168}
1371 1169
1170/*
1171 * On 64bit we can mmap the data file in one go. No need for tiny mmap
1172 * slices. On 32bit we use 32MB.
1173 */
1174#if BITS_PER_LONG == 64
1175#define MMAP_SIZE ULLONG_MAX
1176#define NUM_MMAPS 1
1177#else
1178#define MMAP_SIZE (32 * 1024 * 1024ULL)
1179#define NUM_MMAPS 128
1180#endif
1181
1372int __perf_session__process_events(struct perf_session *session, 1182int __perf_session__process_events(struct perf_session *session,
1373 u64 data_offset, u64 data_size, 1183 u64 data_offset, u64 data_size,
1374 u64 file_size, struct perf_tool *tool) 1184 u64 file_size, struct perf_tool *tool)
@@ -1376,7 +1186,7 @@ int __perf_session__process_events(struct perf_session *session,
1376 u64 head, page_offset, file_offset, file_pos, progress_next; 1186 u64 head, page_offset, file_offset, file_pos, progress_next;
1377 int err, mmap_prot, mmap_flags, map_idx = 0; 1187 int err, mmap_prot, mmap_flags, map_idx = 0;
1378 size_t mmap_size; 1188 size_t mmap_size;
1379 char *buf, *mmaps[8]; 1189 char *buf, *mmaps[NUM_MMAPS];
1380 union perf_event *event; 1190 union perf_event *event;
1381 uint32_t size; 1191 uint32_t size;
1382 1192
@@ -1391,7 +1201,7 @@ int __perf_session__process_events(struct perf_session *session,
1391 1201
1392 progress_next = file_size / 16; 1202 progress_next = file_size / 16;
1393 1203
1394 mmap_size = session->mmap_window; 1204 mmap_size = MMAP_SIZE;
1395 if (mmap_size > file_size) 1205 if (mmap_size > file_size)
1396 mmap_size = file_size; 1206 mmap_size = file_size;
1397 1207
@@ -1526,16 +1336,13 @@ int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1526 1336
1527size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp) 1337size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
1528{ 1338{
1529 return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) + 1339 return machines__fprintf_dsos(&self->machines, fp);
1530 __dsos__fprintf(&self->host_machine.user_dsos, fp) +
1531 machines__fprintf_dsos(&self->machines, fp);
1532} 1340}
1533 1341
1534size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, 1342size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
1535 bool with_hits) 1343 bool (skip)(struct dso *dso, int parm), int parm)
1536{ 1344{
1537 size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits); 1345 return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
1538 return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
1539} 1346}
1540 1347
1541size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) 1348size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
@@ -1543,11 +1350,11 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
1543 struct perf_evsel *pos; 1350 struct perf_evsel *pos;
1544 size_t ret = fprintf(fp, "Aggregated stats:\n"); 1351 size_t ret = fprintf(fp, "Aggregated stats:\n");
1545 1352
1546 ret += hists__fprintf_nr_events(&session->hists, fp); 1353 ret += events_stats__fprintf(&session->stats, fp);
1547 1354
1548 list_for_each_entry(pos, &session->evlist->entries, node) { 1355 list_for_each_entry(pos, &session->evlist->entries, node) {
1549 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos)); 1356 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
1550 ret += hists__fprintf_nr_events(&pos->hists, fp); 1357 ret += events_stats__fprintf(&pos->hists.stats, fp);
1551 } 1358 }
1552 1359
1553 return ret; 1360 return ret;
@@ -1559,19 +1366,7 @@ size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
1559 * FIXME: Here we have to actually print all the machines in this 1366 * FIXME: Here we have to actually print all the machines in this
1560 * session, not just the host... 1367 * session, not just the host...
1561 */ 1368 */
1562 return machine__fprintf(&session->host_machine, fp); 1369 return machine__fprintf(&session->machines.host, fp);
1563}
1564
1565void perf_session__remove_thread(struct perf_session *session,
1566 struct thread *th)
1567{
1568 /*
1569 * FIXME: This one makes no sense, we need to remove the thread from
1570 * the machine it belongs to, perf_session can have many machines, so
1571 * doing it always on ->host_machine is wrong. Fix when auditing all
1572 * the 'perf kvm' code.
1573 */
1574 machine__remove_thread(&session->host_machine, th);
1575} 1370}
1576 1371
1577struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, 1372struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index cea133a6bdf1..6b51d47acdba 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -30,16 +30,10 @@ struct ordered_samples {
30struct perf_session { 30struct perf_session {
31 struct perf_header header; 31 struct perf_header header;
32 unsigned long size; 32 unsigned long size;
33 unsigned long mmap_window; 33 struct machines machines;
34 struct machine host_machine;
35 struct rb_root machines;
36 struct perf_evlist *evlist; 34 struct perf_evlist *evlist;
37 struct pevent *pevent; 35 struct pevent *pevent;
38 /* 36 struct events_stats stats;
39 * FIXME: Need to split this up further, we need global
40 * stats + per event stats.
41 */
42 struct hists hists;
43 int fd; 37 int fd;
44 bool fd_pipe; 38 bool fd_pipe;
45 bool repipe; 39 bool repipe;
@@ -54,7 +48,7 @@ struct perf_tool;
54struct perf_session *perf_session__new(const char *filename, int mode, 48struct perf_session *perf_session__new(const char *filename, int mode,
55 bool force, bool repipe, 49 bool force, bool repipe,
56 struct perf_tool *tool); 50 struct perf_tool *tool);
57void perf_session__delete(struct perf_session *self); 51void perf_session__delete(struct perf_session *session);
58 52
59void perf_event_header__bswap(struct perf_event_header *self); 53void perf_event_header__bswap(struct perf_event_header *self);
60 54
@@ -78,46 +72,26 @@ void perf_event__attr_swap(struct perf_event_attr *attr);
78int perf_session__create_kernel_maps(struct perf_session *self); 72int perf_session__create_kernel_maps(struct perf_session *self);
79 73
80void perf_session__set_id_hdr_size(struct perf_session *session); 74void perf_session__set_id_hdr_size(struct perf_session *session);
81void perf_session__remove_thread(struct perf_session *self, struct thread *th);
82
83static inline
84struct machine *perf_session__find_host_machine(struct perf_session *self)
85{
86 return &self->host_machine;
87}
88 75
89static inline 76static inline
90struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid) 77struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid)
91{ 78{
92 if (pid == HOST_KERNEL_ID)
93 return &self->host_machine;
94 return machines__find(&self->machines, pid); 79 return machines__find(&self->machines, pid);
95} 80}
96 81
97static inline 82static inline
98struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t pid) 83struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t pid)
99{ 84{
100 if (pid == HOST_KERNEL_ID)
101 return &self->host_machine;
102 return machines__findnew(&self->machines, pid); 85 return machines__findnew(&self->machines, pid);
103} 86}
104 87
105static inline
106void perf_session__process_machines(struct perf_session *self,
107 struct perf_tool *tool,
108 machine__process_t process)
109{
110 process(&self->host_machine, tool);
111 return machines__process(&self->machines, process, tool);
112}
113
114struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); 88struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
115size_t perf_session__fprintf(struct perf_session *self, FILE *fp); 89size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
116 90
117size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp); 91size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
118 92
119size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, 93size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
120 FILE *fp, bool with_hits); 94 bool (fn)(struct dso *dso, int parm), int parm);
121 95
122size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp); 96size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
123 97
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 73d510269784..6b0ed322907e 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -24,6 +24,7 @@ cflags += getenv('CFLAGS', '').split()
24build_lib = getenv('PYTHON_EXTBUILD_LIB') 24build_lib = getenv('PYTHON_EXTBUILD_LIB')
25build_tmp = getenv('PYTHON_EXTBUILD_TMP') 25build_tmp = getenv('PYTHON_EXTBUILD_TMP')
26libtraceevent = getenv('LIBTRACEEVENT') 26libtraceevent = getenv('LIBTRACEEVENT')
27liblk = getenv('LIBLK')
27 28
28ext_sources = [f.strip() for f in file('util/python-ext-sources') 29ext_sources = [f.strip() for f in file('util/python-ext-sources')
29 if len(f.strip()) > 0 and f[0] != '#'] 30 if len(f.strip()) > 0 and f[0] != '#']
@@ -32,7 +33,7 @@ perf = Extension('perf',
32 sources = ext_sources, 33 sources = ext_sources,
33 include_dirs = ['util/include'], 34 include_dirs = ['util/include'],
34 extra_compile_args = cflags, 35 extra_compile_args = cflags,
35 extra_objects = [libtraceevent], 36 extra_objects = [libtraceevent, liblk],
36 ) 37 )
37 38
38setup(name='perf', 39setup(name='perf',
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index cfd1c0feb32d..5f52d492590c 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -60,7 +60,7 @@ sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
60static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf, 60static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
61 size_t size, unsigned int width) 61 size_t size, unsigned int width)
62{ 62{
63 return repsep_snprintf(bf, size, "%*s:%5d", width, 63 return repsep_snprintf(bf, size, "%*s:%5d", width - 6,
64 self->thread->comm ?: "", self->thread->pid); 64 self->thread->comm ?: "", self->thread->pid);
65} 65}
66 66
@@ -97,6 +97,16 @@ static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
97 return repsep_snprintf(bf, size, "%*s", width, self->thread->comm); 97 return repsep_snprintf(bf, size, "%*s", width, self->thread->comm);
98} 98}
99 99
100struct sort_entry sort_comm = {
101 .se_header = "Command",
102 .se_cmp = sort__comm_cmp,
103 .se_collapse = sort__comm_collapse,
104 .se_snprintf = hist_entry__comm_snprintf,
105 .se_width_idx = HISTC_COMM,
106};
107
108/* --sort dso */
109
100static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 110static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
101{ 111{
102 struct dso *dso_l = map_l ? map_l->dso : NULL; 112 struct dso *dso_l = map_l ? map_l->dso : NULL;
@@ -117,40 +127,12 @@ static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
117 return strcmp(dso_name_l, dso_name_r); 127 return strcmp(dso_name_l, dso_name_r);
118} 128}
119 129
120struct sort_entry sort_comm = {
121 .se_header = "Command",
122 .se_cmp = sort__comm_cmp,
123 .se_collapse = sort__comm_collapse,
124 .se_snprintf = hist_entry__comm_snprintf,
125 .se_width_idx = HISTC_COMM,
126};
127
128/* --sort dso */
129
130static int64_t 130static int64_t
131sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 131sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
132{ 132{
133 return _sort__dso_cmp(left->ms.map, right->ms.map); 133 return _sort__dso_cmp(left->ms.map, right->ms.map);
134} 134}
135 135
136
137static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r,
138 u64 ip_l, u64 ip_r)
139{
140 if (!sym_l || !sym_r)
141 return cmp_null(sym_l, sym_r);
142
143 if (sym_l == sym_r)
144 return 0;
145
146 if (sym_l)
147 ip_l = sym_l->start;
148 if (sym_r)
149 ip_r = sym_r->start;
150
151 return (int64_t)(ip_r - ip_l);
152}
153
154static int _hist_entry__dso_snprintf(struct map *map, char *bf, 136static int _hist_entry__dso_snprintf(struct map *map, char *bf,
155 size_t size, unsigned int width) 137 size_t size, unsigned int width)
156{ 138{
@@ -169,9 +151,43 @@ static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
169 return _hist_entry__dso_snprintf(self->ms.map, bf, size, width); 151 return _hist_entry__dso_snprintf(self->ms.map, bf, size, width);
170} 152}
171 153
154struct sort_entry sort_dso = {
155 .se_header = "Shared Object",
156 .se_cmp = sort__dso_cmp,
157 .se_snprintf = hist_entry__dso_snprintf,
158 .se_width_idx = HISTC_DSO,
159};
160
161/* --sort symbol */
162
163static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
164{
165 u64 ip_l, ip_r;
166
167 if (!sym_l || !sym_r)
168 return cmp_null(sym_l, sym_r);
169
170 if (sym_l == sym_r)
171 return 0;
172
173 ip_l = sym_l->start;
174 ip_r = sym_r->start;
175
176 return (int64_t)(ip_r - ip_l);
177}
178
179static int64_t
180sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
181{
182 if (!left->ms.sym && !right->ms.sym)
183 return right->level - left->level;
184
185 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
186}
187
172static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, 188static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
173 u64 ip, char level, char *bf, size_t size, 189 u64 ip, char level, char *bf, size_t size,
174 unsigned int width __maybe_unused) 190 unsigned int width)
175{ 191{
176 size_t ret = 0; 192 size_t ret = 0;
177 193
@@ -182,11 +198,19 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
182 } 198 }
183 199
184 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 200 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
185 if (sym) 201 if (sym && map) {
186 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", 202 if (map->type == MAP__VARIABLE) {
187 width - ret, 203 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
188 sym->name); 204 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
189 else { 205 ip - map->unmap_ip(map, sym->start));
206 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
207 width - ret, "");
208 } else {
209 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
210 width - ret,
211 sym->name);
212 }
213 } else {
190 size_t len = BITS_PER_LONG / 4; 214 size_t len = BITS_PER_LONG / 4;
191 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 215 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
192 len, ip); 216 len, ip);
@@ -197,43 +221,13 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
197 return ret; 221 return ret;
198} 222}
199 223
200
201struct sort_entry sort_dso = {
202 .se_header = "Shared Object",
203 .se_cmp = sort__dso_cmp,
204 .se_snprintf = hist_entry__dso_snprintf,
205 .se_width_idx = HISTC_DSO,
206};
207
208static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, 224static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
209 size_t size, 225 size_t size, unsigned int width)
210 unsigned int width __maybe_unused)
211{ 226{
212 return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, 227 return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip,
213 self->level, bf, size, width); 228 self->level, bf, size, width);
214} 229}
215 230
216/* --sort symbol */
217static int64_t
218sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
219{
220 u64 ip_l, ip_r;
221
222 if (!left->ms.sym && !right->ms.sym)
223 return right->level - left->level;
224
225 if (!left->ms.sym || !right->ms.sym)
226 return cmp_null(left->ms.sym, right->ms.sym);
227
228 if (left->ms.sym == right->ms.sym)
229 return 0;
230
231 ip_l = left->ms.sym->start;
232 ip_r = right->ms.sym->start;
233
234 return _sort__sym_cmp(left->ms.sym, right->ms.sym, ip_l, ip_r);
235}
236
237struct sort_entry sort_sym = { 231struct sort_entry sort_sym = {
238 .se_header = "Symbol", 232 .se_header = "Symbol",
239 .se_cmp = sort__sym_cmp, 233 .se_cmp = sort__sym_cmp,
@@ -253,7 +247,7 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
253 size_t size, 247 size_t size,
254 unsigned int width __maybe_unused) 248 unsigned int width __maybe_unused)
255{ 249{
256 FILE *fp; 250 FILE *fp = NULL;
257 char cmd[PATH_MAX + 2], *path = self->srcline, *nl; 251 char cmd[PATH_MAX + 2], *path = self->srcline, *nl;
258 size_t line_len; 252 size_t line_len;
259 253
@@ -274,7 +268,6 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
274 268
275 if (getline(&path, &line_len, fp) < 0 || !line_len) 269 if (getline(&path, &line_len, fp) < 0 || !line_len)
276 goto out_ip; 270 goto out_ip;
277 fclose(fp);
278 self->srcline = strdup(path); 271 self->srcline = strdup(path);
279 if (self->srcline == NULL) 272 if (self->srcline == NULL)
280 goto out_ip; 273 goto out_ip;
@@ -284,8 +277,12 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
284 *nl = '\0'; 277 *nl = '\0';
285 path = self->srcline; 278 path = self->srcline;
286out_path: 279out_path:
280 if (fp)
281 pclose(fp);
287 return repsep_snprintf(bf, size, "%s", path); 282 return repsep_snprintf(bf, size, "%s", path);
288out_ip: 283out_ip:
284 if (fp)
285 pclose(fp);
289 return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip); 286 return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip);
290} 287}
291 288
@@ -335,7 +332,7 @@ sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
335static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf, 332static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf,
336 size_t size, unsigned int width) 333 size_t size, unsigned int width)
337{ 334{
338 return repsep_snprintf(bf, size, "%-*d", width, self->cpu); 335 return repsep_snprintf(bf, size, "%*d", width, self->cpu);
339} 336}
340 337
341struct sort_entry sort_cpu = { 338struct sort_entry sort_cpu = {
@@ -345,6 +342,8 @@ struct sort_entry sort_cpu = {
345 .se_width_idx = HISTC_CPU, 342 .se_width_idx = HISTC_CPU,
346}; 343};
347 344
345/* sort keys for branch stacks */
346
348static int64_t 347static int64_t
349sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 348sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
350{ 349{
@@ -359,13 +358,6 @@ static int hist_entry__dso_from_snprintf(struct hist_entry *self, char *bf,
359 bf, size, width); 358 bf, size, width);
360} 359}
361 360
362struct sort_entry sort_dso_from = {
363 .se_header = "Source Shared Object",
364 .se_cmp = sort__dso_from_cmp,
365 .se_snprintf = hist_entry__dso_from_snprintf,
366 .se_width_idx = HISTC_DSO_FROM,
367};
368
369static int64_t 361static int64_t
370sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 362sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
371{ 363{
@@ -389,8 +381,7 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
389 if (!from_l->sym && !from_r->sym) 381 if (!from_l->sym && !from_r->sym)
390 return right->level - left->level; 382 return right->level - left->level;
391 383
392 return _sort__sym_cmp(from_l->sym, from_r->sym, from_l->addr, 384 return _sort__sym_cmp(from_l->sym, from_r->sym);
393 from_r->addr);
394} 385}
395 386
396static int64_t 387static int64_t
@@ -402,12 +393,11 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
402 if (!to_l->sym && !to_r->sym) 393 if (!to_l->sym && !to_r->sym)
403 return right->level - left->level; 394 return right->level - left->level;
404 395
405 return _sort__sym_cmp(to_l->sym, to_r->sym, to_l->addr, to_r->addr); 396 return _sort__sym_cmp(to_l->sym, to_r->sym);
406} 397}
407 398
408static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, 399static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf,
409 size_t size, 400 size_t size, unsigned int width)
410 unsigned int width __maybe_unused)
411{ 401{
412 struct addr_map_symbol *from = &self->branch_info->from; 402 struct addr_map_symbol *from = &self->branch_info->from;
413 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, 403 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
@@ -416,8 +406,7 @@ static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf,
416} 406}
417 407
418static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, 408static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf,
419 size_t size, 409 size_t size, unsigned int width)
420 unsigned int width __maybe_unused)
421{ 410{
422 struct addr_map_symbol *to = &self->branch_info->to; 411 struct addr_map_symbol *to = &self->branch_info->to;
423 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, 412 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
@@ -425,6 +414,13 @@ static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf,
425 414
426} 415}
427 416
417struct sort_entry sort_dso_from = {
418 .se_header = "Source Shared Object",
419 .se_cmp = sort__dso_from_cmp,
420 .se_snprintf = hist_entry__dso_from_snprintf,
421 .se_width_idx = HISTC_DSO_FROM,
422};
423
428struct sort_entry sort_dso_to = { 424struct sort_entry sort_dso_to = {
429 .se_header = "Target Shared Object", 425 .se_header = "Target Shared Object",
430 .se_cmp = sort__dso_to_cmp, 426 .se_cmp = sort__dso_to_cmp,
@@ -469,6 +465,304 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *self, char *bf,
469 return repsep_snprintf(bf, size, "%-*s", width, out); 465 return repsep_snprintf(bf, size, "%-*s", width, out);
470} 466}
471 467
468/* --sort daddr_sym */
469static int64_t
470sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
471{
472 uint64_t l = 0, r = 0;
473
474 if (left->mem_info)
475 l = left->mem_info->daddr.addr;
476 if (right->mem_info)
477 r = right->mem_info->daddr.addr;
478
479 return (int64_t)(r - l);
480}
481
482static int hist_entry__daddr_snprintf(struct hist_entry *self, char *bf,
483 size_t size, unsigned int width)
484{
485 uint64_t addr = 0;
486 struct map *map = NULL;
487 struct symbol *sym = NULL;
488
489 if (self->mem_info) {
490 addr = self->mem_info->daddr.addr;
491 map = self->mem_info->daddr.map;
492 sym = self->mem_info->daddr.sym;
493 }
494 return _hist_entry__sym_snprintf(map, sym, addr, self->level, bf, size,
495 width);
496}
497
498static int64_t
499sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
500{
501 struct map *map_l = NULL;
502 struct map *map_r = NULL;
503
504 if (left->mem_info)
505 map_l = left->mem_info->daddr.map;
506 if (right->mem_info)
507 map_r = right->mem_info->daddr.map;
508
509 return _sort__dso_cmp(map_l, map_r);
510}
511
512static int hist_entry__dso_daddr_snprintf(struct hist_entry *self, char *bf,
513 size_t size, unsigned int width)
514{
515 struct map *map = NULL;
516
517 if (self->mem_info)
518 map = self->mem_info->daddr.map;
519
520 return _hist_entry__dso_snprintf(map, bf, size, width);
521}
522
523static int64_t
524sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
525{
526 union perf_mem_data_src data_src_l;
527 union perf_mem_data_src data_src_r;
528
529 if (left->mem_info)
530 data_src_l = left->mem_info->data_src;
531 else
532 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
533
534 if (right->mem_info)
535 data_src_r = right->mem_info->data_src;
536 else
537 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
538
539 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
540}
541
542static int hist_entry__locked_snprintf(struct hist_entry *self, char *bf,
543 size_t size, unsigned int width)
544{
545 const char *out;
546 u64 mask = PERF_MEM_LOCK_NA;
547
548 if (self->mem_info)
549 mask = self->mem_info->data_src.mem_lock;
550
551 if (mask & PERF_MEM_LOCK_NA)
552 out = "N/A";
553 else if (mask & PERF_MEM_LOCK_LOCKED)
554 out = "Yes";
555 else
556 out = "No";
557
558 return repsep_snprintf(bf, size, "%-*s", width, out);
559}
560
561static int64_t
562sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
563{
564 union perf_mem_data_src data_src_l;
565 union perf_mem_data_src data_src_r;
566
567 if (left->mem_info)
568 data_src_l = left->mem_info->data_src;
569 else
570 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
571
572 if (right->mem_info)
573 data_src_r = right->mem_info->data_src;
574 else
575 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
576
577 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
578}
579
580static const char * const tlb_access[] = {
581 "N/A",
582 "HIT",
583 "MISS",
584 "L1",
585 "L2",
586 "Walker",
587 "Fault",
588};
589#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
590
591static int hist_entry__tlb_snprintf(struct hist_entry *self, char *bf,
592 size_t size, unsigned int width)
593{
594 char out[64];
595 size_t sz = sizeof(out) - 1; /* -1 for null termination */
596 size_t l = 0, i;
597 u64 m = PERF_MEM_TLB_NA;
598 u64 hit, miss;
599
600 out[0] = '\0';
601
602 if (self->mem_info)
603 m = self->mem_info->data_src.mem_dtlb;
604
605 hit = m & PERF_MEM_TLB_HIT;
606 miss = m & PERF_MEM_TLB_MISS;
607
608 /* already taken care of */
609 m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
610
611 for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
612 if (!(m & 0x1))
613 continue;
614 if (l) {
615 strcat(out, " or ");
616 l += 4;
617 }
618 strncat(out, tlb_access[i], sz - l);
619 l += strlen(tlb_access[i]);
620 }
621 if (*out == '\0')
622 strcpy(out, "N/A");
623 if (hit)
624 strncat(out, " hit", sz - l);
625 if (miss)
626 strncat(out, " miss", sz - l);
627
628 return repsep_snprintf(bf, size, "%-*s", width, out);
629}
630
631static int64_t
632sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
633{
634 union perf_mem_data_src data_src_l;
635 union perf_mem_data_src data_src_r;
636
637 if (left->mem_info)
638 data_src_l = left->mem_info->data_src;
639 else
640 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
641
642 if (right->mem_info)
643 data_src_r = right->mem_info->data_src;
644 else
645 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
646
647 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
648}
649
650static const char * const mem_lvl[] = {
651 "N/A",
652 "HIT",
653 "MISS",
654 "L1",
655 "LFB",
656 "L2",
657 "L3",
658 "Local RAM",
659 "Remote RAM (1 hop)",
660 "Remote RAM (2 hops)",
661 "Remote Cache (1 hop)",
662 "Remote Cache (2 hops)",
663 "I/O",
664 "Uncached",
665};
666#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
667
668static int hist_entry__lvl_snprintf(struct hist_entry *self, char *bf,
669 size_t size, unsigned int width)
670{
671 char out[64];
672 size_t sz = sizeof(out) - 1; /* -1 for null termination */
673 size_t i, l = 0;
674 u64 m = PERF_MEM_LVL_NA;
675 u64 hit, miss;
676
677 if (self->mem_info)
678 m = self->mem_info->data_src.mem_lvl;
679
680 out[0] = '\0';
681
682 hit = m & PERF_MEM_LVL_HIT;
683 miss = m & PERF_MEM_LVL_MISS;
684
685 /* already taken care of */
686 m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
687
688 for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
689 if (!(m & 0x1))
690 continue;
691 if (l) {
692 strcat(out, " or ");
693 l += 4;
694 }
695 strncat(out, mem_lvl[i], sz - l);
696 l += strlen(mem_lvl[i]);
697 }
698 if (*out == '\0')
699 strcpy(out, "N/A");
700 if (hit)
701 strncat(out, " hit", sz - l);
702 if (miss)
703 strncat(out, " miss", sz - l);
704
705 return repsep_snprintf(bf, size, "%-*s", width, out);
706}
707
708static int64_t
709sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
710{
711 union perf_mem_data_src data_src_l;
712 union perf_mem_data_src data_src_r;
713
714 if (left->mem_info)
715 data_src_l = left->mem_info->data_src;
716 else
717 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
718
719 if (right->mem_info)
720 data_src_r = right->mem_info->data_src;
721 else
722 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
723
724 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
725}
726
727static const char * const snoop_access[] = {
728 "N/A",
729 "None",
730 "Miss",
731 "Hit",
732 "HitM",
733};
734#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
735
736static int hist_entry__snoop_snprintf(struct hist_entry *self, char *bf,
737 size_t size, unsigned int width)
738{
739 char out[64];
740 size_t sz = sizeof(out) - 1; /* -1 for null termination */
741 size_t i, l = 0;
742 u64 m = PERF_MEM_SNOOP_NA;
743
744 out[0] = '\0';
745
746 if (self->mem_info)
747 m = self->mem_info->data_src.mem_snoop;
748
749 for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
750 if (!(m & 0x1))
751 continue;
752 if (l) {
753 strcat(out, " or ");
754 l += 4;
755 }
756 strncat(out, snoop_access[i], sz - l);
757 l += strlen(snoop_access[i]);
758 }
759
760 if (*out == '\0')
761 strcpy(out, "N/A");
762
763 return repsep_snprintf(bf, size, "%-*s", width, out);
764}
765
472struct sort_entry sort_mispredict = { 766struct sort_entry sort_mispredict = {
473 .se_header = "Branch Mispredicted", 767 .se_header = "Branch Mispredicted",
474 .se_cmp = sort__mispredict_cmp, 768 .se_cmp = sort__mispredict_cmp,
@@ -476,6 +770,91 @@ struct sort_entry sort_mispredict = {
476 .se_width_idx = HISTC_MISPREDICT, 770 .se_width_idx = HISTC_MISPREDICT,
477}; 771};
478 772
773static u64 he_weight(struct hist_entry *he)
774{
775 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
776}
777
778static int64_t
779sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
780{
781 return he_weight(left) - he_weight(right);
782}
783
784static int hist_entry__local_weight_snprintf(struct hist_entry *self, char *bf,
785 size_t size, unsigned int width)
786{
787 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(self));
788}
789
790struct sort_entry sort_local_weight = {
791 .se_header = "Local Weight",
792 .se_cmp = sort__local_weight_cmp,
793 .se_snprintf = hist_entry__local_weight_snprintf,
794 .se_width_idx = HISTC_LOCAL_WEIGHT,
795};
796
797static int64_t
798sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
799{
800 return left->stat.weight - right->stat.weight;
801}
802
803static int hist_entry__global_weight_snprintf(struct hist_entry *self, char *bf,
804 size_t size, unsigned int width)
805{
806 return repsep_snprintf(bf, size, "%-*llu", width, self->stat.weight);
807}
808
809struct sort_entry sort_global_weight = {
810 .se_header = "Weight",
811 .se_cmp = sort__global_weight_cmp,
812 .se_snprintf = hist_entry__global_weight_snprintf,
813 .se_width_idx = HISTC_GLOBAL_WEIGHT,
814};
815
816struct sort_entry sort_mem_daddr_sym = {
817 .se_header = "Data Symbol",
818 .se_cmp = sort__daddr_cmp,
819 .se_snprintf = hist_entry__daddr_snprintf,
820 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
821};
822
823struct sort_entry sort_mem_daddr_dso = {
824 .se_header = "Data Object",
825 .se_cmp = sort__dso_daddr_cmp,
826 .se_snprintf = hist_entry__dso_daddr_snprintf,
827 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
828};
829
830struct sort_entry sort_mem_locked = {
831 .se_header = "Locked",
832 .se_cmp = sort__locked_cmp,
833 .se_snprintf = hist_entry__locked_snprintf,
834 .se_width_idx = HISTC_MEM_LOCKED,
835};
836
837struct sort_entry sort_mem_tlb = {
838 .se_header = "TLB access",
839 .se_cmp = sort__tlb_cmp,
840 .se_snprintf = hist_entry__tlb_snprintf,
841 .se_width_idx = HISTC_MEM_TLB,
842};
843
844struct sort_entry sort_mem_lvl = {
845 .se_header = "Memory access",
846 .se_cmp = sort__lvl_cmp,
847 .se_snprintf = hist_entry__lvl_snprintf,
848 .se_width_idx = HISTC_MEM_LVL,
849};
850
851struct sort_entry sort_mem_snoop = {
852 .se_header = "Snoop",
853 .se_cmp = sort__snoop_cmp,
854 .se_snprintf = hist_entry__snoop_snprintf,
855 .se_width_idx = HISTC_MEM_SNOOP,
856};
857
479struct sort_dimension { 858struct sort_dimension {
480 const char *name; 859 const char *name;
481 struct sort_entry *entry; 860 struct sort_entry *entry;
@@ -484,30 +863,48 @@ struct sort_dimension {
484 863
485#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 864#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
486 865
487static struct sort_dimension sort_dimensions[] = { 866static struct sort_dimension common_sort_dimensions[] = {
488 DIM(SORT_PID, "pid", sort_thread), 867 DIM(SORT_PID, "pid", sort_thread),
489 DIM(SORT_COMM, "comm", sort_comm), 868 DIM(SORT_COMM, "comm", sort_comm),
490 DIM(SORT_DSO, "dso", sort_dso), 869 DIM(SORT_DSO, "dso", sort_dso),
491 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
492 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
493 DIM(SORT_SYM, "symbol", sort_sym), 870 DIM(SORT_SYM, "symbol", sort_sym),
494 DIM(SORT_SYM_TO, "symbol_from", sort_sym_from),
495 DIM(SORT_SYM_FROM, "symbol_to", sort_sym_to),
496 DIM(SORT_PARENT, "parent", sort_parent), 871 DIM(SORT_PARENT, "parent", sort_parent),
497 DIM(SORT_CPU, "cpu", sort_cpu), 872 DIM(SORT_CPU, "cpu", sort_cpu),
498 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
499 DIM(SORT_SRCLINE, "srcline", sort_srcline), 873 DIM(SORT_SRCLINE, "srcline", sort_srcline),
874 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
875 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
876 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
877 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
878 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
879 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
880 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
881 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
882};
883
884#undef DIM
885
886#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
887
888static struct sort_dimension bstack_sort_dimensions[] = {
889 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
890 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
891 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
892 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
893 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
500}; 894};
501 895
896#undef DIM
897
502int sort_dimension__add(const char *tok) 898int sort_dimension__add(const char *tok)
503{ 899{
504 unsigned int i; 900 unsigned int i;
505 901
506 for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { 902 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
507 struct sort_dimension *sd = &sort_dimensions[i]; 903 struct sort_dimension *sd = &common_sort_dimensions[i];
508 904
509 if (strncasecmp(tok, sd->name, strlen(tok))) 905 if (strncasecmp(tok, sd->name, strlen(tok)))
510 continue; 906 continue;
907
511 if (sd->entry == &sort_parent) { 908 if (sd->entry == &sort_parent) {
512 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); 909 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
513 if (ret) { 910 if (ret) {
@@ -520,7 +917,8 @@ int sort_dimension__add(const char *tok)
520 sort__has_parent = 1; 917 sort__has_parent = 1;
521 } else if (sd->entry == &sort_sym || 918 } else if (sd->entry == &sort_sym ||
522 sd->entry == &sort_sym_from || 919 sd->entry == &sort_sym_from ||
523 sd->entry == &sort_sym_to) { 920 sd->entry == &sort_sym_to ||
921 sd->entry == &sort_mem_daddr_sym) {
524 sort__has_sym = 1; 922 sort__has_sym = 1;
525 } 923 }
526 924
@@ -530,52 +928,69 @@ int sort_dimension__add(const char *tok)
530 if (sd->entry->se_collapse) 928 if (sd->entry->se_collapse)
531 sort__need_collapse = 1; 929 sort__need_collapse = 1;
532 930
533 if (list_empty(&hist_entry__sort_list)) { 931 if (list_empty(&hist_entry__sort_list))
534 if (!strcmp(sd->name, "pid")) 932 sort__first_dimension = i;
535 sort__first_dimension = SORT_PID; 933
536 else if (!strcmp(sd->name, "comm")) 934 list_add_tail(&sd->entry->list, &hist_entry__sort_list);
537 sort__first_dimension = SORT_COMM; 935 sd->taken = 1;
538 else if (!strcmp(sd->name, "dso")) 936
539 sort__first_dimension = SORT_DSO; 937 return 0;
540 else if (!strcmp(sd->name, "symbol")) 938 }
541 sort__first_dimension = SORT_SYM; 939
542 else if (!strcmp(sd->name, "parent")) 940 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
543 sort__first_dimension = SORT_PARENT; 941 struct sort_dimension *sd = &bstack_sort_dimensions[i];
544 else if (!strcmp(sd->name, "cpu")) 942
545 sort__first_dimension = SORT_CPU; 943 if (strncasecmp(tok, sd->name, strlen(tok)))
546 else if (!strcmp(sd->name, "symbol_from")) 944 continue;
547 sort__first_dimension = SORT_SYM_FROM; 945
548 else if (!strcmp(sd->name, "symbol_to")) 946 if (sort__branch_mode != 1)
549 sort__first_dimension = SORT_SYM_TO; 947 return -EINVAL;
550 else if (!strcmp(sd->name, "dso_from")) 948
551 sort__first_dimension = SORT_DSO_FROM; 949 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
552 else if (!strcmp(sd->name, "dso_to")) 950 sort__has_sym = 1;
553 sort__first_dimension = SORT_DSO_TO; 951
554 else if (!strcmp(sd->name, "mispredict")) 952 if (sd->taken)
555 sort__first_dimension = SORT_MISPREDICT; 953 return 0;
556 } 954
955 if (sd->entry->se_collapse)
956 sort__need_collapse = 1;
957
958 if (list_empty(&hist_entry__sort_list))
959 sort__first_dimension = i + __SORT_BRANCH_STACK;
557 960
558 list_add_tail(&sd->entry->list, &hist_entry__sort_list); 961 list_add_tail(&sd->entry->list, &hist_entry__sort_list);
559 sd->taken = 1; 962 sd->taken = 1;
560 963
561 return 0; 964 return 0;
562 } 965 }
966
563 return -ESRCH; 967 return -ESRCH;
564} 968}
565 969
566void setup_sorting(const char * const usagestr[], const struct option *opts) 970int setup_sorting(void)
567{ 971{
568 char *tmp, *tok, *str = strdup(sort_order); 972 char *tmp, *tok, *str = strdup(sort_order);
973 int ret = 0;
974
975 if (str == NULL) {
976 error("Not enough memory to setup sort keys");
977 return -ENOMEM;
978 }
569 979
570 for (tok = strtok_r(str, ", ", &tmp); 980 for (tok = strtok_r(str, ", ", &tmp);
571 tok; tok = strtok_r(NULL, ", ", &tmp)) { 981 tok; tok = strtok_r(NULL, ", ", &tmp)) {
572 if (sort_dimension__add(tok) < 0) { 982 ret = sort_dimension__add(tok);
983 if (ret == -EINVAL) {
984 error("Invalid --sort key: `%s'", tok);
985 break;
986 } else if (ret == -ESRCH) {
573 error("Unknown --sort key: `%s'", tok); 987 error("Unknown --sort key: `%s'", tok);
574 usage_with_options(usagestr, opts); 988 break;
575 } 989 }
576 } 990 }
577 991
578 free(str); 992 free(str);
993 return ret;
579} 994}
580 995
581void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, 996void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index b4e8c3ba559d..f24bdf64238c 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -49,15 +49,13 @@ struct he_stat {
49 u64 period_us; 49 u64 period_us;
50 u64 period_guest_sys; 50 u64 period_guest_sys;
51 u64 period_guest_us; 51 u64 period_guest_us;
52 u64 weight;
52 u32 nr_events; 53 u32 nr_events;
53}; 54};
54 55
55struct hist_entry_diff { 56struct hist_entry_diff {
56 bool computed; 57 bool computed;
57 58
58 /* PERF_HPP__DISPL */
59 int displacement;
60
61 /* PERF_HPP__DELTA */ 59 /* PERF_HPP__DELTA */
62 double period_ratio_delta; 60 double period_ratio_delta;
63 61
@@ -103,7 +101,8 @@ struct hist_entry {
103 struct rb_root sorted_chain; 101 struct rb_root sorted_chain;
104 struct branch_info *branch_info; 102 struct branch_info *branch_info;
105 struct hists *hists; 103 struct hists *hists;
106 struct callchain_root callchain[0]; 104 struct mem_info *mem_info;
105 struct callchain_root callchain[0]; /* must be last member */
107}; 106};
108 107
109static inline bool hist_entry__has_pairs(struct hist_entry *he) 108static inline bool hist_entry__has_pairs(struct hist_entry *he)
@@ -118,25 +117,37 @@ static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he)
118 return NULL; 117 return NULL;
119} 118}
120 119
121static inline void hist__entry_add_pair(struct hist_entry *he, 120static inline void hist_entry__add_pair(struct hist_entry *he,
122 struct hist_entry *pair) 121 struct hist_entry *pair)
123{ 122{
124 list_add_tail(&he->pairs.head, &pair->pairs.node); 123 list_add_tail(&he->pairs.head, &pair->pairs.node);
125} 124}
126 125
127enum sort_type { 126enum sort_type {
127 /* common sort keys */
128 SORT_PID, 128 SORT_PID,
129 SORT_COMM, 129 SORT_COMM,
130 SORT_DSO, 130 SORT_DSO,
131 SORT_SYM, 131 SORT_SYM,
132 SORT_PARENT, 132 SORT_PARENT,
133 SORT_CPU, 133 SORT_CPU,
134 SORT_DSO_FROM, 134 SORT_SRCLINE,
135 SORT_LOCAL_WEIGHT,
136 SORT_GLOBAL_WEIGHT,
137 SORT_MEM_DADDR_SYMBOL,
138 SORT_MEM_DADDR_DSO,
139 SORT_MEM_LOCKED,
140 SORT_MEM_TLB,
141 SORT_MEM_LVL,
142 SORT_MEM_SNOOP,
143
144 /* branch stack specific sort keys */
145 __SORT_BRANCH_STACK,
146 SORT_DSO_FROM = __SORT_BRANCH_STACK,
135 SORT_DSO_TO, 147 SORT_DSO_TO,
136 SORT_SYM_FROM, 148 SORT_SYM_FROM,
137 SORT_SYM_TO, 149 SORT_SYM_TO,
138 SORT_MISPREDICT, 150 SORT_MISPREDICT,
139 SORT_SRCLINE,
140}; 151};
141 152
142/* 153/*
@@ -159,7 +170,7 @@ struct sort_entry {
159extern struct sort_entry sort_thread; 170extern struct sort_entry sort_thread;
160extern struct list_head hist_entry__sort_list; 171extern struct list_head hist_entry__sort_list;
161 172
162void setup_sorting(const char * const usagestr[], const struct option *opts); 173int setup_sorting(void);
163extern int sort_dimension__add(const char *); 174extern int sort_dimension__add(const char *);
164void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, 175void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
165 const char *list_name, FILE *fp); 176 const char *list_name, FILE *fp);
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 346707df04b9..29c7b2cb2521 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -332,6 +332,24 @@ char *strxfrchar(char *s, char from, char to)
332} 332}
333 333
334/** 334/**
335 * ltrim - Removes leading whitespace from @s.
336 * @s: The string to be stripped.
337 *
338 * Return pointer to the first non-whitespace character in @s.
339 */
340char *ltrim(char *s)
341{
342 int len = strlen(s);
343
344 while (len && isspace(*s)) {
345 len--;
346 s++;
347 }
348
349 return s;
350}
351
352/**
335 * rtrim - Removes trailing whitespace from @s. 353 * rtrim - Removes trailing whitespace from @s.
336 * @s: The string to be stripped. 354 * @s: The string to be stripped.
337 * 355 *
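
ltrim() returns a pointer into the same buffer rather than shifting bytes, so pair it with rtrim() and keep the original pointer around for free(). A small usage sketch with a hypothetical input string:

        char *buf = strdup("   cycles:u   ");
        char *key;

        if (buf) {
                key = ltrim(rtrim(buf));        /* "cycles:u", still inside buf */
                printf("key: '%s'\n", key);
                free(buf);                      /* free the original pointer, not key */
        }
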
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 155d8b7078a7..eabdce0a2daa 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -35,11 +35,11 @@ out_delete:
35 return NULL; 35 return NULL;
36} 36}
37 37
38static void str_node__delete(struct str_node *self, bool dupstr) 38static void str_node__delete(struct str_node *snode, bool dupstr)
39{ 39{
40 if (dupstr) 40 if (dupstr)
41 free((void *)self->s); 41 free((void *)snode->s);
42 free(self); 42 free(snode);
43} 43}
44 44
45static 45static
@@ -59,12 +59,12 @@ static int strlist__node_cmp(struct rb_node *rb_node, const void *entry)
59 return strcmp(snode->s, str); 59 return strcmp(snode->s, str);
60} 60}
61 61
62int strlist__add(struct strlist *self, const char *new_entry) 62int strlist__add(struct strlist *slist, const char *new_entry)
63{ 63{
64 return rblist__add_node(&self->rblist, new_entry); 64 return rblist__add_node(&slist->rblist, new_entry);
65} 65}
66 66
67int strlist__load(struct strlist *self, const char *filename) 67int strlist__load(struct strlist *slist, const char *filename)
68{ 68{
69 char entry[1024]; 69 char entry[1024];
70 int err; 70 int err;
@@ -80,7 +80,7 @@ int strlist__load(struct strlist *self, const char *filename)
80 continue; 80 continue;
81 entry[len - 1] = '\0'; 81 entry[len - 1] = '\0';
82 82
83 err = strlist__add(self, entry); 83 err = strlist__add(slist, entry);
84 if (err != 0) 84 if (err != 0)
85 goto out; 85 goto out;
86 } 86 }
@@ -107,56 +107,56 @@ struct str_node *strlist__find(struct strlist *slist, const char *entry)
107 return snode; 107 return snode;
108} 108}
109 109
110static int strlist__parse_list_entry(struct strlist *self, const char *s) 110static int strlist__parse_list_entry(struct strlist *slist, const char *s)
111{ 111{
112 if (strncmp(s, "file://", 7) == 0) 112 if (strncmp(s, "file://", 7) == 0)
113 return strlist__load(self, s + 7); 113 return strlist__load(slist, s + 7);
114 114
115 return strlist__add(self, s); 115 return strlist__add(slist, s);
116} 116}
117 117
118int strlist__parse_list(struct strlist *self, const char *s) 118int strlist__parse_list(struct strlist *slist, const char *s)
119{ 119{
120 char *sep; 120 char *sep;
121 int err; 121 int err;
122 122
123 while ((sep = strchr(s, ',')) != NULL) { 123 while ((sep = strchr(s, ',')) != NULL) {
124 *sep = '\0'; 124 *sep = '\0';
125 err = strlist__parse_list_entry(self, s); 125 err = strlist__parse_list_entry(slist, s);
126 *sep = ','; 126 *sep = ',';
127 if (err != 0) 127 if (err != 0)
128 return err; 128 return err;
129 s = sep + 1; 129 s = sep + 1;
130 } 130 }
131 131
132 return *s ? strlist__parse_list_entry(self, s) : 0; 132 return *s ? strlist__parse_list_entry(slist, s) : 0;
133} 133}
134 134
135struct strlist *strlist__new(bool dupstr, const char *slist) 135struct strlist *strlist__new(bool dupstr, const char *list)
136{ 136{
137 struct strlist *self = malloc(sizeof(*self)); 137 struct strlist *slist = malloc(sizeof(*slist));
138 138
139 if (self != NULL) { 139 if (slist != NULL) {
140 rblist__init(&self->rblist); 140 rblist__init(&slist->rblist);
141 self->rblist.node_cmp = strlist__node_cmp; 141 slist->rblist.node_cmp = strlist__node_cmp;
142 self->rblist.node_new = strlist__node_new; 142 slist->rblist.node_new = strlist__node_new;
143 self->rblist.node_delete = strlist__node_delete; 143 slist->rblist.node_delete = strlist__node_delete;
144 144
145 self->dupstr = dupstr; 145 slist->dupstr = dupstr;
146 if (slist && strlist__parse_list(self, slist) != 0) 146 if (list && strlist__parse_list(slist, list) != 0)
147 goto out_error; 147 goto out_error;
148 } 148 }
149 149
150 return self; 150 return slist;
151out_error: 151out_error:
152 free(self); 152 free(slist);
153 return NULL; 153 return NULL;
154} 154}
155 155
156void strlist__delete(struct strlist *self) 156void strlist__delete(struct strlist *slist)
157{ 157{
158 if (self != NULL) 158 if (slist != NULL)
159 rblist__delete(&self->rblist); 159 rblist__delete(&slist->rblist);
160} 160}
161 161
162struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx) 162struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx)
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index dd9f922ec67c..5c7f87069d9c 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -17,34 +17,34 @@ struct strlist {
17}; 17};
18 18
19struct strlist *strlist__new(bool dupstr, const char *slist); 19struct strlist *strlist__new(bool dupstr, const char *slist);
20void strlist__delete(struct strlist *self); 20void strlist__delete(struct strlist *slist);
21 21
22void strlist__remove(struct strlist *self, struct str_node *sn); 22void strlist__remove(struct strlist *slist, struct str_node *sn);
23int strlist__load(struct strlist *self, const char *filename); 23int strlist__load(struct strlist *slist, const char *filename);
24int strlist__add(struct strlist *self, const char *str); 24int strlist__add(struct strlist *slist, const char *str);
25 25
26struct str_node *strlist__entry(const struct strlist *self, unsigned int idx); 26struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx);
27struct str_node *strlist__find(struct strlist *self, const char *entry); 27struct str_node *strlist__find(struct strlist *slist, const char *entry);
28 28
29static inline bool strlist__has_entry(struct strlist *self, const char *entry) 29static inline bool strlist__has_entry(struct strlist *slist, const char *entry)
30{ 30{
31 return strlist__find(self, entry) != NULL; 31 return strlist__find(slist, entry) != NULL;
32} 32}
33 33
34static inline bool strlist__empty(const struct strlist *self) 34static inline bool strlist__empty(const struct strlist *slist)
35{ 35{
36 return rblist__empty(&self->rblist); 36 return rblist__empty(&slist->rblist);
37} 37}
38 38
39static inline unsigned int strlist__nr_entries(const struct strlist *self) 39static inline unsigned int strlist__nr_entries(const struct strlist *slist)
40{ 40{
41 return rblist__nr_entries(&self->rblist); 41 return rblist__nr_entries(&slist->rblist);
42} 42}
43 43
44/* For strlist iteration */ 44/* For strlist iteration */
45static inline struct str_node *strlist__first(struct strlist *self) 45static inline struct str_node *strlist__first(struct strlist *slist)
46{ 46{
47 struct rb_node *rn = rb_first(&self->rblist.entries); 47 struct rb_node *rn = rb_first(&slist->rblist.entries);
48 return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; 48 return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
49} 49}
50static inline struct str_node *strlist__next(struct str_node *sn) 50static inline struct str_node *strlist__next(struct str_node *sn)
@@ -59,21 +59,21 @@ static inline struct str_node *strlist__next(struct str_node *sn)
59/** 59/**
60 * strlist_for_each - iterate over a strlist 60 * strlist_for_each - iterate over a strlist
61 * @pos: the &struct str_node to use as a loop cursor. 61 * @pos: the &struct str_node to use as a loop cursor.
62 * @self: the &struct strlist for loop. 62 * @slist: the &struct strlist for loop.
63 */ 63 */
64#define strlist__for_each(pos, self) \ 64#define strlist__for_each(pos, slist) \
65 for (pos = strlist__first(self); pos; pos = strlist__next(pos)) 65 for (pos = strlist__first(slist); pos; pos = strlist__next(pos))
66 66
67/** 67/**
68 * strlist_for_each_safe - iterate over a strlist safe against removal of 68 * strlist_for_each_safe - iterate over a strlist safe against removal of
69 * str_node 69 * str_node
70 * @pos: the &struct str_node to use as a loop cursor. 70 * @pos: the &struct str_node to use as a loop cursor.
71 * @n: another &struct str_node to use as temporary storage. 71 * @n: another &struct str_node to use as temporary storage.
72 * @self: the &struct strlist for loop. 72 * @slist: the &struct strlist for loop.
73 */ 73 */
74#define strlist__for_each_safe(pos, n, self) \ 74#define strlist__for_each_safe(pos, n, slist) \
75 for (pos = strlist__first(self), n = strlist__next(pos); pos;\ 75 for (pos = strlist__first(slist), n = strlist__next(pos); pos;\
76 pos = n, n = strlist__next(n)) 76 pos = n, n = strlist__next(n))
77 77
78int strlist__parse_list(struct strlist *self, const char *s); 78int strlist__parse_list(struct strlist *slist, const char *s);
79#endif /* __PERF_STRLIST_H */ 79#endif /* __PERF_STRLIST_H */
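
A short usage sketch of the API above: entries come from a comma-separated string, a "file://" item loads one entry per line from that file (the path here is hypothetical), and iteration uses the renamed slist parameter:

        struct strlist *slist = strlist__new(true, "foo,bar,file:///tmp/names.txt");
        struct str_node *pos;

        if (slist == NULL)
                return -ENOMEM;

        strlist__for_each(pos, slist)
                printf("%s\n", pos->s);

        printf("%u entries\n", strlist__nr_entries(slist));
        strlist__delete(slist);
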
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index db0cc92cf2ea..4b12bf850325 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1,6 +1,3 @@
1#include <libelf.h>
2#include <gelf.h>
3#include <elf.h>
4#include <fcntl.h> 1#include <fcntl.h>
5#include <stdio.h> 2#include <stdio.h>
6#include <errno.h> 3#include <errno.h>
@@ -718,6 +715,17 @@ int dso__load_sym(struct dso *dso, struct map *map,
718 sym.st_value); 715 sym.st_value);
719 used_opd = true; 716 used_opd = true;
720 } 717 }
 718		/*
 719		 * When loading symbols from a data mapping, ABS symbols (which
 720		 * have a value of SHN_ABS in their st_shndx) fail at
 721		 * elf_getscn(), and that marks the whole load as a failure, so
 722		 * already-loaded symbols cannot be fixed up.
 723		 *
 724		 * I'm not sure what should be done. Just ignore them for now.
 725		 * - Namhyung Kim
 726		 */
727 if (sym.st_shndx == SHN_ABS)
728 continue;
721 729
722 sec = elf_getscn(runtime_ss->elf, sym.st_shndx); 730 sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
723 if (!sec) 731 if (!sec)
@@ -798,9 +806,12 @@ int dso__load_sym(struct dso *dso, struct map *map,
798 * DWARF DW_compile_unit has this, but we don't always have access 806 * DWARF DW_compile_unit has this, but we don't always have access
799 * to it... 807 * to it...
800 */ 808 */
801 demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI); 809 if (symbol_conf.demangle) {
802 if (demangled != NULL) 810 demangled = bfd_demangle(NULL, elf_name,
803 elf_name = demangled; 811 DMGL_PARAMS | DMGL_ANSI);
812 if (demangled != NULL)
813 elf_name = demangled;
814 }
804new_symbol: 815new_symbol:
805 f = symbol__new(sym.st_value, sym.st_size, 816 f = symbol__new(sym.st_value, sym.st_size,
806 GELF_ST_BIND(sym.st_info), elf_name); 817 GELF_ST_BIND(sym.st_info), elf_name);
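
The same guard outside perf, with plain libelf (gelf.h): skip SHN_ABS entries before ever calling elf_getscn(), so one absolute symbol cannot fail the whole load. Names here are illustrative, not perf code:

        #include <gelf.h>

        static void walk_mappable_syms(Elf *elf, Elf_Scn *symtab, GElf_Shdr *shdr)
        {
                Elf_Data *data = elf_getdata(symtab, NULL);
                size_t i, nr = shdr->sh_size / shdr->sh_entsize;
                GElf_Sym sym;

                for (i = 0; i < nr && data; i++) {
                        if (!gelf_getsym(data, i, &sym))
                                continue;
                        if (sym.st_shndx == SHN_ABS)
                                continue;       /* absolute value, no backing section */
                        if (!elf_getscn(elf, sym.st_shndx))
                                continue;       /* still defensive, as dso__load_sym is */
                        /* ... resolve and record the symbol ... */
                }
        }
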
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index 259f8f2ea9c9..a7390cde63bc 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -1,6 +1,5 @@
1#include "symbol.h" 1#include "symbol.h"
2 2
3#include <elf.h>
4#include <stdio.h> 3#include <stdio.h>
5#include <fcntl.h> 4#include <fcntl.h>
6#include <string.h> 5#include <string.h>
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 295f8d4feedf..8cf3b5426a9a 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -28,14 +28,15 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
28 symbol_filter_t filter); 28 symbol_filter_t filter);
29static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, 29static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
30 symbol_filter_t filter); 30 symbol_filter_t filter);
31static int vmlinux_path__nr_entries; 31int vmlinux_path__nr_entries;
32static char **vmlinux_path; 32char **vmlinux_path;
33 33
34struct symbol_conf symbol_conf = { 34struct symbol_conf symbol_conf = {
35 .exclude_other = true, 35 .exclude_other = true,
36 .use_modules = true, 36 .use_modules = true,
37 .try_vmlinux_path = true, 37 .try_vmlinux_path = true,
38 .annotate_src = true, 38 .annotate_src = true,
39 .demangle = true,
39 .symfs = "", 40 .symfs = "",
40}; 41};
41 42
@@ -202,13 +203,6 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
202 curr->end = ~0ULL; 203 curr->end = ~0ULL;
203} 204}
204 205
205static void map_groups__fixup_end(struct map_groups *mg)
206{
207 int i;
208 for (i = 0; i < MAP__NR_TYPES; ++i)
209 __map_groups__fixup_end(mg, i);
210}
211
212struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) 206struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
213{ 207{
214 size_t namelen = strlen(name) + 1; 208 size_t namelen = strlen(name) + 1;
@@ -652,8 +646,8 @@ discard_symbol: rb_erase(&pos->rb_node, root);
652 return count + moved; 646 return count + moved;
653} 647}
654 648
655static bool symbol__restricted_filename(const char *filename, 649bool symbol__restricted_filename(const char *filename,
656 const char *restricted_filename) 650 const char *restricted_filename)
657{ 651{
658 bool restricted = false; 652 bool restricted = false;
659 653
@@ -775,10 +769,6 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
775 else 769 else
776 machine = NULL; 770 machine = NULL;
777 771
778 name = malloc(PATH_MAX);
779 if (!name)
780 return -1;
781
782 dso->adjust_symbols = 0; 772 dso->adjust_symbols = 0;
783 773
784 if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { 774 if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
@@ -802,6 +792,10 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
802 if (machine) 792 if (machine)
803 root_dir = machine->root_dir; 793 root_dir = machine->root_dir;
804 794
795 name = malloc(PATH_MAX);
796 if (!name)
797 return -1;
798
805 /* Iterate over candidate debug images. 799 /* Iterate over candidate debug images.
806 * Keep track of "interesting" ones (those which have a symtab, dynsym, 800 * Keep track of "interesting" ones (those which have a symtab, dynsym,
807 * and/or opd section) for processing. 801 * and/or opd section) for processing.
@@ -887,200 +881,6 @@ struct map *map_groups__find_by_name(struct map_groups *mg,
887 return NULL; 881 return NULL;
888} 882}
889 883
890static int map_groups__set_modules_path_dir(struct map_groups *mg,
891 const char *dir_name)
892{
893 struct dirent *dent;
894 DIR *dir = opendir(dir_name);
895 int ret = 0;
896
897 if (!dir) {
898 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
899 return -1;
900 }
901
902 while ((dent = readdir(dir)) != NULL) {
903 char path[PATH_MAX];
904 struct stat st;
905
906 /*sshfs might return bad dent->d_type, so we have to stat*/
907 snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
908 if (stat(path, &st))
909 continue;
910
911 if (S_ISDIR(st.st_mode)) {
912 if (!strcmp(dent->d_name, ".") ||
913 !strcmp(dent->d_name, ".."))
914 continue;
915
916 ret = map_groups__set_modules_path_dir(mg, path);
917 if (ret < 0)
918 goto out;
919 } else {
920 char *dot = strrchr(dent->d_name, '.'),
921 dso_name[PATH_MAX];
922 struct map *map;
923 char *long_name;
924
925 if (dot == NULL || strcmp(dot, ".ko"))
926 continue;
927 snprintf(dso_name, sizeof(dso_name), "[%.*s]",
928 (int)(dot - dent->d_name), dent->d_name);
929
930 strxfrchar(dso_name, '-', '_');
931 map = map_groups__find_by_name(mg, MAP__FUNCTION,
932 dso_name);
933 if (map == NULL)
934 continue;
935
936 long_name = strdup(path);
937 if (long_name == NULL) {
938 ret = -1;
939 goto out;
940 }
941 dso__set_long_name(map->dso, long_name);
942 map->dso->lname_alloc = 1;
943 dso__kernel_module_get_build_id(map->dso, "");
944 }
945 }
946
947out:
948 closedir(dir);
949 return ret;
950}
951
952static char *get_kernel_version(const char *root_dir)
953{
954 char version[PATH_MAX];
955 FILE *file;
956 char *name, *tmp;
957 const char *prefix = "Linux version ";
958
959 sprintf(version, "%s/proc/version", root_dir);
960 file = fopen(version, "r");
961 if (!file)
962 return NULL;
963
964 version[0] = '\0';
965 tmp = fgets(version, sizeof(version), file);
966 fclose(file);
967
968 name = strstr(version, prefix);
969 if (!name)
970 return NULL;
971 name += strlen(prefix);
972 tmp = strchr(name, ' ');
973 if (tmp)
974 *tmp = '\0';
975
976 return strdup(name);
977}
978
979static int machine__set_modules_path(struct machine *machine)
980{
981 char *version;
982 char modules_path[PATH_MAX];
983
984 version = get_kernel_version(machine->root_dir);
985 if (!version)
986 return -1;
987
988 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
989 machine->root_dir, version);
990 free(version);
991
992 return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
993}
994
995struct map *machine__new_module(struct machine *machine, u64 start,
996 const char *filename)
997{
998 struct map *map;
999 struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
1000
1001 if (dso == NULL)
1002 return NULL;
1003
1004 map = map__new2(start, dso, MAP__FUNCTION);
1005 if (map == NULL)
1006 return NULL;
1007
1008 if (machine__is_host(machine))
1009 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
1010 else
1011 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
1012 map_groups__insert(&machine->kmaps, map);
1013 return map;
1014}
1015
1016static int machine__create_modules(struct machine *machine)
1017{
1018 char *line = NULL;
1019 size_t n;
1020 FILE *file;
1021 struct map *map;
1022 const char *modules;
1023 char path[PATH_MAX];
1024
1025 if (machine__is_default_guest(machine))
1026 modules = symbol_conf.default_guest_modules;
1027 else {
1028 sprintf(path, "%s/proc/modules", machine->root_dir);
1029 modules = path;
1030 }
1031
1032 if (symbol__restricted_filename(path, "/proc/modules"))
1033 return -1;
1034
1035 file = fopen(modules, "r");
1036 if (file == NULL)
1037 return -1;
1038
1039 while (!feof(file)) {
1040 char name[PATH_MAX];
1041 u64 start;
1042 char *sep;
1043 int line_len;
1044
1045 line_len = getline(&line, &n, file);
1046 if (line_len < 0)
1047 break;
1048
1049 if (!line)
1050 goto out_failure;
1051
1052 line[--line_len] = '\0'; /* \n */
1053
1054 sep = strrchr(line, 'x');
1055 if (sep == NULL)
1056 continue;
1057
1058 hex2u64(sep + 1, &start);
1059
1060 sep = strchr(line, ' ');
1061 if (sep == NULL)
1062 continue;
1063
1064 *sep = '\0';
1065
1066 snprintf(name, sizeof(name), "[%s]", line);
1067 map = machine__new_module(machine, start, name);
1068 if (map == NULL)
1069 goto out_delete_line;
1070 dso__kernel_module_get_build_id(map->dso, machine->root_dir);
1071 }
1072
1073 free(line);
1074 fclose(file);
1075
1076 return machine__set_modules_path(machine);
1077
1078out_delete_line:
1079 free(line);
1080out_failure:
1081 return -1;
1082}
1083
1084int dso__load_vmlinux(struct dso *dso, struct map *map, 884int dso__load_vmlinux(struct dso *dso, struct map *map,
1085 const char *vmlinux, symbol_filter_t filter) 885 const char *vmlinux, symbol_filter_t filter)
1086{ 886{
@@ -1124,8 +924,10 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
1124 filename = dso__build_id_filename(dso, NULL, 0); 924 filename = dso__build_id_filename(dso, NULL, 0);
1125 if (filename != NULL) { 925 if (filename != NULL) {
1126 err = dso__load_vmlinux(dso, map, filename, filter); 926 err = dso__load_vmlinux(dso, map, filename, filter);
1127 if (err > 0) 927 if (err > 0) {
928 dso->lname_alloc = 1;
1128 goto out; 929 goto out;
930 }
1129 free(filename); 931 free(filename);
1130 } 932 }
1131 933
@@ -1133,6 +935,7 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
1133 err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter); 935 err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter);
1134 if (err > 0) { 936 if (err > 0) {
1135 dso__set_long_name(dso, strdup(vmlinux_path[i])); 937 dso__set_long_name(dso, strdup(vmlinux_path[i]));
938 dso->lname_alloc = 1;
1136 break; 939 break;
1137 } 940 }
1138 } 941 }
@@ -1172,6 +975,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
1172 if (err > 0) { 975 if (err > 0) {
1173 dso__set_long_name(dso, 976 dso__set_long_name(dso,
1174 strdup(symbol_conf.vmlinux_name)); 977 strdup(symbol_conf.vmlinux_name));
978 dso->lname_alloc = 1;
1175 goto out_fixup; 979 goto out_fixup;
1176 } 980 }
1177 return err; 981 return err;
@@ -1300,195 +1104,6 @@ out_try_fixup:
1300 return err; 1104 return err;
1301} 1105}
1302 1106
1303size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp)
1304{
1305 struct rb_node *nd;
1306 size_t ret = 0;
1307
1308 for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
1309 struct machine *pos = rb_entry(nd, struct machine, rb_node);
1310 ret += __dsos__fprintf(&pos->kernel_dsos, fp);
1311 ret += __dsos__fprintf(&pos->user_dsos, fp);
1312 }
1313
1314 return ret;
1315}
1316
1317size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
1318 bool with_hits)
1319{
1320 return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) +
1321 __dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits);
1322}
1323
1324size_t machines__fprintf_dsos_buildid(struct rb_root *machines,
1325 FILE *fp, bool with_hits)
1326{
1327 struct rb_node *nd;
1328 size_t ret = 0;
1329
1330 for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
1331 struct machine *pos = rb_entry(nd, struct machine, rb_node);
1332 ret += machine__fprintf_dsos_buildid(pos, fp, with_hits);
1333 }
1334 return ret;
1335}
1336
1337static struct dso *machine__get_kernel(struct machine *machine)
1338{
1339 const char *vmlinux_name = NULL;
1340 struct dso *kernel;
1341
1342 if (machine__is_host(machine)) {
1343 vmlinux_name = symbol_conf.vmlinux_name;
1344 if (!vmlinux_name)
1345 vmlinux_name = "[kernel.kallsyms]";
1346
1347 kernel = dso__kernel_findnew(machine, vmlinux_name,
1348 "[kernel]",
1349 DSO_TYPE_KERNEL);
1350 } else {
1351 char bf[PATH_MAX];
1352
1353 if (machine__is_default_guest(machine))
1354 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
1355 if (!vmlinux_name)
1356 vmlinux_name = machine__mmap_name(machine, bf,
1357 sizeof(bf));
1358
1359 kernel = dso__kernel_findnew(machine, vmlinux_name,
1360 "[guest.kernel]",
1361 DSO_TYPE_GUEST_KERNEL);
1362 }
1363
1364 if (kernel != NULL && (!kernel->has_build_id))
1365 dso__read_running_kernel_build_id(kernel, machine);
1366
1367 return kernel;
1368}
1369
1370struct process_args {
1371 u64 start;
1372};
1373
1374static int symbol__in_kernel(void *arg, const char *name,
1375 char type __maybe_unused, u64 start)
1376{
1377 struct process_args *args = arg;
1378
1379 if (strchr(name, '['))
1380 return 0;
1381
1382 args->start = start;
1383 return 1;
1384}
1385
1386/* Figure out the start address of kernel map from /proc/kallsyms */
1387static u64 machine__get_kernel_start_addr(struct machine *machine)
1388{
1389 const char *filename;
1390 char path[PATH_MAX];
1391 struct process_args args;
1392
1393 if (machine__is_host(machine)) {
1394 filename = "/proc/kallsyms";
1395 } else {
1396 if (machine__is_default_guest(machine))
1397 filename = (char *)symbol_conf.default_guest_kallsyms;
1398 else {
1399 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
1400 filename = path;
1401 }
1402 }
1403
1404 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1405 return 0;
1406
1407 if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
1408 return 0;
1409
1410 return args.start;
1411}
1412
1413int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1414{
1415 enum map_type type;
1416 u64 start = machine__get_kernel_start_addr(machine);
1417
1418 for (type = 0; type < MAP__NR_TYPES; ++type) {
1419 struct kmap *kmap;
1420
1421 machine->vmlinux_maps[type] = map__new2(start, kernel, type);
1422 if (machine->vmlinux_maps[type] == NULL)
1423 return -1;
1424
1425 machine->vmlinux_maps[type]->map_ip =
1426 machine->vmlinux_maps[type]->unmap_ip =
1427 identity__map_ip;
1428 kmap = map__kmap(machine->vmlinux_maps[type]);
1429 kmap->kmaps = &machine->kmaps;
1430 map_groups__insert(&machine->kmaps,
1431 machine->vmlinux_maps[type]);
1432 }
1433
1434 return 0;
1435}
1436
1437void machine__destroy_kernel_maps(struct machine *machine)
1438{
1439 enum map_type type;
1440
1441 for (type = 0; type < MAP__NR_TYPES; ++type) {
1442 struct kmap *kmap;
1443
1444 if (machine->vmlinux_maps[type] == NULL)
1445 continue;
1446
1447 kmap = map__kmap(machine->vmlinux_maps[type]);
1448 map_groups__remove(&machine->kmaps,
1449 machine->vmlinux_maps[type]);
1450 if (kmap->ref_reloc_sym) {
1451 /*
1452 * ref_reloc_sym is shared among all maps, so free just
1453 * on one of them.
1454 */
1455 if (type == MAP__FUNCTION) {
1456 free((char *)kmap->ref_reloc_sym->name);
1457 kmap->ref_reloc_sym->name = NULL;
1458 free(kmap->ref_reloc_sym);
1459 }
1460 kmap->ref_reloc_sym = NULL;
1461 }
1462
1463 map__delete(machine->vmlinux_maps[type]);
1464 machine->vmlinux_maps[type] = NULL;
1465 }
1466}
1467
1468int machine__create_kernel_maps(struct machine *machine)
1469{
1470 struct dso *kernel = machine__get_kernel(machine);
1471
1472 if (kernel == NULL ||
1473 __machine__create_kernel_maps(machine, kernel) < 0)
1474 return -1;
1475
1476 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1477 if (machine__is_host(machine))
1478 pr_debug("Problems creating module maps, "
1479 "continuing anyway...\n");
1480 else
1481 pr_debug("Problems creating module maps for guest %d, "
1482 "continuing anyway...\n", machine->pid);
1483 }
1484
1485 /*
1486 * Now that we have all the maps created, just set the ->end of them:
1487 */
1488 map_groups__fixup_end(&machine->kmaps);
1489 return 0;
1490}
1491
1492static void vmlinux_path__exit(void) 1107static void vmlinux_path__exit(void)
1493{ 1108{
1494 while (--vmlinux_path__nr_entries >= 0) { 1109 while (--vmlinux_path__nr_entries >= 0) {
@@ -1549,25 +1164,6 @@ out_fail:
1549 return -1; 1164 return -1;
1550} 1165}
1551 1166
1552size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
1553{
1554 int i;
1555 size_t printed = 0;
1556 struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
1557
1558 if (kdso->has_build_id) {
1559 char filename[PATH_MAX];
1560 if (dso__build_id_filename(kdso, filename, sizeof(filename)))
1561 printed += fprintf(fp, "[0] %s\n", filename);
1562 }
1563
1564 for (i = 0; i < vmlinux_path__nr_entries; ++i)
1565 printed += fprintf(fp, "[%d] %s\n",
1566 i + kdso->has_build_id, vmlinux_path[i]);
1567
1568 return printed;
1569}
1570
1571static int setup_list(struct strlist **list, const char *list_str, 1167static int setup_list(struct strlist **list, const char *list_str,
1572 const char *list_name) 1168 const char *list_name)
1573{ 1169{
@@ -1671,108 +1267,3 @@ void symbol__exit(void)
1671 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; 1267 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
1672 symbol_conf.initialized = false; 1268 symbol_conf.initialized = false;
1673} 1269}
1674
1675int machines__create_kernel_maps(struct rb_root *machines, pid_t pid)
1676{
1677 struct machine *machine = machines__findnew(machines, pid);
1678
1679 if (machine == NULL)
1680 return -1;
1681
1682 return machine__create_kernel_maps(machine);
1683}
1684
1685int machines__create_guest_kernel_maps(struct rb_root *machines)
1686{
1687 int ret = 0;
1688 struct dirent **namelist = NULL;
1689 int i, items = 0;
1690 char path[PATH_MAX];
1691 pid_t pid;
1692 char *endp;
1693
1694 if (symbol_conf.default_guest_vmlinux_name ||
1695 symbol_conf.default_guest_modules ||
1696 symbol_conf.default_guest_kallsyms) {
1697 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1698 }
1699
1700 if (symbol_conf.guestmount) {
1701 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1702 if (items <= 0)
1703 return -ENOENT;
1704 for (i = 0; i < items; i++) {
1705 if (!isdigit(namelist[i]->d_name[0])) {
1706 /* Filter out . and .. */
1707 continue;
1708 }
1709 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1710 if ((*endp != '\0') ||
1711 (endp == namelist[i]->d_name) ||
1712 (errno == ERANGE)) {
1713 pr_debug("invalid directory (%s). Skipping.\n",
1714 namelist[i]->d_name);
1715 continue;
1716 }
1717 sprintf(path, "%s/%s/proc/kallsyms",
1718 symbol_conf.guestmount,
1719 namelist[i]->d_name);
1720 ret = access(path, R_OK);
1721 if (ret) {
1722 pr_debug("Can't access file %s\n", path);
1723 goto failure;
1724 }
1725 machines__create_kernel_maps(machines, pid);
1726 }
1727failure:
1728 free(namelist);
1729 }
1730
1731 return ret;
1732}
1733
1734void machines__destroy_guest_kernel_maps(struct rb_root *machines)
1735{
1736 struct rb_node *next = rb_first(machines);
1737
1738 while (next) {
1739 struct machine *pos = rb_entry(next, struct machine, rb_node);
1740
1741 next = rb_next(&pos->rb_node);
1742 rb_erase(&pos->rb_node, machines);
1743 machine__delete(pos);
1744 }
1745}
1746
1747int machine__load_kallsyms(struct machine *machine, const char *filename,
1748 enum map_type type, symbol_filter_t filter)
1749{
1750 struct map *map = machine->vmlinux_maps[type];
1751 int ret = dso__load_kallsyms(map->dso, filename, map, filter);
1752
1753 if (ret > 0) {
1754 dso__set_loaded(map->dso, type);
1755 /*
1756 * Since /proc/kallsyms will have multiple sessions for the
1757 * kernel, with modules between them, fixup the end of all
1758 * sections.
1759 */
1760 __map_groups__fixup_end(&machine->kmaps, type);
1761 }
1762
1763 return ret;
1764}
1765
1766int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
1767 symbol_filter_t filter)
1768{
1769 struct map *map = machine->vmlinux_maps[type];
1770 int ret = dso__load_vmlinux_path(map->dso, map, filter);
1771
1772 if (ret > 0) {
1773 dso__set_loaded(map->dso, type);
1774 map__reloc_vmlinux(map);
1775 }
1776
1777 return ret;
1778}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index de68f98b236d..5f720dc076da 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -16,8 +16,8 @@
16#ifdef LIBELF_SUPPORT 16#ifdef LIBELF_SUPPORT
17#include <libelf.h> 17#include <libelf.h>
18#include <gelf.h> 18#include <gelf.h>
19#include <elf.h>
20#endif 19#endif
20#include <elf.h>
21 21
22#include "dso.h" 22#include "dso.h"
23 23
@@ -96,7 +96,9 @@ struct symbol_conf {
96 initialized, 96 initialized,
97 kptr_restrict, 97 kptr_restrict,
98 annotate_asm_raw, 98 annotate_asm_raw,
99 annotate_src; 99 annotate_src,
100 event_group,
101 demangle;
100 const char *vmlinux_name, 102 const char *vmlinux_name,
101 *kallsyms_name, 103 *kallsyms_name,
102 *source_prefix, 104 *source_prefix,
@@ -120,6 +122,8 @@ struct symbol_conf {
120}; 122};
121 123
122extern struct symbol_conf symbol_conf; 124extern struct symbol_conf symbol_conf;
125extern int vmlinux_path__nr_entries;
126extern char **vmlinux_path;
123 127
124static inline void *symbol__priv(struct symbol *sym) 128static inline void *symbol__priv(struct symbol *sym)
125{ 129{
@@ -152,6 +156,12 @@ struct branch_info {
152 struct branch_flags flags; 156 struct branch_flags flags;
153}; 157};
154 158
159struct mem_info {
160 struct addr_map_symbol iaddr;
161 struct addr_map_symbol daddr;
162 union perf_mem_data_src data_src;
163};
164
155struct addr_location { 165struct addr_location {
156 struct thread *thread; 166 struct thread *thread;
157 struct map *map; 167 struct map *map;
@@ -223,6 +233,8 @@ size_t symbol__fprintf_symname_offs(const struct symbol *sym,
223size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); 233size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp);
224size_t symbol__fprintf(struct symbol *sym, FILE *fp); 234size_t symbol__fprintf(struct symbol *sym, FILE *fp);
225bool symbol_type__is_a(char symbol_type, enum map_type map_type); 235bool symbol_type__is_a(char symbol_type, enum map_type map_type);
236bool symbol__restricted_filename(const char *filename,
237 const char *restricted_filename);
226 238
227int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, 239int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
228 struct symsrc *runtime_ss, symbol_filter_t filter, 240 struct symsrc *runtime_ss, symbol_filter_t filter,
diff --git a/tools/perf/util/sysfs.c b/tools/perf/util/sysfs.c
index 48c6902e749f..f71e9eafe15a 100644
--- a/tools/perf/util/sysfs.c
+++ b/tools/perf/util/sysfs.c
@@ -8,7 +8,7 @@ static const char * const sysfs_known_mountpoints[] = {
8}; 8};
9 9
10static int sysfs_found; 10static int sysfs_found;
11char sysfs_mountpoint[PATH_MAX]; 11char sysfs_mountpoint[PATH_MAX + 1];
12 12
13static int sysfs_valid_mountpoint(const char *sysfs) 13static int sysfs_valid_mountpoint(const char *sysfs)
14{ 14{
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index df59623ac763..632e40e5ceca 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -54,10 +54,10 @@ int thread__comm_len(struct thread *self)
54 return self->comm_len; 54 return self->comm_len;
55} 55}
56 56
57static size_t thread__fprintf(struct thread *self, FILE *fp) 57size_t thread__fprintf(struct thread *thread, FILE *fp)
58{ 58{
59 return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) + 59 return fprintf(fp, "Thread %d %s\n", thread->pid, thread->comm) +
60 map_groups__fprintf(&self->mg, verbose, fp); 60 map_groups__fprintf(&thread->mg, verbose, fp);
61} 61}
62 62
63void thread__insert_map(struct thread *self, struct map *map) 63void thread__insert_map(struct thread *self, struct map *map)
@@ -84,17 +84,3 @@ int thread__fork(struct thread *self, struct thread *parent)
84 return -ENOMEM; 84 return -ENOMEM;
85 return 0; 85 return 0;
86} 86}
87
88size_t machine__fprintf(struct machine *machine, FILE *fp)
89{
90 size_t ret = 0;
91 struct rb_node *nd;
92
93 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
94 struct thread *pos = rb_entry(nd, struct thread, rb_node);
95
96 ret += thread__fprintf(pos, fp);
97 }
98
99 return ret;
100}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index f2fa17caa7d5..5ad266403098 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -30,6 +30,7 @@ int thread__set_comm(struct thread *self, const char *comm);
30int thread__comm_len(struct thread *self); 30int thread__comm_len(struct thread *self);
31void thread__insert_map(struct thread *self, struct map *map); 31void thread__insert_map(struct thread *self, struct map *map);
32int thread__fork(struct thread *self, struct thread *parent); 32int thread__fork(struct thread *self, struct thread *parent);
33size_t thread__fprintf(struct thread *thread, FILE *fp);
33 34
34static inline struct map *thread__find_map(struct thread *self, 35static inline struct map *thread__find_map(struct thread *self,
35 enum map_type type, u64 addr) 36 enum map_type type, u64 addr)
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h
index f718df8a3c59..0cd8b3108084 100644
--- a/tools/perf/util/thread_map.h
+++ b/tools/perf/util/thread_map.h
@@ -21,4 +21,9 @@ void thread_map__delete(struct thread_map *threads);
21 21
22size_t thread_map__fprintf(struct thread_map *threads, FILE *fp); 22size_t thread_map__fprintf(struct thread_map *threads, FILE *fp);
23 23
24static inline int thread_map__nr(struct thread_map *threads)
25{
26 return threads ? threads->nr : 1;
27}
28
24#endif /* __PERF_THREAD_MAP_H */ 29#endif /* __PERF_THREAD_MAP_H */
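
thread_map__nr() maps a NULL thread_map to 1, so per-thread loops still execute once for system-wide or CPU-bound sessions. A minimal sketch; threads and the per-thread helper are hypothetical:

        int t, nthreads = thread_map__nr(threads);      /* threads may be NULL */

        for (t = 0; t < nthreads; t++)
                read_counter_for_thread(t);             /* hypothetical per-thread work */
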
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index 884dde9b9bc1..54d37a4753c5 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -26,6 +26,8 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
26 float samples_per_sec = top->samples / top->delay_secs; 26 float samples_per_sec = top->samples / top->delay_secs;
27 float ksamples_per_sec = top->kernel_samples / top->delay_secs; 27 float ksamples_per_sec = top->kernel_samples / top->delay_secs;
28 float esamples_percent = (100.0 * top->exact_samples) / top->samples; 28 float esamples_percent = (100.0 * top->exact_samples) / top->samples;
29 struct perf_record_opts *opts = &top->record_opts;
30 struct perf_target *target = &opts->target;
29 size_t ret = 0; 31 size_t ret = 0;
30 32
31 if (!perf_guest) { 33 if (!perf_guest) {
@@ -61,31 +63,31 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
61 struct perf_evsel *first = perf_evlist__first(top->evlist); 63 struct perf_evsel *first = perf_evlist__first(top->evlist);
62 ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ", 64 ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
63 (uint64_t)first->attr.sample_period, 65 (uint64_t)first->attr.sample_period,
64 top->freq ? "Hz" : ""); 66 opts->freq ? "Hz" : "");
65 } 67 }
66 68
67 ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel)); 69 ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel));
68 70
69 ret += SNPRINTF(bf + ret, size - ret, "], "); 71 ret += SNPRINTF(bf + ret, size - ret, "], ");
70 72
71 if (top->target.pid) 73 if (target->pid)
72 ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s", 74 ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s",
73 top->target.pid); 75 target->pid);
74 else if (top->target.tid) 76 else if (target->tid)
75 ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s", 77 ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s",
76 top->target.tid); 78 target->tid);
77 else if (top->target.uid_str != NULL) 79 else if (target->uid_str != NULL)
78 ret += SNPRINTF(bf + ret, size - ret, " (uid: %s", 80 ret += SNPRINTF(bf + ret, size - ret, " (uid: %s",
79 top->target.uid_str); 81 target->uid_str);
80 else 82 else
81 ret += SNPRINTF(bf + ret, size - ret, " (all"); 83 ret += SNPRINTF(bf + ret, size - ret, " (all");
82 84
83 if (top->target.cpu_list) 85 if (target->cpu_list)
84 ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)", 86 ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
85 top->evlist->cpus->nr > 1 ? "s" : "", 87 top->evlist->cpus->nr > 1 ? "s" : "",
86 top->target.cpu_list); 88 target->cpu_list);
87 else { 89 else {
88 if (top->target.tid) 90 if (target->tid)
89 ret += SNPRINTF(bf + ret, size - ret, ")"); 91 ret += SNPRINTF(bf + ret, size - ret, ")");
90 else 92 else
91 ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)", 93 ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 86ff1b15059b..7ebf357dc9e1 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -14,7 +14,7 @@ struct perf_session;
14struct perf_top { 14struct perf_top {
15 struct perf_tool tool; 15 struct perf_tool tool;
16 struct perf_evlist *evlist; 16 struct perf_evlist *evlist;
17 struct perf_target target; 17 struct perf_record_opts record_opts;
18 /* 18 /*
19 * Symbols will be added here in perf_event__process_sample and will 19 * Symbols will be added here in perf_event__process_sample and will
20 * get out after decayed. 20 * get out after decayed.
@@ -24,24 +24,16 @@ struct perf_top {
24 u64 exact_samples; 24 u64 exact_samples;
25 u64 guest_us_samples, guest_kernel_samples; 25 u64 guest_us_samples, guest_kernel_samples;
26 int print_entries, count_filter, delay_secs; 26 int print_entries, count_filter, delay_secs;
27 int freq;
28 bool hide_kernel_symbols, hide_user_symbols, zero; 27 bool hide_kernel_symbols, hide_user_symbols, zero;
29 bool use_tui, use_stdio; 28 bool use_tui, use_stdio;
30 bool sort_has_symbols; 29 bool sort_has_symbols;
31 bool dont_use_callchains;
32 bool kptr_restrict_warned; 30 bool kptr_restrict_warned;
33 bool vmlinux_warned; 31 bool vmlinux_warned;
34 bool inherit;
35 bool group;
36 bool sample_id_all_missing;
37 bool exclude_guest_missing;
38 bool dump_symtab; 32 bool dump_symtab;
39 struct hist_entry *sym_filter_entry; 33 struct hist_entry *sym_filter_entry;
40 struct perf_evsel *sym_evsel; 34 struct perf_evsel *sym_evsel;
41 struct perf_session *session; 35 struct perf_session *session;
42 struct winsize winsize; 36 struct winsize winsize;
43 unsigned int mmap_pages;
44 int default_interval;
45 int realtime_prio; 37 int realtime_prio;
46 int sym_pcnt_filter; 38 int sym_pcnt_filter;
47 const char *sym_filter; 39 const char *sym_filter;
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index a8d81c35ef66..3917eb9a8479 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -38,52 +38,20 @@
38 38
39#include "../perf.h" 39#include "../perf.h"
40#include "trace-event.h" 40#include "trace-event.h"
41#include "debugfs.h" 41#include <lk/debugfs.h>
42#include "evsel.h" 42#include "evsel.h"
43 43
44#define VERSION "0.5" 44#define VERSION "0.5"
45 45
46#define TRACE_CTRL "tracing_on"
47#define TRACE "trace"
48#define AVAILABLE "available_tracers"
49#define CURRENT "current_tracer"
50#define ITER_CTRL "trace_options"
51#define MAX_LATENCY "tracing_max_latency"
52
53unsigned int page_size;
54
55static const char *output_file = "trace.info";
56static int output_fd; 46static int output_fd;
57 47
58struct event_list {
59 struct event_list *next;
60 const char *event;
61};
62
63struct events {
64 struct events *sibling;
65 struct events *children;
66 struct events *next;
67 char *name;
68};
69
70
71static void *malloc_or_die(unsigned int size)
72{
73 void *data;
74
75 data = malloc(size);
76 if (!data)
77 die("malloc");
78 return data;
79}
80 48
81static const char *find_debugfs(void) 49static const char *find_debugfs(void)
82{ 50{
83 const char *path = debugfs_mount(NULL); 51 const char *path = perf_debugfs_mount(NULL);
84 52
85 if (!path) 53 if (!path)
86 die("Your kernel not support debugfs filesystem"); 54 pr_debug("Your kernel does not support the debugfs filesystem");
87 55
88 return path; 56 return path;
89} 57}
@@ -102,8 +70,12 @@ static const char *find_tracing_dir(void)
102 return tracing; 70 return tracing;
103 71
104 debugfs = find_debugfs(); 72 debugfs = find_debugfs();
73 if (!debugfs)
74 return NULL;
105 75
106 tracing = malloc_or_die(strlen(debugfs) + 9); 76 tracing = malloc(strlen(debugfs) + 9);
77 if (!tracing)
78 return NULL;
107 79
108 sprintf(tracing, "%s/tracing", debugfs); 80 sprintf(tracing, "%s/tracing", debugfs);
109 81
@@ -120,7 +92,9 @@ static char *get_tracing_file(const char *name)
120 if (!tracing) 92 if (!tracing)
121 return NULL; 93 return NULL;
122 94
123 file = malloc_or_die(strlen(tracing) + strlen(name) + 2); 95 file = malloc(strlen(tracing) + strlen(name) + 2);
96 if (!file)
97 return NULL;
124 98
125 sprintf(file, "%s/%s", tracing, name); 99 sprintf(file, "%s/%s", tracing, name);
126 return file; 100 return file;
@@ -131,24 +105,6 @@ static void put_tracing_file(char *file)
131 free(file); 105 free(file);
132} 106}
133 107
134static ssize_t calc_data_size;
135
136static ssize_t write_or_die(const void *buf, size_t len)
137{
138 int ret;
139
140 if (calc_data_size) {
141 calc_data_size += len;
142 return len;
143 }
144
145 ret = write(output_fd, buf, len);
146 if (ret < 0)
147 die("writing to '%s'", output_file);
148
149 return ret;
150}
151
152int bigendian(void) 108int bigendian(void)
153{ 109{
154 unsigned char str[] = { 0x1, 0x2, 0x3, 0x4, 0x0, 0x0, 0x0, 0x0}; 110 unsigned char str[] = { 0x1, 0x2, 0x3, 0x4, 0x0, 0x0, 0x0, 0x0};
@@ -159,59 +115,106 @@ int bigendian(void)
159} 115}
160 116
161/* unfortunately, you can not stat debugfs or proc files for size */ 117/* unfortunately, you can not stat debugfs or proc files for size */
162static void record_file(const char *file, size_t hdr_sz) 118static int record_file(const char *file, ssize_t hdr_sz)
163{ 119{
164 unsigned long long size = 0; 120 unsigned long long size = 0;
165 char buf[BUFSIZ], *sizep; 121 char buf[BUFSIZ], *sizep;
166 off_t hdr_pos = lseek(output_fd, 0, SEEK_CUR); 122 off_t hdr_pos = lseek(output_fd, 0, SEEK_CUR);
167 int r, fd; 123 int r, fd;
124 int err = -EIO;
168 125
169 fd = open(file, O_RDONLY); 126 fd = open(file, O_RDONLY);
170 if (fd < 0) 127 if (fd < 0) {
171 die("Can't read '%s'", file); 128 pr_debug("Can't read '%s'", file);
129 return -errno;
130 }
172 131
173 /* put in zeros for file size, then fill true size later */ 132 /* put in zeros for file size, then fill true size later */
174 if (hdr_sz) 133 if (hdr_sz) {
175 write_or_die(&size, hdr_sz); 134 if (write(output_fd, &size, hdr_sz) != hdr_sz)
135 goto out;
136 }
176 137
177 do { 138 do {
178 r = read(fd, buf, BUFSIZ); 139 r = read(fd, buf, BUFSIZ);
179 if (r > 0) { 140 if (r > 0) {
180 size += r; 141 size += r;
181 write_or_die(buf, r); 142 if (write(output_fd, buf, r) != r)
143 goto out;
182 } 144 }
183 } while (r > 0); 145 } while (r > 0);
184 close(fd);
185 146
186 /* ugh, handle big-endian hdr_size == 4 */ 147 /* ugh, handle big-endian hdr_size == 4 */
187 sizep = (char*)&size; 148 sizep = (char*)&size;
188 if (bigendian()) 149 if (bigendian())
189 sizep += sizeof(u64) - hdr_sz; 150 sizep += sizeof(u64) - hdr_sz;
190 151
191 if (hdr_sz && pwrite(output_fd, sizep, hdr_sz, hdr_pos) < 0) 152 if (hdr_sz && pwrite(output_fd, sizep, hdr_sz, hdr_pos) < 0) {
192 die("writing to %s", output_file); 153 pr_debug("writing file size failed\n");
154 goto out;
155 }
156
157 err = 0;
158out:
159 close(fd);
160 return err;
193} 161}
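
record_file() keeps the old "reserve, stream, backfill" layout (debugfs files cannot be stat'ed for their size) but now returns an error instead of dying. The pattern in isolation, as a plain POSIX sketch with hypothetical names:

        #include <stdint.h>
        #include <unistd.h>

        static int copy_with_size_header(int out_fd, int src_fd)
        {
                uint64_t size = 0;
                off_t hdr = lseek(out_fd, 0, SEEK_CUR); /* where the size will live */
                char buf[4096];
                ssize_t r;

                if (write(out_fd, &size, sizeof(size)) != sizeof(size))
                        return -1;                      /* reserve the header slot */

                while ((r = read(src_fd, buf, sizeof(buf))) > 0) {
                        if (write(out_fd, buf, r) != r)
                                return -1;
                        size += r;
                }
                if (r < 0)
                        return -1;

                /* backfill the real size without moving the current offset */
                return pwrite(out_fd, &size, sizeof(size), hdr) == sizeof(size) ? 0 : -1;
        }
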
194 162
195static void read_header_files(void) 163static int read_header_files(void)
196{ 164{
197 char *path; 165 char *path;
198 struct stat st; 166 struct stat st;
167 int err = -EIO;
199 168
200 path = get_tracing_file("events/header_page"); 169 path = get_tracing_file("events/header_page");
201 if (stat(path, &st) < 0) 170 if (!path) {
202 die("can't read '%s'", path); 171 pr_debug("can't get tracing/events/header_page");
172 return -ENOMEM;
173 }
174
175 if (stat(path, &st) < 0) {
176 pr_debug("can't read '%s'", path);
177 goto out;
178 }
179
180 if (write(output_fd, "header_page", 12) != 12) {
181 pr_debug("can't write header_page\n");
182 goto out;
183 }
184
185 if (record_file(path, 8) < 0) {
186 pr_debug("can't record header_page file\n");
187 goto out;
188 }
203 189
204 write_or_die("header_page", 12);
205 record_file(path, 8);
206 put_tracing_file(path); 190 put_tracing_file(path);
207 191
208 path = get_tracing_file("events/header_event"); 192 path = get_tracing_file("events/header_event");
209 if (stat(path, &st) < 0) 193 if (!path) {
210 die("can't read '%s'", path); 194 pr_debug("can't get tracing/events/header_event");
195 err = -ENOMEM;
196 goto out;
197 }
198
199 if (stat(path, &st) < 0) {
200 pr_debug("can't read '%s'", path);
201 goto out;
202 }
211 203
212 write_or_die("header_event", 13); 204 if (write(output_fd, "header_event", 13) != 13) {
213 record_file(path, 8); 205 pr_debug("can't write header_event\n");
206 goto out;
207 }
208
209 if (record_file(path, 8) < 0) {
210 pr_debug("can't record header_event file\n");
211 goto out;
212 }
213
214 err = 0;
215out:
214 put_tracing_file(path); 216 put_tracing_file(path);
217 return err;
215} 218}
216 219
217static bool name_in_tp_list(char *sys, struct tracepoint_path *tps) 220static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
@@ -225,7 +228,7 @@ static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
225 return false; 228 return false;
226} 229}
227 230
228static void copy_event_system(const char *sys, struct tracepoint_path *tps) 231static int copy_event_system(const char *sys, struct tracepoint_path *tps)
229{ 232{
230 struct dirent *dent; 233 struct dirent *dent;
231 struct stat st; 234 struct stat st;
@@ -233,10 +236,13 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
233 DIR *dir; 236 DIR *dir;
234 int count = 0; 237 int count = 0;
235 int ret; 238 int ret;
239 int err;
236 240
237 dir = opendir(sys); 241 dir = opendir(sys);
238 if (!dir) 242 if (!dir) {
239 die("can't read directory '%s'", sys); 243 pr_debug("can't read directory '%s'", sys);
244 return -errno;
245 }
240 246
241 while ((dent = readdir(dir))) { 247 while ((dent = readdir(dir))) {
242 if (dent->d_type != DT_DIR || 248 if (dent->d_type != DT_DIR ||
@@ -244,7 +250,11 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
244 strcmp(dent->d_name, "..") == 0 || 250 strcmp(dent->d_name, "..") == 0 ||
245 !name_in_tp_list(dent->d_name, tps)) 251 !name_in_tp_list(dent->d_name, tps))
246 continue; 252 continue;
247 format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10); 253 format = malloc(strlen(sys) + strlen(dent->d_name) + 10);
254 if (!format) {
255 err = -ENOMEM;
256 goto out;
257 }
248 sprintf(format, "%s/%s/format", sys, dent->d_name); 258 sprintf(format, "%s/%s/format", sys, dent->d_name);
249 ret = stat(format, &st); 259 ret = stat(format, &st);
250 free(format); 260 free(format);
@@ -253,7 +263,11 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
253 count++; 263 count++;
254 } 264 }
255 265
256 write_or_die(&count, 4); 266 if (write(output_fd, &count, 4) != 4) {
267 err = -EIO;
268 pr_debug("can't write count\n");
269 goto out;
270 }
257 271
258 rewinddir(dir); 272 rewinddir(dir);
259 while ((dent = readdir(dir))) { 273 while ((dent = readdir(dir))) {
@@ -262,27 +276,45 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
262 strcmp(dent->d_name, "..") == 0 || 276 strcmp(dent->d_name, "..") == 0 ||
263 !name_in_tp_list(dent->d_name, tps)) 277 !name_in_tp_list(dent->d_name, tps))
264 continue; 278 continue;
265 format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10); 279 format = malloc(strlen(sys) + strlen(dent->d_name) + 10);
280 if (!format) {
281 err = -ENOMEM;
282 goto out;
283 }
266 sprintf(format, "%s/%s/format", sys, dent->d_name); 284 sprintf(format, "%s/%s/format", sys, dent->d_name);
267 ret = stat(format, &st); 285 ret = stat(format, &st);
268 286
269 if (ret >= 0) 287 if (ret >= 0) {
270 record_file(format, 8); 288 err = record_file(format, 8);
271 289 if (err) {
290 free(format);
291 goto out;
292 }
293 }
272 free(format); 294 free(format);
273 } 295 }
296 err = 0;
297out:
274 closedir(dir); 298 closedir(dir);
299 return err;
275} 300}
276 301
277static void read_ftrace_files(struct tracepoint_path *tps) 302static int read_ftrace_files(struct tracepoint_path *tps)
278{ 303{
279 char *path; 304 char *path;
305 int ret;
280 306
281 path = get_tracing_file("events/ftrace"); 307 path = get_tracing_file("events/ftrace");
308 if (!path) {
309 pr_debug("can't get tracing/events/ftrace");
310 return -ENOMEM;
311 }
282 312
283 copy_event_system(path, tps); 313 ret = copy_event_system(path, tps);
284 314
285 put_tracing_file(path); 315 put_tracing_file(path);
316
317 return ret;
286} 318}
287 319
288static bool system_in_tp_list(char *sys, struct tracepoint_path *tps) 320static bool system_in_tp_list(char *sys, struct tracepoint_path *tps)
@@ -296,7 +328,7 @@ static bool system_in_tp_list(char *sys, struct tracepoint_path *tps)
296 return false; 328 return false;
297} 329}
298 330
299static void read_event_files(struct tracepoint_path *tps) 331static int read_event_files(struct tracepoint_path *tps)
300{ 332{
301 struct dirent *dent; 333 struct dirent *dent;
302 struct stat st; 334 struct stat st;
@@ -305,12 +337,20 @@ static void read_event_files(struct tracepoint_path *tps)
305 DIR *dir; 337 DIR *dir;
306 int count = 0; 338 int count = 0;
307 int ret; 339 int ret;
340 int err;
308 341
309 path = get_tracing_file("events"); 342 path = get_tracing_file("events");
343 if (!path) {
344 pr_debug("can't get tracing/events");
345 return -ENOMEM;
346 }
310 347
311 dir = opendir(path); 348 dir = opendir(path);
312 if (!dir) 349 if (!dir) {
313 die("can't read directory '%s'", path); 350 err = -errno;
351 pr_debug("can't read directory '%s'", path);
352 goto out;
353 }
314 354
315 while ((dent = readdir(dir))) { 355 while ((dent = readdir(dir))) {
316 if (dent->d_type != DT_DIR || 356 if (dent->d_type != DT_DIR ||
@@ -322,7 +362,11 @@ static void read_event_files(struct tracepoint_path *tps)
322 count++; 362 count++;
323 } 363 }
324 364
325 write_or_die(&count, 4); 365 if (write(output_fd, &count, 4) != 4) {
366 err = -EIO;
367 pr_debug("can't write count\n");
368 goto out;
369 }
326 370
327 rewinddir(dir); 371 rewinddir(dir);
328 while ((dent = readdir(dir))) { 372 while ((dent = readdir(dir))) {
@@ -332,56 +376,90 @@ static void read_event_files(struct tracepoint_path *tps)
332 strcmp(dent->d_name, "ftrace") == 0 || 376 strcmp(dent->d_name, "ftrace") == 0 ||
333 !system_in_tp_list(dent->d_name, tps)) 377 !system_in_tp_list(dent->d_name, tps))
334 continue; 378 continue;
335 sys = malloc_or_die(strlen(path) + strlen(dent->d_name) + 2); 379 sys = malloc(strlen(path) + strlen(dent->d_name) + 2);
380 if (!sys) {
381 err = -ENOMEM;
382 goto out;
383 }
336 sprintf(sys, "%s/%s", path, dent->d_name); 384 sprintf(sys, "%s/%s", path, dent->d_name);
337 ret = stat(sys, &st); 385 ret = stat(sys, &st);
338 if (ret >= 0) { 386 if (ret >= 0) {
339 write_or_die(dent->d_name, strlen(dent->d_name) + 1); 387 ssize_t size = strlen(dent->d_name) + 1;
340 copy_event_system(sys, tps); 388
389 if (write(output_fd, dent->d_name, size) != size ||
390 copy_event_system(sys, tps) < 0) {
391 err = -EIO;
392 free(sys);
393 goto out;
394 }
341 } 395 }
342 free(sys); 396 free(sys);
343 } 397 }
344 398 err = 0;
399out:
345 closedir(dir); 400 closedir(dir);
346 put_tracing_file(path); 401 put_tracing_file(path);
402
403 return err;
347} 404}
348 405
349static void read_proc_kallsyms(void) 406static int read_proc_kallsyms(void)
350{ 407{
351 unsigned int size; 408 unsigned int size;
352 const char *path = "/proc/kallsyms"; 409 const char *path = "/proc/kallsyms";
353 struct stat st; 410 struct stat st;
354 int ret; 411 int ret, err = 0;
355 412
356 ret = stat(path, &st); 413 ret = stat(path, &st);
357 if (ret < 0) { 414 if (ret < 0) {
358 /* not found */ 415 /* not found */
359 size = 0; 416 size = 0;
360 write_or_die(&size, 4); 417 if (write(output_fd, &size, 4) != 4)
361 return; 418 err = -EIO;
419 return err;
362 } 420 }
363 record_file(path, 4); 421 return record_file(path, 4);
364} 422}
365 423
366static void read_ftrace_printk(void) 424static int read_ftrace_printk(void)
367{ 425{
368 unsigned int size; 426 unsigned int size;
369 char *path; 427 char *path;
370 struct stat st; 428 struct stat st;
371 int ret; 429 int ret, err = 0;
372 430
373 path = get_tracing_file("printk_formats"); 431 path = get_tracing_file("printk_formats");
432 if (!path) {
433 pr_debug("can't get tracing/printk_formats");
434 return -ENOMEM;
435 }
436
374 ret = stat(path, &st); 437 ret = stat(path, &st);
375 if (ret < 0) { 438 if (ret < 0) {
376 /* not found */ 439 /* not found */
377 size = 0; 440 size = 0;
378 write_or_die(&size, 4); 441 if (write(output_fd, &size, 4) != 4)
442 err = -EIO;
379 goto out; 443 goto out;
380 } 444 }
381 record_file(path, 4); 445 err = record_file(path, 4);
382 446
383out: 447out:
384 put_tracing_file(path); 448 put_tracing_file(path);
449 return err;
450}
451
452static void
453put_tracepoints_path(struct tracepoint_path *tps)
454{
455 while (tps) {
456 struct tracepoint_path *t = tps;
457
458 tps = tps->next;
459 free(t->name);
460 free(t->system);
461 free(t);
462 }
385} 463}
386 464
387static struct tracepoint_path * 465static struct tracepoint_path *
@@ -396,27 +474,17 @@ get_tracepoints_path(struct list_head *pattrs)
396 continue; 474 continue;
397 ++nr_tracepoints; 475 ++nr_tracepoints;
398 ppath->next = tracepoint_id_to_path(pos->attr.config); 476 ppath->next = tracepoint_id_to_path(pos->attr.config);
399 if (!ppath->next) 477 if (!ppath->next) {
400 die("%s\n", "No memory to alloc tracepoints list"); 478 pr_debug("No memory to alloc tracepoints list\n");
479 put_tracepoints_path(&path);
480 return NULL;
481 }
401 ppath = ppath->next; 482 ppath = ppath->next;
402 } 483 }
403 484
404 return nr_tracepoints > 0 ? path.next : NULL; 485 return nr_tracepoints > 0 ? path.next : NULL;
405} 486}
406 487
407static void
408put_tracepoints_path(struct tracepoint_path *tps)
409{
410 while (tps) {
411 struct tracepoint_path *t = tps;
412
413 tps = tps->next;
414 free(t->name);
415 free(t->system);
416 free(t);
417 }
418}
419
420bool have_tracepoints(struct list_head *pattrs) 488bool have_tracepoints(struct list_head *pattrs)
421{ 489{
422 struct perf_evsel *pos; 490 struct perf_evsel *pos;
@@ -428,9 +496,10 @@ bool have_tracepoints(struct list_head *pattrs)
428 return false; 496 return false;
429} 497}
430 498
431static void tracing_data_header(void) 499static int tracing_data_header(void)
432{ 500{
433 char buf[20]; 501 char buf[20];
502 ssize_t size;
434 503
435 /* just guessing this is someone's birthday.. ;) */ 504 /* just guessing this is someone's birthday.. ;) */
436 buf[0] = 23; 505 buf[0] = 23;
@@ -438,9 +507,12 @@ static void tracing_data_header(void)
438 buf[2] = 68; 507 buf[2] = 68;
439 memcpy(buf + 3, "tracing", 7); 508 memcpy(buf + 3, "tracing", 7);
440 509
441 write_or_die(buf, 10); 510 if (write(output_fd, buf, 10) != 10)
511 return -1;
442 512
443 write_or_die(VERSION, strlen(VERSION) + 1); 513 size = strlen(VERSION) + 1;
514 if (write(output_fd, VERSION, size) != size)
515 return -1;
444 516
445 /* save endian */ 517 /* save endian */
446 if (bigendian()) 518 if (bigendian())
@@ -450,15 +522,19 @@ static void tracing_data_header(void)
450 522
451 read_trace_init(buf[0], buf[0]); 523 read_trace_init(buf[0], buf[0]);
452 524
453 write_or_die(buf, 1); 525 if (write(output_fd, buf, 1) != 1)
526 return -1;
454 527
455 /* save size of long */ 528 /* save size of long */
456 buf[0] = sizeof(long); 529 buf[0] = sizeof(long);
457 write_or_die(buf, 1); 530 if (write(output_fd, buf, 1) != 1)
531 return -1;
458 532
459 /* save page_size */ 533 /* save page_size */
460 page_size = sysconf(_SC_PAGESIZE); 534 if (write(output_fd, &page_size, 4) != 4)
461 write_or_die(&page_size, 4); 535 return -1;
536
537 return 0;
462} 538}
463 539
464struct tracing_data *tracing_data_get(struct list_head *pattrs, 540struct tracing_data *tracing_data_get(struct list_head *pattrs,
@@ -466,6 +542,7 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
466{ 542{
467 struct tracepoint_path *tps; 543 struct tracepoint_path *tps;
468 struct tracing_data *tdata; 544 struct tracing_data *tdata;
545 int err;
469 546
470 output_fd = fd; 547 output_fd = fd;
471 548
@@ -473,7 +550,10 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
473 if (!tps) 550 if (!tps)
474 return NULL; 551 return NULL;
475 552
476 tdata = malloc_or_die(sizeof(*tdata)); 553 tdata = malloc(sizeof(*tdata));
554 if (!tdata)
555 return NULL;
556
477 tdata->temp = temp; 557 tdata->temp = temp;
478 tdata->size = 0; 558 tdata->size = 0;
479 559
@@ -482,12 +562,16 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
482 562
483 snprintf(tdata->temp_file, sizeof(tdata->temp_file), 563 snprintf(tdata->temp_file, sizeof(tdata->temp_file),
484 "/tmp/perf-XXXXXX"); 564 "/tmp/perf-XXXXXX");
485 if (!mkstemp(tdata->temp_file)) 565 if (!mkstemp(tdata->temp_file)) {
486 die("Can't make temp file"); 566 pr_debug("Can't make temp file");
567 return NULL;
568 }
487 569
488 temp_fd = open(tdata->temp_file, O_RDWR); 570 temp_fd = open(tdata->temp_file, O_RDWR);
489 if (temp_fd < 0) 571 if (temp_fd < 0) {
490 die("Can't read '%s'", tdata->temp_file); 572 pr_debug("Can't read '%s'", tdata->temp_file);
573 return NULL;
574 }
491 575
492 /* 576 /*
493 * Set the temp file the default output, so all the 577 * Set the temp file the default output, so all the
@@ -496,13 +580,24 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
496 output_fd = temp_fd; 580 output_fd = temp_fd;
497 } 581 }
498 582
499 tracing_data_header(); 583 err = tracing_data_header();
500 read_header_files(); 584 if (err)
501 read_ftrace_files(tps); 585 goto out;
502 read_event_files(tps); 586 err = read_header_files();
503 read_proc_kallsyms(); 587 if (err)
504 read_ftrace_printk(); 588 goto out;
589 err = read_ftrace_files(tps);
590 if (err)
591 goto out;
592 err = read_event_files(tps);
593 if (err)
594 goto out;
595 err = read_proc_kallsyms();
596 if (err)
597 goto out;
598 err = read_ftrace_printk();
505 599
600out:
506 /* 601 /*
507 * All tracing data are stored by now, we can restore 602 * All tracing data are stored by now, we can restore
508 * the default output file in case we used temp file. 603 * the default output file in case we used temp file.
@@ -513,22 +608,31 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
513 output_fd = fd; 608 output_fd = fd;
514 } 609 }
515 610
611 if (err) {
612 free(tdata);
613 tdata = NULL;
614 }
615
516 put_tracepoints_path(tps); 616 put_tracepoints_path(tps);
517 return tdata; 617 return tdata;
518} 618}
519 619
520void tracing_data_put(struct tracing_data *tdata) 620int tracing_data_put(struct tracing_data *tdata)
521{ 621{
622 int err = 0;
623
522 if (tdata->temp) { 624 if (tdata->temp) {
523 record_file(tdata->temp_file, 0); 625 err = record_file(tdata->temp_file, 0);
524 unlink(tdata->temp_file); 626 unlink(tdata->temp_file);
525 } 627 }
526 628
527 free(tdata); 629 free(tdata);
630 return err;
528} 631}
529 632
530int read_tracing_data(int fd, struct list_head *pattrs) 633int read_tracing_data(int fd, struct list_head *pattrs)
531{ 634{
635 int err;
532 struct tracing_data *tdata; 636 struct tracing_data *tdata;
533 637
534 /* 638 /*
@@ -539,6 +643,6 @@ int read_tracing_data(int fd, struct list_head *pattrs)
539 if (!tdata) 643 if (!tdata)
540 return -ENOMEM; 644 return -ENOMEM;
541 645
542 tracing_data_put(tdata); 646 err = tracing_data_put(tdata);
543 return 0; 647 return err;
544} 648}
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 3aabcd687cd5..4454835a9ebc 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -183,43 +183,6 @@ void event_format__print(struct event_format *event,
183 trace_seq_do_printf(&s); 183 trace_seq_do_printf(&s);
184} 184}
185 185
186void print_trace_event(struct pevent *pevent, int cpu, void *data, int size)
187{
188 int type = trace_parse_common_type(pevent, data);
189 struct event_format *event = pevent_find_event(pevent, type);
190
191 if (!event) {
192 warning("ug! no event found for type %d", type);
193 return;
194 }
195
196 event_format__print(event, cpu, data, size);
197}
198
199void print_event(struct pevent *pevent, int cpu, void *data, int size,
200 unsigned long long nsecs, char *comm)
201{
202 struct pevent_record record;
203 struct trace_seq s;
204 int pid;
205
206 pevent->latency_format = latency_format;
207
208 record.ts = nsecs;
209 record.cpu = cpu;
210 record.size = size;
211 record.data = data;
212 pid = pevent_data_pid(pevent, &record);
213
214 if (!pevent_pid_is_registered(pevent, pid))
215 pevent_register_comm(pevent, comm, pid);
216
217 trace_seq_init(&s);
218 pevent_print_event(pevent, &s, &record);
219 trace_seq_do_printf(&s);
220 printf("\n");
221}
222
223void parse_proc_kallsyms(struct pevent *pevent, 186void parse_proc_kallsyms(struct pevent *pevent,
224 char *file, unsigned int size __maybe_unused) 187 char *file, unsigned int size __maybe_unused)
225{ 188{
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 3741572696af..af215c0d2379 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -18,8 +18,6 @@
18 * 18 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */ 20 */
21#define _FILE_OFFSET_BITS 64
22
23#include <dirent.h> 21#include <dirent.h>
24#include <stdio.h> 22#include <stdio.h>
25#include <stdlib.h> 23#include <stdlib.h>
@@ -41,26 +39,14 @@
41 39
42static int input_fd; 40static int input_fd;
43 41
44static int read_page;
45
46int file_bigendian; 42int file_bigendian;
47int host_bigendian; 43int host_bigendian;
48static int long_size; 44static int long_size;
49 45
50static ssize_t calc_data_size; 46static ssize_t trace_data_size;
51static bool repipe; 47static bool repipe;
52 48
53static void *malloc_or_die(int size) 49static int __do_read(int fd, void *buf, int size)
54{
55 void *ret;
56
57 ret = malloc(size);
58 if (!ret)
59 die("malloc");
60 return ret;
61}
62
63static int do_read(int fd, void *buf, int size)
64{ 50{
65 int rsize = size; 51 int rsize = size;
66 52
@@ -73,8 +59,10 @@ static int do_read(int fd, void *buf, int size)
73 if (repipe) { 59 if (repipe) {
74 int retw = write(STDOUT_FILENO, buf, ret); 60 int retw = write(STDOUT_FILENO, buf, ret);
75 61
76 if (retw <= 0 || retw != ret) 62 if (retw <= 0 || retw != ret) {
77 die("repiping input file"); 63 pr_debug("repiping input file");
64 return -1;
65 }
78 } 66 }
79 67
80 size -= ret; 68 size -= ret;
@@ -84,17 +72,18 @@ static int do_read(int fd, void *buf, int size)
84 return rsize; 72 return rsize;
85} 73}
86 74
87static int read_or_die(void *data, int size) 75static int do_read(void *data, int size)
88{ 76{
89 int r; 77 int r;
90 78
91 r = do_read(input_fd, data, size); 79 r = __do_read(input_fd, data, size);
92 if (r <= 0) 80 if (r <= 0) {
93 die("reading input file (size expected=%d received=%d)", 81 pr_debug("reading input file (size expected=%d received=%d)",
94 size, r); 82 size, r);
83 return -1;
84 }
95 85
96 if (calc_data_size) 86 trace_data_size += r;
97 calc_data_size += r;
98 87
99 return r; 88 return r;
100} 89}
@@ -107,7 +96,7 @@ static void skip(int size)
107 96
108 while (size) { 97 while (size) {
109 r = size > BUFSIZ ? BUFSIZ : size; 98 r = size > BUFSIZ ? BUFSIZ : size;
110 read_or_die(buf, r); 99 do_read(buf, r);
111 size -= r; 100 size -= r;
112 }; 101 };
113} 102}
@@ -116,7 +105,8 @@ static unsigned int read4(struct pevent *pevent)
116{ 105{
117 unsigned int data; 106 unsigned int data;
118 107
119 read_or_die(&data, 4); 108 if (do_read(&data, 4) < 0)
109 return 0;
120 return __data2host4(pevent, data); 110 return __data2host4(pevent, data);
121} 111}
122 112
@@ -124,7 +114,8 @@ static unsigned long long read8(struct pevent *pevent)
124{ 114{
125 unsigned long long data; 115 unsigned long long data;
126 116
127 read_or_die(&data, 8); 117 if (do_read(&data, 8) < 0)
118 return 0;
128 return __data2host8(pevent, data); 119 return __data2host8(pevent, data);
129} 120}
130 121
@@ -138,17 +129,23 @@ static char *read_string(void)
138 129
139 for (;;) { 130 for (;;) {
140 r = read(input_fd, &c, 1); 131 r = read(input_fd, &c, 1);
141 if (r < 0) 132 if (r < 0) {
142 die("reading input file"); 133 pr_debug("reading input file");
134 goto out;
135 }
143 136
144 if (!r) 137 if (!r) {
145 die("no data"); 138 pr_debug("no data");
139 goto out;
140 }
146 141
147 if (repipe) { 142 if (repipe) {
148 int retw = write(STDOUT_FILENO, &c, 1); 143 int retw = write(STDOUT_FILENO, &c, 1);
149 144
150 if (retw <= 0 || retw != r) 145 if (retw <= 0 || retw != r) {
151 die("repiping input file string"); 146 pr_debug("repiping input file string");
147 goto out;
148 }
152 } 149 }
153 150
154 buf[size++] = c; 151 buf[size++] = c;
@@ -157,60 +154,79 @@ static char *read_string(void)
157 break; 154 break;
158 } 155 }
159 156
160 if (calc_data_size) 157 trace_data_size += size;
161 calc_data_size += size;
162
163 str = malloc_or_die(size);
164 memcpy(str, buf, size);
165 158
159 str = malloc(size);
160 if (str)
161 memcpy(str, buf, size);
162out:
166 return str; 163 return str;
167} 164}
168 165
169static void read_proc_kallsyms(struct pevent *pevent) 166static int read_proc_kallsyms(struct pevent *pevent)
170{ 167{
171 unsigned int size; 168 unsigned int size;
172 char *buf; 169 char *buf;
173 170
174 size = read4(pevent); 171 size = read4(pevent);
175 if (!size) 172 if (!size)
176 return; 173 return 0;
174
175 buf = malloc(size + 1);
176 if (buf == NULL)
177 return -1;
177 178
178 buf = malloc_or_die(size + 1); 179 if (do_read(buf, size) < 0) {
179 read_or_die(buf, size); 180 free(buf);
181 return -1;
182 }
180 buf[size] = '\0'; 183 buf[size] = '\0';
181 184
182 parse_proc_kallsyms(pevent, buf, size); 185 parse_proc_kallsyms(pevent, buf, size);
183 186
184 free(buf); 187 free(buf);
188 return 0;
185} 189}
186 190
187static void read_ftrace_printk(struct pevent *pevent) 191static int read_ftrace_printk(struct pevent *pevent)
188{ 192{
189 unsigned int size; 193 unsigned int size;
190 char *buf; 194 char *buf;
191 195
196 /* it can have 0 size */
192 size = read4(pevent); 197 size = read4(pevent);
193 if (!size) 198 if (!size)
194 return; 199 return 0;
200
201 buf = malloc(size);
202 if (buf == NULL)
203 return -1;
195 204
196 buf = malloc_or_die(size); 205 if (do_read(buf, size) < 0) {
197 read_or_die(buf, size); 206 free(buf);
207 return -1;
208 }
198 209
199 parse_ftrace_printk(pevent, buf, size); 210 parse_ftrace_printk(pevent, buf, size);
200 211
201 free(buf); 212 free(buf);
213 return 0;
202} 214}
203 215
204static void read_header_files(struct pevent *pevent) 216static int read_header_files(struct pevent *pevent)
205{ 217{
206 unsigned long long size; 218 unsigned long long size;
207 char *header_event; 219 char *header_event;
208 char buf[BUFSIZ]; 220 char buf[BUFSIZ];
221 int ret = 0;
209 222
210 read_or_die(buf, 12); 223 if (do_read(buf, 12) < 0)
224 return -1;
211 225
212 if (memcmp(buf, "header_page", 12) != 0) 226 if (memcmp(buf, "header_page", 12) != 0) {
213 die("did not read header page"); 227 pr_debug("did not read header page");
228 return -1;
229 }
214 230
215 size = read8(pevent); 231 size = read8(pevent);
216 skip(size); 232 skip(size);
@@ -221,269 +237,107 @@ static void read_header_files(struct pevent *pevent)
221 */ 237 */
222 long_size = header_page_size_size; 238 long_size = header_page_size_size;
223 239
224 read_or_die(buf, 13); 240 if (do_read(buf, 13) < 0)
225 if (memcmp(buf, "header_event", 13) != 0) 241 return -1;
226 die("did not read header event"); 242
243 if (memcmp(buf, "header_event", 13) != 0) {
244 pr_debug("did not read header event");
245 return -1;
246 }
227 247
228 size = read8(pevent); 248 size = read8(pevent);
229 header_event = malloc_or_die(size); 249 header_event = malloc(size);
230 read_or_die(header_event, size); 250 if (header_event == NULL)
251 return -1;
252
253 if (do_read(header_event, size) < 0)
254 ret = -1;
255
231 free(header_event); 256 free(header_event);
257 return ret;
232} 258}
233 259
234static void read_ftrace_file(struct pevent *pevent, unsigned long long size) 260static int read_ftrace_file(struct pevent *pevent, unsigned long long size)
235{ 261{
236 char *buf; 262 char *buf;
237 263
238 buf = malloc_or_die(size); 264 buf = malloc(size);
239 read_or_die(buf, size); 265 if (buf == NULL)
266 return -1;
267
268 if (do_read(buf, size) < 0) {
269 free(buf);
270 return -1;
271 }
272
240 parse_ftrace_file(pevent, buf, size); 273 parse_ftrace_file(pevent, buf, size);
241 free(buf); 274 free(buf);
275 return 0;
242} 276}
243 277
244static void read_event_file(struct pevent *pevent, char *sys, 278static int read_event_file(struct pevent *pevent, char *sys,
245 unsigned long long size) 279 unsigned long long size)
246{ 280{
247 char *buf; 281 char *buf;
248 282
249 buf = malloc_or_die(size); 283 buf = malloc(size);
250 read_or_die(buf, size); 284 if (buf == NULL)
285 return -1;
286
287 if (do_read(buf, size) < 0) {
288 free(buf);
289 return -1;
290 }
291
251 parse_event_file(pevent, buf, size, sys); 292 parse_event_file(pevent, buf, size, sys);
252 free(buf); 293 free(buf);
294 return 0;
253} 295}
254 296
255static void read_ftrace_files(struct pevent *pevent) 297static int read_ftrace_files(struct pevent *pevent)
256{ 298{
257 unsigned long long size; 299 unsigned long long size;
258 int count; 300 int count;
259 int i; 301 int i;
302 int ret;
260 303
261 count = read4(pevent); 304 count = read4(pevent);
262 305
263 for (i = 0; i < count; i++) { 306 for (i = 0; i < count; i++) {
264 size = read8(pevent); 307 size = read8(pevent);
265 read_ftrace_file(pevent, size); 308 ret = read_ftrace_file(pevent, size);
309 if (ret)
310 return ret;
266 } 311 }
312 return 0;
267} 313}
268 314
269static void read_event_files(struct pevent *pevent) 315static int read_event_files(struct pevent *pevent)
270{ 316{
271 unsigned long long size; 317 unsigned long long size;
272 char *sys; 318 char *sys;
273 int systems; 319 int systems;
274 int count; 320 int count;
275 int i,x; 321 int i,x;
322 int ret;
276 323
277 systems = read4(pevent); 324 systems = read4(pevent);
278 325
279 for (i = 0; i < systems; i++) { 326 for (i = 0; i < systems; i++) {
280 sys = read_string(); 327 sys = read_string();
328 if (sys == NULL)
329 return -1;
281 330
282 count = read4(pevent); 331 count = read4(pevent);
332
283 for (x=0; x < count; x++) { 333 for (x=0; x < count; x++) {
284 size = read8(pevent); 334 size = read8(pevent);
285 read_event_file(pevent, sys, size); 335 ret = read_event_file(pevent, sys, size);
336 if (ret)
337 return ret;
286 } 338 }
287 } 339 }
288} 340 return 0;
289
290struct cpu_data {
291 unsigned long long offset;
292 unsigned long long size;
293 unsigned long long timestamp;
294 struct pevent_record *next;
295 char *page;
296 int cpu;
297 int index;
298 int page_size;
299};
300
301static struct cpu_data *cpu_data;
302
303static void update_cpu_data_index(int cpu)
304{
305 cpu_data[cpu].offset += page_size;
306 cpu_data[cpu].size -= page_size;
307 cpu_data[cpu].index = 0;
308}
309
310static void get_next_page(int cpu)
311{
312 off_t save_seek;
313 off_t ret;
314
315 if (!cpu_data[cpu].page)
316 return;
317
318 if (read_page) {
319 if (cpu_data[cpu].size <= page_size) {
320 free(cpu_data[cpu].page);
321 cpu_data[cpu].page = NULL;
322 return;
323 }
324
325 update_cpu_data_index(cpu);
326
327 /* other parts of the code may expect the pointer to not move */
328 save_seek = lseek(input_fd, 0, SEEK_CUR);
329
330 ret = lseek(input_fd, cpu_data[cpu].offset, SEEK_SET);
331 if (ret == (off_t)-1)
332 die("failed to lseek");
333 ret = read(input_fd, cpu_data[cpu].page, page_size);
334 if (ret < 0)
335 die("failed to read page");
336
337 /* reset the file pointer back */
338 lseek(input_fd, save_seek, SEEK_SET);
339
340 return;
341 }
342
343 munmap(cpu_data[cpu].page, page_size);
344 cpu_data[cpu].page = NULL;
345
346 if (cpu_data[cpu].size <= page_size)
347 return;
348
349 update_cpu_data_index(cpu);
350
351 cpu_data[cpu].page = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE,
352 input_fd, cpu_data[cpu].offset);
353 if (cpu_data[cpu].page == MAP_FAILED)
354 die("failed to mmap cpu %d at offset 0x%llx",
355 cpu, cpu_data[cpu].offset);
356}
357
358static unsigned int type_len4host(unsigned int type_len_ts)
359{
360 if (file_bigendian)
361 return (type_len_ts >> 27) & ((1 << 5) - 1);
362 else
363 return type_len_ts & ((1 << 5) - 1);
364}
365
366static unsigned int ts4host(unsigned int type_len_ts)
367{
368 if (file_bigendian)
369 return type_len_ts & ((1 << 27) - 1);
370 else
371 return type_len_ts >> 5;
372}
373
374static int calc_index(void *ptr, int cpu)
375{
376 return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page;
377}
378
379struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu)
380{
381 struct pevent_record *data;
382 void *page = cpu_data[cpu].page;
383 int idx = cpu_data[cpu].index;
384 void *ptr = page + idx;
385 unsigned long long extend;
386 unsigned int type_len_ts;
387 unsigned int type_len;
388 unsigned int delta;
389 unsigned int length = 0;
390
391 if (cpu_data[cpu].next)
392 return cpu_data[cpu].next;
393
394 if (!page)
395 return NULL;
396
397 if (!idx) {
398 /* FIXME: handle header page */
399 if (header_page_ts_size != 8)
400 die("expected a long long type for timestamp");
401 cpu_data[cpu].timestamp = data2host8(pevent, ptr);
402 ptr += 8;
403 switch (header_page_size_size) {
404 case 4:
405 cpu_data[cpu].page_size = data2host4(pevent, ptr);
406 ptr += 4;
407 break;
408 case 8:
409 cpu_data[cpu].page_size = data2host8(pevent, ptr);
410 ptr += 8;
411 break;
412 default:
413 die("bad long size");
414 }
415 ptr = cpu_data[cpu].page + header_page_data_offset;
416 }
417
418read_again:
419 idx = calc_index(ptr, cpu);
420
421 if (idx >= cpu_data[cpu].page_size) {
422 get_next_page(cpu);
423 return trace_peek_data(pevent, cpu);
424 }
425
426 type_len_ts = data2host4(pevent, ptr);
427 ptr += 4;
428
429 type_len = type_len4host(type_len_ts);
430 delta = ts4host(type_len_ts);
431
432 switch (type_len) {
433 case RINGBUF_TYPE_PADDING:
434 if (!delta)
435 die("error, hit unexpected end of page");
436 length = data2host4(pevent, ptr);
437 ptr += 4;
438 length *= 4;
439 ptr += length;
440 goto read_again;
441
442 case RINGBUF_TYPE_TIME_EXTEND:
443 extend = data2host4(pevent, ptr);
444 ptr += 4;
445 extend <<= TS_SHIFT;
446 extend += delta;
447 cpu_data[cpu].timestamp += extend;
448 goto read_again;
449
450 case RINGBUF_TYPE_TIME_STAMP:
451 ptr += 12;
452 break;
453 case 0:
454 length = data2host4(pevent, ptr);
455 ptr += 4;
456 die("here! length=%d", length);
457 break;
458 default:
459 length = type_len * 4;
460 break;
461 }
462
463 cpu_data[cpu].timestamp += delta;
464
465 data = malloc_or_die(sizeof(*data));
466 memset(data, 0, sizeof(*data));
467
468 data->ts = cpu_data[cpu].timestamp;
469 data->size = length;
470 data->data = ptr;
471 ptr += length;
472
473 cpu_data[cpu].index = calc_index(ptr, cpu);
474 cpu_data[cpu].next = data;
475
476 return data;
477}
478
479struct pevent_record *trace_read_data(struct pevent *pevent, int cpu)
480{
481 struct pevent_record *data;
482
483 data = trace_peek_data(pevent, cpu);
484 cpu_data[cpu].next = NULL;
485
486 return data;
487} 341}
488 342
489ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe) 343ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
@@ -494,58 +348,85 @@ ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
494 int show_version = 0; 348 int show_version = 0;
495 int show_funcs = 0; 349 int show_funcs = 0;
496 int show_printk = 0; 350 int show_printk = 0;
497 ssize_t size; 351 ssize_t size = -1;
352 struct pevent *pevent;
353 int err;
498 354
499 calc_data_size = 1; 355 *ppevent = NULL;
500 repipe = __repipe;
501 356
357 repipe = __repipe;
502 input_fd = fd; 358 input_fd = fd;
503 359
504 read_or_die(buf, 3); 360 if (do_read(buf, 3) < 0)
505 if (memcmp(buf, test, 3) != 0) 361 return -1;
506 die("no trace data in the file"); 362 if (memcmp(buf, test, 3) != 0) {
363 pr_debug("no trace data in the file");
364 return -1;
365 }
507 366
508 read_or_die(buf, 7); 367 if (do_read(buf, 7) < 0)
509 if (memcmp(buf, "tracing", 7) != 0) 368 return -1;
510 die("not a trace file (missing 'tracing' tag)"); 369 if (memcmp(buf, "tracing", 7) != 0) {
370 pr_debug("not a trace file (missing 'tracing' tag)");
371 return -1;
372 }
511 373
512 version = read_string(); 374 version = read_string();
375 if (version == NULL)
376 return -1;
513 if (show_version) 377 if (show_version)
514 printf("version = %s\n", version); 378 printf("version = %s\n", version);
515 free(version); 379 free(version);
516 380
517 read_or_die(buf, 1); 381 if (do_read(buf, 1) < 0)
382 return -1;
518 file_bigendian = buf[0]; 383 file_bigendian = buf[0];
519 host_bigendian = bigendian(); 384 host_bigendian = bigendian();
520 385
521 *ppevent = read_trace_init(file_bigendian, host_bigendian); 386 pevent = read_trace_init(file_bigendian, host_bigendian);
522 if (*ppevent == NULL) 387 if (pevent == NULL) {
523 die("read_trace_init failed"); 388 pr_debug("read_trace_init failed");
389 goto out;
390 }
524 391
525 read_or_die(buf, 1); 392 if (do_read(buf, 1) < 0)
393 goto out;
526 long_size = buf[0]; 394 long_size = buf[0];
527 395
528 page_size = read4(*ppevent); 396 page_size = read4(pevent);
529 397 if (!page_size)
530 read_header_files(*ppevent); 398 goto out;
531 399
532 read_ftrace_files(*ppevent); 400 err = read_header_files(pevent);
533 read_event_files(*ppevent); 401 if (err)
534 read_proc_kallsyms(*ppevent); 402 goto out;
535 read_ftrace_printk(*ppevent); 403 err = read_ftrace_files(pevent);
536 404 if (err)
537 size = calc_data_size - 1; 405 goto out;
538 calc_data_size = 0; 406 err = read_event_files(pevent);
407 if (err)
408 goto out;
409 err = read_proc_kallsyms(pevent);
410 if (err)
411 goto out;
412 err = read_ftrace_printk(pevent);
413 if (err)
414 goto out;
415
416 size = trace_data_size;
539 repipe = false; 417 repipe = false;
540 418
541 if (show_funcs) { 419 if (show_funcs) {
542 pevent_print_funcs(*ppevent); 420 pevent_print_funcs(pevent);
543 return size; 421 } else if (show_printk) {
544 } 422 pevent_print_printk(pevent);
545 if (show_printk) {
546 pevent_print_printk(*ppevent);
547 return size;
548 } 423 }
549 424
425 *ppevent = pevent;
426 pevent = NULL;
427
428out:
429 if (pevent)
430 pevent_free(pevent);
550 return size; 431 return size;
551} 432}
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index a55fd37ffea1..1978c398ad87 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -30,13 +30,9 @@ enum {
30int bigendian(void); 30int bigendian(void);
31 31
32struct pevent *read_trace_init(int file_bigendian, int host_bigendian); 32struct pevent *read_trace_init(int file_bigendian, int host_bigendian);
33void print_trace_event(struct pevent *pevent, int cpu, void *data, int size);
34void event_format__print(struct event_format *event, 33void event_format__print(struct event_format *event,
35 int cpu, void *data, int size); 34 int cpu, void *data, int size);
36 35
37void print_event(struct pevent *pevent, int cpu, void *data, int size,
38 unsigned long long nsecs, char *comm);
39
40int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size); 36int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size);
41int parse_event_file(struct pevent *pevent, 37int parse_event_file(struct pevent *pevent,
42 char *buf, unsigned long size, char *sys); 38 char *buf, unsigned long size, char *sys);
@@ -72,7 +68,7 @@ struct tracing_data {
72 68
73struct tracing_data *tracing_data_get(struct list_head *pattrs, 69struct tracing_data *tracing_data_get(struct list_head *pattrs,
74 int fd, bool temp); 70 int fd, bool temp);
75void tracing_data_put(struct tracing_data *tdata); 71int tracing_data_put(struct tracing_data *tdata);
76 72
77 73
78struct addr_location; 74struct addr_location;
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 5906e8426cc7..59d868add275 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -12,9 +12,13 @@
12 */ 12 */
13unsigned int page_size; 13unsigned int page_size;
14 14
15bool test_attr__enabled;
16
15bool perf_host = true; 17bool perf_host = true;
16bool perf_guest = false; 18bool perf_guest = false;
17 19
20char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
21
18void event_attr_init(struct perf_event_attr *attr) 22void event_attr_init(struct perf_event_attr *attr)
19{ 23{
20 if (!perf_host) 24 if (!perf_host)
@@ -218,3 +222,50 @@ void dump_stack(void)
218#else 222#else
219void dump_stack(void) {} 223void dump_stack(void) {}
220#endif 224#endif
225
226void get_term_dimensions(struct winsize *ws)
227{
228 char *s = getenv("LINES");
229
230 if (s != NULL) {
231 ws->ws_row = atoi(s);
232 s = getenv("COLUMNS");
233 if (s != NULL) {
234 ws->ws_col = atoi(s);
235 if (ws->ws_row && ws->ws_col)
236 return;
237 }
238 }
239#ifdef TIOCGWINSZ
240 if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
241 ws->ws_row && ws->ws_col)
242 return;
243#endif
244 ws->ws_row = 25;
245 ws->ws_col = 80;
246}
247
248static void set_tracing_events_path(const char *mountpoint)
249{
250 snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s",
251 mountpoint, "tracing/events");
252}
253
254const char *perf_debugfs_mount(const char *mountpoint)
255{
256 const char *mnt;
257
258 mnt = debugfs_mount(mountpoint);
259 if (!mnt)
260 return NULL;
261
262 set_tracing_events_path(mnt);
263
264 return mnt;
265}
266
267void perf_debugfs_set_path(const char *mntpt)
268{
269 snprintf(debugfs_mountpoint, strlen(debugfs_mountpoint), "%s", mntpt);
270 set_tracing_events_path(mntpt);
271}
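
A small usage sketch for the debugfs helpers added above, assuming debugfs_mount() accepts NULL to mean its built-in default mountpoint, as perf's own startup path does; the setup_tracing_events() wrapper is illustrative only:

    #include <stdio.h>
    #include "util.h"   /* perf_debugfs_mount(), tracing_events_path */

    /* Illustrative wrapper: locate (or mount) debugfs and report where
     * tracepoint descriptions will be read from afterwards. */
    static int setup_tracing_events(void)
    {
            const char *mnt = perf_debugfs_mount(NULL);  /* NULL: default mountpoint */

            if (mnt == NULL) {
                    fprintf(stderr, "debugfs not available\n");
                    return -1;
            }

            /* the call above also refreshed the global tracing_events_path */
            printf("tracepoint events under %s\n", tracing_events_path);
            return 0;
    }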
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index c2330918110c..a45710b70a55 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -1,8 +1,6 @@
1#ifndef GIT_COMPAT_UTIL_H 1#ifndef GIT_COMPAT_UTIL_H
2#define GIT_COMPAT_UTIL_H 2#define GIT_COMPAT_UTIL_H
3 3
4#define _FILE_OFFSET_BITS 64
5
6#ifndef FLEX_ARRAY 4#ifndef FLEX_ARRAY
7/* 5/*
8 * See if our compiler is known to support flexible array members. 6 * See if our compiler is known to support flexible array members.
@@ -73,10 +71,14 @@
73#include <linux/magic.h> 71#include <linux/magic.h>
74#include "types.h" 72#include "types.h"
75#include <sys/ttydefaults.h> 73#include <sys/ttydefaults.h>
74#include <lk/debugfs.h>
76 75
77extern const char *graph_line; 76extern const char *graph_line;
78extern const char *graph_dotted_line; 77extern const char *graph_dotted_line;
79extern char buildid_dir[]; 78extern char buildid_dir[];
79extern char tracing_events_path[];
80extern void perf_debugfs_set_path(const char *mountpoint);
81const char *perf_debugfs_mount(const char *mountpoint);
80 82
81/* On most systems <limits.h> would have given us this, but 83/* On most systems <limits.h> would have given us this, but
82 * not on some systems (e.g. GNU/Hurd). 84 * not on some systems (e.g. GNU/Hurd).
@@ -265,10 +267,13 @@ bool is_power_of_2(unsigned long n)
265size_t hex_width(u64 v); 267size_t hex_width(u64 v);
266int hex2u64(const char *ptr, u64 *val); 268int hex2u64(const char *ptr, u64 *val);
267 269
270char *ltrim(char *s);
268char *rtrim(char *s); 271char *rtrim(char *s);
269 272
270void dump_stack(void); 273void dump_stack(void);
271 274
272extern unsigned int page_size; 275extern unsigned int page_size;
273 276
274#endif 277struct winsize;
278void get_term_dimensions(struct winsize *ws);
279#endif /* GIT_COMPAT_UTIL_H */
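
A short sketch of the newly exported get_term_dimensions() helper, which tries $LINES/$COLUMNS, then TIOCGWINSZ, and finally falls back to 80x25; the output_width() name is illustrative only:

    #include <sys/ioctl.h>      /* struct winsize, TIOCGWINSZ */
    #include "util.h"           /* get_term_dimensions() */

    /* Illustrative helper: pick an output width for column formatting. */
    static int output_width(void)
    {
            struct winsize ws;

            get_term_dimensions(&ws);  /* $LINES/$COLUMNS, then TIOCGWINSZ, else 80x25 */
            return ws.ws_col;
    }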