Diffstat (limited to 'tools/perf/util')
39 files changed, 1722 insertions, 631 deletions
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index e437edb72417..deffb8c96071 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -14,7 +14,9 @@
 #include <linux/kernel.h>
 #include "debug.h"
 
-static int build_id__mark_dso_hit(event_t *event, struct perf_session *session)
+static int build_id__mark_dso_hit(event_t *event,
+				  struct sample_data *sample __used,
+				  struct perf_session *session)
 {
 	struct addr_location al;
 	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
@@ -35,7 +37,8 @@ static int build_id__mark_dso_hit(event_t *event, struct perf_session *session)
 	return 0;
 }
 
-static int event__exit_del_thread(event_t *self, struct perf_session *session)
+static int event__exit_del_thread(event_t *self, struct sample_data *sample __used,
+				  struct perf_session *session)
 {
 	struct thread *thread = perf_session__findnew(session, self->fork.tid);
 
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 0f9b8d7a7d7e..3ccaa1043383 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -4,32 +4,53 @@
 #include <assert.h>
 #include <stdio.h>
 
-int cpumap[MAX_NR_CPUS];
-
-static int default_cpu_map(void)
+static struct cpu_map *cpu_map__default_new(void)
 {
-	int nr_cpus, i;
+	struct cpu_map *cpus;
+	int nr_cpus;
 
 	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
-	assert(nr_cpus <= MAX_NR_CPUS);
-	assert((int)nr_cpus >= 0);
+	if (nr_cpus < 0)
+		return NULL;
+
+	cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
+	if (cpus != NULL) {
+		int i;
+		for (i = 0; i < nr_cpus; ++i)
+			cpus->map[i] = i;
 
-	for (i = 0; i < nr_cpus; ++i)
-		cpumap[i] = i;
+		cpus->nr = nr_cpus;
+	}
 
-	return nr_cpus;
+	return cpus;
 }
 
-static int read_all_cpu_map(void)
+static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
 {
+	size_t payload_size = nr_cpus * sizeof(int);
+	struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
+
+	if (cpus != NULL) {
+		cpus->nr = nr_cpus;
+		memcpy(cpus->map, tmp_cpus, payload_size);
+	}
+
+	return cpus;
+}
+
+static struct cpu_map *cpu_map__read_all_cpu_map(void)
+{
+	struct cpu_map *cpus = NULL;
 	FILE *onlnf;
 	int nr_cpus = 0;
+	int *tmp_cpus = NULL, *tmp;
+	int max_entries = 0;
 	int n, cpu, prev;
 	char sep;
 
 	onlnf = fopen("/sys/devices/system/cpu/online", "r");
 	if (!onlnf)
-		return default_cpu_map();
+		return cpu_map__default_new();
 
 	sep = 0;
 	prev = -1;
@@ -38,12 +59,28 @@ static int read_all_cpu_map(void)
 		if (n <= 0)
 			break;
 		if (prev >= 0) {
-			assert(nr_cpus + cpu - prev - 1 < MAX_NR_CPUS);
+			int new_max = nr_cpus + cpu - prev - 1;
+
+			if (new_max >= max_entries) {
+				max_entries = new_max + MAX_NR_CPUS / 2;
+				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+				if (tmp == NULL)
+					goto out_free_tmp;
+				tmp_cpus = tmp;
+			}
+
 			while (++prev < cpu)
-				cpumap[nr_cpus++] = prev;
+				tmp_cpus[nr_cpus++] = prev;
+		}
+		if (nr_cpus == max_entries) {
+			max_entries += MAX_NR_CPUS;
+			tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+			if (tmp == NULL)
+				goto out_free_tmp;
+			tmp_cpus = tmp;
 		}
-		assert (nr_cpus < MAX_NR_CPUS);
-		cpumap[nr_cpus++] = cpu;
+
+		tmp_cpus[nr_cpus++] = cpu;
 		if (n == 2 && sep == '-')
 			prev = cpu;
 		else
@@ -51,24 +88,31 @@ static int read_all_cpu_map(void)
 		if (n == 1 || sep == '\n')
 			break;
 	}
-	fclose(onlnf);
-	if (nr_cpus > 0)
-		return nr_cpus;
 
-	return default_cpu_map();
+	if (nr_cpus > 0)
+		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
+	else
+		cpus = cpu_map__default_new();
+out_free_tmp:
+	free(tmp_cpus);
+	fclose(onlnf);
+	return cpus;
 }
 
-int read_cpu_map(const char *cpu_list)
+struct cpu_map *cpu_map__new(const char *cpu_list)
 {
+	struct cpu_map *cpus = NULL;
 	unsigned long start_cpu, end_cpu = 0;
 	char *p = NULL;
 	int i, nr_cpus = 0;
+	int *tmp_cpus = NULL, *tmp;
+	int max_entries = 0;
 
 	if (!cpu_list)
-		return read_all_cpu_map();
+		return cpu_map__read_all_cpu_map();
 
 	if (!isdigit(*cpu_list))
-		goto invalid;
+		goto out;
 
 	while (isdigit(*cpu_list)) {
 		p = NULL;
@@ -94,21 +138,42 @@ int read_cpu_map(const char *cpu_list)
 		for (; start_cpu <= end_cpu; start_cpu++) {
 			/* check for duplicates */
 			for (i = 0; i < nr_cpus; i++)
-				if (cpumap[i] == (int)start_cpu)
+				if (tmp_cpus[i] == (int)start_cpu)
 					goto invalid;
 
-			assert(nr_cpus < MAX_NR_CPUS);
-			cpumap[nr_cpus++] = (int)start_cpu;
+			if (nr_cpus == max_entries) {
+				max_entries += MAX_NR_CPUS;
+				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+				if (tmp == NULL)
+					goto invalid;
+				tmp_cpus = tmp;
+			}
+			tmp_cpus[nr_cpus++] = (int)start_cpu;
 		}
 		if (*p)
 			++p;
 
 		cpu_list = p;
 	}
-	if (nr_cpus > 0)
-		return nr_cpus;
 
-	return default_cpu_map();
+	if (nr_cpus > 0)
+		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
+	else
+		cpus = cpu_map__default_new();
 invalid:
-	return -1;
+	free(tmp_cpus);
+out:
+	return cpus;
+}
+
+struct cpu_map *cpu_map__dummy_new(void)
+{
+	struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
+
+	if (cpus != NULL) {
+		cpus->nr = 1;
+		cpus->map[0] = -1;
+	}
+
+	return cpus;
 }
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 3e60f56e490e..f7a4f42f6307 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -1,7 +1,13 @@
 #ifndef __PERF_CPUMAP_H
 #define __PERF_CPUMAP_H
 
-extern int read_cpu_map(const char *cpu_list);
-extern int cpumap[];
+struct cpu_map {
+	int nr;
+	int map[];
+};
+
+struct cpu_map *cpu_map__new(const char *cpu_list);
+struct cpu_map *cpu_map__dummy_new(void);
+void *cpu_map__delete(struct cpu_map *map);
 
 #endif /* __PERF_CPUMAP_H */
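Usage note: the new interface returns a heap-allocated, variable-length map instead of filling the old cpumap[] global. A minimal sketch of a caller (the count_online_cpus() helper is hypothetical, and cleanup is simplified to free()):

	#include <stdio.h>
	#include <stdlib.h>
	#include "cpumap.h"

	/* Hypothetical helper: list the CPUs perf would use in system-wide mode. */
	static int count_online_cpus(void)
	{
		struct cpu_map *cpus = cpu_map__new(NULL); /* NULL means all online CPUs */
		int i, nr;

		if (cpus == NULL)
			return -1;

		nr = cpus->nr;
		for (i = 0; i < nr; i++)
			printf("cpu %d\n", cpus->map[i]);

		free(cpus); /* sketch only; the header also declares cpu_map__delete() */
		return nr;
	}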
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index c8d81b00089d..01bbe8ecec3f 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -46,20 +46,16 @@ int dump_printf(const char *fmt, ...)
 	return ret;
 }
 
-static int dump_printf_color(const char *fmt, const char *color, ...)
+#ifdef NO_NEWT_SUPPORT
+void ui__warning(const char *format, ...)
 {
 	va_list args;
-	int ret = 0;
 
-	if (dump_trace) {
-		va_start(args, color);
-		ret = color_vfprintf(stdout, color, fmt, args);
-		va_end(args);
-	}
-
-	return ret;
+	va_start(args, format);
+	vfprintf(stderr, format, args);
+	va_end(args);
 }
-
+#endif
 
 void trace_event(event_t *event)
 {
@@ -70,29 +66,29 @@ void trace_event(event_t *event)
 	if (!dump_trace)
 		return;
 
-	dump_printf(".");
-	dump_printf_color("\n. ... raw event: size %d bytes\n", color,
+	printf(".");
+	color_fprintf(stdout, color, "\n. ... raw event: size %d bytes\n",
 		    event->header.size);
 
 	for (i = 0; i < event->header.size; i++) {
 		if ((i & 15) == 0) {
-			dump_printf(".");
-			dump_printf_color(" %04x: ", color, i);
+			printf(".");
+			color_fprintf(stdout, color, " %04x: ", i);
 		}
 
-		dump_printf_color(" %02x", color, raw_event[i]);
+		color_fprintf(stdout, color, " %02x", raw_event[i]);
 
 		if (((i & 15) == 15) || i == event->header.size-1) {
-			dump_printf_color(" ", color);
+			color_fprintf(stdout, color, " ");
 			for (j = 0; j < 15-(i & 15); j++)
-				dump_printf_color(" ", color);
+				color_fprintf(stdout, color, " ");
 			for (j = i & ~15; j <= i; j++) {
-				dump_printf_color("%c", color,
+				color_fprintf(stdout, color, "%c",
 					isprint(raw_event[j]) ?
 					raw_event[j] : '.');
 			}
-			dump_printf_color("\n", color);
+			color_fprintf(stdout, color, "\n");
 		}
 	}
-	dump_printf(".\n");
+	printf(".\n");
 }
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 7b514082bbaf..ca35fd66b5df 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -35,4 +35,6 @@ int ui_helpline__show_help(const char *format, va_list ap);
 #include "ui/progress.h"
 #endif
 
+void ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
+
 #endif /* __PERF_DEBUG_H */
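Usage note: ui__warning() is printf-like and, in the stdio (NO_NEWT_SUPPORT) build shown above, writes to stderr. A one-line call-site sketch with a hypothetical message:

	ui__warning("failed to mmap ring buffer: %s\n", strerror(errno));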
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index dab9e754a281..2302ec051bb4 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -7,7 +7,7 @@
 #include "strlist.h"
 #include "thread.h"
 
-const char *event__name[] = {
+static const char *event__name[] = {
 	[0] = "TOTAL",
 	[PERF_RECORD_MMAP] = "MMAP",
 	[PERF_RECORD_LOST] = "LOST",
@@ -22,13 +22,31 @@ const char *event__name[] = {
 	[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
 	[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
 	[PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
+	[PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
 };
 
-static pid_t event__synthesize_comm(pid_t pid, int full,
+const char *event__get_event_name(unsigned int id)
+{
+	if (id >= ARRAY_SIZE(event__name))
+		return "INVALID";
+	if (!event__name[id])
+		return "UNKNOWN";
+	return event__name[id];
+}
+
+static struct sample_data synth_sample = {
+	.pid	   = -1,
+	.tid	   = -1,
+	.time	   = -1,
+	.stream_id = -1,
+	.cpu	   = -1,
+	.period	   = 1,
+};
+
+static pid_t event__synthesize_comm(event_t *event, pid_t pid, int full,
 				    event__handler_t process,
 				    struct perf_session *session)
 {
-	event_t ev;
 	char filename[PATH_MAX];
 	char bf[BUFSIZ];
 	FILE *fp;
@@ -49,34 +67,39 @@ out_race:
 	return 0;
 }
 
-	memset(&ev.comm, 0, sizeof(ev.comm));
-	while (!ev.comm.comm[0] || !ev.comm.pid) {
-		if (fgets(bf, sizeof(bf), fp) == NULL)
-			goto out_failure;
+	memset(&event->comm, 0, sizeof(event->comm));
+
+	while (!event->comm.comm[0] || !event->comm.pid) {
+		if (fgets(bf, sizeof(bf), fp) == NULL) {
+			pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
+			goto out;
+		}
 
 		if (memcmp(bf, "Name:", 5) == 0) {
 			char *name = bf + 5;
 			while (*name && isspace(*name))
 				++name;
 			size = strlen(name) - 1;
-			memcpy(ev.comm.comm, name, size++);
+			memcpy(event->comm.comm, name, size++);
 		} else if (memcmp(bf, "Tgid:", 5) == 0) {
 			char *tgids = bf + 5;
 			while (*tgids && isspace(*tgids))
 				++tgids;
-			tgid = ev.comm.pid = atoi(tgids);
+			tgid = event->comm.pid = atoi(tgids);
 		}
 	}
 
-	ev.comm.header.type = PERF_RECORD_COMM;
+	event->comm.header.type = PERF_RECORD_COMM;
 	size = ALIGN(size, sizeof(u64));
-	ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size);
-
+	memset(event->comm.comm + size, 0, session->id_hdr_size);
+	event->comm.header.size = (sizeof(event->comm) -
+				(sizeof(event->comm.comm) - size) +
+				session->id_hdr_size);
 	if (!full) {
-		ev.comm.tid = pid;
+		event->comm.tid = pid;
 
-		process(&ev, session);
-		goto out_fclose;
+		process(event, &synth_sample, session);
+		goto out;
 	}
 
 	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
@@ -91,22 +114,19 @@ out_race:
 		if (*end)
 			continue;
 
-		ev.comm.tid = pid;
+		event->comm.tid = pid;
 
-		process(&ev, session);
+		process(event, &synth_sample, session);
 	}
-	closedir(tasks);
 
-out_fclose:
+	closedir(tasks);
+out:
 	fclose(fp);
-	return tgid;
 
-out_failure:
-	pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
-	return -1;
+	return tgid;
 }
 
-static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
+static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid,
 					 event__handler_t process,
 					 struct perf_session *session)
 {
@@ -124,29 +144,25 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
 		return -1;
 	}
 
+	event->header.type = PERF_RECORD_MMAP;
+	/*
+	 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
+	 */
+	event->header.misc = PERF_RECORD_MISC_USER;
+
 	while (1) {
 		char bf[BUFSIZ], *pbf = bf;
-		event_t ev = {
-			.header = {
-				.type = PERF_RECORD_MMAP,
-				/*
-				 * Just like the kernel, see __perf_event_mmap
-				 * in kernel/perf_event.c
-				 */
-				.misc = PERF_RECORD_MISC_USER,
-			},
-		};
 		int n;
 		size_t size;
 		if (fgets(bf, sizeof(bf), fp) == NULL)
 			break;
 
 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
-		n = hex2u64(pbf, &ev.mmap.start);
+		n = hex2u64(pbf, &event->mmap.start);
 		if (n < 0)
 			continue;
 		pbf += n + 1;
-		n = hex2u64(pbf, &ev.mmap.len);
+		n = hex2u64(pbf, &event->mmap.len);
 		if (n < 0)
 			continue;
 		pbf += n + 3;
@@ -161,19 +177,21 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
 			continue;
 
 		pbf += 3;
-		n = hex2u64(pbf, &ev.mmap.pgoff);
+		n = hex2u64(pbf, &event->mmap.pgoff);
 
 		size = strlen(execname);
 		execname[size - 1] = '\0'; /* Remove \n */
-		memcpy(ev.mmap.filename, execname, size);
+		memcpy(event->mmap.filename, execname, size);
 		size = ALIGN(size, sizeof(u64));
-		ev.mmap.len -= ev.mmap.start;
-		ev.mmap.header.size = (sizeof(ev.mmap) -
-				       (sizeof(ev.mmap.filename) - size));
-		ev.mmap.pid = tgid;
-		ev.mmap.tid = pid;
-
-		process(&ev, session);
+		event->mmap.len -= event->mmap.start;
+		event->mmap.header.size = (sizeof(event->mmap) -
+					(sizeof(event->mmap.filename) - size));
+		memset(event->mmap.filename + size, 0, session->id_hdr_size);
+		event->mmap.header.size += session->id_hdr_size;
+		event->mmap.pid = tgid;
+		event->mmap.tid = pid;
+
+		process(event, &synth_sample, session);
 	}
 }
 
@@ -187,20 +205,27 @@ int event__synthesize_modules(event__handler_t process,
 {
 	struct rb_node *nd;
 	struct map_groups *kmaps = &machine->kmaps;
-	u16 misc;
+	event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
+
+	if (event == NULL) {
+		pr_debug("Not enough memory synthesizing mmap event "
+			 "for kernel modules\n");
+		return -1;
+	}
+
+	event->header.type = PERF_RECORD_MMAP;
 
 	/*
 	 * kernel uses 0 for user space maps, see kernel/perf_event.c
 	 * __perf_event_mmap
 	 */
 	if (machine__is_host(machine))
-		misc = PERF_RECORD_MISC_KERNEL;
+		event->header.misc = PERF_RECORD_MISC_KERNEL;
 	else
-		misc = PERF_RECORD_MISC_GUEST_KERNEL;
+		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
 
 	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
 	     nd; nd = rb_next(nd)) {
-		event_t ev;
 		size_t size;
 		struct map *pos = rb_entry(nd, struct map, rb_node);
 
@@ -208,39 +233,78 @@ int event__synthesize_modules(event__handler_t process,
 			continue;
 
 		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
-		memset(&ev, 0, sizeof(ev));
-		ev.mmap.header.misc = misc;
-		ev.mmap.header.type = PERF_RECORD_MMAP;
-		ev.mmap.header.size = (sizeof(ev.mmap) -
-				       (sizeof(ev.mmap.filename) - size));
-		ev.mmap.start = pos->start;
-		ev.mmap.len = pos->end - pos->start;
-		ev.mmap.pid = machine->pid;
+		event->mmap.header.type = PERF_RECORD_MMAP;
+		event->mmap.header.size = (sizeof(event->mmap) -
+					(sizeof(event->mmap.filename) - size));
+		memset(event->mmap.filename + size, 0, session->id_hdr_size);
+		event->mmap.header.size += session->id_hdr_size;
+		event->mmap.start = pos->start;
+		event->mmap.len = pos->end - pos->start;
+		event->mmap.pid = machine->pid;
 
-		memcpy(ev.mmap.filename, pos->dso->long_name,
+		memcpy(event->mmap.filename, pos->dso->long_name,
 		       pos->dso->long_name_len + 1);
-		process(&ev, session);
+		process(event, &synth_sample, session);
 	}
 
+	free(event);
 	return 0;
 }
 
-int event__synthesize_thread(pid_t pid, event__handler_t process,
-			     struct perf_session *session)
+static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event,
+				      pid_t pid, event__handler_t process,
+				      struct perf_session *session)
 {
-	pid_t tgid = event__synthesize_comm(pid, 1, process, session);
+	pid_t tgid = event__synthesize_comm(comm_event, pid, 1, process,
+					    session);
 	if (tgid == -1)
 		return -1;
-	return event__synthesize_mmap_events(pid, tgid, process, session);
+	return event__synthesize_mmap_events(mmap_event, pid, tgid,
+					     process, session);
+}
+
+int event__synthesize_thread(pid_t pid, event__handler_t process,
+			     struct perf_session *session)
+{
+	event_t *comm_event, *mmap_event;
+	int err = -1;
+
+	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
+	if (comm_event == NULL)
+		goto out;
+
+	mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
+	if (mmap_event == NULL)
+		goto out_free_comm;
+
+	err = __event__synthesize_thread(comm_event, mmap_event, pid,
+					 process, session);
+	free(mmap_event);
+out_free_comm:
+	free(comm_event);
+out:
+	return err;
 }
 
-void event__synthesize_threads(event__handler_t process,
-			       struct perf_session *session)
+int event__synthesize_threads(event__handler_t process,
+			      struct perf_session *session)
 {
 	DIR *proc;
 	struct dirent dirent, *next;
+	event_t *comm_event, *mmap_event;
+	int err = -1;
+
+	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
+	if (comm_event == NULL)
+		goto out;
+
+	mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
+	if (mmap_event == NULL)
+		goto out_free_comm;
 
 	proc = opendir("/proc");
+	if (proc == NULL)
+		goto out_free_mmap;
 
 	while (!readdir_r(proc, &dirent, &next) && next) {
 		char *end;
@@ -249,10 +313,18 @@ void event__synthesize_threads(event__handler_t process,
 		if (*end) /* only interested in proper numerical dirents */
 			continue;
 
-		event__synthesize_thread(pid, process, session);
+		__event__synthesize_thread(comm_event, mmap_event, pid,
+					   process, session);
 	}
 
 	closedir(proc);
+	err = 0;
+out_free_mmap:
+	free(mmap_event);
+out_free_comm:
+	free(comm_event);
+out:
+	return err;
 }
 
 struct process_symbol_args {
@@ -260,7 +332,8 @@ struct process_symbol_args {
 	u64 start;
 };
 
-static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
+static int find_symbol_cb(void *arg, const char *name, char type,
+			  u64 start, u64 end __used)
 {
 	struct process_symbol_args *args = arg;
 
@@ -286,18 +359,20 @@ int event__synthesize_kernel_mmap(event__handler_t process,
 	char path[PATH_MAX];
 	char name_buff[PATH_MAX];
 	struct map *map;
-
-	event_t ev = {
-		.header = {
-			.type = PERF_RECORD_MMAP,
-		},
-	};
+	int err;
 	/*
 	 * We should get this from /sys/kernel/sections/.text, but till that is
 	 * available use this, and after it is use this as a fallback for older
 	 * kernels.
 	 */
	struct process_symbol_args args = { .name = symbol_name, };
+	event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
+
+	if (event == NULL) {
+		pr_debug("Not enough memory synthesizing mmap event "
+			 "for kernel modules\n");
+		return -1;
+	}
 
 	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
 	if (machine__is_host(machine)) {
@@ -305,10 +380,10 @@ int event__synthesize_kernel_mmap(event__handler_t process,
 		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
 		 * see kernel/perf_event.c __perf_event_mmap
 		 */
-		ev.header.misc = PERF_RECORD_MISC_KERNEL;
+		event->header.misc = PERF_RECORD_MISC_KERNEL;
 		filename = "/proc/kallsyms";
 	} else {
-		ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
 		if (machine__is_default_guest(machine))
 			filename = (char *) symbol_conf.default_guest_kallsyms;
 		else {
@@ -321,17 +396,21 @@ int event__synthesize_kernel_mmap(event__handler_t process,
 		return -ENOENT;
 
 	map = machine->vmlinux_maps[MAP__FUNCTION];
-	size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
+	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
 			"%s%s", mmap_name, symbol_name) + 1;
 	size = ALIGN(size, sizeof(u64));
-	ev.mmap.header.size = (sizeof(ev.mmap) -
-			(sizeof(ev.mmap.filename) - size));
-	ev.mmap.pgoff = args.start;
-	ev.mmap.start = map->start;
-	ev.mmap.len = map->end - ev.mmap.start;
-	ev.mmap.pid = machine->pid;
-
-	return process(&ev, session);
+	event->mmap.header.type = PERF_RECORD_MMAP;
+	event->mmap.header.size = (sizeof(event->mmap) -
+			(sizeof(event->mmap.filename) - size) + session->id_hdr_size);
+	event->mmap.pgoff = args.start;
+	event->mmap.start = map->start;
+	event->mmap.len = map->end - event->mmap.start;
+	event->mmap.pid = machine->pid;
+
+	err = process(event, &synth_sample, session);
+	free(event);
+
+	return err;
 }
 
 static void thread__comm_adjust(struct thread *self, struct hists *hists)
@@ -361,7 +440,8 @@ static int thread__set_comm_adjust(struct thread *self, const char *comm,
 	return 0;
 }
 
-int event__process_comm(event_t *self, struct perf_session *session)
+int event__process_comm(event_t *self, struct sample_data *sample __used,
+			struct perf_session *session)
 {
 	struct thread *thread = perf_session__findnew(session, self->comm.tid);
 
@@ -376,7 +456,8 @@ int event__process_comm(event_t *self, struct perf_session *session)
 	return 0;
 }
 
-int event__process_lost(event_t *self, struct perf_session *session)
+int event__process_lost(event_t *self, struct sample_data *sample __used,
+			struct perf_session *session)
 {
 	dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
 	session->hists.stats.total_lost += self->lost.lost;
@@ -392,7 +473,7 @@ static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
 	 * a zero sized synthesized MMAP event for the kernel.
 	 */
 	if (maps[MAP__FUNCTION]->end == 0)
-		maps[MAP__FUNCTION]->end = ~0UL;
+		maps[MAP__FUNCTION]->end = ~0ULL;
 }
 
 static int event__process_kernel_mmap(event_t *self,
@@ -485,7 +566,8 @@ out_problem:
 	return -1;
 }
 
-int event__process_mmap(event_t *self, struct perf_session *session)
+int event__process_mmap(event_t *self, struct sample_data *sample __used,
+			struct perf_session *session)
 {
 	struct machine *machine;
 	struct thread *thread;
@@ -526,7 +608,8 @@ out_problem:
 	return 0;
 }
 
-int event__process_task(event_t *self, struct perf_session *session)
+int event__process_task(event_t *self, struct sample_data *sample __used,
+			struct perf_session *session)
 {
 	struct thread *thread = perf_session__findnew(session, self->fork.tid);
 	struct thread *parent = perf_session__findnew(session, self->fork.ptid);
@@ -548,18 +631,19 @@ int event__process_task(event_t *self, struct perf_session *session)
 	return 0;
 }
 
-int event__process(event_t *event, struct perf_session *session)
+int event__process(event_t *event, struct sample_data *sample,
+		   struct perf_session *session)
 {
 	switch (event->header.type) {
 	case PERF_RECORD_COMM:
-		event__process_comm(event, session);
+		event__process_comm(event, sample, session);
 		break;
 	case PERF_RECORD_MMAP:
-		event__process_mmap(event, session);
+		event__process_mmap(event, sample, session);
 		break;
 	case PERF_RECORD_FORK:
 	case PERF_RECORD_EXIT:
-		event__process_task(event, session);
+		event__process_task(event, sample, session);
 		break;
 	default:
 		break;
@@ -674,32 +758,8 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session,
 			     symbol_filter_t filter)
 {
 	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-	struct thread *thread;
-
-	event__parse_sample(self, session->sample_type, data);
-
-	dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld cpu:%d\n",
-		    self->header.misc, data->pid, data->tid, data->ip,
-		    data->period, data->cpu);
-
-	if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
-		unsigned int i;
-
-		dump_printf("... chain: nr:%Lu\n", data->callchain->nr);
+	struct thread *thread = perf_session__findnew(session, self->ip.pid);
 
-		if (!ip_callchain__valid(data->callchain, self)) {
-			pr_debug("call-chain problem with event, "
-				 "skipping it.\n");
-			goto out_filtered;
-		}
-
-		if (dump_trace) {
-			for (i = 0; i < data->callchain->nr; i++)
-				dump_printf("..... %2d: %016Lx\n",
-					    i, data->callchain->ips[i]);
-		}
-	}
-	thread = perf_session__findnew(session, self->ip.pid);
 	if (thread == NULL)
 		return -1;
 
@@ -766,9 +826,65 @@ out_filtered:
 	return 0;
 }
 
-int event__parse_sample(const event_t *event, u64 type, struct sample_data *data)
+static int event__parse_id_sample(const event_t *event,
+				  struct perf_session *session,
+				  struct sample_data *sample)
 {
-	const u64 *array = event->sample.array;
+	const u64 *array;
+	u64 type;
+
+	sample->cpu = sample->pid = sample->tid = -1;
+	sample->stream_id = sample->id = sample->time = -1ULL;
+
+	if (!session->sample_id_all)
+		return 0;
+
+	array = event->sample.array;
+	array += ((event->header.size -
+		   sizeof(event->header)) / sizeof(u64)) - 1;
+	type = session->sample_type;
+
+	if (type & PERF_SAMPLE_CPU) {
+		u32 *p = (u32 *)array;
+		sample->cpu = *p;
+		array--;
+	}
+
+	if (type & PERF_SAMPLE_STREAM_ID) {
+		sample->stream_id = *array;
+		array--;
+	}
+
+	if (type & PERF_SAMPLE_ID) {
+		sample->id = *array;
+		array--;
+	}
+
+	if (type & PERF_SAMPLE_TIME) {
+		sample->time = *array;
+		array--;
+	}
+
+	if (type & PERF_SAMPLE_TID) {
+		u32 *p = (u32 *)array;
+		sample->pid = p[0];
+		sample->tid = p[1];
+	}
+
+	return 0;
+}
+
+int event__parse_sample(const event_t *event, struct perf_session *session,
+			struct sample_data *data)
+{
+	const u64 *array;
+	u64 type;
+
+	if (event->header.type != PERF_RECORD_SAMPLE)
+		return event__parse_id_sample(event, session, data);
+
+	array = event->sample.array;
+	type = session->sample_type;
 
 	if (type & PERF_SAMPLE_IP) {
 		data->ip = event->ip.ip;
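Usage note: event__parse_sample() now takes the session (for sample_type and sample_id_all) instead of a raw type mask, and non-SAMPLE events are routed to the new id-sample parser that walks the trailing sample_id block. A minimal sketch of a caller, assuming a hypothetical tool loop that already has event and session:

	struct sample_data sample;

	if (event__parse_sample(event, session, &sample) == 0 &&
	    event->header.type == PERF_RECORD_SAMPLE)
		printf("ip %#Lx pid %d cpu %d\n", sample.ip, sample.pid, sample.cpu);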
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 8e790dae7026..2b7e91902f10 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -85,6 +85,7 @@ struct build_id_event {
 };
 
 enum perf_user_event_type { /* above any possible kernel type */
+	PERF_RECORD_USER_TYPE_START = 64,
 	PERF_RECORD_HEADER_ATTR = 64,
 	PERF_RECORD_HEADER_EVENT_TYPE = 65,
 	PERF_RECORD_HEADER_TRACING_DATA = 66,
@@ -135,12 +136,15 @@ void event__print_totals(void);
 
 struct perf_session;
 
-typedef int (*event__handler_t)(event_t *event, struct perf_session *session);
+typedef int (*event__handler_synth_t)(event_t *event,
+				      struct perf_session *session);
+typedef int (*event__handler_t)(event_t *event, struct sample_data *sample,
+				struct perf_session *session);
 
 int event__synthesize_thread(pid_t pid, event__handler_t process,
 			     struct perf_session *session);
-void event__synthesize_threads(event__handler_t process,
-			       struct perf_session *session);
+int event__synthesize_threads(event__handler_t process,
+			      struct perf_session *session);
 int event__synthesize_kernel_mmap(event__handler_t process,
 				  struct perf_session *session,
 				  struct machine *machine,
@@ -150,18 +154,24 @@ int event__synthesize_modules(event__handler_t process,
 			      struct perf_session *session,
 			      struct machine *machine);
 
-int event__process_comm(event_t *self, struct perf_session *session);
-int event__process_lost(event_t *self, struct perf_session *session);
-int event__process_mmap(event_t *self, struct perf_session *session);
-int event__process_task(event_t *self, struct perf_session *session);
-int event__process(event_t *event, struct perf_session *session);
+int event__process_comm(event_t *self, struct sample_data *sample,
+			struct perf_session *session);
+int event__process_lost(event_t *self, struct sample_data *sample,
+			struct perf_session *session);
+int event__process_mmap(event_t *self, struct sample_data *sample,
+			struct perf_session *session);
+int event__process_task(event_t *self, struct sample_data *sample,
+			struct perf_session *session);
+int event__process(event_t *event, struct sample_data *sample,
+		   struct perf_session *session);
 
 struct addr_location;
 int event__preprocess_sample(const event_t *self, struct perf_session *session,
 			     struct addr_location *al, struct sample_data *data,
 			     symbol_filter_t filter);
-int event__parse_sample(const event_t *event, u64 type, struct sample_data *data);
+int event__parse_sample(const event_t *event, struct perf_session *session,
+			struct sample_data *sample);
 
-extern const char *event__name[];
+const char *event__get_event_name(unsigned int id);
 
 #endif /* __PERF_RECORD_H */
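Usage note: every event__handler_t callback now receives the parsed (or synthesized) sample alongside the event. A minimal sketch of a handler matching the new typedef (the function name is hypothetical tool code, not part of this change):

	static int process_event_stub(event_t *event, struct sample_data *sample,
				      struct perf_session *session __used)
	{
		/* sample->pid/tid/time/cpu carry real values when sample_id_all is set */
		dump_printf("%s: pid %d tid %d\n",
			    event__get_event_name(event->header.type),
			    sample->pid, sample->tid);
		return 0;
	}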
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
new file mode 100644
index 000000000000..c95267e63c5b
--- /dev/null
+++ b/tools/perf/util/evsel.c
@@ -0,0 +1,186 @@
+#include "evsel.h"
+#include "../perf.h"
+#include "util.h"
+#include "cpumap.h"
+#include "thread.h"
+
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+
+struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx)
+{
+	struct perf_evsel *evsel = zalloc(sizeof(*evsel));
+
+	if (evsel != NULL) {
+		evsel->idx = idx;
+		evsel->attr.type = type;
+		evsel->attr.config = config;
+		INIT_LIST_HEAD(&evsel->node);
+	}
+
+	return evsel;
+}
+
+int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
+	return evsel->fd != NULL ? 0 : -ENOMEM;
+}
+
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
+{
+	evsel->counts = zalloc((sizeof(*evsel->counts) +
+				(ncpus * sizeof(struct perf_counts_values))));
+	return evsel->counts != NULL ? 0 : -ENOMEM;
+}
+
+void perf_evsel__free_fd(struct perf_evsel *evsel)
+{
+	xyarray__delete(evsel->fd);
+	evsel->fd = NULL;
+}
+
+void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+	int cpu, thread;
+
+	for (cpu = 0; cpu < ncpus; cpu++)
+		for (thread = 0; thread < nthreads; ++thread) {
+			close(FD(evsel, cpu, thread));
+			FD(evsel, cpu, thread) = -1;
+		}
+}
+
+void perf_evsel__delete(struct perf_evsel *evsel)
+{
+	assert(list_empty(&evsel->node));
+	xyarray__delete(evsel->fd);
+	free(evsel);
+}
+
+int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+			      int cpu, int thread, bool scale)
+{
+	struct perf_counts_values count;
+	size_t nv = scale ? 3 : 1;
+
+	if (FD(evsel, cpu, thread) < 0)
+		return -EINVAL;
+
+	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
+		return -ENOMEM;
+
+	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
+		return -errno;
+
+	if (scale) {
+		if (count.run == 0)
+			count.val = 0;
+		else if (count.run < count.ena)
+			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
+	} else
+		count.ena = count.run = 0;
+
+	evsel->counts->cpu[cpu] = count;
+	return 0;
+}
+
+int __perf_evsel__read(struct perf_evsel *evsel,
+		       int ncpus, int nthreads, bool scale)
+{
+	size_t nv = scale ? 3 : 1;
+	int cpu, thread;
+	struct perf_counts_values *aggr = &evsel->counts->aggr, count;
+
+	aggr->val = 0;
+
+	for (cpu = 0; cpu < ncpus; cpu++) {
+		for (thread = 0; thread < nthreads; thread++) {
+			if (FD(evsel, cpu, thread) < 0)
+				continue;
+
+			if (readn(FD(evsel, cpu, thread),
+				  &count, nv * sizeof(u64)) < 0)
+				return -errno;
+
+			aggr->val += count.val;
+			if (scale) {
+				aggr->ena += count.ena;
+				aggr->run += count.run;
+			}
+		}
+	}
+
+	evsel->counts->scaled = 0;
+	if (scale) {
+		if (aggr->run == 0) {
+			evsel->counts->scaled = -1;
+			aggr->val = 0;
+			return 0;
+		}
+
+		if (aggr->run < aggr->ena) {
+			evsel->counts->scaled = 1;
+			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
+		}
+	} else
+		aggr->ena = aggr->run = 0;
+
+	return 0;
+}
+
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+{
+	int cpu;
+
+	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
+		return -1;
+
+	for (cpu = 0; cpu < cpus->nr; cpu++) {
+		FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
+							cpus->map[cpu], -1, 0);
+		if (FD(evsel, cpu, 0) < 0)
+			goto out_close;
+	}
+
+	return 0;
+
+out_close:
+	while (--cpu >= 0) {
+		close(FD(evsel, cpu, 0));
+		FD(evsel, cpu, 0) = -1;
+	}
+	return -1;
+}
+
+int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
+{
+	int thread;
+
+	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, 1, threads->nr))
+		return -1;
+
+	for (thread = 0; thread < threads->nr; thread++) {
+		FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
+							   threads->map[thread], -1, -1, 0);
+		if (FD(evsel, 0, thread) < 0)
+			goto out_close;
+	}
+
+	return 0;
+
+out_close:
+	while (--thread >= 0) {
+		close(FD(evsel, 0, thread));
+		FD(evsel, 0, thread) = -1;
+	}
+	return -1;
+}
+
+int perf_evsel__open(struct perf_evsel *evsel,
+		     struct cpu_map *cpus, struct thread_map *threads)
+{
+	if (threads == NULL)
+		return perf_evsel__open_per_cpu(evsel, cpus);
+
+	return perf_evsel__open_per_thread(evsel, threads);
+}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
new file mode 100644
index 000000000000..863d78d5ef1a
--- /dev/null
+++ b/tools/perf/util/evsel.h
@@ -0,0 +1,115 @@
+#ifndef __PERF_EVSEL_H
+#define __PERF_EVSEL_H 1
+
+#include <linux/list.h>
+#include <stdbool.h>
+#include <linux/perf_event.h>
+#include "types.h"
+#include "xyarray.h"
+
+struct perf_counts_values {
+	union {
+		struct {
+			u64 val;
+			u64 ena;
+			u64 run;
+		};
+		u64 values[3];
+	};
+};
+
+struct perf_counts {
+	s8 scaled;
+	struct perf_counts_values aggr;
+	struct perf_counts_values cpu[];
+};
+
+struct perf_evsel {
+	struct list_head node;
+	struct perf_event_attr attr;
+	char *filter;
+	struct xyarray *fd;
+	struct perf_counts *counts;
+	int idx;
+	void *priv;
+};
+
+struct cpu_map;
+struct thread_map;
+
+struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx);
+void perf_evsel__delete(struct perf_evsel *evsel);
+
+int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
+void perf_evsel__free_fd(struct perf_evsel *evsel);
+void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
+
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus);
+int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads);
+int perf_evsel__open(struct perf_evsel *evsel,
+		     struct cpu_map *cpus, struct thread_map *threads);
+
+#define perf_evsel__match(evsel, t, c)		\
+	(evsel->attr.type == PERF_TYPE_##t &&	\
+	 evsel->attr.config == PERF_COUNT_##c)
+
+int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+			      int cpu, int thread, bool scale);
+
+/**
+ * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
+ *
+ * @evsel - event selector to read value
+ * @cpu - CPU of interest
+ * @thread - thread of interest
+ */
+static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+					  int cpu, int thread)
+{
+	return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
+}
+
+/**
+ * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
+ *
+ * @evsel - event selector to read value
+ * @cpu - CPU of interest
+ * @thread - thread of interest
+ */
+static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
+						 int cpu, int thread)
+{
+	return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
+}
+
+int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
+		       bool scale);
+
+/**
+ * perf_evsel__read - Read the aggregate results on all CPUs
+ *
+ * @evsel - event selector to read value
+ * @ncpus - Number of cpus affected, from zero
+ * @nthreads - Number of threads affected, from zero
+ */
+static inline int perf_evsel__read(struct perf_evsel *evsel,
+				   int ncpus, int nthreads)
+{
+	return __perf_evsel__read(evsel, ncpus, nthreads, false);
+}
+
+/**
+ * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
+ *
+ * @evsel - event selector to read value
+ * @ncpus - Number of cpus affected, from zero
+ * @nthreads - Number of threads affected, from zero
+ */
+static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
+					  int ncpus, int nthreads)
+{
+	return __perf_evsel__read(evsel, ncpus, nthreads, true);
+}
+
+#endif /* __PERF_EVSEL_H */
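Usage note: taken together, the new evsel API lets a tool open one event on a set of CPUs or threads and read the counts back, scaled or not. A minimal sketch counting cycles system-wide on all online CPUs (hypothetical caller; error handling and cleanup of the evsel/cpu_map are abbreviated):

	#include "evsel.h"
	#include "cpumap.h"

	static u64 read_cycles_on_all_cpus(void)
	{
		struct cpu_map *cpus = cpu_map__new(NULL);
		struct perf_evsel *evsel;
		u64 total = 0;
		int cpu;

		if (cpus == NULL)
			return 0;

		evsel = perf_evsel__new(PERF_TYPE_HARDWARE,
					PERF_COUNT_HW_CPU_CYCLES, 0);
		if (evsel && perf_evsel__open_per_cpu(evsel, cpus) == 0) {
			/* ... run the workload here ... */
			for (cpu = 0; cpu < cpus->nr; cpu++) {
				/* per-CPU value lands in evsel->counts->cpu[cpu] */
				if (perf_evsel__read_on_cpu(evsel, cpu, 0) == 0)
					total += evsel->counts->cpu[cpu].val;
			}
			perf_evsel__close_fd(evsel, cpus->nr, 1);
		}
		return total;
	}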
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 7cba0551a565..989fa2dee2fd 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -152,6 +152,11 @@ void perf_header__set_feat(struct perf_header *self, int feat)
 	set_bit(feat, self->adds_features);
 }
 
+void perf_header__clear_feat(struct perf_header *self, int feat)
+{
+	clear_bit(feat, self->adds_features);
+}
+
 bool perf_header__has_feat(const struct perf_header *self, int feat)
 {
 	return test_bit(feat, self->adds_features);
@@ -433,8 +438,10 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
 	int idx = 0, err;
 
 	session = container_of(self, struct perf_session, header);
-	if (perf_session__read_build_ids(session, true))
-		perf_header__set_feat(self, HEADER_BUILD_ID);
+
+	if (perf_header__has_feat(self, HEADER_BUILD_ID &&
+	    !perf_session__read_build_ids(session, true)))
+		perf_header__clear_feat(self, HEADER_BUILD_ID);
 
 	nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
 	if (!nr_sections)
@@ -456,7 +463,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
 
 		/* Write trace info */
 		trace_sec->offset = lseek(fd, 0, SEEK_CUR);
-		read_tracing_data(fd, attrs, nr_counters);
+		read_tracing_data(fd, &evsel_list);
 		trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
 	}
 
@@ -599,7 +606,7 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
 static int perf_header__getbuffer64(struct perf_header *self,
 				    int fd, void *buf, size_t size)
 {
-	if (do_read(fd, buf, size) <= 0)
+	if (readn(fd, buf, size) <= 0)
 		return -1;
 
 	if (self->needs_swap)
@@ -655,7 +662,7 @@ int perf_file_header__read(struct perf_file_header *self,
 {
 	lseek(fd, 0, SEEK_SET);
 
-	if (do_read(fd, self, sizeof(*self)) <= 0 ||
+	if (readn(fd, self, sizeof(*self)) <= 0 ||
 	    memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
 		return -1;
 
@@ -816,7 +823,7 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *self,
 				       struct perf_header *ph, int fd,
 				       bool repipe)
 {
-	if (do_read(fd, self, sizeof(*self)) <= 0 ||
+	if (readn(fd, self, sizeof(*self)) <= 0 ||
 	    memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
 		return -1;
 
@@ -941,6 +948,24 @@ u64 perf_header__sample_type(struct perf_header *header)
 	return type;
 }
 
+bool perf_header__sample_id_all(const struct perf_header *header)
+{
+	bool value = false, first = true;
+	int i;
+
+	for (i = 0; i < header->attrs; i++) {
+		struct perf_header_attr *attr = header->attr[i];
+
+		if (first) {
+			value = attr->attr.sample_id_all;
+			first = false;
+		} else if (value != attr->attr.sample_id_all)
+			die("non matching sample_id_all");
+	}
+
+	return value;
+}
+
 struct perf_event_attr *
 perf_header__find_attr(u64 id, struct perf_header *header)
 {
@@ -987,21 +1012,23 @@ int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
 
 	ev = malloc(size);
 
+	if (ev == NULL)
+		return -ENOMEM;
+
 	ev->attr.attr = *attr;
 	memcpy(ev->attr.id, id, ids * sizeof(u64));
 
 	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
 	ev->attr.header.size = size;
 
-	err = process(ev, session);
+	err = process(ev, NULL, session);
 
 	free(ev);
 
 	return err;
 }
 
-int event__synthesize_attrs(struct perf_header *self,
-			    event__handler_t process,
+int event__synthesize_attrs(struct perf_header *self, event__handler_t process,
 			    struct perf_session *session)
 {
 	struct perf_header_attr *attr;
@@ -1071,7 +1098,7 @@ int event__synthesize_event_type(u64 event_id, char *name,
 	ev.event_type.header.size = sizeof(ev.event_type) -
 		(sizeof(ev.event_type.event_type.name) - size);
 
-	err = process(&ev, session);
+	err = process(&ev, NULL, session);
 
 	return err;
 }
@@ -1106,8 +1133,7 @@ int event__process_event_type(event_t *self,
 	return 0;
 }
 
-int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
-				   int nb_events,
+int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
 				   event__handler_t process,
 				   struct perf_session *session __unused)
 {
@@ -1118,7 +1144,7 @@ int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
 	memset(&ev, 0, sizeof(ev));
 
 	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
-	size = read_tracing_data_size(fd, pattrs, nb_events);
+	size = read_tracing_data_size(fd, pattrs);
 	if (size <= 0)
 		return size;
 	aligned_size = ALIGN(size, sizeof(u64));
@@ -1126,9 +1152,9 @@ int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
 	ev.tracing_data.header.size = sizeof(ev.tracing_data);
 	ev.tracing_data.size = aligned_size;
 
-	process(&ev, session);
+	process(&ev, NULL, session);
 
-	err = read_tracing_data(fd, pattrs, nb_events);
+	err = read_tracing_data(fd, pattrs);
 	write_padded(fd, NULL, 0, padding);
1133 | 1159 | ||
1134 | return aligned_size; | 1160 | return aligned_size; |
@@ -1186,7 +1212,7 @@ int event__synthesize_build_id(struct dso *pos, u16 misc, | |||
1186 | ev.build_id.header.size = sizeof(ev.build_id) + len; | 1212 | ev.build_id.header.size = sizeof(ev.build_id) + len; |
1187 | memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); | 1213 | memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); |
1188 | 1214 | ||
1189 | err = process(&ev, session); | 1215 | err = process(&ev, NULL, session); |
1190 | 1216 | ||
1191 | return err; | 1217 | return err; |
1192 | } | 1218 | } |
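The synthesis call sites above now pass a NULL middle argument, so the handler type they share must have grown a sample pointer. A sketch of the assumed signature, consistent with the three-argument stub handlers in util/session.c further down (synthesized events carry no parsed sample, hence the NULL):

        typedef int (*event__handler_t)(event_t *event,
                                        struct sample_data *sample,
                                        struct perf_session *session);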
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 402ac2454cf8..33f16be7b72f 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h | |||
@@ -81,9 +81,11 @@ void perf_header_attr__delete(struct perf_header_attr *self); | |||
81 | int perf_header_attr__add_id(struct perf_header_attr *self, u64 id); | 81 | int perf_header_attr__add_id(struct perf_header_attr *self, u64 id); |
82 | 82 | ||
83 | u64 perf_header__sample_type(struct perf_header *header); | 83 | u64 perf_header__sample_type(struct perf_header *header); |
84 | bool perf_header__sample_id_all(const struct perf_header *header); | ||
84 | struct perf_event_attr * | 85 | struct perf_event_attr * |
85 | perf_header__find_attr(u64 id, struct perf_header *header); | 86 | perf_header__find_attr(u64 id, struct perf_header *header); |
86 | void perf_header__set_feat(struct perf_header *self, int feat); | 87 | void perf_header__set_feat(struct perf_header *self, int feat); |
88 | void perf_header__clear_feat(struct perf_header *self, int feat); | ||
87 | bool perf_header__has_feat(const struct perf_header *self, int feat); | 89 | bool perf_header__has_feat(const struct perf_header *self, int feat); |
88 | 90 | ||
89 | int perf_header__process_sections(struct perf_header *self, int fd, | 91 | int perf_header__process_sections(struct perf_header *self, int fd, |
@@ -111,8 +113,7 @@ int event__synthesize_event_types(event__handler_t process, | |||
111 | int event__process_event_type(event_t *self, | 113 | int event__process_event_type(event_t *self, |
112 | struct perf_session *session); | 114 | struct perf_session *session); |
113 | 115 | ||
114 | int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs, | 116 | int event__synthesize_tracing_data(int fd, struct list_head *pattrs, |
115 | int nb_events, | ||
116 | event__handler_t process, | 117 | event__handler_t process, |
117 | struct perf_session *session); | 118 | struct perf_session *session); |
118 | int event__process_tracing_data(event_t *self, | 119 | int event__process_tracing_data(event_t *self, |
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 76bcc35cf9b1..c749ba6136a0 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -1092,6 +1092,12 @@ int hist_entry__annotate(struct hist_entry *self, struct list_head *head, | |||
1092 | FILE *file; | 1092 | FILE *file; |
1093 | int err = 0; | 1093 | int err = 0; |
1094 | u64 len; | 1094 | u64 len; |
1095 | char symfs_filename[PATH_MAX]; | ||
1096 | |||
1097 | if (filename) { | ||
1098 | snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", | ||
1099 | symbol_conf.symfs, filename); | ||
1100 | } | ||
1095 | 1101 | ||
1096 | if (filename == NULL) { | 1102 | if (filename == NULL) { |
1097 | if (dso->has_build_id) { | 1103 | if (dso->has_build_id) { |
@@ -1100,9 +1106,9 @@ int hist_entry__annotate(struct hist_entry *self, struct list_head *head, | |||
1100 | return -ENOMEM; | 1106 | return -ENOMEM; |
1101 | } | 1107 | } |
1102 | goto fallback; | 1108 | goto fallback; |
1103 | } else if (readlink(filename, command, sizeof(command)) < 0 || | 1109 | } else if (readlink(symfs_filename, command, sizeof(command)) < 0 || |
1104 | strstr(command, "[kernel.kallsyms]") || | 1110 | strstr(command, "[kernel.kallsyms]") || |
1105 | access(filename, R_OK)) { | 1111 | access(symfs_filename, R_OK)) { |
1106 | free(filename); | 1112 | free(filename); |
1107 | fallback: | 1113 | fallback: |
1108 | /* | 1114 | /* |
@@ -1111,6 +1117,8 @@ fallback: | |||
1111 | * DSO is the same as when 'perf record' ran. | 1117 | * DSO is the same as when 'perf record' ran. |
1112 | */ | 1118 | */ |
1113 | filename = dso->long_name; | 1119 | filename = dso->long_name; |
1120 | snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", | ||
1121 | symbol_conf.symfs, filename); | ||
1114 | free_filename = false; | 1122 | free_filename = false; |
1115 | } | 1123 | } |
1116 | 1124 | ||
@@ -1137,7 +1145,7 @@ fallback: | |||
1137 | "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand", | 1145 | "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand", |
1138 | map__rip_2objdump(map, sym->start), | 1146 | map__rip_2objdump(map, sym->start), |
1139 | map__rip_2objdump(map, sym->end), | 1147 | map__rip_2objdump(map, sym->end), |
1140 | filename, filename); | 1148 | symfs_filename, filename); |
1141 | 1149 | ||
1142 | pr_debug("Executing: %s\n", command); | 1150 | pr_debug("Executing: %s\n", command); |
1143 | 1151 | ||
@@ -1168,10 +1176,13 @@ size_t hists__fprintf_nr_events(struct hists *self, FILE *fp) | |||
1168 | size_t ret = 0; | 1176 | size_t ret = 0; |
1169 | 1177 | ||
1170 | for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { | 1178 | for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { |
1171 | if (!event__name[i]) | 1179 | const char *name = event__get_event_name(i); |
1180 | |||
1181 | if (!strcmp(name, "UNKNOWN")) | ||
1172 | continue; | 1182 | continue; |
1173 | ret += fprintf(fp, "%10s events: %10d\n", | 1183 | |
1174 | event__name[i], self->stats.nr_events[i]); | 1184 | ret += fprintf(fp, "%16s events: %10d\n", name, |
1185 | self->stats.nr_events[i]); | ||
1175 | } | 1186 | } |
1176 | 1187 | ||
1177 | return ret; | 1188 | return ret; |
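The symfs prefix lets annotation read binaries from an offline copy of the target's root filesystem instead of the running system. A minimal, self-contained sketch of the path rewriting, assuming symbol_conf.symfs holds a user-supplied root (it is empty by default, so the prefix is then a no-op):

        #include <limits.h>
        #include <stdio.h>

        /* Hypothetical stand-in for symbol_conf.symfs. */
        static const char *symfs = "/srv/target-rootfs";

        static void build_symfs_path(char *dst, size_t len, const char *filename)
        {
                snprintf(dst, len, "%s%s", symfs, filename);
        }

        int main(void)
        {
                char path[PATH_MAX];

                build_symfs_path(path, sizeof(path), "/usr/lib/libc-2.12.so");
                printf("%s\n", path);   /* /srv/target-rootfs/usr/lib/libc-2.12.so */
                return 0;
        }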
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 587d375d3430..ee789856a8c9 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h | |||
@@ -52,8 +52,10 @@ struct sym_priv { | |||
52 | struct events_stats { | 52 | struct events_stats { |
53 | u64 total_period; | 53 | u64 total_period; |
54 | u64 total_lost; | 54 | u64 total_lost; |
55 | u64 total_invalid_chains; | ||
55 | u32 nr_events[PERF_RECORD_HEADER_MAX]; | 56 | u32 nr_events[PERF_RECORD_HEADER_MAX]; |
56 | u32 nr_unknown_events; | 57 | u32 nr_unknown_events; |
58 | u32 nr_invalid_chains; | ||
57 | }; | 59 | }; |
58 | 60 | ||
59 | enum hist_column { | 61 | enum hist_column { |
diff --git a/tools/perf/util/include/asm/cpufeature.h b/tools/perf/util/include/asm/cpufeature.h new file mode 100644 index 000000000000..acffd5e4d1d4 --- /dev/null +++ b/tools/perf/util/include/asm/cpufeature.h | |||
@@ -0,0 +1,9 @@ | |||
1 | |||
2 | #ifndef PERF_CPUFEATURE_H | ||
3 | #define PERF_CPUFEATURE_H | ||
4 | |||
5 | /* cpufeature.h ... dummy header file for including arch/x86/lib/memcpy_64.S */ | ||
6 | |||
7 | #define X86_FEATURE_REP_GOOD 0 | ||
8 | |||
9 | #endif /* PERF_CPUFEATURE_H */ | ||
diff --git a/tools/perf/util/include/asm/dwarf2.h b/tools/perf/util/include/asm/dwarf2.h new file mode 100644 index 000000000000..bb4198e7837a --- /dev/null +++ b/tools/perf/util/include/asm/dwarf2.h | |||
@@ -0,0 +1,11 @@ | |||
1 | |||
2 | #ifndef PERF_DWARF2_H | ||
3 | #define PERF_DWARF2_H | ||
4 | |||
5 | /* dwarf2.h ... dummy header file for including arch/x86/lib/memcpy_64.S */ | ||
6 | |||
7 | #define CFI_STARTPROC | ||
8 | #define CFI_ENDPROC | ||
9 | |||
10 | #endif /* PERF_DWARF2_H */ | ||
11 | |||
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h index bb4ac2e05385..8be0b968ca0b 100644 --- a/tools/perf/util/include/linux/bitops.h +++ b/tools/perf/util/include/linux/bitops.h | |||
@@ -13,6 +13,11 @@ static inline void set_bit(int nr, unsigned long *addr) | |||
13 | addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); | 13 | addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); |
14 | } | 14 | } |
15 | 15 | ||
16 | static inline void clear_bit(int nr, unsigned long *addr) | ||
17 | { | ||
18 | addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG)); | ||
19 | } | ||
20 | |||
16 | static __always_inline int test_bit(unsigned int nr, const unsigned long *addr) | 21 | static __always_inline int test_bit(unsigned int nr, const unsigned long *addr) |
17 | { | 22 | { |
18 | return ((1UL << (nr % BITS_PER_LONG)) & | 23 | return ((1UL << (nr % BITS_PER_LONG)) & |
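clear_bit() is the helper that the new perf_header__clear_feat() declaration in header.h calls for; its implementation is not visible in this section, but it presumably just mirrors how set_feat uses set_bit:

        void perf_header__clear_feat(struct perf_header *self, int feat)
        {
                clear_bit(feat, self->adds_features);
        }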
diff --git a/tools/perf/util/include/linux/linkage.h b/tools/perf/util/include/linux/linkage.h new file mode 100644 index 000000000000..06387cffe125 --- /dev/null +++ b/tools/perf/util/include/linux/linkage.h | |||
@@ -0,0 +1,13 @@ | |||
1 | |||
2 | #ifndef PERF_LINUX_LINKAGE_H_ | ||
3 | #define PERF_LINUX_LINKAGE_H_ | ||
4 | |||
5 | /* linkage.h ... for including arch/x86/lib/memcpy_64.S */ | ||
6 | |||
7 | #define ENTRY(name) \ | ||
8 | .globl name; \ | ||
9 | name: | ||
10 | |||
11 | #define ENDPROC(name) | ||
12 | |||
13 | #endif /* PERF_LINUX_LINKAGE_H_ */ | ||
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 4af5bd59cfd1..649083f27e08 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include "../../../include/linux/hw_breakpoint.h" | 1 | #include "../../../include/linux/hw_breakpoint.h" |
2 | #include "util.h" | 2 | #include "util.h" |
3 | #include "../perf.h" | 3 | #include "../perf.h" |
4 | #include "evsel.h" | ||
4 | #include "parse-options.h" | 5 | #include "parse-options.h" |
5 | #include "parse-events.h" | 6 | #include "parse-events.h" |
6 | #include "exec_cmd.h" | 7 | #include "exec_cmd.h" |
@@ -12,8 +13,7 @@ | |||
12 | 13 | ||
13 | int nr_counters; | 14 | int nr_counters; |
14 | 15 | ||
15 | struct perf_event_attr attrs[MAX_COUNTERS]; | 16 | LIST_HEAD(evsel_list); |
16 | char *filters[MAX_COUNTERS]; | ||
17 | 17 | ||
18 | struct event_symbol { | 18 | struct event_symbol { |
19 | u8 type; | 19 | u8 type; |
@@ -266,10 +266,10 @@ static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result) | |||
266 | return name; | 266 | return name; |
267 | } | 267 | } |
268 | 268 | ||
269 | const char *event_name(int counter) | 269 | const char *event_name(struct perf_evsel *evsel) |
270 | { | 270 | { |
271 | u64 config = attrs[counter].config; | 271 | u64 config = evsel->attr.config; |
272 | int type = attrs[counter].type; | 272 | int type = evsel->attr.type; |
273 | 273 | ||
274 | return __event_name(type, config); | 274 | return __event_name(type, config); |
275 | } | 275 | } |
@@ -434,7 +434,7 @@ parse_single_tracepoint_event(char *sys_name, | |||
434 | id = atoll(id_buf); | 434 | id = atoll(id_buf); |
435 | attr->config = id; | 435 | attr->config = id; |
436 | attr->type = PERF_TYPE_TRACEPOINT; | 436 | attr->type = PERF_TYPE_TRACEPOINT; |
437 | *strp = evt_name + evt_length; | 437 | *strp += strlen(sys_name) + evt_length + 1; /* + 1 for the ':' */ |
438 | 438 | ||
439 | attr->sample_type |= PERF_SAMPLE_RAW; | 439 | attr->sample_type |= PERF_SAMPLE_RAW; |
440 | attr->sample_type |= PERF_SAMPLE_TIME; | 440 | attr->sample_type |= PERF_SAMPLE_TIME; |
@@ -495,7 +495,7 @@ static enum event_result parse_tracepoint_event(const char **strp, | |||
495 | struct perf_event_attr *attr) | 495 | struct perf_event_attr *attr) |
496 | { | 496 | { |
497 | const char *evt_name; | 497 | const char *evt_name; |
498 | char *flags; | 498 | char *flags = NULL, *comma_loc; |
499 | char sys_name[MAX_EVENT_LENGTH]; | 499 | char sys_name[MAX_EVENT_LENGTH]; |
500 | unsigned int sys_length, evt_length; | 500 | unsigned int sys_length, evt_length; |
501 | 501 | ||
@@ -514,6 +514,11 @@ static enum event_result parse_tracepoint_event(const char **strp, | |||
514 | sys_name[sys_length] = '\0'; | 514 | sys_name[sys_length] = '\0'; |
515 | evt_name = evt_name + 1; | 515 | evt_name = evt_name + 1; |
516 | 516 | ||
517 | comma_loc = strchr(evt_name, ','); | ||
518 | if (comma_loc) { | ||
519 | /* take the event name up to the comma */ | ||
520 | evt_name = strndup(evt_name, comma_loc - evt_name); | ||
521 | } | ||
517 | flags = strchr(evt_name, ':'); | 522 | flags = strchr(evt_name, ':'); |
518 | if (flags) { | 523 | if (flags) { |
519 | /* split it out: */ | 524 | /* split it out: */ |
@@ -524,9 +529,8 @@ static enum event_result parse_tracepoint_event(const char **strp, | |||
524 | evt_length = strlen(evt_name); | 529 | evt_length = strlen(evt_name); |
525 | if (evt_length >= MAX_EVENT_LENGTH) | 530 | if (evt_length >= MAX_EVENT_LENGTH) |
526 | return EVT_FAILED; | 531 | return EVT_FAILED; |
527 | |||
528 | if (strpbrk(evt_name, "*?")) { | 532 | if (strpbrk(evt_name, "*?")) { |
529 | *strp = evt_name + evt_length; | 533 | *strp += strlen(sys_name) + evt_length; |
530 | return parse_multiple_tracepoint_event(sys_name, evt_name, | 534 | return parse_multiple_tracepoint_event(sys_name, evt_name, |
531 | flags); | 535 | flags); |
532 | } else | 536 | } else |
@@ -810,9 +814,6 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u | |||
810 | return -1; | 814 | return -1; |
811 | 815 | ||
812 | for (;;) { | 816 | for (;;) { |
813 | if (nr_counters == MAX_COUNTERS) | ||
814 | return -1; | ||
815 | |||
816 | memset(&attr, 0, sizeof(attr)); | 817 | memset(&attr, 0, sizeof(attr)); |
817 | ret = parse_event_symbols(&str, &attr); | 818 | ret = parse_event_symbols(&str, &attr); |
818 | if (ret == EVT_FAILED) | 819 | if (ret == EVT_FAILED) |
@@ -822,8 +823,13 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u | |||
822 | return -1; | 823 | return -1; |
823 | 824 | ||
824 | if (ret != EVT_HANDLED_ALL) { | 825 | if (ret != EVT_HANDLED_ALL) { |
825 | attrs[nr_counters] = attr; | 826 | struct perf_evsel *evsel; |
826 | nr_counters++; | 827 | evsel = perf_evsel__new(attr.type, attr.config, |
828 | nr_counters); | ||
829 | if (evsel == NULL) | ||
830 | return -1; | ||
831 | list_add_tail(&evsel->node, &evsel_list); | ||
832 | ++nr_counters; | ||
827 | } | 833 | } |
828 | 834 | ||
829 | if (*str == 0) | 835 | if (*str == 0) |
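With the fixed attrs[MAX_COUNTERS] array gone, parsed events now live on evsel_list and the MAX_COUNTERS ceiling disappears. A short sketch of how a caller is expected to walk the list, using the kernel-style list macros and the reworked event_name() above:

        struct perf_evsel *pos;

        list_for_each_entry(pos, &evsel_list, node)
                printf("selected event: %s\n", event_name(pos));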
@@ -840,21 +846,22 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u | |||
840 | int parse_filter(const struct option *opt __used, const char *str, | 846 | int parse_filter(const struct option *opt __used, const char *str, |
841 | int unset __used) | 847 | int unset __used) |
842 | { | 848 | { |
843 | int i = nr_counters - 1; | 849 | struct perf_evsel *last = NULL; |
844 | int len = strlen(str); | ||
845 | 850 | ||
846 | if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) { | 851 | if (!list_empty(&evsel_list)) |
852 | last = list_entry(evsel_list.prev, struct perf_evsel, node); | ||
853 | |||
854 | if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) { | ||
847 | fprintf(stderr, | 855 | fprintf(stderr, |
848 | "-F option should follow a -e tracepoint option\n"); | 856 | "-F option should follow a -e tracepoint option\n"); |
849 | return -1; | 857 | return -1; |
850 | } | 858 | } |
851 | 859 | ||
852 | filters[i] = malloc(len + 1); | 860 | last->filter = strdup(str); |
853 | if (!filters[i]) { | 861 | if (last->filter == NULL) { |
854 | fprintf(stderr, "not enough memory to hold filter string\n"); | 862 | fprintf(stderr, "not enough memory to hold filter string\n"); |
855 | return -1; | 863 | return -1; |
856 | } | 864 | } |
857 | strcpy(filters[i], str); | ||
858 | 865 | ||
859 | return 0; | 866 | return 0; |
860 | } | 867 | } |
@@ -906,6 +913,47 @@ static void print_tracepoint_events(void) | |||
906 | } | 913 | } |
907 | 914 | ||
908 | /* | 915 | /* |
916 | * Check whether event is in <debugfs_mount_point>/tracing/events | ||
917 | */ | ||
918 | |||
919 | int is_valid_tracepoint(const char *event_string) | ||
920 | { | ||
921 | DIR *sys_dir, *evt_dir; | ||
922 | struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; | ||
923 | char evt_path[MAXPATHLEN]; | ||
924 | char dir_path[MAXPATHLEN]; | ||
925 | |||
926 | if (debugfs_valid_mountpoint(debugfs_path)) | ||
927 | return 0; | ||
928 | |||
929 | sys_dir = opendir(debugfs_path); | ||
930 | if (!sys_dir) | ||
931 | return 0; | ||
932 | |||
933 | for_each_subsystem(sys_dir, sys_dirent, sys_next) { | ||
934 | |||
935 | snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, | ||
936 | sys_dirent.d_name); | ||
937 | evt_dir = opendir(dir_path); | ||
938 | if (!evt_dir) | ||
939 | continue; | ||
940 | |||
941 | for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { | ||
942 | snprintf(evt_path, MAXPATHLEN, "%s:%s", | ||
943 | sys_dirent.d_name, evt_dirent.d_name); | ||
944 | if (!strcmp(evt_path, event_string)) { | ||
945 | closedir(evt_dir); | ||
946 | closedir(sys_dir); | ||
947 | return 1; | ||
948 | } | ||
949 | } | ||
950 | closedir(evt_dir); | ||
951 | } | ||
952 | closedir(sys_dir); | ||
953 | return 0; | ||
954 | } | ||
955 | |||
956 | /* | ||
909 | * Print the help text for the event symbols: | 957 | * Print the help text for the event symbols: |
910 | */ | 958 | */ |
911 | void print_events(void) | 959 | void print_events(void) |
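is_valid_tracepoint() gives other builtins a cheap existence check before they rely on an ftrace event. A hypothetical caller (the event string is illustrative):

        if (!is_valid_tracepoint("sched:sched_switch")) {
                pr_err("sched:sched_switch is not exported under "
                       "<debugfs>/tracing/events\n");
                return -EINVAL;
        }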
@@ -963,3 +1011,26 @@ void print_events(void) | |||
963 | 1011 | ||
964 | exit(129); | 1012 | exit(129); |
965 | } | 1013 | } |
1014 | |||
1015 | int perf_evsel_list__create_default(void) | ||
1016 | { | ||
1017 | struct perf_evsel *evsel = perf_evsel__new(PERF_TYPE_HARDWARE, | ||
1018 | PERF_COUNT_HW_CPU_CYCLES, 0); | ||
1019 | if (evsel == NULL) | ||
1020 | return -ENOMEM; | ||
1021 | |||
1022 | list_add(&evsel->node, &evsel_list); | ||
1023 | ++nr_counters; | ||
1024 | return 0; | ||
1025 | } | ||
1026 | |||
1027 | void perf_evsel_list__delete(void) | ||
1028 | { | ||
1029 | struct perf_evsel *pos, *n; | ||
1030 | |||
1031 | list_for_each_entry_safe(pos, n, &evsel_list, node) { | ||
1032 | list_del_init(&pos->node); | ||
1033 | perf_evsel__delete(pos); | ||
1034 | } | ||
1035 | nr_counters = 0; | ||
1036 | } | ||
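Taken together, the two new helpers give callers a simple lifecycle for the global list; a sketch of the expected pattern, with error handling trimmed:

        /* Fall back to a single cycles counter when the user gave no -e option. */
        if (list_empty(&evsel_list) && perf_evsel_list__create_default() < 0)
                return -ENOMEM;

        /* ... create fds, enable and read the counters here ... */

        perf_evsel_list__delete();      /* frees every evsel, resets nr_counters */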
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index fc4ab3fe877a..1c9043ccd173 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h | |||
@@ -4,6 +4,16 @@ | |||
4 | * Parse symbolic events/counts passed in as options: | 4 | * Parse symbolic events/counts passed in as options: |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/perf_event.h> | ||
8 | |||
9 | struct list_head; | ||
10 | struct perf_evsel; | ||
11 | |||
12 | extern struct list_head evsel_list; | ||
13 | |||
14 | int perf_evsel_list__create_default(void); | ||
15 | void perf_evsel_list__delete(void); | ||
16 | |||
7 | struct option; | 17 | struct option; |
8 | 18 | ||
9 | struct tracepoint_path { | 19 | struct tracepoint_path { |
@@ -13,14 +23,11 @@ struct tracepoint_path { | |||
13 | }; | 23 | }; |
14 | 24 | ||
15 | extern struct tracepoint_path *tracepoint_id_to_path(u64 config); | 25 | extern struct tracepoint_path *tracepoint_id_to_path(u64 config); |
16 | extern bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events); | 26 | extern bool have_tracepoints(struct list_head *evsel_list); |
17 | 27 | ||
18 | extern int nr_counters; | 28 | extern int nr_counters; |
19 | 29 | ||
20 | extern struct perf_event_attr attrs[MAX_COUNTERS]; | 30 | const char *event_name(struct perf_evsel *event); |
21 | extern char *filters[MAX_COUNTERS]; | ||
22 | |||
23 | extern const char *event_name(int ctr); | ||
24 | extern const char *__event_name(int type, u64 config); | 31 | extern const char *__event_name(int type, u64 config); |
25 | 32 | ||
26 | extern int parse_events(const struct option *opt, const char *str, int unset); | 33 | extern int parse_events(const struct option *opt, const char *str, int unset); |
@@ -29,9 +36,9 @@ extern int parse_filter(const struct option *opt, const char *str, int unset); | |||
29 | #define EVENTS_HELP_MAX (128*1024) | 36 | #define EVENTS_HELP_MAX (128*1024) |
30 | 37 | ||
31 | extern void print_events(void); | 38 | extern void print_events(void); |
39 | extern int is_valid_tracepoint(const char *event_string); | ||
32 | 40 | ||
33 | extern char debugfs_path[]; | 41 | extern char debugfs_path[]; |
34 | extern int valid_debugfs_mount(const char *debugfs); | 42 | extern int valid_debugfs_mount(const char *debugfs); |
35 | 43 | ||
36 | |||
37 | #endif /* __PERF_PARSE_EVENTS_H */ | 44 | #endif /* __PERF_PARSE_EVENTS_H */ |
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h index c7d72dce54b2..abc31a1dac1a 100644 --- a/tools/perf/util/parse-options.h +++ b/tools/perf/util/parse-options.h | |||
@@ -119,6 +119,10 @@ struct option { | |||
119 | { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG } | 119 | { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG } |
120 | #define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \ | 120 | #define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \ |
121 | { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT } | 121 | { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT } |
122 | #define OPT_CALLBACK_DEFAULT_NOOPT(s, l, v, a, h, f, d) \ | ||
123 | { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l),\ | ||
124 | .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d,\ | ||
125 | .flags = PARSE_OPT_LASTARG_DEFAULT | PARSE_OPT_NOARG} | ||
122 | 126 | ||
123 | /* parse_options() will filter out the processed options and leave the | 127 | /* parse_options() will filter out the processed options and leave the |
124 | * non-option arguments in argv[]. | 128 | * non-option arguments in argv[]. |
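The new macro covers options that take no argument on the command line yet still hand a fixed default string to their callback; a hypothetical table entry (option name and callback are illustrative, not from this patch):

        OPT_CALLBACK_DEFAULT_NOOPT('x', "example", &opts, NULL,
                                   "illustrative flag; callback still receives \"on\"",
                                   example_callback, "on"),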
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 61191c6cbe7a..128aaab0aeda 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -95,7 +95,7 @@ static int init_vmlinux(void) | |||
95 | goto out; | 95 | goto out; |
96 | 96 | ||
97 | if (machine__create_kernel_maps(&machine) < 0) { | 97 | if (machine__create_kernel_maps(&machine) < 0) { |
98 | pr_debug("machine__create_kernel_maps "); | 98 | pr_debug("machine__create_kernel_maps() failed.\n"); |
99 | goto out; | 99 | goto out; |
100 | } | 100 | } |
101 | out: | 101 | out: |
@@ -149,7 +149,8 @@ static int open_vmlinux(const char *module) | |||
149 | { | 149 | { |
150 | const char *path = kernel_get_module_path(module); | 150 | const char *path = kernel_get_module_path(module); |
151 | if (!path) { | 151 | if (!path) { |
152 | pr_err("Failed to find path of %s module", module ?: "kernel"); | 152 | pr_err("Failed to find path of %s module.\n", |
153 | module ?: "kernel"); | ||
153 | return -ENOENT; | 154 | return -ENOENT; |
154 | } | 155 | } |
155 | pr_debug("Try to open %s\n", path); | 156 | pr_debug("Try to open %s\n", path); |
@@ -226,7 +227,7 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, | |||
226 | pr_warning("Warning: No dwarf info found in the vmlinux - " | 227 | pr_warning("Warning: No dwarf info found in the vmlinux - " |
227 | "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n"); | 228 | "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n"); |
228 | if (!need_dwarf) { | 229 | if (!need_dwarf) { |
229 | pr_debug("Trying to use symbols.\nn"); | 230 | pr_debug("Trying to use symbols.\n"); |
230 | return 0; | 231 | return 0; |
231 | } | 232 | } |
232 | } | 233 | } |
@@ -295,42 +296,49 @@ static int get_real_path(const char *raw_path, const char *comp_dir, | |||
295 | #define LINEBUF_SIZE 256 | 296 | #define LINEBUF_SIZE 256 |
296 | #define NR_ADDITIONAL_LINES 2 | 297 | #define NR_ADDITIONAL_LINES 2 |
297 | 298 | ||
298 | static int show_one_line(FILE *fp, int l, bool skip, bool show_num) | 299 | static int __show_one_line(FILE *fp, int l, bool skip, bool show_num) |
299 | { | 300 | { |
300 | char buf[LINEBUF_SIZE]; | 301 | char buf[LINEBUF_SIZE]; |
301 | const char *color = PERF_COLOR_BLUE; | 302 | const char *color = show_num ? "" : PERF_COLOR_BLUE; |
303 | const char *prefix = NULL; | ||
302 | 304 | ||
303 | if (fgets(buf, LINEBUF_SIZE, fp) == NULL) | 305 | do { |
304 | goto error; | ||
305 | if (!skip) { | ||
306 | if (show_num) | ||
307 | fprintf(stdout, "%7d %s", l, buf); | ||
308 | else | ||
309 | color_fprintf(stdout, color, " %s", buf); | ||
310 | } | ||
311 | |||
312 | while (strlen(buf) == LINEBUF_SIZE - 1 && | ||
313 | buf[LINEBUF_SIZE - 2] != '\n') { | ||
314 | if (fgets(buf, LINEBUF_SIZE, fp) == NULL) | 306 | if (fgets(buf, LINEBUF_SIZE, fp) == NULL) |
315 | goto error; | 307 | goto error; |
316 | if (!skip) { | 308 | if (skip) |
317 | if (show_num) | 309 | continue; |
318 | fprintf(stdout, "%s", buf); | 310 | if (!prefix) { |
319 | else | 311 | prefix = show_num ? "%7d " : " "; |
320 | color_fprintf(stdout, color, "%s", buf); | 312 | color_fprintf(stdout, color, prefix, l); |
321 | } | 313 | } |
322 | } | 314 | color_fprintf(stdout, color, "%s", buf); |
323 | 315 | ||
324 | return 0; | 316 | } while (strchr(buf, '\n') == NULL); |
317 | |||
318 | return 1; | ||
325 | error: | 319 | error: |
326 | if (feof(fp)) | 320 | if (ferror(fp)) { |
327 | pr_warning("Source file is shorter than expected.\n"); | ||
328 | else | ||
329 | pr_warning("File read error: %s\n", strerror(errno)); | 321 | pr_warning("File read error: %s\n", strerror(errno)); |
322 | return -1; | ||
323 | } | ||
324 | return 0; | ||
325 | } | ||
330 | 326 | ||
331 | return -1; | 327 | static int _show_one_line(FILE *fp, int l, bool skip, bool show_num) |
328 | { | ||
329 | int rv = __show_one_line(fp, l, skip, show_num); | ||
330 | if (rv == 0) { | ||
331 | pr_warning("Source file is shorter than expected.\n"); | ||
332 | rv = -1; | ||
333 | } | ||
334 | return rv; | ||
332 | } | 335 | } |
333 | 336 | ||
337 | #define show_one_line_with_num(f,l) _show_one_line(f,l,false,true) | ||
338 | #define show_one_line(f,l) _show_one_line(f,l,false,false) | ||
339 | #define skip_one_line(f,l) _show_one_line(f,l,true,false) | ||
340 | #define show_one_line_or_eof(f,l) __show_one_line(f,l,false,false) | ||
341 | |||
334 | /* | 342 | /* |
335 | * Show line-range always requires debuginfo to find source file and | 343 | * Show line-range always requires debuginfo to find source file and |
336 | * line number. | 344 | * line number. |
@@ -379,7 +387,7 @@ int show_line_range(struct line_range *lr, const char *module) | |||
379 | fprintf(stdout, "<%s:%d>\n", lr->function, | 387 | fprintf(stdout, "<%s:%d>\n", lr->function, |
380 | lr->start - lr->offset); | 388 | lr->start - lr->offset); |
381 | else | 389 | else |
382 | fprintf(stdout, "<%s:%d>\n", lr->file, lr->start); | 390 | fprintf(stdout, "<%s:%d>\n", lr->path, lr->start); |
383 | 391 | ||
384 | fp = fopen(lr->path, "r"); | 392 | fp = fopen(lr->path, "r"); |
385 | if (fp == NULL) { | 393 | if (fp == NULL) { |
@@ -388,26 +396,30 @@ int show_line_range(struct line_range *lr, const char *module) | |||
388 | return -errno; | 396 | return -errno; |
389 | } | 397 | } |
390 | /* Skip to starting line number */ | 398 | /* Skip to starting line number */ |
391 | while (l < lr->start && ret >= 0) | 399 | while (l < lr->start) { |
392 | ret = show_one_line(fp, l++, true, false); | 400 | ret = skip_one_line(fp, l++); |
393 | if (ret < 0) | 401 | if (ret < 0) |
394 | goto end; | 402 | goto end; |
403 | } | ||
395 | 404 | ||
396 | list_for_each_entry(ln, &lr->line_list, list) { | 405 | list_for_each_entry(ln, &lr->line_list, list) { |
397 | while (ln->line > l && ret >= 0) | 406 | for (; ln->line > l; l++) { |
398 | ret = show_one_line(fp, (l++) - lr->offset, | 407 | ret = show_one_line(fp, l - lr->offset); |
399 | false, false); | 408 | if (ret < 0) |
400 | if (ret >= 0) | 409 | goto end; |
401 | ret = show_one_line(fp, (l++) - lr->offset, | 410 | } |
402 | false, true); | 411 | ret = show_one_line_with_num(fp, l++ - lr->offset); |
403 | if (ret < 0) | 412 | if (ret < 0) |
404 | goto end; | 413 | goto end; |
405 | } | 414 | } |
406 | 415 | ||
407 | if (lr->end == INT_MAX) | 416 | if (lr->end == INT_MAX) |
408 | lr->end = l + NR_ADDITIONAL_LINES; | 417 | lr->end = l + NR_ADDITIONAL_LINES; |
409 | while (l <= lr->end && !feof(fp) && ret >= 0) | 418 | while (l <= lr->end) { |
410 | ret = show_one_line(fp, (l++) - lr->offset, false, false); | 419 | ret = show_one_line_or_eof(fp, l++ - lr->offset); |
420 | if (ret <= 0) | ||
421 | break; | ||
422 | } | ||
411 | end: | 423 | end: |
412 | fclose(fp); | 424 | fclose(fp); |
413 | return ret; | 425 | return ret; |
@@ -466,7 +478,7 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs, | |||
466 | 478 | ||
467 | fd = open_vmlinux(module); | 479 | fd = open_vmlinux(module); |
468 | if (fd < 0) { | 480 | if (fd < 0) { |
469 | pr_warning("Failed to open debuginfo file.\n"); | 481 | pr_warning("Failed to open debug information file.\n"); |
470 | return fd; | 482 | return fd; |
471 | } | 483 | } |
472 | 484 | ||
@@ -526,56 +538,87 @@ int show_available_vars(struct perf_probe_event *pevs __unused, | |||
526 | } | 538 | } |
527 | #endif | 539 | #endif |
528 | 540 | ||
541 | static int parse_line_num(char **ptr, int *val, const char *what) | ||
542 | { | ||
543 | const char *start = *ptr; | ||
544 | |||
545 | errno = 0; | ||
546 | *val = strtol(*ptr, ptr, 0); | ||
547 | if (errno || *ptr == start) { | ||
548 | semantic_error("'%s' is not a valid number.\n", what); | ||
549 | return -EINVAL; | ||
550 | } | ||
551 | return 0; | ||
552 | } | ||
553 | |||
554 | /* | ||
555 | * Stuff 'lr' according to the line range described by 'arg'. | ||
556 | * The line range syntax is described by: | ||
557 | * | ||
558 | * SRC[:SLN[+NUM|-ELN]] | ||
559 | * FNC[:SLN[+NUM|-ELN]] | ||
560 | */ | ||
529 | int parse_line_range_desc(const char *arg, struct line_range *lr) | 561 | int parse_line_range_desc(const char *arg, struct line_range *lr) |
530 | { | 562 | { |
531 | const char *ptr; | 563 | char *range, *name = strdup(arg); |
532 | char *tmp; | 564 | int err; |
533 | /* | 565 | |
534 | * <Syntax> | 566 | if (!name) |
535 | * SRC:SLN[+NUM|-ELN] | 567 | return -ENOMEM; |
536 | * FUNC[:SLN[+NUM|-ELN]] | 568 | |
537 | */ | 569 | lr->start = 0; |
538 | ptr = strchr(arg, ':'); | 570 | lr->end = INT_MAX; |
539 | if (ptr) { | 571 | |
540 | lr->start = (int)strtoul(ptr + 1, &tmp, 0); | 572 | range = strchr(name, ':'); |
541 | if (*tmp == '+') { | 573 | if (range) { |
542 | lr->end = lr->start + (int)strtoul(tmp + 1, &tmp, 0); | 574 | *range++ = '\0'; |
543 | lr->end--; /* | 575 | |
544 | * Adjust the number of lines here. | 576 | err = parse_line_num(&range, &lr->start, "start line"); |
545 | * If the number of lines == 1, the | 577 | if (err) |
546 | * the end of line should be equal to | 578 | goto err; |
547 | * the start of line. | 579 | |
548 | */ | 580 | if (*range == '+' || *range == '-') { |
549 | } else if (*tmp == '-') | 581 | const char c = *range++; |
550 | lr->end = (int)strtoul(tmp + 1, &tmp, 0); | 582 | |
551 | else | 583 | err = parse_line_num(&range, &lr->end, "end line"); |
552 | lr->end = INT_MAX; | 584 | if (err) |
585 | goto err; | ||
586 | |||
587 | if (c == '+') { | ||
588 | lr->end += lr->start; | ||
589 | /* | ||
590 | * Adjust the number of lines here. | ||
591 | * If the number of lines == 1, the | ||
592 | * the end of line should be equal to | ||
593 | * the start of line. | ||
594 | */ | ||
595 | lr->end--; | ||
596 | } | ||
597 | } | ||
598 | |||
553 | pr_debug("Line range is %d to %d\n", lr->start, lr->end); | 599 | pr_debug("Line range is %d to %d\n", lr->start, lr->end); |
600 | |||
601 | err = -EINVAL; | ||
554 | if (lr->start > lr->end) { | 602 | if (lr->start > lr->end) { |
555 | semantic_error("Start line must be smaller" | 603 | semantic_error("Start line must be smaller" |
556 | " than end line.\n"); | 604 | " than end line.\n"); |
557 | return -EINVAL; | 605 | goto err; |
558 | } | 606 | } |
559 | if (*tmp != '\0') { | 607 | if (*range != '\0') { |
560 | semantic_error("Tailing with invalid character '%d'.\n", | 608 | semantic_error("Tailing with invalid str '%s'.\n", range); |
561 | *tmp); | 609 | goto err; |
562 | return -EINVAL; | ||
563 | } | 610 | } |
564 | tmp = strndup(arg, (ptr - arg)); | ||
565 | } else { | ||
566 | tmp = strdup(arg); | ||
567 | lr->end = INT_MAX; | ||
568 | } | 611 | } |
569 | 612 | ||
570 | if (tmp == NULL) | 613 | if (strchr(name, '.')) |
571 | return -ENOMEM; | 614 | lr->file = name; |
572 | |||
573 | if (strchr(tmp, '.')) | ||
574 | lr->file = tmp; | ||
575 | else | 615 | else |
576 | lr->function = tmp; | 616 | lr->function = name; |
577 | 617 | ||
578 | return 0; | 618 | return 0; |
619 | err: | ||
620 | free(name); | ||
621 | return err; | ||
579 | } | 622 | } |
580 | 623 | ||
581 | /* Check the name is good for event/group */ | 624 | /* Check the name is good for event/group */ |
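Worked examples of the SRC[:SLN[+NUM|-ELN]] syntax as the rewritten parser above resolves it (end values follow the += / -- arithmetic; file vs. function is decided by the presence of a '.' in the name):

        "schedule"               ->  function = "schedule",       start = 0,   end = INT_MAX
        "schedule:10"            ->  function = "schedule",       start = 10,  end = INT_MAX
        "schedule:10+5"          ->  function = "schedule",       start = 10,  end = 14   (5 lines)
        "kernel/sched.c:100-120" ->  file     = "kernel/sched.c", start = 100, end = 120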
@@ -699,39 +742,40 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev) | |||
699 | 742 | ||
700 | /* Exclusion check */ | 743 | /* Exclusion check */ |
701 | if (pp->lazy_line && pp->line) { | 744 | if (pp->lazy_line && pp->line) { |
702 | semantic_error("Lazy pattern can't be used with line number."); | 745 | semantic_error("Lazy pattern can't be used with" |
746 | " line number.\n"); | ||
703 | return -EINVAL; | 747 | return -EINVAL; |
704 | } | 748 | } |
705 | 749 | ||
706 | if (pp->lazy_line && pp->offset) { | 750 | if (pp->lazy_line && pp->offset) { |
707 | semantic_error("Lazy pattern can't be used with offset."); | 751 | semantic_error("Lazy pattern can't be used with offset.\n"); |
708 | return -EINVAL; | 752 | return -EINVAL; |
709 | } | 753 | } |
710 | 754 | ||
711 | if (pp->line && pp->offset) { | 755 | if (pp->line && pp->offset) { |
712 | semantic_error("Offset can't be used with line number."); | 756 | semantic_error("Offset can't be used with line number.\n"); |
713 | return -EINVAL; | 757 | return -EINVAL; |
714 | } | 758 | } |
715 | 759 | ||
716 | if (!pp->line && !pp->lazy_line && pp->file && !pp->function) { | 760 | if (!pp->line && !pp->lazy_line && pp->file && !pp->function) { |
717 | semantic_error("File always requires line number or " | 761 | semantic_error("File always requires line number or " |
718 | "lazy pattern."); | 762 | "lazy pattern.\n"); |
719 | return -EINVAL; | 763 | return -EINVAL; |
720 | } | 764 | } |
721 | 765 | ||
722 | if (pp->offset && !pp->function) { | 766 | if (pp->offset && !pp->function) { |
723 | semantic_error("Offset requires an entry function."); | 767 | semantic_error("Offset requires an entry function.\n"); |
724 | return -EINVAL; | 768 | return -EINVAL; |
725 | } | 769 | } |
726 | 770 | ||
727 | if (pp->retprobe && !pp->function) { | 771 | if (pp->retprobe && !pp->function) { |
728 | semantic_error("Return probe requires an entry function."); | 772 | semantic_error("Return probe requires an entry function.\n"); |
729 | return -EINVAL; | 773 | return -EINVAL; |
730 | } | 774 | } |
731 | 775 | ||
732 | if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe) { | 776 | if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe) { |
733 | semantic_error("Offset/Line/Lazy pattern can't be used with " | 777 | semantic_error("Offset/Line/Lazy pattern can't be used with " |
734 | "return probe."); | 778 | "return probe.\n"); |
735 | return -EINVAL; | 779 | return -EINVAL; |
736 | } | 780 | } |
737 | 781 | ||
@@ -1005,7 +1049,7 @@ int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len) | |||
1005 | 1049 | ||
1006 | return tmp - buf; | 1050 | return tmp - buf; |
1007 | error: | 1051 | error: |
1008 | pr_debug("Failed to synthesize perf probe argument: %s", | 1052 | pr_debug("Failed to synthesize perf probe argument: %s\n", |
1009 | strerror(-ret)); | 1053 | strerror(-ret)); |
1010 | return ret; | 1054 | return ret; |
1011 | } | 1055 | } |
@@ -1033,13 +1077,13 @@ static char *synthesize_perf_probe_point(struct perf_probe_point *pp) | |||
1033 | goto error; | 1077 | goto error; |
1034 | } | 1078 | } |
1035 | if (pp->file) { | 1079 | if (pp->file) { |
1036 | len = strlen(pp->file) - 31; | 1080 | tmp = pp->file; |
1037 | if (len < 0) | 1081 | len = strlen(tmp); |
1038 | len = 0; | 1082 | if (len > 30) { |
1039 | tmp = strchr(pp->file + len, '/'); | 1083 | tmp = strchr(pp->file + len - 30, '/'); |
1040 | if (!tmp) | 1084 | tmp = tmp ? tmp + 1 : pp->file + len - 30; |
1041 | tmp = pp->file + len; | 1085 | } |
1042 | ret = e_snprintf(file, 32, "@%s", tmp + 1); | 1086 | ret = e_snprintf(file, 32, "@%s", tmp); |
1043 | if (ret <= 0) | 1087 | if (ret <= 0) |
1044 | goto error; | 1088 | goto error; |
1045 | } | 1089 | } |
@@ -1055,7 +1099,7 @@ static char *synthesize_perf_probe_point(struct perf_probe_point *pp) | |||
1055 | 1099 | ||
1056 | return buf; | 1100 | return buf; |
1057 | error: | 1101 | error: |
1058 | pr_debug("Failed to synthesize perf probe point: %s", | 1102 | pr_debug("Failed to synthesize perf probe point: %s\n", |
1059 | strerror(-ret)); | 1103 | strerror(-ret)); |
1060 | if (buf) | 1104 | if (buf) |
1061 | free(buf); | 1105 | free(buf); |
@@ -1796,7 +1840,7 @@ static int del_trace_probe_event(int fd, const char *group, | |||
1796 | 1840 | ||
1797 | ret = e_snprintf(buf, 128, "%s:%s", group, event); | 1841 | ret = e_snprintf(buf, 128, "%s:%s", group, event); |
1798 | if (ret < 0) { | 1842 | if (ret < 0) { |
1799 | pr_err("Failed to copy event."); | 1843 | pr_err("Failed to copy event.\n"); |
1800 | return ret; | 1844 | return ret; |
1801 | } | 1845 | } |
1802 | 1846 | ||
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index ddf4d4556321..ab83b6ac5d65 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
@@ -652,8 +652,8 @@ static_var: | |||
652 | regs = get_arch_regstr(regn); | 652 | regs = get_arch_regstr(regn); |
653 | if (!regs) { | 653 | if (!regs) { |
654 | /* This should be a bug in DWARF or this tool */ | 654 | /* This should be a bug in DWARF or this tool */ |
655 | pr_warning("Mapping for DWARF register number %u " | 655 | pr_warning("Mapping for the register number %u " |
656 | "missing on this architecture.", regn); | 656 | "missing on this architecture.\n", regn); |
657 | return -ERANGE; | 657 | return -ERANGE; |
658 | } | 658 | } |
659 | 659 | ||
@@ -699,13 +699,14 @@ static int convert_variable_type(Dwarf_Die *vr_die, | |||
699 | if (ret != DW_TAG_pointer_type && | 699 | if (ret != DW_TAG_pointer_type && |
700 | ret != DW_TAG_array_type) { | 700 | ret != DW_TAG_array_type) { |
701 | pr_warning("Failed to cast into string: " | 701 | pr_warning("Failed to cast into string: " |
702 | "%s(%s) is not a pointer nor array.", | 702 | "%s(%s) is not a pointer nor array.\n", |
703 | dwarf_diename(vr_die), dwarf_diename(&type)); | 703 | dwarf_diename(vr_die), dwarf_diename(&type)); |
704 | return -EINVAL; | 704 | return -EINVAL; |
705 | } | 705 | } |
706 | if (ret == DW_TAG_pointer_type) { | 706 | if (ret == DW_TAG_pointer_type) { |
707 | if (die_get_real_type(&type, &type) == NULL) { | 707 | if (die_get_real_type(&type, &type) == NULL) { |
708 | pr_warning("Failed to get a type information."); | 708 | pr_warning("Failed to get a type" |
709 | " information.\n"); | ||
709 | return -ENOENT; | 710 | return -ENOENT; |
710 | } | 711 | } |
711 | while (*ref_ptr) | 712 | while (*ref_ptr) |
@@ -720,7 +721,7 @@ static int convert_variable_type(Dwarf_Die *vr_die, | |||
720 | if (!die_compare_name(&type, "char") && | 721 | if (!die_compare_name(&type, "char") && |
721 | !die_compare_name(&type, "unsigned char")) { | 722 | !die_compare_name(&type, "unsigned char")) { |
722 | pr_warning("Failed to cast into string: " | 723 | pr_warning("Failed to cast into string: " |
723 | "%s is not (unsigned) char *.", | 724 | "%s is not (unsigned) char *.\n", |
724 | dwarf_diename(vr_die)); | 725 | dwarf_diename(vr_die)); |
725 | return -EINVAL; | 726 | return -EINVAL; |
726 | } | 727 | } |
@@ -830,8 +831,8 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname, | |||
830 | return -EINVAL; | 831 | return -EINVAL; |
831 | } | 832 | } |
832 | if (field->name[0] == '[') { | 833 | if (field->name[0] == '[') { |
833 | pr_err("Semantic error: %s is not a pointor nor array.", | 834 | pr_err("Semantic error: %s is not a pointor" |
834 | varname); | 835 | " nor array.\n", varname); |
835 | return -EINVAL; | 836 | return -EINVAL; |
836 | } | 837 | } |
837 | if (field->ref) { | 838 | if (field->ref) { |
@@ -978,7 +979,7 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr, | |||
978 | name = dwarf_diename(sp_die); | 979 | name = dwarf_diename(sp_die); |
979 | if (name) { | 980 | if (name) { |
980 | if (dwarf_entrypc(sp_die, &eaddr) != 0) { | 981 | if (dwarf_entrypc(sp_die, &eaddr) != 0) { |
981 | pr_warning("Failed to get entry pc of %s\n", | 982 | pr_warning("Failed to get entry address of %s\n", |
982 | dwarf_diename(sp_die)); | 983 | dwarf_diename(sp_die)); |
983 | return -ENOENT; | 984 | return -ENOENT; |
984 | } | 985 | } |
@@ -994,7 +995,7 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr, | |||
994 | if (retprobe) { | 995 | if (retprobe) { |
995 | if (eaddr != paddr) { | 996 | if (eaddr != paddr) { |
996 | pr_warning("Return probe must be on the head of" | 997 | pr_warning("Return probe must be on the head of" |
997 | " a real function\n"); | 998 | " a real function.\n"); |
998 | return -EINVAL; | 999 | return -EINVAL; |
999 | } | 1000 | } |
1000 | tp->retprobe = true; | 1001 | tp->retprobe = true; |
@@ -1033,7 +1034,7 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
1033 | Dwarf_Frame *frame; | 1034 | Dwarf_Frame *frame; |
1034 | if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 || | 1035 | if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 || |
1035 | dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) { | 1036 | dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) { |
1036 | pr_warning("Failed to get CFA on 0x%jx\n", | 1037 | pr_warning("Failed to get call frame on 0x%jx\n", |
1037 | (uintmax_t)pf->addr); | 1038 | (uintmax_t)pf->addr); |
1038 | return -ENOENT; | 1039 | return -ENOENT; |
1039 | } | 1040 | } |
@@ -1060,7 +1061,7 @@ static int find_probe_point_by_line(struct probe_finder *pf) | |||
1060 | int ret = 0; | 1061 | int ret = 0; |
1061 | 1062 | ||
1062 | if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) { | 1063 | if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) { |
1063 | pr_warning("No source lines found in this CU.\n"); | 1064 | pr_warning("No source lines found.\n"); |
1064 | return -ENOENT; | 1065 | return -ENOENT; |
1065 | } | 1066 | } |
1066 | 1067 | ||
@@ -1162,7 +1163,7 @@ static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
1162 | } | 1163 | } |
1163 | 1164 | ||
1164 | if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) { | 1165 | if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) { |
1165 | pr_warning("No source lines found in this CU.\n"); | 1166 | pr_warning("No source lines found.\n"); |
1166 | return -ENOENT; | 1167 | return -ENOENT; |
1167 | } | 1168 | } |
1168 | 1169 | ||
@@ -1220,7 +1221,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data) | |||
1220 | else { | 1221 | else { |
1221 | /* Get probe address */ | 1222 | /* Get probe address */ |
1222 | if (dwarf_entrypc(in_die, &addr) != 0) { | 1223 | if (dwarf_entrypc(in_die, &addr) != 0) { |
1223 | pr_warning("Failed to get entry pc of %s.\n", | 1224 | pr_warning("Failed to get entry address of %s.\n", |
1224 | dwarf_diename(in_die)); | 1225 | dwarf_diename(in_die)); |
1225 | param->retval = -ENOENT; | 1226 | param->retval = -ENOENT; |
1226 | return DWARF_CB_ABORT; | 1227 | return DWARF_CB_ABORT; |
@@ -1261,8 +1262,8 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) | |||
1261 | param->retval = find_probe_point_lazy(sp_die, pf); | 1262 | param->retval = find_probe_point_lazy(sp_die, pf); |
1262 | else { | 1263 | else { |
1263 | if (dwarf_entrypc(sp_die, &pf->addr) != 0) { | 1264 | if (dwarf_entrypc(sp_die, &pf->addr) != 0) { |
1264 | pr_warning("Failed to get entry pc of %s.\n", | 1265 | pr_warning("Failed to get entry address of " |
1265 | dwarf_diename(sp_die)); | 1266 | "%s.\n", dwarf_diename(sp_die)); |
1266 | param->retval = -ENOENT; | 1267 | param->retval = -ENOENT; |
1267 | return DWARF_CB_ABORT; | 1268 | return DWARF_CB_ABORT; |
1268 | } | 1269 | } |
@@ -1304,7 +1305,7 @@ static int find_probes(int fd, struct probe_finder *pf) | |||
1304 | 1305 | ||
1305 | dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias); | 1306 | dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias); |
1306 | if (!dbg) { | 1307 | if (!dbg) { |
1307 | pr_warning("No dwarf info found in the vmlinux - " | 1308 | pr_warning("No debug information found in the vmlinux - " |
1308 | "please rebuild with CONFIG_DEBUG_INFO=y.\n"); | 1309 | "please rebuild with CONFIG_DEBUG_INFO=y.\n"); |
1309 | return -EBADF; | 1310 | return -EBADF; |
1310 | } | 1311 | } |
@@ -1549,7 +1550,7 @@ int find_perf_probe_point(unsigned long addr, struct perf_probe_point *ppt) | |||
1549 | /* Open the live linux kernel */ | 1550 | /* Open the live linux kernel */ |
1550 | dbg = dwfl_init_live_kernel_dwarf(addr, &dwfl, &bias); | 1551 | dbg = dwfl_init_live_kernel_dwarf(addr, &dwfl, &bias); |
1551 | if (!dbg) { | 1552 | if (!dbg) { |
1552 | pr_warning("No dwarf info found in the vmlinux - " | 1553 | pr_warning("No debug information found in the vmlinux - " |
1553 | "please rebuild with CONFIG_DEBUG_INFO=y.\n"); | 1554 | "please rebuild with CONFIG_DEBUG_INFO=y.\n"); |
1554 | ret = -EINVAL; | 1555 | ret = -EINVAL; |
1555 | goto end; | 1556 | goto end; |
@@ -1559,7 +1560,8 @@ int find_perf_probe_point(unsigned long addr, struct perf_probe_point *ppt) | |||
1559 | addr += bias; | 1560 | addr += bias; |
1560 | /* Find cu die */ | 1561 | /* Find cu die */ |
1561 | if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr - bias, &cudie)) { | 1562 | if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr - bias, &cudie)) { |
1562 | pr_warning("No CU DIE is found at %lx\n", addr); | 1563 | pr_warning("Failed to find debug information for address %lx\n", |
1564 | addr); | ||
1563 | ret = -EINVAL; | 1565 | ret = -EINVAL; |
1564 | goto end; | 1566 | goto end; |
1565 | } | 1567 | } |
@@ -1684,7 +1686,7 @@ static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf) | |||
1684 | 1686 | ||
1685 | line_list__init(&lf->lr->line_list); | 1687 | line_list__init(&lf->lr->line_list); |
1686 | if (dwarf_getsrclines(&lf->cu_die, &lines, &nlines) != 0) { | 1688 | if (dwarf_getsrclines(&lf->cu_die, &lines, &nlines) != 0) { |
1687 | pr_warning("No source lines found in this CU.\n"); | 1689 | pr_warning("No source lines found.\n"); |
1688 | return -ENOENT; | 1690 | return -ENOENT; |
1689 | } | 1691 | } |
1690 | 1692 | ||
@@ -1809,7 +1811,7 @@ int find_line_range(int fd, struct line_range *lr) | |||
1809 | 1811 | ||
1810 | dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias); | 1812 | dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias); |
1811 | if (!dbg) { | 1813 | if (!dbg) { |
1812 | pr_warning("No dwarf info found in the vmlinux - " | 1814 | pr_warning("No debug information found in the vmlinux - " |
1813 | "please rebuild with CONFIG_DEBUG_INFO=y.\n"); | 1815 | "please rebuild with CONFIG_DEBUG_INFO=y.\n"); |
1814 | return -EBADF; | 1816 | return -EBADF; |
1815 | } | 1817 | } |
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index bba69d455699..beaefc3c1223 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h | |||
@@ -34,9 +34,9 @@ extern int find_available_vars_at(int fd, struct perf_probe_event *pev, | |||
34 | bool externs); | 34 | bool externs); |
35 | 35 | ||
36 | #include <dwarf.h> | 36 | #include <dwarf.h> |
37 | #include <libdw.h> | 37 | #include <elfutils/libdw.h> |
38 | #include <libdwfl.h> | 38 | #include <elfutils/libdwfl.h> |
39 | #include <version.h> | 39 | #include <elfutils/version.h> |
40 | 40 | ||
41 | struct probe_finder { | 41 | struct probe_finder { |
42 | struct perf_probe_event *pev; /* Target probe event */ | 42 | struct perf_probe_event *pev; /* Target probe event */ |
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index b059dc50cc2d..93680818e244 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * trace-event-perl. Feed perf trace events to an embedded Perl interpreter. | 2 | * trace-event-perl. Feed perf script events to an embedded Perl interpreter. |
3 | * | 3 | * |
4 | * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> | 4 | * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> |
5 | * | 5 | * |
@@ -411,8 +411,8 @@ static int perl_generate_script(const char *outfile) | |||
411 | return -1; | 411 | return -1; |
412 | } | 412 | } |
413 | 413 | ||
414 | fprintf(ofp, "# perf trace event handlers, " | 414 | fprintf(ofp, "# perf script event handlers, " |
415 | "generated by perf trace -g perl\n"); | 415 | "generated by perf script -g perl\n"); |
416 | 416 | ||
417 | fprintf(ofp, "# Licensed under the terms of the GNU GPL" | 417 | fprintf(ofp, "# Licensed under the terms of the GNU GPL" |
418 | " License version 2\n\n"); | 418 | " License version 2\n\n"); |
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 33a632523743..c6d99334bdfa 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c | |||
@@ -442,8 +442,8 @@ static int python_generate_script(const char *outfile) | |||
442 | fprintf(stderr, "couldn't open %s\n", fname); | 442 | fprintf(stderr, "couldn't open %s\n", fname); |
443 | return -1; | 443 | return -1; |
444 | } | 444 | } |
445 | fprintf(ofp, "# perf trace event handlers, " | 445 | fprintf(ofp, "# perf script event handlers, " |
446 | "generated by perf trace -g python\n"); | 446 | "generated by perf script -g python\n"); |
447 | 447 | ||
448 | fprintf(ofp, "# Licensed under the terms of the GNU GPL" | 448 | fprintf(ofp, "# Licensed under the terms of the GNU GPL" |
449 | " License version 2\n\n"); | 449 | " License version 2\n\n"); |
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index fa9d652c2dc3..b163dfd6cbc5 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -65,9 +65,49 @@ out_close: | |||
65 | return -1; | 65 | return -1; |
66 | } | 66 | } |
67 | 67 | ||
68 | static void perf_session__id_header_size(struct perf_session *session) | ||
69 | { | ||
70 | struct sample_data *data; | ||
71 | u64 sample_type = session->sample_type; | ||
72 | u16 size = 0; | ||
73 | |||
74 | if (!session->sample_id_all) | ||
75 | goto out; | ||
76 | |||
77 | if (sample_type & PERF_SAMPLE_TID) | ||
78 | size += sizeof(data->tid) * 2; | ||
79 | |||
80 | if (sample_type & PERF_SAMPLE_TIME) | ||
81 | size += sizeof(data->time); | ||
82 | |||
83 | if (sample_type & PERF_SAMPLE_ID) | ||
84 | size += sizeof(data->id); | ||
85 | |||
86 | if (sample_type & PERF_SAMPLE_STREAM_ID) | ||
87 | size += sizeof(data->stream_id); | ||
88 | |||
89 | if (sample_type & PERF_SAMPLE_CPU) | ||
90 | size += sizeof(data->cpu) * 2; | ||
91 | out: | ||
92 | session->id_hdr_size = size; | ||
93 | } | ||
94 | |||
95 | void perf_session__set_sample_id_all(struct perf_session *session, bool value) | ||
96 | { | ||
97 | session->sample_id_all = value; | ||
98 | perf_session__id_header_size(session); | ||
99 | } | ||
100 | |||
101 | void perf_session__set_sample_type(struct perf_session *session, u64 type) | ||
102 | { | ||
103 | session->sample_type = type; | ||
104 | } | ||
105 | |||
68 | void perf_session__update_sample_type(struct perf_session *self) | 106 | void perf_session__update_sample_type(struct perf_session *self) |
69 | { | 107 | { |
70 | self->sample_type = perf_header__sample_type(&self->header); | 108 | self->sample_type = perf_header__sample_type(&self->header); |
109 | self->sample_id_all = perf_header__sample_id_all(&self->header); | ||
110 | perf_session__id_header_size(self); | ||
71 | } | 111 | } |
72 | 112 | ||
73 | int perf_session__create_kernel_maps(struct perf_session *self) | 113 | int perf_session__create_kernel_maps(struct perf_session *self) |
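For a sense of scale, the extra per-event id header that perf_session__id_header_size() yields when sample_id_all is set and sample_type is PERF_SAMPLE_TID | TIME | ID | CPU (field widths per struct sample_data: u32 pid/tid, u64 time, u64 id, u32 cpu plus padding):

        size  = sizeof(data->tid)  * 2;   /* pid + tid       :  8 bytes */
        size += sizeof(data->time);       /* timestamp       :  8 bytes */
        size += sizeof(data->id);         /* event id        :  8 bytes */
        size += sizeof(data->cpu)  * 2;   /* cpu + reserved  :  8 bytes */
                                          /* id_hdr_size     : 32 bytes */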
@@ -85,7 +125,9 @@ static void perf_session__destroy_kernel_maps(struct perf_session *self) | |||
85 | machines__destroy_guest_kernel_maps(&self->machines); | 125 | machines__destroy_guest_kernel_maps(&self->machines); |
86 | } | 126 | } |
87 | 127 | ||
88 | struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe) | 128 | struct perf_session *perf_session__new(const char *filename, int mode, |
129 | bool force, bool repipe, | ||
130 | struct perf_event_ops *ops) | ||
89 | { | 131 | { |
90 | size_t len = filename ? strlen(filename) + 1 : 0; | 132 | size_t len = filename ? strlen(filename) + 1 : 0; |
91 | struct perf_session *self = zalloc(sizeof(*self) + len); | 133 | struct perf_session *self = zalloc(sizeof(*self) + len); |
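Because perf_session__new() now takes the tool's event ops (so the ordered-samples fallback further down can be applied at open time), every call site gains one argument; a sketch, with input_name, force and event_ops assumed from the calling builtin:

        session = perf_session__new(input_name, O_RDONLY, force,
                                    false /* repipe */, &event_ops);
        if (session == NULL)
                return -ENOMEM;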
@@ -101,10 +143,20 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc | |||
101 | INIT_LIST_HEAD(&self->dead_threads); | 143 | INIT_LIST_HEAD(&self->dead_threads); |
102 | self->hists_tree = RB_ROOT; | 144 | self->hists_tree = RB_ROOT; |
103 | self->last_match = NULL; | 145 | self->last_match = NULL; |
104 | self->mmap_window = 32; | 146 | /* |
147 | * On 64bit we can mmap the data file in one go. No need for tiny mmap | ||
148 | * slices. On 32bit we use 32MB. | ||
149 | */ | ||
150 | #if BITS_PER_LONG == 64 | ||
151 | self->mmap_window = ULLONG_MAX; | ||
152 | #else | ||
153 | self->mmap_window = 32 * 1024 * 1024ULL; | ||
154 | #endif | ||
105 | self->machines = RB_ROOT; | 155 | self->machines = RB_ROOT; |
106 | self->repipe = repipe; | 156 | self->repipe = repipe; |
107 | INIT_LIST_HEAD(&self->ordered_samples.samples_head); | 157 | INIT_LIST_HEAD(&self->ordered_samples.samples); |
158 | INIT_LIST_HEAD(&self->ordered_samples.sample_cache); | ||
159 | INIT_LIST_HEAD(&self->ordered_samples.to_free); | ||
108 | machine__init(&self->host_machine, "", HOST_KERNEL_ID); | 160 | machine__init(&self->host_machine, "", HOST_KERNEL_ID); |
109 | 161 | ||
110 | if (mode == O_RDONLY) { | 162 | if (mode == O_RDONLY) { |
@@ -120,6 +172,13 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc | |||
120 | } | 172 | } |
121 | 173 | ||
122 | perf_session__update_sample_type(self); | 174 | perf_session__update_sample_type(self); |
175 | |||
176 | if (ops && ops->ordering_requires_timestamps && | ||
177 | ops->ordered_samples && !self->sample_id_all) { | ||
178 | dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); | ||
179 | ops->ordered_samples = false; | ||
180 | } | ||
181 | |||
123 | out: | 182 | out: |
124 | return self; | 183 | return self; |
125 | out_free: | 184 | out_free: |
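The warning above only fires for tools that opt in via the new flag pair; a sketch of what such an ops table might look like (the handler names are illustrative, event__process_lost is the default wired in below, and the two bool fields are the ones added by this patch):

        static struct perf_event_ops event_ops = {
                .sample                       = process_sample_event,
                .comm                         = event__process_comm,
                .lost                         = event__process_lost,
                .ordered_samples              = true,
                .ordering_requires_timestamps = true,
        };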
@@ -230,7 +289,15 @@ struct map_symbol *perf_session__resolve_callchain(struct perf_session *self, | |||
230 | return syms; | 289 | return syms; |
231 | } | 290 | } |
232 | 291 | ||
292 | static int process_event_synth_stub(event_t *event __used, | ||
293 | struct perf_session *session __used) | ||
294 | { | ||
295 | dump_printf(": unhandled!\n"); | ||
296 | return 0; | ||
297 | } | ||
298 | |||
233 | static int process_event_stub(event_t *event __used, | 299 | static int process_event_stub(event_t *event __used, |
300 | struct sample_data *sample __used, | ||
234 | struct perf_session *session __used) | 301 | struct perf_session *session __used) |
235 | { | 302 | { |
236 | dump_printf(": unhandled!\n"); | 303 | dump_printf(": unhandled!\n"); |
@@ -262,7 +329,7 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) | |||
262 | if (handler->exit == NULL) | 329 | if (handler->exit == NULL) |
263 | handler->exit = process_event_stub; | 330 | handler->exit = process_event_stub; |
264 | if (handler->lost == NULL) | 331 | if (handler->lost == NULL) |
265 | handler->lost = process_event_stub; | 332 | handler->lost = event__process_lost; |
266 | if (handler->read == NULL) | 333 | if (handler->read == NULL) |
267 | handler->read = process_event_stub; | 334 | handler->read = process_event_stub; |
268 | if (handler->throttle == NULL) | 335 | if (handler->throttle == NULL) |
@@ -270,13 +337,13 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) | |||
270 | if (handler->unthrottle == NULL) | 337 | if (handler->unthrottle == NULL) |
271 | handler->unthrottle = process_event_stub; | 338 | handler->unthrottle = process_event_stub; |
272 | if (handler->attr == NULL) | 339 | if (handler->attr == NULL) |
273 | handler->attr = process_event_stub; | 340 | handler->attr = process_event_synth_stub; |
274 | if (handler->event_type == NULL) | 341 | if (handler->event_type == NULL) |
275 | handler->event_type = process_event_stub; | 342 | handler->event_type = process_event_synth_stub; |
276 | if (handler->tracing_data == NULL) | 343 | if (handler->tracing_data == NULL) |
277 | handler->tracing_data = process_event_stub; | 344 | handler->tracing_data = process_event_synth_stub; |
278 | if (handler->build_id == NULL) | 345 | if (handler->build_id == NULL) |
279 | handler->build_id = process_event_stub; | 346 | handler->build_id = process_event_synth_stub; |
280 | if (handler->finished_round == NULL) { | 347 | if (handler->finished_round == NULL) { |
281 | if (handler->ordered_samples) | 348 | if (handler->ordered_samples) |
282 | handler->finished_round = process_finished_round; | 349 | handler->finished_round = process_finished_round; |
@@ -386,33 +453,61 @@ static event__swap_op event__swap_ops[] = { | |||
386 | 453 | ||
387 | struct sample_queue { | 454 | struct sample_queue { |
388 | u64 timestamp; | 455 | u64 timestamp; |
389 | struct sample_event *event; | 456 | u64 file_offset; |
457 | event_t *event; | ||
390 | struct list_head list; | 458 | struct list_head list; |
391 | }; | 459 | }; |
392 | 460 | ||
461 | static void perf_session_free_sample_buffers(struct perf_session *session) | ||
462 | { | ||
463 | struct ordered_samples *os = &session->ordered_samples; | ||
464 | |||
465 | while (!list_empty(&os->to_free)) { | ||
466 | struct sample_queue *sq; | ||
467 | |||
468 | sq = list_entry(os->to_free.next, struct sample_queue, list); | ||
469 | list_del(&sq->list); | ||
470 | free(sq); | ||
471 | } | ||
472 | } | ||
473 | |||
474 | static int perf_session_deliver_event(struct perf_session *session, | ||
475 | event_t *event, | ||
476 | struct sample_data *sample, | ||
477 | struct perf_event_ops *ops, | ||
478 | u64 file_offset); | ||
479 | |||
393 | static void flush_sample_queue(struct perf_session *s, | 480 | static void flush_sample_queue(struct perf_session *s, |
394 | struct perf_event_ops *ops) | 481 | struct perf_event_ops *ops) |
395 | { | 482 | { |
396 | struct list_head *head = &s->ordered_samples.samples_head; | 483 | struct ordered_samples *os = &s->ordered_samples; |
397 | u64 limit = s->ordered_samples.next_flush; | 484 | struct list_head *head = &os->samples; |
398 | struct sample_queue *tmp, *iter; | 485 | struct sample_queue *tmp, *iter; |
486 | struct sample_data sample; | ||
487 | u64 limit = os->next_flush; | ||
488 | u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL; | ||
399 | 489 | ||
400 | if (!ops->ordered_samples || !limit) | 490 | if (!ops->ordered_samples || !limit) |
401 | return; | 491 | return; |
402 | 492 | ||
403 | list_for_each_entry_safe(iter, tmp, head, list) { | 493 | list_for_each_entry_safe(iter, tmp, head, list) { |
404 | if (iter->timestamp > limit) | 494 | if (iter->timestamp > limit) |
405 | return; | 495 | break; |
406 | 496 | ||
407 | if (iter == s->ordered_samples.last_inserted) | 497 | event__parse_sample(iter->event, s, &sample); |
408 | s->ordered_samples.last_inserted = NULL; | 498 | perf_session_deliver_event(s, iter->event, &sample, ops, |
499 | iter->file_offset); | ||
409 | 500 | ||
410 | ops->sample((event_t *)iter->event, s); | 501 | os->last_flush = iter->timestamp; |
411 | |||
412 | s->ordered_samples.last_flush = iter->timestamp; | ||
413 | list_del(&iter->list); | 502 | list_del(&iter->list); |
414 | free(iter->event); | 503 | list_add(&iter->list, &os->sample_cache); |
415 | free(iter); | 504 | } |
505 | |||
506 | if (list_empty(head)) { | ||
507 | os->last_sample = NULL; | ||
508 | } else if (last_ts <= limit) { | ||
509 | os->last_sample = | ||
510 | list_entry(head->prev, struct sample_queue, list); | ||
416 | } | 511 | } |
417 | } | 512 | } |
418 | 513 | ||
@@ -465,178 +560,265 @@ static int process_finished_round(event_t *event __used, | |||
465 | return 0; | 560 | return 0; |
466 | } | 561 | } |
467 | 562 | ||
468 | static void __queue_sample_end(struct sample_queue *new, struct list_head *head) | ||
469 | { | ||
470 | struct sample_queue *iter; | ||
471 | |||
472 | list_for_each_entry_reverse(iter, head, list) { | ||
473 | if (iter->timestamp < new->timestamp) { | ||
474 | list_add(&new->list, &iter->list); | ||
475 | return; | ||
476 | } | ||
477 | } | ||
478 | |||
479 | list_add(&new->list, head); | ||
480 | } | ||
481 | |||
482 | static void __queue_sample_before(struct sample_queue *new, | ||
483 | struct sample_queue *iter, | ||
484 | struct list_head *head) | ||
485 | { | ||
486 | list_for_each_entry_continue_reverse(iter, head, list) { | ||
487 | if (iter->timestamp < new->timestamp) { | ||
488 | list_add(&new->list, &iter->list); | ||
489 | return; | ||
490 | } | ||
491 | } | ||
492 | |||
493 | list_add(&new->list, head); | ||
494 | } | ||
495 | |||
496 | static void __queue_sample_after(struct sample_queue *new, | ||
497 | struct sample_queue *iter, | ||
498 | struct list_head *head) | ||
499 | { | ||
500 | list_for_each_entry_continue(iter, head, list) { | ||
501 | if (iter->timestamp > new->timestamp) { | ||
502 | list_add_tail(&new->list, &iter->list); | ||
503 | return; | ||
504 | } | ||
505 | } | ||
506 | list_add_tail(&new->list, head); | ||
507 | } | ||
508 | |||
509 | /* The queue is ordered by time */ | 563 | /* The queue is ordered by time */ |
510 | static void __queue_sample_event(struct sample_queue *new, | 564 | static void __queue_event(struct sample_queue *new, struct perf_session *s) |
511 | struct perf_session *s) | ||
512 | { | 565 | { |
513 | struct sample_queue *last_inserted = s->ordered_samples.last_inserted; | 566 | struct ordered_samples *os = &s->ordered_samples; |
514 | struct list_head *head = &s->ordered_samples.samples_head; | 567 | struct sample_queue *sample = os->last_sample; |
568 | u64 timestamp = new->timestamp; | ||
569 | struct list_head *p; | ||
515 | 570 | ||
571 | os->last_sample = new; | ||
516 | 572 | ||
517 | if (!last_inserted) { | 573 | if (!sample) { |
518 | __queue_sample_end(new, head); | 574 | list_add(&new->list, &os->samples); |
575 | os->max_timestamp = timestamp; | ||
519 | return; | 576 | return; |
520 | } | 577 | } |
521 | 578 | ||
522 | /* | 579 | /* |
523 | * Most of the time the current event has a timestamp | 580 | * last_sample might point to some random place in the list as it's |
524 | * very close to the last event inserted, unless we just switched | 581 | * the last queued event. We expect that the new event is close to |
525 | * to another event buffer. Having a sorting based on a list and | 582 | * this. |
526 | * on the last inserted event that is close to the current one is | ||
527 | * probably more efficient than an rbtree based sorting. | ||
528 | */ | 583 | */ |
529 | if (last_inserted->timestamp >= new->timestamp) | 584 | if (sample->timestamp <= timestamp) { |
530 | __queue_sample_before(new, last_inserted, head); | 585 | while (sample->timestamp <= timestamp) { |
531 | else | 586 | p = sample->list.next; |
532 | __queue_sample_after(new, last_inserted, head); | 587 | if (p == &os->samples) { |
588 | list_add_tail(&new->list, &os->samples); | ||
589 | os->max_timestamp = timestamp; | ||
590 | return; | ||
591 | } | ||
592 | sample = list_entry(p, struct sample_queue, list); | ||
593 | } | ||
594 | list_add_tail(&new->list, &sample->list); | ||
595 | } else { | ||
596 | while (sample->timestamp > timestamp) { | ||
597 | p = sample->list.prev; | ||
598 | if (p == &os->samples) { | ||
599 | list_add(&new->list, &os->samples); | ||
600 | return; | ||
601 | } | ||
602 | sample = list_entry(p, struct sample_queue, list); | ||
603 | } | ||
604 | list_add(&new->list, &sample->list); | ||
605 | } | ||
533 | } | 606 | } |
534 | 607 | ||
535 | static int queue_sample_event(event_t *event, struct sample_data *data, | 608 | #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) |
536 | struct perf_session *s) | 609 | |
610 | static int perf_session_queue_event(struct perf_session *s, event_t *event, | ||
611 | struct sample_data *data, u64 file_offset) | ||
537 | { | 612 | { |
613 | struct ordered_samples *os = &s->ordered_samples; | ||
614 | struct list_head *sc = &os->sample_cache; | ||
538 | u64 timestamp = data->time; | 615 | u64 timestamp = data->time; |
539 | struct sample_queue *new; | 616 | struct sample_queue *new; |
540 | 617 | ||
618 | if (!timestamp || timestamp == ~0ULL) | ||
619 | return -ETIME; | ||
541 | 620 | ||
542 | if (timestamp < s->ordered_samples.last_flush) { | 621 | if (timestamp < s->ordered_samples.last_flush) { |
543 | printf("Warning: Timestamp below last timeslice flush\n"); | 622 | printf("Warning: Timestamp below last timeslice flush\n"); |
544 | return -EINVAL; | 623 | return -EINVAL; |
545 | } | 624 | } |
546 | 625 | ||
547 | new = malloc(sizeof(*new)); | 626 | if (!list_empty(sc)) { |
548 | if (!new) | 627 | new = list_entry(sc->next, struct sample_queue, list); |
549 | return -ENOMEM; | 628 | list_del(&new->list); |
629 | } else if (os->sample_buffer) { | ||
630 | new = os->sample_buffer + os->sample_buffer_idx; | ||
631 | if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER) | ||
632 | os->sample_buffer = NULL; | ||
633 | } else { | ||
634 | os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new)); | ||
635 | if (!os->sample_buffer) | ||
636 | return -ENOMEM; | ||
637 | list_add(&os->sample_buffer->list, &os->to_free); | ||
638 | os->sample_buffer_idx = 2; | ||
639 | new = os->sample_buffer + 1; | ||
640 | } | ||
550 | 641 | ||
551 | new->timestamp = timestamp; | 642 | new->timestamp = timestamp; |
643 | new->file_offset = file_offset; | ||
644 | new->event = event; | ||
552 | 645 | ||
553 | new->event = malloc(event->header.size); | 646 | __queue_event(new, s); |
554 | if (!new->event) { | ||
555 | free(new); | ||
556 | return -ENOMEM; | ||
557 | } | ||
558 | 647 | ||
559 | memcpy(new->event, event, event->header.size); | 648 | return 0; |
649 | } | ||
560 | 650 | ||
561 | __queue_sample_event(new, s); | 651 | static void callchain__printf(struct sample_data *sample) |
562 | s->ordered_samples.last_inserted = new; | 652 | { |
653 | unsigned int i; | ||
563 | 654 | ||
564 | if (new->timestamp > s->ordered_samples.max_timestamp) | 655 | printf("... chain: nr:%Lu\n", sample->callchain->nr); |
565 | s->ordered_samples.max_timestamp = new->timestamp; | ||
566 | 656 | ||
567 | return 0; | 657 | for (i = 0; i < sample->callchain->nr; i++) |
658 | printf("..... %2d: %016Lx\n", i, sample->callchain->ips[i]); | ||
568 | } | 659 | } |
569 | 660 | ||
570 | static int perf_session__process_sample(event_t *event, struct perf_session *s, | 661 | static void perf_session__print_tstamp(struct perf_session *session, |
571 | struct perf_event_ops *ops) | 662 | event_t *event, |
663 | struct sample_data *sample) | ||
572 | { | 664 | { |
573 | struct sample_data data; | 665 | if (event->header.type != PERF_RECORD_SAMPLE && |
666 | !session->sample_id_all) { | ||
667 | fputs("-1 -1 ", stdout); | ||
668 | return; | ||
669 | } | ||
670 | |||
671 | if ((session->sample_type & PERF_SAMPLE_CPU)) | ||
672 | printf("%u ", sample->cpu); | ||
673 | |||
674 | if (session->sample_type & PERF_SAMPLE_TIME) | ||
675 | printf("%Lu ", sample->time); | ||
676 | } | ||
574 | 677 | ||
575 | if (!ops->ordered_samples) | 678 | static void dump_event(struct perf_session *session, event_t *event, |
576 | return ops->sample(event, s); | 679 | u64 file_offset, struct sample_data *sample) |
680 | { | ||
681 | if (!dump_trace) | ||
682 | return; | ||
577 | 683 | ||
578 | bzero(&data, sizeof(struct sample_data)); | 684 | printf("\n%#Lx [%#x]: event: %d\n", file_offset, event->header.size, |
579 | event__parse_sample(event, s->sample_type, &data); | 685 | event->header.type); |
580 | 686 | ||
581 | queue_sample_event(event, &data, s); | 687 | trace_event(event); |
582 | 688 | ||
583 | return 0; | 689 | if (sample) |
690 | perf_session__print_tstamp(session, event, sample); | ||
691 | |||
692 | printf("%#Lx [%#x]: PERF_RECORD_%s", file_offset, event->header.size, | ||
693 | event__get_event_name(event->header.type)); | ||
584 | } | 694 | } |
585 | 695 | ||
586 | static int perf_session__process_event(struct perf_session *self, | 696 | static void dump_sample(struct perf_session *session, event_t *event, |
587 | event_t *event, | 697 | struct sample_data *sample) |
588 | struct perf_event_ops *ops, | ||
589 | u64 offset, u64 head) | ||
590 | { | 698 | { |
591 | trace_event(event); | 699 | if (!dump_trace) |
700 | return; | ||
592 | 701 | ||
593 | if (event->header.type < PERF_RECORD_HEADER_MAX) { | 702 | printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc, |
594 | dump_printf("%#Lx [%#x]: PERF_RECORD_%s", | 703 | sample->pid, sample->tid, sample->ip, sample->period); |
595 | offset + head, event->header.size, | ||
596 | event__name[event->header.type]); | ||
597 | hists__inc_nr_events(&self->hists, event->header.type); | ||
598 | } | ||
599 | 704 | ||
600 | if (self->header.needs_swap && event__swap_ops[event->header.type]) | 705 | if (session->sample_type & PERF_SAMPLE_CALLCHAIN) |
601 | event__swap_ops[event->header.type](event); | 706 | callchain__printf(sample); |
707 | } | ||
708 | |||
709 | static int perf_session_deliver_event(struct perf_session *session, | ||
710 | event_t *event, | ||
711 | struct sample_data *sample, | ||
712 | struct perf_event_ops *ops, | ||
713 | u64 file_offset) | ||
714 | { | ||
715 | dump_event(session, event, file_offset, sample); | ||
602 | 716 | ||
603 | switch (event->header.type) { | 717 | switch (event->header.type) { |
604 | case PERF_RECORD_SAMPLE: | 718 | case PERF_RECORD_SAMPLE: |
605 | return perf_session__process_sample(event, self, ops); | 719 | dump_sample(session, event, sample); |
720 | return ops->sample(event, sample, session); | ||
606 | case PERF_RECORD_MMAP: | 721 | case PERF_RECORD_MMAP: |
607 | return ops->mmap(event, self); | 722 | return ops->mmap(event, sample, session); |
608 | case PERF_RECORD_COMM: | 723 | case PERF_RECORD_COMM: |
609 | return ops->comm(event, self); | 724 | return ops->comm(event, sample, session); |
610 | case PERF_RECORD_FORK: | 725 | case PERF_RECORD_FORK: |
611 | return ops->fork(event, self); | 726 | return ops->fork(event, sample, session); |
612 | case PERF_RECORD_EXIT: | 727 | case PERF_RECORD_EXIT: |
613 | return ops->exit(event, self); | 728 | return ops->exit(event, sample, session); |
614 | case PERF_RECORD_LOST: | 729 | case PERF_RECORD_LOST: |
615 | return ops->lost(event, self); | 730 | return ops->lost(event, sample, session); |
616 | case PERF_RECORD_READ: | 731 | case PERF_RECORD_READ: |
617 | return ops->read(event, self); | 732 | return ops->read(event, sample, session); |
618 | case PERF_RECORD_THROTTLE: | 733 | case PERF_RECORD_THROTTLE: |
619 | return ops->throttle(event, self); | 734 | return ops->throttle(event, sample, session); |
620 | case PERF_RECORD_UNTHROTTLE: | 735 | case PERF_RECORD_UNTHROTTLE: |
621 | return ops->unthrottle(event, self); | 736 | return ops->unthrottle(event, sample, session); |
737 | default: | ||
738 | ++session->hists.stats.nr_unknown_events; | ||
739 | return -1; | ||
740 | } | ||
741 | } | ||
742 | |||
743 | static int perf_session__preprocess_sample(struct perf_session *session, | ||
744 | event_t *event, struct sample_data *sample) | ||
745 | { | ||
746 | if (event->header.type != PERF_RECORD_SAMPLE || | ||
747 | !(session->sample_type & PERF_SAMPLE_CALLCHAIN)) | ||
748 | return 0; | ||
749 | |||
750 | if (!ip_callchain__valid(sample->callchain, event)) { | ||
751 | pr_debug("call-chain problem with event, skipping it.\n"); | ||
752 | ++session->hists.stats.nr_invalid_chains; | ||
753 | session->hists.stats.total_invalid_chains += sample->period; | ||
754 | return -EINVAL; | ||
755 | } | ||
756 | return 0; | ||
757 | } | ||
758 | |||
759 | static int perf_session__process_user_event(struct perf_session *session, event_t *event, | ||
760 | struct perf_event_ops *ops, u64 file_offset) | ||
761 | { | ||
762 | dump_event(session, event, file_offset, NULL); | ||
763 | |||
764 | /* These events are processed right away */ | ||
765 | switch (event->header.type) { | ||
622 | case PERF_RECORD_HEADER_ATTR: | 766 | case PERF_RECORD_HEADER_ATTR: |
623 | return ops->attr(event, self); | 767 | return ops->attr(event, session); |
624 | case PERF_RECORD_HEADER_EVENT_TYPE: | 768 | case PERF_RECORD_HEADER_EVENT_TYPE: |
625 | return ops->event_type(event, self); | 769 | return ops->event_type(event, session); |
626 | case PERF_RECORD_HEADER_TRACING_DATA: | 770 | case PERF_RECORD_HEADER_TRACING_DATA: |
627 | /* setup for reading amidst mmap */ | 771 | /* setup for reading amidst mmap */ |
628 | lseek(self->fd, offset + head, SEEK_SET); | 772 | lseek(session->fd, file_offset, SEEK_SET); |
629 | return ops->tracing_data(event, self); | 773 | return ops->tracing_data(event, session); |
630 | case PERF_RECORD_HEADER_BUILD_ID: | 774 | case PERF_RECORD_HEADER_BUILD_ID: |
631 | return ops->build_id(event, self); | 775 | return ops->build_id(event, session); |
632 | case PERF_RECORD_FINISHED_ROUND: | 776 | case PERF_RECORD_FINISHED_ROUND: |
633 | return ops->finished_round(event, self, ops); | 777 | return ops->finished_round(event, session, ops); |
634 | default: | 778 | default: |
635 | ++self->hists.stats.nr_unknown_events; | 779 | return -EINVAL; |
636 | return -1; | ||
637 | } | 780 | } |
638 | } | 781 | } |
639 | 782 | ||
783 | static int perf_session__process_event(struct perf_session *session, | ||
784 | event_t *event, | ||
785 | struct perf_event_ops *ops, | ||
786 | u64 file_offset) | ||
787 | { | ||
788 | struct sample_data sample; | ||
789 | int ret; | ||
790 | |||
791 | if (session->header.needs_swap && event__swap_ops[event->header.type]) | ||
792 | event__swap_ops[event->header.type](event); | ||
793 | |||
794 | if (event->header.type >= PERF_RECORD_HEADER_MAX) | ||
795 | return -EINVAL; | ||
796 | |||
797 | hists__inc_nr_events(&session->hists, event->header.type); | ||
798 | |||
799 | if (event->header.type >= PERF_RECORD_USER_TYPE_START) | ||
800 | return perf_session__process_user_event(session, event, ops, file_offset); | ||
801 | |||
802 | /* | ||
803 | * For all kernel events we get the sample data | ||
804 | */ | ||
805 | event__parse_sample(event, session, &sample); | ||
806 | |||
807 | /* Preprocess sample records - precheck callchains */ | ||
808 | if (perf_session__preprocess_sample(session, event, &sample)) | ||
809 | return 0; | ||
810 | |||
811 | if (ops->ordered_samples) { | ||
812 | ret = perf_session_queue_event(session, event, &sample, | ||
813 | file_offset); | ||
814 | if (ret != -ETIME) | ||
815 | return ret; | ||
816 | } | ||
817 | |||
818 | return perf_session_deliver_event(session, event, &sample, ops, | ||
819 | file_offset); | ||
820 | } | ||
821 | |||
640 | void perf_event_header__bswap(struct perf_event_header *self) | 822 | void perf_event_header__bswap(struct perf_event_header *self) |
641 | { | 823 | { |
642 | self->type = bswap_32(self->type); | 824 | self->type = bswap_32(self->type); |
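For a sense of scale, MAX_SAMPLE_BUFFER above sizes one allocation batch at 64KB; assuming the 64-bit layout of struct sample_queue shown earlier (two u64s, a pointer and a 16-byte list_head, no padding), the arithmetic works out as:

        /*
         * sizeof(struct sample_queue) = 8 + 8 + 8 + 16 = 40 bytes
         * MAX_SAMPLE_BUFFER           = 64 * 1024 / 40 = 1638 nodes per malloc()
         * Flushed nodes are recycled via sample_cache, so steady state rarely
         * needs more than a handful of these batches.
         */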
@@ -656,23 +838,6 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se | |||
656 | return thread; | 838 | return thread; |
657 | } | 839 | } |
658 | 840 | ||
659 | int do_read(int fd, void *buf, size_t size) | ||
660 | { | ||
661 | void *buf_start = buf; | ||
662 | |||
663 | while (size) { | ||
664 | int ret = read(fd, buf, size); | ||
665 | |||
666 | if (ret <= 0) | ||
667 | return ret; | ||
668 | |||
669 | size -= ret; | ||
670 | buf += ret; | ||
671 | } | ||
672 | |||
673 | return buf - buf_start; | ||
674 | } | ||
675 | |||
676 | #define session_done() (*(volatile int *)(&session_done)) | 841 | #define session_done() (*(volatile int *)(&session_done)) |
677 | volatile int session_done; | 842 | volatile int session_done; |
678 | 843 | ||
@@ -690,7 +855,7 @@ static int __perf_session__process_pipe_events(struct perf_session *self, | |||
690 | 855 | ||
691 | head = 0; | 856 | head = 0; |
692 | more: | 857 | more: |
693 | err = do_read(self->fd, &event, sizeof(struct perf_event_header)); | 858 | err = readn(self->fd, &event, sizeof(struct perf_event_header)); |
694 | if (err <= 0) { | 859 | if (err <= 0) { |
695 | if (err == 0) | 860 | if (err == 0) |
696 | goto done; | 861 | goto done; |
@@ -710,8 +875,7 @@ more: | |||
710 | p += sizeof(struct perf_event_header); | 875 | p += sizeof(struct perf_event_header); |
711 | 876 | ||
712 | if (size - sizeof(struct perf_event_header)) { | 877 | if (size - sizeof(struct perf_event_header)) { |
713 | err = do_read(self->fd, p, | 878 | err = readn(self->fd, p, size - sizeof(struct perf_event_header)); |
714 | size - sizeof(struct perf_event_header)); | ||
715 | if (err <= 0) { | 879 | if (err <= 0) { |
716 | if (err == 0) { | 880 | if (err == 0) { |
717 | pr_err("unexpected end of event stream\n"); | 881 | pr_err("unexpected end of event stream\n"); |
@@ -724,8 +888,7 @@ more: | |||
724 | } | 888 | } |
725 | 889 | ||
726 | if (size == 0 || | 890 | if (size == 0 || |
727 | (skip = perf_session__process_event(self, &event, ops, | 891 | (skip = perf_session__process_event(self, &event, ops, head)) < 0) { |
728 | 0, head)) < 0) { | ||
729 | dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n", | 892 | dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n", |
730 | head, event.header.size, event.header.type); | 893 | head, event.header.size, event.header.type); |
731 | /* | 894 | /* |
@@ -740,9 +903,6 @@ more: | |||
740 | 903 | ||
741 | head += size; | 904 | head += size; |
742 | 905 | ||
743 | dump_printf("\n%#Lx [%#x]: event: %d\n", | ||
744 | head, event.header.size, event.header.type); | ||
745 | |||
746 | if (skip > 0) | 906 | if (skip > 0) |
747 | head += skip; | 907 | head += skip; |
748 | 908 | ||
@@ -751,82 +911,90 @@ more: | |||
751 | done: | 911 | done: |
752 | err = 0; | 912 | err = 0; |
753 | out_err: | 913 | out_err: |
914 | perf_session_free_sample_buffers(self); | ||
754 | return err; | 915 | return err; |
755 | } | 916 | } |
756 | 917 | ||
757 | int __perf_session__process_events(struct perf_session *self, | 918 | int __perf_session__process_events(struct perf_session *session, |
758 | u64 data_offset, u64 data_size, | 919 | u64 data_offset, u64 data_size, |
759 | u64 file_size, struct perf_event_ops *ops) | 920 | u64 file_size, struct perf_event_ops *ops) |
760 | { | 921 | { |
761 | int err, mmap_prot, mmap_flags; | 922 | u64 head, page_offset, file_offset, file_pos, progress_next; |
762 | u64 head, shift; | 923 | int err, mmap_prot, mmap_flags, map_idx = 0; |
763 | u64 offset = 0; | 924 | struct ui_progress *progress; |
764 | size_t page_size; | 925 | size_t page_size, mmap_size; |
926 | char *buf, *mmaps[8]; | ||
765 | event_t *event; | 927 | event_t *event; |
766 | uint32_t size; | 928 | uint32_t size; |
767 | char *buf; | ||
768 | struct ui_progress *progress = ui_progress__new("Processing events...", | ||
769 | self->size); | ||
770 | if (progress == NULL) | ||
771 | return -1; | ||
772 | 929 | ||
773 | perf_event_ops__fill_defaults(ops); | 930 | perf_event_ops__fill_defaults(ops); |
774 | 931 | ||
775 | page_size = sysconf(_SC_PAGESIZE); | 932 | page_size = sysconf(_SC_PAGESIZE); |
776 | 933 | ||
777 | head = data_offset; | 934 | page_offset = page_size * (data_offset / page_size); |
778 | shift = page_size * (head / page_size); | 935 | file_offset = page_offset; |
779 | offset += shift; | 936 | head = data_offset - page_offset; |
780 | head -= shift; | 937 | |
938 | if (data_offset + data_size < file_size) | ||
939 | file_size = data_offset + data_size; | ||
940 | |||
941 | progress_next = file_size / 16; | ||
942 | progress = ui_progress__new("Processing events...", file_size); | ||
943 | if (progress == NULL) | ||
944 | return -1; | ||
945 | |||
946 | mmap_size = session->mmap_window; | ||
947 | if (mmap_size > file_size) | ||
948 | mmap_size = file_size; | ||
949 | |||
950 | memset(mmaps, 0, sizeof(mmaps)); | ||
781 | 951 | ||
782 | mmap_prot = PROT_READ; | 952 | mmap_prot = PROT_READ; |
783 | mmap_flags = MAP_SHARED; | 953 | mmap_flags = MAP_SHARED; |
784 | 954 | ||
785 | if (self->header.needs_swap) { | 955 | if (session->header.needs_swap) { |
786 | mmap_prot |= PROT_WRITE; | 956 | mmap_prot |= PROT_WRITE; |
787 | mmap_flags = MAP_PRIVATE; | 957 | mmap_flags = MAP_PRIVATE; |
788 | } | 958 | } |
789 | remap: | 959 | remap: |
790 | buf = mmap(NULL, page_size * self->mmap_window, mmap_prot, | 960 | buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd, |
791 | mmap_flags, self->fd, offset); | 961 | file_offset); |
792 | if (buf == MAP_FAILED) { | 962 | if (buf == MAP_FAILED) { |
793 | pr_err("failed to mmap file\n"); | 963 | pr_err("failed to mmap file\n"); |
794 | err = -errno; | 964 | err = -errno; |
795 | goto out_err; | 965 | goto out_err; |
796 | } | 966 | } |
967 | mmaps[map_idx] = buf; | ||
968 | map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); | ||
969 | file_pos = file_offset + head; | ||
797 | 970 | ||
798 | more: | 971 | more: |
799 | event = (event_t *)(buf + head); | 972 | event = (event_t *)(buf + head); |
800 | ui_progress__update(progress, offset); | ||
801 | 973 | ||
802 | if (self->header.needs_swap) | 974 | if (session->header.needs_swap) |
803 | perf_event_header__bswap(&event->header); | 975 | perf_event_header__bswap(&event->header); |
804 | size = event->header.size; | 976 | size = event->header.size; |
805 | if (size == 0) | 977 | if (size == 0) |
806 | size = 8; | 978 | size = 8; |
807 | 979 | ||
808 | if (head + event->header.size >= page_size * self->mmap_window) { | 980 | if (head + event->header.size >= mmap_size) { |
809 | int munmap_ret; | 981 | if (mmaps[map_idx]) { |
810 | 982 | munmap(mmaps[map_idx], mmap_size); | |
811 | shift = page_size * (head / page_size); | 983 | mmaps[map_idx] = NULL; |
812 | 984 | } | |
813 | munmap_ret = munmap(buf, page_size * self->mmap_window); | ||
814 | assert(munmap_ret == 0); | ||
815 | 985 | ||
816 | offset += shift; | 986 | page_offset = page_size * (head / page_size); |
817 | head -= shift; | 987 | file_offset += page_offset; |
988 | head -= page_offset; | ||
818 | goto remap; | 989 | goto remap; |
819 | } | 990 | } |
820 | 991 | ||
821 | size = event->header.size; | 992 | size = event->header.size; |
822 | 993 | ||
823 | dump_printf("\n%#Lx [%#x]: event: %d\n", | ||
824 | offset + head, event->header.size, event->header.type); | ||
825 | |||
826 | if (size == 0 || | 994 | if (size == 0 || |
827 | perf_session__process_event(self, event, ops, offset, head) < 0) { | 995 | perf_session__process_event(session, event, ops, file_pos) < 0) { |
828 | dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n", | 996 | dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n", |
829 | offset + head, event->header.size, | 997 | file_offset + head, event->header.size, |
830 | event->header.type); | 998 | event->header.type); |
831 | /* | 999 | /* |
832 | * assume we lost track of the stream, check alignment, and | 1000 | * assume we lost track of the stream, check alignment, and |
@@ -839,19 +1007,49 @@ more: | |||
839 | } | 1007 | } |
840 | 1008 | ||
841 | head += size; | 1009 | head += size; |
1010 | file_pos += size; | ||
842 | 1011 | ||
843 | if (offset + head >= data_offset + data_size) | 1012 | if (file_pos >= progress_next) { |
844 | goto done; | 1013 | progress_next += file_size / 16; |
1014 | ui_progress__update(progress, file_pos); | ||
1015 | } | ||
845 | 1016 | ||
846 | if (offset + head < file_size) | 1017 | if (file_pos < file_size) |
847 | goto more; | 1018 | goto more; |
848 | done: | 1019 | |
849 | err = 0; | 1020 | err = 0; |
850 | /* do the final flush for ordered samples */ | 1021 | /* do the final flush for ordered samples */ |
851 | self->ordered_samples.next_flush = ULLONG_MAX; | 1022 | session->ordered_samples.next_flush = ULLONG_MAX; |
852 | flush_sample_queue(self, ops); | 1023 | flush_sample_queue(session, ops); |
853 | out_err: | 1024 | out_err: |
854 | ui_progress__delete(progress); | 1025 | ui_progress__delete(progress); |
1026 | |||
1027 | if (ops->lost == event__process_lost && | ||
1028 | session->hists.stats.total_lost != 0) { | ||
1029 | ui__warning("Processed %Lu events and LOST %Lu!\n\n" | ||
1030 | "Check IO/CPU overload!\n\n", | ||
1031 | session->hists.stats.total_period, | ||
1032 | session->hists.stats.total_lost); | ||
1033 | } | ||
1034 | |||
1035 | if (session->hists.stats.nr_unknown_events != 0) { | ||
1036 | ui__warning("Found %u unknown events!\n\n" | ||
1037 | "Is this an older tool processing a perf.data " | ||
1038 | "file generated by a more recent tool?\n\n" | ||
1039 | "If that is not the case, consider " | ||
1040 | "reporting to linux-kernel@vger.kernel.org.\n\n", | ||
1041 | session->hists.stats.nr_unknown_events); | ||
1042 | } | ||
1043 | |||
1044 | if (session->hists.stats.nr_invalid_chains != 0) { | ||
1045 | ui__warning("Found invalid callchains!\n\n" | ||
1046 | "%u out of %u events were discarded for this reason.\n\n" | ||
1047 | "Consider reporting to linux-kernel@vger.kernel.org.\n\n", | ||
1048 | session->hists.stats.nr_invalid_chains, | ||
1049 | session->hists.stats.nr_events[PERF_RECORD_SAMPLE]); | ||
1050 | } | ||
1051 | |||
1052 | perf_session_free_sample_buffers(session); | ||
855 | return err; | 1053 | return err; |
856 | } | 1054 | } |
857 | 1055 | ||
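To make the remap arithmetic in __perf_session__process_events concrete, a small worked example with made-up numbers (page_size comes from sysconf(_SC_PAGESIZE) as in the code):

        /* illustrative: data_offset = 4200, page_size = 4096 */
        page_offset = page_size * (data_offset / page_size);   /* 4096 */
        file_offset = page_offset;                              /* mmap() base */
        head        = data_offset - page_offset;                /* 104 */
        /* the first event header is then read at buf + 104 of a
           page-aligned, mmap_window-sized mapping */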
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 9fa0fc2a863f..decd83f274fd 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h | |||
@@ -17,8 +17,12 @@ struct ordered_samples { | |||
17 | u64 last_flush; | 17 | u64 last_flush; |
18 | u64 next_flush; | 18 | u64 next_flush; |
19 | u64 max_timestamp; | 19 | u64 max_timestamp; |
20 | struct list_head samples_head; | 20 | struct list_head samples; |
21 | struct sample_queue *last_inserted; | 21 | struct list_head sample_cache; |
22 | struct list_head to_free; | ||
23 | struct sample_queue *sample_buffer; | ||
24 | struct sample_queue *last_sample; | ||
25 | int sample_buffer_idx; | ||
22 | }; | 26 | }; |
23 | 27 | ||
24 | struct perf_session { | 28 | struct perf_session { |
@@ -42,6 +46,8 @@ struct perf_session { | |||
42 | int fd; | 46 | int fd; |
43 | bool fd_pipe; | 47 | bool fd_pipe; |
44 | bool repipe; | 48 | bool repipe; |
49 | bool sample_id_all; | ||
50 | u16 id_hdr_size; | ||
45 | int cwdlen; | 51 | int cwdlen; |
46 | char *cwd; | 52 | char *cwd; |
47 | struct ordered_samples ordered_samples; | 53 | struct ordered_samples ordered_samples; |
@@ -50,7 +56,9 @@ struct perf_session { | |||
50 | 56 | ||
51 | struct perf_event_ops; | 57 | struct perf_event_ops; |
52 | 58 | ||
53 | typedef int (*event_op)(event_t *self, struct perf_session *session); | 59 | typedef int (*event_op)(event_t *self, struct sample_data *sample, |
60 | struct perf_session *session); | ||
61 | typedef int (*event_synth_op)(event_t *self, struct perf_session *session); | ||
54 | typedef int (*event_op2)(event_t *self, struct perf_session *session, | 62 | typedef int (*event_op2)(event_t *self, struct perf_session *session, |
55 | struct perf_event_ops *ops); | 63 | struct perf_event_ops *ops); |
56 | 64 | ||
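With the typedef split above, per-event handlers receive the already-parsed sample while the synthesized-record handlers keep the old two-argument form; a minimal sketch of each (the bodies are placeholders):

        static int process_sample_event(event_t *event __used,
                                        struct sample_data *sample,
                                        struct perf_session *session __used)
        {
                /* sample->ip, sample->tid, sample->time, ... already parsed */
                return 0;
        }

        static int process_attr_event(event_t *event __used,
                                      struct perf_session *session __used)
        {
                return 0;       /* event_synth_op: no sample_data argument */
        }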
@@ -63,16 +71,19 @@ struct perf_event_ops { | |||
63 | lost, | 71 | lost, |
64 | read, | 72 | read, |
65 | throttle, | 73 | throttle, |
66 | unthrottle, | 74 | unthrottle; |
67 | attr, | 75 | event_synth_op attr, |
68 | event_type, | 76 | event_type, |
69 | tracing_data, | 77 | tracing_data, |
70 | build_id; | 78 | build_id; |
71 | event_op2 finished_round; | 79 | event_op2 finished_round; |
72 | bool ordered_samples; | 80 | bool ordered_samples; |
81 | bool ordering_requires_timestamps; | ||
73 | }; | 82 | }; |
74 | 83 | ||
75 | struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe); | 84 | struct perf_session *perf_session__new(const char *filename, int mode, |
85 | bool force, bool repipe, | ||
86 | struct perf_event_ops *ops); | ||
76 | void perf_session__delete(struct perf_session *self); | 87 | void perf_session__delete(struct perf_session *self); |
77 | 88 | ||
78 | void perf_event_header__bswap(struct perf_event_header *self); | 89 | void perf_event_header__bswap(struct perf_event_header *self); |
@@ -98,8 +109,9 @@ void mem_bswap_64(void *src, int byte_size); | |||
98 | 109 | ||
99 | int perf_session__create_kernel_maps(struct perf_session *self); | 110 | int perf_session__create_kernel_maps(struct perf_session *self); |
100 | 111 | ||
101 | int do_read(int fd, void *buf, size_t size); | ||
102 | void perf_session__update_sample_type(struct perf_session *self); | 112 | void perf_session__update_sample_type(struct perf_session *self); |
113 | void perf_session__set_sample_id_all(struct perf_session *session, bool value); | ||
114 | void perf_session__set_sample_type(struct perf_session *session, u64 type); | ||
103 | void perf_session__remove_thread(struct perf_session *self, struct thread *th); | 115 | void perf_session__remove_thread(struct perf_session *self, struct thread *th); |
104 | 116 | ||
105 | static inline | 117 | static inline |
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index b62a553cc67d..f44fa541d56e 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c | |||
@@ -170,7 +170,7 @@ static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, | |||
170 | return repsep_snprintf(bf, size, "%-*s", width, dso_name); | 170 | return repsep_snprintf(bf, size, "%-*s", width, dso_name); |
171 | } | 171 | } |
172 | 172 | ||
173 | return repsep_snprintf(bf, size, "%*Lx", width, self->ip); | 173 | return repsep_snprintf(bf, size, "%-*s", width, "[unknown]"); |
174 | } | 174 | } |
175 | 175 | ||
176 | /* --sort symbol */ | 176 | /* --sort symbol */ |
@@ -196,7 +196,7 @@ static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, | |||
196 | 196 | ||
197 | if (verbose) { | 197 | if (verbose) { |
198 | char o = self->ms.map ? dso__symtab_origin(self->ms.map->dso) : '!'; | 198 | char o = self->ms.map ? dso__symtab_origin(self->ms.map->dso) : '!'; |
199 | ret += repsep_snprintf(bf, size, "%*Lx %c ", | 199 | ret += repsep_snprintf(bf, size, "%-#*llx %c ", |
200 | BITS_PER_LONG / 4, self->ip, o); | 200 | BITS_PER_LONG / 4, self->ip, o); |
201 | } | 201 | } |
202 | 202 | ||
@@ -205,7 +205,7 @@ static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, | |||
205 | ret += repsep_snprintf(bf + ret, size - ret, "%s", | 205 | ret += repsep_snprintf(bf + ret, size - ret, "%s", |
206 | self->ms.sym->name); | 206 | self->ms.sym->name); |
207 | else | 207 | else |
208 | ret += repsep_snprintf(bf + ret, size - ret, "%*Lx", | 208 | ret += repsep_snprintf(bf + ret, size - ret, "%-#*llx", |
209 | BITS_PER_LONG / 4, self->ip); | 209 | BITS_PER_LONG / 4, self->ip); |
210 | 210 | ||
211 | return ret; | 211 | return ret; |
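The specifier change above is about alignment and the 0x prefix rather than the value; a quick illustration on x86_64, where BITS_PER_LONG / 4 is 16 (the address is made up):

        printf("%*Lx|\n",    16, 0x4004edULL);  /* old: "          4004ed|" */
        printf("%-#*llx|\n", 16, 0x4004edULL);  /* new: "0x4004ed        |" */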
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 439ab947daf4..15ccfba8cdf8 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -22,6 +22,10 @@ | |||
22 | #include <limits.h> | 22 | #include <limits.h> |
23 | #include <sys/utsname.h> | 23 | #include <sys/utsname.h> |
24 | 24 | ||
25 | #ifndef KSYM_NAME_LEN | ||
26 | #define KSYM_NAME_LEN 128 | ||
27 | #endif | ||
28 | |||
25 | #ifndef NT_GNU_BUILD_ID | 29 | #ifndef NT_GNU_BUILD_ID |
26 | #define NT_GNU_BUILD_ID 3 | 30 | #define NT_GNU_BUILD_ID 3 |
27 | #endif | 31 | #endif |
@@ -41,6 +45,7 @@ struct symbol_conf symbol_conf = { | |||
41 | .exclude_other = true, | 45 | .exclude_other = true, |
42 | .use_modules = true, | 46 | .use_modules = true, |
43 | .try_vmlinux_path = true, | 47 | .try_vmlinux_path = true, |
48 | .symfs = "", | ||
44 | }; | 49 | }; |
45 | 50 | ||
46 | int dso__name_len(const struct dso *self) | 51 | int dso__name_len(const struct dso *self) |
@@ -92,7 +97,7 @@ static void symbols__fixup_end(struct rb_root *self) | |||
92 | prev = curr; | 97 | prev = curr; |
93 | curr = rb_entry(nd, struct symbol, rb_node); | 98 | curr = rb_entry(nd, struct symbol, rb_node); |
94 | 99 | ||
95 | if (prev->end == prev->start) | 100 | if (prev->end == prev->start && prev->end != curr->start) |
96 | prev->end = curr->start - 1; | 101 | prev->end = curr->start - 1; |
97 | } | 102 | } |
98 | 103 | ||
@@ -121,7 +126,7 @@ static void __map_groups__fixup_end(struct map_groups *self, enum map_type type) | |||
121 | * We still haven't the actual symbols, so guess the | 126 | * We still haven't the actual symbols, so guess the |
122 | * last map final address. | 127 | * last map final address. |
123 | */ | 128 | */ |
124 | curr->end = ~0UL; | 129 | curr->end = ~0ULL; |
125 | } | 130 | } |
126 | 131 | ||
127 | static void map_groups__fixup_end(struct map_groups *self) | 132 | static void map_groups__fixup_end(struct map_groups *self) |
@@ -425,16 +430,25 @@ size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp) | |||
425 | 430 | ||
426 | int kallsyms__parse(const char *filename, void *arg, | 431 | int kallsyms__parse(const char *filename, void *arg, |
427 | int (*process_symbol)(void *arg, const char *name, | 432 | int (*process_symbol)(void *arg, const char *name, |
428 | char type, u64 start)) | 433 | char type, u64 start, u64 end)) |
429 | { | 434 | { |
430 | char *line = NULL; | 435 | char *line = NULL; |
431 | size_t n; | 436 | size_t n; |
432 | int err = 0; | 437 | int err = -1; |
438 | u64 prev_start = 0; | ||
439 | char prev_symbol_type = 0; | ||
440 | char *prev_symbol_name; | ||
433 | FILE *file = fopen(filename, "r"); | 441 | FILE *file = fopen(filename, "r"); |
434 | 442 | ||
435 | if (file == NULL) | 443 | if (file == NULL) |
436 | goto out_failure; | 444 | goto out_failure; |
437 | 445 | ||
446 | prev_symbol_name = malloc(KSYM_NAME_LEN); | ||
447 | if (prev_symbol_name == NULL) | ||
448 | goto out_close; | ||
449 | |||
450 | err = 0; | ||
451 | |||
438 | while (!feof(file)) { | 452 | while (!feof(file)) { |
439 | u64 start; | 453 | u64 start; |
440 | int line_len, len; | 454 | int line_len, len; |
@@ -454,14 +468,33 @@ int kallsyms__parse(const char *filename, void *arg, | |||
454 | continue; | 468 | continue; |
455 | 469 | ||
456 | symbol_type = toupper(line[len]); | 470 | symbol_type = toupper(line[len]); |
457 | symbol_name = line + len + 2; | 471 | len += 2; |
472 | symbol_name = line + len; | ||
473 | len = line_len - len; | ||
458 | 474 | ||
459 | err = process_symbol(arg, symbol_name, symbol_type, start); | 475 | if (len >= KSYM_NAME_LEN) { |
460 | if (err) | 476 | err = -1; |
461 | break; | 477 | break; |
478 | } | ||
479 | |||
480 | if (prev_symbol_type) { | ||
481 | u64 end = start; | ||
482 | if (end != prev_start) | ||
483 | --end; | ||
484 | err = process_symbol(arg, prev_symbol_name, | ||
485 | prev_symbol_type, prev_start, end); | ||
486 | if (err) | ||
487 | break; | ||
488 | } | ||
489 | |||
490 | memcpy(prev_symbol_name, symbol_name, len + 1); | ||
491 | prev_symbol_type = symbol_type; | ||
492 | prev_start = start; | ||
462 | } | 493 | } |
463 | 494 | ||
495 | free(prev_symbol_name); | ||
464 | free(line); | 496 | free(line); |
497 | out_close: | ||
465 | fclose(file); | 498 | fclose(file); |
466 | return err; | 499 | return err; |
467 | 500 | ||
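Since kallsyms__parse() now reports an end address for every symbol (the next symbol's start minus one, as computed above), its callbacks grow a fifth parameter; a sketch of a trivial consumer, with a made-up callback name and the prototype taken from symbol.h below:

        static int count_kernel_syms(void *arg, const char *name __used,
                                     char type __used, u64 start __used,
                                     u64 end __used)
        {
                (*(int *)arg)++;
                return 0;
        }
        /* usage: int nr = 0; kallsyms__parse("/proc/kallsyms", &nr, count_kernel_syms); */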
@@ -483,7 +516,7 @@ static u8 kallsyms2elf_type(char type) | |||
483 | } | 516 | } |
484 | 517 | ||
485 | static int map__process_kallsym_symbol(void *arg, const char *name, | 518 | static int map__process_kallsym_symbol(void *arg, const char *name, |
486 | char type, u64 start) | 519 | char type, u64 start, u64 end) |
487 | { | 520 | { |
488 | struct symbol *sym; | 521 | struct symbol *sym; |
489 | struct process_kallsyms_args *a = arg; | 522 | struct process_kallsyms_args *a = arg; |
@@ -492,11 +525,8 @@ static int map__process_kallsym_symbol(void *arg, const char *name, | |||
492 | if (!symbol_type__is_a(type, a->map->type)) | 525 | if (!symbol_type__is_a(type, a->map->type)) |
493 | return 0; | 526 | return 0; |
494 | 527 | ||
495 | /* | 528 | sym = symbol__new(start, end - start + 1, |
496 | * Will fix up the end later, when we have all symbols sorted. | 529 | kallsyms2elf_type(type), name); |
497 | */ | ||
498 | sym = symbol__new(start, 0, kallsyms2elf_type(type), name); | ||
499 | |||
500 | if (sym == NULL) | 530 | if (sym == NULL) |
501 | return -ENOMEM; | 531 | return -ENOMEM; |
502 | /* | 532 | /* |
@@ -649,7 +679,6 @@ int dso__load_kallsyms(struct dso *self, const char *filename, | |||
649 | if (dso__load_all_kallsyms(self, filename, map) < 0) | 679 | if (dso__load_all_kallsyms(self, filename, map) < 0) |
650 | return -1; | 680 | return -1; |
651 | 681 | ||
652 | symbols__fixup_end(&self->symbols[map->type]); | ||
653 | if (self->kernel == DSO_TYPE_GUEST_KERNEL) | 682 | if (self->kernel == DSO_TYPE_GUEST_KERNEL) |
654 | self->origin = DSO__ORIG_GUEST_KERNEL; | 683 | self->origin = DSO__ORIG_GUEST_KERNEL; |
655 | else | 684 | else |
@@ -839,8 +868,11 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, | |||
839 | char sympltname[1024]; | 868 | char sympltname[1024]; |
840 | Elf *elf; | 869 | Elf *elf; |
841 | int nr = 0, symidx, fd, err = 0; | 870 | int nr = 0, symidx, fd, err = 0; |
871 | char name[PATH_MAX]; | ||
842 | 872 | ||
843 | fd = open(self->long_name, O_RDONLY); | 873 | snprintf(name, sizeof(name), "%s%s", |
874 | symbol_conf.symfs, self->long_name); | ||
875 | fd = open(name, O_RDONLY); | ||
844 | if (fd < 0) | 876 | if (fd < 0) |
845 | goto out; | 877 | goto out; |
846 | 878 | ||
@@ -1452,16 +1484,19 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) | |||
1452 | self->origin++) { | 1484 | self->origin++) { |
1453 | switch (self->origin) { | 1485 | switch (self->origin) { |
1454 | case DSO__ORIG_BUILD_ID_CACHE: | 1486 | case DSO__ORIG_BUILD_ID_CACHE: |
1455 | if (dso__build_id_filename(self, name, size) == NULL) | 1487 | /* skip the locally configured cache if a symfs is given */ |
1488 | if (symbol_conf.symfs[0] || | ||
1489 | (dso__build_id_filename(self, name, size) == NULL)) { | ||
1456 | continue; | 1490 | continue; |
1491 | } | ||
1457 | break; | 1492 | break; |
1458 | case DSO__ORIG_FEDORA: | 1493 | case DSO__ORIG_FEDORA: |
1459 | snprintf(name, size, "/usr/lib/debug%s.debug", | 1494 | snprintf(name, size, "%s/usr/lib/debug%s.debug", |
1460 | self->long_name); | 1495 | symbol_conf.symfs, self->long_name); |
1461 | break; | 1496 | break; |
1462 | case DSO__ORIG_UBUNTU: | 1497 | case DSO__ORIG_UBUNTU: |
1463 | snprintf(name, size, "/usr/lib/debug%s", | 1498 | snprintf(name, size, "%s/usr/lib/debug%s", |
1464 | self->long_name); | 1499 | symbol_conf.symfs, self->long_name); |
1465 | break; | 1500 | break; |
1466 | case DSO__ORIG_BUILDID: { | 1501 | case DSO__ORIG_BUILDID: { |
1467 | char build_id_hex[BUILD_ID_SIZE * 2 + 1]; | 1502 | char build_id_hex[BUILD_ID_SIZE * 2 + 1]; |
@@ -1473,19 +1508,26 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) | |||
1473 | sizeof(self->build_id), | 1508 | sizeof(self->build_id), |
1474 | build_id_hex); | 1509 | build_id_hex); |
1475 | snprintf(name, size, | 1510 | snprintf(name, size, |
1476 | "/usr/lib/debug/.build-id/%.2s/%s.debug", | 1511 | "%s/usr/lib/debug/.build-id/%.2s/%s.debug", |
1477 | build_id_hex, build_id_hex + 2); | 1512 | symbol_conf.symfs, build_id_hex, build_id_hex + 2); |
1478 | } | 1513 | } |
1479 | break; | 1514 | break; |
1480 | case DSO__ORIG_DSO: | 1515 | case DSO__ORIG_DSO: |
1481 | snprintf(name, size, "%s", self->long_name); | 1516 | snprintf(name, size, "%s%s", |
1517 | symbol_conf.symfs, self->long_name); | ||
1482 | break; | 1518 | break; |
1483 | case DSO__ORIG_GUEST_KMODULE: | 1519 | case DSO__ORIG_GUEST_KMODULE: |
1484 | if (map->groups && map->groups->machine) | 1520 | if (map->groups && map->groups->machine) |
1485 | root_dir = map->groups->machine->root_dir; | 1521 | root_dir = map->groups->machine->root_dir; |
1486 | else | 1522 | else |
1487 | root_dir = ""; | 1523 | root_dir = ""; |
1488 | snprintf(name, size, "%s%s", root_dir, self->long_name); | 1524 | snprintf(name, size, "%s%s%s", symbol_conf.symfs, |
1525 | root_dir, self->long_name); | ||
1526 | break; | ||
1527 | |||
1528 | case DSO__ORIG_KMODULE: | ||
1529 | snprintf(name, size, "%s%s", symbol_conf.symfs, | ||
1530 | self->long_name); | ||
1489 | break; | 1531 | break; |
1490 | 1532 | ||
1491 | default: | 1533 | default: |
@@ -1784,17 +1826,20 @@ int dso__load_vmlinux(struct dso *self, struct map *map, | |||
1784 | const char *vmlinux, symbol_filter_t filter) | 1826 | const char *vmlinux, symbol_filter_t filter) |
1785 | { | 1827 | { |
1786 | int err = -1, fd; | 1828 | int err = -1, fd; |
1829 | char symfs_vmlinux[PATH_MAX]; | ||
1787 | 1830 | ||
1788 | fd = open(vmlinux, O_RDONLY); | 1831 | snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s/%s", |
1832 | symbol_conf.symfs, vmlinux); | ||
1833 | fd = open(symfs_vmlinux, O_RDONLY); | ||
1789 | if (fd < 0) | 1834 | if (fd < 0) |
1790 | return -1; | 1835 | return -1; |
1791 | 1836 | ||
1792 | dso__set_loaded(self, map->type); | 1837 | dso__set_loaded(self, map->type); |
1793 | err = dso__load_sym(self, map, vmlinux, fd, filter, 0, 0); | 1838 | err = dso__load_sym(self, map, symfs_vmlinux, fd, filter, 0, 0); |
1794 | close(fd); | 1839 | close(fd); |
1795 | 1840 | ||
1796 | if (err > 0) | 1841 | if (err > 0) |
1797 | pr_debug("Using %s for symbols\n", vmlinux); | 1842 | pr_debug("Using %s for symbols\n", symfs_vmlinux); |
1798 | 1843 | ||
1799 | return err; | 1844 | return err; |
1800 | } | 1845 | } |
@@ -1836,8 +1881,8 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, | |||
1836 | const char *kallsyms_filename = NULL; | 1881 | const char *kallsyms_filename = NULL; |
1837 | char *kallsyms_allocated_filename = NULL; | 1882 | char *kallsyms_allocated_filename = NULL; |
1838 | /* | 1883 | /* |
1839 | * Step 1: if the user specified a vmlinux filename, use it and only | 1884 | * Step 1: if the user specified a kallsyms or vmlinux filename, use |
1840 | * it, reporting errors to the user if it cannot be used. | 1885 | * it and only it, reporting errors to the user if it cannot be used. |
1841 | * | 1886 | * |
1842 | * For instance, try to analyse an ARM perf.data file _without_ a | 1887 | * For instance, try to analyse an ARM perf.data file _without_ a |
1843 | * build-id, or if the user specifies the wrong path to the right | 1888 | * build-id, or if the user specifies the wrong path to the right |
@@ -1850,6 +1895,11 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, | |||
1850 | * validation in dso__load_vmlinux and will bail out if they don't | 1895 | * validation in dso__load_vmlinux and will bail out if they don't |
1851 | * match. | 1896 | * match. |
1852 | */ | 1897 | */ |
1898 | if (symbol_conf.kallsyms_name != NULL) { | ||
1899 | kallsyms_filename = symbol_conf.kallsyms_name; | ||
1900 | goto do_kallsyms; | ||
1901 | } | ||
1902 | |||
1853 | if (symbol_conf.vmlinux_name != NULL) { | 1903 | if (symbol_conf.vmlinux_name != NULL) { |
1854 | err = dso__load_vmlinux(self, map, | 1904 | err = dso__load_vmlinux(self, map, |
1855 | symbol_conf.vmlinux_name, filter); | 1905 | symbol_conf.vmlinux_name, filter); |
@@ -1867,6 +1917,10 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, | |||
1867 | goto out_fixup; | 1917 | goto out_fixup; |
1868 | } | 1918 | } |
1869 | 1919 | ||
1920 | /* do not try local files if a symfs was given */ | ||
1921 | if (symbol_conf.symfs[0] != 0) | ||
1922 | return -1; | ||
1923 | |||
1870 | /* | 1924 | /* |
1871 | * Say the kernel DSO was created when processing the build-id header table, | 1925 | * Say the kernel DSO was created when processing the build-id header table, |
1872 | * we have a build-id, so check if it is the same as the running kernel, | 1926 | * we have a build-id, so check if it is the same as the running kernel, |
@@ -2136,7 +2190,7 @@ struct process_args { | |||
2136 | }; | 2190 | }; |
2137 | 2191 | ||
2138 | static int symbol__in_kernel(void *arg, const char *name, | 2192 | static int symbol__in_kernel(void *arg, const char *name, |
2139 | char type __used, u64 start) | 2193 | char type __used, u64 start, u64 end __used) |
2140 | { | 2194 | { |
2141 | struct process_args *args = arg; | 2195 | struct process_args *args = arg; |
2142 | 2196 | ||
@@ -2257,9 +2311,6 @@ static int vmlinux_path__init(void) | |||
2257 | struct utsname uts; | 2311 | struct utsname uts; |
2258 | char bf[PATH_MAX]; | 2312 | char bf[PATH_MAX]; |
2259 | 2313 | ||
2260 | if (uname(&uts) < 0) | ||
2261 | return -1; | ||
2262 | |||
2263 | vmlinux_path = malloc(sizeof(char *) * 5); | 2314 | vmlinux_path = malloc(sizeof(char *) * 5); |
2264 | if (vmlinux_path == NULL) | 2315 | if (vmlinux_path == NULL) |
2265 | return -1; | 2316 | return -1; |
@@ -2272,6 +2323,14 @@ static int vmlinux_path__init(void) | |||
2272 | if (vmlinux_path[vmlinux_path__nr_entries] == NULL) | 2323 | if (vmlinux_path[vmlinux_path__nr_entries] == NULL) |
2273 | goto out_fail; | 2324 | goto out_fail; |
2274 | ++vmlinux_path__nr_entries; | 2325 | ++vmlinux_path__nr_entries; |
2326 | |||
2327 | /* only try running kernel version if no symfs was given */ | ||
2328 | if (symbol_conf.symfs[0] != 0) | ||
2329 | return 0; | ||
2330 | |||
2331 | if (uname(&uts) < 0) | ||
2332 | return -1; | ||
2333 | |||
2275 | snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release); | 2334 | snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release); |
2276 | vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); | 2335 | vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); |
2277 | if (vmlinux_path[vmlinux_path__nr_entries] == NULL) | 2336 | if (vmlinux_path[vmlinux_path__nr_entries] == NULL) |
@@ -2331,6 +2390,8 @@ static int setup_list(struct strlist **list, const char *list_str, | |||
2331 | 2390 | ||
2332 | int symbol__init(void) | 2391 | int symbol__init(void) |
2333 | { | 2392 | { |
2393 | const char *symfs; | ||
2394 | |||
2334 | if (symbol_conf.initialized) | 2395 | if (symbol_conf.initialized) |
2335 | return 0; | 2396 | return 0; |
2336 | 2397 | ||
@@ -2359,6 +2420,18 @@ int symbol__init(void) | |||
2359 | symbol_conf.sym_list_str, "symbol") < 0) | 2420 | symbol_conf.sym_list_str, "symbol") < 0) |
2360 | goto out_free_comm_list; | 2421 | goto out_free_comm_list; |
2361 | 2422 | ||
2423 | /* | ||
2424 | * A path to symbols of "/" is identical to "" | ||
2425 | * reset here for simplicity. | ||
2426 | */ | ||
2427 | symfs = realpath(symbol_conf.symfs, NULL); | ||
2428 | if (symfs == NULL) | ||
2429 | symfs = symbol_conf.symfs; | ||
2430 | if (strcmp(symfs, "/") == 0) | ||
2431 | symbol_conf.symfs = ""; | ||
2432 | if (symfs != symbol_conf.symfs) | ||
2433 | free((void *)symfs); | ||
2434 | |||
2362 | symbol_conf.initialized = true; | 2435 | symbol_conf.initialized = true; |
2363 | return 0; | 2436 | return 0; |
2364 | 2437 | ||
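Tying the symfs pieces together: a tool resolving symbols against an offline root filesystem could set the prefix before symbol__init(), after which every vmlinux/debuginfo/DSO open above is redirected under it (the mount point is illustrative; a symfs of "/" is reset to "" as shown):

        symbol_conf.symfs = "/mnt/target-rootfs";       /* assumed mount point */
        if (symbol__init() < 0)
                return -1;
        /* dso__load_vmlinux() etc. now open "/mnt/target-rootfs/<path>" */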
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 6c6eafdb932d..670cd1c88f54 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -72,6 +72,7 @@ struct symbol_conf { | |||
72 | show_cpu_utilization, | 72 | show_cpu_utilization, |
73 | initialized; | 73 | initialized; |
74 | const char *vmlinux_name, | 74 | const char *vmlinux_name, |
75 | *kallsyms_name, | ||
75 | *source_prefix, | 76 | *source_prefix, |
76 | *field_sep; | 77 | *field_sep; |
77 | const char *default_guest_vmlinux_name, | 78 | const char *default_guest_vmlinux_name, |
@@ -85,6 +86,7 @@ struct symbol_conf { | |||
85 | struct strlist *dso_list, | 86 | struct strlist *dso_list, |
86 | *comm_list, | 87 | *comm_list, |
87 | *sym_list; | 88 | *sym_list; |
89 | const char *symfs; | ||
88 | }; | 90 | }; |
89 | 91 | ||
90 | extern struct symbol_conf symbol_conf; | 92 | extern struct symbol_conf symbol_conf; |
@@ -215,7 +217,7 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits); | |||
215 | int build_id__sprintf(const u8 *self, int len, char *bf); | 217 | int build_id__sprintf(const u8 *self, int len, char *bf); |
216 | int kallsyms__parse(const char *filename, void *arg, | 218 | int kallsyms__parse(const char *filename, void *arg, |
217 | int (*process_symbol)(void *arg, const char *name, | 219 | int (*process_symbol)(void *arg, const char *name, |
218 | char type, u64 start)); | 220 | char type, u64 start, u64 end)); |
219 | 221 | ||
220 | void machine__destroy_kernel_maps(struct machine *self); | 222 | void machine__destroy_kernel_maps(struct machine *self); |
221 | int __machine__create_kernel_maps(struct machine *self, struct dso *kernel); | 223 | int __machine__create_kernel_maps(struct machine *self, struct dso *kernel); |
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 8c72d888e449..00f4eade2e3e 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c | |||
@@ -16,35 +16,50 @@ static int filter(const struct dirent *dir) | |||
16 | return 1; | 16 | return 1; |
17 | } | 17 | } |
18 | 18 | ||
19 | int find_all_tid(int pid, pid_t ** all_tid) | 19 | struct thread_map *thread_map__new_by_pid(pid_t pid) |
20 | { | 20 | { |
21 | struct thread_map *threads; | ||
21 | char name[256]; | 22 | char name[256]; |
22 | int items; | 23 | int items; |
23 | struct dirent **namelist = NULL; | 24 | struct dirent **namelist = NULL; |
24 | int ret = 0; | ||
25 | int i; | 25 | int i; |
26 | 26 | ||
27 | sprintf(name, "/proc/%d/task", pid); | 27 | sprintf(name, "/proc/%d/task", pid); |
28 | items = scandir(name, &namelist, filter, NULL); | 28 | items = scandir(name, &namelist, filter, NULL); |
29 | if (items <= 0) | 29 | if (items <= 0) |
30 | return -ENOENT; | 30 | return NULL; |
31 | *all_tid = malloc(sizeof(pid_t) * items); | ||
32 | if (!*all_tid) { | ||
33 | ret = -ENOMEM; | ||
34 | goto failure; | ||
35 | } | ||
36 | |||
37 | for (i = 0; i < items; i++) | ||
38 | (*all_tid)[i] = atoi(namelist[i]->d_name); | ||
39 | 31 | ||
40 | ret = items; | 32 | threads = malloc(sizeof(*threads) + sizeof(pid_t) * items); |
33 | if (threads != NULL) { | ||
34 | for (i = 0; i < items; i++) | ||
35 | threads->map[i] = atoi(namelist[i]->d_name); | ||
36 | threads->nr = items; | ||
37 | } | ||
41 | 38 | ||
42 | failure: | ||
43 | for (i=0; i<items; i++) | 39 | for (i=0; i<items; i++) |
44 | free(namelist[i]); | 40 | free(namelist[i]); |
45 | free(namelist); | 41 | free(namelist); |
46 | 42 | ||
47 | return ret; | 43 | return threads; |
44 | } | ||
45 | |||
46 | struct thread_map *thread_map__new_by_tid(pid_t tid) | ||
47 | { | ||
48 | struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t)); | ||
49 | |||
50 | if (threads != NULL) { | ||
51 | threads->map[0] = tid; | ||
52 | threads->nr = 1; | ||
53 | } | ||
54 | |||
55 | return threads; | ||
56 | } | ||
57 | |||
58 | struct thread_map *thread_map__new(pid_t pid, pid_t tid) | ||
59 | { | ||
60 | if (pid != -1) | ||
61 | return thread_map__new_by_pid(pid); | ||
62 | return thread_map__new_by_tid(tid); | ||
48 | } | 63 | } |
49 | 64 | ||
50 | static struct thread *thread__new(pid_t pid) | 65 | static struct thread *thread__new(pid_t pid) |
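The thread.c rewrite replaces find_all_tid() with constructors that return a struct thread_map. A minimal usage sketch, assuming the thread.h API added by this patch, builds a map of all tasks of a pid, walks it, and releases it; error handling is reduced to a NULL check.

#include <stdio.h>
#include <sys/types.h>
#include "util/thread.h"	/* include path as seen from tools/perf */

static void dump_tasks(pid_t pid)
{
	struct thread_map *threads = thread_map__new_by_pid(pid);
	int i;

	if (threads == NULL) {
		fprintf(stderr, "no tasks found for pid %d\n", pid);
		return;
	}

	for (i = 0; i < threads->nr; i++)
		printf("tid %d\n", threads->map[i]);

	thread_map__delete(threads);
}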
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 688500ff826f..d7574101054a 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h | |||
@@ -18,11 +18,24 @@ struct thread { | |||
18 | int comm_len; | 18 | int comm_len; |
19 | }; | 19 | }; |
20 | 20 | ||
21 | struct thread_map { | ||
22 | int nr; | ||
23 | int map[]; | ||
24 | }; | ||
25 | |||
21 | struct perf_session; | 26 | struct perf_session; |
22 | 27 | ||
23 | void thread__delete(struct thread *self); | 28 | void thread__delete(struct thread *self); |
24 | 29 | ||
25 | int find_all_tid(int pid, pid_t ** all_tid); | 30 | struct thread_map *thread_map__new_by_pid(pid_t pid); |
31 | struct thread_map *thread_map__new_by_tid(pid_t tid); | ||
32 | struct thread_map *thread_map__new(pid_t pid, pid_t tid); | ||
33 | |||
34 | static inline void thread_map__delete(struct thread_map *threads) | ||
35 | { | ||
36 | free(threads); | ||
37 | } | ||
38 | |||
26 | int thread__set_comm(struct thread *self, const char *comm); | 39 | int thread__set_comm(struct thread *self, const char *comm); |
27 | int thread__comm_len(struct thread *self); | 40 | int thread__comm_len(struct thread *self); |
28 | struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); | 41 | struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); |
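struct thread_map uses the single-allocation idiom: a small header followed by a C99 flexible array member, so one malloc() covers both and teardown is a single free(), which is why thread_map__delete() can be a static inline. A generic sketch of the pattern, with purely illustrative names, looks like this:

#include <stdlib.h>
#include <string.h>

/* Illustrative header-plus-payload struct; one allocation covers both. */
struct int_map {
	int nr;
	int map[];		/* C99 flexible array member */
};

static struct int_map *int_map__new(const int *values, int nr)
{
	struct int_map *m = malloc(sizeof(*m) + nr * sizeof(int));

	if (m != NULL) {
		m->nr = nr;
		memcpy(m->map, values, nr * sizeof(int));
	}
	return m;
}

/* Freeing the header frees the payload too. */
static void int_map__delete(struct int_map *m)
{
	free(m);
}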
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index b1572601286c..35729f4c40cb 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c | |||
@@ -34,11 +34,13 @@ | |||
34 | #include <ctype.h> | 34 | #include <ctype.h> |
35 | #include <errno.h> | 35 | #include <errno.h> |
36 | #include <stdbool.h> | 36 | #include <stdbool.h> |
37 | #include <linux/list.h> | ||
37 | #include <linux/kernel.h> | 38 | #include <linux/kernel.h> |
38 | 39 | ||
39 | #include "../perf.h" | 40 | #include "../perf.h" |
40 | #include "trace-event.h" | 41 | #include "trace-event.h" |
41 | #include "debugfs.h" | 42 | #include "debugfs.h" |
43 | #include "evsel.h" | ||
42 | 44 | ||
43 | #define VERSION "0.5" | 45 | #define VERSION "0.5" |
44 | 46 | ||
@@ -469,16 +471,17 @@ out: | |||
469 | } | 471 | } |
470 | 472 | ||
471 | static struct tracepoint_path * | 473 | static struct tracepoint_path * |
472 | get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) | 474 | get_tracepoints_path(struct list_head *pattrs) |
473 | { | 475 | { |
474 | struct tracepoint_path path, *ppath = &path; | 476 | struct tracepoint_path path, *ppath = &path; |
475 | int i, nr_tracepoints = 0; | 477 | struct perf_evsel *pos; |
478 | int nr_tracepoints = 0; | ||
476 | 479 | ||
477 | for (i = 0; i < nb_events; i++) { | 480 | list_for_each_entry(pos, pattrs, node) { |
478 | if (pattrs[i].type != PERF_TYPE_TRACEPOINT) | 481 | if (pos->attr.type != PERF_TYPE_TRACEPOINT) |
479 | continue; | 482 | continue; |
480 | ++nr_tracepoints; | 483 | ++nr_tracepoints; |
481 | ppath->next = tracepoint_id_to_path(pattrs[i].config); | 484 | ppath->next = tracepoint_id_to_path(pos->attr.config); |
482 | if (!ppath->next) | 485 | if (!ppath->next) |
483 | die("%s\n", "No memory to alloc tracepoints list"); | 486 | die("%s\n", "No memory to alloc tracepoints list"); |
484 | ppath = ppath->next; | 487 | ppath = ppath->next; |
@@ -487,21 +490,21 @@ get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) | |||
487 | return nr_tracepoints > 0 ? path.next : NULL; | 490 | return nr_tracepoints > 0 ? path.next : NULL; |
488 | } | 491 | } |
489 | 492 | ||
490 | bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events) | 493 | bool have_tracepoints(struct list_head *pattrs) |
491 | { | 494 | { |
492 | int i; | 495 | struct perf_evsel *pos; |
493 | 496 | ||
494 | for (i = 0; i < nb_events; i++) | 497 | list_for_each_entry(pos, pattrs, node) |
495 | if (pattrs[i].type == PERF_TYPE_TRACEPOINT) | 498 | if (pos->attr.type == PERF_TYPE_TRACEPOINT) |
496 | return true; | 499 | return true; |
497 | 500 | ||
498 | return false; | 501 | return false; |
499 | } | 502 | } |
500 | 503 | ||
501 | int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) | 504 | int read_tracing_data(int fd, struct list_head *pattrs) |
502 | { | 505 | { |
503 | char buf[BUFSIZ]; | 506 | char buf[BUFSIZ]; |
504 | struct tracepoint_path *tps = get_tracepoints_path(pattrs, nb_events); | 507 | struct tracepoint_path *tps = get_tracepoints_path(pattrs); |
505 | 508 | ||
506 | /* | 509 | /* |
507 | * What? No tracepoints? No sense writing anything here, bail out. | 510 | * What? No tracepoints? No sense writing anything here, bail out. |
@@ -545,14 +548,13 @@ int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) | |||
545 | return 0; | 548 | return 0; |
546 | } | 549 | } |
547 | 550 | ||
548 | ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs, | 551 | ssize_t read_tracing_data_size(int fd, struct list_head *pattrs) |
549 | int nb_events) | ||
550 | { | 552 | { |
551 | ssize_t size; | 553 | ssize_t size; |
552 | int err = 0; | 554 | int err = 0; |
553 | 555 | ||
554 | calc_data_size = 1; | 556 | calc_data_size = 1; |
555 | err = read_tracing_data(fd, pattrs, nb_events); | 557 | err = read_tracing_data(fd, pattrs); |
556 | size = calc_data_size - 1; | 558 | size = calc_data_size - 1; |
557 | calc_data_size = 0; | 559 | calc_data_size = 0; |
558 | 560 | ||
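With this hunk, callers hand read_tracing_data() a list_head of struct perf_evsel instead of a bare perf_event_attr array, and the helpers walk it with list_for_each_entry(). A small sketch of the same iteration style, assuming the tools/perf headers included above (the helper name is hypothetical):

#include <linux/list.h>
#include "../perf.h"
#include "evsel.h"

static int count_tracepoints(struct list_head *pattrs)
{
	struct perf_evsel *pos;
	int nr = 0;

	/* Same walk as get_tracepoints_path()/have_tracepoints() above. */
	list_for_each_entry(pos, pattrs, node)
		if (pos->attr.type == PERF_TYPE_TRACEPOINT)
			nr++;

	return nr;
}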
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index b3e86b1e4444..b5f12ca24d99 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h | |||
@@ -262,9 +262,8 @@ raw_field_value(struct event *event, const char *name, void *data); | |||
262 | void *raw_field_ptr(struct event *event, const char *name, void *data); | 262 | void *raw_field_ptr(struct event *event, const char *name, void *data); |
263 | unsigned long long eval_flag(const char *flag); | 263 | unsigned long long eval_flag(const char *flag); |
264 | 264 | ||
265 | int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); | 265 | int read_tracing_data(int fd, struct list_head *pattrs); |
266 | ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs, | 266 | ssize_t read_tracing_data_size(int fd, struct list_head *pattrs); |
267 | int nb_events); | ||
268 | 267 | ||
269 | /* taken from kernel/trace/trace.h */ | 268 | /* taken from kernel/trace/trace.h */ |
270 | enum trace_flag_type { | 269 | enum trace_flag_type { |
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c index 056c69521a38..7b5a8926624e 100644 --- a/tools/perf/util/ui/util.c +++ b/tools/perf/util/ui/util.c | |||
@@ -104,10 +104,24 @@ out_destroy_form: | |||
104 | return rc; | 104 | return rc; |
105 | } | 105 | } |
106 | 106 | ||
107 | static const char yes[] = "Yes", no[] = "No"; | 107 | static const char yes[] = "Yes", no[] = "No", |
108 | warning_str[] = "Warning!", ok[] = "Ok"; | ||
108 | 109 | ||
109 | bool ui__dialog_yesno(const char *msg) | 110 | bool ui__dialog_yesno(const char *msg) |
110 | { | 111 | { |
111 | /* newtWinChoice should really be accepting const char pointers... */ | 112 | /* newtWinChoice should really be accepting const char pointers... */ |
112 | return newtWinChoice(NULL, (char *)yes, (char *)no, (char *)msg) == 1; | 113 | return newtWinChoice(NULL, (char *)yes, (char *)no, (char *)msg) == 1; |
113 | } | 114 | } |
115 | |||
116 | void ui__warning(const char *format, ...) | ||
117 | { | ||
118 | va_list args; | ||
119 | |||
120 | va_start(args, format); | ||
121 | if (use_browser > 0) | ||
122 | newtWinMessagev((char *)warning_str, (char *)ok, | ||
123 | (char *)format, args); | ||
124 | else | ||
125 | vfprintf(stderr, format, args); | ||
126 | va_end(args); | ||
127 | } | ||
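ui__warning() accepts printf-style arguments and routes them either to a newt popup (when the TUI browser is active) or to stderr. A plain-stderr variant of the same varargs pattern is sketched below; report_warning() is only an illustrative name.

#include <stdarg.h>
#include <stdio.h>

static void report_warning(const char *format, ...)
{
	va_list args;

	va_start(args, format);
	vfprintf(stderr, format, args);	/* same fallback path as ui__warning() */
	va_end(args);
}

/* e.g.: report_warning("processing failed: %d\n", err); */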
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 214265674ddd..5b3ea49aa63e 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c | |||
@@ -114,3 +114,20 @@ unsigned long convert_unit(unsigned long value, char *unit) | |||
114 | 114 | ||
115 | return value; | 115 | return value; |
116 | } | 116 | } |
117 | |||
118 | int readn(int fd, void *buf, size_t n) | ||
119 | { | ||
120 | void *buf_start = buf; | ||
121 | |||
122 | while (n) { | ||
123 | int ret = read(fd, buf, n); | ||
124 | |||
125 | if (ret <= 0) | ||
126 | return ret; | ||
127 | |||
128 | n -= ret; | ||
129 | buf += ret; | ||
130 | } | ||
131 | |||
132 | return buf - buf_start; | ||
133 | } | ||
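readn() keeps calling read() until the requested byte count is satisfied, returning early only on error or EOF, so callers can treat a short return as failure. A usage sketch, with an illustrative path and buffer size (the include path assumes a caller inside tools/perf):

#include <fcntl.h>
#include <unistd.h>
#include "util/util.h"

static int slurp_header(const char *path)
{
	char buf[64];
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;

	/* readn() returns sizeof(buf) only if the whole buffer was filled. */
	if (readn(fd, buf, sizeof(buf)) != (int)sizeof(buf)) {
		close(fd);
		return -1;	/* short read: EOF or error */
	}

	close(fd);
	return 0;
}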
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 7562707ddd1c..e833f26f3bfc 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h | |||
@@ -265,6 +265,7 @@ void argv_free(char **argv); | |||
265 | bool strglobmatch(const char *str, const char *pat); | 265 | bool strglobmatch(const char *str, const char *pat); |
266 | bool strlazymatch(const char *str, const char *pat); | 266 | bool strlazymatch(const char *str, const char *pat); |
267 | unsigned long convert_unit(unsigned long value, char *unit); | 267 | unsigned long convert_unit(unsigned long value, char *unit); |
268 | int readn(int fd, void *buf, size_t size); | ||
268 | 269 | ||
269 | #define _STR(x) #x | 270 | #define _STR(x) #x |
270 | #define STR(x) _STR(x) | 271 | #define STR(x) _STR(x) |
diff --git a/tools/perf/util/xyarray.c b/tools/perf/util/xyarray.c new file mode 100644 index 000000000000..22afbf6c536a --- /dev/null +++ b/tools/perf/util/xyarray.c | |||
@@ -0,0 +1,20 @@ | |||
1 | #include "xyarray.h" | ||
2 | #include "util.h" | ||
3 | |||
4 | struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size) | ||
5 | { | ||
6 | size_t row_size = ylen * entry_size; | ||
7 | struct xyarray *xy = zalloc(sizeof(*xy) + xlen * row_size); | ||
8 | |||
9 | if (xy != NULL) { | ||
10 | xy->entry_size = entry_size; | ||
11 | xy->row_size = row_size; | ||
12 | } | ||
13 | |||
14 | return xy; | ||
15 | } | ||
16 | |||
17 | void xyarray__delete(struct xyarray *xy) | ||
18 | { | ||
19 | free(xy); | ||
20 | } | ||
diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h new file mode 100644 index 000000000000..c488a07275dd --- /dev/null +++ b/tools/perf/util/xyarray.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _PERF_XYARRAY_H_ | ||
2 | #define _PERF_XYARRAY_H_ 1 | ||
3 | |||
4 | #include <sys/types.h> | ||
5 | |||
6 | struct xyarray { | ||
7 | size_t row_size; | ||
8 | size_t entry_size; | ||
9 | char contents[]; | ||
10 | }; | ||
11 | |||
12 | struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size); | ||
13 | void xyarray__delete(struct xyarray *xy); | ||
14 | |||
15 | static inline void *xyarray__entry(struct xyarray *xy, int x, int y) | ||
16 | { | ||
17 | return &xy->contents[x * xy->row_size + y * xy->entry_size]; | ||
18 | } | ||
19 | |||
20 | #endif /* _PERF_XYARRAY_H_ */ | ||
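The new xyarray helper provides a zero-initialized two-dimensional table of fixed-size entries behind a single allocation, indexed by xyarray__entry(). A usage sketch follows; the dimensions and the stored type are only illustrative, and the include path assumes a caller inside tools/perf.

#include <stdio.h>
#include "util/xyarray.h"

int main(void)
{
	const int xlen = 4, ylen = 2;
	struct xyarray *table = xyarray__new(xlen, ylen, sizeof(int));
	int x, y;

	if (table == NULL)
		return 1;

	/* Entries start zeroed (zalloc); overwrite them via xyarray__entry(). */
	for (x = 0; x < xlen; x++)
		for (y = 0; y < ylen; y++)
			*(int *)xyarray__entry(table, x, y) = -1;

	printf("entry (0,0) = %d\n", *(int *)xyarray__entry(table, 0, 0));
	xyarray__delete(table);
	return 0;
}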