diff options
author | Jiri Olsa <jolsa@redhat.com> | 2012-11-09 19:46:45 -0500 |
---|---|---|
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2012-11-14 14:49:43 -0500 |
commit | 16d00fee703866c61c9006eff097952289335479 (patch) | |
tree | a9923a2095d37ea1a7f236ee33f22eb1d390ab75 /tools | |
parent | a65b9c62be044b7956022e2823c5f079cf35b069 (diff) |
perf tests: Move test__PERF_RECORD into separate object
Separating test__PERF_RECORD test from the builtin-test into perf-record
object.
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1352508412-16914-6-git-send-email-jolsa@redhat.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools')
-rw-r--r-- | tools/perf/Makefile | 1 | ||||
-rw-r--r-- | tools/perf/tests/builtin-test.c | 307 | ||||
-rw-r--r-- | tools/perf/tests/perf-record.c | 312 | ||||
-rw-r--r-- | tools/perf/tests/tests.h | 1 |
4 files changed, 314 insertions(+), 307 deletions(-)
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 337489e827ca..a2d6153a6d06 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -435,6 +435,7 @@ LIB_OBJS += $(OUTPUT)tests/vmlinux-kallsyms.o | |||
435 | LIB_OBJS += $(OUTPUT)tests/open-syscall.o | 435 | LIB_OBJS += $(OUTPUT)tests/open-syscall.o |
436 | LIB_OBJS += $(OUTPUT)tests/open-syscall-all-cpus.o | 436 | LIB_OBJS += $(OUTPUT)tests/open-syscall-all-cpus.o |
437 | LIB_OBJS += $(OUTPUT)tests/mmap-basic.o | 437 | LIB_OBJS += $(OUTPUT)tests/mmap-basic.o |
438 | LIB_OBJS += $(OUTPUT)tests/perf-record.o | ||
438 | LIB_OBJS += $(OUTPUT)tests/util.o | 439 | LIB_OBJS += $(OUTPUT)tests/util.o |
439 | 440 | ||
440 | BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o | 441 | BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o |
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 609f59295326..7cb3928d896a 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c | |||
@@ -31,313 +31,6 @@ | |||
31 | 31 | ||
32 | 32 | ||
33 | 33 | ||
34 | static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp) | ||
35 | { | ||
36 | int i, cpu = -1, nrcpus = 1024; | ||
37 | realloc: | ||
38 | CPU_ZERO(maskp); | ||
39 | |||
40 | if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) { | ||
41 | if (errno == EINVAL && nrcpus < (1024 << 8)) { | ||
42 | nrcpus = nrcpus << 2; | ||
43 | goto realloc; | ||
44 | } | ||
45 | perror("sched_getaffinity"); | ||
46 | return -1; | ||
47 | } | ||
48 | |||
49 | for (i = 0; i < nrcpus; i++) { | ||
50 | if (CPU_ISSET(i, maskp)) { | ||
51 | if (cpu == -1) | ||
52 | cpu = i; | ||
53 | else | ||
54 | CPU_CLR(i, maskp); | ||
55 | } | ||
56 | } | ||
57 | |||
58 | return cpu; | ||
59 | } | ||
60 | |||
61 | static int test__PERF_RECORD(void) | ||
62 | { | ||
63 | struct perf_record_opts opts = { | ||
64 | .target = { | ||
65 | .uid = UINT_MAX, | ||
66 | .uses_mmap = true, | ||
67 | }, | ||
68 | .no_delay = true, | ||
69 | .freq = 10, | ||
70 | .mmap_pages = 256, | ||
71 | }; | ||
72 | cpu_set_t cpu_mask; | ||
73 | size_t cpu_mask_size = sizeof(cpu_mask); | ||
74 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
75 | struct perf_evsel *evsel; | ||
76 | struct perf_sample sample; | ||
77 | const char *cmd = "sleep"; | ||
78 | const char *argv[] = { cmd, "1", NULL, }; | ||
79 | char *bname; | ||
80 | u64 prev_time = 0; | ||
81 | bool found_cmd_mmap = false, | ||
82 | found_libc_mmap = false, | ||
83 | found_vdso_mmap = false, | ||
84 | found_ld_mmap = false; | ||
85 | int err = -1, errs = 0, i, wakeups = 0; | ||
86 | u32 cpu; | ||
87 | int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; | ||
88 | |||
89 | if (evlist == NULL || argv == NULL) { | ||
90 | pr_debug("Not enough memory to create evlist\n"); | ||
91 | goto out; | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * We need at least one evsel in the evlist, use the default | ||
96 | * one: "cycles". | ||
97 | */ | ||
98 | err = perf_evlist__add_default(evlist); | ||
99 | if (err < 0) { | ||
100 | pr_debug("Not enough memory to create evsel\n"); | ||
101 | goto out_delete_evlist; | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * Create maps of threads and cpus to monitor. In this case | ||
106 | * we start with all threads and cpus (-1, -1) but then in | ||
107 | * perf_evlist__prepare_workload we'll fill in the only thread | ||
108 | * we're monitoring, the one forked there. | ||
109 | */ | ||
110 | err = perf_evlist__create_maps(evlist, &opts.target); | ||
111 | if (err < 0) { | ||
112 | pr_debug("Not enough memory to create thread/cpu maps\n"); | ||
113 | goto out_delete_evlist; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * Prepare the workload in argv[] to run, it'll fork it, and then wait | ||
118 | * for perf_evlist__start_workload() to exec it. This is done this way | ||
119 | * so that we have time to open the evlist (calling sys_perf_event_open | ||
120 | * on all the fds) and then mmap them. | ||
121 | */ | ||
122 | err = perf_evlist__prepare_workload(evlist, &opts, argv); | ||
123 | if (err < 0) { | ||
124 | pr_debug("Couldn't run the workload!\n"); | ||
125 | goto out_delete_evlist; | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Config the evsels, setting attr->comm on the first one, etc. | ||
130 | */ | ||
131 | evsel = perf_evlist__first(evlist); | ||
132 | evsel->attr.sample_type |= PERF_SAMPLE_CPU; | ||
133 | evsel->attr.sample_type |= PERF_SAMPLE_TID; | ||
134 | evsel->attr.sample_type |= PERF_SAMPLE_TIME; | ||
135 | perf_evlist__config_attrs(evlist, &opts); | ||
136 | |||
137 | err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); | ||
138 | if (err < 0) { | ||
139 | pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); | ||
140 | goto out_delete_evlist; | ||
141 | } | ||
142 | |||
143 | cpu = err; | ||
144 | |||
145 | /* | ||
146 | * So that we can check perf_sample.cpu on all the samples. | ||
147 | */ | ||
148 | if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { | ||
149 | pr_debug("sched_setaffinity: %s\n", strerror(errno)); | ||
150 | goto out_delete_evlist; | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * Call sys_perf_event_open on all the fds on all the evsels, | ||
155 | * grouping them if asked to. | ||
156 | */ | ||
157 | err = perf_evlist__open(evlist); | ||
158 | if (err < 0) { | ||
159 | pr_debug("perf_evlist__open: %s\n", strerror(errno)); | ||
160 | goto out_delete_evlist; | ||
161 | } | ||
162 | |||
163 | /* | ||
164 | * mmap the first fd on a given CPU and ask for events for the other | ||
165 | * fds in the same CPU to be injected in the same mmap ring buffer | ||
166 | * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)). | ||
167 | */ | ||
168 | err = perf_evlist__mmap(evlist, opts.mmap_pages, false); | ||
169 | if (err < 0) { | ||
170 | pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); | ||
171 | goto out_delete_evlist; | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * Now that all is properly set up, enable the events, they will | ||
176 | * count just on workload.pid, which will start... | ||
177 | */ | ||
178 | perf_evlist__enable(evlist); | ||
179 | |||
180 | /* | ||
181 | * Now! | ||
182 | */ | ||
183 | perf_evlist__start_workload(evlist); | ||
184 | |||
185 | while (1) { | ||
186 | int before = total_events; | ||
187 | |||
188 | for (i = 0; i < evlist->nr_mmaps; i++) { | ||
189 | union perf_event *event; | ||
190 | |||
191 | while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { | ||
192 | const u32 type = event->header.type; | ||
193 | const char *name = perf_event__name(type); | ||
194 | |||
195 | ++total_events; | ||
196 | if (type < PERF_RECORD_MAX) | ||
197 | nr_events[type]++; | ||
198 | |||
199 | err = perf_evlist__parse_sample(evlist, event, &sample); | ||
200 | if (err < 0) { | ||
201 | if (verbose) | ||
202 | perf_event__fprintf(event, stderr); | ||
203 | pr_debug("Couldn't parse sample\n"); | ||
204 | goto out_err; | ||
205 | } | ||
206 | |||
207 | if (verbose) { | ||
208 | pr_info("%" PRIu64" %d ", sample.time, sample.cpu); | ||
209 | perf_event__fprintf(event, stderr); | ||
210 | } | ||
211 | |||
212 | if (prev_time > sample.time) { | ||
213 | pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n", | ||
214 | name, prev_time, sample.time); | ||
215 | ++errs; | ||
216 | } | ||
217 | |||
218 | prev_time = sample.time; | ||
219 | |||
220 | if (sample.cpu != cpu) { | ||
221 | pr_debug("%s with unexpected cpu, expected %d, got %d\n", | ||
222 | name, cpu, sample.cpu); | ||
223 | ++errs; | ||
224 | } | ||
225 | |||
226 | if ((pid_t)sample.pid != evlist->workload.pid) { | ||
227 | pr_debug("%s with unexpected pid, expected %d, got %d\n", | ||
228 | name, evlist->workload.pid, sample.pid); | ||
229 | ++errs; | ||
230 | } | ||
231 | |||
232 | if ((pid_t)sample.tid != evlist->workload.pid) { | ||
233 | pr_debug("%s with unexpected tid, expected %d, got %d\n", | ||
234 | name, evlist->workload.pid, sample.tid); | ||
235 | ++errs; | ||
236 | } | ||
237 | |||
238 | if ((type == PERF_RECORD_COMM || | ||
239 | type == PERF_RECORD_MMAP || | ||
240 | type == PERF_RECORD_FORK || | ||
241 | type == PERF_RECORD_EXIT) && | ||
242 | (pid_t)event->comm.pid != evlist->workload.pid) { | ||
243 | pr_debug("%s with unexpected pid/tid\n", name); | ||
244 | ++errs; | ||
245 | } | ||
246 | |||
247 | if ((type == PERF_RECORD_COMM || | ||
248 | type == PERF_RECORD_MMAP) && | ||
249 | event->comm.pid != event->comm.tid) { | ||
250 | pr_debug("%s with different pid/tid!\n", name); | ||
251 | ++errs; | ||
252 | } | ||
253 | |||
254 | switch (type) { | ||
255 | case PERF_RECORD_COMM: | ||
256 | if (strcmp(event->comm.comm, cmd)) { | ||
257 | pr_debug("%s with unexpected comm!\n", name); | ||
258 | ++errs; | ||
259 | } | ||
260 | break; | ||
261 | case PERF_RECORD_EXIT: | ||
262 | goto found_exit; | ||
263 | case PERF_RECORD_MMAP: | ||
264 | bname = strrchr(event->mmap.filename, '/'); | ||
265 | if (bname != NULL) { | ||
266 | if (!found_cmd_mmap) | ||
267 | found_cmd_mmap = !strcmp(bname + 1, cmd); | ||
268 | if (!found_libc_mmap) | ||
269 | found_libc_mmap = !strncmp(bname + 1, "libc", 4); | ||
270 | if (!found_ld_mmap) | ||
271 | found_ld_mmap = !strncmp(bname + 1, "ld", 2); | ||
272 | } else if (!found_vdso_mmap) | ||
273 | found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]"); | ||
274 | break; | ||
275 | |||
276 | case PERF_RECORD_SAMPLE: | ||
277 | /* Just ignore samples for now */ | ||
278 | break; | ||
279 | default: | ||
280 | pr_debug("Unexpected perf_event->header.type %d!\n", | ||
281 | type); | ||
282 | ++errs; | ||
283 | } | ||
284 | } | ||
285 | } | ||
286 | |||
287 | /* | ||
288 | * We don't use poll here because at least at 3.1 times the | ||
289 | * PERF_RECORD_{!SAMPLE} events don't honour | ||
290 | * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does. | ||
291 | */ | ||
292 | if (total_events == before && false) | ||
293 | poll(evlist->pollfd, evlist->nr_fds, -1); | ||
294 | |||
295 | sleep(1); | ||
296 | if (++wakeups > 5) { | ||
297 | pr_debug("No PERF_RECORD_EXIT event!\n"); | ||
298 | break; | ||
299 | } | ||
300 | } | ||
301 | |||
302 | found_exit: | ||
303 | if (nr_events[PERF_RECORD_COMM] > 1) { | ||
304 | pr_debug("Excessive number of PERF_RECORD_COMM events!\n"); | ||
305 | ++errs; | ||
306 | } | ||
307 | |||
308 | if (nr_events[PERF_RECORD_COMM] == 0) { | ||
309 | pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd); | ||
310 | ++errs; | ||
311 | } | ||
312 | |||
313 | if (!found_cmd_mmap) { | ||
314 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd); | ||
315 | ++errs; | ||
316 | } | ||
317 | |||
318 | if (!found_libc_mmap) { | ||
319 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc"); | ||
320 | ++errs; | ||
321 | } | ||
322 | |||
323 | if (!found_ld_mmap) { | ||
324 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld"); | ||
325 | ++errs; | ||
326 | } | ||
327 | |||
328 | if (!found_vdso_mmap) { | ||
329 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]"); | ||
330 | ++errs; | ||
331 | } | ||
332 | out_err: | ||
333 | perf_evlist__munmap(evlist); | ||
334 | out_delete_evlist: | ||
335 | perf_evlist__delete(evlist); | ||
336 | out: | ||
337 | return (err < 0 || errs > 0) ? -1 : 0; | ||
338 | } | ||
339 | |||
340 | |||
341 | #if defined(__x86_64__) || defined(__i386__) | 34 | #if defined(__x86_64__) || defined(__i386__) |
342 | 35 | ||
343 | #define barrier() asm volatile("" ::: "memory") | 36 | #define barrier() asm volatile("" ::: "memory") |
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c new file mode 100644 index 000000000000..70e0d4421df8 --- /dev/null +++ b/tools/perf/tests/perf-record.c | |||
@@ -0,0 +1,312 @@ | |||
1 | #include <sched.h> | ||
2 | #include "evlist.h" | ||
3 | #include "evsel.h" | ||
4 | #include "perf.h" | ||
5 | #include "debug.h" | ||
6 | #include "tests.h" | ||
7 | |||
8 | static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp) | ||
9 | { | ||
10 | int i, cpu = -1, nrcpus = 1024; | ||
11 | realloc: | ||
12 | CPU_ZERO(maskp); | ||
13 | |||
14 | if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) { | ||
15 | if (errno == EINVAL && nrcpus < (1024 << 8)) { | ||
16 | nrcpus = nrcpus << 2; | ||
17 | goto realloc; | ||
18 | } | ||
19 | perror("sched_getaffinity"); | ||
20 | return -1; | ||
21 | } | ||
22 | |||
23 | for (i = 0; i < nrcpus; i++) { | ||
24 | if (CPU_ISSET(i, maskp)) { | ||
25 | if (cpu == -1) | ||
26 | cpu = i; | ||
27 | else | ||
28 | CPU_CLR(i, maskp); | ||
29 | } | ||
30 | } | ||
31 | |||
32 | return cpu; | ||
33 | } | ||
34 | |||
35 | int test__PERF_RECORD(void) | ||
36 | { | ||
37 | struct perf_record_opts opts = { | ||
38 | .target = { | ||
39 | .uid = UINT_MAX, | ||
40 | .uses_mmap = true, | ||
41 | }, | ||
42 | .no_delay = true, | ||
43 | .freq = 10, | ||
44 | .mmap_pages = 256, | ||
45 | }; | ||
46 | cpu_set_t cpu_mask; | ||
47 | size_t cpu_mask_size = sizeof(cpu_mask); | ||
48 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
49 | struct perf_evsel *evsel; | ||
50 | struct perf_sample sample; | ||
51 | const char *cmd = "sleep"; | ||
52 | const char *argv[] = { cmd, "1", NULL, }; | ||
53 | char *bname; | ||
54 | u64 prev_time = 0; | ||
55 | bool found_cmd_mmap = false, | ||
56 | found_libc_mmap = false, | ||
57 | found_vdso_mmap = false, | ||
58 | found_ld_mmap = false; | ||
59 | int err = -1, errs = 0, i, wakeups = 0; | ||
60 | u32 cpu; | ||
61 | int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; | ||
62 | |||
63 | if (evlist == NULL || argv == NULL) { | ||
64 | pr_debug("Not enough memory to create evlist\n"); | ||
65 | goto out; | ||
66 | } | ||
67 | |||
68 | /* | ||
69 | * We need at least one evsel in the evlist, use the default | ||
70 | * one: "cycles". | ||
71 | */ | ||
72 | err = perf_evlist__add_default(evlist); | ||
73 | if (err < 0) { | ||
74 | pr_debug("Not enough memory to create evsel\n"); | ||
75 | goto out_delete_evlist; | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * Create maps of threads and cpus to monitor. In this case | ||
80 | * we start with all threads and cpus (-1, -1) but then in | ||
81 | * perf_evlist__prepare_workload we'll fill in the only thread | ||
82 | * we're monitoring, the one forked there. | ||
83 | */ | ||
84 | err = perf_evlist__create_maps(evlist, &opts.target); | ||
85 | if (err < 0) { | ||
86 | pr_debug("Not enough memory to create thread/cpu maps\n"); | ||
87 | goto out_delete_evlist; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Prepare the workload in argv[] to run, it'll fork it, and then wait | ||
92 | * for perf_evlist__start_workload() to exec it. This is done this way | ||
93 | * so that we have time to open the evlist (calling sys_perf_event_open | ||
94 | * on all the fds) and then mmap them. | ||
95 | */ | ||
96 | err = perf_evlist__prepare_workload(evlist, &opts, argv); | ||
97 | if (err < 0) { | ||
98 | pr_debug("Couldn't run the workload!\n"); | ||
99 | goto out_delete_evlist; | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * Config the evsels, setting attr->comm on the first one, etc. | ||
104 | */ | ||
105 | evsel = perf_evlist__first(evlist); | ||
106 | evsel->attr.sample_type |= PERF_SAMPLE_CPU; | ||
107 | evsel->attr.sample_type |= PERF_SAMPLE_TID; | ||
108 | evsel->attr.sample_type |= PERF_SAMPLE_TIME; | ||
109 | perf_evlist__config_attrs(evlist, &opts); | ||
110 | |||
111 | err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); | ||
112 | if (err < 0) { | ||
113 | pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); | ||
114 | goto out_delete_evlist; | ||
115 | } | ||
116 | |||
117 | cpu = err; | ||
118 | |||
119 | /* | ||
120 | * So that we can check perf_sample.cpu on all the samples. | ||
121 | */ | ||
122 | if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { | ||
123 | pr_debug("sched_setaffinity: %s\n", strerror(errno)); | ||
124 | goto out_delete_evlist; | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * Call sys_perf_event_open on all the fds on all the evsels, | ||
129 | * grouping them if asked to. | ||
130 | */ | ||
131 | err = perf_evlist__open(evlist); | ||
132 | if (err < 0) { | ||
133 | pr_debug("perf_evlist__open: %s\n", strerror(errno)); | ||
134 | goto out_delete_evlist; | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * mmap the first fd on a given CPU and ask for events for the other | ||
139 | * fds in the same CPU to be injected in the same mmap ring buffer | ||
140 | * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)). | ||
141 | */ | ||
142 | err = perf_evlist__mmap(evlist, opts.mmap_pages, false); | ||
143 | if (err < 0) { | ||
144 | pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); | ||
145 | goto out_delete_evlist; | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * Now that all is properly set up, enable the events, they will | ||
150 | * count just on workload.pid, which will start... | ||
151 | */ | ||
152 | perf_evlist__enable(evlist); | ||
153 | |||
154 | /* | ||
155 | * Now! | ||
156 | */ | ||
157 | perf_evlist__start_workload(evlist); | ||
158 | |||
159 | while (1) { | ||
160 | int before = total_events; | ||
161 | |||
162 | for (i = 0; i < evlist->nr_mmaps; i++) { | ||
163 | union perf_event *event; | ||
164 | |||
165 | while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { | ||
166 | const u32 type = event->header.type; | ||
167 | const char *name = perf_event__name(type); | ||
168 | |||
169 | ++total_events; | ||
170 | if (type < PERF_RECORD_MAX) | ||
171 | nr_events[type]++; | ||
172 | |||
173 | err = perf_evlist__parse_sample(evlist, event, &sample); | ||
174 | if (err < 0) { | ||
175 | if (verbose) | ||
176 | perf_event__fprintf(event, stderr); | ||
177 | pr_debug("Couldn't parse sample\n"); | ||
178 | goto out_err; | ||
179 | } | ||
180 | |||
181 | if (verbose) { | ||
182 | pr_info("%" PRIu64" %d ", sample.time, sample.cpu); | ||
183 | perf_event__fprintf(event, stderr); | ||
184 | } | ||
185 | |||
186 | if (prev_time > sample.time) { | ||
187 | pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n", | ||
188 | name, prev_time, sample.time); | ||
189 | ++errs; | ||
190 | } | ||
191 | |||
192 | prev_time = sample.time; | ||
193 | |||
194 | if (sample.cpu != cpu) { | ||
195 | pr_debug("%s with unexpected cpu, expected %d, got %d\n", | ||
196 | name, cpu, sample.cpu); | ||
197 | ++errs; | ||
198 | } | ||
199 | |||
200 | if ((pid_t)sample.pid != evlist->workload.pid) { | ||
201 | pr_debug("%s with unexpected pid, expected %d, got %d\n", | ||
202 | name, evlist->workload.pid, sample.pid); | ||
203 | ++errs; | ||
204 | } | ||
205 | |||
206 | if ((pid_t)sample.tid != evlist->workload.pid) { | ||
207 | pr_debug("%s with unexpected tid, expected %d, got %d\n", | ||
208 | name, evlist->workload.pid, sample.tid); | ||
209 | ++errs; | ||
210 | } | ||
211 | |||
212 | if ((type == PERF_RECORD_COMM || | ||
213 | type == PERF_RECORD_MMAP || | ||
214 | type == PERF_RECORD_FORK || | ||
215 | type == PERF_RECORD_EXIT) && | ||
216 | (pid_t)event->comm.pid != evlist->workload.pid) { | ||
217 | pr_debug("%s with unexpected pid/tid\n", name); | ||
218 | ++errs; | ||
219 | } | ||
220 | |||
221 | if ((type == PERF_RECORD_COMM || | ||
222 | type == PERF_RECORD_MMAP) && | ||
223 | event->comm.pid != event->comm.tid) { | ||
224 | pr_debug("%s with different pid/tid!\n", name); | ||
225 | ++errs; | ||
226 | } | ||
227 | |||
228 | switch (type) { | ||
229 | case PERF_RECORD_COMM: | ||
230 | if (strcmp(event->comm.comm, cmd)) { | ||
231 | pr_debug("%s with unexpected comm!\n", name); | ||
232 | ++errs; | ||
233 | } | ||
234 | break; | ||
235 | case PERF_RECORD_EXIT: | ||
236 | goto found_exit; | ||
237 | case PERF_RECORD_MMAP: | ||
238 | bname = strrchr(event->mmap.filename, '/'); | ||
239 | if (bname != NULL) { | ||
240 | if (!found_cmd_mmap) | ||
241 | found_cmd_mmap = !strcmp(bname + 1, cmd); | ||
242 | if (!found_libc_mmap) | ||
243 | found_libc_mmap = !strncmp(bname + 1, "libc", 4); | ||
244 | if (!found_ld_mmap) | ||
245 | found_ld_mmap = !strncmp(bname + 1, "ld", 2); | ||
246 | } else if (!found_vdso_mmap) | ||
247 | found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]"); | ||
248 | break; | ||
249 | |||
250 | case PERF_RECORD_SAMPLE: | ||
251 | /* Just ignore samples for now */ | ||
252 | break; | ||
253 | default: | ||
254 | pr_debug("Unexpected perf_event->header.type %d!\n", | ||
255 | type); | ||
256 | ++errs; | ||
257 | } | ||
258 | } | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * We don't use poll here because at least at 3.1 times the | ||
263 | * PERF_RECORD_{!SAMPLE} events don't honour | ||
264 | * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does. | ||
265 | */ | ||
266 | if (total_events == before && false) | ||
267 | poll(evlist->pollfd, evlist->nr_fds, -1); | ||
268 | |||
269 | sleep(1); | ||
270 | if (++wakeups > 5) { | ||
271 | pr_debug("No PERF_RECORD_EXIT event!\n"); | ||
272 | break; | ||
273 | } | ||
274 | } | ||
275 | |||
276 | found_exit: | ||
277 | if (nr_events[PERF_RECORD_COMM] > 1) { | ||
278 | pr_debug("Excessive number of PERF_RECORD_COMM events!\n"); | ||
279 | ++errs; | ||
280 | } | ||
281 | |||
282 | if (nr_events[PERF_RECORD_COMM] == 0) { | ||
283 | pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd); | ||
284 | ++errs; | ||
285 | } | ||
286 | |||
287 | if (!found_cmd_mmap) { | ||
288 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd); | ||
289 | ++errs; | ||
290 | } | ||
291 | |||
292 | if (!found_libc_mmap) { | ||
293 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc"); | ||
294 | ++errs; | ||
295 | } | ||
296 | |||
297 | if (!found_ld_mmap) { | ||
298 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld"); | ||
299 | ++errs; | ||
300 | } | ||
301 | |||
302 | if (!found_vdso_mmap) { | ||
303 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]"); | ||
304 | ++errs; | ||
305 | } | ||
306 | out_err: | ||
307 | perf_evlist__munmap(evlist); | ||
308 | out_delete_evlist: | ||
309 | perf_evlist__delete(evlist); | ||
310 | out: | ||
311 | return (err < 0 || errs > 0) ? -1 : 0; | ||
312 | } | ||
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index 1a925ddeae7a..374b039dd22c 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h | |||
@@ -6,6 +6,7 @@ int test__vmlinux_matches_kallsyms(void); | |||
6 | int test__open_syscall_event(void); | 6 | int test__open_syscall_event(void); |
7 | int test__open_syscall_event_on_all_cpus(void); | 7 | int test__open_syscall_event_on_all_cpus(void); |
8 | int test__basic_mmap(void); | 8 | int test__basic_mmap(void); |
9 | int test__PERF_RECORD(void); | ||
9 | 10 | ||
10 | /* Util */ | 11 | /* Util */ |
11 | int trace_event__id(const char *evname); | 12 | int trace_event__id(const char *evname); |