Diffstat (limited to 'tools/perf/tests/builtin-test.c')
-rw-r--r-- | tools/perf/tests/builtin-test.c | 1559
1 file changed, 1559 insertions, 0 deletions
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
new file mode 100644
index 000000000000..f6c642415c44
--- /dev/null
+++ b/tools/perf/tests/builtin-test.c
@@ -0,0 +1,1559 @@
1 | /* | ||
2 | * builtin-test.c | ||
3 | * | ||
4 | * Builtin regression testing command: ever-growing number of sanity tests | ||
5 | */ | ||
6 | #include "builtin.h" | ||
7 | |||
8 | #include "util/cache.h" | ||
9 | #include "util/color.h" | ||
10 | #include "util/debug.h" | ||
11 | #include "util/debugfs.h" | ||
12 | #include "util/evlist.h" | ||
13 | #include "util/parse-options.h" | ||
14 | #include "util/parse-events.h" | ||
15 | #include "util/symbol.h" | ||
16 | #include "util/thread_map.h" | ||
17 | #include "util/pmu.h" | ||
18 | #include "event-parse.h" | ||
19 | #include "../../include/linux/hw_breakpoint.h" | ||
20 | |||
21 | #include <sys/mman.h> | ||
22 | |||
23 | static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused, | ||
24 | struct symbol *sym) | ||
25 | { | ||
26 | bool *visited = symbol__priv(sym); | ||
27 | *visited = true; | ||
28 | return 0; | ||
29 | } | ||
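/*
 * Note: symbol__priv() returns the per-symbol private area reserved via
 * symbol_conf.priv_size, which cmd_test() below sets to sizeof(int)
 * before calling symbol__init(); here it is used as a "visited" flag.
 * A minimal sketch of that contract:
 *
 *	symbol_conf.priv_size = sizeof(int);
 *	symbol__init();
 *	...
 *	bool *visited = symbol__priv(sym);
 */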
30 | |||
31 | static int test__vmlinux_matches_kallsyms(void) | ||
32 | { | ||
33 | int err = -1; | ||
34 | struct rb_node *nd; | ||
35 | struct symbol *sym; | ||
36 | struct map *kallsyms_map, *vmlinux_map; | ||
37 | struct machine kallsyms, vmlinux; | ||
38 | enum map_type type = MAP__FUNCTION; | ||
39 | struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", }; | ||
40 | |||
41 | /* | ||
42 | * Step 1: | ||
43 | * | ||
44 | * Init the machines that will hold the kernel and module maps, obtained | ||
45 | * both from vmlinux + .ko files and from /proc/kallsyms split by module. | ||
46 | */ | ||
47 | machine__init(&kallsyms, "", HOST_KERNEL_ID); | ||
48 | machine__init(&vmlinux, "", HOST_KERNEL_ID); | ||
49 | |||
50 | /* | ||
51 | * Step 2: | ||
52 | * | ||
53 | * Create the kernel maps for kallsyms and the DSO where we will then | ||
54 | * load /proc/kallsyms. Also create the modules maps from /proc/modules | ||
55 | * and find the .ko files that match them in /lib/modules/`uname -r`/. | ||
56 | */ | ||
57 | if (machine__create_kernel_maps(&kallsyms) < 0) { | ||
58 | pr_debug("machine__create_kernel_maps "); | ||
59 | return -1; | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * Step 3: | ||
64 | * | ||
65 | * Load and split /proc/kallsyms into multiple maps, one per module. | ||
66 | */ | ||
67 | if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) { | ||
68 | pr_debug("dso__load_kallsyms "); | ||
69 | goto out; | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * Step 4: | ||
74 | * | ||
75 | * kallsyms will be internally sorted by name, on demand, so that we can | ||
76 | * find the reference relocation symbol, i.e. the symbol we will use | ||
77 | * to see if the running kernel was relocated, by checking if it has the | ||
78 | * same value in the vmlinux file we load. | ||
79 | */ | ||
80 | kallsyms_map = machine__kernel_map(&kallsyms, type); | ||
81 | |||
82 | sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL); | ||
83 | if (sym == NULL) { | ||
84 | pr_debug("dso__find_symbol_by_name "); | ||
85 | goto out; | ||
86 | } | ||
87 | |||
88 | ref_reloc_sym.addr = sym->start; | ||
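	/*
	 * Illustrative only: if _stext sits at, say, 0xffffffff81000000 in
	 * /proc/kallsyms but the vmlinux loaded below records a different
	 * unrelocated address for it, the running kernel was relocated,
	 * and the ->[un]map_ip fixups set up in step 6 compensate for
	 * exactly that delta.
	 */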
89 | |||
90 | /* | ||
91 | * Step 5: | ||
92 | * | ||
93 | * Now repeat step 2, this time for the vmlinux file we'll auto-locate. | ||
94 | */ | ||
95 | if (machine__create_kernel_maps(&vmlinux) < 0) { | ||
96 | pr_debug("machine__create_kernel_maps "); | ||
97 | goto out; | ||
98 | } | ||
99 | |||
100 | vmlinux_map = machine__kernel_map(&vmlinux, type); | ||
101 | map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym; | ||
102 | |||
103 | /* | ||
104 | * Step 6: | ||
105 | * | ||
106 | * Locate a vmlinux file in the vmlinux path that has a buildid that | ||
107 | * matches the one of the running kernel. | ||
108 | * | ||
109 | * While doing that, look for the ref reloc symbol; if we find it, | ||
110 | * we'll have its ref_reloc_symbol.unrelocated_addr, and then | ||
111 | * maps__reloc_vmlinux will notice and set the proper ->[un]map_ip | ||
112 | * routines to fix up the symbols. | ||
113 | */ | ||
114 | if (machine__load_vmlinux_path(&vmlinux, type, | ||
115 | vmlinux_matches_kallsyms_filter) <= 0) { | ||
116 | pr_debug("machine__load_vmlinux_path "); | ||
117 | goto out; | ||
118 | } | ||
119 | |||
120 | err = 0; | ||
121 | /* | ||
122 | * Step 7: | ||
123 | * | ||
124 | * Now look at the symbols in the vmlinux DSO and check if we find all of them | ||
125 | * in the kallsyms dso. For the ones that are in both, check their names and | ||
126 | * end addresses too. | ||
127 | */ | ||
128 | for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) { | ||
129 | struct symbol *pair, *first_pair; | ||
130 | bool backwards = true; | ||
131 | |||
132 | sym = rb_entry(nd, struct symbol, rb_node); | ||
133 | |||
134 | if (sym->start == sym->end) | ||
135 | continue; | ||
136 | |||
137 | first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL); | ||
138 | pair = first_pair; | ||
139 | |||
140 | if (pair && pair->start == sym->start) { | ||
141 | next_pair: | ||
142 | if (strcmp(sym->name, pair->name) == 0) { | ||
143 | /* | ||
144 | * kallsyms doesn't have symbol end addresses, so we | ||
145 | * set each one using the next symbol's start - 1; | ||
146 | * in some cases we get this up to a page | ||
147 | * wrong. trace_kmalloc, when I was developing | ||
148 | * this code, was one such example, 2106 bytes | ||
149 | * off the real size. More than that and we | ||
150 | * _really_ have a problem. | ||
151 | */ | ||
152 | s64 skew = sym->end - pair->end; | ||
153 | if (llabs(skew) < page_size) | ||
154 | continue; | ||
155 | |||
156 | pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n", | ||
157 | sym->start, sym->name, sym->end, pair->end); | ||
158 | } else { | ||
159 | struct rb_node *nnd; | ||
160 | detour: | ||
161 | nnd = backwards ? rb_prev(&pair->rb_node) : | ||
162 | rb_next(&pair->rb_node); | ||
163 | if (nnd) { | ||
164 | struct symbol *next = rb_entry(nnd, struct symbol, rb_node); | ||
165 | |||
166 | if (next->start == sym->start) { | ||
167 | pair = next; | ||
168 | goto next_pair; | ||
169 | } | ||
170 | } | ||
171 | |||
172 | if (backwards) { | ||
173 | backwards = false; | ||
174 | pair = first_pair; | ||
175 | goto detour; | ||
176 | } | ||
177 | |||
178 | pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n", | ||
179 | sym->start, sym->name, pair->name); | ||
180 | } | ||
181 | } else | ||
182 | pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name); | ||
183 | |||
184 | err = -1; | ||
185 | } | ||
186 | |||
187 | if (!verbose) | ||
188 | goto out; | ||
189 | |||
190 | pr_info("Maps only in vmlinux:\n"); | ||
191 | |||
192 | for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) { | ||
193 | struct map *pos = rb_entry(nd, struct map, rb_node), *pair; | ||
194 | /* | ||
195 | * If it is the kernel, kallsyms always names the map "[kernel.kallsyms]", | ||
196 | * while vmlinux carries the path of the vmlinux file being used, | ||
197 | * so use the short name, less descriptive but the same in both | ||
198 | * cases ("[kernel]"). | ||
199 | */ | ||
200 | pair = map_groups__find_by_name(&kallsyms.kmaps, type, | ||
201 | (pos->dso->kernel ? | ||
202 | pos->dso->short_name : | ||
203 | pos->dso->name)); | ||
204 | if (pair) | ||
205 | pair->priv = 1; | ||
206 | else | ||
207 | map__fprintf(pos, stderr); | ||
208 | } | ||
209 | |||
210 | pr_info("Maps in vmlinux with a different name in kallsyms:\n"); | ||
211 | |||
212 | for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) { | ||
213 | struct map *pos = rb_entry(nd, struct map, rb_node), *pair; | ||
214 | |||
215 | pair = map_groups__find(&kallsyms.kmaps, type, pos->start); | ||
216 | if (pair == NULL || pair->priv) | ||
217 | continue; | ||
218 | |||
219 | if (pair->start == pos->start) { | ||
220 | pair->priv = 1; | ||
221 | pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as", | ||
222 | pos->start, pos->end, pos->pgoff, pos->dso->name); | ||
223 | if (pos->pgoff != pair->pgoff || pos->end != pair->end) | ||
224 | pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "", | ||
225 | pair->start, pair->end, pair->pgoff); | ||
226 | pr_info(" %s\n", pair->dso->name); | ||
228 | } | ||
229 | } | ||
230 | |||
231 | pr_info("Maps only in kallsyms:\n"); | ||
232 | |||
233 | for (nd = rb_first(&kallsyms.kmaps.maps[type]); | ||
234 | nd; nd = rb_next(nd)) { | ||
235 | struct map *pos = rb_entry(nd, struct map, rb_node); | ||
236 | |||
237 | if (!pos->priv) | ||
238 | map__fprintf(pos, stderr); | ||
239 | } | ||
240 | out: | ||
241 | return err; | ||
242 | } | ||
243 | |||
244 | #include "util/cpumap.h" | ||
245 | #include "util/evsel.h" | ||
246 | #include <sys/types.h> | ||
247 | |||
248 | static int trace_event__id(const char *evname) | ||
249 | { | ||
250 | char *filename; | ||
251 | int err = -1, fd; | ||
252 | |||
253 | if (asprintf(&filename, | ||
254 | "%s/syscalls/%s/id", | ||
255 | tracing_events_path, evname) < 0) | ||
256 | return -1; | ||
257 | |||
258 | fd = open(filename, O_RDONLY); | ||
259 | if (fd >= 0) { | ||
260 | char id[16]; | ||
261 | if (read(fd, id, sizeof(id)) > 0) | ||
262 | err = atoi(id); | ||
263 | close(fd); | ||
264 | } | ||
265 | |||
266 | free(filename); | ||
267 | return err; | ||
268 | } | ||
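/*
 * E.g. for "sys_enter_open" this typically ends up reading
 * /sys/kernel/debug/tracing/events/syscalls/sys_enter_open/id
 * (built from tracing_events_path), returning the numeric tracepoint
 * id that perf_event_attr.config expects for PERF_TYPE_TRACEPOINT.
 */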
269 | |||
270 | static int test__open_syscall_event(void) | ||
271 | { | ||
272 | int err = -1, fd; | ||
273 | struct thread_map *threads; | ||
274 | struct perf_evsel *evsel; | ||
275 | struct perf_event_attr attr; | ||
276 | unsigned int nr_open_calls = 111, i; | ||
277 | int id = trace_event__id("sys_enter_open"); | ||
278 | |||
279 | if (id < 0) { | ||
280 | pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); | ||
281 | return -1; | ||
282 | } | ||
283 | |||
284 | threads = thread_map__new(-1, getpid(), UINT_MAX); | ||
285 | if (threads == NULL) { | ||
286 | pr_debug("thread_map__new\n"); | ||
287 | return -1; | ||
288 | } | ||
289 | |||
290 | memset(&attr, 0, sizeof(attr)); | ||
291 | attr.type = PERF_TYPE_TRACEPOINT; | ||
292 | attr.config = id; | ||
293 | evsel = perf_evsel__new(&attr, 0); | ||
294 | if (evsel == NULL) { | ||
295 | pr_debug("perf_evsel__new\n"); | ||
296 | goto out_thread_map_delete; | ||
297 | } | ||
298 | |||
299 | if (perf_evsel__open_per_thread(evsel, threads) < 0) { | ||
300 | pr_debug("failed to open counter: %s, " | ||
301 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | ||
302 | strerror(errno)); | ||
303 | goto out_evsel_delete; | ||
304 | } | ||
305 | |||
306 | for (i = 0; i < nr_open_calls; ++i) { | ||
307 | fd = open("/etc/passwd", O_RDONLY); | ||
308 | close(fd); | ||
309 | } | ||
310 | |||
311 | if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) { | ||
312 | pr_debug("perf_evsel__read_on_cpu\n"); | ||
313 | goto out_close_fd; | ||
314 | } | ||
315 | |||
316 | if (evsel->counts->cpu[0].val != nr_open_calls) { | ||
317 | pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n", | ||
318 | nr_open_calls, evsel->counts->cpu[0].val); | ||
319 | goto out_close_fd; | ||
320 | } | ||
321 | |||
322 | err = 0; | ||
323 | out_close_fd: | ||
324 | perf_evsel__close_fd(evsel, 1, threads->nr); | ||
325 | out_evsel_delete: | ||
326 | perf_evsel__delete(evsel); | ||
327 | out_thread_map_delete: | ||
328 | thread_map__delete(threads); | ||
329 | return err; | ||
330 | } | ||
331 | |||
332 | #include <sched.h> | ||
333 | |||
334 | static int test__open_syscall_event_on_all_cpus(void) | ||
335 | { | ||
336 | int err = -1, fd, cpu; | ||
337 | struct thread_map *threads; | ||
338 | struct cpu_map *cpus; | ||
339 | struct perf_evsel *evsel; | ||
340 | struct perf_event_attr attr; | ||
341 | unsigned int nr_open_calls = 111, i; | ||
342 | cpu_set_t cpu_set; | ||
343 | int id = trace_event__id("sys_enter_open"); | ||
344 | |||
345 | if (id < 0) { | ||
346 | pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); | ||
347 | return -1; | ||
348 | } | ||
349 | |||
350 | threads = thread_map__new(-1, getpid(), UINT_MAX); | ||
351 | if (threads == NULL) { | ||
352 | pr_debug("thread_map__new\n"); | ||
353 | return -1; | ||
354 | } | ||
355 | |||
356 | cpus = cpu_map__new(NULL); | ||
357 | if (cpus == NULL) { | ||
358 | pr_debug("cpu_map__new\n"); | ||
359 | goto out_thread_map_delete; | ||
360 | } | ||
361 | |||
362 | |||
363 | CPU_ZERO(&cpu_set); | ||
364 | |||
365 | memset(&attr, 0, sizeof(attr)); | ||
366 | attr.type = PERF_TYPE_TRACEPOINT; | ||
367 | attr.config = id; | ||
368 | evsel = perf_evsel__new(&attr, 0); | ||
369 | if (evsel == NULL) { | ||
370 | pr_debug("perf_evsel__new\n"); | ||
371 | goto out_thread_map_delete; | ||
372 | } | ||
373 | |||
374 | if (perf_evsel__open(evsel, cpus, threads) < 0) { | ||
375 | pr_debug("failed to open counter: %s, " | ||
376 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | ||
377 | strerror(errno)); | ||
378 | goto out_evsel_delete; | ||
379 | } | ||
380 | |||
381 | for (cpu = 0; cpu < cpus->nr; ++cpu) { | ||
382 | unsigned int ncalls = nr_open_calls + cpu; | ||
383 | /* | ||
384 | * XXX eventually lift this restriction in a way that | ||
385 | * keeps perf building on older glibc installations | ||
386 | * without CPU_ALLOC. 1024 cpus in 2010 still seems | ||
387 | * a reasonable upper limit though :-) | ||
388 | */ | ||
389 | if (cpus->map[cpu] >= CPU_SETSIZE) { | ||
390 | pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); | ||
391 | continue; | ||
392 | } | ||
393 | |||
394 | CPU_SET(cpus->map[cpu], &cpu_set); | ||
395 | if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { | ||
396 | pr_debug("sched_setaffinity() failed on CPU %d: %s ", | ||
397 | cpus->map[cpu], | ||
398 | strerror(errno)); | ||
399 | goto out_close_fd; | ||
400 | } | ||
401 | for (i = 0; i < ncalls; ++i) { | ||
402 | fd = open("/etc/passwd", O_RDONLY); | ||
403 | close(fd); | ||
404 | } | ||
405 | CPU_CLR(cpus->map[cpu], &cpu_set); | ||
406 | } | ||
407 | |||
408 | /* | ||
409 | * Here we need to explicitly preallocate the counts, because if | ||
410 | * we use the auto allocation it will allocate just for 1 cpu, | ||
411 | * as we start at cpu 0. | ||
412 | */ | ||
413 | if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) { | ||
414 | pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr); | ||
415 | goto out_close_fd; | ||
416 | } | ||
417 | |||
418 | err = 0; | ||
419 | |||
420 | for (cpu = 0; cpu < cpus->nr; ++cpu) { | ||
421 | unsigned int expected; | ||
422 | |||
423 | if (cpus->map[cpu] >= CPU_SETSIZE) | ||
424 | continue; | ||
425 | |||
426 | if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) { | ||
427 | pr_debug("perf_evsel__read_on_cpu\n"); | ||
428 | err = -1; | ||
429 | break; | ||
430 | } | ||
431 | |||
432 | expected = nr_open_calls + cpu; | ||
433 | if (evsel->counts->cpu[cpu].val != expected) { | ||
434 | pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n", | ||
435 | expected, cpus->map[cpu], evsel->counts->cpu[cpu].val); | ||
436 | err = -1; | ||
437 | } | ||
438 | } | ||
439 | |||
440 | out_close_fd: | ||
441 | perf_evsel__close_fd(evsel, 1, threads->nr); | ||
442 | out_evsel_delete: | ||
443 | perf_evsel__delete(evsel); | ||
444 | out_thread_map_delete: | ||
445 | thread_map__delete(threads); | ||
446 | return err; | ||
447 | } | ||
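/*
 * Design note: each CPU is driven with nr_open_calls + cpu opens
 * (111 on cpu 0, 112 on cpu 1, ...), so a per-cpu readout that got
 * the cpus mixed up fails on the differing expected counts, not
 * just on a wrong total.
 */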
448 | |||
449 | /* | ||
450 | * This test will generate random numbers of calls to some getpid-style | ||
451 | * syscalls, then establish an mmap for a group of events that are created | ||
452 | * to monitor those syscalls. | ||
453 | * | ||
454 | * It will receive the events via mmap and use the PERF_SAMPLE_ID generated | ||
455 | * sample.id field to map each one back to its respective perf_evsel instance. | ||
456 | * | ||
457 | * Then it checks if the number of syscalls reported as perf events by | ||
458 | * the kernel corresponds to the number of syscalls made. | ||
459 | */ | ||
460 | static int test__basic_mmap(void) | ||
461 | { | ||
462 | int err = -1; | ||
463 | union perf_event *event; | ||
464 | struct thread_map *threads; | ||
465 | struct cpu_map *cpus; | ||
466 | struct perf_evlist *evlist; | ||
467 | struct perf_event_attr attr = { | ||
468 | .type = PERF_TYPE_TRACEPOINT, | ||
469 | .read_format = PERF_FORMAT_ID, | ||
470 | .sample_type = PERF_SAMPLE_ID, | ||
471 | .watermark = 0, | ||
472 | }; | ||
473 | cpu_set_t cpu_set; | ||
474 | const char *syscall_names[] = { "getsid", "getppid", "getpgrp", | ||
475 | "getpgid", }; | ||
476 | pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp, | ||
477 | (void*)getpgid }; | ||
478 | #define nsyscalls ARRAY_SIZE(syscall_names) | ||
479 | int ids[nsyscalls]; | ||
480 | unsigned int nr_events[nsyscalls], | ||
481 | expected_nr_events[nsyscalls], i, j; | ||
482 | struct perf_evsel *evsels[nsyscalls], *evsel; | ||
483 | |||
484 | for (i = 0; i < nsyscalls; ++i) { | ||
485 | char name[64]; | ||
486 | |||
487 | snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); | ||
488 | ids[i] = trace_event__id(name); | ||
489 | if (ids[i] < 0) { | ||
490 | pr_debug("Is debugfs mounted on /sys/kernel/debug?\n"); | ||
491 | return -1; | ||
492 | } | ||
493 | nr_events[i] = 0; | ||
494 | expected_nr_events[i] = random() % 257; | ||
495 | } | ||
496 | |||
497 | threads = thread_map__new(-1, getpid(), UINT_MAX); | ||
498 | if (threads == NULL) { | ||
499 | pr_debug("thread_map__new\n"); | ||
500 | return -1; | ||
501 | } | ||
502 | |||
503 | cpus = cpu_map__new(NULL); | ||
504 | if (cpus == NULL) { | ||
505 | pr_debug("cpu_map__new\n"); | ||
506 | goto out_free_threads; | ||
507 | } | ||
508 | |||
509 | CPU_ZERO(&cpu_set); | ||
510 | CPU_SET(cpus->map[0], &cpu_set); | ||
512 | if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { | ||
513 | pr_debug("sched_setaffinity() failed on CPU %d: %s ", | ||
514 | cpus->map[0], strerror(errno)); | ||
515 | goto out_free_cpus; | ||
516 | } | ||
517 | |||
518 | evlist = perf_evlist__new(cpus, threads); | ||
519 | if (evlist == NULL) { | ||
520 | pr_debug("perf_evlist__new\n"); | ||
521 | goto out_free_cpus; | ||
522 | } | ||
523 | |||
524 | /* anonymous union fields, can't be initialized above */ | ||
525 | attr.wakeup_events = 1; | ||
526 | attr.sample_period = 1; | ||
527 | |||
528 | for (i = 0; i < nsyscalls; ++i) { | ||
529 | attr.config = ids[i]; | ||
530 | evsels[i] = perf_evsel__new(&attr, i); | ||
531 | if (evsels[i] == NULL) { | ||
532 | pr_debug("perf_evsel__new\n"); | ||
533 | goto out_free_evlist; | ||
534 | } | ||
535 | |||
536 | perf_evlist__add(evlist, evsels[i]); | ||
537 | |||
538 | if (perf_evsel__open(evsels[i], cpus, threads) < 0) { | ||
539 | pr_debug("failed to open counter: %s, " | ||
540 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | ||
541 | strerror(errno)); | ||
542 | goto out_close_fd; | ||
543 | } | ||
544 | } | ||
545 | |||
546 | if (perf_evlist__mmap(evlist, 128, true) < 0) { | ||
547 | pr_debug("failed to mmap events: %d (%s)\n", errno, | ||
548 | strerror(errno)); | ||
549 | goto out_close_fd; | ||
550 | } | ||
551 | |||
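	/*
	 * Drive each syscall its expected number of times; the throwaway
	 * increment below just keeps the return value "used".
	 */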
552 | for (i = 0; i < nsyscalls; ++i) | ||
553 | for (j = 0; j < expected_nr_events[i]; ++j) { | ||
554 | int foo = syscalls[i](); | ||
555 | ++foo; | ||
556 | } | ||
557 | |||
558 | while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { | ||
559 | struct perf_sample sample; | ||
560 | |||
561 | if (event->header.type != PERF_RECORD_SAMPLE) { | ||
562 | pr_debug("unexpected %s event\n", | ||
563 | perf_event__name(event->header.type)); | ||
564 | goto out_munmap; | ||
565 | } | ||
566 | |||
567 | err = perf_evlist__parse_sample(evlist, event, &sample); | ||
568 | if (err) { | ||
569 | pr_err("Can't parse sample, err = %d\n", err); | ||
570 | goto out_munmap; | ||
571 | } | ||
572 | |||
573 | evsel = perf_evlist__id2evsel(evlist, sample.id); | ||
574 | if (evsel == NULL) { | ||
575 | pr_debug("event with id %" PRIu64 | ||
576 | " doesn't map to an evsel\n", sample.id); | ||
577 | goto out_munmap; | ||
578 | } | ||
579 | nr_events[evsel->idx]++; | ||
580 | } | ||
581 | |||
582 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
583 | if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { | ||
584 | pr_debug("expected %d %s events, got %d\n", | ||
585 | expected_nr_events[evsel->idx], | ||
586 | perf_evsel__name(evsel), nr_events[evsel->idx]); | ||
587 | goto out_munmap; | ||
588 | } | ||
589 | } | ||
590 | |||
591 | err = 0; | ||
592 | out_munmap: | ||
593 | perf_evlist__munmap(evlist); | ||
594 | out_close_fd: | ||
595 | for (i = 0; i < nsyscalls; ++i) | ||
596 | perf_evsel__close_fd(evsels[i], 1, threads->nr); | ||
597 | out_free_evlist: | ||
598 | perf_evlist__delete(evlist); | ||
599 | out_free_cpus: | ||
600 | cpu_map__delete(cpus); | ||
601 | out_free_threads: | ||
602 | thread_map__delete(threads); | ||
603 | return err; | ||
604 | #undef nsyscalls | ||
605 | } | ||
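/*
 * A sketch of the id -> evsel round trip test__basic_mmap() relies on,
 * with a made-up id value purely for illustration:
 *
 *	// the kernel assigned, say, id 42 to evsels[0] at open time
 *	evsel = perf_evlist__id2evsel(evlist, 42);	// -> evsels[0]
 *	nr_events[evsel->idx]++;			// bucket per evsel
 */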
606 | |||
607 | static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp, | ||
608 | size_t *sizep) | ||
609 | { | ||
610 | cpu_set_t *mask; | ||
611 | size_t size; | ||
612 | int i, cpu = -1, nrcpus = 1024; | ||
613 | realloc: | ||
614 | mask = CPU_ALLOC(nrcpus); | ||
615 | size = CPU_ALLOC_SIZE(nrcpus); | ||
616 | CPU_ZERO_S(size, mask); | ||
617 | |||
618 | if (sched_getaffinity(pid, size, mask) == -1) { | ||
619 | CPU_FREE(mask); | ||
620 | if (errno == EINVAL && nrcpus < (1024 << 8)) { | ||
621 | nrcpus = nrcpus << 2; | ||
622 | goto realloc; | ||
623 | } | ||
624 | perror("sched_getaffinity"); | ||
625 | return -1; | ||
626 | } | ||
627 | |||
628 | for (i = 0; i < nrcpus; i++) { | ||
629 | if (CPU_ISSET_S(i, size, mask)) { | ||
630 | if (cpu == -1) { | ||
631 | cpu = i; | ||
632 | *maskp = mask; | ||
633 | *sizep = size; | ||
634 | } else | ||
635 | CPU_CLR_S(i, size, mask); | ||
636 | } | ||
637 | } | ||
638 | |||
639 | if (cpu == -1) | ||
640 | CPU_FREE(mask); | ||
641 | |||
642 | return cpu; | ||
643 | } | ||
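/*
 * The EINVAL retry above grows the allocated mask 4x per attempt,
 * 1024 -> 4096 -> 16384 -> ..., capped at 1024 << 8 (262144) cpus,
 * matching how sched_getaffinity() signals "mask too small". On
 * success the mask is narrowed to just the first possible cpu, so it
 * can be handed straight to sched_setaffinity().
 */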
644 | |||
645 | static int test__PERF_RECORD(void) | ||
646 | { | ||
647 | struct perf_record_opts opts = { | ||
648 | .target = { | ||
649 | .uid = UINT_MAX, | ||
650 | .uses_mmap = true, | ||
651 | }, | ||
652 | .no_delay = true, | ||
653 | .freq = 10, | ||
654 | .mmap_pages = 256, | ||
655 | }; | ||
656 | cpu_set_t *cpu_mask = NULL; | ||
657 | size_t cpu_mask_size = 0; | ||
658 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
659 | struct perf_evsel *evsel; | ||
660 | struct perf_sample sample; | ||
661 | const char *cmd = "sleep"; | ||
662 | const char *argv[] = { cmd, "1", NULL, }; | ||
663 | char *bname; | ||
664 | u64 prev_time = 0; | ||
665 | bool found_cmd_mmap = false, | ||
666 | found_libc_mmap = false, | ||
667 | found_vdso_mmap = false, | ||
668 | found_ld_mmap = false; | ||
669 | int err = -1, errs = 0, i, wakeups = 0; | ||
670 | u32 cpu; | ||
671 | int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; | ||
672 | |||
673 | if (evlist == NULL) { | ||
674 | pr_debug("Not enough memory to create evlist\n"); | ||
675 | goto out; | ||
676 | } | ||
677 | |||
678 | /* | ||
679 | * We need at least one evsel in the evlist, use the default | ||
680 | * one: "cycles". | ||
681 | */ | ||
682 | err = perf_evlist__add_default(evlist); | ||
683 | if (err < 0) { | ||
684 | pr_debug("Not enough memory to create evsel\n"); | ||
685 | goto out_delete_evlist; | ||
686 | } | ||
687 | |||
688 | /* | ||
689 | * Create maps of threads and cpus to monitor. In this case | ||
690 | * we start with all threads and cpus (-1, -1) but then in | ||
691 | * perf_evlist__prepare_workload we'll fill in the only thread | ||
692 | * we're monitoring, the one forked there. | ||
693 | */ | ||
694 | err = perf_evlist__create_maps(evlist, &opts.target); | ||
695 | if (err < 0) { | ||
696 | pr_debug("Not enough memory to create thread/cpu maps\n"); | ||
697 | goto out_delete_evlist; | ||
698 | } | ||
699 | |||
700 | /* | ||
701 | * Prepare the workload in argv[] to run, it'll fork it, and then wait | ||
702 | * for perf_evlist__start_workload() to exec it. This is done this way | ||
703 | * so that we have time to open the evlist (calling sys_perf_event_open | ||
704 | * on all the fds) and then mmap them. | ||
705 | */ | ||
706 | err = perf_evlist__prepare_workload(evlist, &opts, argv); | ||
707 | if (err < 0) { | ||
708 | pr_debug("Couldn't run the workload!\n"); | ||
709 | goto out_delete_evlist; | ||
710 | } | ||
711 | |||
712 | /* | ||
713 | * Config the evsels, setting attr->comm on the first one, etc. | ||
714 | */ | ||
715 | evsel = perf_evlist__first(evlist); | ||
716 | evsel->attr.sample_type |= PERF_SAMPLE_CPU; | ||
717 | evsel->attr.sample_type |= PERF_SAMPLE_TID; | ||
718 | evsel->attr.sample_type |= PERF_SAMPLE_TIME; | ||
719 | perf_evlist__config_attrs(evlist, &opts); | ||
720 | |||
721 | err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask, | ||
722 | &cpu_mask_size); | ||
723 | if (err < 0) { | ||
724 | pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); | ||
725 | goto out_delete_evlist; | ||
726 | } | ||
727 | |||
728 | cpu = err; | ||
729 | |||
730 | /* | ||
731 | * So that we can check perf_sample.cpu on all the samples. | ||
732 | */ | ||
733 | if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) { | ||
734 | pr_debug("sched_setaffinity: %s\n", strerror(errno)); | ||
735 | goto out_free_cpu_mask; | ||
736 | } | ||
737 | |||
738 | /* | ||
739 | * Call sys_perf_event_open on all the fds on all the evsels, | ||
740 | * grouping them if asked to. | ||
741 | */ | ||
742 | err = perf_evlist__open(evlist); | ||
743 | if (err < 0) { | ||
744 | pr_debug("perf_evlist__open: %s\n", strerror(errno)); | ||
745 | goto out_free_cpu_mask; | ||
746 | } | ||
747 | |||
748 | /* | ||
749 | * mmap the first fd on a given CPU and ask for events for the other | ||
750 | * fds in the same CPU to be injected in the same mmap ring buffer | ||
751 | * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)). | ||
752 | */ | ||
753 | err = perf_evlist__mmap(evlist, opts.mmap_pages, false); | ||
754 | if (err < 0) { | ||
755 | pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); | ||
756 | goto out_free_cpu_mask; | ||
757 | } | ||
758 | |||
759 | /* | ||
760 | * Now that all is properly set up, enable the events, they will | ||
761 | * count just on workload.pid, which will start... | ||
762 | */ | ||
763 | perf_evlist__enable(evlist); | ||
764 | |||
765 | /* | ||
766 | * Now! | ||
767 | */ | ||
768 | perf_evlist__start_workload(evlist); | ||
769 | |||
770 | while (1) { | ||
771 | int before = total_events; | ||
772 | |||
773 | for (i = 0; i < evlist->nr_mmaps; i++) { | ||
774 | union perf_event *event; | ||
775 | |||
776 | while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { | ||
777 | const u32 type = event->header.type; | ||
778 | const char *name = perf_event__name(type); | ||
779 | |||
780 | ++total_events; | ||
781 | if (type < PERF_RECORD_MAX) | ||
782 | nr_events[type]++; | ||
783 | |||
784 | err = perf_evlist__parse_sample(evlist, event, &sample); | ||
785 | if (err < 0) { | ||
786 | if (verbose) | ||
787 | perf_event__fprintf(event, stderr); | ||
788 | pr_debug("Couldn't parse sample\n"); | ||
789 | goto out_err; | ||
790 | } | ||
791 | |||
792 | if (verbose) { | ||
793 | pr_info("%" PRIu64" %d ", sample.time, sample.cpu); | ||
794 | perf_event__fprintf(event, stderr); | ||
795 | } | ||
796 | |||
797 | if (prev_time > sample.time) { | ||
798 | pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n", | ||
799 | name, prev_time, sample.time); | ||
800 | ++errs; | ||
801 | } | ||
802 | |||
803 | prev_time = sample.time; | ||
804 | |||
805 | if (sample.cpu != cpu) { | ||
806 | pr_debug("%s with unexpected cpu, expected %d, got %d\n", | ||
807 | name, cpu, sample.cpu); | ||
808 | ++errs; | ||
809 | } | ||
810 | |||
811 | if ((pid_t)sample.pid != evlist->workload.pid) { | ||
812 | pr_debug("%s with unexpected pid, expected %d, got %d\n", | ||
813 | name, evlist->workload.pid, sample.pid); | ||
814 | ++errs; | ||
815 | } | ||
816 | |||
817 | if ((pid_t)sample.tid != evlist->workload.pid) { | ||
818 | pr_debug("%s with unexpected tid, expected %d, got %d\n", | ||
819 | name, evlist->workload.pid, sample.tid); | ||
820 | ++errs; | ||
821 | } | ||
822 | |||
823 | if ((type == PERF_RECORD_COMM || | ||
824 | type == PERF_RECORD_MMAP || | ||
825 | type == PERF_RECORD_FORK || | ||
826 | type == PERF_RECORD_EXIT) && | ||
827 | (pid_t)event->comm.pid != evlist->workload.pid) { | ||
828 | pr_debug("%s with unexpected pid/tid\n", name); | ||
829 | ++errs; | ||
830 | } | ||
831 | |||
832 | if ((type == PERF_RECORD_COMM || | ||
833 | type == PERF_RECORD_MMAP) && | ||
834 | event->comm.pid != event->comm.tid) { | ||
835 | pr_debug("%s with different pid/tid!\n", name); | ||
836 | ++errs; | ||
837 | } | ||
838 | |||
839 | switch (type) { | ||
840 | case PERF_RECORD_COMM: | ||
841 | if (strcmp(event->comm.comm, cmd)) { | ||
842 | pr_debug("%s with unexpected comm!\n", name); | ||
843 | ++errs; | ||
844 | } | ||
845 | break; | ||
846 | case PERF_RECORD_EXIT: | ||
847 | goto found_exit; | ||
848 | case PERF_RECORD_MMAP: | ||
849 | bname = strrchr(event->mmap.filename, '/'); | ||
850 | if (bname != NULL) { | ||
851 | if (!found_cmd_mmap) | ||
852 | found_cmd_mmap = !strcmp(bname + 1, cmd); | ||
853 | if (!found_libc_mmap) | ||
854 | found_libc_mmap = !strncmp(bname + 1, "libc", 4); | ||
855 | if (!found_ld_mmap) | ||
856 | found_ld_mmap = !strncmp(bname + 1, "ld", 2); | ||
857 | } else if (!found_vdso_mmap) | ||
858 | found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]"); | ||
859 | break; | ||
860 | |||
861 | case PERF_RECORD_SAMPLE: | ||
862 | /* Just ignore samples for now */ | ||
863 | break; | ||
864 | default: | ||
865 | pr_debug("Unexpected perf_event->header.type %d!\n", | ||
866 | type); | ||
867 | ++errs; | ||
868 | } | ||
869 | } | ||
870 | } | ||
871 | |||
872 | /* | ||
873 | * We don't use poll here because, at least as of 3.1, the | ||
874 | * PERF_RECORD_{!SAMPLE} events don't honour | ||
875 | * perf_event_attr.wakeup_events, just PERF_RECORD_SAMPLE does. | ||
876 | */ | ||
877 | if (total_events == before && false) | ||
878 | poll(evlist->pollfd, evlist->nr_fds, -1); | ||
879 | |||
880 | sleep(1); | ||
881 | if (++wakeups > 5) { | ||
882 | pr_debug("No PERF_RECORD_EXIT event!\n"); | ||
883 | break; | ||
884 | } | ||
885 | } | ||
886 | |||
887 | found_exit: | ||
888 | if (nr_events[PERF_RECORD_COMM] > 1) { | ||
889 | pr_debug("Excessive number of PERF_RECORD_COMM events!\n"); | ||
890 | ++errs; | ||
891 | } | ||
892 | |||
893 | if (nr_events[PERF_RECORD_COMM] == 0) { | ||
894 | pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd); | ||
895 | ++errs; | ||
896 | } | ||
897 | |||
898 | if (!found_cmd_mmap) { | ||
899 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd); | ||
900 | ++errs; | ||
901 | } | ||
902 | |||
903 | if (!found_libc_mmap) { | ||
904 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc"); | ||
905 | ++errs; | ||
906 | } | ||
907 | |||
908 | if (!found_ld_mmap) { | ||
909 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld"); | ||
910 | ++errs; | ||
911 | } | ||
912 | |||
913 | if (!found_vdso_mmap) { | ||
914 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]"); | ||
915 | ++errs; | ||
916 | } | ||
917 | out_err: | ||
918 | perf_evlist__munmap(evlist); | ||
919 | out_free_cpu_mask: | ||
920 | CPU_FREE(cpu_mask); | ||
921 | out_delete_evlist: | ||
922 | perf_evlist__delete(evlist); | ||
923 | out: | ||
924 | return (err < 0 || errs > 0) ? -1 : 0; | ||
925 | } | ||
926 | |||
927 | |||
928 | #if defined(__x86_64__) || defined(__i386__) | ||
929 | |||
930 | #define barrier() asm volatile("" ::: "memory") | ||
931 | |||
932 | static u64 rdpmc(unsigned int counter) | ||
933 | { | ||
934 | unsigned int low, high; | ||
935 | |||
936 | asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter)); | ||
937 | |||
938 | return low | ((u64)high) << 32; | ||
939 | } | ||
940 | |||
941 | static u64 rdtsc(void) | ||
942 | { | ||
943 | unsigned int low, high; | ||
944 | |||
945 | asm volatile("rdtsc" : "=a" (low), "=d" (high)); | ||
946 | |||
947 | return low | ((u64)high) << 32; | ||
948 | } | ||
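/*
 * Both rdpmc and rdtsc return their 64-bit result split across
 * EDX:EAX; e.g. low = 0x00000010, high = 0x00000002 recombine as
 * 0x0000000200000010 via low | ((u64)high) << 32.
 */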
949 | |||
950 | static u64 mmap_read_self(void *addr) | ||
951 | { | ||
952 | struct perf_event_mmap_page *pc = addr; | ||
953 | u32 seq, idx, time_mult = 0, time_shift = 0; | ||
954 | u64 count, cyc = 0, time_offset = 0, enabled, running, delta; | ||
955 | |||
956 | do { | ||
957 | seq = pc->lock; | ||
958 | barrier(); | ||
959 | |||
960 | enabled = pc->time_enabled; | ||
961 | running = pc->time_running; | ||
962 | |||
963 | if (enabled != running) { | ||
964 | cyc = rdtsc(); | ||
965 | time_mult = pc->time_mult; | ||
966 | time_shift = pc->time_shift; | ||
967 | time_offset = pc->time_offset; | ||
968 | } | ||
969 | |||
970 | idx = pc->index; | ||
971 | count = pc->offset; | ||
972 | if (idx) | ||
973 | count += rdpmc(idx - 1); | ||
974 | |||
975 | barrier(); | ||
976 | } while (pc->lock != seq); | ||
977 | |||
978 | if (enabled != running) { | ||
979 | u64 quot, rem; | ||
980 | |||
981 | quot = (cyc >> time_shift); | ||
982 | rem = cyc & ((1 << time_shift) - 1); | ||
983 | delta = time_offset + quot * time_mult + | ||
984 | ((rem * time_mult) >> time_shift); | ||
985 | |||
986 | enabled += delta; | ||
987 | if (idx) | ||
988 | running += delta; | ||
989 | |||
990 | quot = count / running; | ||
991 | rem = count % running; | ||
992 | count = quot * enabled + (rem * enabled) / running; | ||
993 | } | ||
994 | |||
995 | return count; | ||
996 | } | ||
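/*
 * The quot/rem split above computes, without overflowing 64 bits,
 *
 *	delta = time_offset + (cyc * time_mult) >> time_shift
 *
 * i.e. the kernel's mult/shift scaling of TSC cycles to nanoseconds,
 * and then scales the raw count by enabled/running to estimate what
 * it would have been had the event been scheduled the whole time
 * (the usual multiplexing correction).
 */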
997 | |||
998 | /* | ||
999 | * If the RDPMC instruction faults then signal this back to the test parent task: | ||
1000 | */ | ||
1001 | static void segfault_handler(int sig __maybe_unused, | ||
1002 | siginfo_t *info __maybe_unused, | ||
1003 | void *uc __maybe_unused) | ||
1004 | { | ||
1005 | exit(-1); | ||
1006 | } | ||
1007 | |||
1008 | static int __test__rdpmc(void) | ||
1009 | { | ||
1010 | volatile int tmp = 0; | ||
1011 | u64 i, loops = 1000; | ||
1012 | int n; | ||
1013 | int fd; | ||
1014 | void *addr; | ||
1015 | struct perf_event_attr attr = { | ||
1016 | .type = PERF_TYPE_HARDWARE, | ||
1017 | .config = PERF_COUNT_HW_INSTRUCTIONS, | ||
1018 | .exclude_kernel = 1, | ||
1019 | }; | ||
1020 | u64 delta_sum = 0; | ||
1021 | struct sigaction sa; | ||
1022 | |||
1023 | sigfillset(&sa.sa_mask); | ||
1024 | sa.sa_sigaction = segfault_handler; | ||
1025 | sigaction(SIGSEGV, &sa, NULL); | ||
1026 | |||
1027 | fd = sys_perf_event_open(&attr, 0, -1, -1, 0); | ||
1028 | if (fd < 0) { | ||
1029 | pr_err("Error: sys_perf_event_open() syscall returned " | ||
1030 | "with %d (%s)\n", fd, strerror(errno)); | ||
1031 | return -1; | ||
1032 | } | ||
1033 | |||
1034 | addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); | ||
1035 | if (addr == MAP_FAILED) { | ||
1036 | pr_err("Error: mmap() syscall returned with (%s)\n", | ||
1037 | strerror(errno)); | ||
1038 | goto out_close; | ||
1039 | } | ||
1040 | |||
1041 | for (n = 0; n < 6; n++) { | ||
1042 | u64 stamp, now, delta; | ||
1043 | |||
1044 | stamp = mmap_read_self(addr); | ||
1045 | |||
1046 | for (i = 0; i < loops; i++) | ||
1047 | tmp++; | ||
1048 | |||
1049 | now = mmap_read_self(addr); | ||
1050 | loops *= 10; | ||
1051 | |||
1052 | delta = now - stamp; | ||
1053 | pr_debug("%14d: %14Lu\n", n, (long long)delta); | ||
1054 | |||
1055 | delta_sum += delta; | ||
1056 | } | ||
1057 | |||
1058 | munmap(addr, page_size); | ||
1059 | pr_debug(" "); | ||
1060 | out_close: | ||
1061 | close(fd); | ||
1062 | |||
1063 | if (!delta_sum) | ||
1064 | return -1; | ||
1065 | |||
1066 | return 0; | ||
1067 | } | ||
1068 | |||
1069 | static int test__rdpmc(void) | ||
1070 | { | ||
1071 | int status = 0; | ||
1072 | int wret = 0; | ||
1073 | int ret; | ||
1074 | int pid; | ||
1075 | |||
1076 | pid = fork(); | ||
1077 | if (pid < 0) | ||
1078 | return -1; | ||
1079 | |||
1080 | if (!pid) { | ||
1081 | ret = __test__rdpmc(); | ||
1082 | |||
1083 | exit(ret); | ||
1084 | } | ||
1085 | |||
1086 | wret = waitpid(pid, &status, 0); | ||
1087 | if (wret < 0 || status) | ||
1088 | return -1; | ||
1089 | |||
1090 | return 0; | ||
1091 | } | ||
1092 | |||
1093 | #endif | ||
1094 | |||
1095 | static int test__perf_pmu(void) | ||
1096 | { | ||
1097 | return perf_pmu__test(); | ||
1098 | } | ||
1099 | |||
1100 | static int perf_evsel__roundtrip_cache_name_test(void) | ||
1101 | { | ||
1102 | char name[128]; | ||
1103 | int type, op, err = 0, ret = 0, i, idx; | ||
1104 | struct perf_evsel *evsel; | ||
1105 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
1106 | |||
1107 | if (evlist == NULL) | ||
1108 | return -ENOMEM; | ||
1109 | |||
1110 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | ||
1111 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | ||
1112 | /* skip invalid cache type */ | ||
1113 | if (!perf_evsel__is_cache_op_valid(type, op)) | ||
1114 | continue; | ||
1115 | |||
1116 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | ||
1117 | __perf_evsel__hw_cache_type_op_res_name(type, op, i, | ||
1118 | name, sizeof(name)); | ||
1119 | err = parse_events(evlist, name, 0); | ||
1120 | if (err) | ||
1121 | ret = err; | ||
1122 | } | ||
1123 | } | ||
1124 | } | ||
1125 | |||
1126 | idx = 0; | ||
1127 | evsel = perf_evlist__first(evlist); | ||
1128 | |||
1129 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | ||
1130 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | ||
1131 | /* skip invalid cache type */ | ||
1132 | if (!perf_evsel__is_cache_op_valid(type, op)) | ||
1133 | continue; | ||
1134 | |||
1135 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | ||
1136 | __perf_evsel__hw_cache_type_op_res_name(type, op, i, | ||
1137 | name, sizeof(name)); | ||
1138 | if (evsel->idx != idx) | ||
1139 | continue; | ||
1140 | |||
1141 | ++idx; | ||
1142 | |||
1143 | if (strcmp(perf_evsel__name(evsel), name)) { | ||
1144 | pr_debug("%s != %s\n", perf_evsel__name(evsel), name); | ||
1145 | ret = -1; | ||
1146 | } | ||
1147 | |||
1148 | evsel = perf_evsel__next(evsel); | ||
1149 | } | ||
1150 | } | ||
1151 | } | ||
1152 | |||
1153 | perf_evlist__delete(evlist); | ||
1154 | return ret; | ||
1155 | } | ||
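/*
 * The generated names follow perf's hw cache grammar, e.g.
 * "L1-dcache-load-misses" for (type = L1D, op = LOAD, result = MISS);
 * each one is parsed back and the resulting evsel name must round-trip
 * to the exact string that was fed to parse_events().
 */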
1156 | |||
1157 | static int __perf_evsel__name_array_test(const char *names[], int nr_names) | ||
1158 | { | ||
1159 | int i, err; | ||
1160 | struct perf_evsel *evsel; | ||
1161 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
1162 | |||
1163 | if (evlist == NULL) | ||
1164 | return -ENOMEM; | ||
1165 | |||
1166 | for (i = 0; i < nr_names; ++i) { | ||
1167 | err = parse_events(evlist, names[i], 0); | ||
1168 | if (err) { | ||
1169 | pr_debug("failed to parse event '%s', err %d\n", | ||
1170 | names[i], err); | ||
1171 | goto out_delete_evlist; | ||
1172 | } | ||
1173 | } | ||
1174 | |||
1175 | err = 0; | ||
1176 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
1177 | if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) { | ||
1178 | --err; | ||
1179 | pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]); | ||
1180 | } | ||
1181 | } | ||
1182 | |||
1183 | out_delete_evlist: | ||
1184 | perf_evlist__delete(evlist); | ||
1185 | return err; | ||
1186 | } | ||
1187 | |||
1188 | #define perf_evsel__name_array_test(names) \ | ||
1189 | __perf_evsel__name_array_test(names, ARRAY_SIZE(names)) | ||
1190 | |||
1191 | static int perf_evsel__roundtrip_name_test(void) | ||
1192 | { | ||
1193 | int err = 0, ret = 0; | ||
1194 | |||
1195 | err = perf_evsel__name_array_test(perf_evsel__hw_names); | ||
1196 | if (err) | ||
1197 | ret = err; | ||
1198 | |||
1199 | err = perf_evsel__name_array_test(perf_evsel__sw_names); | ||
1200 | if (err) | ||
1201 | ret = err; | ||
1202 | |||
1203 | err = perf_evsel__roundtrip_cache_name_test(); | ||
1204 | if (err) | ||
1205 | ret = err; | ||
1206 | |||
1207 | return ret; | ||
1208 | } | ||
1209 | |||
1210 | static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, | ||
1211 | int size, bool should_be_signed) | ||
1212 | { | ||
1213 | struct format_field *field = perf_evsel__field(evsel, name); | ||
1214 | int is_signed; | ||
1215 | int ret = 0; | ||
1216 | |||
1217 | if (field == NULL) { | ||
1218 | pr_debug("%s: \"%s\" field not found!\n", evsel->name, name); | ||
1219 | return -1; | ||
1220 | } | ||
1221 | |||
1222 | is_signed = !!(field->flags & FIELD_IS_SIGNED); | ||
1223 | if (should_be_signed && !is_signed) { | ||
1224 | pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", | ||
1225 | evsel->name, name, is_signed, should_be_signed); | ||
1226 | ret = -1; | ||
1227 | } | ||
1228 | |||
1229 | if (field->size != size) { | ||
1230 | pr_debug("%s: \"%s\" size (%d) should be %d!\n", | ||
1231 | evsel->name, name, field->size, size); | ||
1232 | ret = -1; | ||
1233 | } | ||
1234 | |||
1235 | return ret; | ||
1236 | } | ||
1237 | |||
1238 | static int perf_evsel__tp_sched_test(void) | ||
1239 | { | ||
1240 | struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0); | ||
1241 | int ret = 0; | ||
1242 | |||
1243 | if (evsel == NULL) { | ||
1244 | pr_debug("perf_evsel__new\n"); | ||
1245 | return -1; | ||
1246 | } | ||
1247 | |||
1248 | if (perf_evsel__test_field(evsel, "prev_comm", 16, true)) | ||
1249 | ret = -1; | ||
1250 | |||
1251 | if (perf_evsel__test_field(evsel, "prev_pid", 4, true)) | ||
1252 | ret = -1; | ||
1253 | |||
1254 | if (perf_evsel__test_field(evsel, "prev_prio", 4, true)) | ||
1255 | ret = -1; | ||
1256 | |||
1257 | if (perf_evsel__test_field(evsel, "prev_state", 8, true)) | ||
1258 | ret = -1; | ||
1259 | |||
1260 | if (perf_evsel__test_field(evsel, "next_comm", 16, true)) | ||
1261 | ret = -1; | ||
1262 | |||
1263 | if (perf_evsel__test_field(evsel, "next_pid", 4, true)) | ||
1264 | ret = -1; | ||
1265 | |||
1266 | if (perf_evsel__test_field(evsel, "next_prio", 4, true)) | ||
1267 | ret = -1; | ||
1268 | |||
1269 | perf_evsel__delete(evsel); | ||
1270 | |||
1271 | evsel = perf_evsel__newtp("sched", "sched_wakeup", 0); | ||
 | if (evsel == NULL) { | ||
 | pr_debug("perf_evsel__new\n"); | ||
 | return -1; | ||
 | } | ||
1272 | |||
1273 | if (perf_evsel__test_field(evsel, "comm", 16, true)) | ||
1274 | ret = -1; | ||
1275 | |||
1276 | if (perf_evsel__test_field(evsel, "pid", 4, true)) | ||
1277 | ret = -1; | ||
1278 | |||
1279 | if (perf_evsel__test_field(evsel, "prio", 4, true)) | ||
1280 | ret = -1; | ||
1281 | |||
1282 | if (perf_evsel__test_field(evsel, "success", 4, true)) | ||
1283 | ret = -1; | ||
1284 | |||
1285 | if (perf_evsel__test_field(evsel, "target_cpu", 4, true)) | ||
1286 | ret = -1; | ||
1287 | |||
1288 | return ret; | ||
1289 | } | ||
1290 | |||
1291 | static int test__syscall_open_tp_fields(void) | ||
1292 | { | ||
1293 | struct perf_record_opts opts = { | ||
1294 | .target = { | ||
1295 | .uid = UINT_MAX, | ||
1296 | .uses_mmap = true, | ||
1297 | }, | ||
1298 | .no_delay = true, | ||
1299 | .freq = 1, | ||
1300 | .mmap_pages = 256, | ||
1301 | .raw_samples = true, | ||
1302 | }; | ||
1303 | const char *filename = "/etc/passwd"; | ||
1304 | int flags = O_RDONLY | O_DIRECTORY; | ||
1305 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
1306 | struct perf_evsel *evsel; | ||
1307 | int err = -1, i, nr_events = 0, nr_polls = 0; | ||
1308 | |||
1309 | if (evlist == NULL) { | ||
1310 | pr_debug("%s: perf_evlist__new\n", __func__); | ||
1311 | goto out; | ||
1312 | } | ||
1313 | |||
1314 | evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); | ||
1315 | if (evsel == NULL) { | ||
1316 | pr_debug("%s: perf_evsel__newtp\n", __func__); | ||
1317 | goto out_delete_evlist; | ||
1318 | } | ||
1319 | |||
1320 | perf_evlist__add(evlist, evsel); | ||
1321 | |||
1322 | err = perf_evlist__create_maps(evlist, &opts.target); | ||
1323 | if (err < 0) { | ||
1324 | pr_debug("%s: perf_evlist__create_maps\n", __func__); | ||
1325 | goto out_delete_evlist; | ||
1326 | } | ||
1327 | |||
1328 | perf_evsel__config(evsel, &opts, evsel); | ||
1329 | |||
1330 | evlist->threads->map[0] = getpid(); | ||
1331 | |||
1332 | err = perf_evlist__open(evlist); | ||
1333 | if (err < 0) { | ||
1334 | pr_debug("perf_evlist__open: %s\n", strerror(errno)); | ||
1335 | goto out_delete_evlist; | ||
1336 | } | ||
1337 | |||
1338 | err = perf_evlist__mmap(evlist, UINT_MAX, false); | ||
1339 | if (err < 0) { | ||
1340 | pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); | ||
1341 | goto out_delete_evlist; | ||
1342 | } | ||
1343 | |||
1344 | perf_evlist__enable(evlist); | ||
1345 | |||
1346 | /* | ||
1347 | * Generate the event: | ||
1348 | */ | ||
1349 | open(filename, flags); | ||
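	/*
	 * O_DIRECTORY on a regular file guarantees the open itself fails,
	 * but syscalls:sys_enter_open still fires on syscall entry, which
	 * is all this test needs in order to inspect the "flags" field.
	 */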
1350 | |||
1351 | while (1) { | ||
1352 | int before = nr_events; | ||
1353 | |||
1354 | for (i = 0; i < evlist->nr_mmaps; i++) { | ||
1355 | union perf_event *event; | ||
1356 | |||
1357 | while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { | ||
1358 | const u32 type = event->header.type; | ||
1359 | int tp_flags; | ||
1360 | struct perf_sample sample; | ||
1361 | |||
1362 | ++nr_events; | ||
1363 | |||
1364 | if (type != PERF_RECORD_SAMPLE) | ||
1365 | continue; | ||
1366 | |||
1367 | err = perf_evsel__parse_sample(evsel, event, &sample); | ||
1368 | if (err) { | ||
1369 | pr_err("Can't parse sample, err = %d\n", err); | ||
1370 | goto out_munmap; | ||
1371 | } | ||
1372 | |||
1373 | tp_flags = perf_evsel__intval(evsel, &sample, "flags"); | ||
1374 | |||
1375 | if (flags != tp_flags) { | ||
1376 | pr_debug("%s: Expected flags=%#x, got %#x\n", | ||
1377 | __func__, flags, tp_flags); | ||
1378 | goto out_munmap; | ||
1379 | } | ||
1380 | |||
1381 | goto out_ok; | ||
1382 | } | ||
1383 | } | ||
1384 | |||
1385 | if (nr_events == before) | ||
1386 | poll(evlist->pollfd, evlist->nr_fds, 10); | ||
1387 | |||
1388 | if (++nr_polls > 5) { | ||
1389 | pr_debug("%s: no events!\n", __func__); | ||
1390 | goto out_munmap; | ||
1391 | } | ||
1392 | } | ||
1393 | out_ok: | ||
1394 | err = 0; | ||
1395 | out_munmap: | ||
1396 | perf_evlist__munmap(evlist); | ||
1397 | out_delete_evlist: | ||
1398 | perf_evlist__delete(evlist); | ||
1399 | out: | ||
1400 | return err; | ||
1401 | } | ||
1402 | |||
1403 | static struct test { | ||
1404 | const char *desc; | ||
1405 | int (*func)(void); | ||
1406 | } tests[] = { | ||
1407 | { | ||
1408 | .desc = "vmlinux symtab matches kallsyms", | ||
1409 | .func = test__vmlinux_matches_kallsyms, | ||
1410 | }, | ||
1411 | { | ||
1412 | .desc = "detect open syscall event", | ||
1413 | .func = test__open_syscall_event, | ||
1414 | }, | ||
1415 | { | ||
1416 | .desc = "detect open syscall event on all cpus", | ||
1417 | .func = test__open_syscall_event_on_all_cpus, | ||
1418 | }, | ||
1419 | { | ||
1420 | .desc = "read samples using the mmap interface", | ||
1421 | .func = test__basic_mmap, | ||
1422 | }, | ||
1423 | { | ||
1424 | .desc = "parse events tests", | ||
1425 | .func = parse_events__test, | ||
1426 | }, | ||
1427 | #if defined(__x86_64__) || defined(__i386__) | ||
1428 | { | ||
1429 | .desc = "x86 rdpmc test", | ||
1430 | .func = test__rdpmc, | ||
1431 | }, | ||
1432 | #endif | ||
1433 | { | ||
1434 | .desc = "Validate PERF_RECORD_* events & perf_sample fields", | ||
1435 | .func = test__PERF_RECORD, | ||
1436 | }, | ||
1437 | { | ||
1438 | .desc = "Test perf pmu format parsing", | ||
1439 | .func = test__perf_pmu, | ||
1440 | }, | ||
1441 | { | ||
1442 | .desc = "Test dso data interface", | ||
1443 | .func = dso__test_data, | ||
1444 | }, | ||
1445 | { | ||
1446 | .desc = "roundtrip evsel->name check", | ||
1447 | .func = perf_evsel__roundtrip_name_test, | ||
1448 | }, | ||
1449 | { | ||
1450 | .desc = "Check parsing of sched tracepoints fields", | ||
1451 | .func = perf_evsel__tp_sched_test, | ||
1452 | }, | ||
1453 | { | ||
1454 | .desc = "Generate and check syscalls:sys_enter_open event fields", | ||
1455 | .func = test__syscall_open_tp_fields, | ||
1456 | }, | ||
1457 | { | ||
1458 | .func = NULL, | ||
1459 | }, | ||
1460 | }; | ||
1461 | |||
1462 | static bool perf_test__matches(int curr, int argc, const char *argv[]) | ||
1463 | { | ||
1464 | int i; | ||
1465 | |||
1466 | if (argc == 0) | ||
1467 | return true; | ||
1468 | |||
1469 | for (i = 0; i < argc; ++i) { | ||
1470 | char *end; | ||
1471 | long nr = strtoul(argv[i], &end, 10); | ||
1472 | |||
1473 | if (*end == '\0') { | ||
1474 | if (nr == curr + 1) | ||
1475 | return true; | ||
1476 | continue; | ||
1477 | } | ||
1478 | |||
1479 | if (strstr(tests[curr].desc, argv[i])) | ||
1480 | return true; | ||
1481 | } | ||
1482 | |||
1483 | return false; | ||
1484 | } | ||
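/*
 * E.g. "perf test 1" runs just the first test (matched by number),
 * while "perf test kallsyms" runs every test whose description
 * contains that substring.
 */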
1485 | |||
1486 | static int __cmd_test(int argc, const char *argv[]) | ||
1487 | { | ||
1488 | int i = 0; | ||
1489 | int width = 0; | ||
1490 | |||
1491 | while (tests[i].func) { | ||
1492 | int len = strlen(tests[i].desc); | ||
1493 | |||
1494 | if (width < len) | ||
1495 | width = len; | ||
1496 | ++i; | ||
1497 | } | ||
1498 | |||
1499 | i = 0; | ||
1500 | while (tests[i].func) { | ||
1501 | int curr = i++, err; | ||
1502 | |||
1503 | if (!perf_test__matches(curr, argc, argv)) | ||
1504 | continue; | ||
1505 | |||
1506 | pr_info("%2d: %-*s:", i, width, tests[curr].desc); | ||
1507 | pr_debug("\n--- start ---\n"); | ||
1508 | err = tests[curr].func(); | ||
1509 | pr_debug("---- end ----\n%s:", tests[curr].desc); | ||
1510 | if (err) | ||
1511 | color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n"); | ||
1512 | else | ||
1513 | pr_info(" Ok\n"); | ||
1514 | } | ||
1515 | |||
1516 | return 0; | ||
1517 | } | ||
1518 | |||
1519 | static int perf_test__list(int argc, const char **argv) | ||
1520 | { | ||
1521 | int i = 0; | ||
1522 | |||
1523 | while (tests[i].func) { | ||
1524 | int curr = i++; | ||
1525 | |||
1526 | if (argc > 1 && !strstr(tests[curr].desc, argv[1])) | ||
1527 | continue; | ||
1528 | |||
1529 | pr_info("%2d: %s\n", i, tests[curr].desc); | ||
1530 | } | ||
1531 | |||
1532 | return 0; | ||
1533 | } | ||
1534 | |||
1535 | int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused) | ||
1536 | { | ||
1537 | const char * const test_usage[] = { | ||
1538 | "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]", | ||
1539 | NULL, | ||
1540 | }; | ||
1541 | const struct option test_options[] = { | ||
1542 | OPT_INCR('v', "verbose", &verbose, | ||
1543 | "be more verbose (show symbol address, etc)"), | ||
1544 | OPT_END() | ||
1545 | }; | ||
1546 | |||
1547 | argc = parse_options(argc, argv, test_options, test_usage, 0); | ||
1548 | if (argc >= 1 && !strcmp(argv[0], "list")) | ||
1549 | return perf_test__list(argc, argv); | ||
1550 | |||
1551 | symbol_conf.priv_size = sizeof(int); | ||
1552 | symbol_conf.sort_by_name = true; | ||
1553 | symbol_conf.try_vmlinux_path = true; | ||
1554 | |||
1555 | if (symbol__init() < 0) | ||
1556 | return -1; | ||
1557 | |||
1558 | return __cmd_test(argc, argv); | ||
1559 | } | ||