Diffstat (limited to 'tools/perf/builtin-test.c')
-rw-r--r--	tools/perf/builtin-test.c	919
1 file changed, 919 insertions, 0 deletions
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
new file mode 100644
index 00000000000..efe696f936e
--- /dev/null
+++ b/tools/perf/builtin-test.c
@@ -0,0 +1,919 @@
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "builtin.h"

#include "util/cache.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"
#include "../../include/linux/hw_breakpoint.h"

static long page_size;

static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
{
	bool *visited = symbol__priv(sym);
	*visited = true;
	return 0;
}
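
/*
 * Editor's note: symbol__priv() hands back the per-symbol private area
 * whose size is set via symbol_conf.priv_size (sizeof(int) in cmd_test()
 * below); the filter above uses it as a simple 'visited' flag.
 */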

static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold the kernel and the modules, obtained
	 * from both the vmlinux + .ko files and from /proc/kallsyms split by
	 * modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("dso__load_kallsyms ");
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally on demand sorted by name so that we can
	 * find the reference relocation symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);

	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("dso__find_symbol_by_name ");
		goto out;
	}

	ref_reloc_sym.addr = sym->start;

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that, look for the ref reloc symbol; if we find it we'll
	 * have its ref_reloc_symbol.unrelocated_addr, and then
	 * maps__reloc_vmlinux will notice and set the proper ->[un]map_ip
	 * routines to fix up the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all
	 * of them in the kallsyms dso. For the ones that are in both, check
	 * their names and end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		if (sym->start == sym->end)
			continue;

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms doesn't have symbol end addresses,
				 * so we set each end to the next symbol's
				 * start - 1. In some cases we get this up to a
				 * page wrong; trace_kmalloc, when I was
				 * developing this code, was one such example,
				 * 2106 bytes off the real size. More than that
				 * and we _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;
				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
				struct rb_node *nnd;
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

		err = -1;
	}

	if (!verbose)
		goto out;

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]",
		 * while on the vmlinux machine it will have the path of the
		 * vmlinux file being used, so compare the short name, less
		 * descriptive but the same ("[kernel]") in both cases.
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
						 pos->dso->short_name :
						 pos->dso->name));
		if (pair)
			pair->priv = 1;
		else
			map__fprintf(pos, stderr);
	}

	pr_info("Maps in vmlinux with a different name in kallsyms:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pair->priv = 1;
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
		}
	}

	pr_info("Maps only in kallsyms:\n");

	for (nd = rb_first(&kallsyms.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (!pos->priv)
			map__fprintf(pos, stderr);
	}
out:
	return err;
}
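
/*
 * Editor's sketch, not in the original: the end-address tolerance rule the
 * loop above applies, isolated. kallsyms end addresses are synthesized from
 * the next symbol's start, so skews smaller than a page are accepted.
 */
static bool __used example__end_skew_ok(u64 vmlinux_end, u64 kallsyms_end)
{
	s64 skew = (s64)vmlinux_end - (s64)kallsyms_end;

	return llabs(skew) < page_size;
}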

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>

static int trace_event__id(const char *evname)
{
	char *filename;
	int err = -1, fd;

	if (asprintf(&filename,
		     "%s/syscalls/%s/id",
		     debugfs_path, evname) < 0)
		return -1;

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		ssize_t n = read(fd, id, sizeof(id) - 1);
		if (n > 0) {
			id[n] = '\0'; /* read() doesn't NUL-terminate */
			err = atoi(id);
		}
		close(fd);
	}

	free(filename);
	return err;
}
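
/*
 * Editor's sketch, not in the original: a minimal caller showing how the id
 * returned above feeds a tracepoint perf_event_attr. The function name is
 * hypothetical.
 */
static int __used example__tracepoint_attr(struct perf_event_attr *attr,
					   const char *evname)
{
	int id = trace_event__id(evname);

	if (id < 0)
		return -1;

	memset(attr, 0, sizeof(*attr));
	attr->type = PERF_TYPE_TRACEPOINT;
	attr->config = id;
	return 0;
}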

static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads, false) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
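
/*
 * Editor's sketch, not in the original: roughly what the evsel open above
 * boils down to for one thread on any cpu, via the raw syscall. An
 * illustrative assumption (including that __NR_perf_event_open comes from
 * perf's own headers), not a substitute for the evsel API.
 */
#include <unistd.h>
#include <sys/syscall.h>

static int __used example__perf_event_open(int id, pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	attr.size = sizeof(attr);

	/* pid, any cpu, no group leader, no flags */
	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}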

#include <sched.h>

static int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	cpu_set_t cpu_set;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_cpu_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads, false) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror(errno));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start with cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
			err = -1;
		}
	}

out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_cpu_map_delete:
	cpu_map__delete(cpus);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
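
/*
 * Editor's sketch, not in the original: the pin-run-unpin pattern used per
 * cpu above, isolated. Assumes the cpu number fits in the static cpu_set_t
 * (i.e. is below CPU_SETSIZE).
 */
static int __used example__pin_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	return sched_setaffinity(0, sizeof(set), &set);
}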

/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * the syscalls.
 *
 * It will receive the events, using mmap, and use the PERF_SAMPLE_ID
 * generated sample.id field to map each one back to its respective
 * perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
static int test__basic_mmap(void)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_TRACEPOINT,
		.read_format	= PERF_FORMAT_ID,
		.sample_type	= PERF_SAMPLE_ID,
		.watermark	= 0,
	};
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
					"getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
				      (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	int ids[nsyscalls];
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;
	int sample_size = __perf_evsel__sample_size(attr.sample_type);

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		ids[i] = trace_event__id(name);
		if (ids[i] < 0) {
			pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
			return -1;
		}
		nr_events[i] = 0;
		expected_nr_events[i] = random() % 257;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror(errno));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new(cpus, threads);
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	/* anonymous union fields, can't be initialized above */
	attr.wakeup_events = 1;
	attr.sample_period = 1;

	for (i = 0; i < nsyscalls; ++i) {
		attr.config = ids[i];
		evsels[i] = perf_evsel__new(&attr, i);
		if (evsels[i] == NULL) {
			pr_debug("perf_evsel__new\n");
			goto out_free_evlist;
		}

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));
			goto out_close_fd;
		}
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_close_fd;
	}

	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			/* result unused; ++foo just silences warn_unused_result */
			int foo = syscalls[i]();
			++foo;
		}

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_munmap;
		}

		err = perf_event__parse_sample(event, attr.sample_type, sample_size,
					       false, &sample, false);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_munmap;
		}

		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_munmap;
		}
		nr_events[evsel->idx]++;
	}

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 event_name(evsel), nr_events[evsel->idx]);
			goto out_munmap;
		}
	}

	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_close_fd:
	for (i = 0; i < nsyscalls; ++i)
		perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
	perf_evlist__delete(evlist);
out_free_cpus:
	cpu_map__delete(cpus);
out_free_threads:
	thread_map__delete(threads);
	return err;
#undef nsyscalls
}
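
/*
 * Editor's sketch, not in the original: the reader side that
 * perf_evlist__mmap_read() hides. The first mmap'ed page is a struct
 * perf_event_mmap_page whose data_head the kernel advances; events follow
 * in the next pages. Simplified: no wrap-around handling, and rmb() is
 * assumed to come from perf's barrier helpers.
 */
static union perf_event __used *example__ring_peek(void *base, u64 tail)
{
	struct perf_event_mmap_page *pc = base;
	u64 head = pc->data_head;

	rmb();	/* read data_head before touching the event payload */

	if (tail == head)
		return NULL;	/* ring is empty */

	return (union perf_event *)((char *)base + page_size + tail);
}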

#define TEST_ASSERT_VAL(text, cond)					 \
do {									 \
	if (!(cond)) {							 \
		pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
		return -1;						 \
	}								 \
} while (0)

static int test__checkevent_tracepoint(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
	TEST_ASSERT_VAL("wrong sample_type",
		(PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
		evsel->attr.sample_type);
	TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
	return 0;
}

static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);

	list_for_each_entry(evsel, &evlist->entries, node) {
		TEST_ASSERT_VAL("wrong type",
			PERF_TYPE_TRACEPOINT == evsel->attr.type);
		TEST_ASSERT_VAL("wrong sample_type",
			(PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU)
			== evsel->attr.sample_type);
		TEST_ASSERT_VAL("wrong sample_period",
			1 == evsel->attr.sample_period);
	}
	return 0;
}

static int test__checkevent_raw(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
	return 0;
}

static int test__checkevent_numeric(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
	return 0;
}

static int test__checkevent_symbolic_name(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config",
			PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config);
	return 0;
}

static int test__checkevent_symbolic_alias(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config",
			PERF_COUNT_SW_PAGE_FAULTS == evsel->attr.config);
	return 0;
}

static int test__checkevent_genhw(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", (1 << 16) == evsel->attr.config);
	return 0;
}

static int test__checkevent_breakpoint(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
	TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
					 evsel->attr.bp_type);
	TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_4 ==
					evsel->attr.bp_len);
	return 0;
}

static int test__checkevent_breakpoint_x(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
	TEST_ASSERT_VAL("wrong bp_type",
			HW_BREAKPOINT_X == evsel->attr.bp_type);
	TEST_ASSERT_VAL("wrong bp_len", sizeof(long) == evsel->attr.bp_len);
	return 0;
}

static int test__checkevent_breakpoint_r(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type",
			PERF_TYPE_BREAKPOINT == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
	TEST_ASSERT_VAL("wrong bp_type",
			HW_BREAKPOINT_R == evsel->attr.bp_type);
	TEST_ASSERT_VAL("wrong bp_len",
			HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
	return 0;
}

static int test__checkevent_breakpoint_w(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type",
			PERF_TYPE_BREAKPOINT == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
	TEST_ASSERT_VAL("wrong bp_type",
			HW_BREAKPOINT_W == evsel->attr.bp_type);
	TEST_ASSERT_VAL("wrong bp_len",
			HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
	return 0;
}
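
/*
 * Editor's note: the mem:<addr>[:access] syntax exercised by the table
 * below maps the 'r'/'w'/'x' modifiers onto HW_BREAKPOINT_R/W/X in
 * attr.bp_type, with HW_BREAKPOINT_R | HW_BREAKPOINT_W as the default
 * when no modifier is given, as the four checkers above assert.
 */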

static struct test__event_st {
	const char *name;
	__u32 type;
	int (*check)(struct perf_evlist *evlist);
} test__events[] = {
	{
		.name  = "syscalls:sys_enter_open",
		.check = test__checkevent_tracepoint,
	},
	{
		.name  = "syscalls:*",
		.check = test__checkevent_tracepoint_multi,
	},
	{
		.name  = "r1",
		.check = test__checkevent_raw,
	},
	{
		.name  = "1:1",
		.check = test__checkevent_numeric,
	},
	{
		.name  = "instructions",
		.check = test__checkevent_symbolic_name,
	},
	{
		.name  = "faults",
		.check = test__checkevent_symbolic_alias,
	},
	{
		.name  = "L1-dcache-load-miss",
		.check = test__checkevent_genhw,
	},
	{
		.name  = "mem:0",
		.check = test__checkevent_breakpoint,
	},
	{
		.name  = "mem:0:x",
		.check = test__checkevent_breakpoint_x,
	},
	{
		.name  = "mem:0:r",
		.check = test__checkevent_breakpoint_r,
	},
	{
		.name  = "mem:0:w",
		.check = test__checkevent_breakpoint_w,
	},
};

#define TEST__EVENTS_CNT ARRAY_SIZE(test__events)

static int test__parse_events(void)
{
	struct perf_evlist *evlist;
	u_int i;
	int ret = 0;

	for (i = 0; i < TEST__EVENTS_CNT; i++) {
		struct test__event_st *e = &test__events[i];

		evlist = perf_evlist__new(NULL, NULL);
		if (evlist == NULL)
			break;

		ret = parse_events(evlist, e->name, 0);
		if (ret)
			pr_debug("failed to parse event '%s', err %d\n",
				 e->name, ret);
		else
			ret = e->check(evlist);

		/* delete on every path so failing entries don't leak */
		perf_evlist__delete(evlist);
		if (ret)
			break;
	}

	return ret;
}

static struct test {
	const char *desc;
	int (*func)(void);
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.desc = "parse events tests",
		.func = test__parse_events,
	},
	{
		.func = NULL,
	},
};

static int __cmd_test(void)
{
	int i = 0;

	page_size = sysconf(_SC_PAGE_SIZE);

	while (tests[i].func) {
		int err;
		pr_info("%2d: %s:", i + 1, tests[i].desc);
		pr_debug("\n--- start ---\n");
		err = tests[i].func();
		pr_debug("---- end ----\n%s:", tests[i].desc);
		pr_info(" %s\n", err ? "FAILED!" : "Ok");
		++i;
	}

	return 0;
}

static const char * const test_usage[] = {
	"perf test [<options>]",
	NULL,
};

static const struct option test_options[] = {
	OPT_INTEGER('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_END()
};

int cmd_test(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc)
		usage_with_options(test_usage, test_options);

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)
		return -1;

	setup_pager();

	return __cmd_test();
}
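
/*
 * Editor's note, invocation sketch: 'perf test' runs every entry in
 * tests[] in order; since OPT_INTEGER takes a value, verbosity is raised
 * with e.g. 'perf test -v 1' to show the pr_debug() output above.
 *
 *	$ perf test
 *	$ perf test -v 1
 */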