Diffstat (limited to 'tools/perf/builtin-test.c')
-rw-r--r--	tools/perf/builtin-test.c	| 415
1 file changed, 404 insertions(+), 11 deletions(-)
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 035b9fa063a9..2da9162262b0 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -7,10 +7,11 @@
 
 #include "util/cache.h"
 #include "util/debug.h"
+#include "util/evlist.h"
 #include "util/parse-options.h"
-#include "util/session.h"
+#include "util/parse-events.h"
 #include "util/symbol.h"
-#include "util/thread.h"
+#include "util/thread_map.h"
 
 static long page_size;
 
@@ -119,10 +120,16 @@ static int test__vmlinux_matches_kallsyms(void)
 	 * end addresses too.
 	 */
 	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
-		struct symbol *pair;
+		struct symbol *pair, *first_pair;
+		bool backwards = true;
 
 		sym = rb_entry(nd, struct symbol, rb_node);
-		pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
+
+		if (sym->start == sym->end)
+			continue;
+
+		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
+		pair = first_pair;
 
 		if (pair && pair->start == sym->start) {
 next_pair:
@@ -140,11 +147,13 @@ next_pair:
 			if (llabs(skew) < page_size)
 				continue;
 
-			pr_debug("%#Lx: diff end addr for %s v: %#Lx k: %#Lx\n",
+			pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
 				 sym->start, sym->name, sym->end, pair->end);
 		} else {
-			struct rb_node *nnd = rb_prev(&pair->rb_node);
-
+			struct rb_node *nnd;
+detour:
+			nnd = backwards ? rb_prev(&pair->rb_node) :
+					  rb_next(&pair->rb_node);
 			if (nnd) {
 				struct symbol *next = rb_entry(nnd, struct symbol, rb_node);
 
@@ -153,11 +162,18 @@ next_pair:
 					goto next_pair;
 				}
 			}
-			pr_debug("%#Lx: diff name v: %s k: %s\n",
+
+			if (backwards) {
+				backwards = false;
+				pair = first_pair;
+				goto detour;
+			}
+
+			pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
 				 sym->start, sym->name, pair->name);
 		}
 	} else
-		pr_debug("%#Lx: %s not on kallsyms\n", sym->start, sym->name);
+		pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);
 
 	err = -1;
 }
@@ -196,10 +212,10 @@ next_pair:
 
 		if (pair->start == pos->start) {
 			pair->priv = 1;
-			pr_info(" %Lx-%Lx %Lx %s in kallsyms as",
+			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
 				pos->start, pos->end, pos->pgoff, pos->dso->name);
 			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
-				pr_info(": \n*%Lx-%Lx %Lx",
+				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
 					pair->start, pair->end, pair->pgoff);
 			pr_info(" %s\n", pair->dso->name);
 			pair->priv = 1;
@@ -219,6 +235,371 @@ out:
 	return err;
 }
 
+#include "util/cpumap.h"
+#include "util/evsel.h"
+#include <sys/types.h>
+
+static int trace_event__id(const char *evname)
+{
+	char *filename;
+	int err = -1, fd;
+
+	if (asprintf(&filename,
+		     "/sys/kernel/debug/tracing/events/syscalls/%s/id",
+		     evname) < 0)
+		return -1;
+
+	fd = open(filename, O_RDONLY);
+	if (fd >= 0) {
+		char id[16];
+		if (read(fd, id, sizeof(id)) > 0)
+			err = atoi(id);
+		close(fd);
+	}
+
+	free(filename);
+	return err;
+}
+
+static int test__open_syscall_event(void)
+{
+	int err = -1, fd;
+	struct thread_map *threads;
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+	unsigned int nr_open_calls = 111, i;
+	int id = trace_event__id("sys_enter_open");
+
+	if (id < 0) {
+		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+		return -1;
+	}
+
+	threads = thread_map__new(-1, getpid());
+	if (threads == NULL) {
+		pr_debug("thread_map__new\n");
+		return -1;
+	}
+
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.config = id;
+	evsel = perf_evsel__new(&attr, 0);
+	if (evsel == NULL) {
+		pr_debug("perf_evsel__new\n");
+		goto out_thread_map_delete;
+	}
+
+	if (perf_evsel__open_per_thread(evsel, threads, false) < 0) {
+		pr_debug("failed to open counter: %s, "
+			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+			 strerror(errno));
+		goto out_evsel_delete;
+	}
+
+	for (i = 0; i < nr_open_calls; ++i) {
+		fd = open("/etc/passwd", O_RDONLY);
+		close(fd);
+	}
+
+	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
+		pr_debug("perf_evsel__read_on_cpu\n");
+		goto out_close_fd;
+	}
+
+	if (evsel->counts->cpu[0].val != nr_open_calls) {
+		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
+			 nr_open_calls, evsel->counts->cpu[0].val);
+		goto out_close_fd;
+	}
+
+	err = 0;
+out_close_fd:
+	perf_evsel__close_fd(evsel, 1, threads->nr);
+out_evsel_delete:
+	perf_evsel__delete(evsel);
+out_thread_map_delete:
+	thread_map__delete(threads);
+	return err;
+}
+
+#include <sched.h>
+
+static int test__open_syscall_event_on_all_cpus(void)
+{
+	int err = -1, fd, cpu;
+	struct thread_map *threads;
+	struct cpu_map *cpus;
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+	unsigned int nr_open_calls = 111, i;
+	cpu_set_t cpu_set;
+	int id = trace_event__id("sys_enter_open");
+
+	if (id < 0) {
+		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+		return -1;
+	}
+
+	threads = thread_map__new(-1, getpid());
+	if (threads == NULL) {
+		pr_debug("thread_map__new\n");
+		return -1;
+	}
+
+	cpus = cpu_map__new(NULL);
+	if (cpus == NULL) {
+		pr_debug("cpu_map__new\n");
+		goto out_thread_map_delete;
+	}
+
+	CPU_ZERO(&cpu_set);
+
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.config = id;
+	evsel = perf_evsel__new(&attr, 0);
+	if (evsel == NULL) {
+		pr_debug("perf_evsel__new\n");
+		goto out_thread_map_delete;
+	}
+
+	if (perf_evsel__open(evsel, cpus, threads, false) < 0) {
+		pr_debug("failed to open counter: %s, "
+			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+			 strerror(errno));
+		goto out_evsel_delete;
+	}
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int ncalls = nr_open_calls + cpu;
+		/*
+		 * XXX eventually lift this restriction in a way that
+		 * keeps perf building on older glibc installations
+		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
+		 * a reasonable upper limit tho :-)
+		 */
+		if (cpus->map[cpu] >= CPU_SETSIZE) {
+			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
+			continue;
+		}
+
+		CPU_SET(cpus->map[cpu], &cpu_set);
+		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
+				 cpus->map[cpu],
+				 strerror(errno));
+			goto out_close_fd;
+		}
+		for (i = 0; i < ncalls; ++i) {
+			fd = open("/etc/passwd", O_RDONLY);
+			close(fd);
+		}
+		CPU_CLR(cpus->map[cpu], &cpu_set);
+	}
+
+	/*
+	 * Here we need to explicitly preallocate the counts, as if
+	 * we use the auto allocation it will allocate just for 1 cpu,
+	 * as we start by cpu 0.
+	 */
+	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
+		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
+		goto out_close_fd;
+	}
+
+	err = 0;
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int expected;
+
+		if (cpus->map[cpu] >= CPU_SETSIZE)
+			continue;
+
+		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
+			pr_debug("perf_evsel__read_on_cpu\n");
+			err = -1;
+			break;
+		}
+
+		expected = nr_open_calls + cpu;
+		if (evsel->counts->cpu[cpu].val != expected) {
+			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
+				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
+			err = -1;
+		}
+	}
+
+out_close_fd:
+	perf_evsel__close_fd(evsel, 1, threads->nr);
+out_evsel_delete:
+	perf_evsel__delete(evsel);
+out_thread_map_delete:
+	thread_map__delete(threads);
+	return err;
+}
+
+/*
+ * This test will generate random numbers of calls to some getpid syscalls,
+ * then establish an mmap for a group of events that are created to monitor
+ * the syscalls.
+ *
+ * It will then receive the events, using mmap, and use the PERF_SAMPLE_ID
+ * generated sample.id field to map each one back to its respective
+ * perf_evsel instance.
+ *
+ * Then it checks if the number of syscalls reported as perf events by
+ * the kernel corresponds to the number of syscalls made.
+ */
+static int test__basic_mmap(void)
+{
+	int err = -1;
+	union perf_event *event;
+	struct thread_map *threads;
+	struct cpu_map *cpus;
+	struct perf_evlist *evlist;
+	struct perf_event_attr attr = {
+		.type = PERF_TYPE_TRACEPOINT,
+		.read_format = PERF_FORMAT_ID,
+		.sample_type = PERF_SAMPLE_ID,
+		.watermark = 0,
+	};
+	cpu_set_t cpu_set;
+	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
+					"getpgid", };
+	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
+				      (void *)getpgid };
+#define nsyscalls ARRAY_SIZE(syscall_names)
+	int ids[nsyscalls];
+	unsigned int nr_events[nsyscalls],
+		     expected_nr_events[nsyscalls], i, j;
+	struct perf_evsel *evsels[nsyscalls], *evsel;
+	int sample_size = __perf_evsel__sample_size(attr.sample_type);
+
+	for (i = 0; i < nsyscalls; ++i) {
+		char name[64];
+
+		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
+		ids[i] = trace_event__id(name);
+		if (ids[i] < 0) {
+			pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
+			return -1;
+		}
+		nr_events[i] = 0;
+		expected_nr_events[i] = random() % 257;
+	}
+
+	threads = thread_map__new(-1, getpid());
+	if (threads == NULL) {
+		pr_debug("thread_map__new\n");
+		return -1;
+	}
+
+	cpus = cpu_map__new(NULL);
+	if (cpus == NULL) {
+		pr_debug("cpu_map__new\n");
+		goto out_free_threads;
+	}
+
+	CPU_ZERO(&cpu_set);
+	CPU_SET(cpus->map[0], &cpu_set);
+	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
+			 cpus->map[0], strerror(errno));
+		goto out_free_cpus;
+	}
+
+	evlist = perf_evlist__new(cpus, threads);
+	if (evlist == NULL) {
+		pr_debug("perf_evlist__new\n");
+		goto out_free_cpus;
+	}
+
+	/* anonymous union fields, can't be initialized above */
+	attr.wakeup_events = 1;
+	attr.sample_period = 1;
+
+	for (i = 0; i < nsyscalls; ++i) {
+		attr.config = ids[i];
+		evsels[i] = perf_evsel__new(&attr, i);
+		if (evsels[i] == NULL) {
+			pr_debug("perf_evsel__new\n");
+			goto out_free_evlist;
+		}
+
+		perf_evlist__add(evlist, evsels[i]);
+
+		if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) {
+			pr_debug("failed to open counter: %s, "
+				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+				 strerror(errno));
+			goto out_close_fd;
+		}
+	}
+
+	if (perf_evlist__mmap(evlist, 128, true) < 0) {
+		pr_debug("failed to mmap events: %d (%s)\n", errno,
+			 strerror(errno));
+		goto out_close_fd;
+	}
+
+	for (i = 0; i < nsyscalls; ++i)
+		for (j = 0; j < expected_nr_events[i]; ++j) {
+			int foo = syscalls[i]();
+			++foo;
+		}
+
+	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
+		struct perf_sample sample;
+
+		if (event->header.type != PERF_RECORD_SAMPLE) {
+			pr_debug("unexpected %s event\n",
+				 perf_event__name(event->header.type));
+			goto out_munmap;
+		}
+
+		err = perf_event__parse_sample(event, attr.sample_type, sample_size,
+					       false, &sample);
+		if (err) {
+			pr_err("Can't parse sample, err = %d\n", err);
+			goto out_munmap;
+		}
+
+		evsel = perf_evlist__id2evsel(evlist, sample.id);
+		if (evsel == NULL) {
+			pr_debug("event with id %" PRIu64
+				 " doesn't map to an evsel\n", sample.id);
+			goto out_munmap;
+		}
+		nr_events[evsel->idx]++;
+	}
+
+	list_for_each_entry(evsel, &evlist->entries, node) {
+		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
+			pr_debug("expected %d %s events, got %d\n",
+				 expected_nr_events[evsel->idx],
+				 event_name(evsel), nr_events[evsel->idx]);
+			goto out_munmap;
+		}
+	}
+
+	err = 0;
+out_munmap:
+	perf_evlist__munmap(evlist);
+out_close_fd:
+	for (i = 0; i < nsyscalls; ++i)
+		perf_evsel__close_fd(evsels[i], 1, threads->nr);
+out_free_evlist:
+	perf_evlist__delete(evlist);
+out_free_cpus:
+	cpu_map__delete(cpus);
+out_free_threads:
+	thread_map__delete(threads);
+	return err;
+#undef nsyscalls
+}
+
 static struct test {
 	const char *desc;
 	int (*func)(void);
@@ -228,6 +609,18 @@ static struct test {
 		.func = test__vmlinux_matches_kallsyms,
 	},
 	{
+		.desc = "detect open syscall event",
+		.func = test__open_syscall_event,
+	},
+	{
+		.desc = "detect open syscall event on all cpus",
+		.func = test__open_syscall_event_on_all_cpus,
+	},
+	{
+		.desc = "read samples using the mmap interface",
+		.func = test__basic_mmap,
+	},
+	{
 		.func = NULL,
 	},
 };
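
The entry with .func = NULL is the sentinel that terminates the table. For orientation, here is a minimal sketch of how a driver loop can walk such a table (the actual runner in builtin-test.c is outside this diff; the run_tests name, the tests[] array name, and the exact messages are illustrative assumptions, not the file's code):

	static int run_tests(void)
	{
		int i, err = 0;

		/* Walk the table until the sentinel entry with a NULL func. */
		for (i = 0; tests[i].func != NULL; ++i) {
			pr_info("%2d: %s:", i + 1, tests[i].desc);
			if (tests[i].func() != 0) {
				pr_info(" FAILED!\n");
				err = -1;
			} else {
				pr_info(" Ok\n");
			}
		}

		return err;
	}

The suite is exercised with the perf test subcommand; the pr_debug() messages in the failure paths above are only printed in verbose mode (perf test -v).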