author      Arnaldo Carvalho de Melo <acme@redhat.com>    2011-01-04 08:55:27 -0500
committer   Arnaldo Carvalho de Melo <acme@redhat.com>    2011-01-10 19:03:26 -0500
commit      0252208eb52f6fe8731a47804eddc7ba93f60a87
tree        deaddeda57a630a1d6813ea565fc972c699e2906 /tools/perf/builtin-test.c
parent      12f7e0364375ba1ba55abcc5ac082b68fb526c80
perf evsel: Support perf_evsel__open(cpus > 1 && threads > 1)
And a test for it:
[acme@felicio linux]$ perf test
1: vmlinux symtab matches kallsyms: Ok
2: detect open syscall event: Ok
3: detect open syscall event on all cpus: Ok
[acme@felicio linux]$
Translating the C into plain words, the test:
1. generates a different number of open syscalls on each CPU by using
   sched_setaffinity to pin itself to one CPU at a time (see the sketch below)
2. verifies that the expected number of events is counted on each CPU
It works as expected.
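For reference, a minimal, self-contained sketch of the affinity trick the test
relies on: pin the calling thread to one CPU at a time with sched_setaffinity
and issue a CPU-dependent number of open() calls. This is plain libc, outside
perf; the sysconf-based CPU count is an illustrative assumption, the rest
mirrors the event-generation loop in the patch below.

/* Illustrative sketch only: pin to each online CPU in turn and make a
 * per-CPU-distinct number of open(2) calls. CPU count via sysconf is an
 * assumption for this example. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN);
	unsigned int nr_open_calls = 111;
	cpu_set_t *cpu_set;
	size_t cpu_set_size;

	if (ncpus < 1)
		return 1;

	cpu_set = CPU_ALLOC(ncpus);
	if (cpu_set == NULL)
		return 1;
	cpu_set_size = CPU_ALLOC_SIZE(ncpus);

	for (int cpu = 0; cpu < ncpus; ++cpu) {
		/* Pin the calling thread to this CPU only. */
		CPU_ZERO_S(cpu_set_size, cpu_set);
		CPU_SET_S(cpu, cpu_set_size, cpu_set);
		if (sched_setaffinity(0, cpu_set_size, cpu_set) < 0) {
			perror("sched_setaffinity");
			continue;
		}
		/* Each CPU gets a distinct count: nr_open_calls + cpu. */
		for (unsigned int i = 0; i < nr_open_calls + cpu; ++i) {
			int fd = open("/etc/passwd", O_RDONLY);
			if (fd >= 0)
				close(fd);
		}
		printf("cpu %d: issued %u opens\n", cpu, nr_open_calls + cpu);
	}

	CPU_FREE(cpu_set);
	return 0;
}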
LKML-Reference: <new-submission>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/builtin-test.c')
-rw-r--r--  tools/perf/builtin-test.c | 110
1 file changed, 110 insertions, 0 deletions
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index e12753f976a1..ed5696198d3d 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -234,6 +234,7 @@ out:
 	return err;
 }
 
+#include "util/cpumap.h"
 #include "util/evsel.h"
 #include <sys/types.h>
 
@@ -321,6 +322,111 @@ out_thread_map_delete:
 	return err;
 }
 
+#include <sched.h>
+
+static int test__open_syscall_event_on_all_cpus(void)
+{
+	int err = -1, fd, cpu;
+	struct thread_map *threads;
+	struct cpu_map *cpus;
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+	unsigned int nr_open_calls = 111, i;
+	cpu_set_t *cpu_set;
+	size_t cpu_set_size;
+	int id = trace_event__id("sys_enter_open");
+
+	if (id < 0) {
+		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+		return -1;
+	}
+
+	threads = thread_map__new(-1, getpid());
+	if (threads == NULL) {
+		pr_debug("thread_map__new\n");
+		return -1;
+	}
+
+	cpus = cpu_map__new(NULL);
+	if (cpus == NULL) {
+		pr_debug("cpu_map__new\n");
+		goto out_thread_map_delete;
+	}
+
+	cpu_set = CPU_ALLOC(cpus->nr);
+
+	if (cpu_set == NULL)
+		goto out_thread_map_delete;
+
+	cpu_set_size = CPU_ALLOC_SIZE(cpus->nr);
+	CPU_ZERO_S(cpu_set_size, cpu_set);
+
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.config = id;
+	evsel = perf_evsel__new(&attr, 0);
+	if (evsel == NULL) {
+		pr_debug("perf_evsel__new\n");
+		goto out_cpu_free;
+	}
+
+	if (perf_evsel__open(evsel, cpus, threads) < 0) {
+		pr_debug("failed to open counter: %s, "
+			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+			 strerror(errno));
+		goto out_evsel_delete;
+	}
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int ncalls = nr_open_calls + cpu;
+
+		CPU_SET(cpu, cpu_set);
+		sched_setaffinity(0, cpu_set_size, cpu_set);
+		for (i = 0; i < ncalls; ++i) {
+			fd = open("/etc/passwd", O_RDONLY);
+			close(fd);
+		}
+		CPU_CLR(cpu, cpu_set);
+	}
+
+	/*
+	 * Here we need to explicitly preallocate the counts, as the
+	 * auto allocation would allocate them for just 1 cpu, since
+	 * we start with cpu 0.
+	 */
+	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
+		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
+		goto out_close_fd;
+	}
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int expected;
+
+		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
+			pr_debug("perf_evsel__read_on_cpu\n");
+			goto out_close_fd;
+		}
+
+		expected = nr_open_calls + cpu;
+		if (evsel->counts->cpu[cpu].val != expected) {
+			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %Ld\n",
+				 expected, cpu, evsel->counts->cpu[cpu].val);
+			goto out_close_fd;
+		}
+	}
+
+	err = 0;
+out_close_fd:
+	perf_evsel__close_fd(evsel, 1, threads->nr);
+out_evsel_delete:
+	perf_evsel__delete(evsel);
+out_cpu_free:
+	CPU_FREE(cpu_set);
+out_thread_map_delete:
+	thread_map__delete(threads);
+	return err;
+}
+
 static struct test {
 	const char *desc;
 	int (*func)(void);
@@ -334,6 +440,10 @@ static struct test {
 		.func = test__open_syscall_event,
 	},
 	{
+		.desc = "detect open syscall event on all cpus",
+		.func = test__open_syscall_event_on_all_cpus,
+	},
+	{
 		.func = NULL,
 	},
 };