author		Arnaldo Carvalho de Melo <acme@redhat.com>	2011-01-04 08:55:27 -0500
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2011-01-10 19:03:26 -0500
commit		0252208eb52f6fe8731a47804eddc7ba93f60a87 (patch)
tree		deaddeda57a630a1d6813ea565fc972c699e2906 /tools
parent		12f7e0364375ba1ba55abcc5ac082b68fb526c80 (diff)
perf evsel: Support perf_evsel__open(cpus > 1 && threads > 1)
And a test for it:

  [acme@felicio linux]$ perf test
   1: vmlinux symtab matches kallsyms: Ok
   2: detect open syscall event: Ok
   3: detect open syscall event on all cpus: Ok
  [acme@felicio linux]$

Translated from the C, the test:

1. Generates a different number of open syscalls on each CPU by using
   sched_setaffinity.
2. Verifies that the expected number of events is counted on each CPU.

It works as expected.

LKML-Reference: <new-submission>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
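The heart of the test is the affinity walk: pin the current thread to one CPU at a time with sched_setaffinity(2) and issue a known, per-CPU-unique number of open(2) calls there. Below is a minimal standalone sketch of that technique, independent of the perf test harness; the file name and call counts simply mirror the ones the test uses.

	#define _GNU_SOURCE
	#include <sched.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
		size_t size = CPU_ALLOC_SIZE(ncpus);
		cpu_set_t *set = CPU_ALLOC(ncpus);
		int cpu;
		unsigned int i;

		if (set == NULL)
			return 1;

		for (cpu = 0; cpu < ncpus; cpu++) {
			CPU_ZERO_S(size, set);
			CPU_SET_S(cpu, size, set);
			if (sched_setaffinity(0, size, set) < 0)
				continue;	/* CPU may be offline */
			/* 111 + cpu calls, so every CPU gets a unique count */
			for (i = 0; i < 111 + cpu; i++)
				close(open("/etc/passwd", O_RDONLY));
		}

		CPU_FREE(set);
		return 0;
	}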
Diffstat (limited to 'tools')
-rw-r--r--  tools/perf/builtin-test.c | 110
-rw-r--r--  tools/perf/util/evsel.c   |  82
2 files changed, 159 insertions(+), 33 deletions(-)
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index e12753f976a1..ed5696198d3d 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -234,6 +234,7 @@ out:
 	return err;
 }
 
+#include "util/cpumap.h"
 #include "util/evsel.h"
 #include <sys/types.h>
 
@@ -321,6 +322,111 @@ out_thread_map_delete:
 	return err;
 }
 
+#include <sched.h>
+
+static int test__open_syscall_event_on_all_cpus(void)
+{
+	int err = -1, fd, cpu;
+	struct thread_map *threads;
+	struct cpu_map *cpus;
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+	unsigned int nr_open_calls = 111, i;
+	cpu_set_t *cpu_set;
+	size_t cpu_set_size;
+	int id = trace_event__id("sys_enter_open");
+
+	if (id < 0) {
+		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+		return -1;
+	}
+
+	threads = thread_map__new(-1, getpid());
+	if (threads == NULL) {
+		pr_debug("thread_map__new\n");
+		return -1;
+	}
+
+	cpus = cpu_map__new(NULL);
+	if (cpus == NULL) {
+		pr_debug("cpu_map__new\n");
+		goto out_thread_map_delete;
+	}
+
+	cpu_set = CPU_ALLOC(cpus->nr);
+
+	if (cpu_set == NULL)
+		goto out_thread_map_delete;
+
+	cpu_set_size = CPU_ALLOC_SIZE(cpus->nr);
+	CPU_ZERO_S(cpu_set_size, cpu_set);
+
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.config = id;
+	evsel = perf_evsel__new(&attr, 0);
+	if (evsel == NULL) {
+		pr_debug("perf_evsel__new\n");
+		goto out_cpu_free;
+	}
+
+	if (perf_evsel__open(evsel, cpus, threads) < 0) {
+		pr_debug("failed to open counter: %s, "
+			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+			 strerror(errno));
+		goto out_evsel_delete;
+	}
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int ncalls = nr_open_calls + cpu;
+
+		CPU_SET(cpu, cpu_set);
+		sched_setaffinity(0, cpu_set_size, cpu_set);
+		for (i = 0; i < ncalls; ++i) {
+			fd = open("/etc/passwd", O_RDONLY);
+			close(fd);
+		}
+		CPU_CLR(cpu, cpu_set);
+	}
+
+	/*
+	 * We need to explicitly preallocate the counts: if we relied on
+	 * auto allocation it would size the array for just one CPU,
+	 * because the reads start at cpu 0.
+	 */
+	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
+		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
+		goto out_close_fd;
+	}
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int expected;
+
+		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
+			pr_debug("perf_evsel__read_on_cpu\n");
+			goto out_close_fd;
+		}
+
+		expected = nr_open_calls + cpu;
+		if (evsel->counts->cpu[cpu].val != expected) {
+			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %Ld\n",
+				 expected, cpu, evsel->counts->cpu[cpu].val);
+			goto out_close_fd;
+		}
+	}
+
+	err = 0;
+out_close_fd:
+	perf_evsel__close_fd(evsel, cpus->nr, threads->nr);
+out_evsel_delete:
+	perf_evsel__delete(evsel);
+out_cpu_free:
+	CPU_FREE(cpu_set);
+out_thread_map_delete:
+	thread_map__delete(threads);
+	return err;
+}
+
 static struct test {
 	const char *desc;
 	int (*func)(void);
@@ -334,6 +440,10 @@ static struct test {
 		.func = test__open_syscall_event,
 	},
 	{
+		.desc = "detect open syscall event on all cpus",
+		.func = test__open_syscall_event_on_all_cpus,
+	},
+	{
 		.func = NULL,
 	},
 };
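For context on the table change above: perf test walks a NULL-terminated array of struct test entries, so a new test only needs a .desc/.func pair slotted in before the terminator. A sketch of that dispatch pattern, not the literal loop in builtin-test.c:

	#include <stdio.h>

	struct test {
		const char *desc;
		int (*func)(void);
	};

	/* Run entries in order until the NULL .func terminator is hit */
	static int run_tests(const struct test *tests)
	{
		int i, err = 0;

		for (i = 0; tests[i].func != NULL; i++) {
			printf("%2d: %s: ", i + 1, tests[i].desc);
			if (tests[i].func() == 0) {
				printf("Ok\n");
			} else {
				printf("FAILED!\n");
				err = -1;
			}
		}
		return err;
	}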
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 1a5591d7a245..f5cfed60af98 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -127,59 +127,75 @@ int __perf_evsel__read(struct perf_evsel *evsel,
 	return 0;
 }
 
-int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+			      struct thread_map *threads)
 {
-	int cpu;
+	int cpu, thread;
 
-	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
+	if (evsel->fd == NULL &&
+	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
 		return -1;
 
 	for (cpu = 0; cpu < cpus->nr; cpu++) {
-		FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
-							cpus->map[cpu], -1, 0);
-		if (FD(evsel, cpu, 0) < 0)
-			goto out_close;
+		for (thread = 0; thread < threads->nr; thread++) {
+			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
+								     threads->map[thread],
+								     cpus->map[cpu], -1, 0);
+			if (FD(evsel, cpu, thread) < 0)
+				goto out_close;
+		}
 	}
 
 	return 0;
 
 out_close:
-	while (--cpu >= 0) {
-		close(FD(evsel, cpu, 0));
-		FD(evsel, cpu, 0) = -1;
-	}
+	do {
+		while (--thread >= 0) {
+			close(FD(evsel, cpu, thread));
+			FD(evsel, cpu, thread) = -1;
+		}
+		thread = threads->nr;
+	} while (--cpu >= 0);
 	return -1;
 }
 
-int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
+static struct {
+	struct cpu_map map;
+	int cpus[1];
+} empty_cpu_map = {
+	.map.nr = 1,
+	.cpus = { -1, },
+};
+
+static struct {
+	struct thread_map map;
+	int threads[1];
+} empty_thread_map = {
+	.map.nr = 1,
+	.threads = { -1, },
+};
+
+int perf_evsel__open(struct perf_evsel *evsel,
+		     struct cpu_map *cpus, struct thread_map *threads)
 {
-	int thread;
-
-	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, 1, threads->nr))
-		return -1;
 
-	for (thread = 0; thread < threads->nr; thread++) {
-		FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
-							   threads->map[thread], -1, -1, 0);
-		if (FD(evsel, 0, thread) < 0)
-			goto out_close;
+	if (cpus == NULL) {
+		/* Work around old compiler warnings about strict aliasing */
+		cpus = &empty_cpu_map.map;
 	}
 
-	return 0;
+	if (threads == NULL)
+		threads = &empty_thread_map.map;
 
-out_close:
-	while (--thread >= 0) {
-		close(FD(evsel, 0, thread));
-		FD(evsel, 0, thread) = -1;
-	}
-	return -1;
+	return __perf_evsel__open(evsel, cpus, threads);
 }
 
-int perf_evsel__open(struct perf_evsel *evsel,
-		     struct cpu_map *cpus, struct thread_map *threads)
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
 {
-	if (threads == NULL)
-		return perf_evsel__open_per_cpu(evsel, cpus);
+	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
+}
 
-	return perf_evsel__open_per_thread(evsel, threads);
+int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
+{
+	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
 }
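After this refactor, perf_evsel__open() handles every combination by substituting a one-entry "empty" map (a single -1, meaning "any") whenever a map is NULL, and the per-cpu/per-thread variants become thin wrappers. A hedged usage sketch, assuming the perf-internal headers from this tree (tools/perf build context only):

	#include <unistd.h>
	#include "util/evsel.h"
	#include "util/cpumap.h"
	#include "util/thread_map.h"

	/*
	 * Open one counter per (CPU, thread) pair for the current process
	 * on all online CPUs; map teardown is left to the caller.
	 */
	static int open_counters_for_self(struct perf_evsel *evsel)
	{
		struct cpu_map *cpus = cpu_map__new(NULL);	/* all online CPUs */
		struct thread_map *threads = thread_map__new(-1, getpid());

		if (cpus == NULL || threads == NULL)
			return -1;

		/*
		 * The fd matrix is now cpus->nr x threads->nr. Passing NULL
		 * for either map instead would substitute the one-entry
		 * empty map (-1 == "any"), which is exactly what the
		 * per-cpu/per-thread wrappers do.
		 */
		return perf_evsel__open(evsel, cpus, threads);
	}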