diff options
author | Jiri Olsa <jolsa@redhat.com> | 2012-10-30 18:01:43 -0400 |
---|---|---|
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2012-10-31 14:19:19 -0400 |
commit | 945aea220bb8f4bb37950549cc0b93bbec24c460 (patch) | |
tree | 3eff78bd0339f33db881feed0a58287ad1e96e93 /tools/perf/tests | |
parent | c77d8d7030128e61e206658815b96a6befed9d06 (diff) |
perf tests: Move test objects into 'tests' directory
Separating test objects into 'tests' directory.
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1351634526-1516-3-git-send-email-jolsa@redhat.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/tests')
-rw-r--r-- | tools/perf/tests/builtin-test.c | 1559 | ||||
-rw-r--r-- | tools/perf/tests/dso-data.c | 153 | ||||
-rw-r--r-- | tools/perf/tests/parse-events.c | 1116 |
3 files changed, 2828 insertions, 0 deletions
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c new file mode 100644 index 000000000000..f6c642415c44 --- /dev/null +++ b/tools/perf/tests/builtin-test.c | |||
@@ -0,0 +1,1559 @@ | |||
1 | /* | ||
2 | * builtin-test.c | ||
3 | * | ||
4 | * Builtin regression testing command: ever growing number of sanity tests | ||
5 | */ | ||
6 | #include "builtin.h" | ||
7 | |||
8 | #include "util/cache.h" | ||
9 | #include "util/color.h" | ||
10 | #include "util/debug.h" | ||
11 | #include "util/debugfs.h" | ||
12 | #include "util/evlist.h" | ||
13 | #include "util/parse-options.h" | ||
14 | #include "util/parse-events.h" | ||
15 | #include "util/symbol.h" | ||
16 | #include "util/thread_map.h" | ||
17 | #include "util/pmu.h" | ||
18 | #include "event-parse.h" | ||
19 | #include "../../include/linux/hw_breakpoint.h" | ||
20 | |||
21 | #include <sys/mman.h> | ||
22 | |||
/*
 * Symbol filter used while loading the vmlinux image: marks the symbol's
 * per-symbol private area (a bool, accessed via symbol__priv()) as visited.
 * Always returns 0, i.e. no symbol is ever filtered out.
 */
static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
					   struct symbol *sym)
{
	bool *visited = symbol__priv(sym);
	*visited = true;
	return 0;
}
30 | |||
/*
 * Sanity test: load the kernel symbol table twice -- once from
 * /proc/kallsyms and once from a vmlinux image matching the running
 * kernel -- then cross-check the two symbol tables (names, start/end
 * addresses) and the module maps in both directions.
 *
 * Returns 0 when everything matches, -1 on any mismatch or setup failure.
 */
static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold kernel, modules obtained from
	 * both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("dso__load_kallsyms ");
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally on demand sorted by name so that we can
	 * find the reference relocation symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);

	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("dso__find_symbol_by_name ");
		goto out;
	}

	ref_reloc_sym.addr = sym->start;

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that look if we find the ref reloc symbol, if we find it
	 * we'll have its ref_reloc_symbol.unrelocated_addr and then
	 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
	 * to fixup the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all of them
	 * in the kallsyms dso. For the ones that are in both, check its names and
	 * end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		/* Skip zero-sized symbols. */
		if (sym->start == sym->end)
			continue;

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms don't have the symbol end, so we
				 * set that by using the next symbol start - 1,
				 * in some cases we get this up to a page
				 * wrong, trace_kmalloc when I was developing
				 * this code was one such example, 2106 bytes
				 * off the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;
				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
				/*
				 * Same start address, different name: walk the
				 * neighbouring kallsyms entries (first backwards,
				 * then forwards from the first match) looking for
				 * an alias at the same address.
				 */
				struct rb_node *nnd;
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

		err = -1;
	}

	if (!verbose)
		goto out;

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
		 * the kernel will have the path for the vmlinux file being used,
		 * so use the short name, less descriptive but the same ("[kernel]" in
		 * both cases).
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;
		else
			map__fprintf(pos, stderr);
	}

	pr_info("Maps in vmlinux with a different name in kallsyms:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pair->priv = 1;
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
			pair->priv = 1;
		}
	}

	pr_info("Maps only in kallsyms:\n");

	for (nd = rb_first(&kallsyms.kmaps.maps[type]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		/* ->priv was set above for maps also present in vmlinux. */
		if (!pos->priv)
			map__fprintf(pos, stderr);
	}
out:
	return err;
}
243 | |||
244 | #include "util/cpumap.h" | ||
245 | #include "util/evsel.h" | ||
246 | #include <sys/types.h> | ||
247 | |||
248 | static int trace_event__id(const char *evname) | ||
249 | { | ||
250 | char *filename; | ||
251 | int err = -1, fd; | ||
252 | |||
253 | if (asprintf(&filename, | ||
254 | "%s/syscalls/%s/id", | ||
255 | tracing_events_path, evname) < 0) | ||
256 | return -1; | ||
257 | |||
258 | fd = open(filename, O_RDONLY); | ||
259 | if (fd >= 0) { | ||
260 | char id[16]; | ||
261 | if (read(fd, id, sizeof(id)) > 0) | ||
262 | err = atoi(id); | ||
263 | close(fd); | ||
264 | } | ||
265 | |||
266 | free(filename); | ||
267 | return err; | ||
268 | } | ||
269 | |||
/*
 * Counts open(2) syscalls made by the current thread via the
 * sys_enter_open tracepoint and verifies the kernel counter matches the
 * number of calls actually issued (111).
 *
 * Returns 0 on success, -1 on any setup or verification failure.
 */
static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	/* Monitor only the current process (pid from getpid()). */
	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	/* Generate exactly nr_open_calls open(2) syscalls. */
	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
331 | |||
332 | #include <sched.h> | ||
333 | |||
334 | static int test__open_syscall_event_on_all_cpus(void) | ||
335 | { | ||
336 | int err = -1, fd, cpu; | ||
337 | struct thread_map *threads; | ||
338 | struct cpu_map *cpus; | ||
339 | struct perf_evsel *evsel; | ||
340 | struct perf_event_attr attr; | ||
341 | unsigned int nr_open_calls = 111, i; | ||
342 | cpu_set_t cpu_set; | ||
343 | int id = trace_event__id("sys_enter_open"); | ||
344 | |||
345 | if (id < 0) { | ||
346 | pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); | ||
347 | return -1; | ||
348 | } | ||
349 | |||
350 | threads = thread_map__new(-1, getpid(), UINT_MAX); | ||
351 | if (threads == NULL) { | ||
352 | pr_debug("thread_map__new\n"); | ||
353 | return -1; | ||
354 | } | ||
355 | |||
356 | cpus = cpu_map__new(NULL); | ||
357 | if (cpus == NULL) { | ||
358 | pr_debug("cpu_map__new\n"); | ||
359 | goto out_thread_map_delete; | ||
360 | } | ||
361 | |||
362 | |||
363 | CPU_ZERO(&cpu_set); | ||
364 | |||
365 | memset(&attr, 0, sizeof(attr)); | ||
366 | attr.type = PERF_TYPE_TRACEPOINT; | ||
367 | attr.config = id; | ||
368 | evsel = perf_evsel__new(&attr, 0); | ||
369 | if (evsel == NULL) { | ||
370 | pr_debug("perf_evsel__new\n"); | ||
371 | goto out_thread_map_delete; | ||
372 | } | ||
373 | |||
374 | if (perf_evsel__open(evsel, cpus, threads) < 0) { | ||
375 | pr_debug("failed to open counter: %s, " | ||
376 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | ||
377 | strerror(errno)); | ||
378 | goto out_evsel_delete; | ||
379 | } | ||
380 | |||
381 | for (cpu = 0; cpu < cpus->nr; ++cpu) { | ||
382 | unsigned int ncalls = nr_open_calls + cpu; | ||
383 | /* | ||
384 | * XXX eventually lift this restriction in a way that | ||
385 | * keeps perf building on older glibc installations | ||
386 | * without CPU_ALLOC. 1024 cpus in 2010 still seems | ||
387 | * a reasonable upper limit tho :-) | ||
388 | */ | ||
389 | if (cpus->map[cpu] >= CPU_SETSIZE) { | ||
390 | pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); | ||
391 | continue; | ||
392 | } | ||
393 | |||
394 | CPU_SET(cpus->map[cpu], &cpu_set); | ||
395 | if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { | ||
396 | pr_debug("sched_setaffinity() failed on CPU %d: %s ", | ||
397 | cpus->map[cpu], | ||
398 | strerror(errno)); | ||
399 | goto out_close_fd; | ||
400 | } | ||
401 | for (i = 0; i < ncalls; ++i) { | ||
402 | fd = open("/etc/passwd", O_RDONLY); | ||
403 | close(fd); | ||
404 | } | ||
405 | CPU_CLR(cpus->map[cpu], &cpu_set); | ||
406 | } | ||
407 | |||
408 | /* | ||
409 | * Here we need to explicitely preallocate the counts, as if | ||
410 | * we use the auto allocation it will allocate just for 1 cpu, | ||
411 | * as we start by cpu 0. | ||
412 | */ | ||
413 | if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) { | ||
414 | pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr); | ||
415 | goto out_close_fd; | ||
416 | } | ||
417 | |||
418 | err = 0; | ||
419 | |||
420 | for (cpu = 0; cpu < cpus->nr; ++cpu) { | ||
421 | unsigned int expected; | ||
422 | |||
423 | if (cpus->map[cpu] >= CPU_SETSIZE) | ||
424 | continue; | ||
425 | |||
426 | if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) { | ||
427 | pr_debug("perf_evsel__read_on_cpu\n"); | ||
428 | err = -1; | ||
429 | break; | ||
430 | } | ||
431 | |||
432 | expected = nr_open_calls + cpu; | ||
433 | if (evsel->counts->cpu[cpu].val != expected) { | ||
434 | pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n", | ||
435 | expected, cpus->map[cpu], evsel->counts->cpu[cpu].val); | ||
436 | err = -1; | ||
437 | } | ||
438 | } | ||
439 | |||
440 | out_close_fd: | ||
441 | perf_evsel__close_fd(evsel, 1, threads->nr); | ||
442 | out_evsel_delete: | ||
443 | perf_evsel__delete(evsel); | ||
444 | out_thread_map_delete: | ||
445 | thread_map__delete(threads); | ||
446 | return err; | ||
447 | } | ||
448 | |||
449 | /* | ||
450 | * This test will generate random numbers of calls to some getpid syscalls, | ||
451 | * then establish an mmap for a group of events that are created to monitor | ||
452 | * the syscalls. | ||
453 | * | ||
454 | * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated | ||
455 | * sample.id field to map back to its respective perf_evsel instance. | ||
456 | * | ||
457 | * Then it checks if the number of syscalls reported as perf events by | ||
458 | * the kernel corresponds to the number of syscalls made. | ||
459 | */ | ||
460 | static int test__basic_mmap(void) | ||
461 | { | ||
462 | int err = -1; | ||
463 | union perf_event *event; | ||
464 | struct thread_map *threads; | ||
465 | struct cpu_map *cpus; | ||
466 | struct perf_evlist *evlist; | ||
467 | struct perf_event_attr attr = { | ||
468 | .type = PERF_TYPE_TRACEPOINT, | ||
469 | .read_format = PERF_FORMAT_ID, | ||
470 | .sample_type = PERF_SAMPLE_ID, | ||
471 | .watermark = 0, | ||
472 | }; | ||
473 | cpu_set_t cpu_set; | ||
474 | const char *syscall_names[] = { "getsid", "getppid", "getpgrp", | ||
475 | "getpgid", }; | ||
476 | pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp, | ||
477 | (void*)getpgid }; | ||
478 | #define nsyscalls ARRAY_SIZE(syscall_names) | ||
479 | int ids[nsyscalls]; | ||
480 | unsigned int nr_events[nsyscalls], | ||
481 | expected_nr_events[nsyscalls], i, j; | ||
482 | struct perf_evsel *evsels[nsyscalls], *evsel; | ||
483 | |||
484 | for (i = 0; i < nsyscalls; ++i) { | ||
485 | char name[64]; | ||
486 | |||
487 | snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); | ||
488 | ids[i] = trace_event__id(name); | ||
489 | if (ids[i] < 0) { | ||
490 | pr_debug("Is debugfs mounted on /sys/kernel/debug?\n"); | ||
491 | return -1; | ||
492 | } | ||
493 | nr_events[i] = 0; | ||
494 | expected_nr_events[i] = random() % 257; | ||
495 | } | ||
496 | |||
497 | threads = thread_map__new(-1, getpid(), UINT_MAX); | ||
498 | if (threads == NULL) { | ||
499 | pr_debug("thread_map__new\n"); | ||
500 | return -1; | ||
501 | } | ||
502 | |||
503 | cpus = cpu_map__new(NULL); | ||
504 | if (cpus == NULL) { | ||
505 | pr_debug("cpu_map__new\n"); | ||
506 | goto out_free_threads; | ||
507 | } | ||
508 | |||
509 | CPU_ZERO(&cpu_set); | ||
510 | CPU_SET(cpus->map[0], &cpu_set); | ||
511 | sched_setaffinity(0, sizeof(cpu_set), &cpu_set); | ||
512 | if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { | ||
513 | pr_debug("sched_setaffinity() failed on CPU %d: %s ", | ||
514 | cpus->map[0], strerror(errno)); | ||
515 | goto out_free_cpus; | ||
516 | } | ||
517 | |||
518 | evlist = perf_evlist__new(cpus, threads); | ||
519 | if (evlist == NULL) { | ||
520 | pr_debug("perf_evlist__new\n"); | ||
521 | goto out_free_cpus; | ||
522 | } | ||
523 | |||
524 | /* anonymous union fields, can't be initialized above */ | ||
525 | attr.wakeup_events = 1; | ||
526 | attr.sample_period = 1; | ||
527 | |||
528 | for (i = 0; i < nsyscalls; ++i) { | ||
529 | attr.config = ids[i]; | ||
530 | evsels[i] = perf_evsel__new(&attr, i); | ||
531 | if (evsels[i] == NULL) { | ||
532 | pr_debug("perf_evsel__new\n"); | ||
533 | goto out_free_evlist; | ||
534 | } | ||
535 | |||
536 | perf_evlist__add(evlist, evsels[i]); | ||
537 | |||
538 | if (perf_evsel__open(evsels[i], cpus, threads) < 0) { | ||
539 | pr_debug("failed to open counter: %s, " | ||
540 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | ||
541 | strerror(errno)); | ||
542 | goto out_close_fd; | ||
543 | } | ||
544 | } | ||
545 | |||
546 | if (perf_evlist__mmap(evlist, 128, true) < 0) { | ||
547 | pr_debug("failed to mmap events: %d (%s)\n", errno, | ||
548 | strerror(errno)); | ||
549 | goto out_close_fd; | ||
550 | } | ||
551 | |||
552 | for (i = 0; i < nsyscalls; ++i) | ||
553 | for (j = 0; j < expected_nr_events[i]; ++j) { | ||
554 | int foo = syscalls[i](); | ||
555 | ++foo; | ||
556 | } | ||
557 | |||
558 | while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { | ||
559 | struct perf_sample sample; | ||
560 | |||
561 | if (event->header.type != PERF_RECORD_SAMPLE) { | ||
562 | pr_debug("unexpected %s event\n", | ||
563 | perf_event__name(event->header.type)); | ||
564 | goto out_munmap; | ||
565 | } | ||
566 | |||
567 | err = perf_evlist__parse_sample(evlist, event, &sample); | ||
568 | if (err) { | ||
569 | pr_err("Can't parse sample, err = %d\n", err); | ||
570 | goto out_munmap; | ||
571 | } | ||
572 | |||
573 | evsel = perf_evlist__id2evsel(evlist, sample.id); | ||
574 | if (evsel == NULL) { | ||
575 | pr_debug("event with id %" PRIu64 | ||
576 | " doesn't map to an evsel\n", sample.id); | ||
577 | goto out_munmap; | ||
578 | } | ||
579 | nr_events[evsel->idx]++; | ||
580 | } | ||
581 | |||
582 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
583 | if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { | ||
584 | pr_debug("expected %d %s events, got %d\n", | ||
585 | expected_nr_events[evsel->idx], | ||
586 | perf_evsel__name(evsel), nr_events[evsel->idx]); | ||
587 | goto out_munmap; | ||
588 | } | ||
589 | } | ||
590 | |||
591 | err = 0; | ||
592 | out_munmap: | ||
593 | perf_evlist__munmap(evlist); | ||
594 | out_close_fd: | ||
595 | for (i = 0; i < nsyscalls; ++i) | ||
596 | perf_evsel__close_fd(evsels[i], 1, threads->nr); | ||
597 | out_free_evlist: | ||
598 | perf_evlist__delete(evlist); | ||
599 | out_free_cpus: | ||
600 | cpu_map__delete(cpus); | ||
601 | out_free_threads: | ||
602 | thread_map__delete(threads); | ||
603 | return err; | ||
604 | #undef nsyscalls | ||
605 | } | ||
606 | |||
/*
 * Find the first CPU in @pid's affinity mask.  On success returns that
 * CPU number and hands the caller (via @maskp/@sizep) a CPU_ALLOC()ed
 * mask with ONLY that CPU set -- caller must CPU_FREE() it.  The mask is
 * grown and retried while sched_getaffinity() reports EINVAL, up to
 * 1024 << 8 CPUs.  Returns -1 on failure (nothing is handed out).
 */
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
					 size_t *sizep)
{
	cpu_set_t *mask;
	size_t size;
	int i, cpu = -1, nrcpus = 1024;
realloc:
	mask = CPU_ALLOC(nrcpus);
	if (mask == NULL) {
		/* Fix vs. original: CPU_ALLOC() failure was not checked. */
		perror("CPU_ALLOC");
		return -1;
	}
	size = CPU_ALLOC_SIZE(nrcpus);
	CPU_ZERO_S(size, mask);

	if (sched_getaffinity(pid, size, mask) == -1) {
		CPU_FREE(mask);
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	/* Keep the first set CPU, clear every other one. */
	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET_S(i, size, mask)) {
			if (cpu == -1) {
				cpu = i;
				*maskp = mask;
				*sizep = size;
			} else
				CPU_CLR_S(i, size, mask);
		}
	}

	if (cpu == -1)
		CPU_FREE(mask);

	return cpu;
}
644 | |||
645 | static int test__PERF_RECORD(void) | ||
646 | { | ||
647 | struct perf_record_opts opts = { | ||
648 | .target = { | ||
649 | .uid = UINT_MAX, | ||
650 | .uses_mmap = true, | ||
651 | }, | ||
652 | .no_delay = true, | ||
653 | .freq = 10, | ||
654 | .mmap_pages = 256, | ||
655 | }; | ||
656 | cpu_set_t *cpu_mask = NULL; | ||
657 | size_t cpu_mask_size = 0; | ||
658 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
659 | struct perf_evsel *evsel; | ||
660 | struct perf_sample sample; | ||
661 | const char *cmd = "sleep"; | ||
662 | const char *argv[] = { cmd, "1", NULL, }; | ||
663 | char *bname; | ||
664 | u64 prev_time = 0; | ||
665 | bool found_cmd_mmap = false, | ||
666 | found_libc_mmap = false, | ||
667 | found_vdso_mmap = false, | ||
668 | found_ld_mmap = false; | ||
669 | int err = -1, errs = 0, i, wakeups = 0; | ||
670 | u32 cpu; | ||
671 | int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; | ||
672 | |||
673 | if (evlist == NULL || argv == NULL) { | ||
674 | pr_debug("Not enough memory to create evlist\n"); | ||
675 | goto out; | ||
676 | } | ||
677 | |||
678 | /* | ||
679 | * We need at least one evsel in the evlist, use the default | ||
680 | * one: "cycles". | ||
681 | */ | ||
682 | err = perf_evlist__add_default(evlist); | ||
683 | if (err < 0) { | ||
684 | pr_debug("Not enough memory to create evsel\n"); | ||
685 | goto out_delete_evlist; | ||
686 | } | ||
687 | |||
688 | /* | ||
689 | * Create maps of threads and cpus to monitor. In this case | ||
690 | * we start with all threads and cpus (-1, -1) but then in | ||
691 | * perf_evlist__prepare_workload we'll fill in the only thread | ||
692 | * we're monitoring, the one forked there. | ||
693 | */ | ||
694 | err = perf_evlist__create_maps(evlist, &opts.target); | ||
695 | if (err < 0) { | ||
696 | pr_debug("Not enough memory to create thread/cpu maps\n"); | ||
697 | goto out_delete_evlist; | ||
698 | } | ||
699 | |||
700 | /* | ||
701 | * Prepare the workload in argv[] to run, it'll fork it, and then wait | ||
702 | * for perf_evlist__start_workload() to exec it. This is done this way | ||
703 | * so that we have time to open the evlist (calling sys_perf_event_open | ||
704 | * on all the fds) and then mmap them. | ||
705 | */ | ||
706 | err = perf_evlist__prepare_workload(evlist, &opts, argv); | ||
707 | if (err < 0) { | ||
708 | pr_debug("Couldn't run the workload!\n"); | ||
709 | goto out_delete_evlist; | ||
710 | } | ||
711 | |||
712 | /* | ||
713 | * Config the evsels, setting attr->comm on the first one, etc. | ||
714 | */ | ||
715 | evsel = perf_evlist__first(evlist); | ||
716 | evsel->attr.sample_type |= PERF_SAMPLE_CPU; | ||
717 | evsel->attr.sample_type |= PERF_SAMPLE_TID; | ||
718 | evsel->attr.sample_type |= PERF_SAMPLE_TIME; | ||
719 | perf_evlist__config_attrs(evlist, &opts); | ||
720 | |||
721 | err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask, | ||
722 | &cpu_mask_size); | ||
723 | if (err < 0) { | ||
724 | pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); | ||
725 | goto out_delete_evlist; | ||
726 | } | ||
727 | |||
728 | cpu = err; | ||
729 | |||
730 | /* | ||
731 | * So that we can check perf_sample.cpu on all the samples. | ||
732 | */ | ||
733 | if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) { | ||
734 | pr_debug("sched_setaffinity: %s\n", strerror(errno)); | ||
735 | goto out_free_cpu_mask; | ||
736 | } | ||
737 | |||
738 | /* | ||
739 | * Call sys_perf_event_open on all the fds on all the evsels, | ||
740 | * grouping them if asked to. | ||
741 | */ | ||
742 | err = perf_evlist__open(evlist); | ||
743 | if (err < 0) { | ||
744 | pr_debug("perf_evlist__open: %s\n", strerror(errno)); | ||
745 | goto out_delete_evlist; | ||
746 | } | ||
747 | |||
748 | /* | ||
749 | * mmap the first fd on a given CPU and ask for events for the other | ||
750 | * fds in the same CPU to be injected in the same mmap ring buffer | ||
751 | * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)). | ||
752 | */ | ||
753 | err = perf_evlist__mmap(evlist, opts.mmap_pages, false); | ||
754 | if (err < 0) { | ||
755 | pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); | ||
756 | goto out_delete_evlist; | ||
757 | } | ||
758 | |||
759 | /* | ||
760 | * Now that all is properly set up, enable the events, they will | ||
761 | * count just on workload.pid, which will start... | ||
762 | */ | ||
763 | perf_evlist__enable(evlist); | ||
764 | |||
765 | /* | ||
766 | * Now! | ||
767 | */ | ||
768 | perf_evlist__start_workload(evlist); | ||
769 | |||
770 | while (1) { | ||
771 | int before = total_events; | ||
772 | |||
773 | for (i = 0; i < evlist->nr_mmaps; i++) { | ||
774 | union perf_event *event; | ||
775 | |||
776 | while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { | ||
777 | const u32 type = event->header.type; | ||
778 | const char *name = perf_event__name(type); | ||
779 | |||
780 | ++total_events; | ||
781 | if (type < PERF_RECORD_MAX) | ||
782 | nr_events[type]++; | ||
783 | |||
784 | err = perf_evlist__parse_sample(evlist, event, &sample); | ||
785 | if (err < 0) { | ||
786 | if (verbose) | ||
787 | perf_event__fprintf(event, stderr); | ||
788 | pr_debug("Couldn't parse sample\n"); | ||
789 | goto out_err; | ||
790 | } | ||
791 | |||
792 | if (verbose) { | ||
793 | pr_info("%" PRIu64" %d ", sample.time, sample.cpu); | ||
794 | perf_event__fprintf(event, stderr); | ||
795 | } | ||
796 | |||
797 | if (prev_time > sample.time) { | ||
798 | pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n", | ||
799 | name, prev_time, sample.time); | ||
800 | ++errs; | ||
801 | } | ||
802 | |||
803 | prev_time = sample.time; | ||
804 | |||
805 | if (sample.cpu != cpu) { | ||
806 | pr_debug("%s with unexpected cpu, expected %d, got %d\n", | ||
807 | name, cpu, sample.cpu); | ||
808 | ++errs; | ||
809 | } | ||
810 | |||
811 | if ((pid_t)sample.pid != evlist->workload.pid) { | ||
812 | pr_debug("%s with unexpected pid, expected %d, got %d\n", | ||
813 | name, evlist->workload.pid, sample.pid); | ||
814 | ++errs; | ||
815 | } | ||
816 | |||
817 | if ((pid_t)sample.tid != evlist->workload.pid) { | ||
818 | pr_debug("%s with unexpected tid, expected %d, got %d\n", | ||
819 | name, evlist->workload.pid, sample.tid); | ||
820 | ++errs; | ||
821 | } | ||
822 | |||
823 | if ((type == PERF_RECORD_COMM || | ||
824 | type == PERF_RECORD_MMAP || | ||
825 | type == PERF_RECORD_FORK || | ||
826 | type == PERF_RECORD_EXIT) && | ||
827 | (pid_t)event->comm.pid != evlist->workload.pid) { | ||
828 | pr_debug("%s with unexpected pid/tid\n", name); | ||
829 | ++errs; | ||
830 | } | ||
831 | |||
832 | if ((type == PERF_RECORD_COMM || | ||
833 | type == PERF_RECORD_MMAP) && | ||
834 | event->comm.pid != event->comm.tid) { | ||
835 | pr_debug("%s with different pid/tid!\n", name); | ||
836 | ++errs; | ||
837 | } | ||
838 | |||
839 | switch (type) { | ||
840 | case PERF_RECORD_COMM: | ||
841 | if (strcmp(event->comm.comm, cmd)) { | ||
842 | pr_debug("%s with unexpected comm!\n", name); | ||
843 | ++errs; | ||
844 | } | ||
845 | break; | ||
846 | case PERF_RECORD_EXIT: | ||
847 | goto found_exit; | ||
848 | case PERF_RECORD_MMAP: | ||
849 | bname = strrchr(event->mmap.filename, '/'); | ||
850 | if (bname != NULL) { | ||
851 | if (!found_cmd_mmap) | ||
852 | found_cmd_mmap = !strcmp(bname + 1, cmd); | ||
853 | if (!found_libc_mmap) | ||
854 | found_libc_mmap = !strncmp(bname + 1, "libc", 4); | ||
855 | if (!found_ld_mmap) | ||
856 | found_ld_mmap = !strncmp(bname + 1, "ld", 2); | ||
857 | } else if (!found_vdso_mmap) | ||
858 | found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]"); | ||
859 | break; | ||
860 | |||
861 | case PERF_RECORD_SAMPLE: | ||
862 | /* Just ignore samples for now */ | ||
863 | break; | ||
864 | default: | ||
865 | pr_debug("Unexpected perf_event->header.type %d!\n", | ||
866 | type); | ||
867 | ++errs; | ||
868 | } | ||
869 | } | ||
870 | } | ||
871 | |||
872 | /* | ||
873 | * We don't use poll here because at least at 3.1 times the | ||
874 | * PERF_RECORD_{!SAMPLE} events don't honour | ||
875 | * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does. | ||
876 | */ | ||
877 | if (total_events == before && false) | ||
878 | poll(evlist->pollfd, evlist->nr_fds, -1); | ||
879 | |||
880 | sleep(1); | ||
881 | if (++wakeups > 5) { | ||
882 | pr_debug("No PERF_RECORD_EXIT event!\n"); | ||
883 | break; | ||
884 | } | ||
885 | } | ||
886 | |||
887 | found_exit: | ||
888 | if (nr_events[PERF_RECORD_COMM] > 1) { | ||
889 | pr_debug("Excessive number of PERF_RECORD_COMM events!\n"); | ||
890 | ++errs; | ||
891 | } | ||
892 | |||
893 | if (nr_events[PERF_RECORD_COMM] == 0) { | ||
894 | pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd); | ||
895 | ++errs; | ||
896 | } | ||
897 | |||
898 | if (!found_cmd_mmap) { | ||
899 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd); | ||
900 | ++errs; | ||
901 | } | ||
902 | |||
903 | if (!found_libc_mmap) { | ||
904 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc"); | ||
905 | ++errs; | ||
906 | } | ||
907 | |||
908 | if (!found_ld_mmap) { | ||
909 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld"); | ||
910 | ++errs; | ||
911 | } | ||
912 | |||
913 | if (!found_vdso_mmap) { | ||
914 | pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]"); | ||
915 | ++errs; | ||
916 | } | ||
917 | out_err: | ||
918 | perf_evlist__munmap(evlist); | ||
919 | out_free_cpu_mask: | ||
920 | CPU_FREE(cpu_mask); | ||
921 | out_delete_evlist: | ||
922 | perf_evlist__delete(evlist); | ||
923 | out: | ||
924 | return (err < 0 || errs > 0) ? -1 : 0; | ||
925 | } | ||
926 | |||
927 | |||
928 | #if defined(__x86_64__) || defined(__i386__) | ||
929 | |||
930 | #define barrier() asm volatile("" ::: "memory") | ||
931 | |||
932 | static u64 rdpmc(unsigned int counter) | ||
933 | { | ||
934 | unsigned int low, high; | ||
935 | |||
936 | asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter)); | ||
937 | |||
938 | return low | ((u64)high) << 32; | ||
939 | } | ||
940 | |||
941 | static u64 rdtsc(void) | ||
942 | { | ||
943 | unsigned int low, high; | ||
944 | |||
945 | asm volatile("rdtsc" : "=a" (low), "=d" (high)); | ||
946 | |||
947 | return low | ((u64)high) << 32; | ||
948 | } | ||
949 | |||
950 | static u64 mmap_read_self(void *addr) | ||
951 | { | ||
952 | struct perf_event_mmap_page *pc = addr; | ||
953 | u32 seq, idx, time_mult = 0, time_shift = 0; | ||
954 | u64 count, cyc = 0, time_offset = 0, enabled, running, delta; | ||
955 | |||
956 | do { | ||
957 | seq = pc->lock; | ||
958 | barrier(); | ||
959 | |||
960 | enabled = pc->time_enabled; | ||
961 | running = pc->time_running; | ||
962 | |||
963 | if (enabled != running) { | ||
964 | cyc = rdtsc(); | ||
965 | time_mult = pc->time_mult; | ||
966 | time_shift = pc->time_shift; | ||
967 | time_offset = pc->time_offset; | ||
968 | } | ||
969 | |||
970 | idx = pc->index; | ||
971 | count = pc->offset; | ||
972 | if (idx) | ||
973 | count += rdpmc(idx - 1); | ||
974 | |||
975 | barrier(); | ||
976 | } while (pc->lock != seq); | ||
977 | |||
978 | if (enabled != running) { | ||
979 | u64 quot, rem; | ||
980 | |||
981 | quot = (cyc >> time_shift); | ||
982 | rem = cyc & ((1 << time_shift) - 1); | ||
983 | delta = time_offset + quot * time_mult + | ||
984 | ((rem * time_mult) >> time_shift); | ||
985 | |||
986 | enabled += delta; | ||
987 | if (idx) | ||
988 | running += delta; | ||
989 | |||
990 | quot = count / running; | ||
991 | rem = count % running; | ||
992 | count = quot * enabled + (rem * enabled) / running; | ||
993 | } | ||
994 | |||
995 | return count; | ||
996 | } | ||
997 | |||
998 | /* | ||
999 | * If the RDPMC instruction faults then signal this back to the test parent task: | ||
1000 | */ | ||
1001 | static void segfault_handler(int sig __maybe_unused, | ||
1002 | siginfo_t *info __maybe_unused, | ||
1003 | void *uc __maybe_unused) | ||
1004 | { | ||
1005 | exit(-1); | ||
1006 | } | ||
1007 | |||
1008 | static int __test__rdpmc(void) | ||
1009 | { | ||
1010 | volatile int tmp = 0; | ||
1011 | u64 i, loops = 1000; | ||
1012 | int n; | ||
1013 | int fd; | ||
1014 | void *addr; | ||
1015 | struct perf_event_attr attr = { | ||
1016 | .type = PERF_TYPE_HARDWARE, | ||
1017 | .config = PERF_COUNT_HW_INSTRUCTIONS, | ||
1018 | .exclude_kernel = 1, | ||
1019 | }; | ||
1020 | u64 delta_sum = 0; | ||
1021 | struct sigaction sa; | ||
1022 | |||
1023 | sigfillset(&sa.sa_mask); | ||
1024 | sa.sa_sigaction = segfault_handler; | ||
1025 | sigaction(SIGSEGV, &sa, NULL); | ||
1026 | |||
1027 | fd = sys_perf_event_open(&attr, 0, -1, -1, 0); | ||
1028 | if (fd < 0) { | ||
1029 | pr_err("Error: sys_perf_event_open() syscall returned " | ||
1030 | "with %d (%s)\n", fd, strerror(errno)); | ||
1031 | return -1; | ||
1032 | } | ||
1033 | |||
1034 | addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); | ||
1035 | if (addr == (void *)(-1)) { | ||
1036 | pr_err("Error: mmap() syscall returned with (%s)\n", | ||
1037 | strerror(errno)); | ||
1038 | goto out_close; | ||
1039 | } | ||
1040 | |||
1041 | for (n = 0; n < 6; n++) { | ||
1042 | u64 stamp, now, delta; | ||
1043 | |||
1044 | stamp = mmap_read_self(addr); | ||
1045 | |||
1046 | for (i = 0; i < loops; i++) | ||
1047 | tmp++; | ||
1048 | |||
1049 | now = mmap_read_self(addr); | ||
1050 | loops *= 10; | ||
1051 | |||
1052 | delta = now - stamp; | ||
1053 | pr_debug("%14d: %14Lu\n", n, (long long)delta); | ||
1054 | |||
1055 | delta_sum += delta; | ||
1056 | } | ||
1057 | |||
1058 | munmap(addr, page_size); | ||
1059 | pr_debug(" "); | ||
1060 | out_close: | ||
1061 | close(fd); | ||
1062 | |||
1063 | if (!delta_sum) | ||
1064 | return -1; | ||
1065 | |||
1066 | return 0; | ||
1067 | } | ||
1068 | |||
/* Run __test__rdpmc() in a child so an RDPMC fault cannot kill perf itself. */
static int test__rdpmc(void)
{
	int status = 0;
	int pid, wret;

	pid = fork();
	if (pid < 0)
		return -1;

	if (pid == 0) {
		/* Child: run the real test, report the result via exit status. */
		exit(__test__rdpmc());
	}

	wret = waitpid(pid, &status, 0);
	return (wret < 0 || status) ? -1 : 0;
}
1092 | |||
1093 | #endif | ||
1094 | |||
/* Thin wrapper: exercise the PMU sysfs format-parsing code. */
static int test__perf_pmu(void)
{
	return perf_pmu__test();
}
1099 | |||
1100 | static int perf_evsel__roundtrip_cache_name_test(void) | ||
1101 | { | ||
1102 | char name[128]; | ||
1103 | int type, op, err = 0, ret = 0, i, idx; | ||
1104 | struct perf_evsel *evsel; | ||
1105 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
1106 | |||
1107 | if (evlist == NULL) | ||
1108 | return -ENOMEM; | ||
1109 | |||
1110 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | ||
1111 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | ||
1112 | /* skip invalid cache type */ | ||
1113 | if (!perf_evsel__is_cache_op_valid(type, op)) | ||
1114 | continue; | ||
1115 | |||
1116 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | ||
1117 | __perf_evsel__hw_cache_type_op_res_name(type, op, i, | ||
1118 | name, sizeof(name)); | ||
1119 | err = parse_events(evlist, name, 0); | ||
1120 | if (err) | ||
1121 | ret = err; | ||
1122 | } | ||
1123 | } | ||
1124 | } | ||
1125 | |||
1126 | idx = 0; | ||
1127 | evsel = perf_evlist__first(evlist); | ||
1128 | |||
1129 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | ||
1130 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | ||
1131 | /* skip invalid cache type */ | ||
1132 | if (!perf_evsel__is_cache_op_valid(type, op)) | ||
1133 | continue; | ||
1134 | |||
1135 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | ||
1136 | __perf_evsel__hw_cache_type_op_res_name(type, op, i, | ||
1137 | name, sizeof(name)); | ||
1138 | if (evsel->idx != idx) | ||
1139 | continue; | ||
1140 | |||
1141 | ++idx; | ||
1142 | |||
1143 | if (strcmp(perf_evsel__name(evsel), name)) { | ||
1144 | pr_debug("%s != %s\n", perf_evsel__name(evsel), name); | ||
1145 | ret = -1; | ||
1146 | } | ||
1147 | |||
1148 | evsel = perf_evsel__next(evsel); | ||
1149 | } | ||
1150 | } | ||
1151 | } | ||
1152 | |||
1153 | perf_evlist__delete(evlist); | ||
1154 | return ret; | ||
1155 | } | ||
1156 | |||
1157 | static int __perf_evsel__name_array_test(const char *names[], int nr_names) | ||
1158 | { | ||
1159 | int i, err; | ||
1160 | struct perf_evsel *evsel; | ||
1161 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
1162 | |||
1163 | if (evlist == NULL) | ||
1164 | return -ENOMEM; | ||
1165 | |||
1166 | for (i = 0; i < nr_names; ++i) { | ||
1167 | err = parse_events(evlist, names[i], 0); | ||
1168 | if (err) { | ||
1169 | pr_debug("failed to parse event '%s', err %d\n", | ||
1170 | names[i], err); | ||
1171 | goto out_delete_evlist; | ||
1172 | } | ||
1173 | } | ||
1174 | |||
1175 | err = 0; | ||
1176 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
1177 | if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) { | ||
1178 | --err; | ||
1179 | pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]); | ||
1180 | } | ||
1181 | } | ||
1182 | |||
1183 | out_delete_evlist: | ||
1184 | perf_evlist__delete(evlist); | ||
1185 | return err; | ||
1186 | } | ||
1187 | |||
1188 | #define perf_evsel__name_array_test(names) \ | ||
1189 | __perf_evsel__name_array_test(names, ARRAY_SIZE(names)) | ||
1190 | |||
1191 | static int perf_evsel__roundtrip_name_test(void) | ||
1192 | { | ||
1193 | int err = 0, ret = 0; | ||
1194 | |||
1195 | err = perf_evsel__name_array_test(perf_evsel__hw_names); | ||
1196 | if (err) | ||
1197 | ret = err; | ||
1198 | |||
1199 | err = perf_evsel__name_array_test(perf_evsel__sw_names); | ||
1200 | if (err) | ||
1201 | ret = err; | ||
1202 | |||
1203 | err = perf_evsel__roundtrip_cache_name_test(); | ||
1204 | if (err) | ||
1205 | ret = err; | ||
1206 | |||
1207 | return ret; | ||
1208 | } | ||
1209 | |||
1210 | static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, | ||
1211 | int size, bool should_be_signed) | ||
1212 | { | ||
1213 | struct format_field *field = perf_evsel__field(evsel, name); | ||
1214 | int is_signed; | ||
1215 | int ret = 0; | ||
1216 | |||
1217 | if (field == NULL) { | ||
1218 | pr_debug("%s: \"%s\" field not found!\n", evsel->name, name); | ||
1219 | return -1; | ||
1220 | } | ||
1221 | |||
1222 | is_signed = !!(field->flags | FIELD_IS_SIGNED); | ||
1223 | if (should_be_signed && !is_signed) { | ||
1224 | pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", | ||
1225 | evsel->name, name, is_signed, should_be_signed); | ||
1226 | ret = -1; | ||
1227 | } | ||
1228 | |||
1229 | if (field->size != size) { | ||
1230 | pr_debug("%s: \"%s\" size (%d) should be %d!\n", | ||
1231 | evsel->name, name, field->size, size); | ||
1232 | ret = -1; | ||
1233 | } | ||
1234 | |||
1235 | return ret; | ||
1236 | } | ||
1237 | |||
1238 | static int perf_evsel__tp_sched_test(void) | ||
1239 | { | ||
1240 | struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0); | ||
1241 | int ret = 0; | ||
1242 | |||
1243 | if (evsel == NULL) { | ||
1244 | pr_debug("perf_evsel__new\n"); | ||
1245 | return -1; | ||
1246 | } | ||
1247 | |||
1248 | if (perf_evsel__test_field(evsel, "prev_comm", 16, true)) | ||
1249 | ret = -1; | ||
1250 | |||
1251 | if (perf_evsel__test_field(evsel, "prev_pid", 4, true)) | ||
1252 | ret = -1; | ||
1253 | |||
1254 | if (perf_evsel__test_field(evsel, "prev_prio", 4, true)) | ||
1255 | ret = -1; | ||
1256 | |||
1257 | if (perf_evsel__test_field(evsel, "prev_state", 8, true)) | ||
1258 | ret = -1; | ||
1259 | |||
1260 | if (perf_evsel__test_field(evsel, "next_comm", 16, true)) | ||
1261 | ret = -1; | ||
1262 | |||
1263 | if (perf_evsel__test_field(evsel, "next_pid", 4, true)) | ||
1264 | ret = -1; | ||
1265 | |||
1266 | if (perf_evsel__test_field(evsel, "next_prio", 4, true)) | ||
1267 | ret = -1; | ||
1268 | |||
1269 | perf_evsel__delete(evsel); | ||
1270 | |||
1271 | evsel = perf_evsel__newtp("sched", "sched_wakeup", 0); | ||
1272 | |||
1273 | if (perf_evsel__test_field(evsel, "comm", 16, true)) | ||
1274 | ret = -1; | ||
1275 | |||
1276 | if (perf_evsel__test_field(evsel, "pid", 4, true)) | ||
1277 | ret = -1; | ||
1278 | |||
1279 | if (perf_evsel__test_field(evsel, "prio", 4, true)) | ||
1280 | ret = -1; | ||
1281 | |||
1282 | if (perf_evsel__test_field(evsel, "success", 4, true)) | ||
1283 | ret = -1; | ||
1284 | |||
1285 | if (perf_evsel__test_field(evsel, "target_cpu", 4, true)) | ||
1286 | ret = -1; | ||
1287 | |||
1288 | return ret; | ||
1289 | } | ||
1290 | |||
1291 | static int test__syscall_open_tp_fields(void) | ||
1292 | { | ||
1293 | struct perf_record_opts opts = { | ||
1294 | .target = { | ||
1295 | .uid = UINT_MAX, | ||
1296 | .uses_mmap = true, | ||
1297 | }, | ||
1298 | .no_delay = true, | ||
1299 | .freq = 1, | ||
1300 | .mmap_pages = 256, | ||
1301 | .raw_samples = true, | ||
1302 | }; | ||
1303 | const char *filename = "/etc/passwd"; | ||
1304 | int flags = O_RDONLY | O_DIRECTORY; | ||
1305 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
1306 | struct perf_evsel *evsel; | ||
1307 | int err = -1, i, nr_events = 0, nr_polls = 0; | ||
1308 | |||
1309 | if (evlist == NULL) { | ||
1310 | pr_debug("%s: perf_evlist__new\n", __func__); | ||
1311 | goto out; | ||
1312 | } | ||
1313 | |||
1314 | evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); | ||
1315 | if (evsel == NULL) { | ||
1316 | pr_debug("%s: perf_evsel__newtp\n", __func__); | ||
1317 | goto out_delete_evlist; | ||
1318 | } | ||
1319 | |||
1320 | perf_evlist__add(evlist, evsel); | ||
1321 | |||
1322 | err = perf_evlist__create_maps(evlist, &opts.target); | ||
1323 | if (err < 0) { | ||
1324 | pr_debug("%s: perf_evlist__create_maps\n", __func__); | ||
1325 | goto out_delete_evlist; | ||
1326 | } | ||
1327 | |||
1328 | perf_evsel__config(evsel, &opts, evsel); | ||
1329 | |||
1330 | evlist->threads->map[0] = getpid(); | ||
1331 | |||
1332 | err = perf_evlist__open(evlist); | ||
1333 | if (err < 0) { | ||
1334 | pr_debug("perf_evlist__open: %s\n", strerror(errno)); | ||
1335 | goto out_delete_evlist; | ||
1336 | } | ||
1337 | |||
1338 | err = perf_evlist__mmap(evlist, UINT_MAX, false); | ||
1339 | if (err < 0) { | ||
1340 | pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); | ||
1341 | goto out_delete_evlist; | ||
1342 | } | ||
1343 | |||
1344 | perf_evlist__enable(evlist); | ||
1345 | |||
1346 | /* | ||
1347 | * Generate the event: | ||
1348 | */ | ||
1349 | open(filename, flags); | ||
1350 | |||
1351 | while (1) { | ||
1352 | int before = nr_events; | ||
1353 | |||
1354 | for (i = 0; i < evlist->nr_mmaps; i++) { | ||
1355 | union perf_event *event; | ||
1356 | |||
1357 | while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { | ||
1358 | const u32 type = event->header.type; | ||
1359 | int tp_flags; | ||
1360 | struct perf_sample sample; | ||
1361 | |||
1362 | ++nr_events; | ||
1363 | |||
1364 | if (type != PERF_RECORD_SAMPLE) | ||
1365 | continue; | ||
1366 | |||
1367 | err = perf_evsel__parse_sample(evsel, event, &sample); | ||
1368 | if (err) { | ||
1369 | pr_err("Can't parse sample, err = %d\n", err); | ||
1370 | goto out_munmap; | ||
1371 | } | ||
1372 | |||
1373 | tp_flags = perf_evsel__intval(evsel, &sample, "flags"); | ||
1374 | |||
1375 | if (flags != tp_flags) { | ||
1376 | pr_debug("%s: Expected flags=%#x, got %#x\n", | ||
1377 | __func__, flags, tp_flags); | ||
1378 | goto out_munmap; | ||
1379 | } | ||
1380 | |||
1381 | goto out_ok; | ||
1382 | } | ||
1383 | } | ||
1384 | |||
1385 | if (nr_events == before) | ||
1386 | poll(evlist->pollfd, evlist->nr_fds, 10); | ||
1387 | |||
1388 | if (++nr_polls > 5) { | ||
1389 | pr_debug("%s: no events!\n", __func__); | ||
1390 | goto out_munmap; | ||
1391 | } | ||
1392 | } | ||
1393 | out_ok: | ||
1394 | err = 0; | ||
1395 | out_munmap: | ||
1396 | perf_evlist__munmap(evlist); | ||
1397 | out_delete_evlist: | ||
1398 | perf_evlist__delete(evlist); | ||
1399 | out: | ||
1400 | return err; | ||
1401 | } | ||
1402 | |||
1403 | static struct test { | ||
1404 | const char *desc; | ||
1405 | int (*func)(void); | ||
1406 | } tests[] = { | ||
1407 | { | ||
1408 | .desc = "vmlinux symtab matches kallsyms", | ||
1409 | .func = test__vmlinux_matches_kallsyms, | ||
1410 | }, | ||
1411 | { | ||
1412 | .desc = "detect open syscall event", | ||
1413 | .func = test__open_syscall_event, | ||
1414 | }, | ||
1415 | { | ||
1416 | .desc = "detect open syscall event on all cpus", | ||
1417 | .func = test__open_syscall_event_on_all_cpus, | ||
1418 | }, | ||
1419 | { | ||
1420 | .desc = "read samples using the mmap interface", | ||
1421 | .func = test__basic_mmap, | ||
1422 | }, | ||
1423 | { | ||
1424 | .desc = "parse events tests", | ||
1425 | .func = parse_events__test, | ||
1426 | }, | ||
1427 | #if defined(__x86_64__) || defined(__i386__) | ||
1428 | { | ||
1429 | .desc = "x86 rdpmc test", | ||
1430 | .func = test__rdpmc, | ||
1431 | }, | ||
1432 | #endif | ||
1433 | { | ||
1434 | .desc = "Validate PERF_RECORD_* events & perf_sample fields", | ||
1435 | .func = test__PERF_RECORD, | ||
1436 | }, | ||
1437 | { | ||
1438 | .desc = "Test perf pmu format parsing", | ||
1439 | .func = test__perf_pmu, | ||
1440 | }, | ||
1441 | { | ||
1442 | .desc = "Test dso data interface", | ||
1443 | .func = dso__test_data, | ||
1444 | }, | ||
1445 | { | ||
1446 | .desc = "roundtrip evsel->name check", | ||
1447 | .func = perf_evsel__roundtrip_name_test, | ||
1448 | }, | ||
1449 | { | ||
1450 | .desc = "Check parsing of sched tracepoints fields", | ||
1451 | .func = perf_evsel__tp_sched_test, | ||
1452 | }, | ||
1453 | { | ||
1454 | .desc = "Generate and check syscalls:sys_enter_open event fields", | ||
1455 | .func = test__syscall_open_tp_fields, | ||
1456 | }, | ||
1457 | { | ||
1458 | .func = NULL, | ||
1459 | }, | ||
1460 | }; | ||
1461 | |||
1462 | static bool perf_test__matches(int curr, int argc, const char *argv[]) | ||
1463 | { | ||
1464 | int i; | ||
1465 | |||
1466 | if (argc == 0) | ||
1467 | return true; | ||
1468 | |||
1469 | for (i = 0; i < argc; ++i) { | ||
1470 | char *end; | ||
1471 | long nr = strtoul(argv[i], &end, 10); | ||
1472 | |||
1473 | if (*end == '\0') { | ||
1474 | if (nr == curr + 1) | ||
1475 | return true; | ||
1476 | continue; | ||
1477 | } | ||
1478 | |||
1479 | if (strstr(tests[curr].desc, argv[i])) | ||
1480 | return true; | ||
1481 | } | ||
1482 | |||
1483 | return false; | ||
1484 | } | ||
1485 | |||
1486 | static int __cmd_test(int argc, const char *argv[]) | ||
1487 | { | ||
1488 | int i = 0; | ||
1489 | int width = 0; | ||
1490 | |||
1491 | while (tests[i].func) { | ||
1492 | int len = strlen(tests[i].desc); | ||
1493 | |||
1494 | if (width < len) | ||
1495 | width = len; | ||
1496 | ++i; | ||
1497 | } | ||
1498 | |||
1499 | i = 0; | ||
1500 | while (tests[i].func) { | ||
1501 | int curr = i++, err; | ||
1502 | |||
1503 | if (!perf_test__matches(curr, argc, argv)) | ||
1504 | continue; | ||
1505 | |||
1506 | pr_info("%2d: %-*s:", i, width, tests[curr].desc); | ||
1507 | pr_debug("\n--- start ---\n"); | ||
1508 | err = tests[curr].func(); | ||
1509 | pr_debug("---- end ----\n%s:", tests[curr].desc); | ||
1510 | if (err) | ||
1511 | color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n"); | ||
1512 | else | ||
1513 | pr_info(" Ok\n"); | ||
1514 | } | ||
1515 | |||
1516 | return 0; | ||
1517 | } | ||
1518 | |||
1519 | static int perf_test__list(int argc, const char **argv) | ||
1520 | { | ||
1521 | int i = 0; | ||
1522 | |||
1523 | while (tests[i].func) { | ||
1524 | int curr = i++; | ||
1525 | |||
1526 | if (argc > 1 && !strstr(tests[curr].desc, argv[1])) | ||
1527 | continue; | ||
1528 | |||
1529 | pr_info("%2d: %s\n", i, tests[curr].desc); | ||
1530 | } | ||
1531 | |||
1532 | return 0; | ||
1533 | } | ||
1534 | |||
1535 | int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused) | ||
1536 | { | ||
1537 | const char * const test_usage[] = { | ||
1538 | "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]", | ||
1539 | NULL, | ||
1540 | }; | ||
1541 | const struct option test_options[] = { | ||
1542 | OPT_INCR('v', "verbose", &verbose, | ||
1543 | "be more verbose (show symbol address, etc)"), | ||
1544 | OPT_END() | ||
1545 | }; | ||
1546 | |||
1547 | argc = parse_options(argc, argv, test_options, test_usage, 0); | ||
1548 | if (argc >= 1 && !strcmp(argv[0], "list")) | ||
1549 | return perf_test__list(argc, argv); | ||
1550 | |||
1551 | symbol_conf.priv_size = sizeof(int); | ||
1552 | symbol_conf.sort_by_name = true; | ||
1553 | symbol_conf.try_vmlinux_path = true; | ||
1554 | |||
1555 | if (symbol__init() < 0) | ||
1556 | return -1; | ||
1557 | |||
1558 | return __cmd_test(argc, argv); | ||
1559 | } | ||
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c new file mode 100644 index 000000000000..c6caedeb1d6b --- /dev/null +++ b/tools/perf/tests/dso-data.c | |||
@@ -0,0 +1,153 @@ | |||
1 | #include "util.h" | ||
2 | |||
3 | #include <stdlib.h> | ||
4 | #include <sys/types.h> | ||
5 | #include <sys/stat.h> | ||
6 | #include <fcntl.h> | ||
7 | #include <string.h> | ||
8 | |||
9 | #include "symbol.h" | ||
10 | |||
/* Fail the enclosing test (return -1) with a file:line message when
 * @cond is false. */
#define TEST_ASSERT_VAL(text, cond)					 \
do {									 \
	if (!(cond)) {							 \
		pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
		return -1;						 \
	}								 \
} while (0)
18 | |||
/*
 * Create a temporary file of @size bytes filled with the repeating
 * byte pattern 0..9.  Returns the path (a static buffer, valid until
 * the next call) or NULL on any failure.
 */
static char *test_file(int size)
{
	static char buf_templ[] = "/tmp/test-XXXXXX";
	char *templ = buf_templ;
	int fd, i;
	unsigned char *buf;

	fd = mkstemp(templ);
	if (fd < 0)	/* bug fix: fd was previously used unchecked */
		return NULL;

	buf = malloc(size);
	if (!buf) {
		close(fd);
		return NULL;
	}

	for (i = 0; i < size; i++)
		buf[i] = (unsigned char)(i % 10);

	if (write(fd, buf, size) != size)
		templ = NULL;

	free(buf);	/* bug fix: buf was leaked on the success path */
	close(fd);
	return templ;
}
43 | |||
44 | #define TEST_FILE_SIZE (DSO__DATA_CACHE_SIZE * 20) | ||
45 | |||
46 | struct test_data_offset { | ||
47 | off_t offset; | ||
48 | u8 data[10]; | ||
49 | int size; | ||
50 | }; | ||
51 | |||
52 | struct test_data_offset offsets[] = { | ||
53 | /* Fill first cache page. */ | ||
54 | { | ||
55 | .offset = 10, | ||
56 | .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, | ||
57 | .size = 10, | ||
58 | }, | ||
59 | /* Read first cache page. */ | ||
60 | { | ||
61 | .offset = 10, | ||
62 | .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, | ||
63 | .size = 10, | ||
64 | }, | ||
65 | /* Fill cache boundary pages. */ | ||
66 | { | ||
67 | .offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10, | ||
68 | .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, | ||
69 | .size = 10, | ||
70 | }, | ||
71 | /* Read cache boundary pages. */ | ||
72 | { | ||
73 | .offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10, | ||
74 | .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, | ||
75 | .size = 10, | ||
76 | }, | ||
77 | /* Fill final cache page. */ | ||
78 | { | ||
79 | .offset = TEST_FILE_SIZE - 10, | ||
80 | .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, | ||
81 | .size = 10, | ||
82 | }, | ||
83 | /* Read final cache page. */ | ||
84 | { | ||
85 | .offset = TEST_FILE_SIZE - 10, | ||
86 | .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, | ||
87 | .size = 10, | ||
88 | }, | ||
89 | /* Read final cache page. */ | ||
90 | { | ||
91 | .offset = TEST_FILE_SIZE - 3, | ||
92 | .data = { 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 }, | ||
93 | .size = 3, | ||
94 | }, | ||
95 | }; | ||
96 | |||
97 | int dso__test_data(void) | ||
98 | { | ||
99 | struct machine machine; | ||
100 | struct dso *dso; | ||
101 | char *file = test_file(TEST_FILE_SIZE); | ||
102 | size_t i; | ||
103 | |||
104 | TEST_ASSERT_VAL("No test file", file); | ||
105 | |||
106 | memset(&machine, 0, sizeof(machine)); | ||
107 | |||
108 | dso = dso__new((const char *)file); | ||
109 | |||
110 | /* Basic 10 bytes tests. */ | ||
111 | for (i = 0; i < ARRAY_SIZE(offsets); i++) { | ||
112 | struct test_data_offset *data = &offsets[i]; | ||
113 | ssize_t size; | ||
114 | u8 buf[10]; | ||
115 | |||
116 | memset(buf, 0, 10); | ||
117 | size = dso__data_read_offset(dso, &machine, data->offset, | ||
118 | buf, 10); | ||
119 | |||
120 | TEST_ASSERT_VAL("Wrong size", size == data->size); | ||
121 | TEST_ASSERT_VAL("Wrong data", !memcmp(buf, data->data, 10)); | ||
122 | } | ||
123 | |||
124 | /* Read cross multiple cache pages. */ | ||
125 | { | ||
126 | ssize_t size; | ||
127 | int c; | ||
128 | u8 *buf; | ||
129 | |||
130 | buf = malloc(TEST_FILE_SIZE); | ||
131 | TEST_ASSERT_VAL("ENOMEM\n", buf); | ||
132 | |||
133 | /* First iteration to fill caches, second one to read them. */ | ||
134 | for (c = 0; c < 2; c++) { | ||
135 | memset(buf, 0, TEST_FILE_SIZE); | ||
136 | size = dso__data_read_offset(dso, &machine, 10, | ||
137 | buf, TEST_FILE_SIZE); | ||
138 | |||
139 | TEST_ASSERT_VAL("Wrong size", | ||
140 | size == (TEST_FILE_SIZE - 10)); | ||
141 | |||
142 | for (i = 0; i < (size_t)size; i++) | ||
143 | TEST_ASSERT_VAL("Wrong data", | ||
144 | buf[i] == (i % 10)); | ||
145 | } | ||
146 | |||
147 | free(buf); | ||
148 | } | ||
149 | |||
150 | dso__delete(dso); | ||
151 | unlink(file); | ||
152 | return 0; | ||
153 | } | ||
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c new file mode 100644 index 000000000000..b49c2eebff33 --- /dev/null +++ b/tools/perf/tests/parse-events.c | |||
@@ -0,0 +1,1116 @@ | |||
1 | |||
2 | #include "parse-events.h" | ||
3 | #include "evsel.h" | ||
4 | #include "evlist.h" | ||
5 | #include "sysfs.h" | ||
6 | #include "../../../include/linux/hw_breakpoint.h" | ||
7 | |||
/*
 * Evaluate @cond; on failure print file/line plus @text via pr_debug()
 * and make the enclosing test function return -1 immediately.
 */
#define TEST_ASSERT_VAL(text, cond) \
do { \
	if (!(cond)) { \
		pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
		return -1; \
	} \
} while (0)

/* Sample type the parser is expected to set on every tracepoint event. */
#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
			     PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
18 | |||
19 | static int test__checkevent_tracepoint(struct perf_evlist *evlist) | ||
20 | { | ||
21 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
22 | |||
23 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
24 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); | ||
25 | TEST_ASSERT_VAL("wrong sample_type", | ||
26 | PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); | ||
27 | TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period); | ||
28 | return 0; | ||
29 | } | ||
30 | |||
31 | static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist) | ||
32 | { | ||
33 | struct perf_evsel *evsel; | ||
34 | |||
35 | TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1); | ||
36 | |||
37 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
38 | TEST_ASSERT_VAL("wrong type", | ||
39 | PERF_TYPE_TRACEPOINT == evsel->attr.type); | ||
40 | TEST_ASSERT_VAL("wrong sample_type", | ||
41 | PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); | ||
42 | TEST_ASSERT_VAL("wrong sample_period", | ||
43 | 1 == evsel->attr.sample_period); | ||
44 | } | ||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | static int test__checkevent_raw(struct perf_evlist *evlist) | ||
49 | { | ||
50 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
51 | |||
52 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
53 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); | ||
54 | TEST_ASSERT_VAL("wrong config", 0x1a == evsel->attr.config); | ||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | static int test__checkevent_numeric(struct perf_evlist *evlist) | ||
59 | { | ||
60 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
61 | |||
62 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
63 | TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type); | ||
64 | TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); | ||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | static int test__checkevent_symbolic_name(struct perf_evlist *evlist) | ||
69 | { | ||
70 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
71 | |||
72 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
73 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
74 | TEST_ASSERT_VAL("wrong config", | ||
75 | PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static int test__checkevent_symbolic_name_config(struct perf_evlist *evlist) | ||
80 | { | ||
81 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
82 | |||
83 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
84 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
85 | TEST_ASSERT_VAL("wrong config", | ||
86 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
87 | TEST_ASSERT_VAL("wrong period", | ||
88 | 100000 == evsel->attr.sample_period); | ||
89 | TEST_ASSERT_VAL("wrong config1", | ||
90 | 0 == evsel->attr.config1); | ||
91 | TEST_ASSERT_VAL("wrong config2", | ||
92 | 1 == evsel->attr.config2); | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | static int test__checkevent_symbolic_alias(struct perf_evlist *evlist) | ||
97 | { | ||
98 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
99 | |||
100 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
101 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type); | ||
102 | TEST_ASSERT_VAL("wrong config", | ||
103 | PERF_COUNT_SW_PAGE_FAULTS == evsel->attr.config); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static int test__checkevent_genhw(struct perf_evlist *evlist) | ||
108 | { | ||
109 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
110 | |||
111 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
112 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type); | ||
113 | TEST_ASSERT_VAL("wrong config", (1 << 16) == evsel->attr.config); | ||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | static int test__checkevent_breakpoint(struct perf_evlist *evlist) | ||
118 | { | ||
119 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
120 | |||
121 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
122 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type); | ||
123 | TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config); | ||
124 | TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) == | ||
125 | evsel->attr.bp_type); | ||
126 | TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_4 == | ||
127 | evsel->attr.bp_len); | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | static int test__checkevent_breakpoint_x(struct perf_evlist *evlist) | ||
132 | { | ||
133 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
134 | |||
135 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
136 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type); | ||
137 | TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config); | ||
138 | TEST_ASSERT_VAL("wrong bp_type", | ||
139 | HW_BREAKPOINT_X == evsel->attr.bp_type); | ||
140 | TEST_ASSERT_VAL("wrong bp_len", sizeof(long) == evsel->attr.bp_len); | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static int test__checkevent_breakpoint_r(struct perf_evlist *evlist) | ||
145 | { | ||
146 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
147 | |||
148 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
149 | TEST_ASSERT_VAL("wrong type", | ||
150 | PERF_TYPE_BREAKPOINT == evsel->attr.type); | ||
151 | TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config); | ||
152 | TEST_ASSERT_VAL("wrong bp_type", | ||
153 | HW_BREAKPOINT_R == evsel->attr.bp_type); | ||
154 | TEST_ASSERT_VAL("wrong bp_len", | ||
155 | HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len); | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | static int test__checkevent_breakpoint_w(struct perf_evlist *evlist) | ||
160 | { | ||
161 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
162 | |||
163 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
164 | TEST_ASSERT_VAL("wrong type", | ||
165 | PERF_TYPE_BREAKPOINT == evsel->attr.type); | ||
166 | TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config); | ||
167 | TEST_ASSERT_VAL("wrong bp_type", | ||
168 | HW_BREAKPOINT_W == evsel->attr.bp_type); | ||
169 | TEST_ASSERT_VAL("wrong bp_len", | ||
170 | HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len); | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static int test__checkevent_breakpoint_rw(struct perf_evlist *evlist) | ||
175 | { | ||
176 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
177 | |||
178 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
179 | TEST_ASSERT_VAL("wrong type", | ||
180 | PERF_TYPE_BREAKPOINT == evsel->attr.type); | ||
181 | TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config); | ||
182 | TEST_ASSERT_VAL("wrong bp_type", | ||
183 | (HW_BREAKPOINT_R|HW_BREAKPOINT_W) == evsel->attr.bp_type); | ||
184 | TEST_ASSERT_VAL("wrong bp_len", | ||
185 | HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len); | ||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist) | ||
190 | { | ||
191 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
192 | |||
193 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
194 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
195 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
196 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
197 | |||
198 | return test__checkevent_tracepoint(evlist); | ||
199 | } | ||
200 | |||
201 | static int | ||
202 | test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist) | ||
203 | { | ||
204 | struct perf_evsel *evsel; | ||
205 | |||
206 | TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1); | ||
207 | |||
208 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
209 | TEST_ASSERT_VAL("wrong exclude_user", | ||
210 | !evsel->attr.exclude_user); | ||
211 | TEST_ASSERT_VAL("wrong exclude_kernel", | ||
212 | evsel->attr.exclude_kernel); | ||
213 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
214 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
215 | } | ||
216 | |||
217 | return test__checkevent_tracepoint_multi(evlist); | ||
218 | } | ||
219 | |||
220 | static int test__checkevent_raw_modifier(struct perf_evlist *evlist) | ||
221 | { | ||
222 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
223 | |||
224 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
225 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
226 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
227 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); | ||
228 | |||
229 | return test__checkevent_raw(evlist); | ||
230 | } | ||
231 | |||
232 | static int test__checkevent_numeric_modifier(struct perf_evlist *evlist) | ||
233 | { | ||
234 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
235 | |||
236 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
237 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
238 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
239 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); | ||
240 | |||
241 | return test__checkevent_numeric(evlist); | ||
242 | } | ||
243 | |||
244 | static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist) | ||
245 | { | ||
246 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
247 | |||
248 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
249 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
250 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
251 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
252 | |||
253 | return test__checkevent_symbolic_name(evlist); | ||
254 | } | ||
255 | |||
256 | static int test__checkevent_exclude_host_modifier(struct perf_evlist *evlist) | ||
257 | { | ||
258 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
259 | |||
260 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
261 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | ||
262 | |||
263 | return test__checkevent_symbolic_name(evlist); | ||
264 | } | ||
265 | |||
266 | static int test__checkevent_exclude_guest_modifier(struct perf_evlist *evlist) | ||
267 | { | ||
268 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
269 | |||
270 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); | ||
271 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
272 | |||
273 | return test__checkevent_symbolic_name(evlist); | ||
274 | } | ||
275 | |||
276 | static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist) | ||
277 | { | ||
278 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
279 | |||
280 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
281 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
282 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
283 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
284 | |||
285 | return test__checkevent_symbolic_alias(evlist); | ||
286 | } | ||
287 | |||
288 | static int test__checkevent_genhw_modifier(struct perf_evlist *evlist) | ||
289 | { | ||
290 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
291 | |||
292 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
293 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
294 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
295 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); | ||
296 | |||
297 | return test__checkevent_genhw(evlist); | ||
298 | } | ||
299 | |||
300 | static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist) | ||
301 | { | ||
302 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
303 | |||
304 | |||
305 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
306 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
307 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
308 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
309 | TEST_ASSERT_VAL("wrong name", | ||
310 | !strcmp(perf_evsel__name(evsel), "mem:0:u")); | ||
311 | |||
312 | return test__checkevent_breakpoint(evlist); | ||
313 | } | ||
314 | |||
315 | static int test__checkevent_breakpoint_x_modifier(struct perf_evlist *evlist) | ||
316 | { | ||
317 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
318 | |||
319 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
320 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
321 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
322 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
323 | TEST_ASSERT_VAL("wrong name", | ||
324 | !strcmp(perf_evsel__name(evsel), "mem:0:x:k")); | ||
325 | |||
326 | return test__checkevent_breakpoint_x(evlist); | ||
327 | } | ||
328 | |||
329 | static int test__checkevent_breakpoint_r_modifier(struct perf_evlist *evlist) | ||
330 | { | ||
331 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
332 | |||
333 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
334 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
335 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
336 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); | ||
337 | TEST_ASSERT_VAL("wrong name", | ||
338 | !strcmp(perf_evsel__name(evsel), "mem:0:r:hp")); | ||
339 | |||
340 | return test__checkevent_breakpoint_r(evlist); | ||
341 | } | ||
342 | |||
343 | static int test__checkevent_breakpoint_w_modifier(struct perf_evlist *evlist) | ||
344 | { | ||
345 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
346 | |||
347 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
348 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
349 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
350 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); | ||
351 | TEST_ASSERT_VAL("wrong name", | ||
352 | !strcmp(perf_evsel__name(evsel), "mem:0:w:up")); | ||
353 | |||
354 | return test__checkevent_breakpoint_w(evlist); | ||
355 | } | ||
356 | |||
357 | static int test__checkevent_breakpoint_rw_modifier(struct perf_evlist *evlist) | ||
358 | { | ||
359 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
360 | |||
361 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
362 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
363 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
364 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); | ||
365 | TEST_ASSERT_VAL("wrong name", | ||
366 | !strcmp(perf_evsel__name(evsel), "mem:0:rw:kp")); | ||
367 | |||
368 | return test__checkevent_breakpoint_rw(evlist); | ||
369 | } | ||
370 | |||
371 | static int test__checkevent_pmu(struct perf_evlist *evlist) | ||
372 | { | ||
373 | |||
374 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
375 | |||
376 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
377 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); | ||
378 | TEST_ASSERT_VAL("wrong config", 10 == evsel->attr.config); | ||
379 | TEST_ASSERT_VAL("wrong config1", 1 == evsel->attr.config1); | ||
380 | TEST_ASSERT_VAL("wrong config2", 3 == evsel->attr.config2); | ||
381 | TEST_ASSERT_VAL("wrong period", 1000 == evsel->attr.sample_period); | ||
382 | |||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | static int test__checkevent_list(struct perf_evlist *evlist) | ||
387 | { | ||
388 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
389 | |||
390 | TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries); | ||
391 | |||
392 | /* r1 */ | ||
393 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); | ||
394 | TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); | ||
395 | TEST_ASSERT_VAL("wrong config1", 0 == evsel->attr.config1); | ||
396 | TEST_ASSERT_VAL("wrong config2", 0 == evsel->attr.config2); | ||
397 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
398 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
399 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
400 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
401 | |||
402 | /* syscalls:sys_enter_open:k */ | ||
403 | evsel = perf_evsel__next(evsel); | ||
404 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); | ||
405 | TEST_ASSERT_VAL("wrong sample_type", | ||
406 | PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); | ||
407 | TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period); | ||
408 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
409 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
410 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
411 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
412 | |||
413 | /* 1:1:hp */ | ||
414 | evsel = perf_evsel__next(evsel); | ||
415 | TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type); | ||
416 | TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); | ||
417 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
418 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
419 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
420 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); | ||
421 | |||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | static int test__checkevent_pmu_name(struct perf_evlist *evlist) | ||
426 | { | ||
427 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
428 | |||
429 | /* cpu/config=1,name=krava/u */ | ||
430 | TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); | ||
431 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); | ||
432 | TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); | ||
433 | TEST_ASSERT_VAL("wrong name", !strcmp(perf_evsel__name(evsel), "krava")); | ||
434 | |||
435 | /* cpu/config=2/u" */ | ||
436 | evsel = perf_evsel__next(evsel); | ||
437 | TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); | ||
438 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); | ||
439 | TEST_ASSERT_VAL("wrong config", 2 == evsel->attr.config); | ||
440 | TEST_ASSERT_VAL("wrong name", | ||
441 | !strcmp(perf_evsel__name(evsel), "cpu/config=2/u")); | ||
442 | |||
443 | return 0; | ||
444 | } | ||
445 | |||
446 | static int test__checkevent_pmu_events(struct perf_evlist *evlist) | ||
447 | { | ||
448 | struct perf_evsel *evsel; | ||
449 | |||
450 | evsel = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
451 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | ||
452 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); | ||
453 | TEST_ASSERT_VAL("wrong exclude_user", | ||
454 | !evsel->attr.exclude_user); | ||
455 | TEST_ASSERT_VAL("wrong exclude_kernel", | ||
456 | evsel->attr.exclude_kernel); | ||
457 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
458 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
459 | |||
460 | return 0; | ||
461 | } | ||
462 | |||
463 | static int test__checkterms_simple(struct list_head *terms) | ||
464 | { | ||
465 | struct parse_events__term *term; | ||
466 | |||
467 | /* config=10 */ | ||
468 | term = list_entry(terms->next, struct parse_events__term, list); | ||
469 | TEST_ASSERT_VAL("wrong type term", | ||
470 | term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG); | ||
471 | TEST_ASSERT_VAL("wrong type val", | ||
472 | term->type_val == PARSE_EVENTS__TERM_TYPE_NUM); | ||
473 | TEST_ASSERT_VAL("wrong val", term->val.num == 10); | ||
474 | TEST_ASSERT_VAL("wrong config", !term->config); | ||
475 | |||
476 | /* config1 */ | ||
477 | term = list_entry(term->list.next, struct parse_events__term, list); | ||
478 | TEST_ASSERT_VAL("wrong type term", | ||
479 | term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG1); | ||
480 | TEST_ASSERT_VAL("wrong type val", | ||
481 | term->type_val == PARSE_EVENTS__TERM_TYPE_NUM); | ||
482 | TEST_ASSERT_VAL("wrong val", term->val.num == 1); | ||
483 | TEST_ASSERT_VAL("wrong config", !term->config); | ||
484 | |||
485 | /* config2=3 */ | ||
486 | term = list_entry(term->list.next, struct parse_events__term, list); | ||
487 | TEST_ASSERT_VAL("wrong type term", | ||
488 | term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG2); | ||
489 | TEST_ASSERT_VAL("wrong type val", | ||
490 | term->type_val == PARSE_EVENTS__TERM_TYPE_NUM); | ||
491 | TEST_ASSERT_VAL("wrong val", term->val.num == 3); | ||
492 | TEST_ASSERT_VAL("wrong config", !term->config); | ||
493 | |||
494 | /* umask=1*/ | ||
495 | term = list_entry(term->list.next, struct parse_events__term, list); | ||
496 | TEST_ASSERT_VAL("wrong type term", | ||
497 | term->type_term == PARSE_EVENTS__TERM_TYPE_USER); | ||
498 | TEST_ASSERT_VAL("wrong type val", | ||
499 | term->type_val == PARSE_EVENTS__TERM_TYPE_NUM); | ||
500 | TEST_ASSERT_VAL("wrong val", term->val.num == 1); | ||
501 | TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "umask")); | ||
502 | |||
503 | return 0; | ||
504 | } | ||
505 | |||
506 | static int test__group1(struct perf_evlist *evlist) | ||
507 | { | ||
508 | struct perf_evsel *evsel, *leader; | ||
509 | |||
510 | TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); | ||
511 | |||
512 | /* instructions:k */ | ||
513 | evsel = leader = perf_evlist__first(evlist); | ||
514 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
515 | TEST_ASSERT_VAL("wrong config", | ||
516 | PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); | ||
517 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
518 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
519 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
520 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
521 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
522 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
523 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
524 | |||
525 | /* cycles:upp */ | ||
526 | evsel = perf_evsel__next(evsel); | ||
527 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
528 | TEST_ASSERT_VAL("wrong config", | ||
529 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
530 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
531 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
532 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
533 | /* use of precise requires exclude_guest */ | ||
534 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); | ||
535 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
536 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); | ||
537 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
538 | |||
539 | return 0; | ||
540 | } | ||
541 | |||
542 | static int test__group2(struct perf_evlist *evlist) | ||
543 | { | ||
544 | struct perf_evsel *evsel, *leader; | ||
545 | |||
546 | TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries); | ||
547 | |||
548 | /* faults + :ku modifier */ | ||
549 | evsel = leader = perf_evlist__first(evlist); | ||
550 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type); | ||
551 | TEST_ASSERT_VAL("wrong config", | ||
552 | PERF_COUNT_SW_PAGE_FAULTS == evsel->attr.config); | ||
553 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
554 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
555 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
556 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
557 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
558 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
559 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
560 | |||
561 | /* cache-references + :u modifier */ | ||
562 | evsel = perf_evsel__next(evsel); | ||
563 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
564 | TEST_ASSERT_VAL("wrong config", | ||
565 | PERF_COUNT_HW_CACHE_REFERENCES == evsel->attr.config); | ||
566 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
567 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
568 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
569 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
570 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
571 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
572 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
573 | |||
574 | /* cycles:k */ | ||
575 | evsel = perf_evsel__next(evsel); | ||
576 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
577 | TEST_ASSERT_VAL("wrong config", | ||
578 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
579 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
580 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
581 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
582 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
583 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
584 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
585 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
586 | |||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | static int test__group3(struct perf_evlist *evlist __maybe_unused) | ||
591 | { | ||
592 | struct perf_evsel *evsel, *leader; | ||
593 | |||
594 | TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries); | ||
595 | |||
596 | /* group1 syscalls:sys_enter_open:H */ | ||
597 | evsel = leader = perf_evlist__first(evlist); | ||
598 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); | ||
599 | TEST_ASSERT_VAL("wrong sample_type", | ||
600 | PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); | ||
601 | TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period); | ||
602 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
603 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
604 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
605 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); | ||
606 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
607 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
608 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
609 | TEST_ASSERT_VAL("wrong group name", | ||
610 | !strcmp(leader->group_name, "group1")); | ||
611 | |||
612 | /* group1 cycles:kppp */ | ||
613 | evsel = perf_evsel__next(evsel); | ||
614 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
615 | TEST_ASSERT_VAL("wrong config", | ||
616 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
617 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
618 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
619 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
620 | /* use of precise requires exclude_guest */ | ||
621 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); | ||
622 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
623 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3); | ||
624 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
625 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); | ||
626 | |||
627 | /* group2 cycles + G modifier */ | ||
628 | evsel = leader = perf_evsel__next(evsel); | ||
629 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
630 | TEST_ASSERT_VAL("wrong config", | ||
631 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
632 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
633 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
634 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
635 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
636 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | ||
637 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
638 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
639 | TEST_ASSERT_VAL("wrong group name", | ||
640 | !strcmp(leader->group_name, "group2")); | ||
641 | |||
642 | /* group2 1:3 + G modifier */ | ||
643 | evsel = perf_evsel__next(evsel); | ||
644 | TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type); | ||
645 | TEST_ASSERT_VAL("wrong config", 3 == evsel->attr.config); | ||
646 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
647 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
648 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
649 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
650 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | ||
651 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
652 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
653 | |||
654 | /* instructions:u */ | ||
655 | evsel = perf_evsel__next(evsel); | ||
656 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
657 | TEST_ASSERT_VAL("wrong config", | ||
658 | PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); | ||
659 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
660 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
661 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
662 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
663 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
664 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
665 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
666 | |||
667 | return 0; | ||
668 | } | ||
669 | |||
670 | static int test__group4(struct perf_evlist *evlist __maybe_unused) | ||
671 | { | ||
672 | struct perf_evsel *evsel, *leader; | ||
673 | |||
674 | TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); | ||
675 | |||
676 | /* cycles:u + p */ | ||
677 | evsel = leader = perf_evlist__first(evlist); | ||
678 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
679 | TEST_ASSERT_VAL("wrong config", | ||
680 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
681 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
682 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
683 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
684 | /* use of precise requires exclude_guest */ | ||
685 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); | ||
686 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
687 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1); | ||
688 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); | ||
689 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
690 | |||
691 | /* instructions:kp + p */ | ||
692 | evsel = perf_evsel__next(evsel); | ||
693 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
694 | TEST_ASSERT_VAL("wrong config", | ||
695 | PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); | ||
696 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
697 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
698 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
699 | /* use of precise requires exclude_guest */ | ||
700 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); | ||
701 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
702 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); | ||
703 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
704 | |||
705 | return 0; | ||
706 | } | ||
707 | |||
708 | static int test__group5(struct perf_evlist *evlist __maybe_unused) | ||
709 | { | ||
710 | struct perf_evsel *evsel, *leader; | ||
711 | |||
712 | TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries); | ||
713 | |||
714 | /* cycles + G */ | ||
715 | evsel = leader = perf_evlist__first(evlist); | ||
716 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
717 | TEST_ASSERT_VAL("wrong config", | ||
718 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
719 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
720 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
721 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
722 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
723 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | ||
724 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
725 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); | ||
726 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
727 | |||
728 | /* instructions + G */ | ||
729 | evsel = perf_evsel__next(evsel); | ||
730 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
731 | TEST_ASSERT_VAL("wrong config", | ||
732 | PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); | ||
733 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
734 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
735 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
736 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
737 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | ||
738 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
739 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
740 | |||
741 | /* cycles:G */ | ||
742 | evsel = leader = perf_evsel__next(evsel); | ||
743 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
744 | TEST_ASSERT_VAL("wrong config", | ||
745 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
746 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
747 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
748 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
749 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
750 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | ||
751 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
752 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); | ||
753 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
754 | |||
755 | /* instructions:G */ | ||
756 | evsel = perf_evsel__next(evsel); | ||
757 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
758 | TEST_ASSERT_VAL("wrong config", | ||
759 | PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); | ||
760 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
761 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
762 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
763 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
764 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | ||
765 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
766 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
767 | |||
768 | /* cycles */ | ||
769 | evsel = perf_evsel__next(evsel); | ||
770 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
771 | TEST_ASSERT_VAL("wrong config", | ||
772 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
773 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
774 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
775 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
776 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); | ||
777 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
778 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
779 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
780 | |||
781 | return 0; | ||
782 | } | ||
783 | |||
784 | struct test__event_st { | ||
785 | const char *name; | ||
786 | __u32 type; | ||
787 | int (*check)(struct perf_evlist *evlist); | ||
788 | }; | ||
789 | |||
790 | static struct test__event_st test__events[] = { | ||
791 | [0] = { | ||
792 | .name = "syscalls:sys_enter_open", | ||
793 | .check = test__checkevent_tracepoint, | ||
794 | }, | ||
795 | [1] = { | ||
796 | .name = "syscalls:*", | ||
797 | .check = test__checkevent_tracepoint_multi, | ||
798 | }, | ||
799 | [2] = { | ||
800 | .name = "r1a", | ||
801 | .check = test__checkevent_raw, | ||
802 | }, | ||
803 | [3] = { | ||
804 | .name = "1:1", | ||
805 | .check = test__checkevent_numeric, | ||
806 | }, | ||
807 | [4] = { | ||
808 | .name = "instructions", | ||
809 | .check = test__checkevent_symbolic_name, | ||
810 | }, | ||
811 | [5] = { | ||
812 | .name = "cycles/period=100000,config2/", | ||
813 | .check = test__checkevent_symbolic_name_config, | ||
814 | }, | ||
815 | [6] = { | ||
816 | .name = "faults", | ||
817 | .check = test__checkevent_symbolic_alias, | ||
818 | }, | ||
819 | [7] = { | ||
820 | .name = "L1-dcache-load-miss", | ||
821 | .check = test__checkevent_genhw, | ||
822 | }, | ||
823 | [8] = { | ||
824 | .name = "mem:0", | ||
825 | .check = test__checkevent_breakpoint, | ||
826 | }, | ||
827 | [9] = { | ||
828 | .name = "mem:0:x", | ||
829 | .check = test__checkevent_breakpoint_x, | ||
830 | }, | ||
831 | [10] = { | ||
832 | .name = "mem:0:r", | ||
833 | .check = test__checkevent_breakpoint_r, | ||
834 | }, | ||
835 | [11] = { | ||
836 | .name = "mem:0:w", | ||
837 | .check = test__checkevent_breakpoint_w, | ||
838 | }, | ||
839 | [12] = { | ||
840 | .name = "syscalls:sys_enter_open:k", | ||
841 | .check = test__checkevent_tracepoint_modifier, | ||
842 | }, | ||
843 | [13] = { | ||
844 | .name = "syscalls:*:u", | ||
845 | .check = test__checkevent_tracepoint_multi_modifier, | ||
846 | }, | ||
847 | [14] = { | ||
848 | .name = "r1a:kp", | ||
849 | .check = test__checkevent_raw_modifier, | ||
850 | }, | ||
851 | [15] = { | ||
852 | .name = "1:1:hp", | ||
853 | .check = test__checkevent_numeric_modifier, | ||
854 | }, | ||
855 | [16] = { | ||
856 | .name = "instructions:h", | ||
857 | .check = test__checkevent_symbolic_name_modifier, | ||
858 | }, | ||
859 | [17] = { | ||
860 | .name = "faults:u", | ||
861 | .check = test__checkevent_symbolic_alias_modifier, | ||
862 | }, | ||
863 | [18] = { | ||
864 | .name = "L1-dcache-load-miss:kp", | ||
865 | .check = test__checkevent_genhw_modifier, | ||
866 | }, | ||
867 | [19] = { | ||
868 | .name = "mem:0:u", | ||
869 | .check = test__checkevent_breakpoint_modifier, | ||
870 | }, | ||
871 | [20] = { | ||
872 | .name = "mem:0:x:k", | ||
873 | .check = test__checkevent_breakpoint_x_modifier, | ||
874 | }, | ||
875 | [21] = { | ||
876 | .name = "mem:0:r:hp", | ||
877 | .check = test__checkevent_breakpoint_r_modifier, | ||
878 | }, | ||
879 | [22] = { | ||
880 | .name = "mem:0:w:up", | ||
881 | .check = test__checkevent_breakpoint_w_modifier, | ||
882 | }, | ||
883 | [23] = { | ||
884 | .name = "r1,syscalls:sys_enter_open:k,1:1:hp", | ||
885 | .check = test__checkevent_list, | ||
886 | }, | ||
887 | [24] = { | ||
888 | .name = "instructions:G", | ||
889 | .check = test__checkevent_exclude_host_modifier, | ||
890 | }, | ||
891 | [25] = { | ||
892 | .name = "instructions:H", | ||
893 | .check = test__checkevent_exclude_guest_modifier, | ||
894 | }, | ||
895 | [26] = { | ||
896 | .name = "mem:0:rw", | ||
897 | .check = test__checkevent_breakpoint_rw, | ||
898 | }, | ||
899 | [27] = { | ||
900 | .name = "mem:0:rw:kp", | ||
901 | .check = test__checkevent_breakpoint_rw_modifier, | ||
902 | }, | ||
903 | [28] = { | ||
904 | .name = "{instructions:k,cycles:upp}", | ||
905 | .check = test__group1, | ||
906 | }, | ||
907 | [29] = { | ||
908 | .name = "{faults:k,cache-references}:u,cycles:k", | ||
909 | .check = test__group2, | ||
910 | }, | ||
911 | [30] = { | ||
912 | .name = "group1{syscalls:sys_enter_open:H,cycles:kppp},group2{cycles,1:3}:G,instructions:u", | ||
913 | .check = test__group3, | ||
914 | }, | ||
915 | [31] = { | ||
916 | .name = "{cycles:u,instructions:kp}:p", | ||
917 | .check = test__group4, | ||
918 | }, | ||
919 | [32] = { | ||
920 | .name = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles", | ||
921 | .check = test__group5, | ||
922 | }, | ||
923 | }; | ||
924 | |||
925 | static struct test__event_st test__events_pmu[] = { | ||
926 | [0] = { | ||
927 | .name = "cpu/config=10,config1,config2=3,period=1000/u", | ||
928 | .check = test__checkevent_pmu, | ||
929 | }, | ||
930 | [1] = { | ||
931 | .name = "cpu/config=1,name=krava/u,cpu/config=2/u", | ||
932 | .check = test__checkevent_pmu_name, | ||
933 | }, | ||
934 | }; | ||
935 | |||
936 | struct test__term { | ||
937 | const char *str; | ||
938 | __u32 type; | ||
939 | int (*check)(struct list_head *terms); | ||
940 | }; | ||
941 | |||
942 | static struct test__term test__terms[] = { | ||
943 | [0] = { | ||
944 | .str = "config=10,config1,config2=3,umask=1", | ||
945 | .check = test__checkterms_simple, | ||
946 | }, | ||
947 | }; | ||
948 | |||
949 | static int test_event(struct test__event_st *e) | ||
950 | { | ||
951 | struct perf_evlist *evlist; | ||
952 | int ret; | ||
953 | |||
954 | evlist = perf_evlist__new(NULL, NULL); | ||
955 | if (evlist == NULL) | ||
956 | return -ENOMEM; | ||
957 | |||
958 | ret = parse_events(evlist, e->name, 0); | ||
959 | if (ret) { | ||
960 | pr_debug("failed to parse event '%s', err %d\n", | ||
961 | e->name, ret); | ||
962 | return ret; | ||
963 | } | ||
964 | |||
965 | ret = e->check(evlist); | ||
966 | perf_evlist__delete(evlist); | ||
967 | |||
968 | return ret; | ||
969 | } | ||
970 | |||
971 | static int test_events(struct test__event_st *events, unsigned cnt) | ||
972 | { | ||
973 | int ret1, ret2 = 0; | ||
974 | unsigned i; | ||
975 | |||
976 | for (i = 0; i < cnt; i++) { | ||
977 | struct test__event_st *e = &events[i]; | ||
978 | |||
979 | pr_debug("running test %d '%s'\n", i, e->name); | ||
980 | ret1 = test_event(e); | ||
981 | if (ret1) | ||
982 | ret2 = ret1; | ||
983 | } | ||
984 | |||
985 | return ret2; | ||
986 | } | ||
987 | |||
988 | static int test_term(struct test__term *t) | ||
989 | { | ||
990 | struct list_head *terms; | ||
991 | int ret; | ||
992 | |||
993 | terms = malloc(sizeof(*terms)); | ||
994 | if (!terms) | ||
995 | return -ENOMEM; | ||
996 | |||
997 | INIT_LIST_HEAD(terms); | ||
998 | |||
999 | ret = parse_events_terms(terms, t->str); | ||
1000 | if (ret) { | ||
1001 | pr_debug("failed to parse terms '%s', err %d\n", | ||
1002 | t->str , ret); | ||
1003 | return ret; | ||
1004 | } | ||
1005 | |||
1006 | ret = t->check(terms); | ||
1007 | parse_events__free_terms(terms); | ||
1008 | |||
1009 | return ret; | ||
1010 | } | ||
1011 | |||
1012 | static int test_terms(struct test__term *terms, unsigned cnt) | ||
1013 | { | ||
1014 | int ret = 0; | ||
1015 | unsigned i; | ||
1016 | |||
1017 | for (i = 0; i < cnt; i++) { | ||
1018 | struct test__term *t = &terms[i]; | ||
1019 | |||
1020 | pr_debug("running test %d '%s'\n", i, t->str); | ||
1021 | ret = test_term(t); | ||
1022 | if (ret) | ||
1023 | break; | ||
1024 | } | ||
1025 | |||
1026 | return ret; | ||
1027 | } | ||
1028 | |||
1029 | static int test_pmu(void) | ||
1030 | { | ||
1031 | struct stat st; | ||
1032 | char path[PATH_MAX]; | ||
1033 | int ret; | ||
1034 | |||
1035 | snprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu/format/", | ||
1036 | sysfs_find_mountpoint()); | ||
1037 | |||
1038 | ret = stat(path, &st); | ||
1039 | if (ret) | ||
1040 | pr_debug("omitting PMU cpu tests\n"); | ||
1041 | return !ret; | ||
1042 | } | ||
1043 | |||
1044 | static int test_pmu_events(void) | ||
1045 | { | ||
1046 | struct stat st; | ||
1047 | char path[PATH_MAX]; | ||
1048 | struct dirent *ent; | ||
1049 | DIR *dir; | ||
1050 | int ret; | ||
1051 | |||
1052 | snprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu/events/", | ||
1053 | sysfs_find_mountpoint()); | ||
1054 | |||
1055 | ret = stat(path, &st); | ||
1056 | if (ret) { | ||
1057 | pr_debug("ommiting PMU cpu events tests\n"); | ||
1058 | return 0; | ||
1059 | } | ||
1060 | |||
1061 | dir = opendir(path); | ||
1062 | if (!dir) { | ||
1063 | pr_debug("can't open pmu event dir"); | ||
1064 | return -1; | ||
1065 | } | ||
1066 | |||
1067 | while (!ret && (ent = readdir(dir))) { | ||
1068 | #define MAX_NAME 100 | ||
1069 | struct test__event_st e; | ||
1070 | char name[MAX_NAME]; | ||
1071 | |||
1072 | if (!strcmp(ent->d_name, ".") || | ||
1073 | !strcmp(ent->d_name, "..")) | ||
1074 | continue; | ||
1075 | |||
1076 | snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name); | ||
1077 | |||
1078 | e.name = name; | ||
1079 | e.check = test__checkevent_pmu_events; | ||
1080 | |||
1081 | ret = test_event(&e); | ||
1082 | #undef MAX_NAME | ||
1083 | } | ||
1084 | |||
1085 | closedir(dir); | ||
1086 | return ret; | ||
1087 | } | ||
1088 | |||
1089 | int parse_events__test(void) | ||
1090 | { | ||
1091 | int ret1, ret2 = 0; | ||
1092 | |||
1093 | #define TEST_EVENTS(tests) \ | ||
1094 | do { \ | ||
1095 | ret1 = test_events(tests, ARRAY_SIZE(tests)); \ | ||
1096 | if (!ret2) \ | ||
1097 | ret2 = ret1; \ | ||
1098 | } while (0) | ||
1099 | |||
1100 | TEST_EVENTS(test__events); | ||
1101 | |||
1102 | if (test_pmu()) | ||
1103 | TEST_EVENTS(test__events_pmu); | ||
1104 | |||
1105 | if (test_pmu()) { | ||
1106 | int ret = test_pmu_events(); | ||
1107 | if (ret) | ||
1108 | return ret; | ||
1109 | } | ||
1110 | |||
1111 | ret1 = test_terms(test__terms, ARRAY_SIZE(test__terms)); | ||
1112 | if (!ret2) | ||
1113 | ret2 = ret1; | ||
1114 | |||
1115 | return ret2; | ||
1116 | } | ||