Diffstat (limited to 'tools/perf/builtin-test.c')
-rw-r--r--	tools/perf/builtin-test.c | 54 ++++++++++++++++++++++++++++++++----------------------
1 file changed, 32 insertions(+), 22 deletions(-)
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index ed5696198d3d..5dcdba653d70 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -146,7 +146,7 @@ next_pair:
 				if (llabs(skew) < page_size)
 					continue;
 
-				pr_debug("%#Lx: diff end addr for %s v: %#Lx k: %#Lx\n",
+				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
 					 sym->start, sym->name, sym->end, pair->end);
 			} else {
 				struct rb_node *nnd;
@@ -168,11 +168,11 @@ detour:
 					goto detour;
 				}
 
-				pr_debug("%#Lx: diff name v: %s k: %s\n",
+				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
 					 sym->start, sym->name, pair->name);
 			}
 		} else
-			pr_debug("%#Lx: %s not on kallsyms\n", sym->start, sym->name);
+			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);
 
 		err = -1;
 	}
@@ -211,10 +211,10 @@ detour:
 
 		if (pair->start == pos->start) {
 			pair->priv = 1;
-			pr_info(" %Lx-%Lx %Lx %s in kallsyms as",
+			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
 				pos->start, pos->end, pos->pgoff, pos->dso->name);
 			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
-				pr_info(": \n*%Lx-%Lx %Lx",
+				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
 					pair->start, pair->end, pair->pgoff);
 			pr_info(" %s\n", pair->dso->name);
 			pair->priv = 1;
@@ -307,7 +307,7 @@ static int test__open_syscall_event(void)
 	}
 
 	if (evsel->counts->cpu[0].val != nr_open_calls) {
-		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %Ld\n",
+		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
 			 nr_open_calls, evsel->counts->cpu[0].val);
 		goto out_close_fd;
 	}
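
Background for the format-string conversions in the hunks above: a minimal standalone sketch (not part of this patch) of the <inttypes.h> macros the code is switched to. The variable names and values below are invented for illustration; only the PRIx64/PRIu64 usage mirrors what the patch does.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/*
	 * "%Lx"/"%Ld" are non-standard and only happen to work with some
	 * libc/compiler combinations; the PRI* macros expand to the right
	 * conversion for uint64_t and are glued into the format string by
	 * literal concatenation at compile time.
	 */
	uint64_t addr  = 0xffffffff81000000ULL;
	uint64_t calls = 111;

	printf("%#" PRIx64 ": %" PRIu64 " calls\n", addr, calls);
	return 0;
}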
@@ -332,8 +332,7 @@ static int test__open_syscall_event_on_all_cpus(void)
 	struct perf_evsel *evsel;
 	struct perf_event_attr attr;
 	unsigned int nr_open_calls = 111, i;
-	cpu_set_t *cpu_set;
-	size_t cpu_set_size;
+	cpu_set_t cpu_set;
 	int id = trace_event__id("sys_enter_open");
 
 	if (id < 0) {
@@ -353,13 +352,8 @@ static int test__open_syscall_event_on_all_cpus(void)
 		return -1;
 	}
 
-	cpu_set = CPU_ALLOC(cpus->nr);
 
-	if (cpu_set == NULL)
-		goto out_thread_map_delete;
-
-	cpu_set_size = CPU_ALLOC_SIZE(cpus->nr);
-	CPU_ZERO_S(cpu_set_size, cpu_set);
+	CPU_ZERO(&cpu_set);
 
 	memset(&attr, 0, sizeof(attr));
 	attr.type = PERF_TYPE_TRACEPOINT;
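
For reference, a minimal standalone sketch of the fixed-size cpu_set_t API the patch moves to: CPU_ZERO()/CPU_SET() on a stack-allocated set passed to sched_setaffinity() with sizeof(). A fixed set can only describe CPUs numbered below CPU_SETSIZE, which is why the later hunks skip larger CPU numbers. pin_to_cpu() is a hypothetical helper, not perf code.

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

/* Pin the calling thread to one CPU using the fixed-size affinity API. */
static int pin_to_cpu(int cpu)
{
	cpu_set_t set;

	if (cpu >= CPU_SETSIZE)
		return -1;	/* cannot be represented in a fixed-size set */

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0, sizeof(set), &set) < 0) {
		fprintf(stderr, "sched_setaffinity: %s\n", strerror(errno));
		return -1;
	}
	return 0;
}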
@@ -367,7 +361,7 @@ static int test__open_syscall_event_on_all_cpus(void)
 	evsel = perf_evsel__new(&attr, 0);
 	if (evsel == NULL) {
 		pr_debug("perf_evsel__new\n");
-		goto out_cpu_free;
+		goto out_thread_map_delete;
 	}
 
 	if (perf_evsel__open(evsel, cpus, threads) < 0) {
@@ -379,14 +373,29 @@ static int test__open_syscall_event_on_all_cpus(void)
 
 	for (cpu = 0; cpu < cpus->nr; ++cpu) {
 		unsigned int ncalls = nr_open_calls + cpu;
+		/*
+		 * XXX eventually lift this restriction in a way that
+		 * keeps perf building on older glibc installations
+		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
+		 * a reasonable upper limit tho :-)
+		 */
+		if (cpus->map[cpu] >= CPU_SETSIZE) {
+			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
+			continue;
+		}
 
-		CPU_SET(cpu, cpu_set);
-		sched_setaffinity(0, cpu_set_size, cpu_set);
+		CPU_SET(cpus->map[cpu], &cpu_set);
+		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
+				 cpus->map[cpu],
+				 strerror(errno));
+			goto out_close_fd;
+		}
 		for (i = 0; i < ncalls; ++i) {
 			fd = open("/etc/passwd", O_RDONLY);
 			close(fd);
 		}
-		CPU_CLR(cpu, cpu_set);
+		CPU_CLR(cpus->map[cpu], &cpu_set);
 	}
 
 	/*
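
For contrast, a sketch of the dynamically sized CPU_ALLOC() interface that the removed lines used and that the XXX comment above alludes to; it can describe more than CPU_SETSIZE CPUs but is only available on newer glibcs, hence the fixed-size fallback in this patch. pin_to_cpu_dynamic() is a hypothetical helper, not part of perf.

#define _GNU_SOURCE
#include <sched.h>
#include <stddef.h>

/*
 * Pin the calling thread to one CPU out of nr_cpus possible ones,
 * using a dynamically sized CPU set.
 */
static int pin_to_cpu_dynamic(int cpu, int nr_cpus)
{
	cpu_set_t *set = CPU_ALLOC(nr_cpus);
	size_t size = CPU_ALLOC_SIZE(nr_cpus);
	int err = -1;

	if (set == NULL)
		return -1;

	CPU_ZERO_S(size, set);
	CPU_SET_S(cpu, size, set);
	if (sched_setaffinity(0, size, set) == 0)
		err = 0;

	CPU_FREE(set);
	return err;
}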
@@ -402,6 +411,9 @@ static int test__open_syscall_event_on_all_cpus(void)
 	for (cpu = 0; cpu < cpus->nr; ++cpu) {
 		unsigned int expected;
 
+		if (cpus->map[cpu] >= CPU_SETSIZE)
+			continue;
+
 		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
 			pr_debug("perf_evsel__open_read_on_cpu\n");
 			goto out_close_fd;
@@ -409,8 +421,8 @@ static int test__open_syscall_event_on_all_cpus(void)
 
 		expected = nr_open_calls + cpu;
 		if (evsel->counts->cpu[cpu].val != expected) {
-			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %Ld\n",
-				 expected, cpu, evsel->counts->cpu[cpu].val);
+			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
+				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
 			goto out_close_fd;
 		}
 	}
@@ -420,8 +432,6 @@ out_close_fd:
 	perf_evsel__close_fd(evsel, 1, threads->nr);
 out_evsel_delete:
 	perf_evsel__delete(evsel);
-out_cpu_free:
-	CPU_FREE(cpu_set);
 out_thread_map_delete:
 	thread_map__delete(threads);
 	return err;