author     Linus Torvalds <torvalds@linux-foundation.org>   2010-05-18 11:19:03 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-05-18 11:19:03 -0400
commit     4d7b4ac22fbec1a03206c6cde353f2fd6942f828
tree       2d96a9e9c28cf6fa628a278decc00ad55a8b043b /tools/perf/util/map.c
parent     3aaf51ace5975050ab43c7d4d7e439e0ae7d13d7
parent     94f3ca95787ada3d64339a4ecb2754236ab563f6
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (311 commits)
  perf tools: Add mode to build without newt support
  perf symbols: symbol inconsistency message should be done only at verbose=1
  perf tui: Add explicit -lslang option
  perf options: Type check all the remaining OPT_ variants
  perf options: Type check OPT_BOOLEAN and fix the offenders
  perf options: Check v type in OPT_U?INTEGER
  perf options: Introduce OPT_UINTEGER
  perf tui: Add workaround for slang < 2.1.4
  perf record: Fix bug mismatch with -c option definition
  perf options: Introduce OPT_U64
  perf tui: Add help window to show key associations
  perf tui: Make <- exit menus too
  perf newt: Add single key shortcuts for zoom into DSO and threads
  perf newt: Exit browser unconditionally when CTRL+C, q or Q is pressed
  perf newt: Fix the 'A'/'a' shortcut for annotate
  perf newt: Make <- exit the ui_browser
  x86, perf: P4 PMU - fix counters management logic
  perf newt: Make <- zoom out filters
  perf report: Report number of events, not samples
  perf hist: Clarify events_stats fields usage
  ...
Fix up trivial conflicts in kernel/fork.c and tools/perf/builtin-record.c
Diffstat (limited to 'tools/perf/util/map.c')
 -rw-r--r--  tools/perf/util/map.c | 409
 1 files changed, 400 insertions(+), 9 deletions(-)
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index e509cd59c67d..e672f2fef65b 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -1,9 +1,11 @@
-#include "event.h"
 #include "symbol.h"
+#include <errno.h>
+#include <limits.h>
 #include <stdlib.h>
 #include <string.h>
 #include <stdio.h>
-#include "debug.h"
+#include <unistd.h>
+#include "map.h"
 
 const char *map_type__name[MAP__NR_TYPES] = {
         [MAP__FUNCTION] = "Functions",
@@ -36,15 +38,16 @@ void map__init(struct map *self, enum map_type type,
         self->map_ip = map__map_ip;
         self->unmap_ip = map__unmap_ip;
         RB_CLEAR_NODE(&self->rb_node);
+        self->groups = NULL;
 }
 
-struct map *map__new(struct mmap_event *event, enum map_type type,
-                     char *cwd, int cwdlen)
+struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
+                     u64 pgoff, u32 pid, char *filename,
+                     enum map_type type, char *cwd, int cwdlen)
 {
         struct map *self = malloc(sizeof(*self));
 
         if (self != NULL) {
-                const char *filename = event->filename;
                 char newfilename[PATH_MAX];
                 struct dso *dso;
                 int anon;
@@ -62,16 +65,15 @@ struct map *map__new(struct mmap_event *event, enum map_type type,
                 anon = is_anon_memory(filename);
 
                 if (anon) {
-                        snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
+                        snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
                         filename = newfilename;
                 }
 
-                dso = dsos__findnew(filename);
+                dso = __dsos__findnew(dsos__list, filename);
                 if (dso == NULL)
                         goto out_delete;
 
-                map__init(self, type, event->start, event->start + event->len,
-                          event->pgoff, dso);
+                map__init(self, type, start, start + len, pgoff, dso);
 
                 if (anon) {
 set_identity:
@@ -235,3 +237,392 @@ u64 map__objdump_2ip(struct map *map, u64 addr)
           map->unmap_ip(map, addr);    /* RIP -> IP */
         return ip;
 }
+
+void map_groups__init(struct map_groups *self)
+{
+        int i;
+        for (i = 0; i < MAP__NR_TYPES; ++i) {
+                self->maps[i] = RB_ROOT;
+                INIT_LIST_HEAD(&self->removed_maps[i]);
+        }
+        self->machine = NULL;
+}
+
+void map_groups__flush(struct map_groups *self)
+{
+        int type;
+
+        for (type = 0; type < MAP__NR_TYPES; type++) {
+                struct rb_root *root = &self->maps[type];
+                struct rb_node *next = rb_first(root);
+
+                while (next) {
+                        struct map *pos = rb_entry(next, struct map, rb_node);
+                        next = rb_next(&pos->rb_node);
+                        rb_erase(&pos->rb_node, root);
+                        /*
+                         * We may have references to this map, for
+                         * instance in some hist_entry instances, so
+                         * just move them to a separate list.
+                         */
+                        list_add_tail(&pos->node, &self->removed_maps[pos->type]);
+                }
+        }
+}
+
+struct symbol *map_groups__find_symbol(struct map_groups *self,
+                                       enum map_type type, u64 addr,
+                                       struct map **mapp,
+                                       symbol_filter_t filter)
+{
+        struct map *map = map_groups__find(self, type, addr);
+
+        if (map != NULL) {
+                if (mapp != NULL)
+                        *mapp = map;
+                return map__find_symbol(map, map->map_ip(map, addr), filter);
+        }
+
+        return NULL;
+}
+
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
+                                               enum map_type type,
+                                               const char *name,
+                                               struct map **mapp,
+                                               symbol_filter_t filter)
+{
+        struct rb_node *nd;
+
+        for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
+                struct map *pos = rb_entry(nd, struct map, rb_node);
+                struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
+
+                if (sym == NULL)
+                        continue;
+                if (mapp != NULL)
+                        *mapp = pos;
+                return sym;
+        }
+
+        return NULL;
+}
+
+size_t __map_groups__fprintf_maps(struct map_groups *self,
+                                  enum map_type type, int verbose, FILE *fp)
+{
+        size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
+        struct rb_node *nd;
+
+        for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
+                struct map *pos = rb_entry(nd, struct map, rb_node);
+                printed += fprintf(fp, "Map:");
+                printed += map__fprintf(pos, fp);
+                if (verbose > 2) {
+                        printed += dso__fprintf(pos->dso, type, fp);
+                        printed += fprintf(fp, "--\n");
+                }
+        }
+
+        return printed;
+}
+
+size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp)
+{
+        size_t printed = 0, i;
+        for (i = 0; i < MAP__NR_TYPES; ++i)
+                printed += __map_groups__fprintf_maps(self, i, verbose, fp);
+        return printed;
+}
+
+static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
+                                                 enum map_type type,
+                                                 int verbose, FILE *fp)
+{
+        struct map *pos;
+        size_t printed = 0;
+
+        list_for_each_entry(pos, &self->removed_maps[type], node) {
+                printed += fprintf(fp, "Map:");
+                printed += map__fprintf(pos, fp);
+                if (verbose > 1) {
+                        printed += dso__fprintf(pos->dso, type, fp);
+                        printed += fprintf(fp, "--\n");
+                }
+        }
+        return printed;
+}
+
+static size_t map_groups__fprintf_removed_maps(struct map_groups *self,
+                                               int verbose, FILE *fp)
+{
+        size_t printed = 0, i;
+        for (i = 0; i < MAP__NR_TYPES; ++i)
+                printed += __map_groups__fprintf_removed_maps(self, i, verbose, fp);
+        return printed;
+}
+
+size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp)
+{
+        size_t printed = map_groups__fprintf_maps(self, verbose, fp);
+        printed += fprintf(fp, "Removed maps:\n");
+        return printed + map_groups__fprintf_removed_maps(self, verbose, fp);
+}
+
+int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
+                                   int verbose, FILE *fp)
+{
+        struct rb_root *root = &self->maps[map->type];
+        struct rb_node *next = rb_first(root);
+
+        while (next) {
+                struct map *pos = rb_entry(next, struct map, rb_node);
+                next = rb_next(&pos->rb_node);
+
+                if (!map__overlap(pos, map))
+                        continue;
+
+                if (verbose >= 2) {
+                        fputs("overlapping maps:\n", fp);
+                        map__fprintf(map, fp);
+                        map__fprintf(pos, fp);
+                }
+
+                rb_erase(&pos->rb_node, root);
+                /*
+                 * We may have references to this map, for instance in some
+                 * hist_entry instances, so just move them to a separate
+                 * list.
+                 */
+                list_add_tail(&pos->node, &self->removed_maps[map->type]);
+                /*
+                 * Now check if we need to create new maps for areas not
+                 * overlapped by the new map:
+                 */
+                if (map->start > pos->start) {
+                        struct map *before = map__clone(pos);
+
+                        if (before == NULL)
+                                return -ENOMEM;
+
+                        before->end = map->start - 1;
+                        map_groups__insert(self, before);
+                        if (verbose >= 2)
+                                map__fprintf(before, fp);
+                }
+
+                if (map->end < pos->end) {
+                        struct map *after = map__clone(pos);
+
+                        if (after == NULL)
+                                return -ENOMEM;
+
+                        after->start = map->end + 1;
+                        map_groups__insert(self, after);
+                        if (verbose >= 2)
+                                map__fprintf(after, fp);
+                }
+        }
+
+        return 0;
+}
+
+/*
+ * XXX This should not really _copy_ te maps, but refcount them.
+ */
+int map_groups__clone(struct map_groups *self,
+                      struct map_groups *parent, enum map_type type)
+{
+        struct rb_node *nd;
+        for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
+                struct map *map = rb_entry(nd, struct map, rb_node);
+                struct map *new = map__clone(map);
+                if (new == NULL)
+                        return -ENOMEM;
+                map_groups__insert(self, new);
+        }
+        return 0;
+}
+
+static u64 map__reloc_map_ip(struct map *map, u64 ip)
+{
+        return ip + (s64)map->pgoff;
+}
+
+static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
+{
+        return ip - (s64)map->pgoff;
+}
+
+void map__reloc_vmlinux(struct map *self)
+{
+        struct kmap *kmap = map__kmap(self);
+        s64 reloc;
+
+        if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
+                return;
+
+        reloc = (kmap->ref_reloc_sym->unrelocated_addr -
+                 kmap->ref_reloc_sym->addr);
+
+        if (!reloc)
+                return;
+
+        self->map_ip = map__reloc_map_ip;
+        self->unmap_ip = map__reloc_unmap_ip;
+        self->pgoff = reloc;
+}
+
+void maps__insert(struct rb_root *maps, struct map *map)
+{
+        struct rb_node **p = &maps->rb_node;
+        struct rb_node *parent = NULL;
+        const u64 ip = map->start;
+        struct map *m;
+
+        while (*p != NULL) {
+                parent = *p;
+                m = rb_entry(parent, struct map, rb_node);
+                if (ip < m->start)
+                        p = &(*p)->rb_left;
+                else
+                        p = &(*p)->rb_right;
+        }
+
+        rb_link_node(&map->rb_node, parent, p);
+        rb_insert_color(&map->rb_node, maps);
+}
+
+struct map *maps__find(struct rb_root *maps, u64 ip)
+{
+        struct rb_node **p = &maps->rb_node;
+        struct rb_node *parent = NULL;
+        struct map *m;
+
+        while (*p != NULL) {
+                parent = *p;
+                m = rb_entry(parent, struct map, rb_node);
+                if (ip < m->start)
+                        p = &(*p)->rb_left;
+                else if (ip > m->end)
+                        p = &(*p)->rb_right;
+                else
+                        return m;
+        }
+
+        return NULL;
+}
+
+int machine__init(struct machine *self, const char *root_dir, pid_t pid)
+{
+        map_groups__init(&self->kmaps);
+        RB_CLEAR_NODE(&self->rb_node);
+        INIT_LIST_HEAD(&self->user_dsos);
+        INIT_LIST_HEAD(&self->kernel_dsos);
+
+        self->kmaps.machine = self;
+        self->pid = pid;
+        self->root_dir = strdup(root_dir);
+        return self->root_dir == NULL ? -ENOMEM : 0;
+}
+
+struct machine *machines__add(struct rb_root *self, pid_t pid,
+                              const char *root_dir)
+{
+        struct rb_node **p = &self->rb_node;
+        struct rb_node *parent = NULL;
+        struct machine *pos, *machine = malloc(sizeof(*machine));
+
+        if (!machine)
+                return NULL;
+
+        if (machine__init(machine, root_dir, pid) != 0) {
+                free(machine);
+                return NULL;
+        }
+
+        while (*p != NULL) {
+                parent = *p;
+                pos = rb_entry(parent, struct machine, rb_node);
+                if (pid < pos->pid)
+                        p = &(*p)->rb_left;
+                else
+                        p = &(*p)->rb_right;
+        }
+
+        rb_link_node(&machine->rb_node, parent, p);
+        rb_insert_color(&machine->rb_node, self);
+
+        return machine;
+}
+
+struct machine *machines__find(struct rb_root *self, pid_t pid)
+{
+        struct rb_node **p = &self->rb_node;
+        struct rb_node *parent = NULL;
+        struct machine *machine;
+        struct machine *default_machine = NULL;
+
+        while (*p != NULL) {
+                parent = *p;
+                machine = rb_entry(parent, struct machine, rb_node);
+                if (pid < machine->pid)
+                        p = &(*p)->rb_left;
+                else if (pid > machine->pid)
+                        p = &(*p)->rb_right;
+                else
+                        return machine;
+                if (!machine->pid)
+                        default_machine = machine;
+        }
+
+        return default_machine;
+}
+
+struct machine *machines__findnew(struct rb_root *self, pid_t pid)
+{
+        char path[PATH_MAX];
+        const char *root_dir;
+        struct machine *machine = machines__find(self, pid);
+
+        if (!machine || machine->pid != pid) {
+                if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
+                        root_dir = "";
+                else {
+                        if (!symbol_conf.guestmount)
+                                goto out;
+                        sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
+                        if (access(path, R_OK)) {
+                                pr_err("Can't access file %s\n", path);
+                                goto out;
+                        }
+                        root_dir = path;
+                }
+                machine = machines__add(self, pid, root_dir);
+        }
+
+out:
+        return machine;
+}
+
+void machines__process(struct rb_root *self, machine__process_t process, void *data)
+{
+        struct rb_node *nd;
+
+        for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+                struct machine *pos = rb_entry(nd, struct machine, rb_node);
+                process(pos, data);
+        }
+}
+
+char *machine__mmap_name(struct machine *self, char *bf, size_t size)
+{
+        if (machine__is_host(self))
+                snprintf(bf, size, "[%s]", "kernel.kallsyms");
+        else if (machine__is_default_guest(self))
+                snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
+        else
+                snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);
+
+        return bf;
+}
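
The most involved piece of new code in this diff is map_groups__fixup_overlappings(): when a new map overlaps an existing one, the old map is parked on the removed_maps list (it may still be referenced by hist_entry instances) and clones are re-inserted for the address ranges the new map does not cover. The following standalone sketch illustrates only that splitting step with plain inclusive ranges; struct range and split_overlap() are hypothetical names invented for this illustration and are not part of the perf sources, which operate on struct map, map__clone() and an rbtree instead.

/*
 * Simplified sketch (not perf code): split an existing inclusive address
 * range around an overlapping new range, keeping the uncovered pieces.
 * This mirrors the "before"/"after" clone logic of
 * map_groups__fixup_overlappings() above.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct range {
        uint64_t start, end;    /* inclusive bounds, like map->start/map->end */
};

/* Writes the surviving pieces of 'old' into out[]; returns how many (0..2). */
static int split_overlap(const struct range *old, const struct range *incoming,
                         struct range out[2])
{
        int n = 0;

        if (incoming->start > old->start) {     /* piece before the new range */
                out[n].start = old->start;
                out[n].end   = incoming->start - 1;
                n++;
        }
        if (incoming->end < old->end) {         /* piece after the new range */
                out[n].start = incoming->end + 1;
                out[n].end   = old->end;
                n++;
        }
        return n;
}

int main(void)
{
        struct range old = { 0x1000, 0x4fff };
        struct range incoming = { 0x2000, 0x2fff };
        struct range kept[2];
        int i, n = split_overlap(&old, &incoming, kept);

        for (i = 0; i < n; i++)
                printf("kept [%#" PRIx64 ", %#" PRIx64 "]\n",
                       kept[i].start, kept[i].end);
        return 0;
}

In the real code the same decision is made with map__clone() plus map_groups__insert(), and the overlapped map itself is moved to removed_maps rather than freed, precisely because other data structures may still point at it.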
