author     Ingo Molnar <mingo@kernel.org>    2015-05-28 05:09:22 -0400
committer  Ingo Molnar <mingo@kernel.org>    2015-05-28 05:09:22 -0400
commit     f1942b96b4b44c1ab0e0b82fef93ba7e1fada7af
tree       4a0ba3876432e0583eb65f8ace8361b6c78cdb76
parent     09a216ea5c528356797dc12ab35234922a6c02be
parent     f00898f4e20b286877b8d6d96d6e404661fd7985
Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core refactorings and improvements from Arnaldo Carvalho de Melo:

User visible changes:

- Add hint for 'Too many events are opened.' error message (Jiri Olsa)

Infrastructure changes:

- Protect accesses to map rbtrees with a lock and refcount struct map,
  reducing memory usage as maps not used get freed. The 'dso' struct is
  next in line. (Arnaldo Carvalho de Melo)

- Annotation and branch related option parsing refactorings to share
  code with upcoming patches (Andi Kleen)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  tools/perf/builtin-annotate.c              4
-rw-r--r--  tools/perf/builtin-record.c               89
-rw-r--r--  tools/perf/tests/vmlinux-kallsyms.c        2
-rw-r--r--  tools/perf/util/Build                      1
-rw-r--r--  tools/perf/util/annotate.c                21
-rw-r--r--  tools/perf/util/event.c                    2
-rw-r--r--  tools/perf/util/evsel.c                    4
-rw-r--r--  tools/perf/util/machine.c                  3
-rw-r--r--  tools/perf/util/map.c                    190
-rw-r--r--  tools/perf/util/map.h                     29
-rw-r--r--  tools/perf/util/parse-branch-options.c    93
-rw-r--r--  tools/perf/util/parse-branch-options.h     5
-rw-r--r--  tools/perf/util/probe-event.c              8
-rw-r--r--  tools/perf/util/symbol-elf.c               2
-rw-r--r--  tools/perf/util/symbol.c                  36
15 files changed, 315 insertions(+), 174 deletions(-)
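
The central pattern in the map.c/map.h changes below is reference counting plus a per-tree rwlock: struct map gains an atomic refcnt, maps__insert() takes a reference on behalf of the rbtree, and map__put() replaces direct map__delete() calls. A minimal, self-contained sketch of that get/put discipline is given here for orientation; the object/object__new/object__get/object__put names are illustrative only and are not perf API.

#include <stdlib.h>

/* Illustrative stand-in for the atomic_t helpers perf borrows from the kernel. */
typedef struct { volatile int counter; } atomic_t;
#define atomic_set(v, i)        ((v)->counter = (i))
#define atomic_inc(v)           __sync_add_and_fetch(&(v)->counter, 1)
#define atomic_dec_and_test(v)  (__sync_sub_and_fetch(&(v)->counter, 1) == 0)

struct object {
	atomic_t refcnt;
	/* payload ... */
};

static struct object *object__new(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	if (obj)
		atomic_set(&obj->refcnt, 1);    /* creator holds the first reference */
	return obj;
}

static struct object *object__get(struct object *obj)
{
	if (obj)
		atomic_inc(&obj->refcnt);       /* each container/borrower takes one */
	return obj;
}

static void object__put(struct object *obj)
{
	if (obj && atomic_dec_and_test(&obj->refcnt))
		free(obj);                      /* last reference frees the object */
}

int main(void)
{
	struct object *obj = object__new();     /* refcount == 1 */
	struct object *ref = object__get(obj);  /* refcount == 2, e.g. held by a tree */

	object__put(ref);                        /* back to 1 */
	object__put(obj);                        /* hits 0, object is freed */
	return 0;
}

In the diff below, map__get()/map__put() follow exactly this shape, and __maps__insert()/__maps__purge() take and drop the tree's reference while holding the new maps->lock.
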
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index b57a027fb200..c434e1264087 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -59,6 +59,10 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
 	    (al->sym == NULL ||
 	     strcmp(ann->sym_hist_filter, al->sym->name) != 0)) {
 		/* We're only interested in a symbol named sym_hist_filter */
+		/*
+		 * FIXME: why isn't this done in the symbol_filter when loading
+		 * the DSO?
+		 */
 		if (al->sym != NULL) {
 			rb_erase(&al->sym->rb_node,
 				 &al->map->dso->symbols[al->map->type]);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 5dfe91395617..91aa2a3dcf19 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -28,6 +28,7 @@
 #include "util/thread_map.h"
 #include "util/data.h"
 #include "util/auxtrace.h"
+#include "util/parse-branch-options.h"
 
 #include <unistd.h>
 #include <sched.h>
@@ -751,94 +752,6 @@ out_delete_session:
 	return status;
 }
 
-#define BRANCH_OPT(n, m) \
-	{ .name = n, .mode = (m) }
-
-#define BRANCH_END { .name = NULL }
-
-struct branch_mode {
-	const char *name;
-	int mode;
-};
-
-static const struct branch_mode branch_modes[] = {
-	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
-	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
-	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
-	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
-	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
-	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
-	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
-	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
-	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
-	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
-	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
-	BRANCH_END
-};
-
-static int
-parse_branch_stack(const struct option *opt, const char *str, int unset)
-{
-#define ONLY_PLM \
-	(PERF_SAMPLE_BRANCH_USER |\
-	 PERF_SAMPLE_BRANCH_KERNEL |\
-	 PERF_SAMPLE_BRANCH_HV)
-
-	uint64_t *mode = (uint64_t *)opt->value;
-	const struct branch_mode *br;
-	char *s, *os = NULL, *p;
-	int ret = -1;
-
-	if (unset)
-		return 0;
-
-	/*
-	 * cannot set it twice, -b + --branch-filter for instance
-	 */
-	if (*mode)
-		return -1;
-
-	/* str may be NULL in case no arg is passed to -b */
-	if (str) {
-		/* because str is read-only */
-		s = os = strdup(str);
-		if (!s)
-			return -1;
-
-		for (;;) {
-			p = strchr(s, ',');
-			if (p)
-				*p = '\0';
-
-			for (br = branch_modes; br->name; br++) {
-				if (!strcasecmp(s, br->name))
-					break;
-			}
-			if (!br->name) {
-				ui__warning("unknown branch filter %s,"
-					    " check man page\n", s);
-				goto error;
-			}
-
-			*mode |= br->mode;
-
-			if (!p)
-				break;
-
-			s = p + 1;
-		}
-	}
-	ret = 0;
-
-	/* default to any branch */
-	if ((*mode & ~ONLY_PLM) == 0) {
-		*mode = PERF_SAMPLE_BRANCH_ANY;
-	}
-error:
-	free(os);
-	return ret;
-}
-
 static void callchain_debug(void)
 {
 	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 94ac6924df65..b34c5fc829ae 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -26,7 +26,7 @@ int test__vmlinux_matches_kallsyms(void)
 	struct map *kallsyms_map, *vmlinux_map, *map;
 	struct machine kallsyms, vmlinux;
 	enum map_type type = MAP__FUNCTION;
-	struct rb_root *maps = &vmlinux.kmaps.maps[type];
+	struct maps *maps = &vmlinux.kmaps.maps[type];
 	u64 mem_start, mem_end;
 
 	/*
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 6966d0743bf7..e4b676de2f64 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -75,6 +75,7 @@ libperf-$(CONFIG_X86) += tsc.o
 libperf-y += cloexec.o
 libperf-y += thread-stack.o
 libperf-$(CONFIG_AUXTRACE) += auxtrace.o
+libperf-y += parse-branch-options.o
 
 libperf-$(CONFIG_LIBELF) += symbol-elf.o
 libperf-$(CONFIG_LIBELF) += probe-event.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 7f5bdfc9bc87..bf8043009909 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -506,6 +506,17 @@ static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
 	return 0;
 }
 
+static struct annotation *symbol__get_annotation(struct symbol *sym)
+{
+	struct annotation *notes = symbol__annotation(sym);
+
+	if (notes->src == NULL) {
+		if (symbol__alloc_hist(sym) < 0)
+			return NULL;
+	}
+	return notes;
+}
+
 static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
 				    int evidx, u64 addr)
 {
@@ -513,13 +524,9 @@ static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
 
 	if (sym == NULL)
 		return 0;
-
-	notes = symbol__annotation(sym);
-	if (notes->src == NULL) {
-		if (symbol__alloc_hist(sym) < 0)
-			return -ENOMEM;
-	}
-
+	notes = symbol__get_annotation(sym);
+	if (notes == NULL)
+		return -ENOMEM;
 	return __symbol__inc_addr_samples(sym, map, notes, evidx, addr);
 }
 
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 9d3bba175423..c1925968a8af 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -331,7 +331,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
 	int rc = 0;
 	struct map *pos;
 	struct map_groups *kmaps = &machine->kmaps;
-	struct rb_root *maps = &kmaps->maps[MAP__FUNCTION];
+	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
 	union perf_event *event = zalloc((sizeof(event->mmap) +
 					  machine->id_hdr_size));
 	if (event == NULL) {
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c886b9f7a48d..a3e36fc634dc 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -2149,7 +2149,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
 	case EMFILE:
 		return scnprintf(msg, size, "%s",
 			 "Too many events are opened.\n"
-			 "Try again after reducing the number of events.");
+			 "Probably the maximum number of open file descriptors has been reached.\n"
+			 "Hint: Try again after reducing the number of events.\n"
+			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
 	case ENODEV:
 		if (target->cpu_list)
 			return scnprintf(msg, size, "%s",
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 6bf845758ae3..0c0e61cce577 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -759,7 +759,6 @@ void machine__destroy_kernel_maps(struct machine *machine)
 			kmap->ref_reloc_sym = NULL;
 		}
 
-		map__delete(machine->vmlinux_maps[type]);
 		machine->vmlinux_maps[type] = NULL;
 	}
 }
@@ -1247,6 +1246,7 @@ int machine__process_mmap2_event(struct machine *machine,
 
 	thread__insert_map(thread, map);
 	thread__put(thread);
+	map__put(map);
 	return 0;
 
 out_problem_map:
@@ -1297,6 +1297,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
 
 	thread__insert_map(thread, map);
 	thread__put(thread);
+	map__put(map);
 	return 0;
 
 out_problem_map:
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 898ab92a98dd..af572322586d 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -16,6 +16,8 @@
 #include "machine.h"
 #include <linux/string.h>
 
+static void __maps__insert(struct maps *maps, struct map *map);
+
 const char *map_type__name[MAP__NR_TYPES] = {
 	[MAP__FUNCTION] = "Functions",
 	[MAP__VARIABLE] = "Variables",
@@ -137,6 +139,7 @@ void map__init(struct map *map, enum map_type type,
 	map->groups = NULL;
 	map->referenced = false;
 	map->erange_warned = false;
+	atomic_set(&map->refcnt, 1);
 }
 
 struct map *map__new(struct machine *machine, u64 start, u64 len,
@@ -223,9 +226,16 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
 
 void map__delete(struct map *map)
 {
+	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
 	free(map);
 }
 
+void map__put(struct map *map)
+{
+	if (map && atomic_dec_and_test(&map->refcnt))
+		map__delete(map);
+}
+
 void map__fixup_start(struct map *map)
 {
 	struct rb_root *symbols = &map->dso->symbols[map->type];
@@ -418,48 +428,61 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
 	return ip + map->reloc;
 }
 
+static void maps__init(struct maps *maps)
+{
+	maps->entries = RB_ROOT;
+	pthread_rwlock_init(&maps->lock, NULL);
+	INIT_LIST_HEAD(&maps->removed_maps);
+}
+
 void map_groups__init(struct map_groups *mg, struct machine *machine)
 {
 	int i;
 	for (i = 0; i < MAP__NR_TYPES; ++i) {
-		mg->maps[i] = RB_ROOT;
-		INIT_LIST_HEAD(&mg->removed_maps[i]);
+		maps__init(&mg->maps[i]);
 	}
 	mg->machine = machine;
 	atomic_set(&mg->refcnt, 1);
 }
 
-static void maps__delete(struct rb_root *maps)
+static void __maps__purge(struct maps *maps)
 {
-	struct rb_node *next = rb_first(maps);
+	struct rb_root *root = &maps->entries;
+	struct rb_node *next = rb_first(root);
 
 	while (next) {
 		struct map *pos = rb_entry(next, struct map, rb_node);
 
 		next = rb_next(&pos->rb_node);
-		rb_erase(&pos->rb_node, maps);
-		map__delete(pos);
+		rb_erase_init(&pos->rb_node, root);
+		map__put(pos);
 	}
 }
 
-static void maps__delete_removed(struct list_head *maps)
+static void __maps__purge_removed_maps(struct maps *maps)
 {
 	struct map *pos, *n;
 
-	list_for_each_entry_safe(pos, n, maps, node) {
-		list_del(&pos->node);
-		map__delete(pos);
+	list_for_each_entry_safe(pos, n, &maps->removed_maps, node) {
+		list_del_init(&pos->node);
+		map__put(pos);
 	}
 }
 
+static void maps__exit(struct maps *maps)
+{
+	pthread_rwlock_wrlock(&maps->lock);
+	__maps__purge(maps);
+	__maps__purge_removed_maps(maps);
+	pthread_rwlock_unlock(&maps->lock);
+}
+
 void map_groups__exit(struct map_groups *mg)
 {
 	int i;
 
-	for (i = 0; i < MAP__NR_TYPES; ++i) {
-		maps__delete(&mg->maps[i]);
-		maps__delete_removed(&mg->removed_maps[i]);
-	}
+	for (i = 0; i < MAP__NR_TYPES; ++i)
+		maps__exit(&mg->maps[i]);
 }
 
 bool map_groups__empty(struct map_groups *mg)
@@ -469,7 +492,7 @@ bool map_groups__empty(struct map_groups *mg)
 	for (i = 0; i < MAP__NR_TYPES; ++i) {
 		if (maps__first(&mg->maps[i]))
 			return false;
-		if (!list_empty(&mg->removed_maps[i]))
+		if (!list_empty(&mg->maps[i].removed_maps))
 			return false;
 	}
 
@@ -521,20 +544,28 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
 					       struct map **mapp,
 					       symbol_filter_t filter)
 {
+	struct maps *maps = &mg->maps[type];
+	struct symbol *sym;
 	struct rb_node *nd;
 
-	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
+	pthread_rwlock_rdlock(&maps->lock);
+
+	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
 		struct map *pos = rb_entry(nd, struct map, rb_node);
-		struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
+
+		sym = map__find_symbol_by_name(pos, name, filter);
 
 		if (sym == NULL)
 			continue;
 		if (mapp != NULL)
 			*mapp = pos;
-		return sym;
+		goto out;
 	}
 
-	return NULL;
+	sym = NULL;
+out:
+	pthread_rwlock_unlock(&maps->lock);
+	return sym;
 }
 
 int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
@@ -554,25 +585,35 @@ int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
 	return ams->sym ? 0 : -1;
 }
 
-size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
-				  FILE *fp)
+static size_t maps__fprintf(struct maps *maps, FILE *fp)
 {
-	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
+	size_t printed = 0;
 	struct rb_node *nd;
 
-	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
+	pthread_rwlock_rdlock(&maps->lock);
+
+	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
 		struct map *pos = rb_entry(nd, struct map, rb_node);
 		printed += fprintf(fp, "Map:");
 		printed += map__fprintf(pos, fp);
 		if (verbose > 2) {
-			printed += dso__fprintf(pos->dso, type, fp);
+			printed += dso__fprintf(pos->dso, pos->type, fp);
 			printed += fprintf(fp, "--\n");
 		}
 	}
 
+	pthread_rwlock_unlock(&maps->lock);
+
 	return printed;
 }
 
+size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
+				  FILE *fp)
+{
+	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
+	return printed += maps__fprintf(&mg->maps[type], fp);
+}
+
 static size_t map_groups__fprintf_maps(struct map_groups *mg, FILE *fp)
 {
 	size_t printed = 0, i;
@@ -587,7 +628,7 @@ static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
 	struct map *pos;
 	size_t printed = 0;
 
-	list_for_each_entry(pos, &mg->removed_maps[type], node) {
+	list_for_each_entry(pos, &mg->maps[type].removed_maps, node) {
 		printed += fprintf(fp, "Map:");
 		printed += map__fprintf(pos, fp);
 		if (verbose > 1) {
@@ -614,13 +655,17 @@ size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
 	return printed + map_groups__fprintf_removed_maps(mg, fp);
 }
 
-int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
-				   FILE *fp)
+static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
 {
-	struct rb_root *root = &mg->maps[map->type];
-	struct rb_node *next = rb_first(root);
+	struct rb_root *root;
+	struct rb_node *next;
 	int err = 0;
 
+	pthread_rwlock_wrlock(&maps->lock);
+
+	root = &maps->entries;
+	next = rb_first(root);
+
 	while (next) {
 		struct map *pos = rb_entry(next, struct map, rb_node);
 		next = rb_next(&pos->rb_node);
@@ -634,7 +679,7 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
 			map__fprintf(pos, fp);
 		}
 
-		rb_erase(&pos->rb_node, root);
+		rb_erase_init(&pos->rb_node, root);
 		/*
 		 * Now check if we need to create new maps for areas not
 		 * overlapped by the new map:
@@ -644,11 +689,11 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
 
 			if (before == NULL) {
 				err = -ENOMEM;
-				goto move_map;
+				goto put_map;
 			}
 
 			before->end = map->start;
-			map_groups__insert(mg, before);
+			__maps__insert(maps, before);
 			if (verbose >= 2)
 				map__fprintf(before, fp);
 		}
@@ -658,28 +703,37 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
 
 			if (after == NULL) {
 				err = -ENOMEM;
-				goto move_map;
+				goto put_map;
 			}
 
 			after->start = map->end;
-			map_groups__insert(mg, after);
+			__maps__insert(maps, after);
 			if (verbose >= 2)
 				map__fprintf(after, fp);
 		}
-move_map:
+put_map:
 		/*
 		 * If we have references, just move them to a separate list.
 		 */
 		if (pos->referenced)
-			list_add_tail(&pos->node, &mg->removed_maps[map->type]);
+			list_add_tail(&pos->node, &maps->removed_maps);
 		else
-			map__delete(pos);
+			map__put(pos);
 
 		if (err)
-			return err;
+			goto out;
 	}
 
-	return 0;
+	err = 0;
+out:
+	pthread_rwlock_unlock(&maps->lock);
+	return err;
+}
+
+int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
+				   FILE *fp)
+{
+	return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
 }
 
 /*
@@ -688,21 +742,28 @@ move_map:
 int map_groups__clone(struct map_groups *mg,
 		      struct map_groups *parent, enum map_type type)
 {
+	int err = -ENOMEM;
 	struct map *map;
-	struct rb_root *maps = &parent->maps[type];
+	struct maps *maps = &parent->maps[type];
+
+	pthread_rwlock_rdlock(&maps->lock);
 
 	for (map = maps__first(maps); map; map = map__next(map)) {
 		struct map *new = map__clone(map);
 		if (new == NULL)
-			return -ENOMEM;
+			goto out_unlock;
 		map_groups__insert(mg, new);
 	}
-	return 0;
+
+	err = 0;
+out_unlock:
+	pthread_rwlock_unlock(&maps->lock);
+	return err;
 }
 
-void maps__insert(struct rb_root *maps, struct map *map)
+static void __maps__insert(struct maps *maps, struct map *map)
 {
-	struct rb_node **p = &maps->rb_node;
+	struct rb_node **p = &maps->entries.rb_node;
 	struct rb_node *parent = NULL;
 	const u64 ip = map->start;
 	struct map *m;
@@ -717,20 +778,38 @@ void maps__insert(struct rb_root *maps, struct map *map)
 	}
 
 	rb_link_node(&map->rb_node, parent, p);
-	rb_insert_color(&map->rb_node, maps);
+	rb_insert_color(&map->rb_node, &maps->entries);
+	map__get(map);
 }
 
-void maps__remove(struct rb_root *maps, struct map *map)
+void maps__insert(struct maps *maps, struct map *map)
 {
-	rb_erase(&map->rb_node, maps);
+	pthread_rwlock_wrlock(&maps->lock);
+	__maps__insert(maps, map);
+	pthread_rwlock_unlock(&maps->lock);
 }
 
-struct map *maps__find(struct rb_root *maps, u64 ip)
+static void __maps__remove(struct maps *maps, struct map *map)
 {
-	struct rb_node **p = &maps->rb_node;
-	struct rb_node *parent = NULL;
+	rb_erase_init(&map->rb_node, &maps->entries);
+	map__put(map);
+}
+
+void maps__remove(struct maps *maps, struct map *map)
+{
+	pthread_rwlock_wrlock(&maps->lock);
+	__maps__remove(maps, map);
+	pthread_rwlock_unlock(&maps->lock);
+}
+
+struct map *maps__find(struct maps *maps, u64 ip)
+{
+	struct rb_node **p, *parent = NULL;
 	struct map *m;
 
+	pthread_rwlock_rdlock(&maps->lock);
+
+	p = &maps->entries.rb_node;
 	while (*p != NULL) {
 		parent = *p;
 		m = rb_entry(parent, struct map, rb_node);
@@ -739,15 +818,18 @@ struct map *maps__find(struct rb_root *maps, u64 ip)
 		else if (ip >= m->end)
 			p = &(*p)->rb_right;
 		else
-			return m;
+			goto out;
 	}
 
-	return NULL;
+	m = NULL;
+out:
+	pthread_rwlock_unlock(&maps->lock);
+	return m;
 }
 
-struct map *maps__first(struct rb_root *maps)
+struct map *maps__first(struct maps *maps)
 {
-	struct rb_node *first = rb_first(maps);
+	struct rb_node *first = rb_first(&maps->entries);
 
 	if (first)
 		return rb_entry(first, struct map, rb_node);
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index f2b27566d986..b8df09d94aca 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -5,6 +5,7 @@
 #include <linux/compiler.h>
 #include <linux/list.h>
 #include <linux/rbtree.h>
+#include <pthread.h>
 #include <stdio.h>
 #include <stdbool.h>
 #include <linux/types.h>
@@ -51,6 +52,7 @@ struct map {
 
 	struct dso *dso;
 	struct map_groups *groups;
+	atomic_t refcnt;
 };
 
 struct kmap {
@@ -58,9 +60,14 @@ struct kmap {
 	struct map_groups *kmaps;
 };
 
+struct maps {
+	struct rb_root	 entries;
+	pthread_rwlock_t lock;
+	struct list_head removed_maps;
+};
+
 struct map_groups {
-	struct rb_root maps[MAP__NR_TYPES];
-	struct list_head removed_maps[MAP__NR_TYPES];
+	struct maps maps[MAP__NR_TYPES];
 	struct machine *machine;
 	atomic_t refcnt;
 };
@@ -144,6 +151,16 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
 struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
 void map__delete(struct map *map);
 struct map *map__clone(struct map *map);
+
+static inline struct map *map__get(struct map *map)
+{
+	if (map)
+		atomic_inc(&map->refcnt);
+	return map;
+}
+
+void map__put(struct map *map);
+
 int map__overlap(struct map *l, struct map *r);
 size_t map__fprintf(struct map *map, FILE *fp);
 size_t map__fprintf_dsoname(struct map *map, FILE *fp);
@@ -162,10 +179,10 @@ void map__reloc_vmlinux(struct map *map);
 
 size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
 				  FILE *fp);
-void maps__insert(struct rb_root *maps, struct map *map);
-void maps__remove(struct rb_root *maps, struct map *map);
-struct map *maps__find(struct rb_root *maps, u64 addr);
-struct map *maps__first(struct rb_root *maps);
+void maps__insert(struct maps *maps, struct map *map);
+void maps__remove(struct maps *maps, struct map *map);
+struct map *maps__find(struct maps *maps, u64 addr);
+struct map *maps__first(struct maps *maps);
 struct map *map__next(struct map *map);
 void map_groups__init(struct map_groups *mg, struct machine *machine);
 void map_groups__exit(struct map_groups *mg);
diff --git a/tools/perf/util/parse-branch-options.c b/tools/perf/util/parse-branch-options.c
new file mode 100644
index 000000000000..9d999436658f
--- /dev/null
+++ b/tools/perf/util/parse-branch-options.c
@@ -0,0 +1,93 @@
+#include "perf.h"
+#include "util/util.h"
+#include "util/debug.h"
+#include "util/parse-options.h"
+#include "util/parse-branch-options.h"
+
+#define BRANCH_OPT(n, m) \
+	{ .name = n, .mode = (m) }
+
+#define BRANCH_END { .name = NULL }
+
+struct branch_mode {
+	const char *name;
+	int mode;
+};
+
+static const struct branch_mode branch_modes[] = {
+	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
+	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
+	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
+	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
+	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
+	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
+	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
+	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
+	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
+	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
+	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
+	BRANCH_END
+};
+
+int
+parse_branch_stack(const struct option *opt, const char *str, int unset)
+{
+#define ONLY_PLM \
+	(PERF_SAMPLE_BRANCH_USER |\
+	 PERF_SAMPLE_BRANCH_KERNEL |\
+	 PERF_SAMPLE_BRANCH_HV)
+
+	uint64_t *mode = (uint64_t *)opt->value;
+	const struct branch_mode *br;
+	char *s, *os = NULL, *p;
+	int ret = -1;
+
+	if (unset)
+		return 0;
+
+	/*
+	 * cannot set it twice, -b + --branch-filter for instance
+	 */
+	if (*mode)
+		return -1;
+
+	/* str may be NULL in case no arg is passed to -b */
+	if (str) {
+		/* because str is read-only */
+		s = os = strdup(str);
+		if (!s)
+			return -1;
+
+		for (;;) {
+			p = strchr(s, ',');
+			if (p)
+				*p = '\0';
+
+			for (br = branch_modes; br->name; br++) {
+				if (!strcasecmp(s, br->name))
+					break;
+			}
+			if (!br->name) {
+				ui__warning("unknown branch filter %s,"
+					    " check man page\n", s);
+				goto error;
+			}
+
+			*mode |= br->mode;
+
+			if (!p)
+				break;
+
+			s = p + 1;
+		}
+	}
+	ret = 0;
+
+	/* default to any branch */
+	if ((*mode & ~ONLY_PLM) == 0) {
+		*mode = PERF_SAMPLE_BRANCH_ANY;
+	}
+error:
+	free(os);
+	return ret;
+}
diff --git a/tools/perf/util/parse-branch-options.h b/tools/perf/util/parse-branch-options.h
new file mode 100644
index 000000000000..b9d9470c2e82
--- /dev/null
+++ b/tools/perf/util/parse-branch-options.h
@@ -0,0 +1,5 @@
+#ifndef _PERF_PARSE_BRANCH_OPTIONS_H
+#define _PERF_PARSE_BRANCH_OPTIONS_H 1
+struct option;
+int parse_branch_stack(const struct option *opt, const char *str, int unset);
+#endif /* _PERF_PARSE_BRANCH_OPTIONS_H */
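
For context, the new header is meant to be consumed by option tables built with perf's parse-options machinery, which is why parse_branch_stack() keeps the (opt, str, unset) callback signature. A hypothetical sketch of how a command could route a command-line string such as "u,any_call,cond" into a PERF_SAMPLE_BRANCH_* mask via the shared callback; the option spelling, variable name, and table here are illustrative assumptions, not taken from this patch:

#include <inttypes.h>
#include "util/parse-options.h"         /* struct option, OPT_CALLBACK, OPT_END */
#include "util/parse-branch-options.h"  /* parse_branch_stack */

/* Mask that parse_branch_stack() fills in; defaults to PERF_SAMPLE_BRANCH_ANY. */
static uint64_t branch_mode;

static const struct option example_options[] = {
	OPT_CALLBACK('j', "branch-filter", &branch_mode,
		     "branch filter mask",
		     "branch stack filter modes",
		     parse_branch_stack),
	OPT_END()
};
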
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 97da98481d89..b0b8a8080009 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -163,7 +163,7 @@ static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
 static struct map *kernel_get_module_map(const char *module)
 {
 	struct map_groups *grp = &host_machine->kmaps;
-	struct rb_root *maps = &grp->maps[MAP__FUNCTION];
+	struct maps *maps = &grp->maps[MAP__FUNCTION];
 	struct map *pos;
 
 	/* A file path -- this is an offline module */
@@ -195,7 +195,7 @@ static void put_target_map(struct map *map, bool user)
 {
 	if (map && user) {
 		/* Only the user map needs to be released */
-		map__delete(map);
+		map__put(map);
 	}
 }
 
@@ -1791,7 +1791,7 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
 
 out:
 	if (map && !is_kprobe) {
-		map__delete(map);
+		map__put(map);
 	}
 
 	return ret;
@@ -2884,7 +2884,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
 	dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
 end:
 	if (user) {
-		map__delete(map);
+		map__put(map);
 	}
 	exit_symbol_maps();
 
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 9d526a5312b1..fa10116a12ab 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -972,8 +972,10 @@ int dso__load_sym(struct dso *dso, struct map *map,
 		map->unmap_ip = map__unmap_ip;
 		/* Ensure maps are correctly ordered */
 		if (kmaps) {
+			map__get(map);
 			map_groups__remove(kmaps, map);
 			map_groups__insert(kmaps, map);
+			map__put(map);
 		}
 	}
 
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index b9e3eb581884..a3e80d6ad70a 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -202,12 +202,14 @@ void symbols__fixup_end(struct rb_root *symbols)
 
 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
 {
-	struct rb_root *maps = &mg->maps[type];
+	struct maps *maps = &mg->maps[type];
 	struct map *next, *curr;
 
+	pthread_rwlock_wrlock(&maps->lock);
+
 	curr = maps__first(maps);
 	if (curr == NULL)
-		return;
+		goto out_unlock;
 
 	for (next = map__next(curr); next; next = map__next(curr)) {
 		curr->end = next->start;
@@ -219,6 +221,9 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
 	 * last map final address.
 	 */
 	curr->end = ~0ULL;
+
+out_unlock:
+	pthread_rwlock_unlock(&maps->lock);
 }
 
 struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
@@ -654,14 +659,14 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
 		curr_map = map_groups__find(kmaps, map->type, pos->start);
 
 		if (!curr_map || (filter && filter(curr_map, pos))) {
-			rb_erase(&pos->rb_node, root);
+			rb_erase_init(&pos->rb_node, root);
 			symbol__delete(pos);
 		} else {
 			pos->start -= curr_map->start - curr_map->pgoff;
 			if (pos->end)
 				pos->end -= curr_map->start - curr_map->pgoff;
 			if (curr_map != map) {
-				rb_erase(&pos->rb_node, root);
+				rb_erase_init(&pos->rb_node, root);
 				symbols__insert(
 					&curr_map->dso->symbols[curr_map->type],
 					pos);
@@ -1168,20 +1173,23 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 	/* Add new maps */
 	while (!list_empty(&md.maps)) {
 		new_map = list_entry(md.maps.next, struct map, node);
-		list_del(&new_map->node);
+		list_del_init(&new_map->node);
 		if (new_map == replacement_map) {
 			map->start = new_map->start;
 			map->end = new_map->end;
 			map->pgoff = new_map->pgoff;
 			map->map_ip = new_map->map_ip;
 			map->unmap_ip = new_map->unmap_ip;
-			map__delete(new_map);
 			/* Ensure maps are correctly ordered */
+			map__get(map);
 			map_groups__remove(kmaps, map);
 			map_groups__insert(kmaps, map);
+			map__put(map);
 		} else {
 			map_groups__insert(kmaps, new_map);
 		}
+
+		map__put(new_map);
 	}
 
 	/*
@@ -1206,8 +1214,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 out_err:
 	while (!list_empty(&md.maps)) {
 		map = list_entry(md.maps.next, struct map, node);
-		list_del(&map->node);
-		map__delete(map);
+		list_del_init(&map->node);
+		map__put(map);
 	}
 	close(fd);
 	return -EINVAL;
@@ -1520,15 +1528,21 @@ out:
 struct map *map_groups__find_by_name(struct map_groups *mg,
 				     enum map_type type, const char *name)
 {
-	struct rb_root *maps = &mg->maps[type];
+	struct maps *maps = &mg->maps[type];
 	struct map *map;
 
+	pthread_rwlock_rdlock(&maps->lock);
+
 	for (map = maps__first(maps); map; map = map__next(map)) {
 		if (map->dso && strcmp(map->dso->short_name, name) == 0)
-			return map;
+			goto out_unlock;
 	}
 
-	return NULL;
+	map = NULL;
+
+out_unlock:
+	pthread_rwlock_unlock(&maps->lock);
+	return map;
 }
 
 int dso__load_vmlinux(struct dso *dso, struct map *map,