Diffstat (limited to 'tools/perf/util/hist.c')
-rw-r--r--	tools/perf/util/hist.c	96
1 file changed, 42 insertions, 54 deletions
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 6b32721f829a..b11a6cfdb414 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -70,9 +70,17 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 	int symlen;
 	u16 len;
 
-	if (h->ms.sym)
-		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
-	else {
+	/*
+	 * +4 accounts for '[x] ' priv level info
+	 * +2 accounts for 0x prefix on raw addresses
+	 * +3 accounts for ' y ' symtab origin info
+	 */
+	if (h->ms.sym) {
+		symlen = h->ms.sym->namelen + 4;
+		if (verbose)
+			symlen += BITS_PER_LONG / 4 + 2 + 3;
+		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
+	} else {
 		symlen = unresolved_col_width + 4 + 2;
 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
 		hists__set_unres_dso_col_len(hists, HISTC_DSO);
@@ -91,12 +99,10 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
 
 	if (h->branch_info) {
-		/*
-		 * +4 accounts for '[x] ' priv level info
-		 * +2 account of 0x prefix on raw addresses
-		 */
 		if (h->branch_info->from.sym) {
 			symlen = (int)h->branch_info->from.sym->namelen + 4;
+			if (verbose)
+				symlen += BITS_PER_LONG / 4 + 2 + 3;
 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
 
 			symlen = dso__name_len(h->branch_info->from.map->dso);
@@ -109,6 +115,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 
 		if (h->branch_info->to.sym) {
 			symlen = (int)h->branch_info->to.sym->namelen + 4;
+			if (verbose)
+				symlen += BITS_PER_LONG / 4 + 2 + 3;
 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
 
 			symlen = dso__name_len(h->branch_info->to.map->dso);
@@ -121,10 +129,6 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 	}
 
 	if (h->mem_info) {
-		/*
-		 * +4 accounts for '[x] ' priv level info
-		 * +2 account of 0x prefix on raw addresses
-		 */
 		if (h->mem_info->daddr.sym) {
 			symlen = (int)h->mem_info->daddr.sym->namelen + 4
 				 + unresolved_col_width + 2;
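
The three verbose additions above share the same arithmetic: in verbose mode each
symbol column must also hold the raw address and the symtab origin marker, so the
width grows by BITS_PER_LONG / 4 hex digits, plus 2 for the "0x" prefix, plus 3
for the ' y ' origin. A minimal standalone sketch of that computation (sym_len is
a hypothetical value standing in for h->ms.sym->namelen):

	#include <stdio.h>

	#define BITS_PER_LONG 64	/* assumption: 64-bit build */

	int main(void)
	{
		int sym_len = 20;		/* hypothetical symbol name length */
		int symlen = sym_len + 4;	/* '[x] ' priv level prefix */

		/*
		 * Verbose output also prepends the raw address and the symtab
		 * origin: 16 hex digits + "0x" + " y " = 21 extra columns.
		 */
		symlen += BITS_PER_LONG / 4 + 2 + 3;

		printf("verbose symbol column width: %d\n", symlen);	/* prints 45 */
		return 0;
	}
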
@@ -236,8 +240,7 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
 	return he->stat.period == 0;
 }
 
-static void __hists__decay_entries(struct hists *hists, bool zap_user,
-				   bool zap_kernel, bool threaded)
+void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
 {
 	struct rb_node *next = rb_first(&hists->entries);
 	struct hist_entry *n;
@@ -256,7 +259,7 @@ static void __hists__decay_entries(struct hists *hists, bool zap_user,
 		    !n->used) {
 			rb_erase(&n->rb_node, &hists->entries);
 
-			if (sort__need_collapse || threaded)
+			if (sort__need_collapse)
 				rb_erase(&n->rb_node_in, &hists->entries_collapsed);
 
 			hist_entry__free(n);
@@ -265,17 +268,6 @@ static void __hists__decay_entries(struct hists *hists, bool zap_user,
 	}
 }
 
-void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
-{
-	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
-}
-
-void hists__decay_entries_threaded(struct hists *hists,
-				   bool zap_user, bool zap_kernel)
-{
-	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
-}
-
 /*
  * histogram, sorted on item, collects periods
  */
@@ -292,6 +284,20 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template)
 			he->ms.map->referenced = true;
 
 		if (he->branch_info) {
+			/*
+			 * This branch info is (a part of) allocated from
+			 * machine__resolve_bstack() and will be freed after
+			 * adding new entries. So we need to save a copy.
+			 */
+			he->branch_info = malloc(sizeof(*he->branch_info));
+			if (he->branch_info == NULL) {
+				free(he);
+				return NULL;
+			}
+
+			memcpy(he->branch_info, template->branch_info,
+			       sizeof(*he->branch_info));
+
 			if (he->branch_info->from.map)
 				he->branch_info->from.map->referenced = true;
 			if (he->branch_info->to.map)
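
The copy added to hist_entry__new() is a take-ownership fix: the template's
branch_info points at scratch memory that is freed once the entries have been
added, so a surviving entry needs its own allocation. A standalone sketch of the
pattern, using hypothetical stand-in types rather than the perf structs:

	#include <stdlib.h>
	#include <string.h>

	struct binfo { long from, to; };	/* stand-in for branch_info */
	struct entry { struct binfo *bi; };	/* stand-in for hist_entry */

	/*
	 * Duplicate the template, deep-copying the caller-owned binfo so the
	 * new entry stays valid after the caller frees its scratch buffer.
	 */
	static struct entry *entry_new(const struct entry *template)
	{
		struct entry *e = malloc(sizeof(*e));

		if (e == NULL)
			return NULL;

		*e = *template;
		if (template->bi) {
			e->bi = malloc(sizeof(*e->bi));
			if (e->bi == NULL) {
				free(e);	/* don't leak the half-built entry */
				return NULL;
			}
			memcpy(e->bi, template->bi, sizeof(*e->bi));
		}
		return e;
	}
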
@@ -341,8 +347,6 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
 	struct hist_entry *he;
 	int cmp;
 
-	pthread_mutex_lock(&hists->lock);
-
 	p = &hists->entries_in->rb_node;
 
 	while (*p != NULL) {
@@ -360,6 +364,12 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
 		if (!cmp) {
 			he_stat__add_period(&he->stat, period, weight);
 
+			/*
+			 * This mem info was allocated from machine__resolve_mem
+			 * and will not be used anymore.
+			 */
+			free(entry->mem_info);
+
 			/* If the map of an existing hist_entry has
 			 * become out-of-date due to an exec() or
 			 * similar, update it. Otherwise we will
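
The free() is the other half of the ownership story: on this merge path no entry
ever takes the resolved mem_info, so the scratch allocation from
machine__resolve_mem() would otherwise leak. A minimal sketch of the
merge-then-free pattern, again with hypothetical stand-in types:

	#include <stdlib.h>

	struct minfo { unsigned long daddr; };	/* stand-in for mem_info */
	struct entry {
		unsigned long period;
		struct minfo *mi;
	};

	/*
	 * An equivalent entry already exists: accumulate the period and
	 * release the resolver's scratch minfo, which nobody will own.
	 */
	static void entry_merge(struct entry *existing, struct entry *sample)
	{
		existing->period += sample->period;
		free(sample->mi);
		sample->mi = NULL;	/* guard against double free */
	}
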
@@ -382,14 +392,12 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
 
 	he = hist_entry__new(entry);
 	if (!he)
-		goto out_unlock;
+		return NULL;
 
 	rb_link_node(&he->rb_node_in, parent, p);
 	rb_insert_color(&he->rb_node_in, hists->entries_in);
 out:
 	hist_entry__add_cpumode_period(he, al->cpumode, period);
-out_unlock:
-	pthread_mutex_unlock(&hists->lock);
 	return he;
 }
 
@@ -589,13 +597,13 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
 	hists__filter_entry_by_symbol(hists, he);
 }
 
-static void __hists__collapse_resort(struct hists *hists, bool threaded)
+void hists__collapse_resort(struct hists *hists)
 {
 	struct rb_root *root;
 	struct rb_node *next;
 	struct hist_entry *n;
 
-	if (!sort__need_collapse && !threaded)
+	if (!sort__need_collapse)
 		return;
 
 	root = hists__get_rotate_entries_in(hists);
@@ -617,16 +625,6 @@ static void __hists__collapse_resort(struct hists *hists, bool threaded)
 	}
 }
 
-void hists__collapse_resort(struct hists *hists)
-{
-	return __hists__collapse_resort(hists, false);
-}
-
-void hists__collapse_resort_threaded(struct hists *hists)
-{
-	return __hists__collapse_resort(hists, true);
-}
-
 /*
  * reverse the map, sort on period.
  */
@@ -713,7 +711,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
 	rb_insert_color(&he->rb_node, entries);
 }
 
-static void __hists__output_resort(struct hists *hists, bool threaded)
+void hists__output_resort(struct hists *hists)
 {
 	struct rb_root *root;
 	struct rb_node *next;
@@ -722,7 +720,7 @@ static void __hists__output_resort(struct hists *hists, bool threaded)
 
 	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
 
-	if (sort__need_collapse || threaded)
+	if (sort__need_collapse)
 		root = &hists->entries_collapsed;
 	else
 		root = hists->entries_in;
@@ -743,16 +741,6 @@ static void __hists__output_resort(struct hists *hists, bool threaded)
 	}
 }
 
-void hists__output_resort(struct hists *hists)
-{
-	return __hists__output_resort(hists, false);
-}
-
-void hists__output_resort_threaded(struct hists *hists)
-{
-	return __hists__output_resort(hists, true);
-}
-
 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
 				       enum hist_filter filter)
 {
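
Taken together, the removals leave one single-threaded path: with the *_threaded
wrappers and the hists->lock gone, a consumer drives the resort steps directly.
A hypothetical caller-side sequence, assuming only the function names visible in
this diff (not a compilable unit on its own, since the perf types are elided):

	/* hypothetical consumer of the post-patch API */
	static void report__resort(struct hists *hists)
	{
		hists__collapse_resort(hists);	/* merge entries_in into the collapsed tree */
		hists__output_resort(hists);	/* then sort by period for display */
	}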