author     Namhyung Kim <namhyung.kim@lge.com>        2012-12-10 03:29:55 -0500
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2013-01-24 14:40:06 -0500
commit     ce74f60eab3cc8b7a3b0cb9c29ec9b1e1abac7d2 (patch)
tree       1a09caebc2415509f92fb032379d5b0b54311c23 /tools
parent     9afcf930b1fa1158b0878afeba3eff299300dc65 (diff)
perf hists: Link hist entries before inserting to an output tree
For matching and/or linking hist entries, they need to be sorted by the given sort keys. However, the current hists__match/link did this on the output trees, so the entries in the output tree needed to be resorted before doing it.

This is not ideal, since we already have trees for collecting or collapsing entries before passing them to the output tree, and they are already sorted by the given sort keys. Since we don't need to print anything at the time of matching/linking, we can use these internal trees directly instead of bothering with a double resort on the output tree.

Its only user at the time of this writing, perf diff, can easily be converted to use the internal trees, and it saves some lines too by getting rid of the now-unnecessary resorting code.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1355128197-18193-3-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
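For context, a minimal sketch (not part of the patch) of the call ordering this change establishes for a perf-diff-style caller. The wrapper process_hists and its simplified two-hists signature are illustrative only; the hists__collapse_resort, hists__match, hists__link and hists__output_resort calls are the ones touched by the patch.

/* Illustrative sketch only, not taken from the patch. */
static void process_hists(struct hists *base, struct hists *new)
{
        /* Sort entries into the internal (input/collapsed) trees first. */
        hists__collapse_resort(base);
        hists__collapse_resort(new);

        /*
         * Matching/linking now walks the internal trees, so no output
         * resort is needed beforehand.
         */
        hists__match(new, base);        /* pair up entries found in both */
        hists__link(new, base);         /* add dummy pairs for the rest  */

        /* Build the output tree once, only when ready to print. */
        hists__output_resort(new);
}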
Diffstat (limited to 'tools')
-rw-r--r--  tools/perf/builtin-diff.c  | 65
-rw-r--r--  tools/perf/util/hist.c     | 49
2 files changed, 54 insertions(+), 60 deletions(-)
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 4dda6f4dc618..8b896f5eded0 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -275,43 +275,6 @@ static struct perf_tool tool = {
         .ordering_requires_timestamps = true,
 };
 
-static void insert_hist_entry_by_name(struct rb_root *root,
-                                      struct hist_entry *he)
-{
-        struct rb_node **p = &root->rb_node;
-        struct rb_node *parent = NULL;
-        struct hist_entry *iter;
-
-        while (*p != NULL) {
-                parent = *p;
-                iter = rb_entry(parent, struct hist_entry, rb_node);
-                if (hist_entry__cmp(iter, he) < 0)
-                        p = &(*p)->rb_left;
-                else
-                        p = &(*p)->rb_right;
-        }
-
-        rb_link_node(&he->rb_node, parent, p);
-        rb_insert_color(&he->rb_node, root);
-}
-
-static void hists__name_resort(struct hists *self)
-{
-        struct rb_root tmp = RB_ROOT;
-        struct rb_node *next = rb_first(&self->entries);
-
-        while (next != NULL) {
-                struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);
-
-                next = rb_next(&n->rb_node);
-
-                rb_erase(&n->rb_node, &self->entries);
-                insert_hist_entry_by_name(&tmp, n);
-        }
-
-        self->entries = tmp;
-}
-
 static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
                                       struct perf_evlist *evlist)
 {
@@ -324,30 +287,34 @@ static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
         return NULL;
 }
 
-static void perf_evlist__resort_hists(struct perf_evlist *evlist, bool name)
+static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
 {
         struct perf_evsel *evsel;
 
         list_for_each_entry(evsel, &evlist->entries, node) {
                 struct hists *hists = &evsel->hists;
 
-                hists__output_resort(hists);
-
-                if (name)
-                        hists__name_resort(hists);
+                hists__collapse_resort(hists);
         }
 }
 
 static void hists__baseline_only(struct hists *hists)
 {
-        struct rb_node *next = rb_first(&hists->entries);
+        struct rb_root *root;
+        struct rb_node *next;
+
+        if (sort__need_collapse)
+                root = &hists->entries_collapsed;
+        else
+                root = hists->entries_in;
 
+        next = rb_first(root);
         while (next != NULL) {
-                struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node);
+                struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in);
 
-                next = rb_next(&he->rb_node);
+                next = rb_next(&he->rb_node_in);
                 if (!hist_entry__next_pair(he)) {
-                        rb_erase(&he->rb_node, &hists->entries);
+                        rb_erase(&he->rb_node_in, root);
                         hist_entry__free(he);
                 }
         }
@@ -471,6 +438,8 @@ static void hists__process(struct hists *old, struct hists *new)
         else
                 hists__link(new, old);
 
+        hists__output_resort(new);
+
         if (sort_compute) {
                 hists__precompute(new);
                 hists__compute_resort(new);
@@ -505,8 +474,8 @@ static int __cmd_diff(void)
         evlist_old = older->evlist;
         evlist_new = newer->evlist;
 
-        perf_evlist__resort_hists(evlist_old, true);
-        perf_evlist__resort_hists(evlist_new, false);
+        perf_evlist__collapse_resort(evlist_old);
+        perf_evlist__collapse_resort(evlist_new);
 
         list_for_each_entry(evsel, &evlist_new->entries, node) {
                 struct perf_evsel *evsel_old;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 3a9ccd09835d..8ff3c2f8a5dd 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -726,16 +726,24 @@ void hists__inc_nr_events(struct hists *hists, u32 type)
 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                  struct hist_entry *pair)
 {
-        struct rb_node **p = &hists->entries.rb_node;
+        struct rb_root *root;
+        struct rb_node **p;
         struct rb_node *parent = NULL;
         struct hist_entry *he;
         int cmp;
 
+        if (sort__need_collapse)
+                root = &hists->entries_collapsed;
+        else
+                root = hists->entries_in;
+
+        p = &root->rb_node;
+
         while (*p != NULL) {
                 parent = *p;
-                he = rb_entry(parent, struct hist_entry, rb_node);
+                he = rb_entry(parent, struct hist_entry, rb_node_in);
 
-                cmp = hist_entry__cmp(he, pair);
+                cmp = hist_entry__collapse(he, pair);
 
                 if (!cmp)
                         goto out;
@@ -750,8 +758,8 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
         if (he) {
                 memset(&he->stat, 0, sizeof(he->stat));
                 he->hists = hists;
-                rb_link_node(&he->rb_node, parent, p);
-                rb_insert_color(&he->rb_node, &hists->entries);
+                rb_link_node(&he->rb_node_in, parent, p);
+                rb_insert_color(&he->rb_node_in, root);
                 hists__inc_nr_entries(hists, he);
         }
 out:
@@ -761,11 +769,16 @@ out:
 static struct hist_entry *hists__find_entry(struct hists *hists,
                                             struct hist_entry *he)
 {
-        struct rb_node *n = hists->entries.rb_node;
+        struct rb_node *n;
+
+        if (sort__need_collapse)
+                n = hists->entries_collapsed.rb_node;
+        else
+                n = hists->entries_in->rb_node;
 
         while (n) {
-                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node);
-                int64_t cmp = hist_entry__cmp(iter, he);
+                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
+                int64_t cmp = hist_entry__collapse(iter, he);
 
                 if (cmp < 0)
                         n = n->rb_left;
@@ -783,11 +796,17 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
  */
 void hists__match(struct hists *leader, struct hists *other)
 {
+        struct rb_root *root;
         struct rb_node *nd;
         struct hist_entry *pos, *pair;
 
-        for (nd = rb_first(&leader->entries); nd; nd = rb_next(nd)) {
-                pos = rb_entry(nd, struct hist_entry, rb_node);
+        if (sort__need_collapse)
+                root = &leader->entries_collapsed;
+        else
+                root = leader->entries_in;
+
+        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+                pos = rb_entry(nd, struct hist_entry, rb_node_in);
                 pair = hists__find_entry(other, pos);
 
                 if (pair)
@@ -802,11 +821,17 @@ void hists__match(struct hists *leader, struct hists *other)
  */
 int hists__link(struct hists *leader, struct hists *other)
 {
+        struct rb_root *root;
         struct rb_node *nd;
         struct hist_entry *pos, *pair;
 
-        for (nd = rb_first(&other->entries); nd; nd = rb_next(nd)) {
-                pos = rb_entry(nd, struct hist_entry, rb_node);
+        if (sort__need_collapse)
+                root = &other->entries_collapsed;
+        else
+                root = other->entries_in;
+
+        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+                pos = rb_entry(nd, struct hist_entry, rb_node_in);
 
                 if (!hist_entry__has_pairs(pos)) {
                         pair = hists__add_dummy_entry(leader, pos);