aboutsummaryrefslogtreecommitdiffstats
path: root/tools/perf/builtin-diff.c
diff options
context:
space:
mode:
authorNamhyung Kim <namhyung.kim@lge.com>2012-12-10 03:29:55 -0500
committerArnaldo Carvalho de Melo <acme@redhat.com>2013-01-24 14:40:06 -0500
commitce74f60eab3cc8b7a3b0cb9c29ec9b1e1abac7d2 (patch)
tree1a09caebc2415509f92fb032379d5b0b54311c23 /tools/perf/builtin-diff.c
parent9afcf930b1fa1158b0878afeba3eff299300dc65 (diff)
perf hists: Link hist entries before inserting to an output tree
For matching and/or linking hist entries, they need to be sorted by given sort keys. However, current hists__match/link did this on the output trees, so the entries in the output tree needed to be resorted before doing it. This is not ideal, since we have trees for collecting or collapsing entries before passing them to an output tree, and they're already sorted by the given sort keys. Since we don't need to print anything at the time of matching/linking, we can use these internal trees directly instead of bothering with a double resort on the output tree. Its only user - at the time of this writing - perf diff can be easily converted to use the internal tree and can save some lines too by getting rid of unnecessary resorting code. Signed-off-by: Namhyung Kim <namhyung@kernel.org> Acked-by: Jiri Olsa <jolsa@redhat.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Stephane Eranian <eranian@google.com> Link: http://lkml.kernel.org/r/1355128197-18193-3-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/builtin-diff.c')
-rw-r--r--tools/perf/builtin-diff.c65
1 file changed, 17 insertions, 48 deletions
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 4dda6f4dc618..8b896f5eded0 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -275,43 +275,6 @@ static struct perf_tool tool = {
275 .ordering_requires_timestamps = true, 275 .ordering_requires_timestamps = true,
276}; 276};
277 277
278static void insert_hist_entry_by_name(struct rb_root *root,
279 struct hist_entry *he)
280{
281 struct rb_node **p = &root->rb_node;
282 struct rb_node *parent = NULL;
283 struct hist_entry *iter;
284
285 while (*p != NULL) {
286 parent = *p;
287 iter = rb_entry(parent, struct hist_entry, rb_node);
288 if (hist_entry__cmp(iter, he) < 0)
289 p = &(*p)->rb_left;
290 else
291 p = &(*p)->rb_right;
292 }
293
294 rb_link_node(&he->rb_node, parent, p);
295 rb_insert_color(&he->rb_node, root);
296}
297
298static void hists__name_resort(struct hists *self)
299{
300 struct rb_root tmp = RB_ROOT;
301 struct rb_node *next = rb_first(&self->entries);
302
303 while (next != NULL) {
304 struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);
305
306 next = rb_next(&n->rb_node);
307
308 rb_erase(&n->rb_node, &self->entries);
309 insert_hist_entry_by_name(&tmp, n);
310 }
311
312 self->entries = tmp;
313}
314
315static struct perf_evsel *evsel_match(struct perf_evsel *evsel, 278static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
316 struct perf_evlist *evlist) 279 struct perf_evlist *evlist)
317{ 280{
@@ -324,30 +287,34 @@ static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
324 return NULL; 287 return NULL;
325} 288}
326 289
327static void perf_evlist__resort_hists(struct perf_evlist *evlist, bool name) 290static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
328{ 291{
329 struct perf_evsel *evsel; 292 struct perf_evsel *evsel;
330 293
331 list_for_each_entry(evsel, &evlist->entries, node) { 294 list_for_each_entry(evsel, &evlist->entries, node) {
332 struct hists *hists = &evsel->hists; 295 struct hists *hists = &evsel->hists;
333 296
334 hists__output_resort(hists); 297 hists__collapse_resort(hists);
335
336 if (name)
337 hists__name_resort(hists);
338 } 298 }
339} 299}
340 300
341static void hists__baseline_only(struct hists *hists) 301static void hists__baseline_only(struct hists *hists)
342{ 302{
343 struct rb_node *next = rb_first(&hists->entries); 303 struct rb_root *root;
304 struct rb_node *next;
305
306 if (sort__need_collapse)
307 root = &hists->entries_collapsed;
308 else
309 root = hists->entries_in;
344 310
311 next = rb_first(root);
345 while (next != NULL) { 312 while (next != NULL) {
346 struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node); 313 struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in);
347 314
348 next = rb_next(&he->rb_node); 315 next = rb_next(&he->rb_node_in);
349 if (!hist_entry__next_pair(he)) { 316 if (!hist_entry__next_pair(he)) {
350 rb_erase(&he->rb_node, &hists->entries); 317 rb_erase(&he->rb_node_in, root);
351 hist_entry__free(he); 318 hist_entry__free(he);
352 } 319 }
353 } 320 }
@@ -471,6 +438,8 @@ static void hists__process(struct hists *old, struct hists *new)
471 else 438 else
472 hists__link(new, old); 439 hists__link(new, old);
473 440
441 hists__output_resort(new);
442
474 if (sort_compute) { 443 if (sort_compute) {
475 hists__precompute(new); 444 hists__precompute(new);
476 hists__compute_resort(new); 445 hists__compute_resort(new);
@@ -505,8 +474,8 @@ static int __cmd_diff(void)
505 evlist_old = older->evlist; 474 evlist_old = older->evlist;
506 evlist_new = newer->evlist; 475 evlist_new = newer->evlist;
507 476
508 perf_evlist__resort_hists(evlist_old, true); 477 perf_evlist__collapse_resort(evlist_old);
509 perf_evlist__resort_hists(evlist_new, false); 478 perf_evlist__collapse_resort(evlist_new);
510 479
511 list_for_each_entry(evsel, &evlist_new->entries, node) { 480 list_for_each_entry(evsel, &evlist_new->entries, node) {
512 struct perf_evsel *evsel_old; 481 struct perf_evsel *evsel_old;