path: root/tools/perf
author     Namhyung Kim <namhyung@kernel.org>          2015-01-07 19:45:48 -0500
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2015-01-21 11:24:35 -0500
commit     566b5cfb035fb496280be61f976b5281563bfa27 (patch)
tree       b479446f8eac4be9f9e1e64df3dcca7ef591a886 /tools/perf
parent     56495a8affabe35aa0d94aae050d3e0e60d0455f (diff)
perf diff: Fix -o/--order option behavior
The prior change fixed the default output ordering for each column, but it
broke the -o/--order option.  This patch prepends a new hpp fmt struct to
the sort list, but not to the output field list, so that it can affect
ordering without adding a new output column.

The new hpp fmt uses its own compare functions, which treat dummy entries
(those which have no baseline) a little differently: the delta field can be
computed without a baseline, but the others (ratio and wdiff) cannot.

The new output will look like below:

  $ perf diff -o 2 perf.data.{old,cur,new}
  ...
  # Baseline/0  Delta/1  Delta/2  Shared Object      Symbol
  # ..........  .......  .......  .................  ..........................................
  #
       22.98%   +0.51%   +0.52%  libc-2.20.so       [.] _int_malloc
        5.70%   +0.28%   +0.30%  libc-2.20.so       [.] free
        4.38%   -0.21%   +0.25%  a.out              [.] main
        1.32%   -0.15%   +0.05%  a.out              [.] free@plt
                         +0.01%  [kernel.kallsyms]  [k] intel_pstate_timer_func
                         +0.01%  [kernel.kallsyms]  [k] _raw_spin_lock_irqsave
                         +0.01%  [kernel.kallsyms]  [k] timekeeping_update.constprop.8
                +0.01%   +0.01%  [kernel.kallsyms]  [k] apic_timer_interrupt
        0.01%            -0.00%  [kernel.kallsyms]  [k] native_read_msr_safe
        0.01%   -0.01%   -0.01%  [kernel.kallsyms]  [k] native_write_msr_safe
        1.31%   +0.03%   -0.06%  a.out              [.] malloc@plt
       31.50%   -0.74%   -0.23%  libc-2.20.so       [.] _int_free
       32.75%   +0.28%   -0.83%  libc-2.20.so       [.] malloc
        0.01%                    [kernel.kallsyms]  [k] scheduler_tick
                +0.01%           [kernel.kallsyms]  [k] read_tsc
                +0.01%           [kernel.kallsyms]  [k] perf_adjust_freq_unthr_context.part.82

In the above example, the output was sorted by the 'Delta/2' column first,
then by 'Baseline/0' and finally by 'Delta/1'.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1420677949-6719-8-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
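For illustration only, here is a minimal, self-contained sketch of the ordering rule described above: entries are compared primarily by the value in a user-chosen column (the -o/--order index), and entries with no data for that column sink to the bottom.  The toy types and names (toy_entry, cmp_by_delta_idx, sort_idx) are hypothetical stand-ins for perf's hist_entry/perf_hpp_fmt machinery; this is a sketch of the idea, not the perf implementation.

/* Toy model of "sort by the delta of column sort_idx, entries without
 * data for that column last" - not the perf code itself. */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct toy_entry {
	const char *sym;
	bool has[3];		/* does column i have data?          */
	double delta[3];	/* delta value for column i (if any) */
};

static int sort_idx = 2;	/* analogue of 'perf diff -o 2' */

static int cmp_by_delta_idx(const void *pa, const void *pb)
{
	const struct toy_entry *a = pa, *b = pb;

	/* Entries lacking data for the chosen column go last. */
	if (a->has[sort_idx] != b->has[sort_idx])
		return a->has[sort_idx] ? -1 : 1;
	if (!a->has[sort_idx])
		return 0;
	/* Larger delta first (descending). */
	if (a->delta[sort_idx] < b->delta[sort_idx])
		return 1;
	if (a->delta[sort_idx] > b->delta[sort_idx])
		return -1;
	return 0;
}

int main(void)
{
	struct toy_entry e[] = {
		{ "malloc",    { true,  true, true  }, { 0, +0.28, -0.83 } },
		{ "main",      { true,  true, true  }, { 0, -0.21, +0.25 } },
		{ "read_tsc",  { false, true, false }, { 0, +0.01,  0    } },
		{ "_int_free", { true,  true, true  }, { 0, -0.74, -0.23 } },
	};
	size_t i, n = sizeof(e) / sizeof(e[0]);

	qsort(e, n, sizeof(e[0]), cmp_by_delta_idx);
	for (i = 0; i < n; i++)
		printf("%s\n", e[i].sym);	/* main, _int_free, malloc, read_tsc */
	return 0;
}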
Diffstat (limited to 'tools/perf')
-rw-r--r--  tools/perf/builtin-diff.c | 101
1 file changed, 99 insertions(+), 2 deletions(-)
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 98444561d9b4..74aada554b12 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -558,6 +558,37 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
 }
 
 static int64_t
+hist_entry__cmp_compute_idx(struct hist_entry *left, struct hist_entry *right,
+			    int c, int sort_idx)
+{
+	struct hist_entry *p_right, *p_left;
+
+	p_left  = get_pair_data(left, &data__files[sort_idx]);
+	p_right = get_pair_data(right, &data__files[sort_idx]);
+
+	if (!p_left && !p_right)
+		return 0;
+
+	if (!p_left || !p_right)
+		return p_left ? -1 : 1;
+
+	if (c != COMPUTE_DELTA) {
+		/*
+		 * The delta can be computed without the baseline, but
+		 * others are not.  Put those entries which have no
+		 * values below.
+		 */
+		if (left->dummy && right->dummy)
+			return 0;
+
+		if (left->dummy || right->dummy)
+			return left->dummy ? 1 : -1;
+	}
+
+	return __hist_entry__cmp_compute(p_left, p_right, c);
+}
+
+static int64_t
 hist_entry__cmp_nop(struct perf_hpp_fmt *fmt __maybe_unused,
 		    struct hist_entry *left __maybe_unused,
 		    struct hist_entry *right __maybe_unused)
@@ -601,6 +632,30 @@ hist_entry__cmp_wdiff(struct perf_hpp_fmt *fmt,
 	return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF, d->idx);
 }
 
+static int64_t
+hist_entry__cmp_delta_idx(struct perf_hpp_fmt *fmt __maybe_unused,
+			  struct hist_entry *left, struct hist_entry *right)
+{
+	return hist_entry__cmp_compute_idx(right, left, COMPUTE_DELTA,
+					   sort_compute);
+}
+
+static int64_t
+hist_entry__cmp_ratio_idx(struct perf_hpp_fmt *fmt __maybe_unused,
+			  struct hist_entry *left, struct hist_entry *right)
+{
+	return hist_entry__cmp_compute_idx(right, left, COMPUTE_RATIO,
+					   sort_compute);
+}
+
+static int64_t
+hist_entry__cmp_wdiff_idx(struct perf_hpp_fmt *fmt __maybe_unused,
+			  struct hist_entry *left, struct hist_entry *right)
+{
+	return hist_entry__cmp_compute_idx(right, left, COMPUTE_WEIGHTED_DIFF,
+					   sort_compute);
+}
+
 static void hists__process(struct hists *hists)
 {
 	if (show_baseline_only)
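Note that the three *_idx wrappers pass (right, left) into hist_entry__cmp_compute_idx(), mirroring the existing hist_entry__cmp_delta/ratio/wdiff helpers; swapping the arguments is presumably what turns the comparison into a descending sort, so larger deltas come first.  A tiny standalone sketch of that argument-swap trick (toy code, not perf):

/* Swapping a comparator's arguments reverses the sort direction. */
#include <stdio.h>
#include <stdlib.h>

static int cmp_double(const void *pa, const void *pb)
{
	const double *a = pa, *b = pb;

	return (*a > *b) - (*a < *b);	/* ascending */
}

static int cmp_double_rev(const void *pa, const void *pb)
{
	return cmp_double(pb, pa);	/* swapped args => descending */
}

int main(void)
{
	double d[] = { 0.25, -0.83, 0.05, -0.23 };
	size_t i, n = sizeof(d) / sizeof(d[0]);

	qsort(d, n, sizeof(d[0]), cmp_double_rev);
	for (i = 0; i < n; i++)
		printf("%+.2f\n", d[i]);	/* +0.25 +0.05 -0.23 -0.83 */
	return 0;
}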
@@ -1074,9 +1129,10 @@ static void data__hpp_register(struct data__file *d, int idx)
 	perf_hpp__register_sort_field(fmt);
 }
 
-static void ui_init(void)
+static int ui_init(void)
 {
 	struct data__file *d;
+	struct perf_hpp_fmt *fmt;
 	int i;
 
 	data__for_each_file(i, d) {
@@ -1106,6 +1162,46 @@ static void ui_init(void)
 		data__hpp_register(d, i ? PERF_HPP_DIFF__PERIOD :
 				      PERF_HPP_DIFF__PERIOD_BASELINE);
 	}
+
+	if (!sort_compute)
+		return 0;
+
+	/*
+	 * Prepend an fmt to sort on columns at 'sort_compute' first.
+	 * This fmt is added only to the sort list but not to the
+	 * output fields list.
+	 *
+	 * Note that this column (data) can be compared twice - one
+	 * for this 'sort_compute' fmt and another for the normal
+	 * diff_hpp_fmt.  But it shouldn't a problem as most entries
+	 * will be sorted out by first try or baseline and comparing
+	 * is not a costly operation.
+	 */
+	fmt = zalloc(sizeof(*fmt));
+	if (fmt == NULL) {
+		pr_err("Memory allocation failed\n");
+		return -1;
+	}
+
+	fmt->cmp = hist_entry__cmp_nop;
+	fmt->collapse = hist_entry__cmp_nop;
+
+	switch (compute) {
+	case COMPUTE_DELTA:
+		fmt->sort = hist_entry__cmp_delta_idx;
+		break;
+	case COMPUTE_RATIO:
+		fmt->sort = hist_entry__cmp_ratio_idx;
+		break;
+	case COMPUTE_WEIGHTED_DIFF:
+		fmt->sort = hist_entry__cmp_wdiff_idx;
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	list_add(&fmt->sort_list, &perf_hpp__sort_list);
+	return 0;
 }
 
 static int data_init(int argc, const char **argv)
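The key point of this hunk is that the new fmt is prepended (list_add() puts it at the head of perf_hpp__sort_list), so it is consulted before the per-column diff formats and becomes the primary sort key without ever appearing in the output.  A rough standalone sketch of why head position matters, assuming a composite comparator that walks its keys in order and lets the first non-zero result decide (toy code; perf walks a list of struct perf_hpp_fmt rather than a function-pointer array):

/* First key in the list that sees a difference decides the order. */
#include <stdio.h>

struct rec { double baseline, delta2; };

typedef int (*keyfn)(const struct rec *, const struct rec *);

static int by_delta2(const struct rec *a, const struct rec *b)
{
	return (a->delta2 < b->delta2) - (a->delta2 > b->delta2);   /* descending */
}

static int by_baseline(const struct rec *a, const struct rec *b)
{
	return (a->baseline < b->baseline) - (a->baseline > b->baseline);
}

/* Sort keys in priority order; index 0 plays the role of the prepended fmt. */
static keyfn sort_keys[] = { by_delta2, by_baseline };

static int composite_cmp(const struct rec *a, const struct rec *b)
{
	size_t i;

	for (i = 0; i < sizeof(sort_keys) / sizeof(sort_keys[0]); i++) {
		int ret = sort_keys[i](a, b);

		if (ret)
			return ret;	/* first key that differs decides */
	}
	return 0;
}

int main(void)
{
	struct rec x = { .baseline = 32.75, .delta2 = -0.83 };
	struct rec y = { .baseline =  4.38, .delta2 = +0.25 };

	/* y sorts first despite its smaller baseline, because the prepended
	 * delta2 key is consulted before the baseline key. */
	printf("%s first\n", composite_cmp(&x, &y) > 0 ? "y" : "x");
	return 0;
}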
@@ -1171,7 +1267,8 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
 	if (data_init(argc, argv) < 0)
 		return -1;
 
-	ui_init();
+	if (ui_init() < 0)
+		return -1;
 
 	sort__mode = SORT_MODE__DIFF;
 