aboutsummaryrefslogtreecommitdiffstats
path: root/tools
diff options
context:
space:
mode:
authorNamhyung Kim <namhyung@kernel.org>2013-10-29 20:40:34 -0400
committerJiri Olsa <jolsa@kernel.org>2014-06-01 08:34:55 -0400
commit69bcb019fc809874f518559c8e5b0a90176f0532 (patch)
tree0d1bf258aa9a06c23981ea9220fe5360d1280ed1 /tools
parent1844dbcbe78503e0f4a8996d69da725d5e7a5177 (diff)
perf tools: Introduce struct hist_entry_iter
There is some duplicated code when adding hist entries. The variants differ in that some have branch info or mem info, but they generally do the same thing. So introduce a new struct hist_entry_iter and add callbacks to customize each case in a general way. The new perf_evsel__add_entry() function will look like: iter->prepare_entry(); iter->add_single_entry(); while (iter->next_entry()) iter->add_next_entry(); iter->finish_entry(); This will help further work like the cumulative callchain patchset. Signed-off-by: Namhyung Kim <namhyung@kernel.org> Tested-by: Arun Sharma <asharma@fb.com> Tested-by: Rodrigo Campos <rodrigo@sdfg.com.ar> Cc: David Ahern <dsahern@gmail.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Stephane Eranian <eranian@google.com> Link: http://lkml.kernel.org/r/1401335910-16832-3-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Diffstat (limited to 'tools')
-rw-r--r--tools/perf/builtin-report.c192
-rw-r--r--tools/perf/tests/hists_filter.c16
-rw-r--r--tools/perf/tests/hists_output.c11
-rw-r--r--tools/perf/util/hist.c299
-rw-r--r--tools/perf/util/hist.h33
5 files changed, 372 insertions, 179 deletions
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 4a3b84dd4f41..3201bdfa8c3f 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -76,163 +76,16 @@ static int report__config(const char *var, const char *value, void *cb)
76 return perf_default_config(var, value, cb); 76 return perf_default_config(var, value, cb);
77} 77}
78 78
79static void report__inc_stats(struct report *rep, struct hist_entry *he) 79static void report__inc_stats(struct report *rep,
80 struct hist_entry *he __maybe_unused)
80{ 81{
81 /* 82 /*
82 * The @he is either of a newly created one or an existing one 83 * We cannot access @he at this time. Just assume it's a new entry.
83 * merging current sample. We only want to count a new one so 84 * It'll be fixed once we have a callback mechanism in hist_iter.
84 * checking ->nr_events being 1.
85 */ 85 */
86 if (he->stat.nr_events == 1) 86 rep->nr_entries++;
87 rep->nr_entries++;
88
89 /*
90 * Only counts number of samples at this stage as it's more
91 * natural to do it here and non-sample events are also
92 * counted in perf_session_deliver_event(). The dump_trace
93 * requires this info is ready before going to the output tree.
94 */
95 hists__inc_nr_samples(he->hists, he->filtered);
96}
97
98static int report__add_mem_hist_entry(struct report *rep, struct addr_location *al,
99 struct perf_sample *sample, struct perf_evsel *evsel)
100{
101 struct symbol *parent = NULL;
102 struct hist_entry *he;
103 struct mem_info *mi, *mx;
104 uint64_t cost;
105 int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack);
106
107 if (err)
108 return err;
109
110 mi = sample__resolve_mem(sample, al);
111 if (!mi)
112 return -ENOMEM;
113
114 if (rep->hide_unresolved && !al->sym)
115 return 0;
116
117 cost = sample->weight;
118 if (!cost)
119 cost = 1;
120
121 /*
122 * must pass period=weight in order to get the correct
123 * sorting from hists__collapse_resort() which is solely
124 * based on periods. We want sorting be done on nr_events * weight
125 * and this is indirectly achieved by passing period=weight here
126 * and the he_stat__add_period() function.
127 */
128 he = __hists__add_entry(&evsel->hists, al, parent, NULL, mi,
129 cost, cost, 0);
130 if (!he)
131 return -ENOMEM;
132
133 if (ui__has_annotation()) {
134 err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
135 if (err)
136 goto out;
137
138 mx = he->mem_info;
139 err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx);
140 if (err)
141 goto out;
142 }
143
144 report__inc_stats(rep, he);
145
146 err = hist_entry__append_callchain(he, sample);
147out:
148 return err;
149}
150
151static int report__add_branch_hist_entry(struct report *rep, struct addr_location *al,
152 struct perf_sample *sample, struct perf_evsel *evsel)
153{
154 struct symbol *parent = NULL;
155 unsigned i;
156 struct hist_entry *he;
157 struct branch_info *bi, *bx;
158 int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack);
159
160 if (err)
161 return err;
162
163 bi = sample__resolve_bstack(sample, al);
164 if (!bi)
165 return -ENOMEM;
166
167 for (i = 0; i < sample->branch_stack->nr; i++) {
168 if (rep->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
169 continue;
170
171 err = -ENOMEM;
172
173 /* overwrite the 'al' to branch-to info */
174 al->map = bi[i].to.map;
175 al->sym = bi[i].to.sym;
176 al->addr = bi[i].to.addr;
177 /*
178 * The report shows the percentage of total branches captured
179 * and not events sampled. Thus we use a pseudo period of 1.
180 */
181 he = __hists__add_entry(&evsel->hists, al, parent, &bi[i], NULL,
182 1, 1, 0);
183 if (he) {
184 if (ui__has_annotation()) {
185 bx = he->branch_info;
186 err = addr_map_symbol__inc_samples(&bx->from,
187 evsel->idx);
188 if (err)
189 goto out;
190
191 err = addr_map_symbol__inc_samples(&bx->to,
192 evsel->idx);
193 if (err)
194 goto out;
195 }
196 report__inc_stats(rep, he);
197 } else
198 goto out;
199 }
200 err = 0;
201out:
202 free(bi);
203 return err;
204} 87}
205 88
206static int report__add_hist_entry(struct report *rep, struct perf_evsel *evsel,
207 struct addr_location *al, struct perf_sample *sample)
208{
209 struct symbol *parent = NULL;
210 struct hist_entry *he;
211 int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack);
212
213 if (err)
214 return err;
215
216 he = __hists__add_entry(&evsel->hists, al, parent, NULL, NULL,
217 sample->period, sample->weight,
218 sample->transaction);
219 if (he == NULL)
220 return -ENOMEM;
221
222 err = hist_entry__append_callchain(he, sample);
223 if (err)
224 goto out;
225
226 if (ui__has_annotation())
227 err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
228
229 report__inc_stats(rep, he);
230
231out:
232 return err;
233}
234
235
236static int process_sample_event(struct perf_tool *tool, 89static int process_sample_event(struct perf_tool *tool,
237 union perf_event *event, 90 union perf_event *event,
238 struct perf_sample *sample, 91 struct perf_sample *sample,
@@ -241,6 +94,9 @@ static int process_sample_event(struct perf_tool *tool,
241{ 94{
242 struct report *rep = container_of(tool, struct report, tool); 95 struct report *rep = container_of(tool, struct report, tool);
243 struct addr_location al; 96 struct addr_location al;
97 struct hist_entry_iter iter = {
98 .hide_unresolved = rep->hide_unresolved,
99 };
244 int ret; 100 int ret;
245 101
246 if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { 102 if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
@@ -255,22 +111,22 @@ static int process_sample_event(struct perf_tool *tool,
255 if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap)) 111 if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
256 return 0; 112 return 0;
257 113
258 if (sort__mode == SORT_MODE__BRANCH) { 114 if (sort__mode == SORT_MODE__BRANCH)
259 ret = report__add_branch_hist_entry(rep, &al, sample, evsel); 115 iter.ops = &hist_iter_branch;
260 if (ret < 0) 116 else if (rep->mem_mode)
261 pr_debug("problem adding lbr entry, skipping event\n"); 117 iter.ops = &hist_iter_mem;
262 } else if (rep->mem_mode == 1) { 118 else
263 ret = report__add_mem_hist_entry(rep, &al, sample, evsel); 119 iter.ops = &hist_iter_normal;
264 if (ret < 0) 120
265 pr_debug("problem adding mem entry, skipping event\n"); 121 if (al.map != NULL)
266 } else { 122 al.map->dso->hit = 1;
267 if (al.map != NULL) 123
268 al.map->dso->hit = 1; 124 report__inc_stats(rep, NULL);
269 125
270 ret = report__add_hist_entry(rep, evsel, &al, sample); 126 ret = hist_entry_iter__add(&iter, &al, evsel, sample, rep->max_stack);
271 if (ret < 0) 127 if (ret < 0)
272 pr_debug("problem incrementing symbol period, skipping event\n"); 128 pr_debug("problem adding hist entry, skipping event\n");
273 } 129
274 return ret; 130 return ret;
275} 131}
276 132
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 0a71ef4b9158..76b02e1de701 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -42,11 +42,11 @@ static struct sample fake_samples[] = {
42 { .pid = 300, .ip = 0xf0000 + 800, }, 42 { .pid = 300, .ip = 0xf0000 + 800, },
43}; 43};
44 44
45static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine) 45static int add_hist_entries(struct perf_evlist *evlist,
46 struct machine *machine __maybe_unused)
46{ 47{
47 struct perf_evsel *evsel; 48 struct perf_evsel *evsel;
48 struct addr_location al; 49 struct addr_location al;
49 struct hist_entry *he;
50 struct perf_sample sample = { .cpu = 0, }; 50 struct perf_sample sample = { .cpu = 0, };
51 size_t i; 51 size_t i;
52 52
@@ -62,6 +62,10 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
62 .misc = PERF_RECORD_MISC_USER, 62 .misc = PERF_RECORD_MISC_USER,
63 }, 63 },
64 }; 64 };
65 struct hist_entry_iter iter = {
66 .ops = &hist_iter_normal,
67 .hide_unresolved = false,
68 };
65 69
66 /* make sure it has no filter at first */ 70 /* make sure it has no filter at first */
67 evsel->hists.thread_filter = NULL; 71 evsel->hists.thread_filter = NULL;
@@ -71,21 +75,19 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
71 sample.pid = fake_samples[i].pid; 75 sample.pid = fake_samples[i].pid;
72 sample.tid = fake_samples[i].pid; 76 sample.tid = fake_samples[i].pid;
73 sample.ip = fake_samples[i].ip; 77 sample.ip = fake_samples[i].ip;
78 sample.period = 100;
74 79
75 if (perf_event__preprocess_sample(&event, machine, &al, 80 if (perf_event__preprocess_sample(&event, machine, &al,
76 &sample) < 0) 81 &sample) < 0)
77 goto out; 82 goto out;
78 83
79 he = __hists__add_entry(&evsel->hists, &al, NULL, 84 if (hist_entry_iter__add(&iter, &al, evsel, &sample,
80 NULL, NULL, 100, 1, 0); 85 PERF_MAX_STACK_DEPTH) < 0)
81 if (he == NULL)
82 goto out; 86 goto out;
83 87
84 fake_samples[i].thread = al.thread; 88 fake_samples[i].thread = al.thread;
85 fake_samples[i].map = al.map; 89 fake_samples[i].map = al.map;
86 fake_samples[i].sym = al.sym; 90 fake_samples[i].sym = al.sym;
87
88 hists__inc_nr_samples(he->hists, he->filtered);
89 } 91 }
90 } 92 }
91 93
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index a16850551797..1308f88a9169 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -46,7 +46,7 @@ static struct sample fake_samples[] = {
46static int add_hist_entries(struct hists *hists, struct machine *machine) 46static int add_hist_entries(struct hists *hists, struct machine *machine)
47{ 47{
48 struct addr_location al; 48 struct addr_location al;
49 struct hist_entry *he; 49 struct perf_evsel *evsel = hists_to_evsel(hists);
50 struct perf_sample sample = { .period = 100, }; 50 struct perf_sample sample = { .period = 100, };
51 size_t i; 51 size_t i;
52 52
@@ -56,6 +56,10 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
56 .misc = PERF_RECORD_MISC_USER, 56 .misc = PERF_RECORD_MISC_USER,
57 }, 57 },
58 }; 58 };
59 struct hist_entry_iter iter = {
60 .ops = &hist_iter_normal,
61 .hide_unresolved = false,
62 };
59 63
60 sample.cpu = fake_samples[i].cpu; 64 sample.cpu = fake_samples[i].cpu;
61 sample.pid = fake_samples[i].pid; 65 sample.pid = fake_samples[i].pid;
@@ -66,9 +70,8 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
66 &sample) < 0) 70 &sample) < 0)
67 goto out; 71 goto out;
68 72
69 he = __hists__add_entry(hists, &al, NULL, NULL, NULL, 73 if (hist_entry_iter__add(&iter, &al, evsel, &sample,
70 sample.period, 1, 0); 74 PERF_MAX_STACK_DEPTH) < 0)
71 if (he == NULL)
72 goto out; 75 goto out;
73 76
74 fake_samples[i].thread = al.thread; 77 fake_samples[i].thread = al.thread;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 5943ba60f193..d8662356de20 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -4,6 +4,7 @@
4#include "session.h" 4#include "session.h"
5#include "sort.h" 5#include "sort.h"
6#include "evsel.h" 6#include "evsel.h"
7#include "annotate.h"
7#include <math.h> 8#include <math.h>
8 9
9static bool hists__filter_entry_by_dso(struct hists *hists, 10static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -429,6 +430,304 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
429 return add_hist_entry(hists, &entry, al); 430 return add_hist_entry(hists, &entry, al);
430} 431}
431 432
433static int
434iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
435 struct addr_location *al __maybe_unused)
436{
437 return 0;
438}
439
440static int
441iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
442 struct addr_location *al __maybe_unused)
443{
444 return 0;
445}
446
447static int
448iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
449{
450 struct perf_sample *sample = iter->sample;
451 struct mem_info *mi;
452
453 mi = sample__resolve_mem(sample, al);
454 if (mi == NULL)
455 return -ENOMEM;
456
457 iter->priv = mi;
458 return 0;
459}
460
461static int
462iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
463{
464 u64 cost;
465 struct mem_info *mi = iter->priv;
466 struct hist_entry *he;
467
468 if (mi == NULL)
469 return -EINVAL;
470
471 cost = iter->sample->weight;
472 if (!cost)
473 cost = 1;
474
475 /*
476 * must pass period=weight in order to get the correct
477 * sorting from hists__collapse_resort() which is solely
478 * based on periods. We want sorting be done on nr_events * weight
479 * and this is indirectly achieved by passing period=weight here
480 * and the he_stat__add_period() function.
481 */
482 he = __hists__add_entry(&iter->evsel->hists, al, iter->parent, NULL, mi,
483 cost, cost, 0);
484 if (!he)
485 return -ENOMEM;
486
487 iter->he = he;
488 return 0;
489}
490
491static int
492iter_finish_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
493{
494 struct perf_evsel *evsel = iter->evsel;
495 struct hist_entry *he = iter->he;
496 struct mem_info *mx;
497 int err = -EINVAL;
498
499 if (he == NULL)
500 goto out;
501
502 if (ui__has_annotation()) {
503 err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
504 if (err)
505 goto out;
506
507 mx = he->mem_info;
508 err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx);
509 if (err)
510 goto out;
511 }
512
513 hists__inc_nr_samples(&evsel->hists, he->filtered);
514
515 err = hist_entry__append_callchain(he, iter->sample);
516
517out:
518 /*
519 * We don't need to free iter->priv (mem_info) here since
520 * the mem info was either already freed in add_hist_entry() or
521 * passed to a new hist entry by hist_entry__new().
522 */
523 iter->priv = NULL;
524
525 iter->he = NULL;
526 return err;
527}
528
529static int
530iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
531{
532 struct branch_info *bi;
533 struct perf_sample *sample = iter->sample;
534
535 bi = sample__resolve_bstack(sample, al);
536 if (!bi)
537 return -ENOMEM;
538
539 iter->curr = 0;
540 iter->total = sample->branch_stack->nr;
541
542 iter->priv = bi;
543 return 0;
544}
545
546static int
547iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
548 struct addr_location *al __maybe_unused)
549{
550 return 0;
551}
552
553static int
554iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
555{
556 struct branch_info *bi = iter->priv;
557 int i = iter->curr;
558
559 if (bi == NULL)
560 return 0;
561
562 if (iter->curr >= iter->total)
563 return 0;
564
565 al->map = bi[i].to.map;
566 al->sym = bi[i].to.sym;
567 al->addr = bi[i].to.addr;
568 return 1;
569}
570
571static int
572iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
573{
574 struct branch_info *bi, *bx;
575 struct perf_evsel *evsel = iter->evsel;
576 struct hist_entry *he = NULL;
577 int i = iter->curr;
578 int err = 0;
579
580 bi = iter->priv;
581
582 if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
583 goto out;
584
585 /*
586 * The report shows the percentage of total branches captured
587 * and not events sampled. Thus we use a pseudo period of 1.
588 */
589 he = __hists__add_entry(&evsel->hists, al, iter->parent, &bi[i], NULL,
590 1, 1, 0);
591 if (he == NULL)
592 return -ENOMEM;
593
594 if (ui__has_annotation()) {
595 bx = he->branch_info;
596 err = addr_map_symbol__inc_samples(&bx->from, evsel->idx);
597 if (err)
598 goto out;
599
600 err = addr_map_symbol__inc_samples(&bx->to, evsel->idx);
601 if (err)
602 goto out;
603 }
604
605 hists__inc_nr_samples(&evsel->hists, he->filtered);
606
607out:
608 iter->he = he;
609 iter->curr++;
610 return err;
611}
612
613static int
614iter_finish_branch_entry(struct hist_entry_iter *iter,
615 struct addr_location *al __maybe_unused)
616{
617 zfree(&iter->priv);
618 iter->he = NULL;
619
620 return iter->curr >= iter->total ? 0 : -1;
621}
622
623static int
624iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
625 struct addr_location *al __maybe_unused)
626{
627 return 0;
628}
629
630static int
631iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
632{
633 struct perf_evsel *evsel = iter->evsel;
634 struct perf_sample *sample = iter->sample;
635 struct hist_entry *he;
636
637 he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
638 sample->period, sample->weight,
639 sample->transaction);
640 if (he == NULL)
641 return -ENOMEM;
642
643 iter->he = he;
644 return 0;
645}
646
647static int
648iter_finish_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
649{
650 int err;
651 struct hist_entry *he = iter->he;
652 struct perf_evsel *evsel = iter->evsel;
653 struct perf_sample *sample = iter->sample;
654
655 if (he == NULL)
656 return 0;
657
658 iter->he = NULL;
659
660 if (ui__has_annotation()) {
661 err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
662 if (err)
663 return err;
664 }
665
666 hists__inc_nr_samples(&evsel->hists, he->filtered);
667
668 return hist_entry__append_callchain(he, sample);
669}
670
671const struct hist_iter_ops hist_iter_mem = {
672 .prepare_entry = iter_prepare_mem_entry,
673 .add_single_entry = iter_add_single_mem_entry,
674 .next_entry = iter_next_nop_entry,
675 .add_next_entry = iter_add_next_nop_entry,
676 .finish_entry = iter_finish_mem_entry,
677};
678
679const struct hist_iter_ops hist_iter_branch = {
680 .prepare_entry = iter_prepare_branch_entry,
681 .add_single_entry = iter_add_single_branch_entry,
682 .next_entry = iter_next_branch_entry,
683 .add_next_entry = iter_add_next_branch_entry,
684 .finish_entry = iter_finish_branch_entry,
685};
686
687const struct hist_iter_ops hist_iter_normal = {
688 .prepare_entry = iter_prepare_normal_entry,
689 .add_single_entry = iter_add_single_normal_entry,
690 .next_entry = iter_next_nop_entry,
691 .add_next_entry = iter_add_next_nop_entry,
692 .finish_entry = iter_finish_normal_entry,
693};
694
695int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
696 struct perf_evsel *evsel, struct perf_sample *sample,
697 int max_stack_depth)
698{
699 int err, err2;
700
701 err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
702 max_stack_depth);
703 if (err)
704 return err;
705
706 iter->evsel = evsel;
707 iter->sample = sample;
708
709 err = iter->ops->prepare_entry(iter, al);
710 if (err)
711 goto out;
712
713 err = iter->ops->add_single_entry(iter, al);
714 if (err)
715 goto out;
716
717 while (iter->ops->next_entry(iter, al)) {
718 err = iter->ops->add_next_entry(iter, al);
719 if (err)
720 break;
721 }
722
723out:
724 err2 = iter->ops->finish_entry(iter, al);
725 if (!err)
726 err = err2;
727
728 return err;
729}
730
432int64_t 731int64_t
433hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) 732hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
434{ 733{
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 03ae1dbb1b15..8894f184357c 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -96,12 +96,45 @@ struct hists {
96 u16 col_len[HISTC_NR_COLS]; 96 u16 col_len[HISTC_NR_COLS];
97}; 97};
98 98
99struct hist_entry_iter;
100
101struct hist_iter_ops {
102 int (*prepare_entry)(struct hist_entry_iter *, struct addr_location *);
103 int (*add_single_entry)(struct hist_entry_iter *, struct addr_location *);
104 int (*next_entry)(struct hist_entry_iter *, struct addr_location *);
105 int (*add_next_entry)(struct hist_entry_iter *, struct addr_location *);
106 int (*finish_entry)(struct hist_entry_iter *, struct addr_location *);
107};
108
109struct hist_entry_iter {
110 int total;
111 int curr;
112
113 bool hide_unresolved;
114
115 struct perf_evsel *evsel;
116 struct perf_sample *sample;
117 struct hist_entry *he;
118 struct symbol *parent;
119 void *priv;
120
121 const struct hist_iter_ops *ops;
122};
123
124extern const struct hist_iter_ops hist_iter_normal;
125extern const struct hist_iter_ops hist_iter_branch;
126extern const struct hist_iter_ops hist_iter_mem;
127
99struct hist_entry *__hists__add_entry(struct hists *hists, 128struct hist_entry *__hists__add_entry(struct hists *hists,
100 struct addr_location *al, 129 struct addr_location *al,
101 struct symbol *parent, 130 struct symbol *parent,
102 struct branch_info *bi, 131 struct branch_info *bi,
103 struct mem_info *mi, u64 period, 132 struct mem_info *mi, u64 period,
104 u64 weight, u64 transaction); 133 u64 weight, u64 transaction);
134int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
135 struct perf_evsel *evsel, struct perf_sample *sample,
136 int max_stack_depth);
137
105int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right); 138int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
106int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right); 139int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
107int hist_entry__transaction_len(void); 140int hist_entry__transaction_len(void);