author     Arnaldo Carvalho de Melo <acme@redhat.com>   2010-05-10 12:57:51 -0400
committer  Arnaldo Carvalho de Melo <acme@redhat.com>   2010-05-10 18:49:08 -0400
commit     fefb0b94bbab858be0909a7eb5ef357e0f996a79 (patch)
tree       db46b10241ad338db05e9ee68bb0ee45954d7b8a /tools/perf/util/hist.c
parent     1c02c4d2e92f2097f1bba63ec71560b0e05a7f36 (diff)
perf hist: Calculate max_sym name len and nr_entries

Better done when we are adding entries, be it initially or when we're
re-sorting the histograms.

Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
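Below is a minimal, stand-alone sketch of the bookkeeping pattern this patch introduces; it is not the perf code itself, and the struct, sample data and main() are simplified stand-ins. The point it illustrates: the entry count and the widest symbol-name length are updated as each entry is inserted, and are reset and re-accumulated during a re-sort, so no extra pass over the rb-tree is needed just to recompute them.

/*
 * Sketch only: simplified stand-ins for the real perf data structures.
 */
#include <stddef.h>
#include <stdio.h>

struct sym {
	const char *name;
	unsigned int namelen;
};

struct hists {
	unsigned long nr_entries;
	unsigned int max_sym_namelen;
};

/*
 * Mirrors the helper added by the patch: bump the entry count and
 * remember the longest symbol name seen so far.
 */
static void hists__inc_nr_entries(struct hists *self, const struct sym *sym)
{
	if (sym && self->max_sym_namelen < sym->namelen)
		self->max_sym_namelen = sym->namelen;
	++self->nr_entries;
}

int main(void)
{
	struct sym syms[] = {
		{ "main", 4 }, { "schedule", 8 }, { "do_page_fault", 13 },
	};
	struct hists hists = { 0, 0 };
	size_t i;

	/* Initial insertion: account for each entry as it is added. */
	for (i = 0; i < sizeof(syms) / sizeof(syms[0]); i++)
		hists__inc_nr_entries(&hists, &syms[i]);

	/*
	 * Re-sort: zero the counters, then re-accumulate while the entries
	 * are moved into the new tree, as the resort functions do in the
	 * diff below.
	 */
	hists.nr_entries = 0;
	hists.max_sym_namelen = 0;
	for (i = 0; i < sizeof(syms) / sizeof(syms[0]); i++)
		hists__inc_nr_entries(&hists, &syms[i]);

	printf("%lu entries, widest symbol name: %u chars\n",
	       hists.nr_entries, hists.max_sym_namelen);
	return 0;
}

In the actual patch the same helper, hists__inc_nr_entries(), is called from __hists__add_entry(), hists__collapse_resort() and hists__output_resort(), replacing the nr_hists counter that hists__output_resort() used to return.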
Diffstat (limited to 'tools/perf/util/hist.c')
-rw-r--r--   tools/perf/util/hist.c | 27 ++++++++++++++++++++-------
1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 410cf56c9662..e34fd248067d 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -47,6 +47,13 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template)
 	return self;
 }
 
+static void hists__inc_nr_entries(struct hists *self, struct hist_entry *entry)
+{
+	if (entry->ms.sym && self->max_sym_namelen < entry->ms.sym->namelen)
+		self->max_sym_namelen = entry->ms.sym->namelen;
+	++self->nr_entries;
+}
+
 struct hist_entry *__hists__add_entry(struct hists *self,
 				      struct addr_location *al,
 				      struct symbol *sym_parent, u64 count)
@@ -89,6 +96,7 @@ struct hist_entry *__hists__add_entry(struct hists *self,
 			return NULL;
 	rb_link_node(&he->rb_node, parent, p);
 	rb_insert_color(&he->rb_node, &self->entries);
+	hists__inc_nr_entries(self, he);
 out:
 	hist_entry__add_cpumode_count(he, al->cpumode, count);
 	return he;
@@ -137,7 +145,7 @@ void hist_entry__free(struct hist_entry *he)
  * collapse the histogram
  */
 
-static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
+static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
 {
 	struct rb_node **p = &root->rb_node;
 	struct rb_node *parent = NULL;
@@ -153,7 +161,7 @@ static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
 		if (!cmp) {
 			iter->count += he->count;
 			hist_entry__free(he);
-			return;
+			return false;
 		}
 
 		if (cmp < 0)
@@ -164,6 +172,7 @@ static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
 
 	rb_link_node(&he->rb_node, parent, p);
 	rb_insert_color(&he->rb_node, root);
+	return true;
 }
 
 void hists__collapse_resort(struct hists *self)
@@ -177,13 +186,16 @@ void hists__collapse_resort(struct hists *self)
 
 	tmp = RB_ROOT;
 	next = rb_first(&self->entries);
+	self->nr_entries = 0;
+	self->max_sym_namelen = 0;
 
 	while (next) {
 		n = rb_entry(next, struct hist_entry, rb_node);
 		next = rb_next(&n->rb_node);
 
 		rb_erase(&n->rb_node, &self->entries);
-		collapse__insert_entry(&tmp, n);
+		if (collapse__insert_entry(&tmp, n))
+			hists__inc_nr_entries(self, n);
 	}
 
 	self->entries = tmp;
@@ -219,30 +231,31 @@ static void __hists__insert_output_entry(struct rb_root *entries,
 	rb_insert_color(&he->rb_node, entries);
 }
 
-u64 hists__output_resort(struct hists *self)
+void hists__output_resort(struct hists *self)
 {
 	struct rb_root tmp;
 	struct rb_node *next;
 	struct hist_entry *n;
 	u64 min_callchain_hits;
-	u64 nr_hists = 0;
 
 	min_callchain_hits = self->stats.total * (callchain_param.min_percent / 100);
 
 	tmp = RB_ROOT;
 	next = rb_first(&self->entries);
 
+	self->nr_entries = 0;
+	self->max_sym_namelen = 0;
+
 	while (next) {
 		n = rb_entry(next, struct hist_entry, rb_node);
 		next = rb_next(&n->rb_node);
 
 		rb_erase(&n->rb_node, &self->entries);
 		__hists__insert_output_entry(&tmp, n, min_callchain_hits);
-		++nr_hists;
+		hists__inc_nr_entries(self, n);
 	}
 
 	self->entries = tmp;
-	return nr_hists;
 }
 
 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)