Diffstat (limited to 'tools/perf/util')
-rwxr-xr-x	tools/perf/util/PERF-VERSION-GEN	6
-rw-r--r--	tools/perf/util/bitmap.c	21
-rw-r--r--	tools/perf/util/build-id.c	2
-rw-r--r--	tools/perf/util/cache.h	14
-rw-r--r--	tools/perf/util/callchain.c	121
-rw-r--r--	tools/perf/util/callchain.h	10
-rw-r--r--	tools/perf/util/color.c	48
-rw-r--r--	tools/perf/util/color.h	4
-rw-r--r--	tools/perf/util/debug.c	8
-rw-r--r--	tools/perf/util/debug.h	30
-rw-r--r--	tools/perf/util/event.c	368
-rw-r--r--	tools/perf/util/event.h	66
-rw-r--r--	tools/perf/util/header.c	489
-rw-r--r--	tools/perf/util/header.h	39
-rw-r--r--	tools/perf/util/hist.c	613
-rw-r--r--	tools/perf/util/hist.h	102
-rw-r--r--	tools/perf/util/hweight.c	31
-rw-r--r--	tools/perf/util/include/asm/bitops.h	18
-rw-r--r--	tools/perf/util/include/asm/hweight.h	8
-rw-r--r--	tools/perf/util/include/dwarf-regs.h	8
-rw-r--r--	tools/perf/util/include/linux/bitmap.h	38
-rw-r--r--	tools/perf/util/include/linux/bitops.h	20
-rw-r--r--	tools/perf/util/include/linux/compiler.h	2
-rw-r--r--	tools/perf/util/include/linux/kernel.h	11
-rw-r--r--	tools/perf/util/map.c	409
-rw-r--r--	tools/perf/util/map.h	131
-rw-r--r--	tools/perf/util/newt.c	1084
-rw-r--r--	tools/perf/util/parse-events.c	44
-rw-r--r--	tools/perf/util/parse-events.h	1
-rw-r--r--	tools/perf/util/parse-options.c	55
-rw-r--r--	tools/perf/util/parse-options.h	29
-rw-r--r--	tools/perf/util/probe-event.c	1580
-rw-r--r--	tools/perf/util/probe-event.h	130
-rw-r--r--	tools/perf/util/probe-finder.c	1046
-rw-r--r--	tools/perf/util/probe-finder.h	67
-rw-r--r--	tools/perf/util/pstack.c	75
-rw-r--r--	tools/perf/util/pstack.h	12
-rw-r--r--	tools/perf/util/scripting-engines/trace-event-perl.c	3
-rw-r--r--	tools/perf/util/scripting-engines/trace-event-python.c	4
-rw-r--r--	tools/perf/util/session.c	568
-rw-r--r--	tools/perf/util/session.h	126
-rw-r--r--	tools/perf/util/sort.c	153
-rw-r--r--	tools/perf/util/sort.h	35
-rw-r--r--	tools/perf/util/string.c	45
-rw-r--r--	tools/perf/util/string.h	18
-rw-r--r--	tools/perf/util/symbol.c	564
-rw-r--r--	tools/perf/util/symbol.h	80
-rw-r--r--	tools/perf/util/thread.c	242
-rw-r--r--	tools/perf/util/thread.h	53
-rw-r--r--	tools/perf/util/trace-event-info.c	35
-rw-r--r--	tools/perf/util/trace-event-parse.c	118
-rw-r--r--	tools/perf/util/trace-event-read.c	116
-rw-r--r--	tools/perf/util/trace-event.h	8
-rw-r--r--	tools/perf/util/util.c	22
-rw-r--r--	tools/perf/util/util.h	22
55 files changed, 6975 insertions(+), 1977 deletions(-)
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index 54552a00a117..49ece7921914 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -1,6 +1,10 @@
 #!/bin/sh
 
-GVF=PERF-VERSION-FILE
+if [ $# -eq 1 ] ; then
+	OUTPUT=$1
+fi
+
+GVF=${OUTPUT}PERF-VERSION-FILE
 DEF_VER=v0.0.2.PERF
 
 LF='
diff --git a/tools/perf/util/bitmap.c b/tools/perf/util/bitmap.c
new file mode 100644
index 000000000000..5e230acae1e9
--- /dev/null
+++ b/tools/perf/util/bitmap.c
@@ -0,0 +1,21 @@
+/*
+ * From lib/bitmap.c
+ * Helper functions for bitmap.h.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+#include <linux/bitmap.h>
+
+int __bitmap_weight(const unsigned long *bitmap, int bits)
+{
+	int k, w = 0, lim = bits/BITS_PER_LONG;
+
+	for (k = 0; k < lim; k++)
+		w += hweight_long(bitmap[k]);
+
+	if (bits % BITS_PER_LONG)
+		w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
+
+	return w;
+}
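
For reference, __bitmap_weight() above is a plain population count over the bitmap: whole words go through hweight_long(), and a trailing partial word is masked with BITMAP_LAST_WORD_MASK() first so stray high bits are not counted. Below is a minimal standalone sketch of the same logic, using __builtin_popcountl() in place of the kernel's hweight_long(); the names are illustrative only, not part of this patch.

#include <stdio.h>

#define WORD_BITS (8 * sizeof(unsigned long))
/* keep only the low (bits % WORD_BITS) bits of the last word */
#define LAST_WORD_MASK(bits) (~0UL >> (-(bits) & (WORD_BITS - 1)))

static int bitmap_weight_sketch(const unsigned long *bitmap, int bits)
{
	int k, w = 0, lim = bits / WORD_BITS;

	for (k = 0; k < lim; k++)
		w += __builtin_popcountl(bitmap[k]);	/* full words */

	if (bits % WORD_BITS)				/* trailing partial word */
		w += __builtin_popcountl(bitmap[k] & LAST_WORD_MASK(bits));

	return w;
}

int main(void)
{
	unsigned long map[2] = { 0xf0f0UL, 0x3UL };

	/* 8 set bits in word 0 + 2 set bits in word 1 = 10 */
	printf("%d\n", bitmap_weight_sketch(map, 2 * WORD_BITS));
	return 0;
}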
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 04904b35ba81..0f60a3906808 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -24,7 +24,7 @@ static int build_id__mark_dso_hit(event_t *event, struct perf_session *session)
 	}
 
 	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
-			      event->ip.ip, &al);
+			      event->ip.pid, event->ip.ip, &al);
 
 	if (al.map != NULL)
 		al.map->dso->hit = 1;
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 918eb376abe3..4b9aab7f0405 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -1,6 +1,7 @@
 #ifndef __PERF_CACHE_H
 #define __PERF_CACHE_H
 
+#include <stdbool.h>
 #include "util.h"
 #include "strbuf.h"
 #include "../perf.h"
@@ -69,6 +70,19 @@ extern const char *pager_program;
 extern int pager_in_use(void);
 extern int pager_use_color;
 
+extern bool use_browser;
+
+#ifdef NO_NEWT_SUPPORT
+static inline void setup_browser(void)
+{
+	setup_pager();
+}
+static inline void exit_browser(bool wait_for_ok __used) {}
+#else
+void setup_browser(void);
+void exit_browser(bool wait_for_ok);
+#endif
+
 extern const char *editor_program;
 extern const char *excludes_file;
 
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index b3b71258272a..21a52e0a4435 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (C) 2009-2010, Frederic Weisbecker <fweisbec@gmail.com>
  *
  * Handle the callchains from the stream in an ad-hoc radix tree and then
  * sort them in an rbtree.
@@ -17,6 +17,13 @@
 
 #include "callchain.h"
 
+bool ip_callchain__valid(struct ip_callchain *chain, event_t *event)
+{
+	unsigned int chain_size = event->header.size;
+	chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
+	return chain->nr * sizeof(u64) <= chain_size;
+}
+
 #define chain_for_each_child(child, parent)	\
 	list_for_each_entry(child, &parent->children, brothers)
 
@@ -160,7 +167,7 @@ create_child(struct callchain_node *parent, bool inherit_children)
160{ 167{
161 struct callchain_node *new; 168 struct callchain_node *new;
162 169
163 new = malloc(sizeof(*new)); 170 new = zalloc(sizeof(*new));
164 if (!new) { 171 if (!new) {
165 perror("not enough memory to create child for code path tree"); 172 perror("not enough memory to create child for code path tree");
166 return NULL; 173 return NULL;
@@ -183,25 +190,36 @@ create_child(struct callchain_node *parent, bool inherit_children)
183 return new; 190 return new;
184} 191}
185 192
193
194struct resolved_ip {
195 u64 ip;
196 struct map_symbol ms;
197};
198
199struct resolved_chain {
200 u64 nr;
201 struct resolved_ip ips[0];
202};
203
204
186/* 205/*
187 * Fill the node with callchain values 206 * Fill the node with callchain values
188 */ 207 */
189static void 208static void
190fill_node(struct callchain_node *node, struct ip_callchain *chain, 209fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
191 int start, struct symbol **syms)
192{ 210{
193 unsigned int i; 211 unsigned int i;
194 212
195 for (i = start; i < chain->nr; i++) { 213 for (i = start; i < chain->nr; i++) {
196 struct callchain_list *call; 214 struct callchain_list *call;
197 215
198 call = malloc(sizeof(*call)); 216 call = zalloc(sizeof(*call));
199 if (!call) { 217 if (!call) {
200 perror("not enough memory for the code path tree"); 218 perror("not enough memory for the code path tree");
201 return; 219 return;
202 } 220 }
203 call->ip = chain->ips[i]; 221 call->ip = chain->ips[i].ip;
204 call->sym = syms[i]; 222 call->ms = chain->ips[i].ms;
205 list_add_tail(&call->list, &node->val); 223 list_add_tail(&call->list, &node->val);
206 } 224 }
207 node->val_nr = chain->nr - start; 225 node->val_nr = chain->nr - start;
@@ -210,13 +228,13 @@ fill_node(struct callchain_node *node, struct ip_callchain *chain,
210} 228}
211 229
212static void 230static void
213add_child(struct callchain_node *parent, struct ip_callchain *chain, 231add_child(struct callchain_node *parent, struct resolved_chain *chain,
214 int start, struct symbol **syms) 232 int start)
215{ 233{
216 struct callchain_node *new; 234 struct callchain_node *new;
217 235
218 new = create_child(parent, false); 236 new = create_child(parent, false);
219 fill_node(new, chain, start, syms); 237 fill_node(new, chain, start);
220 238
221 new->children_hit = 0; 239 new->children_hit = 0;
222 new->hit = 1; 240 new->hit = 1;
@@ -228,9 +246,8 @@ add_child(struct callchain_node *parent, struct ip_callchain *chain,
228 * Then create another child to host the given callchain of new branch 246 * Then create another child to host the given callchain of new branch
229 */ 247 */
230static void 248static void
231split_add_child(struct callchain_node *parent, struct ip_callchain *chain, 249split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
232 struct callchain_list *to_split, int idx_parents, int idx_local, 250 struct callchain_list *to_split, int idx_parents, int idx_local)
233 struct symbol **syms)
234{ 251{
235 struct callchain_node *new; 252 struct callchain_node *new;
236 struct list_head *old_tail; 253 struct list_head *old_tail;
@@ -257,7 +274,7 @@ split_add_child(struct callchain_node *parent, struct ip_callchain *chain,
257 /* create a new child for the new branch if any */ 274 /* create a new child for the new branch if any */
258 if (idx_total < chain->nr) { 275 if (idx_total < chain->nr) {
259 parent->hit = 0; 276 parent->hit = 0;
260 add_child(parent, chain, idx_total, syms); 277 add_child(parent, chain, idx_total);
261 parent->children_hit++; 278 parent->children_hit++;
262 } else { 279 } else {
263 parent->hit = 1; 280 parent->hit = 1;
@@ -265,32 +282,33 @@ split_add_child(struct callchain_node *parent, struct ip_callchain *chain,
265} 282}
266 283
267static int 284static int
268__append_chain(struct callchain_node *root, struct ip_callchain *chain, 285__append_chain(struct callchain_node *root, struct resolved_chain *chain,
269 unsigned int start, struct symbol **syms); 286 unsigned int start);
270 287
271static void 288static void
272__append_chain_children(struct callchain_node *root, struct ip_callchain *chain, 289__append_chain_children(struct callchain_node *root,
273 struct symbol **syms, unsigned int start) 290 struct resolved_chain *chain,
291 unsigned int start)
274{ 292{
275 struct callchain_node *rnode; 293 struct callchain_node *rnode;
276 294
277 /* lookup in childrens */ 295 /* lookup in childrens */
278 chain_for_each_child(rnode, root) { 296 chain_for_each_child(rnode, root) {
279 unsigned int ret = __append_chain(rnode, chain, start, syms); 297 unsigned int ret = __append_chain(rnode, chain, start);
280 298
281 if (!ret) 299 if (!ret)
282 goto inc_children_hit; 300 goto inc_children_hit;
283 } 301 }
284 /* nothing in children, add to the current node */ 302 /* nothing in children, add to the current node */
285 add_child(root, chain, start, syms); 303 add_child(root, chain, start);
286 304
287inc_children_hit: 305inc_children_hit:
288 root->children_hit++; 306 root->children_hit++;
289} 307}
290 308
291static int 309static int
292__append_chain(struct callchain_node *root, struct ip_callchain *chain, 310__append_chain(struct callchain_node *root, struct resolved_chain *chain,
293 unsigned int start, struct symbol **syms) 311 unsigned int start)
294{ 312{
295 struct callchain_list *cnode; 313 struct callchain_list *cnode;
296 unsigned int i = start; 314 unsigned int i = start;
@@ -302,13 +320,19 @@ __append_chain(struct callchain_node *root, struct ip_callchain *chain,
302 * anywhere inside a function. 320 * anywhere inside a function.
303 */ 321 */
304 list_for_each_entry(cnode, &root->val, list) { 322 list_for_each_entry(cnode, &root->val, list) {
323 struct symbol *sym;
324
305 if (i == chain->nr) 325 if (i == chain->nr)
306 break; 326 break;
307 if (cnode->sym && syms[i]) { 327
308 if (cnode->sym->start != syms[i]->start) 328 sym = chain->ips[i].ms.sym;
329
330 if (cnode->ms.sym && sym) {
331 if (cnode->ms.sym->start != sym->start)
309 break; 332 break;
310 } else if (cnode->ip != chain->ips[i]) 333 } else if (cnode->ip != chain->ips[i].ip)
311 break; 334 break;
335
312 if (!found) 336 if (!found)
313 found = true; 337 found = true;
314 i++; 338 i++;
@@ -320,7 +344,7 @@ __append_chain(struct callchain_node *root, struct ip_callchain *chain,
320 344
321 /* we match only a part of the node. Split it and add the new chain */ 345 /* we match only a part of the node. Split it and add the new chain */
322 if (i - start < root->val_nr) { 346 if (i - start < root->val_nr) {
323 split_add_child(root, chain, cnode, start, i - start, syms); 347 split_add_child(root, chain, cnode, start, i - start);
324 return 0; 348 return 0;
325 } 349 }
326 350
@@ -331,15 +355,50 @@ __append_chain(struct callchain_node *root, struct ip_callchain *chain,
331 } 355 }
332 356
333 /* We match the node and still have a part remaining */ 357 /* We match the node and still have a part remaining */
334 __append_chain_children(root, chain, syms, i); 358 __append_chain_children(root, chain, i);
335 359
336 return 0; 360 return 0;
337} 361}
338 362
339void append_chain(struct callchain_node *root, struct ip_callchain *chain, 363static void filter_context(struct ip_callchain *old, struct resolved_chain *new,
340 struct symbol **syms) 364 struct map_symbol *syms)
341{ 365{
366 int i, j = 0;
367
368 for (i = 0; i < (int)old->nr; i++) {
369 if (old->ips[i] >= PERF_CONTEXT_MAX)
370 continue;
371
372 new->ips[j].ip = old->ips[i];
373 new->ips[j].ms = syms[i];
374 j++;
375 }
376
377 new->nr = j;
378}
379
380
381int append_chain(struct callchain_node *root, struct ip_callchain *chain,
382 struct map_symbol *syms)
383{
384 struct resolved_chain *filtered;
385
342 if (!chain->nr) 386 if (!chain->nr)
343 return; 387 return 0;
344 __append_chain_children(root, chain, syms, 0); 388
389 filtered = zalloc(sizeof(*filtered) +
390 chain->nr * sizeof(struct resolved_ip));
391 if (!filtered)
392 return -ENOMEM;
393
394 filter_context(chain, filtered, syms);
395
396 if (!filtered->nr)
397 goto end;
398
399 __append_chain_children(root, filtered, 0);
400end:
401 free(filtered);
402
403 return 0;
345} 404}
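
Net effect of the callchain.c changes above: append_chain() no longer walks the raw ip_callchain directly. Raw chains interleave context markers (PERF_CONTEXT_KERNEL, PERF_CONTEXT_USER, ..., all numerically >= PERF_CONTEXT_MAX) with real addresses, so filter_context() first copies only the real entries, paired with their resolved map/symbol, into a temporary resolved_chain before the radix-tree insertion. A standalone sketch of just that filtering step (simplified entry type and illustrative addresses, not the patch's structures):

#include <stdio.h>
#include <stdint.h>

/* marker values sit at the very top of the address space,
 * mirroring the PERF_CONTEXT_* convention */
#define CONTEXT_MAX	((uint64_t)-4095)
#define CONTEXT_KERNEL	((uint64_t)-128)
#define CONTEXT_USER	((uint64_t)-512)

struct entry { uint64_t ip; };

/* copy only real addresses from src[] to dst[], return how many were kept */
static int filter_context_sketch(const uint64_t *src, int nr, struct entry *dst)
{
	int i, j = 0;

	for (i = 0; i < nr; i++) {
		if (src[i] >= CONTEXT_MAX)	/* skip context markers */
			continue;
		dst[j++].ip = src[i];
	}
	return j;
}

int main(void)
{
	uint64_t raw[] = { CONTEXT_KERNEL, 0xffffffff8100aaaaULL,
			   CONTEXT_USER, 0x400123ULL, 0x400456ULL };
	struct entry out[5];
	int i, nr = filter_context_sketch(raw, 5, out);

	for (i = 0; i < nr; i++)	/* prints the three real addresses */
		printf("%#llx\n", (unsigned long long)out[i].ip);
	return 0;
}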
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index ad4626de4c2b..1cba1f5504e7 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -4,6 +4,7 @@
 #include "../perf.h"
 #include <linux/list.h>
 #include <linux/rbtree.h>
+#include "event.h"
 #include "util.h"
 #include "symbol.h"
 
@@ -33,13 +34,14 @@ typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_node *,
 
 struct callchain_param {
 	enum chain_mode		mode;
+	u32			print_limit;
 	double			min_percent;
 	sort_chain_func_t	sort;
 };
 
 struct callchain_list {
 	u64			ip;
-	struct symbol		*sym;
+	struct map_symbol	ms;
 	struct list_head	list;
 };
 
@@ -56,6 +58,8 @@ static inline u64 cumul_hits(struct callchain_node *node)
 }
 
 int register_callchain_param(struct callchain_param *param);
-void append_chain(struct callchain_node *root, struct ip_callchain *chain,
-		  struct symbol **syms);
+int append_chain(struct callchain_node *root, struct ip_callchain *chain,
+		 struct map_symbol *syms);
+
+bool ip_callchain__valid(struct ip_callchain *chain, event_t *event);
 #endif	/* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index e88bca55a599..e191eb9a667f 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -166,6 +166,31 @@ int perf_color_default_config(const char *var, const char *value, void *cb)
166 return perf_default_config(var, value, cb); 166 return perf_default_config(var, value, cb);
167} 167}
168 168
169static int __color_vsnprintf(char *bf, size_t size, const char *color,
170 const char *fmt, va_list args, const char *trail)
171{
172 int r = 0;
173
174 /*
175 * Auto-detect:
176 */
177 if (perf_use_color_default < 0) {
178 if (isatty(1) || pager_in_use())
179 perf_use_color_default = 1;
180 else
181 perf_use_color_default = 0;
182 }
183
184 if (perf_use_color_default && *color)
185 r += snprintf(bf, size, "%s", color);
186 r += vsnprintf(bf + r, size - r, fmt, args);
187 if (perf_use_color_default && *color)
188 r += snprintf(bf + r, size - r, "%s", PERF_COLOR_RESET);
189 if (trail)
190 r += snprintf(bf + r, size - r, "%s", trail);
191 return r;
192}
193
169static int __color_vfprintf(FILE *fp, const char *color, const char *fmt, 194static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
170 va_list args, const char *trail) 195 va_list args, const char *trail)
171{ 196{
@@ -191,11 +216,28 @@ static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
191 return r; 216 return r;
192} 217}
193 218
219int color_vsnprintf(char *bf, size_t size, const char *color,
220 const char *fmt, va_list args)
221{
222 return __color_vsnprintf(bf, size, color, fmt, args, NULL);
223}
224
194int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args) 225int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args)
195{ 226{
196 return __color_vfprintf(fp, color, fmt, args, NULL); 227 return __color_vfprintf(fp, color, fmt, args, NULL);
197} 228}
198 229
230int color_snprintf(char *bf, size_t size, const char *color,
231 const char *fmt, ...)
232{
233 va_list args;
234 int r;
235
236 va_start(args, fmt);
237 r = color_vsnprintf(bf, size, color, fmt, args);
238 va_end(args);
239 return r;
240}
199 241
200int color_fprintf(FILE *fp, const char *color, const char *fmt, ...) 242int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
201{ 243{
@@ -274,3 +316,9 @@ int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
274 316
275 return r; 317 return r;
276} 318}
319
320int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent)
321{
322 const char *color = get_percent_color(percent);
323 return color_snprintf(bf, size, color, fmt, percent);
324}
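
The new color_snprintf()/percent_color_snprintf() variants above mirror the existing fprintf helpers but render into a caller-supplied buffer, which is what the newt TUI needs when it composes its own lines. A hypothetical caller (not part of this patch) would use them much like percent_color_fprintf():

#include <stdio.h>
#include "color.h"	/* tools/perf/util/color.h, as patched above */

/* format one overhead cell into a fixed buffer instead of a FILE */
static void format_percent_cell(char *bf, size_t size, double percent)
{
	/* picks the colour via get_percent_color(), then renders into bf */
	percent_color_snprintf(bf, size, " %6.2f%%", percent);
}

int main(void)
{
	char cell[64];

	format_percent_cell(cell, sizeof(cell), 42.0);
	puts(cell);
	return 0;
}

As with the fprintf path, colour escapes are only emitted when stdout is a tty or a pager is in use, so the buffer stays plain text otherwise.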
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 24e8809210bb..dea082b79602 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -32,10 +32,14 @@ int perf_color_default_config(const char *var, const char *value, void *cb);
 int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty);
 void color_parse(const char *value, const char *var, char *dst);
 void color_parse_mem(const char *value, int len, const char *var, char *dst);
+int color_vsnprintf(char *bf, size_t size, const char *color,
+		    const char *fmt, va_list args);
 int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);
 int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
+int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...);
 int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
 int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
+int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent);
 int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
 const char *get_percent_color(double percent);
 
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 0905600c3851..dd824cf3b628 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -6,13 +6,14 @@
 #include <stdarg.h>
 #include <stdio.h>
 
+#include "cache.h"
 #include "color.h"
 #include "event.h"
 #include "debug.h"
 #include "util.h"
 
 int verbose = 0;
-int dump_trace = 0;
+bool dump_trace = false;
 
 int eprintf(int level, const char *fmt, ...)
 {
@@ -21,7 +22,10 @@ int eprintf(int level, const char *fmt, ...)
 
 	if (verbose >= level) {
 		va_start(args, fmt);
-		ret = vfprintf(stderr, fmt, args);
+		if (use_browser)
+			ret = browser__show_help(fmt, args);
+		else
+			ret = vfprintf(stderr, fmt, args);
 		va_end(args);
 	}
 
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index c6c24c522dea..047ac3324ebe 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -2,14 +2,38 @@
 #ifndef __PERF_DEBUG_H
 #define __PERF_DEBUG_H
 
+#include <stdbool.h>
 #include "event.h"
 
 extern int verbose;
-extern int dump_trace;
+extern bool dump_trace;
 
-int eprintf(int level,
-	    const char *fmt, ...) __attribute__((format(printf, 2, 3)));
 int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
 void trace_event(event_t *event);
 
+struct ui_progress;
+
+#ifdef NO_NEWT_SUPPORT
+static inline int browser__show_help(const char *format __used, va_list ap __used)
+{
+	return 0;
+}
+
+static inline struct ui_progress *ui_progress__new(const char *title __used,
+						   u64 total __used)
+{
+	return (struct ui_progress *)1;
+}
+
+static inline void ui_progress__update(struct ui_progress *self __used,
+				       u64 curr __used) {}
+
+static inline void ui_progress__delete(struct ui_progress *self __used) {}
+#else
+int browser__show_help(const char *format, va_list ap);
+struct ui_progress *ui_progress__new(const char *title, u64 total);
+void ui_progress__update(struct ui_progress *self, u64 curr);
+void ui_progress__delete(struct ui_progress *self);
+#endif
+
 #endif	/* __PERF_DEBUG_H */
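
With the stubs above, callers can use the ui_progress API unconditionally: in a NO_NEWT_SUPPORT build the calls compile away, and ui_progress__new() returns a dummy non-NULL pointer so the usual allocation check still behaves. A hypothetical caller (a sketch building against the patched debug.h, not code from this patch) might look like:

#include "debug.h"	/* ui_progress API, stubbed when NO_NEWT_SUPPORT is set */

/* walk 'total' work items, updating the TUI progress bar when one exists */
static int process_all(u64 total)
{
	struct ui_progress *progress = ui_progress__new("Processing events...", total);
	u64 done;

	if (progress == NULL)
		return -1;	/* only possible in the newt-enabled build */

	for (done = 0; done < total; done++) {
		/* ... do one unit of work ... */
		ui_progress__update(progress, done);
	}

	ui_progress__delete(progress);
	return 0;
}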
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 705ec63548b4..50771b5813ee 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -7,6 +7,23 @@
7#include "strlist.h" 7#include "strlist.h"
8#include "thread.h" 8#include "thread.h"
9 9
10const char *event__name[] = {
11 [0] = "TOTAL",
12 [PERF_RECORD_MMAP] = "MMAP",
13 [PERF_RECORD_LOST] = "LOST",
14 [PERF_RECORD_COMM] = "COMM",
15 [PERF_RECORD_EXIT] = "EXIT",
16 [PERF_RECORD_THROTTLE] = "THROTTLE",
17 [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
18 [PERF_RECORD_FORK] = "FORK",
19 [PERF_RECORD_READ] = "READ",
20 [PERF_RECORD_SAMPLE] = "SAMPLE",
21 [PERF_RECORD_HEADER_ATTR] = "ATTR",
22 [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
23 [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
24 [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
25};
26
10static pid_t event__synthesize_comm(pid_t pid, int full, 27static pid_t event__synthesize_comm(pid_t pid, int full,
11 event__handler_t process, 28 event__handler_t process,
12 struct perf_session *session) 29 struct perf_session *session)
@@ -112,7 +129,11 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
112 event_t ev = { 129 event_t ev = {
113 .header = { 130 .header = {
114 .type = PERF_RECORD_MMAP, 131 .type = PERF_RECORD_MMAP,
115 .misc = 0, /* Just like the kernel, see kernel/perf_event.c __perf_event_mmap */ 132 /*
133 * Just like the kernel, see __perf_event_mmap
134 * in kernel/perf_event.c
135 */
136 .misc = PERF_RECORD_MISC_USER,
116 }, 137 },
117 }; 138 };
118 int n; 139 int n;
@@ -130,6 +151,7 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
130 continue; 151 continue;
131 pbf += n + 3; 152 pbf += n + 3;
132 if (*pbf == 'x') { /* vm_exec */ 153 if (*pbf == 'x') { /* vm_exec */
154 u64 vm_pgoff;
133 char *execname = strchr(bf, '/'); 155 char *execname = strchr(bf, '/');
134 156
135 /* Catch VDSO */ 157 /* Catch VDSO */
@@ -139,6 +161,14 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
139 if (execname == NULL) 161 if (execname == NULL)
140 continue; 162 continue;
141 163
164 pbf += 3;
165 n = hex2u64(pbf, &vm_pgoff);
166 /* pgoff is in bytes, not pages */
167 if (n >= 0)
168 ev.mmap.pgoff = vm_pgoff << getpagesize();
169 else
170 ev.mmap.pgoff = 0;
171
142 size = strlen(execname); 172 size = strlen(execname);
143 execname[size - 1] = '\0'; /* Remove \n */ 173 execname[size - 1] = '\0'; /* Remove \n */
144 memcpy(ev.mmap.filename, execname, size); 174 memcpy(ev.mmap.filename, execname, size);
@@ -158,11 +188,23 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
158} 188}
159 189
160int event__synthesize_modules(event__handler_t process, 190int event__synthesize_modules(event__handler_t process,
161 struct perf_session *session) 191 struct perf_session *session,
192 struct machine *machine)
162{ 193{
163 struct rb_node *nd; 194 struct rb_node *nd;
195 struct map_groups *kmaps = &machine->kmaps;
196 u16 misc;
197
198 /*
199 * kernel uses 0 for user space maps, see kernel/perf_event.c
200 * __perf_event_mmap
201 */
202 if (machine__is_host(machine))
203 misc = PERF_RECORD_MISC_KERNEL;
204 else
205 misc = PERF_RECORD_MISC_GUEST_KERNEL;
164 206
165 for (nd = rb_first(&session->kmaps.maps[MAP__FUNCTION]); 207 for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
166 nd; nd = rb_next(nd)) { 208 nd; nd = rb_next(nd)) {
167 event_t ev; 209 event_t ev;
168 size_t size; 210 size_t size;
@@ -173,12 +215,13 @@ int event__synthesize_modules(event__handler_t process,
173 215
174 size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); 216 size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
175 memset(&ev, 0, sizeof(ev)); 217 memset(&ev, 0, sizeof(ev));
176 ev.mmap.header.misc = 1; /* kernel uses 0 for user space maps, see kernel/perf_event.c __perf_event_mmap */ 218 ev.mmap.header.misc = misc;
177 ev.mmap.header.type = PERF_RECORD_MMAP; 219 ev.mmap.header.type = PERF_RECORD_MMAP;
178 ev.mmap.header.size = (sizeof(ev.mmap) - 220 ev.mmap.header.size = (sizeof(ev.mmap) -
179 (sizeof(ev.mmap.filename) - size)); 221 (sizeof(ev.mmap.filename) - size));
180 ev.mmap.start = pos->start; 222 ev.mmap.start = pos->start;
181 ev.mmap.len = pos->end - pos->start; 223 ev.mmap.len = pos->end - pos->start;
224 ev.mmap.pid = machine->pid;
182 225
183 memcpy(ev.mmap.filename, pos->dso->long_name, 226 memcpy(ev.mmap.filename, pos->dso->long_name,
184 pos->dso->long_name_len + 1); 227 pos->dso->long_name_len + 1);
@@ -241,13 +284,18 @@ static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
241 284
242int event__synthesize_kernel_mmap(event__handler_t process, 285int event__synthesize_kernel_mmap(event__handler_t process,
243 struct perf_session *session, 286 struct perf_session *session,
287 struct machine *machine,
244 const char *symbol_name) 288 const char *symbol_name)
245{ 289{
246 size_t size; 290 size_t size;
291 const char *filename, *mmap_name;
292 char path[PATH_MAX];
293 char name_buff[PATH_MAX];
294 struct map *map;
295
247 event_t ev = { 296 event_t ev = {
248 .header = { 297 .header = {
249 .type = PERF_RECORD_MMAP, 298 .type = PERF_RECORD_MMAP,
250 .misc = 1, /* kernel uses 0 for user space maps, see kernel/perf_event.c __perf_event_mmap */
251 }, 299 },
252 }; 300 };
253 /* 301 /*
@@ -257,16 +305,37 @@ int event__synthesize_kernel_mmap(event__handler_t process,
257 */ 305 */
258 struct process_symbol_args args = { .name = symbol_name, }; 306 struct process_symbol_args args = { .name = symbol_name, };
259 307
260 if (kallsyms__parse("/proc/kallsyms", &args, find_symbol_cb) <= 0) 308 mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
309 if (machine__is_host(machine)) {
310 /*
311 * kernel uses PERF_RECORD_MISC_USER for user space maps,
312 * see kernel/perf_event.c __perf_event_mmap
313 */
314 ev.header.misc = PERF_RECORD_MISC_KERNEL;
315 filename = "/proc/kallsyms";
316 } else {
317 ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
318 if (machine__is_default_guest(machine))
319 filename = (char *) symbol_conf.default_guest_kallsyms;
320 else {
321 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
322 filename = path;
323 }
324 }
325
326 if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
261 return -ENOENT; 327 return -ENOENT;
262 328
329 map = machine->vmlinux_maps[MAP__FUNCTION];
263 size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename), 330 size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
264 "[kernel.kallsyms.%s]", symbol_name) + 1; 331 "%s%s", mmap_name, symbol_name) + 1;
265 size = ALIGN(size, sizeof(u64)); 332 size = ALIGN(size, sizeof(u64));
266 ev.mmap.header.size = (sizeof(ev.mmap) - (sizeof(ev.mmap.filename) - size)); 333 ev.mmap.header.size = (sizeof(ev.mmap) -
334 (sizeof(ev.mmap.filename) - size));
267 ev.mmap.pgoff = args.start; 335 ev.mmap.pgoff = args.start;
268 ev.mmap.start = session->vmlinux_maps[MAP__FUNCTION]->start; 336 ev.mmap.start = map->start;
269 ev.mmap.len = session->vmlinux_maps[MAP__FUNCTION]->end - ev.mmap.start ; 337 ev.mmap.len = map->end - ev.mmap.start;
338 ev.mmap.pid = machine->pid;
270 339
271 return process(&ev, session); 340 return process(&ev, session);
272} 341}
@@ -316,26 +385,54 @@ int event__process_comm(event_t *self, struct perf_session *session)
316int event__process_lost(event_t *self, struct perf_session *session) 385int event__process_lost(event_t *self, struct perf_session *session)
317{ 386{
318 dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost); 387 dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
319 session->events_stats.lost += self->lost.lost; 388 session->hists.stats.total_lost += self->lost.lost;
320 return 0; 389 return 0;
321} 390}
322 391
323int event__process_mmap(event_t *self, struct perf_session *session) 392static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
393{
394 maps[MAP__FUNCTION]->start = self->mmap.start;
395 maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
396 /*
397 * Be a bit paranoid here, some perf.data file came with
398 * a zero sized synthesized MMAP event for the kernel.
399 */
400 if (maps[MAP__FUNCTION]->end == 0)
401 maps[MAP__FUNCTION]->end = ~0UL;
402}
403
404static int event__process_kernel_mmap(event_t *self,
405 struct perf_session *session)
324{ 406{
325 struct thread *thread;
326 struct map *map; 407 struct map *map;
408 char kmmap_prefix[PATH_MAX];
409 struct machine *machine;
410 enum dso_kernel_type kernel_type;
411 bool is_kernel_mmap;
412
413 machine = perf_session__findnew_machine(session, self->mmap.pid);
414 if (!machine) {
415 pr_err("Can't find id %d's machine\n", self->mmap.pid);
416 goto out_problem;
417 }
327 418
328 dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n", 419 machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
329 self->mmap.pid, self->mmap.tid, self->mmap.start, 420 if (machine__is_host(machine))
330 self->mmap.len, self->mmap.pgoff, self->mmap.filename); 421 kernel_type = DSO_TYPE_KERNEL;
422 else
423 kernel_type = DSO_TYPE_GUEST_KERNEL;
331 424
332 if (self->mmap.pid == 0) { 425 is_kernel_mmap = memcmp(self->mmap.filename,
333 static const char kmmap_prefix[] = "[kernel.kallsyms."; 426 kmmap_prefix,
427 strlen(kmmap_prefix)) == 0;
428 if (self->mmap.filename[0] == '/' ||
429 (!is_kernel_mmap && self->mmap.filename[0] == '[')) {
334 430
335 if (self->mmap.filename[0] == '/') { 431 char short_module_name[1024];
336 char short_module_name[1024]; 432 char *name, *dot;
337 char *name = strrchr(self->mmap.filename, '/'), *dot;
338 433
434 if (self->mmap.filename[0] == '/') {
435 name = strrchr(self->mmap.filename, '/');
339 if (name == NULL) 436 if (name == NULL)
340 goto out_problem; 437 goto out_problem;
341 438
@@ -343,58 +440,84 @@ int event__process_mmap(event_t *self, struct perf_session *session)
343 dot = strrchr(name, '.'); 440 dot = strrchr(name, '.');
344 if (dot == NULL) 441 if (dot == NULL)
345 goto out_problem; 442 goto out_problem;
346
347 snprintf(short_module_name, sizeof(short_module_name), 443 snprintf(short_module_name, sizeof(short_module_name),
348 "[%.*s]", (int)(dot - name), name); 444 "[%.*s]", (int)(dot - name), name);
349 strxfrchar(short_module_name, '-', '_'); 445 strxfrchar(short_module_name, '-', '_');
350 446 } else
351 map = perf_session__new_module_map(session, 447 strcpy(short_module_name, self->mmap.filename);
352 self->mmap.start, 448
353 self->mmap.filename); 449 map = machine__new_module(machine, self->mmap.start,
354 if (map == NULL) 450 self->mmap.filename);
355 goto out_problem; 451 if (map == NULL)
356 452 goto out_problem;
357 name = strdup(short_module_name); 453
358 if (name == NULL) 454 name = strdup(short_module_name);
359 goto out_problem; 455 if (name == NULL)
360 456 goto out_problem;
361 map->dso->short_name = name; 457
362 map->end = map->start + self->mmap.len; 458 map->dso->short_name = name;
363 } else if (memcmp(self->mmap.filename, kmmap_prefix, 459 map->end = map->start + self->mmap.len;
364 sizeof(kmmap_prefix) - 1) == 0) { 460 } else if (is_kernel_mmap) {
365 const char *symbol_name = (self->mmap.filename + 461 const char *symbol_name = (self->mmap.filename +
366 sizeof(kmmap_prefix) - 1); 462 strlen(kmmap_prefix));
463 /*
464 * Should be there already, from the build-id table in
465 * the header.
466 */
467 struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
468 kmmap_prefix);
469 if (kernel == NULL)
470 goto out_problem;
471
472 kernel->kernel = kernel_type;
473 if (__machine__create_kernel_maps(machine, kernel) < 0)
474 goto out_problem;
475
476 event_set_kernel_mmap_len(machine->vmlinux_maps, self);
477 perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
478 symbol_name,
479 self->mmap.pgoff);
480 if (machine__is_default_guest(machine)) {
367 /* 481 /*
368 * Should be there already, from the build-id table in 482 * preload dso of guest kernel and modules
369 * the header.
370 */ 483 */
371 struct dso *kernel = __dsos__findnew(&dsos__kernel, 484 dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
372 "[kernel.kallsyms]"); 485 NULL);
373 if (kernel == NULL) 486 }
374 goto out_problem; 487 }
375 488 return 0;
376 kernel->kernel = 1; 489out_problem:
377 if (__perf_session__create_kernel_maps(session, kernel) < 0) 490 return -1;
378 goto out_problem; 491}
379 492
380 session->vmlinux_maps[MAP__FUNCTION]->start = self->mmap.start; 493int event__process_mmap(event_t *self, struct perf_session *session)
381 session->vmlinux_maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len; 494{
382 /* 495 struct machine *machine;
383 * Be a bit paranoid here, some perf.data file came with 496 struct thread *thread;
384 * a zero sized synthesized MMAP event for the kernel. 497 struct map *map;
385 */ 498 u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
386 if (session->vmlinux_maps[MAP__FUNCTION]->end == 0) 499 int ret = 0;
387 session->vmlinux_maps[MAP__FUNCTION]->end = ~0UL;
388 500
389 perf_session__set_kallsyms_ref_reloc_sym(session, symbol_name, 501 dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
390 self->mmap.pgoff); 502 self->mmap.pid, self->mmap.tid, self->mmap.start,
391 } 503 self->mmap.len, self->mmap.pgoff, self->mmap.filename);
504
505 if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
506 cpumode == PERF_RECORD_MISC_KERNEL) {
507 ret = event__process_kernel_mmap(self, session);
508 if (ret < 0)
509 goto out_problem;
392 return 0; 510 return 0;
393 } 511 }
394 512
513 machine = perf_session__find_host_machine(session);
514 if (machine == NULL)
515 goto out_problem;
395 thread = perf_session__findnew(session, self->mmap.pid); 516 thread = perf_session__findnew(session, self->mmap.pid);
396 map = map__new(&self->mmap, MAP__FUNCTION, 517 map = map__new(&machine->user_dsos, self->mmap.start,
397 session->cwd, session->cwdlen); 518 self->mmap.len, self->mmap.pgoff,
519 self->mmap.pid, self->mmap.filename,
520 MAP__FUNCTION, session->cwd, session->cwdlen);
398 521
399 if (thread == NULL || map == NULL) 522 if (thread == NULL || map == NULL)
400 goto out_problem; 523 goto out_problem;
@@ -434,22 +557,56 @@ int event__process_task(event_t *self, struct perf_session *session)
434 557
435void thread__find_addr_map(struct thread *self, 558void thread__find_addr_map(struct thread *self,
436 struct perf_session *session, u8 cpumode, 559 struct perf_session *session, u8 cpumode,
437 enum map_type type, u64 addr, 560 enum map_type type, pid_t pid, u64 addr,
438 struct addr_location *al) 561 struct addr_location *al)
439{ 562{
440 struct map_groups *mg = &self->mg; 563 struct map_groups *mg = &self->mg;
564 struct machine *machine = NULL;
441 565
442 al->thread = self; 566 al->thread = self;
443 al->addr = addr; 567 al->addr = addr;
568 al->cpumode = cpumode;
569 al->filtered = false;
444 570
445 if (cpumode == PERF_RECORD_MISC_KERNEL) { 571 if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
446 al->level = 'k'; 572 al->level = 'k';
447 mg = &session->kmaps; 573 machine = perf_session__find_host_machine(session);
448 } else if (cpumode == PERF_RECORD_MISC_USER) 574 if (machine == NULL) {
575 al->map = NULL;
576 return;
577 }
578 mg = &machine->kmaps;
579 } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
449 al->level = '.'; 580 al->level = '.';
450 else { 581 machine = perf_session__find_host_machine(session);
451 al->level = 'H'; 582 } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
583 al->level = 'g';
584 machine = perf_session__find_machine(session, pid);
585 if (machine == NULL) {
586 al->map = NULL;
587 return;
588 }
589 mg = &machine->kmaps;
590 } else {
591 /*
592 * 'u' means guest os user space.
593 * TODO: We don't support guest user space. Might support late.
594 */
595 if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
596 al->level = 'u';
597 else
598 al->level = 'H';
452 al->map = NULL; 599 al->map = NULL;
600
601 if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
602 cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
603 !perf_guest)
604 al->filtered = true;
605 if ((cpumode == PERF_RECORD_MISC_USER ||
606 cpumode == PERF_RECORD_MISC_KERNEL) &&
607 !perf_host)
608 al->filtered = true;
609
453 return; 610 return;
454 } 611 }
455try_again: 612try_again:
@@ -464,8 +621,10 @@ try_again:
464 * "[vdso]" dso, but for now lets use the old trick of looking 621 * "[vdso]" dso, but for now lets use the old trick of looking
465 * in the whole kernel symbol list. 622 * in the whole kernel symbol list.
466 */ 623 */
467 if ((long long)al->addr < 0 && mg != &session->kmaps) { 624 if ((long long)al->addr < 0 &&
468 mg = &session->kmaps; 625 cpumode == PERF_RECORD_MISC_KERNEL &&
626 machine && mg != &machine->kmaps) {
627 mg = &machine->kmaps;
469 goto try_again; 628 goto try_again;
470 } 629 }
471 } else 630 } else
@@ -474,11 +633,11 @@ try_again:
474 633
475void thread__find_addr_location(struct thread *self, 634void thread__find_addr_location(struct thread *self,
476 struct perf_session *session, u8 cpumode, 635 struct perf_session *session, u8 cpumode,
477 enum map_type type, u64 addr, 636 enum map_type type, pid_t pid, u64 addr,
478 struct addr_location *al, 637 struct addr_location *al,
479 symbol_filter_t filter) 638 symbol_filter_t filter)
480{ 639{
481 thread__find_addr_map(self, session, cpumode, type, addr, al); 640 thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
482 if (al->map != NULL) 641 if (al->map != NULL)
483 al->sym = map__find_symbol(al->map, al->addr, filter); 642 al->sym = map__find_symbol(al->map, al->addr, filter);
484 else 643 else
@@ -490,8 +649,10 @@ static void dso__calc_col_width(struct dso *self)
490 if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep && 649 if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
491 (!symbol_conf.dso_list || 650 (!symbol_conf.dso_list ||
492 strlist__has_entry(symbol_conf.dso_list, self->name))) { 651 strlist__has_entry(symbol_conf.dso_list, self->name))) {
493 unsigned int slen = strlen(self->name); 652 u16 slen = self->short_name_len;
494 if (slen > dsos__col_width) 653 if (verbose)
654 slen = self->long_name_len;
655 if (dsos__col_width < slen)
495 dsos__col_width = slen; 656 dsos__col_width = slen;
496 } 657 }
497 658
@@ -512,31 +673,55 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session,
512 goto out_filtered; 673 goto out_filtered;
513 674
514 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); 675 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
676 /*
677 * Have we already created the kernel maps for the host machine?
678 *
679 * This should have happened earlier, when we processed the kernel MMAP
680 * events, but for older perf.data files there was no such thing, so do
681 * it now.
682 */
683 if (cpumode == PERF_RECORD_MISC_KERNEL &&
684 session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
685 machine__create_kernel_maps(&session->host_machine);
515 686
516 thread__find_addr_location(thread, session, cpumode, MAP__FUNCTION, 687 thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
517 self->ip.ip, al, filter); 688 self->ip.pid, self->ip.ip, al);
518 dump_printf(" ...... dso: %s\n", 689 dump_printf(" ...... dso: %s\n",
519 al->map ? al->map->dso->long_name : 690 al->map ? al->map->dso->long_name :
520 al->level == 'H' ? "[hypervisor]" : "<not found>"); 691 al->level == 'H' ? "[hypervisor]" : "<not found>");
521 /* 692 al->sym = NULL;
522 * We have to do this here as we may have a dso with no symbol hit that 693
523 * has a name longer than the ones with symbols sampled. 694 if (al->map) {
524 */ 695 if (symbol_conf.dso_list &&
525 if (al->map && !sort_dso.elide && !al->map->dso->slen_calculated) 696 (!al->map || !al->map->dso ||
526 dso__calc_col_width(al->map->dso); 697 !(strlist__has_entry(symbol_conf.dso_list,
527 698 al->map->dso->short_name) ||
528 if (symbol_conf.dso_list && 699 (al->map->dso->short_name != al->map->dso->long_name &&
529 (!al->map || !al->map->dso || 700 strlist__has_entry(symbol_conf.dso_list,
530 !(strlist__has_entry(symbol_conf.dso_list, al->map->dso->short_name) || 701 al->map->dso->long_name)))))
531 (al->map->dso->short_name != al->map->dso->long_name && 702 goto out_filtered;
532 strlist__has_entry(symbol_conf.dso_list, al->map->dso->long_name))))) 703 /*
533 goto out_filtered; 704 * We have to do this here as we may have a dso with no symbol
705 * hit that has a name longer than the ones with symbols
706 * sampled.
707 */
708 if (!sort_dso.elide && !al->map->dso->slen_calculated)
709 dso__calc_col_width(al->map->dso);
710
711 al->sym = map__find_symbol(al->map, al->addr, filter);
712 } else {
713 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
714
715 if (dsos__col_width < unresolved_col_width &&
716 !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
717 !symbol_conf.dso_list)
718 dsos__col_width = unresolved_col_width;
719 }
534 720
535 if (symbol_conf.sym_list && al->sym && 721 if (symbol_conf.sym_list && al->sym &&
536 !strlist__has_entry(symbol_conf.sym_list, al->sym->name)) 722 !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
537 goto out_filtered; 723 goto out_filtered;
538 724
539 al->filtered = false;
540 return 0; 725 return 0;
541 726
542out_filtered: 727out_filtered:
@@ -570,6 +755,7 @@ int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
570 array++; 755 array++;
571 } 756 }
572 757
758 data->id = -1ULL;
573 if (type & PERF_SAMPLE_ID) { 759 if (type & PERF_SAMPLE_ID) {
574 data->id = *array; 760 data->id = *array;
575 array++; 761 array++;
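
The event__name[] table added at the top of event.c covers both the kernel PERF_RECORD_* types and the user-space synthetic types declared in event.h below (ATTR, EVENT_TYPE, TRACING_DATA, BUILD_ID). A hypothetical dump helper, not part of this patch, could label records with it; note the NULL check, since the indices between SAMPLE and the synthetic types are unset, and the bound stops at BUILD_ID, the last initialized entry:

#include <stdio.h>
#include "event.h"	/* event_t, event__name[], PERF_RECORD_HEADER_* */

/* print "TYPE (size)" for one record, falling back for unknown types */
static void dump_event_type(const event_t *event)
{
	const char *name = "UNKNOWN";

	if (event->header.type <= PERF_RECORD_HEADER_BUILD_ID &&
	    event__name[event->header.type] != NULL)
		name = event__name[event->header.type];

	printf("%s (%u bytes)\n", name, (unsigned int)event->header.size);
}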
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index a33b94952e34..8577085db067 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -68,21 +68,54 @@ struct sample_data {
68 u64 addr; 68 u64 addr;
69 u64 id; 69 u64 id;
70 u64 stream_id; 70 u64 stream_id;
71 u32 cpu;
72 u64 period; 71 u64 period;
73 struct ip_callchain *callchain; 72 u32 cpu;
74 u32 raw_size; 73 u32 raw_size;
75 void *raw_data; 74 void *raw_data;
75 struct ip_callchain *callchain;
76}; 76};
77 77
78#define BUILD_ID_SIZE 20 78#define BUILD_ID_SIZE 20
79 79
80struct build_id_event { 80struct build_id_event {
81 struct perf_event_header header; 81 struct perf_event_header header;
82 pid_t pid;
82 u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; 83 u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
83 char filename[]; 84 char filename[];
84}; 85};
85 86
87enum perf_user_event_type { /* above any possible kernel type */
88 PERF_RECORD_HEADER_ATTR = 64,
89 PERF_RECORD_HEADER_EVENT_TYPE = 65,
90 PERF_RECORD_HEADER_TRACING_DATA = 66,
91 PERF_RECORD_HEADER_BUILD_ID = 67,
92 PERF_RECORD_FINISHED_ROUND = 68,
93 PERF_RECORD_HEADER_MAX
94};
95
96struct attr_event {
97 struct perf_event_header header;
98 struct perf_event_attr attr;
99 u64 id[];
100};
101
102#define MAX_EVENT_NAME 64
103
104struct perf_trace_event_type {
105 u64 event_id;
106 char name[MAX_EVENT_NAME];
107};
108
109struct event_type_event {
110 struct perf_event_header header;
111 struct perf_trace_event_type event_type;
112};
113
114struct tracing_data_event {
115 struct perf_event_header header;
116 u32 size;
117};
118
86typedef union event_union { 119typedef union event_union {
87 struct perf_event_header header; 120 struct perf_event_header header;
88 struct ip_event ip; 121 struct ip_event ip;
@@ -92,22 +125,12 @@ typedef union event_union {
92 struct lost_event lost; 125 struct lost_event lost;
93 struct read_event read; 126 struct read_event read;
94 struct sample_event sample; 127 struct sample_event sample;
128 struct attr_event attr;
129 struct event_type_event event_type;
130 struct tracing_data_event tracing_data;
131 struct build_id_event build_id;
95} event_t; 132} event_t;
96 133
97struct events_stats {
98 u64 total;
99 u64 lost;
100};
101
102struct event_stat_id {
103 struct rb_node rb_node;
104 struct rb_root hists;
105 struct events_stats stats;
106 u64 config;
107 u64 event_stream;
108 u32 type;
109};
110
111void event__print_totals(void); 134void event__print_totals(void);
112 135
113struct perf_session; 136struct perf_session;
@@ -119,10 +142,13 @@ int event__synthesize_thread(pid_t pid, event__handler_t process,
119void event__synthesize_threads(event__handler_t process, 142void event__synthesize_threads(event__handler_t process,
120 struct perf_session *session); 143 struct perf_session *session);
121int event__synthesize_kernel_mmap(event__handler_t process, 144int event__synthesize_kernel_mmap(event__handler_t process,
122 struct perf_session *session, 145 struct perf_session *session,
123 const char *symbol_name); 146 struct machine *machine,
147 const char *symbol_name);
148
124int event__synthesize_modules(event__handler_t process, 149int event__synthesize_modules(event__handler_t process,
125 struct perf_session *session); 150 struct perf_session *session,
151 struct machine *machine);
126 152
127int event__process_comm(event_t *self, struct perf_session *session); 153int event__process_comm(event_t *self, struct perf_session *session);
128int event__process_lost(event_t *self, struct perf_session *session); 154int event__process_lost(event_t *self, struct perf_session *session);
@@ -134,4 +160,6 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session,
134 struct addr_location *al, symbol_filter_t filter); 160 struct addr_location *al, symbol_filter_t filter);
135int event__parse_sample(event_t *event, u64 type, struct sample_data *data); 161int event__parse_sample(event_t *event, u64 type, struct sample_data *data);
136 162
163extern const char *event__name[];
164
137#endif /* __PERF_RECORD_H */ 165#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 6c9aa16ee51f..8847bec64c54 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -99,13 +99,6 @@ int perf_header__add_attr(struct perf_header *self,
99 return 0; 99 return 0;
100} 100}
101 101
102#define MAX_EVENT_NAME 64
103
104struct perf_trace_event_type {
105 u64 event_id;
106 char name[MAX_EVENT_NAME];
107};
108
109static int event_count; 102static int event_count;
110static struct perf_trace_event_type *events; 103static struct perf_trace_event_type *events;
111 104
@@ -197,7 +190,8 @@ static int write_padded(int fd, const void *bf, size_t count,
197 continue; \ 190 continue; \
198 else 191 else
199 192
200static int __dsos__write_buildid_table(struct list_head *head, u16 misc, int fd) 193static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
194 u16 misc, int fd)
201{ 195{
202 struct dso *pos; 196 struct dso *pos;
203 197
@@ -212,6 +206,7 @@ static int __dsos__write_buildid_table(struct list_head *head, u16 misc, int fd)
212 len = ALIGN(len, NAME_ALIGN); 206 len = ALIGN(len, NAME_ALIGN);
213 memset(&b, 0, sizeof(b)); 207 memset(&b, 0, sizeof(b));
214 memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); 208 memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
209 b.pid = pid;
215 b.header.misc = misc; 210 b.header.misc = misc;
216 b.header.size = sizeof(b) + len; 211 b.header.size = sizeof(b) + len;
217 err = do_write(fd, &b, sizeof(b)); 212 err = do_write(fd, &b, sizeof(b));
@@ -226,13 +221,32 @@ static int __dsos__write_buildid_table(struct list_head *head, u16 misc, int fd)
226 return 0; 221 return 0;
227} 222}
228 223
229static int dsos__write_buildid_table(int fd) 224static int dsos__write_buildid_table(struct perf_header *header, int fd)
230{ 225{
231 int err = __dsos__write_buildid_table(&dsos__kernel, 226 struct perf_session *session = container_of(header,
232 PERF_RECORD_MISC_KERNEL, fd); 227 struct perf_session, header);
233 if (err == 0) 228 struct rb_node *nd;
234 err = __dsos__write_buildid_table(&dsos__user, 229 int err = 0;
235 PERF_RECORD_MISC_USER, fd); 230 u16 kmisc, umisc;
231
232 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
233 struct machine *pos = rb_entry(nd, struct machine, rb_node);
234 if (machine__is_host(pos)) {
235 kmisc = PERF_RECORD_MISC_KERNEL;
236 umisc = PERF_RECORD_MISC_USER;
237 } else {
238 kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
239 umisc = PERF_RECORD_MISC_GUEST_USER;
240 }
241
242 err = __dsos__write_buildid_table(&pos->kernel_dsos, pos->pid,
243 kmisc, fd);
244 if (err == 0)
245 err = __dsos__write_buildid_table(&pos->user_dsos,
246 pos->pid, umisc, fd);
247 if (err)
248 break;
249 }
236 return err; 250 return err;
237} 251}
238 252
@@ -349,9 +363,12 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
349 return err; 363 return err;
350} 364}
351 365
352static int dsos__cache_build_ids(void) 366static int dsos__cache_build_ids(struct perf_header *self)
353{ 367{
354 int err_kernel, err_user; 368 struct perf_session *session = container_of(self,
369 struct perf_session, header);
370 struct rb_node *nd;
371 int ret = 0;
355 char debugdir[PATH_MAX]; 372 char debugdir[PATH_MAX];
356 373
357 snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"), 374 snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"),
@@ -360,9 +377,28 @@ static int dsos__cache_build_ids(void)
360 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) 377 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
361 return -1; 378 return -1;
362 379
363 err_kernel = __dsos__cache_build_ids(&dsos__kernel, debugdir); 380 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
364 err_user = __dsos__cache_build_ids(&dsos__user, debugdir); 381 struct machine *pos = rb_entry(nd, struct machine, rb_node);
365 return err_kernel || err_user ? -1 : 0; 382 ret |= __dsos__cache_build_ids(&pos->kernel_dsos, debugdir);
383 ret |= __dsos__cache_build_ids(&pos->user_dsos, debugdir);
384 }
385 return ret ? -1 : 0;
386}
387
388static bool dsos__read_build_ids(struct perf_header *self, bool with_hits)
389{
390 bool ret = false;
391 struct perf_session *session = container_of(self,
392 struct perf_session, header);
393 struct rb_node *nd;
394
395 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
396 struct machine *pos = rb_entry(nd, struct machine, rb_node);
397 ret |= __dsos__read_build_ids(&pos->kernel_dsos, with_hits);
398 ret |= __dsos__read_build_ids(&pos->user_dsos, with_hits);
399 }
400
401 return ret;
366} 402}
367 403
368static int perf_header__adds_write(struct perf_header *self, int fd) 404static int perf_header__adds_write(struct perf_header *self, int fd)
@@ -373,7 +409,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
373 u64 sec_start; 409 u64 sec_start;
374 int idx = 0, err; 410 int idx = 0, err;
375 411
376 if (dsos__read_build_ids(true)) 412 if (dsos__read_build_ids(self, true))
377 perf_header__set_feat(self, HEADER_BUILD_ID); 413 perf_header__set_feat(self, HEADER_BUILD_ID);
378 414
379 nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); 415 nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
@@ -400,7 +436,6 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
400 trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset; 436 trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
401 } 437 }
402 438
403
404 if (perf_header__has_feat(self, HEADER_BUILD_ID)) { 439 if (perf_header__has_feat(self, HEADER_BUILD_ID)) {
405 struct perf_file_section *buildid_sec; 440 struct perf_file_section *buildid_sec;
406 441
@@ -408,14 +443,14 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
408 443
409 /* Write build-ids */ 444 /* Write build-ids */
410 buildid_sec->offset = lseek(fd, 0, SEEK_CUR); 445 buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
411 err = dsos__write_buildid_table(fd); 446 err = dsos__write_buildid_table(self, fd);
412 if (err < 0) { 447 if (err < 0) {
413 pr_debug("failed to write buildid table\n"); 448 pr_debug("failed to write buildid table\n");
414 goto out_free; 449 goto out_free;
415 } 450 }
416 buildid_sec->size = lseek(fd, 0, SEEK_CUR) - 451 buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
417 buildid_sec->offset; 452 buildid_sec->offset;
418 dsos__cache_build_ids(); 453 dsos__cache_build_ids(self);
419 } 454 }
420 455
421 lseek(fd, sec_start, SEEK_SET); 456 lseek(fd, sec_start, SEEK_SET);
@@ -427,6 +462,25 @@ out_free:
427 return err; 462 return err;
428} 463}
429 464
465int perf_header__write_pipe(int fd)
466{
467 struct perf_pipe_file_header f_header;
468 int err;
469
470 f_header = (struct perf_pipe_file_header){
471 .magic = PERF_MAGIC,
472 .size = sizeof(f_header),
473 };
474
475 err = do_write(fd, &f_header, sizeof(f_header));
476 if (err < 0) {
477 pr_debug("failed to write perf pipe header\n");
478 return err;
479 }
480
481 return 0;
482}
483
430int perf_header__write(struct perf_header *self, int fd, bool at_exit) 484int perf_header__write(struct perf_header *self, int fd, bool at_exit)
431{ 485{
432 struct perf_file_header f_header; 486 struct perf_file_header f_header;
@@ -518,25 +572,10 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
518 return 0; 572 return 0;
519} 573}
520 574
521static int do_read(int fd, void *buf, size_t size)
522{
523 while (size) {
524 int ret = read(fd, buf, size);
525
526 if (ret <= 0)
527 return -1;
528
529 size -= ret;
530 buf += ret;
531 }
532
533 return 0;
534}
535
536static int perf_header__getbuffer64(struct perf_header *self, 575static int perf_header__getbuffer64(struct perf_header *self,
537 int fd, void *buf, size_t size) 576 int fd, void *buf, size_t size)
538{ 577{
539 if (do_read(fd, buf, size)) 578 if (do_read(fd, buf, size) <= 0)
540 return -1; 579 return -1;
541 580
542 if (self->needs_swap) 581 if (self->needs_swap)
@@ -592,7 +631,7 @@ int perf_file_header__read(struct perf_file_header *self,
592{ 631{
593 lseek(fd, 0, SEEK_SET); 632 lseek(fd, 0, SEEK_SET);
594 633
595 if (do_read(fd, self, sizeof(*self)) || 634 if (do_read(fd, self, sizeof(*self)) <= 0 ||
596 memcmp(&self->magic, __perf_magic, sizeof(self->magic))) 635 memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
597 return -1; 636 return -1;
598 637
@@ -636,6 +675,93 @@ int perf_file_header__read(struct perf_file_header *self,
636 return 0; 675 return 0;
637} 676}
638 677
678static int __event_process_build_id(struct build_id_event *bev,
679 char *filename,
680 struct perf_session *session)
681{
682 int err = -1;
683 struct list_head *head;
684 struct machine *machine;
685 u16 misc;
686 struct dso *dso;
687 enum dso_kernel_type dso_type;
688
689 machine = perf_session__findnew_machine(session, bev->pid);
690 if (!machine)
691 goto out;
692
693 misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
694
695 switch (misc) {
696 case PERF_RECORD_MISC_KERNEL:
697 dso_type = DSO_TYPE_KERNEL;
698 head = &machine->kernel_dsos;
699 break;
700 case PERF_RECORD_MISC_GUEST_KERNEL:
701 dso_type = DSO_TYPE_GUEST_KERNEL;
702 head = &machine->kernel_dsos;
703 break;
704 case PERF_RECORD_MISC_USER:
705 case PERF_RECORD_MISC_GUEST_USER:
706 dso_type = DSO_TYPE_USER;
707 head = &machine->user_dsos;
708 break;
709 default:
710 goto out;
711 }
712
713 dso = __dsos__findnew(head, filename);
714 if (dso != NULL) {
715 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
716
717 dso__set_build_id(dso, &bev->build_id);
718
719 if (filename[0] == '[')
720 dso->kernel = dso_type;
721
722 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
723 sbuild_id);
724 pr_debug("build id event received for %s: %s\n",
725 dso->long_name, sbuild_id);
726 }
727
728 err = 0;
729out:
730 return err;
731}
732
733static int perf_header__read_build_ids(struct perf_header *self,
734 int input, u64 offset, u64 size)
735{
736 struct perf_session *session = container_of(self,
737 struct perf_session, header);
738 struct build_id_event bev;
739 char filename[PATH_MAX];
740 u64 limit = offset + size;
741 int err = -1;
742
743 while (offset < limit) {
744 ssize_t len;
745
746 if (read(input, &bev, sizeof(bev)) != sizeof(bev))
747 goto out;
748
749 if (self->needs_swap)
750 perf_event_header__bswap(&bev.header);
751
752 len = bev.header.size - sizeof(bev);
753 if (read(input, filename, len) != len)
754 goto out;
755
756 __event_process_build_id(&bev, filename, session);
757
758 offset += bev.header.size;
759 }
760 err = 0;
761out:
762 return err;
763}
764
639static int perf_file_section__process(struct perf_file_section *self, 765static int perf_file_section__process(struct perf_file_section *self,
640 struct perf_header *ph, 766 struct perf_header *ph,
641 int feat, int fd) 767 int feat, int fd)
@@ -648,7 +774,7 @@ static int perf_file_section__process(struct perf_file_section *self,
648 774
649 switch (feat) { 775 switch (feat) {
650 case HEADER_TRACE_INFO: 776 case HEADER_TRACE_INFO:
651 trace_report(fd); 777 trace_report(fd, false);
652 break; 778 break;
653 779
654 case HEADER_BUILD_ID: 780 case HEADER_BUILD_ID:
@@ -662,13 +788,56 @@ static int perf_file_section__process(struct perf_file_section *self,
662 return 0; 788 return 0;
663} 789}
664 790
665int perf_header__read(struct perf_header *self, int fd) 791static int perf_file_header__read_pipe(struct perf_pipe_file_header *self,
792 struct perf_header *ph, int fd,
793 bool repipe)
794{
795 if (do_read(fd, self, sizeof(*self)) <= 0 ||
796 memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
797 return -1;
798
799 if (repipe && do_write(STDOUT_FILENO, self, sizeof(*self)) < 0)
800 return -1;
801
802 if (self->size != sizeof(*self)) {
803 u64 size = bswap_64(self->size);
804
805 if (size != sizeof(*self))
806 return -1;
807
808 ph->needs_swap = true;
809 }
810
811 return 0;
812}
813
814static int perf_header__read_pipe(struct perf_session *session, int fd)
666{ 815{
816 struct perf_header *self = &session->header;
817 struct perf_pipe_file_header f_header;
818
819 if (perf_file_header__read_pipe(&f_header, self, fd,
820 session->repipe) < 0) {
821 pr_debug("incompatible file format\n");
822 return -EINVAL;
823 }
824
825 session->fd = fd;
826
827 return 0;
828}
829
830int perf_header__read(struct perf_session *session, int fd)
831{
832 struct perf_header *self = &session->header;
667 struct perf_file_header f_header; 833 struct perf_file_header f_header;
668 struct perf_file_attr f_attr; 834 struct perf_file_attr f_attr;
669 u64 f_id; 835 u64 f_id;
670 int nr_attrs, nr_ids, i, j; 836 int nr_attrs, nr_ids, i, j;
671 837
838 if (session->fd_pipe)
839 return perf_header__read_pipe(session, fd);
840
672 if (perf_file_header__read(&f_header, self, fd) < 0) { 841 if (perf_file_header__read(&f_header, self, fd) < 0) {
673 pr_debug("incompatible file format\n"); 842 pr_debug("incompatible file format\n");
674 return -EINVAL; 843 return -EINVAL;
@@ -753,6 +922,14 @@ perf_header__find_attr(u64 id, struct perf_header *header)
753{ 922{
754 int i; 923 int i;
755 924
925 /*
926 * We set id to -1 if the data file doesn't contain sample
927 * ids. Check for this and avoid walking through the entire
928 * list of ids which may be large.
929 */
930 if (id == -1ULL)
931 return NULL;
932
756 for (i = 0; i < header->attrs; i++) { 933 for (i = 0; i < header->attrs; i++) {
757 struct perf_header_attr *attr = header->attr[i]; 934 struct perf_header_attr *attr = header->attr[i];
758 int j; 935 int j;
@@ -765,3 +942,231 @@ perf_header__find_attr(u64 id, struct perf_header *header)
765 942
766 return NULL; 943 return NULL;
767} 944}
945
946int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
947 event__handler_t process,
948 struct perf_session *session)
949{
950 event_t *ev;
951 size_t size;
952 int err;
953
954 size = sizeof(struct perf_event_attr);
955 size = ALIGN(size, sizeof(u64));
956 size += sizeof(struct perf_event_header);
957 size += ids * sizeof(u64);
958
959 ev = malloc(size);
960
961 ev->attr.attr = *attr;
962 memcpy(ev->attr.id, id, ids * sizeof(u64));
963
964 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
965 ev->attr.header.size = size;
966
967 err = process(ev, session);
968
969 free(ev);
970
971 return err;
972}
973
974int event__synthesize_attrs(struct perf_header *self,
975 event__handler_t process,
976 struct perf_session *session)
977{
978 struct perf_header_attr *attr;
979 int i, err = 0;
980
981 for (i = 0; i < self->attrs; i++) {
982 attr = self->attr[i];
983
984 err = event__synthesize_attr(&attr->attr, attr->ids, attr->id,
985 process, session);
986 if (err) {
987 pr_debug("failed to create perf header attribute\n");
988 return err;
989 }
990 }
991
992 return err;
993}
994
995int event__process_attr(event_t *self, struct perf_session *session)
996{
997 struct perf_header_attr *attr;
998 unsigned int i, ids, n_ids;
999
1000 attr = perf_header_attr__new(&self->attr.attr);
1001 if (attr == NULL)
1002 return -ENOMEM;
1003
1004 ids = self->header.size;
1005 ids -= (void *)&self->attr.id - (void *)self;
1006 n_ids = ids / sizeof(u64);
1007
1008 for (i = 0; i < n_ids; i++) {
1009 if (perf_header_attr__add_id(attr, self->attr.id[i]) < 0) {
1010 perf_header_attr__delete(attr);
1011 return -ENOMEM;
1012 }
1013 }
1014
1015 if (perf_header__add_attr(&session->header, attr) < 0) {
1016 perf_header_attr__delete(attr);
1017 return -ENOMEM;
1018 }
1019
1020 perf_session__update_sample_type(session);
1021
1022 return 0;
1023}
1024
1025int event__synthesize_event_type(u64 event_id, char *name,
1026 event__handler_t process,
1027 struct perf_session *session)
1028{
1029 event_t ev;
1030 size_t size = 0;
1031 int err = 0;
1032
1033 memset(&ev, 0, sizeof(ev));
1034
1035 ev.event_type.event_type.event_id = event_id;
1036 memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
1037 strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
1038
1039 ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
1040 size = strlen(name);
1041 size = ALIGN(size, sizeof(u64));
1042 ev.event_type.header.size = sizeof(ev.event_type) -
1043 (sizeof(ev.event_type.event_type.name) - size);
1044
1045 err = process(&ev, session);
1046
1047 return err;
1048}
1049
1050int event__synthesize_event_types(event__handler_t process,
1051 struct perf_session *session)
1052{
1053 struct perf_trace_event_type *type;
1054 int i, err = 0;
1055
1056 for (i = 0; i < event_count; i++) {
1057 type = &events[i];
1058
1059 err = event__synthesize_event_type(type->event_id, type->name,
1060 process, session);
1061 if (err) {
1062 pr_debug("failed to create perf header event type\n");
1063 return err;
1064 }
1065 }
1066
1067 return err;
1068}
1069
1070int event__process_event_type(event_t *self,
1071 struct perf_session *session __unused)
1072{
1073 if (perf_header__push_event(self->event_type.event_type.event_id,
1074 self->event_type.event_type.name) < 0)
1075 return -ENOMEM;
1076
1077 return 0;
1078}
1079
1080int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
1081 int nb_events,
1082 event__handler_t process,
1083 struct perf_session *session __unused)
1084{
1085 event_t ev;
1086 ssize_t size = 0, aligned_size = 0, padding;
1087 int err = 0;
1088
1089 memset(&ev, 0, sizeof(ev));
1090
1091 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
1092 size = read_tracing_data_size(fd, pattrs, nb_events);
1093 if (size <= 0)
1094 return size;
1095 aligned_size = ALIGN(size, sizeof(u64));
1096 padding = aligned_size - size;
1097 ev.tracing_data.header.size = sizeof(ev.tracing_data);
1098 ev.tracing_data.size = aligned_size;
1099
1100 process(&ev, session);
1101
1102 err = read_tracing_data(fd, pattrs, nb_events);
1103 write_padded(fd, NULL, 0, padding);
1104
1105 return aligned_size;
1106}
1107
1108int event__process_tracing_data(event_t *self,
1109 struct perf_session *session)
1110{
1111 ssize_t size_read, padding, size = self->tracing_data.size;
1112 off_t offset = lseek(session->fd, 0, SEEK_CUR);
1113 char buf[BUFSIZ];
1114
1115 /* setup for reading amidst mmap */
1116 lseek(session->fd, offset + sizeof(struct tracing_data_event),
1117 SEEK_SET);
1118
1119 size_read = trace_report(session->fd, session->repipe);
1120
1121 padding = ALIGN(size_read, sizeof(u64)) - size_read;
1122
1123 if (read(session->fd, buf, padding) < 0)
1124 die("reading input file");
1125 if (session->repipe) {
1126 int retw = write(STDOUT_FILENO, buf, padding);
1127 if (retw <= 0 || retw != padding)
1128 die("repiping tracing data padding");
1129 }
1130
1131 if (size_read + padding != size)
1132 die("tracing data size mismatch");
1133
1134 return size_read + padding;
1135}
1136
1137int event__synthesize_build_id(struct dso *pos, u16 misc,
1138 event__handler_t process,
1139 struct machine *machine,
1140 struct perf_session *session)
1141{
1142 event_t ev;
1143 size_t len;
1144 int err = 0;
1145
1146 if (!pos->hit)
1147 return err;
1148
1149 memset(&ev, 0, sizeof(ev));
1150
1151 len = pos->long_name_len + 1;
1152 len = ALIGN(len, NAME_ALIGN);
1153 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
1154 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
1155 ev.build_id.header.misc = misc;
1156 ev.build_id.pid = machine->pid;
1157 ev.build_id.header.size = sizeof(ev.build_id) + len;
1158 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
1159
1160 err = process(&ev, session);
1161
1162 return err;
1163}
1164
1165int event__process_build_id(event_t *self,
1166 struct perf_session *session)
1167{
1168 __event_process_build_id(&self->build_id,
1169 self->build_id.filename,
1170 session);
1171 return 0;
1172}
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 82a6af72d4cc..402ac2454cf8 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -39,6 +39,11 @@ struct perf_file_header {
39 DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); 39 DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
40}; 40};
41 41
42struct perf_pipe_file_header {
43 u64 magic;
44 u64 size;
45};
46
42struct perf_header; 47struct perf_header;
43 48
44int perf_file_header__read(struct perf_file_header *self, 49int perf_file_header__read(struct perf_file_header *self,
@@ -47,21 +52,22 @@ int perf_file_header__read(struct perf_file_header *self,
47struct perf_header { 52struct perf_header {
48 int frozen; 53 int frozen;
49 int attrs, size; 54 int attrs, size;
55 bool needs_swap;
50 struct perf_header_attr **attr; 56 struct perf_header_attr **attr;
51 s64 attr_offset; 57 s64 attr_offset;
52 u64 data_offset; 58 u64 data_offset;
53 u64 data_size; 59 u64 data_size;
54 u64 event_offset; 60 u64 event_offset;
55 u64 event_size; 61 u64 event_size;
56 bool needs_swap;
57 DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); 62 DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
58}; 63};
59 64
60int perf_header__init(struct perf_header *self); 65int perf_header__init(struct perf_header *self);
61void perf_header__exit(struct perf_header *self); 66void perf_header__exit(struct perf_header *self);
62 67
63int perf_header__read(struct perf_header *self, int fd); 68int perf_header__read(struct perf_session *session, int fd);
64int perf_header__write(struct perf_header *self, int fd, bool at_exit); 69int perf_header__write(struct perf_header *self, int fd, bool at_exit);
70int perf_header__write_pipe(int fd);
65 71
66int perf_header__add_attr(struct perf_header *self, 72int perf_header__add_attr(struct perf_header *self,
67 struct perf_header_attr *attr); 73 struct perf_header_attr *attr);
@@ -89,4 +95,33 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
89 const char *name, bool is_kallsyms); 95 const char *name, bool is_kallsyms);
90int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir); 96int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
91 97
98int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
99 event__handler_t process,
100 struct perf_session *session);
101int event__synthesize_attrs(struct perf_header *self,
102 event__handler_t process,
103 struct perf_session *session);
104int event__process_attr(event_t *self, struct perf_session *session);
105
106int event__synthesize_event_type(u64 event_id, char *name,
107 event__handler_t process,
108 struct perf_session *session);
109int event__synthesize_event_types(event__handler_t process,
110 struct perf_session *session);
111int event__process_event_type(event_t *self,
112 struct perf_session *session);
113
114int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
115 int nb_events,
116 event__handler_t process,
117 struct perf_session *session);
118int event__process_tracing_data(event_t *self,
119 struct perf_session *session);
120
121int event__synthesize_build_id(struct dso *pos, u16 misc,
122 event__handler_t process,
123 struct machine *machine,
124 struct perf_session *session);
125int event__process_build_id(event_t *self, struct perf_session *session);
126
92#endif /* __PERF_HEADER_H */ 127#endif /* __PERF_HEADER_H */
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 2be33c7dbf03..9a71c94f057a 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1,3 +1,4 @@
1#include "util.h"
1#include "hist.h" 2#include "hist.h"
2#include "session.h" 3#include "session.h"
3#include "sort.h" 4#include "sort.h"
@@ -8,25 +9,69 @@ struct callchain_param callchain_param = {
8 .min_percent = 0.5 9 .min_percent = 0.5
9}; 10};
10 11
12static void hist_entry__add_cpumode_period(struct hist_entry *self,
13 unsigned int cpumode, u64 period)
14{
15 switch (cpumode) {
16 case PERF_RECORD_MISC_KERNEL:
17 self->period_sys += period;
18 break;
19 case PERF_RECORD_MISC_USER:
20 self->period_us += period;
21 break;
22 case PERF_RECORD_MISC_GUEST_KERNEL:
23 self->period_guest_sys += period;
24 break;
25 case PERF_RECORD_MISC_GUEST_USER:
26 self->period_guest_us += period;
27 break;
28 default:
29 break;
30 }
31}
32
11/* 33/*
12 * histogram, sorted on item, collects counts 34 * histogram, sorted on item, collects periods
13 */ 35 */
14 36
15struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists, 37static struct hist_entry *hist_entry__new(struct hist_entry *template)
16 struct addr_location *al, 38{
17 struct symbol *sym_parent, 39 size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
18 u64 count, bool *hit) 40 struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
41
42 if (self != NULL) {
43 *self = *template;
44 self->nr_events = 1;
45 if (symbol_conf.use_callchain)
46 callchain_init(self->callchain);
47 }
48
49 return self;
50}
51
52static void hists__inc_nr_entries(struct hists *self, struct hist_entry *entry)
19{ 53{
20 struct rb_node **p = &hists->rb_node; 54 if (entry->ms.sym && self->max_sym_namelen < entry->ms.sym->namelen)
55 self->max_sym_namelen = entry->ms.sym->namelen;
56 ++self->nr_entries;
57}
58
59struct hist_entry *__hists__add_entry(struct hists *self,
60 struct addr_location *al,
61 struct symbol *sym_parent, u64 period)
62{
63 struct rb_node **p = &self->entries.rb_node;
21 struct rb_node *parent = NULL; 64 struct rb_node *parent = NULL;
22 struct hist_entry *he; 65 struct hist_entry *he;
23 struct hist_entry entry = { 66 struct hist_entry entry = {
24 .thread = al->thread, 67 .thread = al->thread,
25 .map = al->map, 68 .ms = {
26 .sym = al->sym, 69 .map = al->map,
70 .sym = al->sym,
71 },
27 .ip = al->addr, 72 .ip = al->addr,
28 .level = al->level, 73 .level = al->level,
29 .count = count, 74 .period = period,
30 .parent = sym_parent, 75 .parent = sym_parent,
31 }; 76 };
32 int cmp; 77 int cmp;
@@ -38,8 +83,9 @@ struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists,
38 cmp = hist_entry__cmp(&entry, he); 83 cmp = hist_entry__cmp(&entry, he);
39 84
40 if (!cmp) { 85 if (!cmp) {
41 *hit = true; 86 he->period += period;
42 return he; 87 ++he->nr_events;
88 goto out;
43 } 89 }
44 90
45 if (cmp < 0) 91 if (cmp < 0)
@@ -48,13 +94,14 @@ struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists,
48 p = &(*p)->rb_right; 94 p = &(*p)->rb_right;
49 } 95 }
50 96
51 he = malloc(sizeof(*he)); 97 he = hist_entry__new(&entry);
52 if (!he) 98 if (!he)
53 return NULL; 99 return NULL;
54 *he = entry;
55 rb_link_node(&he->rb_node, parent, p); 100 rb_link_node(&he->rb_node, parent, p);
56 rb_insert_color(&he->rb_node, hists); 101 rb_insert_color(&he->rb_node, &self->entries);
57 *hit = false; 102 hists__inc_nr_entries(self, he);
103out:
104 hist_entry__add_cpumode_period(he, al->cpumode, period);
58 return he; 105 return he;
59} 106}
60 107
@@ -65,7 +112,7 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
65 int64_t cmp = 0; 112 int64_t cmp = 0;
66 113
67 list_for_each_entry(se, &hist_entry__sort_list, list) { 114 list_for_each_entry(se, &hist_entry__sort_list, list) {
68 cmp = se->cmp(left, right); 115 cmp = se->se_cmp(left, right);
69 if (cmp) 116 if (cmp)
70 break; 117 break;
71 } 118 }
@@ -82,7 +129,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
82 list_for_each_entry(se, &hist_entry__sort_list, list) { 129 list_for_each_entry(se, &hist_entry__sort_list, list) {
83 int64_t (*f)(struct hist_entry *, struct hist_entry *); 130 int64_t (*f)(struct hist_entry *, struct hist_entry *);
84 131
85 f = se->collapse ?: se->cmp; 132 f = se->se_collapse ?: se->se_cmp;
86 133
87 cmp = f(left, right); 134 cmp = f(left, right);
88 if (cmp) 135 if (cmp)
@@ -101,7 +148,7 @@ void hist_entry__free(struct hist_entry *he)
101 * collapse the histogram 148 * collapse the histogram
102 */ 149 */
103 150
104static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he) 151static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
105{ 152{
106 struct rb_node **p = &root->rb_node; 153 struct rb_node **p = &root->rb_node;
107 struct rb_node *parent = NULL; 154 struct rb_node *parent = NULL;
@@ -115,9 +162,9 @@ static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
115 cmp = hist_entry__collapse(iter, he); 162 cmp = hist_entry__collapse(iter, he);
116 163
117 if (!cmp) { 164 if (!cmp) {
118 iter->count += he->count; 165 iter->period += he->period;
119 hist_entry__free(he); 166 hist_entry__free(he);
120 return; 167 return false;
121 } 168 }
122 169
123 if (cmp < 0) 170 if (cmp < 0)
@@ -128,9 +175,10 @@ static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
128 175
129 rb_link_node(&he->rb_node, parent, p); 176 rb_link_node(&he->rb_node, parent, p);
130 rb_insert_color(&he->rb_node, root); 177 rb_insert_color(&he->rb_node, root);
178 return true;
131} 179}
132 180
133void perf_session__collapse_resort(struct rb_root *hists) 181void hists__collapse_resort(struct hists *self)
134{ 182{
135 struct rb_root tmp; 183 struct rb_root tmp;
136 struct rb_node *next; 184 struct rb_node *next;
@@ -140,72 +188,77 @@ void perf_session__collapse_resort(struct rb_root *hists)
140 return; 188 return;
141 189
142 tmp = RB_ROOT; 190 tmp = RB_ROOT;
143 next = rb_first(hists); 191 next = rb_first(&self->entries);
192 self->nr_entries = 0;
193 self->max_sym_namelen = 0;
144 194
145 while (next) { 195 while (next) {
146 n = rb_entry(next, struct hist_entry, rb_node); 196 n = rb_entry(next, struct hist_entry, rb_node);
147 next = rb_next(&n->rb_node); 197 next = rb_next(&n->rb_node);
148 198
149 rb_erase(&n->rb_node, hists); 199 rb_erase(&n->rb_node, &self->entries);
150 collapse__insert_entry(&tmp, n); 200 if (collapse__insert_entry(&tmp, n))
201 hists__inc_nr_entries(self, n);
151 } 202 }
152 203
153 *hists = tmp; 204 self->entries = tmp;
154} 205}
155 206
156/* 207/*
157 * reverse the map, sort on count. 208 * reverse the map, sort on period.
158 */ 209 */
159 210
160static void perf_session__insert_output_hist_entry(struct rb_root *root, 211static void __hists__insert_output_entry(struct rb_root *entries,
161 struct hist_entry *he, 212 struct hist_entry *he,
162 u64 min_callchain_hits) 213 u64 min_callchain_hits)
163{ 214{
164 struct rb_node **p = &root->rb_node; 215 struct rb_node **p = &entries->rb_node;
165 struct rb_node *parent = NULL; 216 struct rb_node *parent = NULL;
166 struct hist_entry *iter; 217 struct hist_entry *iter;
167 218
168 if (symbol_conf.use_callchain) 219 if (symbol_conf.use_callchain)
169 callchain_param.sort(&he->sorted_chain, &he->callchain, 220 callchain_param.sort(&he->sorted_chain, he->callchain,
170 min_callchain_hits, &callchain_param); 221 min_callchain_hits, &callchain_param);
171 222
172 while (*p != NULL) { 223 while (*p != NULL) {
173 parent = *p; 224 parent = *p;
174 iter = rb_entry(parent, struct hist_entry, rb_node); 225 iter = rb_entry(parent, struct hist_entry, rb_node);
175 226
176 if (he->count > iter->count) 227 if (he->period > iter->period)
177 p = &(*p)->rb_left; 228 p = &(*p)->rb_left;
178 else 229 else
179 p = &(*p)->rb_right; 230 p = &(*p)->rb_right;
180 } 231 }
181 232
182 rb_link_node(&he->rb_node, parent, p); 233 rb_link_node(&he->rb_node, parent, p);
183 rb_insert_color(&he->rb_node, root); 234 rb_insert_color(&he->rb_node, entries);
184} 235}
185 236
186void perf_session__output_resort(struct rb_root *hists, u64 total_samples) 237void hists__output_resort(struct hists *self)
187{ 238{
188 struct rb_root tmp; 239 struct rb_root tmp;
189 struct rb_node *next; 240 struct rb_node *next;
190 struct hist_entry *n; 241 struct hist_entry *n;
191 u64 min_callchain_hits; 242 u64 min_callchain_hits;
192 243
193 min_callchain_hits = 244 min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);
194 total_samples * (callchain_param.min_percent / 100);
195 245
196 tmp = RB_ROOT; 246 tmp = RB_ROOT;
197 next = rb_first(hists); 247 next = rb_first(&self->entries);
248
249 self->nr_entries = 0;
250 self->max_sym_namelen = 0;
198 251
199 while (next) { 252 while (next) {
200 n = rb_entry(next, struct hist_entry, rb_node); 253 n = rb_entry(next, struct hist_entry, rb_node);
201 next = rb_next(&n->rb_node); 254 next = rb_next(&n->rb_node);
202 255
203 rb_erase(&n->rb_node, hists); 256 rb_erase(&n->rb_node, &self->entries);
204 perf_session__insert_output_hist_entry(&tmp, n, 257 __hists__insert_output_entry(&tmp, n, min_callchain_hits);
205 min_callchain_hits); 258 hists__inc_nr_entries(self, n);
206 } 259 }
207 260
208 *hists = tmp; 261 self->entries = tmp;
209} 262}
210 263
211static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) 264static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
@@ -237,7 +290,7 @@ static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
237} 290}
238 291
239static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, 292static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
240 int depth, int depth_mask, int count, 293 int depth, int depth_mask, int period,
241 u64 total_samples, int hits, 294 u64 total_samples, int hits,
242 int left_margin) 295 int left_margin)
243{ 296{
@@ -250,7 +303,7 @@ static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
250 ret += fprintf(fp, "|"); 303 ret += fprintf(fp, "|");
251 else 304 else
252 ret += fprintf(fp, " "); 305 ret += fprintf(fp, " ");
253 if (!count && i == depth - 1) { 306 if (!period && i == depth - 1) {
254 double percent; 307 double percent;
255 308
256 percent = hits * 100.0 / total_samples; 309 percent = hits * 100.0 / total_samples;
@@ -258,8 +311,8 @@ static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
258 } else 311 } else
259 ret += fprintf(fp, "%s", " "); 312 ret += fprintf(fp, "%s", " ");
260 } 313 }
261 if (chain->sym) 314 if (chain->ms.sym)
262 ret += fprintf(fp, "%s\n", chain->sym->name); 315 ret += fprintf(fp, "%s\n", chain->ms.sym->name);
263 else 316 else
264 ret += fprintf(fp, "%p\n", (void *)(long)chain->ip); 317 ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
265 318
@@ -278,7 +331,7 @@ static void init_rem_hits(void)
278 } 331 }
279 332
280 strcpy(rem_sq_bracket->name, "[...]"); 333 strcpy(rem_sq_bracket->name, "[...]");
281 rem_hits.sym = rem_sq_bracket; 334 rem_hits.ms.sym = rem_sq_bracket;
282} 335}
283 336
284static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, 337static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
@@ -293,6 +346,7 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
293 u64 remaining; 346 u64 remaining;
294 size_t ret = 0; 347 size_t ret = 0;
295 int i; 348 int i;
349 uint entries_printed = 0;
296 350
297 if (callchain_param.mode == CHAIN_GRAPH_REL) 351 if (callchain_param.mode == CHAIN_GRAPH_REL)
298 new_total = self->children_hit; 352 new_total = self->children_hit;
@@ -328,8 +382,6 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
328 left_margin); 382 left_margin);
329 i = 0; 383 i = 0;
330 list_for_each_entry(chain, &child->val, list) { 384 list_for_each_entry(chain, &child->val, list) {
331 if (chain->ip >= PERF_CONTEXT_MAX)
332 continue;
333 ret += ipchain__fprintf_graph(fp, chain, depth, 385 ret += ipchain__fprintf_graph(fp, chain, depth,
334 new_depth_mask, i++, 386 new_depth_mask, i++,
335 new_total, 387 new_total,
@@ -341,6 +393,8 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
341 new_depth_mask | (1 << depth), 393 new_depth_mask | (1 << depth),
342 left_margin); 394 left_margin);
343 node = next; 395 node = next;
396 if (++entries_printed == callchain_param.print_limit)
397 break;
344 } 398 }
345 399
346 if (callchain_param.mode == CHAIN_GRAPH_REL && 400 if (callchain_param.mode == CHAIN_GRAPH_REL &&
@@ -366,11 +420,9 @@ static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
366 bool printed = false; 420 bool printed = false;
367 int i = 0; 421 int i = 0;
368 int ret = 0; 422 int ret = 0;
423 u32 entries_printed = 0;
369 424
370 list_for_each_entry(chain, &self->val, list) { 425 list_for_each_entry(chain, &self->val, list) {
371 if (chain->ip >= PERF_CONTEXT_MAX)
372 continue;
373
374 if (!i++ && sort__first_dimension == SORT_SYM) 426 if (!i++ && sort__first_dimension == SORT_SYM)
375 continue; 427 continue;
376 428
@@ -385,10 +437,13 @@ static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
385 } else 437 } else
386 ret += callchain__fprintf_left_margin(fp, left_margin); 438 ret += callchain__fprintf_left_margin(fp, left_margin);
387 439
388 if (chain->sym) 440 if (chain->ms.sym)
389 ret += fprintf(fp, " %s\n", chain->sym->name); 441 ret += fprintf(fp, " %s\n", chain->ms.sym->name);
390 else 442 else
391 ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); 443 ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
444
445 if (++entries_printed == callchain_param.print_limit)
446 break;
392 } 447 }
393 448
394 ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin); 449 ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);
@@ -411,8 +466,8 @@ static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
411 list_for_each_entry(chain, &self->val, list) { 466 list_for_each_entry(chain, &self->val, list) {
412 if (chain->ip >= PERF_CONTEXT_MAX) 467 if (chain->ip >= PERF_CONTEXT_MAX)
413 continue; 468 continue;
414 if (chain->sym) 469 if (chain->ms.sym)
415 ret += fprintf(fp, " %s\n", chain->sym->name); 470 ret += fprintf(fp, " %s\n", chain->ms.sym->name);
416 else 471 else
417 ret += fprintf(fp, " %p\n", 472 ret += fprintf(fp, " %p\n",
418 (void *)(long)chain->ip); 473 (void *)(long)chain->ip);
@@ -427,6 +482,7 @@ static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
427 struct rb_node *rb_node; 482 struct rb_node *rb_node;
428 struct callchain_node *chain; 483 struct callchain_node *chain;
429 size_t ret = 0; 484 size_t ret = 0;
485 u32 entries_printed = 0;
430 486
431 rb_node = rb_first(&self->sorted_chain); 487 rb_node = rb_first(&self->sorted_chain);
432 while (rb_node) { 488 while (rb_node) {
@@ -449,55 +505,88 @@ static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
449 break; 505 break;
450 } 506 }
451 ret += fprintf(fp, "\n"); 507 ret += fprintf(fp, "\n");
508 if (++entries_printed == callchain_param.print_limit)
509 break;
452 rb_node = rb_next(rb_node); 510 rb_node = rb_next(rb_node);
453 } 511 }
454 512
455 return ret; 513 return ret;
456} 514}
457 515
458static size_t hist_entry__fprintf(struct hist_entry *self, 516int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
459 struct perf_session *pair_session, 517 struct hists *pair_hists, bool show_displacement,
460 bool show_displacement, 518 long displacement, bool color, u64 session_total)
461 long displacement, FILE *fp,
462 u64 session_total)
463{ 519{
464 struct sort_entry *se; 520 struct sort_entry *se;
465 u64 count, total; 521 u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
466 const char *sep = symbol_conf.field_sep; 522 const char *sep = symbol_conf.field_sep;
467 size_t ret; 523 int ret;
468 524
469 if (symbol_conf.exclude_other && !self->parent) 525 if (symbol_conf.exclude_other && !self->parent)
470 return 0; 526 return 0;
471 527
472 if (pair_session) { 528 if (pair_hists) {
473 count = self->pair ? self->pair->count : 0; 529 period = self->pair ? self->pair->period : 0;
474 total = pair_session->events_stats.total; 530 total = pair_hists->stats.total_period;
531 period_sys = self->pair ? self->pair->period_sys : 0;
532 period_us = self->pair ? self->pair->period_us : 0;
533 period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
534 period_guest_us = self->pair ? self->pair->period_guest_us : 0;
475 } else { 535 } else {
476 count = self->count; 536 period = self->period;
477 total = session_total; 537 total = session_total;
538 period_sys = self->period_sys;
539 period_us = self->period_us;
540 period_guest_sys = self->period_guest_sys;
541 period_guest_us = self->period_guest_us;
478 } 542 }
479 543
480 if (total) 544 if (total) {
481 ret = percent_color_fprintf(fp, sep ? "%.2f" : " %6.2f%%", 545 if (color)
482 (count * 100.0) / total); 546 ret = percent_color_snprintf(s, size,
483 else 547 sep ? "%.2f" : " %6.2f%%",
484 ret = fprintf(fp, sep ? "%lld" : "%12lld ", count); 548 (period * 100.0) / total);
549 else
550 ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
551 (period * 100.0) / total);
552 if (symbol_conf.show_cpu_utilization) {
553 ret += percent_color_snprintf(s + ret, size - ret,
554 sep ? "%.2f" : " %6.2f%%",
555 (period_sys * 100.0) / total);
556 ret += percent_color_snprintf(s + ret, size - ret,
557 sep ? "%.2f" : " %6.2f%%",
558 (period_us * 100.0) / total);
559 if (perf_guest) {
560 ret += percent_color_snprintf(s + ret,
561 size - ret,
562 sep ? "%.2f" : " %6.2f%%",
563 (period_guest_sys * 100.0) /
564 total);
565 ret += percent_color_snprintf(s + ret,
566 size - ret,
567 sep ? "%.2f" : " %6.2f%%",
568 (period_guest_us * 100.0) /
569 total);
570 }
571 }
572 } else
573 ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period);
485 574
486 if (symbol_conf.show_nr_samples) { 575 if (symbol_conf.show_nr_samples) {
487 if (sep) 576 if (sep)
488 fprintf(fp, "%c%lld", *sep, count); 577 ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period);
489 else 578 else
490 fprintf(fp, "%11lld", count); 579 ret += snprintf(s + ret, size - ret, "%11lld", period);
491 } 580 }
492 581
493 if (pair_session) { 582 if (pair_hists) {
494 char bf[32]; 583 char bf[32];
495 double old_percent = 0, new_percent = 0, diff; 584 double old_percent = 0, new_percent = 0, diff;
496 585
497 if (total > 0) 586 if (total > 0)
498 old_percent = (count * 100.0) / total; 587 old_percent = (period * 100.0) / total;
499 if (session_total > 0) 588 if (session_total > 0)
500 new_percent = (self->count * 100.0) / session_total; 589 new_percent = (self->period * 100.0) / session_total;
501 590
502 diff = new_percent - old_percent; 591 diff = new_percent - old_percent;
503 592
@@ -507,9 +596,9 @@ static size_t hist_entry__fprintf(struct hist_entry *self,
507 snprintf(bf, sizeof(bf), " "); 596 snprintf(bf, sizeof(bf), " ");
508 597
509 if (sep) 598 if (sep)
510 ret += fprintf(fp, "%c%s", *sep, bf); 599 ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
511 else 600 else
512 ret += fprintf(fp, "%11.11s", bf); 601 ret += snprintf(s + ret, size - ret, "%11.11s", bf);
513 602
514 if (show_displacement) { 603 if (show_displacement) {
515 if (displacement) 604 if (displacement)
@@ -518,9 +607,9 @@ static size_t hist_entry__fprintf(struct hist_entry *self,
518 snprintf(bf, sizeof(bf), " "); 607 snprintf(bf, sizeof(bf), " ");
519 608
520 if (sep) 609 if (sep)
521 fprintf(fp, "%c%s", *sep, bf); 610 ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
522 else 611 else
523 fprintf(fp, "%6.6s", bf); 612 ret += snprintf(s + ret, size - ret, "%6.6s", bf);
524 } 613 }
525 } 614 }
526 615
@@ -528,33 +617,43 @@ static size_t hist_entry__fprintf(struct hist_entry *self,
528 if (se->elide) 617 if (se->elide)
529 continue; 618 continue;
530 619
531 fprintf(fp, "%s", sep ?: " "); 620 ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
532 ret += se->print(fp, self, se->width ? *se->width : 0); 621 ret += se->se_snprintf(self, s + ret, size - ret,
622 se->se_width ? *se->se_width : 0);
533 } 623 }
534 624
535 ret += fprintf(fp, "\n"); 625 return ret;
626}
536 627
537 if (symbol_conf.use_callchain) { 628int hist_entry__fprintf(struct hist_entry *self, struct hists *pair_hists,
538 int left_margin = 0; 629 bool show_displacement, long displacement, FILE *fp,
630 u64 session_total)
631{
632 char bf[512];
633 hist_entry__snprintf(self, bf, sizeof(bf), pair_hists,
634 show_displacement, displacement,
635 true, session_total);
636 return fprintf(fp, "%s\n", bf);
637}
539 638
540 if (sort__first_dimension == SORT_COMM) { 639static size_t hist_entry__fprintf_callchain(struct hist_entry *self, FILE *fp,
541 se = list_first_entry(&hist_entry__sort_list, typeof(*se), 640 u64 session_total)
542 list); 641{
543 left_margin = se->width ? *se->width : 0; 642 int left_margin = 0;
544 left_margin -= thread__comm_len(self->thread);
545 }
546 643
547 hist_entry_callchain__fprintf(fp, self, session_total, 644 if (sort__first_dimension == SORT_COMM) {
548 left_margin); 645 struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
646 typeof(*se), list);
647 left_margin = se->se_width ? *se->se_width : 0;
648 left_margin -= thread__comm_len(self->thread);
549 } 649 }
550 650
551 return ret; 651 return hist_entry_callchain__fprintf(fp, self, session_total,
652 left_margin);
552} 653}
553 654
554size_t perf_session__fprintf_hists(struct rb_root *hists, 655size_t hists__fprintf(struct hists *self, struct hists *pair,
555 struct perf_session *pair, 656 bool show_displacement, FILE *fp)
556 bool show_displacement, FILE *fp,
557 u64 session_total)
558{ 657{
559 struct sort_entry *se; 658 struct sort_entry *se;
560 struct rb_node *nd; 659 struct rb_node *nd;
@@ -563,7 +662,7 @@ size_t perf_session__fprintf_hists(struct rb_root *hists,
563 long displacement = 0; 662 long displacement = 0;
564 unsigned int width; 663 unsigned int width;
565 const char *sep = symbol_conf.field_sep; 664 const char *sep = symbol_conf.field_sep;
566 char *col_width = symbol_conf.col_width_list_str; 665 const char *col_width = symbol_conf.col_width_list_str;
567 666
568 init_rem_hits(); 667 init_rem_hits();
569 668
@@ -576,6 +675,24 @@ size_t perf_session__fprintf_hists(struct rb_root *hists,
576 fputs(" Samples ", fp); 675 fputs(" Samples ", fp);
577 } 676 }
578 677
678 if (symbol_conf.show_cpu_utilization) {
679 if (sep) {
680 ret += fprintf(fp, "%csys", *sep);
681 ret += fprintf(fp, "%cus", *sep);
682 if (perf_guest) {
683 ret += fprintf(fp, "%cguest sys", *sep);
684 ret += fprintf(fp, "%cguest us", *sep);
685 }
686 } else {
687 ret += fprintf(fp, " sys ");
688 ret += fprintf(fp, " us ");
689 if (perf_guest) {
690 ret += fprintf(fp, " guest sys ");
691 ret += fprintf(fp, " guest us ");
692 }
693 }
694 }
695
579 if (pair) { 696 if (pair) {
580 if (sep) 697 if (sep)
581 ret += fprintf(fp, "%cDelta", *sep); 698 ret += fprintf(fp, "%cDelta", *sep);
@@ -594,22 +711,22 @@ size_t perf_session__fprintf_hists(struct rb_root *hists,
594 if (se->elide) 711 if (se->elide)
595 continue; 712 continue;
596 if (sep) { 713 if (sep) {
597 fprintf(fp, "%c%s", *sep, se->header); 714 fprintf(fp, "%c%s", *sep, se->se_header);
598 continue; 715 continue;
599 } 716 }
600 width = strlen(se->header); 717 width = strlen(se->se_header);
601 if (se->width) { 718 if (se->se_width) {
602 if (symbol_conf.col_width_list_str) { 719 if (symbol_conf.col_width_list_str) {
603 if (col_width) { 720 if (col_width) {
604 *se->width = atoi(col_width); 721 *se->se_width = atoi(col_width);
605 col_width = strchr(col_width, ','); 722 col_width = strchr(col_width, ',');
606 if (col_width) 723 if (col_width)
607 ++col_width; 724 ++col_width;
608 } 725 }
609 } 726 }
610 width = *se->width = max(*se->width, width); 727 width = *se->se_width = max(*se->se_width, width);
611 } 728 }
612 fprintf(fp, " %*s", width, se->header); 729 fprintf(fp, " %*s", width, se->se_header);
613 } 730 }
614 fprintf(fp, "\n"); 731 fprintf(fp, "\n");
615 732
@@ -631,10 +748,10 @@ size_t perf_session__fprintf_hists(struct rb_root *hists,
631 continue; 748 continue;
632 749
633 fprintf(fp, " "); 750 fprintf(fp, " ");
634 if (se->width) 751 if (se->se_width)
635 width = *se->width; 752 width = *se->se_width;
636 else 753 else
637 width = strlen(se->header); 754 width = strlen(se->se_header);
638 for (i = 0; i < width; i++) 755 for (i = 0; i < width; i++)
639 fprintf(fp, "."); 756 fprintf(fp, ".");
640 } 757 }
@@ -642,7 +759,7 @@ size_t perf_session__fprintf_hists(struct rb_root *hists,
642 fprintf(fp, "\n#\n"); 759 fprintf(fp, "\n#\n");
643 760
644print_entries: 761print_entries:
645 for (nd = rb_first(hists); nd; nd = rb_next(nd)) { 762 for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
646 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 763 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
647 764
648 if (show_displacement) { 765 if (show_displacement) {
@@ -654,10 +771,14 @@ print_entries:
654 ++position; 771 ++position;
655 } 772 }
656 ret += hist_entry__fprintf(h, pair, show_displacement, 773 ret += hist_entry__fprintf(h, pair, show_displacement,
657 displacement, fp, session_total); 774 displacement, fp, self->stats.total_period);
658 if (h->map == NULL && verbose > 1) { 775
776 if (symbol_conf.use_callchain)
777 ret += hist_entry__fprintf_callchain(h, fp, self->stats.total_period);
778
779 if (h->ms.map == NULL && verbose > 1) {
659 __map_groups__fprintf_maps(&h->thread->mg, 780 __map_groups__fprintf_maps(&h->thread->mg,
660 MAP__FUNCTION, fp); 781 MAP__FUNCTION, verbose, fp);
661 fprintf(fp, "%.10s end\n", graph_dotted_line); 782 fprintf(fp, "%.10s end\n", graph_dotted_line);
662 } 783 }
663 } 784 }
@@ -666,3 +787,271 @@ print_entries:
666 787
667 return ret; 788 return ret;
668} 789}
790
791enum hist_filter {
792 HIST_FILTER__DSO,
793 HIST_FILTER__THREAD,
794};
795
796void hists__filter_by_dso(struct hists *self, const struct dso *dso)
797{
798 struct rb_node *nd;
799
800 self->nr_entries = self->stats.total_period = 0;
801 self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
802 self->max_sym_namelen = 0;
803
804 for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
805 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
806
807 if (symbol_conf.exclude_other && !h->parent)
808 continue;
809
810 if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
811 h->filtered |= (1 << HIST_FILTER__DSO);
812 continue;
813 }
814
815 h->filtered &= ~(1 << HIST_FILTER__DSO);
816 if (!h->filtered) {
817 ++self->nr_entries;
818 self->stats.total_period += h->period;
819 self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
820 if (h->ms.sym &&
821 self->max_sym_namelen < h->ms.sym->namelen)
822 self->max_sym_namelen = h->ms.sym->namelen;
823 }
824 }
825}
826
827void hists__filter_by_thread(struct hists *self, const struct thread *thread)
828{
829 struct rb_node *nd;
830
831 self->nr_entries = self->stats.total_period = 0;
832 self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
833 self->max_sym_namelen = 0;
834
835 for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
836 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
837
838 if (thread != NULL && h->thread != thread) {
839 h->filtered |= (1 << HIST_FILTER__THREAD);
840 continue;
841 }
842 h->filtered &= ~(1 << HIST_FILTER__THREAD);
843 if (!h->filtered) {
844 ++self->nr_entries;
845 self->stats.total_period += h->period;
846 self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
847 if (h->ms.sym &&
848 self->max_sym_namelen < h->ms.sym->namelen)
849 self->max_sym_namelen = h->ms.sym->namelen;
850 }
851 }
852}
853
854static int symbol__alloc_hist(struct symbol *self)
855{
856 struct sym_priv *priv = symbol__priv(self);
857 const int size = (sizeof(*priv->hist) +
858 (self->end - self->start) * sizeof(u64));
859
860 priv->hist = zalloc(size);
861 return priv->hist == NULL ? -1 : 0;
862}
863
864int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
865{
866 unsigned int sym_size, offset;
867 struct symbol *sym = self->ms.sym;
868 struct sym_priv *priv;
869 struct sym_hist *h;
870
871 if (!sym || !self->ms.map)
872 return 0;
873
874 priv = symbol__priv(sym);
875 if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
876 return -ENOMEM;
877
878 sym_size = sym->end - sym->start;
879 offset = ip - sym->start;
880
881 pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
882
883 if (offset >= sym_size)
884 return 0;
885
886 h = priv->hist;
887 h->sum++;
888 h->ip[offset]++;
889
890 pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
891 self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
892 return 0;
893}
894
895static struct objdump_line *objdump_line__new(s64 offset, char *line)
896{
897 struct objdump_line *self = malloc(sizeof(*self));
898
899 if (self != NULL) {
900 self->offset = offset;
901 self->line = line;
902 }
903
904 return self;
905}
906
907void objdump_line__free(struct objdump_line *self)
908{
909 free(self->line);
910 free(self);
911}
912
913static void objdump__add_line(struct list_head *head, struct objdump_line *line)
914{
915 list_add_tail(&line->node, head);
916}
917
918struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
919 struct objdump_line *pos)
920{
921 list_for_each_entry_continue(pos, head, node)
922 if (pos->offset >= 0)
923 return pos;
924
925 return NULL;
926}
927
928static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
929 struct list_head *head)
930{
931 struct symbol *sym = self->ms.sym;
932 struct objdump_line *objdump_line;
933 char *line = NULL, *tmp, *tmp2, *c;
934 size_t line_len;
935 s64 line_ip, offset = -1;
936
937 if (getline(&line, &line_len, file) < 0)
938 return -1;
939
940 if (!line)
941 return -1;
942
943 while (line_len != 0 && isspace(line[line_len - 1]))
944 line[--line_len] = '\0';
945
946 c = strchr(line, '\n');
947 if (c)
948 *c = 0;
949
950 line_ip = -1;
951
952 /*
953 * Strip leading spaces:
954 */
955 tmp = line;
956 while (*tmp) {
957 if (*tmp != ' ')
958 break;
959 tmp++;
960 }
961
962 if (*tmp) {
963 /*
964 * Parse hex addresses followed by ':'
965 */
966 line_ip = strtoull(tmp, &tmp2, 16);
967 if (*tmp2 != ':')
968 line_ip = -1;
969 }
970
971 if (line_ip != -1) {
972 u64 start = map__rip_2objdump(self->ms.map, sym->start);
973 offset = line_ip - start;
974 }
975
976 objdump_line = objdump_line__new(offset, line);
977 if (objdump_line == NULL) {
978 free(line);
979 return -1;
980 }
981 objdump__add_line(head, objdump_line);
982
983 return 0;
984}
985
986int hist_entry__annotate(struct hist_entry *self, struct list_head *head)
987{
988 struct symbol *sym = self->ms.sym;
989 struct map *map = self->ms.map;
990 struct dso *dso = map->dso;
991 const char *filename = dso->long_name;
992 char command[PATH_MAX * 2];
993 FILE *file;
994 u64 len;
995
996 if (!filename)
997 return -1;
998
999 if (dso->origin == DSO__ORIG_KERNEL) {
1000 if (dso->annotate_warned)
1001 return 0;
1002 dso->annotate_warned = 1;
1003 pr_err("Can't annotate %s: No vmlinux file was found in the "
1004 "path:\n", sym->name);
1005 vmlinux_path__fprintf(stderr);
1006 return -1;
1007 }
1008
1009 pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
1010 filename, sym->name, map->unmap_ip(map, sym->start),
1011 map->unmap_ip(map, sym->end));
1012
1013 len = sym->end - sym->start;
1014
1015 pr_debug("annotating [%p] %30s : [%p] %30s\n",
1016 dso, dso->long_name, sym, sym->name);
1017
1018 snprintf(command, sizeof(command),
1019 "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s|expand",
1020 map__rip_2objdump(map, sym->start),
1021 map__rip_2objdump(map, sym->end),
1022 filename, filename);
1023
1024 pr_debug("Executing: %s\n", command);
1025
1026 file = popen(command, "r");
1027 if (!file)
1028 return -1;
1029
1030 while (!feof(file))
1031 if (hist_entry__parse_objdump_line(self, file, head) < 0)
1032 break;
1033
1034 pclose(file);
1035 return 0;
1036}
1037
1038void hists__inc_nr_events(struct hists *self, u32 type)
1039{
1040 ++self->stats.nr_events[0];
1041 ++self->stats.nr_events[type];
1042}
1043
1044size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
1045{
1046 int i;
1047 size_t ret = 0;
1048
1049 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
1050 if (!event__name[i])
1051 continue;
1052 ret += fprintf(fp, "%10s events: %10d\n",
1053 event__name[i], self->stats.nr_events[i]);
1054 }
1055
1056 return ret;
1057}
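A small sketch of the address-parsing step that hist_entry__parse_objdump_line() performs above on objdump -dS output: a line is an instruction only if, after stripping leading spaces, it begins with a hex address followed by ':'. This is not taken from the patch; the sample input lines are invented.

/* sketch: pull the leading "hexaddr:" out of an objdump -dS line */
#include <stdio.h>
#include <stdlib.h>

/* returns the parsed address, or -1 if the line has no "addr:" prefix */
static long long parse_line_ip(const char *line)
{
	const char *p = line;
	char *end;
	long long ip;

	while (*p == ' ')		/* strip leading spaces */
		p++;
	if (!*p)
		return -1;

	ip = strtoll(p, &end, 16);	/* hex address ... */
	if (*end != ':')		/* ... must be followed by ':' */
		return -1;
	return ip;
}

int main(void)
{
	/* invented objdump-style lines */
	printf("%lld\n", parse_line_ip("  400526:\t55 \tpush   %rbp"));
	printf("%lld\n", parse_line_ip("main():"));	/* -1: a source line */
	return 0;
}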
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 16f360cce5bf..6f17dcd8412c 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -6,24 +6,104 @@
6 6
7extern struct callchain_param callchain_param; 7extern struct callchain_param callchain_param;
8 8
9struct perf_session;
10struct hist_entry; 9struct hist_entry;
11struct addr_location; 10struct addr_location;
12struct symbol; 11struct symbol;
13struct rb_root; 12struct rb_root;
14 13
15struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists, 14struct objdump_line {
16 struct addr_location *al, 15 struct list_head node;
17 struct symbol *parent, 16 s64 offset;
18 u64 count, bool *hit); 17 char *line;
18};
19
20void objdump_line__free(struct objdump_line *self);
21struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
22 struct objdump_line *pos);
23
24struct sym_hist {
25 u64 sum;
26 u64 ip[0];
27};
28
29struct sym_ext {
30 struct rb_node node;
31 double percent;
32 char *path;
33};
34
35struct sym_priv {
36 struct sym_hist *hist;
37 struct sym_ext *ext;
38};
39
40/*
41 * The kernel collects the number of events it couldn't send in a stretch and
42 * when possible sends this number in a PERF_RECORD_LOST event. The number of
43 * such "chunks" of lost events is stored in .nr_events[PERF_EVENT_LOST] while
44 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
45 * the sum of all struct lost_event.lost fields reported.
46 *
47 * The total_period is needed because by default auto-freq is used, so
48 * multiplying nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get
49 * the total number of low-level events; it is necessary to sum all struct
50 * sample_event.period and stash the result in total_period.
51 */
52struct events_stats {
53 u64 total_period;
54 u64 total_lost;
55 u32 nr_events[PERF_RECORD_HEADER_MAX];
56 u32 nr_unknown_events;
57};
58
59struct hists {
60 struct rb_node rb_node;
61 struct rb_root entries;
62 u64 nr_entries;
63 struct events_stats stats;
64 u64 config;
65 u64 event_stream;
66 u32 type;
67 u32 max_sym_namelen;
68};
69
70struct hist_entry *__hists__add_entry(struct hists *self,
71 struct addr_location *al,
72 struct symbol *parent, u64 period);
19extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); 73extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *);
20extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); 74extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *);
75int hist_entry__fprintf(struct hist_entry *self, struct hists *pair_hists,
76 bool show_displacement, long displacement, FILE *fp,
77 u64 total);
78int hist_entry__snprintf(struct hist_entry *self, char *bf, size_t size,
79 struct hists *pair_hists, bool show_displacement,
80 long displacement, bool color, u64 total);
21void hist_entry__free(struct hist_entry *); 81void hist_entry__free(struct hist_entry *);
22 82
23void perf_session__output_resort(struct rb_root *hists, u64 total_samples); 83void hists__output_resort(struct hists *self);
24void perf_session__collapse_resort(struct rb_root *hists); 84void hists__collapse_resort(struct hists *self);
25size_t perf_session__fprintf_hists(struct rb_root *hists, 85
26 struct perf_session *pair, 86void hists__inc_nr_events(struct hists *self, u32 type);
27 bool show_displacement, FILE *fp, 87size_t hists__fprintf_nr_events(struct hists *self, FILE *fp);
28 u64 session_total); 88
89size_t hists__fprintf(struct hists *self, struct hists *pair,
90 bool show_displacement, FILE *fp);
91
92int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip);
93int hist_entry__annotate(struct hist_entry *self, struct list_head *head);
94
95void hists__filter_by_dso(struct hists *self, const struct dso *dso);
96void hists__filter_by_thread(struct hists *self, const struct thread *thread);
97
98#ifdef NO_NEWT_SUPPORT
99static inline int hists__browse(struct hists *self __used,
100 const char *helpline __used,
101 const char *input_name __used)
102{
103 return 0;
104}
105#else
106int hists__browse(struct hists *self, const char *helpline,
107 const char *input_name);
108#endif
29#endif /* __PERF_HIST_H */ 109#endif /* __PERF_HIST_H */
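The events_stats comment in the hunk above notes that, under auto-freq sampling, each sample carries its own period, so the total cannot be derived from a sample count times a fixed frequency. A minimal sketch of that bookkeeping, with invented per-sample periods and a trimmed-down stats struct:

#include <stdint.h>
#include <stdio.h>

struct events_stats {
	uint64_t total_period;	/* sum of sample_event.period */
	uint32_t nr_samples;
};

static void stats__add_sample(struct events_stats *stats, uint64_t period)
{
	stats->total_period += period;
	stats->nr_samples++;
}

int main(void)
{
	/* invented per-sample periods, as auto-freq sampling would produce */
	uint64_t periods[] = { 1251, 4003, 997, 2500 };
	struct events_stats stats = { 0, 0 };

	for (unsigned i = 0; i < sizeof(periods) / sizeof(periods[0]); i++)
		stats__add_sample(&stats, periods[i]);

	printf("%u samples, total period %llu\n",
	       stats.nr_samples, (unsigned long long)stats.total_period);
	return 0;
}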
diff --git a/tools/perf/util/hweight.c b/tools/perf/util/hweight.c
new file mode 100644
index 000000000000..5c1d0d099f0d
--- /dev/null
+++ b/tools/perf/util/hweight.c
@@ -0,0 +1,31 @@
1#include <linux/bitops.h>
2
3/**
4 * hweightN - returns the Hamming weight of an N-bit word
5 * @x: the word to weigh
6 *
7 * The Hamming Weight of a number is the total number of bits set in it.
8 */
9
10unsigned int hweight32(unsigned int w)
11{
12 unsigned int res = w - ((w >> 1) & 0x55555555);
13 res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
14 res = (res + (res >> 4)) & 0x0F0F0F0F;
15 res = res + (res >> 8);
16 return (res + (res >> 16)) & 0x000000FF;
17}
18
19unsigned long hweight64(__u64 w)
20{
21#if BITS_PER_LONG == 32
22 return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
23#elif BITS_PER_LONG == 64
24 __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
25 res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
26 res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
27 res = res + (res >> 8);
28 res = res + (res >> 16);
29 return (res + (res >> 32)) & 0x00000000000000FFul;
30#endif
31}
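A quick standalone check of the bit-twiddling population count added above, cross-checked against the GCC/Clang __builtin_popcount builtin (an assumption about the toolchain, not something the patch uses):

#include <stdio.h>

/* same bit-twiddling as the hweight32() introduced above */
static unsigned int popcount32(unsigned int w)
{
	unsigned int res = w - ((w >> 1) & 0x55555555);
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res + (res >> 4)) & 0x0F0F0F0F;
	res = res + (res >> 8);
	return (res + (res >> 16)) & 0x000000FF;
}

int main(void)
{
	unsigned int v[] = { 0x0, 0x1, 0xff, 0x80000001u, 0xffffffffu };

	for (unsigned i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("hweight32(%#x) = %u (builtin: %d)\n",
		       v[i], popcount32(v[i]), __builtin_popcount(v[i]));
	return 0;
}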
diff --git a/tools/perf/util/include/asm/bitops.h b/tools/perf/util/include/asm/bitops.h
deleted file mode 100644
index 58e9817ffae0..000000000000
--- a/tools/perf/util/include/asm/bitops.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef _PERF_ASM_BITOPS_H_
2#define _PERF_ASM_BITOPS_H_
3
4#include <sys/types.h>
5#include "../../types.h"
6#include <linux/compiler.h>
7
8/* CHECKME: Not sure both always match */
9#define BITS_PER_LONG __WORDSIZE
10
11#include "../../../../include/asm-generic/bitops/__fls.h"
12#include "../../../../include/asm-generic/bitops/fls.h"
13#include "../../../../include/asm-generic/bitops/fls64.h"
14#include "../../../../include/asm-generic/bitops/__ffs.h"
15#include "../../../../include/asm-generic/bitops/ffz.h"
16#include "../../../../include/asm-generic/bitops/hweight.h"
17
18#endif
diff --git a/tools/perf/util/include/asm/hweight.h b/tools/perf/util/include/asm/hweight.h
new file mode 100644
index 000000000000..36cf26d434a5
--- /dev/null
+++ b/tools/perf/util/include/asm/hweight.h
@@ -0,0 +1,8 @@
1#ifndef PERF_HWEIGHT_H
2#define PERF_HWEIGHT_H
3
4#include <linux/types.h>
5unsigned int hweight32(unsigned int w);
6unsigned long hweight64(__u64 w);
7
8#endif /* PERF_HWEIGHT_H */
diff --git a/tools/perf/util/include/dwarf-regs.h b/tools/perf/util/include/dwarf-regs.h
new file mode 100644
index 000000000000..cf6727e99c44
--- /dev/null
+++ b/tools/perf/util/include/dwarf-regs.h
@@ -0,0 +1,8 @@
1#ifndef _PERF_DWARF_REGS_H_
2#define _PERF_DWARF_REGS_H_
3
4#ifdef DWARF_SUPPORT
5const char *get_arch_regstr(unsigned int n);
6#endif
7
8#endif
diff --git a/tools/perf/util/include/linux/bitmap.h b/tools/perf/util/include/linux/bitmap.h
index 94507639a8c4..eda4416efa0a 100644
--- a/tools/perf/util/include/linux/bitmap.h
+++ b/tools/perf/util/include/linux/bitmap.h
@@ -1,3 +1,35 @@
1#include "../../../../include/linux/bitmap.h" 1#ifndef _PERF_BITOPS_H
2#include "../../../../include/asm-generic/bitops/find.h" 2#define _PERF_BITOPS_H
3#include <linux/errno.h> 3
4#include <string.h>
5#include <linux/bitops.h>
6
7int __bitmap_weight(const unsigned long *bitmap, int bits);
8
9#define BITMAP_LAST_WORD_MASK(nbits) \
10( \
11 ((nbits) % BITS_PER_LONG) ? \
12 (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
13)
14
15#define small_const_nbits(nbits) \
16 (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
17
18static inline void bitmap_zero(unsigned long *dst, int nbits)
19{
20 if (small_const_nbits(nbits))
21 *dst = 0UL;
22 else {
23 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
24 memset(dst, 0, len);
25 }
26}
27
28static inline int bitmap_weight(const unsigned long *src, int nbits)
29{
30 if (small_const_nbits(nbits))
31 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
32 return __bitmap_weight(src, nbits);
33}
34
35#endif /* _PERF_BITOPS_H */
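A sketch of how the bitmap helpers above get used (perf_header__adds_write() counts feature sections with bitmap_weight over HEADER_FEAT_BITS). This is a self-contained approximation, not the patch's code: it substitutes __builtin_popcountl for hweight_long and invents a 3-bit feature mask.

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define BITMAP_LAST_WORD_MASK(nbits)				\
	(((nbits) % BITS_PER_LONG) ?				\
	 (1UL << ((nbits) % BITS_PER_LONG)) - 1 : ~0UL)

static int bitmap_weight(const unsigned long *bitmap, int bits)
{
	int k, w = 0, lim = bits / BITS_PER_LONG;

	for (k = 0; k < lim; k++)
		w += __builtin_popcountl(bitmap[k]);
	if (bits % BITS_PER_LONG)	/* partial last word, masked */
		w += __builtin_popcountl(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
	return w;
}

int main(void)
{
	enum { FEAT_BITS = 3 };			/* stands in for HEADER_FEAT_BITS */
	unsigned long feats[BITS_TO_LONGS(FEAT_BITS)];

	memset(feats, 0, sizeof(feats));
	feats[0] |= 1UL << 1;			/* set two feature bits */
	feats[0] |= 1UL << 2;
	printf("%d sections to write\n", bitmap_weight(feats, FEAT_BITS));
	return 0;
}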
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index 8d63116e9435..bb4ac2e05385 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -1,13 +1,12 @@
1#ifndef _PERF_LINUX_BITOPS_H_ 1#ifndef _PERF_LINUX_BITOPS_H_
2#define _PERF_LINUX_BITOPS_H_ 2#define _PERF_LINUX_BITOPS_H_
3 3
4#define __KERNEL__ 4#include <linux/kernel.h>
5#include <asm/hweight.h>
5 6
6#define CONFIG_GENERIC_FIND_NEXT_BIT 7#define BITS_PER_LONG __WORDSIZE
7#define CONFIG_GENERIC_FIND_FIRST_BIT 8#define BITS_PER_BYTE 8
8#include "../../../../include/linux/bitops.h" 9#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
9
10#undef __KERNEL__
11 10
12static inline void set_bit(int nr, unsigned long *addr) 11static inline void set_bit(int nr, unsigned long *addr)
13{ 12{
@@ -20,10 +19,9 @@ static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
20 (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; 19 (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
21} 20}
22 21
23unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned 22static inline unsigned long hweight_long(unsigned long w)
24 long size, unsigned long offset); 23{
25 24 return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
26unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned 25}
27 long size, unsigned long offset);
28 26
29#endif 27#endif
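
hweight_long() picks between hweight32() and hweight64() with a sizeof() test that the compiler resolves at build time. A sketch of that dispatch, with builtins standing in for the perf helpers:

#include <stdio.h>

static inline unsigned long hweight_long_sketch(unsigned long w)
{
	/* sizeof(w) is a constant, so only one branch survives compilation */
	return sizeof(w) == 4 ? (unsigned long)__builtin_popcount((unsigned int)w)
			      : (unsigned long)__builtin_popcountll(w);
}

int main(void)
{
	printf("%lu\n", hweight_long_sketch(0xf0fUL));	/* prints 8 */
	return 0;
}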
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
index dfb0713ed47f..791f9dd27ebf 100644
--- a/tools/perf/util/include/linux/compiler.h
+++ b/tools/perf/util/include/linux/compiler.h
@@ -7,4 +7,6 @@
7#define __user 7#define __user
8#define __attribute_const__ 8#define __attribute_const__
9 9
10#define __used __attribute__((__unused__))
11
10#endif 12#endif
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
index f2611655ab51..1eb804fd3fbf 100644
--- a/tools/perf/util/include/linux/kernel.h
+++ b/tools/perf/util/include/linux/kernel.h
@@ -28,6 +28,8 @@
28 (type *)((char *)__mptr - offsetof(type, member)); }) 28 (type *)((char *)__mptr - offsetof(type, member)); })
29#endif 29#endif
30 30
31#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
32
31#ifndef max 33#ifndef max
32#define max(x, y) ({ \ 34#define max(x, y) ({ \
33 typeof(x) _max1 = (x); \ 35 typeof(x) _max1 = (x); \
@@ -85,16 +87,19 @@ simple_strtoul(const char *nptr, char **endptr, int base)
85 return strtoul(nptr, endptr, base); 87 return strtoul(nptr, endptr, base);
86} 88}
87 89
90int eprintf(int level,
91 const char *fmt, ...) __attribute__((format(printf, 2, 3)));
92
88#ifndef pr_fmt 93#ifndef pr_fmt
89#define pr_fmt(fmt) fmt 94#define pr_fmt(fmt) fmt
90#endif 95#endif
91 96
92#define pr_err(fmt, ...) \ 97#define pr_err(fmt, ...) \
93 do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0) 98 eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
94#define pr_warning(fmt, ...) \ 99#define pr_warning(fmt, ...) \
95 do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0) 100 eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
96#define pr_info(fmt, ...) \ 101#define pr_info(fmt, ...) \
97 do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0) 102 eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
98#define pr_debug(fmt, ...) \ 103#define pr_debug(fmt, ...) \
99 eprintf(1, pr_fmt(fmt), ##__VA_ARGS__) 104 eprintf(1, pr_fmt(fmt), ##__VA_ARGS__)
100#define pr_debugN(n, fmt, ...) \ 105#define pr_debugN(n, fmt, ...) \
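
After this change every pr_*() macro funnels into eprintf(), with pr_err/pr_warning/pr_info at level 0 and pr_debug at level 1. A minimal sketch of the level gating, assuming a global "verbose" counter like the one perf keeps in debug.c (the real helper can also route messages to the TUI):

#include <stdarg.h>
#include <stdio.h>

static int verbose;	/* bumped by -v on the command line */

int eprintf(int level, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (verbose >= level) {		/* level 0 always prints, level 1 needs -v */
		va_start(args, fmt);
		ret = vfprintf(stderr, fmt, args);
		va_end(args);
	}
	return ret;
}

int main(void)
{
	eprintf(0, "always shown\n");
	eprintf(1, "only shown with -v\n");
	return 0;
}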
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index e509cd59c67d..e672f2fef65b 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -1,9 +1,11 @@
1#include "event.h"
2#include "symbol.h" 1#include "symbol.h"
2#include <errno.h>
3#include <limits.h>
3#include <stdlib.h> 4#include <stdlib.h>
4#include <string.h> 5#include <string.h>
5#include <stdio.h> 6#include <stdio.h>
6#include "debug.h" 7#include <unistd.h>
8#include "map.h"
7 9
8const char *map_type__name[MAP__NR_TYPES] = { 10const char *map_type__name[MAP__NR_TYPES] = {
9 [MAP__FUNCTION] = "Functions", 11 [MAP__FUNCTION] = "Functions",
@@ -36,15 +38,16 @@ void map__init(struct map *self, enum map_type type,
36 self->map_ip = map__map_ip; 38 self->map_ip = map__map_ip;
37 self->unmap_ip = map__unmap_ip; 39 self->unmap_ip = map__unmap_ip;
38 RB_CLEAR_NODE(&self->rb_node); 40 RB_CLEAR_NODE(&self->rb_node);
41 self->groups = NULL;
39} 42}
40 43
41struct map *map__new(struct mmap_event *event, enum map_type type, 44struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
42 char *cwd, int cwdlen) 45 u64 pgoff, u32 pid, char *filename,
46 enum map_type type, char *cwd, int cwdlen)
43{ 47{
44 struct map *self = malloc(sizeof(*self)); 48 struct map *self = malloc(sizeof(*self));
45 49
46 if (self != NULL) { 50 if (self != NULL) {
47 const char *filename = event->filename;
48 char newfilename[PATH_MAX]; 51 char newfilename[PATH_MAX];
49 struct dso *dso; 52 struct dso *dso;
50 int anon; 53 int anon;
@@ -62,16 +65,15 @@ struct map *map__new(struct mmap_event *event, enum map_type type,
62 anon = is_anon_memory(filename); 65 anon = is_anon_memory(filename);
63 66
64 if (anon) { 67 if (anon) {
65 snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid); 68 snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
66 filename = newfilename; 69 filename = newfilename;
67 } 70 }
68 71
69 dso = dsos__findnew(filename); 72 dso = __dsos__findnew(dsos__list, filename);
70 if (dso == NULL) 73 if (dso == NULL)
71 goto out_delete; 74 goto out_delete;
72 75
73 map__init(self, type, event->start, event->start + event->len, 76 map__init(self, type, start, start + len, pgoff, dso);
74 event->pgoff, dso);
75 77
76 if (anon) { 78 if (anon) {
77set_identity: 79set_identity:
@@ -235,3 +237,392 @@ u64 map__objdump_2ip(struct map *map, u64 addr)
235 map->unmap_ip(map, addr); /* RIP -> IP */ 237 map->unmap_ip(map, addr); /* RIP -> IP */
236 return ip; 238 return ip;
237} 239}
240
241void map_groups__init(struct map_groups *self)
242{
243 int i;
244 for (i = 0; i < MAP__NR_TYPES; ++i) {
245 self->maps[i] = RB_ROOT;
246 INIT_LIST_HEAD(&self->removed_maps[i]);
247 }
248 self->machine = NULL;
249}
250
251void map_groups__flush(struct map_groups *self)
252{
253 int type;
254
255 for (type = 0; type < MAP__NR_TYPES; type++) {
256 struct rb_root *root = &self->maps[type];
257 struct rb_node *next = rb_first(root);
258
259 while (next) {
260 struct map *pos = rb_entry(next, struct map, rb_node);
261 next = rb_next(&pos->rb_node);
262 rb_erase(&pos->rb_node, root);
263 /*
264 * We may have references to this map, for
265 * instance in some hist_entry instances, so
266 * just move them to a separate list.
267 */
268 list_add_tail(&pos->node, &self->removed_maps[pos->type]);
269 }
270 }
271}
272
273struct symbol *map_groups__find_symbol(struct map_groups *self,
274 enum map_type type, u64 addr,
275 struct map **mapp,
276 symbol_filter_t filter)
277{
278 struct map *map = map_groups__find(self, type, addr);
279
280 if (map != NULL) {
281 if (mapp != NULL)
282 *mapp = map;
283 return map__find_symbol(map, map->map_ip(map, addr), filter);
284 }
285
286 return NULL;
287}
288
289struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
290 enum map_type type,
291 const char *name,
292 struct map **mapp,
293 symbol_filter_t filter)
294{
295 struct rb_node *nd;
296
297 for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
298 struct map *pos = rb_entry(nd, struct map, rb_node);
299 struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
300
301 if (sym == NULL)
302 continue;
303 if (mapp != NULL)
304 *mapp = pos;
305 return sym;
306 }
307
308 return NULL;
309}
310
311size_t __map_groups__fprintf_maps(struct map_groups *self,
312 enum map_type type, int verbose, FILE *fp)
313{
314 size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
315 struct rb_node *nd;
316
317 for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
318 struct map *pos = rb_entry(nd, struct map, rb_node);
319 printed += fprintf(fp, "Map:");
320 printed += map__fprintf(pos, fp);
321 if (verbose > 2) {
322 printed += dso__fprintf(pos->dso, type, fp);
323 printed += fprintf(fp, "--\n");
324 }
325 }
326
327 return printed;
328}
329
330size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp)
331{
332 size_t printed = 0, i;
333 for (i = 0; i < MAP__NR_TYPES; ++i)
334 printed += __map_groups__fprintf_maps(self, i, verbose, fp);
335 return printed;
336}
337
338static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
339 enum map_type type,
340 int verbose, FILE *fp)
341{
342 struct map *pos;
343 size_t printed = 0;
344
345 list_for_each_entry(pos, &self->removed_maps[type], node) {
346 printed += fprintf(fp, "Map:");
347 printed += map__fprintf(pos, fp);
348 if (verbose > 1) {
349 printed += dso__fprintf(pos->dso, type, fp);
350 printed += fprintf(fp, "--\n");
351 }
352 }
353 return printed;
354}
355
356static size_t map_groups__fprintf_removed_maps(struct map_groups *self,
357 int verbose, FILE *fp)
358{
359 size_t printed = 0, i;
360 for (i = 0; i < MAP__NR_TYPES; ++i)
361 printed += __map_groups__fprintf_removed_maps(self, i, verbose, fp);
362 return printed;
363}
364
365size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp)
366{
367 size_t printed = map_groups__fprintf_maps(self, verbose, fp);
368 printed += fprintf(fp, "Removed maps:\n");
369 return printed + map_groups__fprintf_removed_maps(self, verbose, fp);
370}
371
372int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
373 int verbose, FILE *fp)
374{
375 struct rb_root *root = &self->maps[map->type];
376 struct rb_node *next = rb_first(root);
377
378 while (next) {
379 struct map *pos = rb_entry(next, struct map, rb_node);
380 next = rb_next(&pos->rb_node);
381
382 if (!map__overlap(pos, map))
383 continue;
384
385 if (verbose >= 2) {
386 fputs("overlapping maps:\n", fp);
387 map__fprintf(map, fp);
388 map__fprintf(pos, fp);
389 }
390
391 rb_erase(&pos->rb_node, root);
392 /*
393 * We may have references to this map, for instance in some
394 * hist_entry instances, so just move them to a separate
395 * list.
396 */
397 list_add_tail(&pos->node, &self->removed_maps[map->type]);
398 /*
399 * Now check if we need to create new maps for areas not
400 * overlapped by the new map:
401 */
402 if (map->start > pos->start) {
403 struct map *before = map__clone(pos);
404
405 if (before == NULL)
406 return -ENOMEM;
407
408 before->end = map->start - 1;
409 map_groups__insert(self, before);
410 if (verbose >= 2)
411 map__fprintf(before, fp);
412 }
413
414 if (map->end < pos->end) {
415 struct map *after = map__clone(pos);
416
417 if (after == NULL)
418 return -ENOMEM;
419
420 after->start = map->end + 1;
421 map_groups__insert(self, after);
422 if (verbose >= 2)
423 map__fprintf(after, fp);
424 }
425 }
426
427 return 0;
428}
429
430/*
431 * XXX This should not really _copy_ the maps, but refcount them.
432 */
433int map_groups__clone(struct map_groups *self,
434 struct map_groups *parent, enum map_type type)
435{
436 struct rb_node *nd;
437 for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
438 struct map *map = rb_entry(nd, struct map, rb_node);
439 struct map *new = map__clone(map);
440 if (new == NULL)
441 return -ENOMEM;
442 map_groups__insert(self, new);
443 }
444 return 0;
445}
446
447static u64 map__reloc_map_ip(struct map *map, u64 ip)
448{
449 return ip + (s64)map->pgoff;
450}
451
452static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
453{
454 return ip - (s64)map->pgoff;
455}
456
457void map__reloc_vmlinux(struct map *self)
458{
459 struct kmap *kmap = map__kmap(self);
460 s64 reloc;
461
462 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
463 return;
464
465 reloc = (kmap->ref_reloc_sym->unrelocated_addr -
466 kmap->ref_reloc_sym->addr);
467
468 if (!reloc)
469 return;
470
471 self->map_ip = map__reloc_map_ip;
472 self->unmap_ip = map__reloc_unmap_ip;
473 self->pgoff = reloc;
474}
475
476void maps__insert(struct rb_root *maps, struct map *map)
477{
478 struct rb_node **p = &maps->rb_node;
479 struct rb_node *parent = NULL;
480 const u64 ip = map->start;
481 struct map *m;
482
483 while (*p != NULL) {
484 parent = *p;
485 m = rb_entry(parent, struct map, rb_node);
486 if (ip < m->start)
487 p = &(*p)->rb_left;
488 else
489 p = &(*p)->rb_right;
490 }
491
492 rb_link_node(&map->rb_node, parent, p);
493 rb_insert_color(&map->rb_node, maps);
494}
495
496struct map *maps__find(struct rb_root *maps, u64 ip)
497{
498 struct rb_node **p = &maps->rb_node;
499 struct rb_node *parent = NULL;
500 struct map *m;
501
502 while (*p != NULL) {
503 parent = *p;
504 m = rb_entry(parent, struct map, rb_node);
505 if (ip < m->start)
506 p = &(*p)->rb_left;
507 else if (ip > m->end)
508 p = &(*p)->rb_right;
509 else
510 return m;
511 }
512
513 return NULL;
514}
515
516int machine__init(struct machine *self, const char *root_dir, pid_t pid)
517{
518 map_groups__init(&self->kmaps);
519 RB_CLEAR_NODE(&self->rb_node);
520 INIT_LIST_HEAD(&self->user_dsos);
521 INIT_LIST_HEAD(&self->kernel_dsos);
522
523 self->kmaps.machine = self;
524 self->pid = pid;
525 self->root_dir = strdup(root_dir);
526 return self->root_dir == NULL ? -ENOMEM : 0;
527}
528
529struct machine *machines__add(struct rb_root *self, pid_t pid,
530 const char *root_dir)
531{
532 struct rb_node **p = &self->rb_node;
533 struct rb_node *parent = NULL;
534 struct machine *pos, *machine = malloc(sizeof(*machine));
535
536 if (!machine)
537 return NULL;
538
539 if (machine__init(machine, root_dir, pid) != 0) {
540 free(machine);
541 return NULL;
542 }
543
544 while (*p != NULL) {
545 parent = *p;
546 pos = rb_entry(parent, struct machine, rb_node);
547 if (pid < pos->pid)
548 p = &(*p)->rb_left;
549 else
550 p = &(*p)->rb_right;
551 }
552
553 rb_link_node(&machine->rb_node, parent, p);
554 rb_insert_color(&machine->rb_node, self);
555
556 return machine;
557}
558
559struct machine *machines__find(struct rb_root *self, pid_t pid)
560{
561 struct rb_node **p = &self->rb_node;
562 struct rb_node *parent = NULL;
563 struct machine *machine;
564 struct machine *default_machine = NULL;
565
566 while (*p != NULL) {
567 parent = *p;
568 machine = rb_entry(parent, struct machine, rb_node);
569 if (pid < machine->pid)
570 p = &(*p)->rb_left;
571 else if (pid > machine->pid)
572 p = &(*p)->rb_right;
573 else
574 return machine;
575 if (!machine->pid)
576 default_machine = machine;
577 }
578
579 return default_machine;
580}
581
582struct machine *machines__findnew(struct rb_root *self, pid_t pid)
583{
584 char path[PATH_MAX];
585 const char *root_dir;
586 struct machine *machine = machines__find(self, pid);
587
588 if (!machine || machine->pid != pid) {
589 if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
590 root_dir = "";
591 else {
592 if (!symbol_conf.guestmount)
593 goto out;
594 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
595 if (access(path, R_OK)) {
596 pr_err("Can't access file %s\n", path);
597 goto out;
598 }
599 root_dir = path;
600 }
601 machine = machines__add(self, pid, root_dir);
602 }
603
604out:
605 return machine;
606}
607
608void machines__process(struct rb_root *self, machine__process_t process, void *data)
609{
610 struct rb_node *nd;
611
612 for (nd = rb_first(self); nd; nd = rb_next(nd)) {
613 struct machine *pos = rb_entry(nd, struct machine, rb_node);
614 process(pos, data);
615 }
616}
617
618char *machine__mmap_name(struct machine *self, char *bf, size_t size)
619{
620 if (machine__is_host(self))
621 snprintf(bf, size, "[%s]", "kernel.kallsyms");
622 else if (machine__is_default_guest(self))
623 snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
624 else
625 snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);
626
627 return bf;
628}
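
map_groups__fixup_overlappings() splits any existing map that the incoming one overlaps into "before" and "after" pieces. A toy illustration of the resulting ranges (plain structs, not the perf types):

#include <stdio.h>

struct range { unsigned long long start, end; };

int main(void)
{
	struct range old      = { 0x1000, 0x5000 };	/* existing map */
	struct range incoming = { 0x2000, 0x3000 };	/* new, overlapping map */
	struct range before   = { old.start, incoming.start - 1 };
	struct range after    = { incoming.end + 1, old.end };

	printf("before:   %#llx-%#llx\n", before.start, before.end);
	printf("incoming: %#llx-%#llx\n", incoming.start, incoming.end);
	printf("after:    %#llx-%#llx\n", after.start, after.end);
	return 0;
}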
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index b756368076c6..f39134512829 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -4,7 +4,9 @@
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <linux/list.h> 5#include <linux/list.h>
6#include <linux/rbtree.h> 6#include <linux/rbtree.h>
7#include <linux/types.h> 7#include <stdio.h>
8#include <stdbool.h>
9#include "types.h"
8 10
9enum map_type { 11enum map_type {
10 MAP__FUNCTION = 0, 12 MAP__FUNCTION = 0,
@@ -18,6 +20,7 @@ extern const char *map_type__name[MAP__NR_TYPES];
18struct dso; 20struct dso;
19struct ref_reloc_sym; 21struct ref_reloc_sym;
20struct map_groups; 22struct map_groups;
23struct machine;
21 24
22struct map { 25struct map {
23 union { 26 union {
@@ -27,6 +30,7 @@ struct map {
27 u64 start; 30 u64 start;
28 u64 end; 31 u64 end;
29 enum map_type type; 32 enum map_type type;
33 u32 priv;
30 u64 pgoff; 34 u64 pgoff;
31 35
32 /* ip -> dso rip */ 36 /* ip -> dso rip */
@@ -35,6 +39,7 @@ struct map {
35 u64 (*unmap_ip)(struct map *, u64); 39 u64 (*unmap_ip)(struct map *, u64);
36 40
37 struct dso *dso; 41 struct dso *dso;
42 struct map_groups *groups;
38}; 43};
39 44
40struct kmap { 45struct kmap {
@@ -42,6 +47,32 @@ struct kmap {
42 struct map_groups *kmaps; 47 struct map_groups *kmaps;
43}; 48};
44 49
50struct map_groups {
51 struct rb_root maps[MAP__NR_TYPES];
52 struct list_head removed_maps[MAP__NR_TYPES];
53 struct machine *machine;
54};
55
56/* Native host kernel uses -1 as pid index in machine */
57#define HOST_KERNEL_ID (-1)
58#define DEFAULT_GUEST_KERNEL_ID (0)
59
60struct machine {
61 struct rb_node rb_node;
62 pid_t pid;
63 char *root_dir;
64 struct list_head user_dsos;
65 struct list_head kernel_dsos;
66 struct map_groups kmaps;
67 struct map *vmlinux_maps[MAP__NR_TYPES];
68};
69
70static inline
71struct map *machine__kernel_map(struct machine *self, enum map_type type)
72{
73 return self->vmlinux_maps[type];
74}
75
45static inline struct kmap *map__kmap(struct map *self) 76static inline struct kmap *map__kmap(struct map *self)
46{ 77{
47 return (struct kmap *)(self + 1); 78 return (struct kmap *)(self + 1);
@@ -68,14 +99,14 @@ u64 map__rip_2objdump(struct map *map, u64 rip);
68u64 map__objdump_2ip(struct map *map, u64 addr); 99u64 map__objdump_2ip(struct map *map, u64 addr);
69 100
70struct symbol; 101struct symbol;
71struct mmap_event;
72 102
73typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); 103typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
74 104
75void map__init(struct map *self, enum map_type type, 105void map__init(struct map *self, enum map_type type,
76 u64 start, u64 end, u64 pgoff, struct dso *dso); 106 u64 start, u64 end, u64 pgoff, struct dso *dso);
77struct map *map__new(struct mmap_event *event, enum map_type, 107struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
78 char *cwd, int cwdlen); 108 u64 pgoff, u32 pid, char *filename,
109 enum map_type type, char *cwd, int cwdlen);
79void map__delete(struct map *self); 110void map__delete(struct map *self);
80struct map *map__clone(struct map *self); 111struct map *map__clone(struct map *self);
81int map__overlap(struct map *l, struct map *r); 112int map__overlap(struct map *l, struct map *r);
@@ -91,4 +122,96 @@ void map__fixup_end(struct map *self);
91 122
92void map__reloc_vmlinux(struct map *self); 123void map__reloc_vmlinux(struct map *self);
93 124
125size_t __map_groups__fprintf_maps(struct map_groups *self,
126 enum map_type type, int verbose, FILE *fp);
127void maps__insert(struct rb_root *maps, struct map *map);
128struct map *maps__find(struct rb_root *maps, u64 addr);
129void map_groups__init(struct map_groups *self);
130int map_groups__clone(struct map_groups *self,
131 struct map_groups *parent, enum map_type type);
132size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp);
133size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp);
134
135typedef void (*machine__process_t)(struct machine *self, void *data);
136
137void machines__process(struct rb_root *self, machine__process_t process, void *data);
138struct machine *machines__add(struct rb_root *self, pid_t pid,
139 const char *root_dir);
140struct machine *machines__find_host(struct rb_root *self);
141struct machine *machines__find(struct rb_root *self, pid_t pid);
142struct machine *machines__findnew(struct rb_root *self, pid_t pid);
143char *machine__mmap_name(struct machine *self, char *bf, size_t size);
144int machine__init(struct machine *self, const char *root_dir, pid_t pid);
145
146/*
147 * Default guest kernel is defined by parameter --guestkallsyms
148 * and --guestmodules
149 */
150static inline bool machine__is_default_guest(struct machine *self)
151{
152 return self ? self->pid == DEFAULT_GUEST_KERNEL_ID : false;
153}
154
155static inline bool machine__is_host(struct machine *self)
156{
157 return self ? self->pid == HOST_KERNEL_ID : false;
158}
159
160static inline void map_groups__insert(struct map_groups *self, struct map *map)
161{
162 maps__insert(&self->maps[map->type], map);
163 map->groups = self;
164}
165
166static inline struct map *map_groups__find(struct map_groups *self,
167 enum map_type type, u64 addr)
168{
169 return maps__find(&self->maps[type], addr);
170}
171
172struct symbol *map_groups__find_symbol(struct map_groups *self,
173 enum map_type type, u64 addr,
174 struct map **mapp,
175 symbol_filter_t filter);
176
177struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
178 enum map_type type,
179 const char *name,
180 struct map **mapp,
181 symbol_filter_t filter);
182
183static inline
184struct symbol *machine__find_kernel_symbol(struct machine *self,
185 enum map_type type, u64 addr,
186 struct map **mapp,
187 symbol_filter_t filter)
188{
189 return map_groups__find_symbol(&self->kmaps, type, addr, mapp, filter);
190}
191
192static inline
193struct symbol *machine__find_kernel_function(struct machine *self, u64 addr,
194 struct map **mapp,
195 symbol_filter_t filter)
196{
197 return machine__find_kernel_symbol(self, MAP__FUNCTION, addr, mapp, filter);
198}
199
200static inline
201struct symbol *map_groups__find_function_by_name(struct map_groups *self,
202 const char *name, struct map **mapp,
203 symbol_filter_t filter)
204{
205 return map_groups__find_symbol_by_name(self, MAP__FUNCTION, name, mapp, filter);
206}
207
208int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
209 int verbose, FILE *fp);
210
211struct map *map_groups__find_by_name(struct map_groups *self,
212 enum map_type type, const char *name);
213struct map *machine__new_module(struct machine *self, u64 start, const char *filename);
214
215void map_groups__flush(struct map_groups *self);
216
94#endif /* __PERF_MAP_H */ 217#endif /* __PERF_MAP_H */
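
map__kmap() relies on kernel maps being allocated with a struct kmap placed right behind the struct map, so "(self + 1)" lands on it. A stand-alone sketch of that allocation trick (demo types, not the perf structs):

#include <stdio.h>
#include <stdlib.h>

struct map_demo  { unsigned long long start, end; };
struct kmap_demo { const char *ref_sym; };

/* same pointer arithmetic as map__kmap(): the kmap lives just past the map */
static struct kmap_demo *map_demo__kmap(struct map_demo *m)
{
	return (struct kmap_demo *)(m + 1);
}

int main(void)
{
	struct map_demo *m = calloc(1, sizeof(*m) + sizeof(struct kmap_demo));

	if (m == NULL)
		return 1;
	map_demo__kmap(m)->ref_sym = "_text";
	printf("ref symbol: %s\n", map_demo__kmap(m)->ref_sym);
	free(m);
	return 0;
}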
diff --git a/tools/perf/util/newt.c b/tools/perf/util/newt.c
new file mode 100644
index 000000000000..ccb7c5bb269e
--- /dev/null
+++ b/tools/perf/util/newt.c
@@ -0,0 +1,1084 @@
1#define _GNU_SOURCE
2#include <stdio.h>
3#undef _GNU_SOURCE
4
5#include <slang.h>
6#include <stdlib.h>
7#include <newt.h>
8#include <sys/ttydefaults.h>
9
10#include "cache.h"
11#include "hist.h"
12#include "pstack.h"
13#include "session.h"
14#include "sort.h"
15#include "symbol.h"
16
17#if SLANG_VERSION < 20104
18#define slsmg_printf(msg, args...) SLsmg_printf((char *)msg, ##args)
19#define slsmg_write_nstring(msg, len) SLsmg_write_nstring((char *)msg, len)
20#define sltt_set_color(obj, name, fg, bg) SLtt_set_color(obj,(char *)name,\
21 (char *)fg, (char *)bg)
22#else
23#define slsmg_printf SLsmg_printf
24#define slsmg_write_nstring SLsmg_write_nstring
25#define sltt_set_color SLtt_set_color
26#endif
27
28struct ui_progress {
29 newtComponent form, scale;
30};
31
32struct ui_progress *ui_progress__new(const char *title, u64 total)
33{
34 struct ui_progress *self = malloc(sizeof(*self));
35
36 if (self != NULL) {
37 int cols;
38 newtGetScreenSize(&cols, NULL);
39 cols -= 4;
40 newtCenteredWindow(cols, 1, title);
41 self->form = newtForm(NULL, NULL, 0);
42 if (self->form == NULL)
43 goto out_free_self;
44 self->scale = newtScale(0, 0, cols, total);
45 if (self->scale == NULL)
46 goto out_free_form;
47 newtFormAddComponent(self->form, self->scale);
48 newtRefresh();
49 }
50
51 return self;
52
53out_free_form:
54 newtFormDestroy(self->form);
55out_free_self:
56 free(self);
57 return NULL;
58}
59
60void ui_progress__update(struct ui_progress *self, u64 curr)
61{
62 newtScaleSet(self->scale, curr);
63 newtRefresh();
64}
65
66void ui_progress__delete(struct ui_progress *self)
67{
68 newtFormDestroy(self->form);
69 newtPopWindow();
70 free(self);
71}
72
73static void ui_helpline__pop(void)
74{
75 newtPopHelpLine();
76}
77
78static void ui_helpline__push(const char *msg)
79{
80 newtPushHelpLine(msg);
81}
82
83static void ui_helpline__vpush(const char *fmt, va_list ap)
84{
85 char *s;
86
87 if (vasprintf(&s, fmt, ap) < 0)
88 vfprintf(stderr, fmt, ap);
89 else {
90 ui_helpline__push(s);
91 free(s);
92 }
93}
94
95static void ui_helpline__fpush(const char *fmt, ...)
96{
97 va_list ap;
98
99 va_start(ap, fmt);
100 ui_helpline__vpush(fmt, ap);
101 va_end(ap);
102}
103
104static void ui_helpline__puts(const char *msg)
105{
106 ui_helpline__pop();
107 ui_helpline__push(msg);
108}
109
110static char browser__last_msg[1024];
111
112int browser__show_help(const char *format, va_list ap)
113{
114 int ret;
115 static int backlog;
116
117 ret = vsnprintf(browser__last_msg + backlog,
118 sizeof(browser__last_msg) - backlog, format, ap);
119 backlog += ret;
120
121 if (browser__last_msg[backlog - 1] == '\n') {
122 ui_helpline__puts(browser__last_msg);
123 newtRefresh();
124 backlog = 0;
125 }
126
127 return ret;
128}
129
130static void newt_form__set_exit_keys(newtComponent self)
131{
132 newtFormAddHotKey(self, NEWT_KEY_LEFT);
133 newtFormAddHotKey(self, NEWT_KEY_ESCAPE);
134 newtFormAddHotKey(self, 'Q');
135 newtFormAddHotKey(self, 'q');
136 newtFormAddHotKey(self, CTRL('c'));
137}
138
139static newtComponent newt_form__new(void)
140{
141 newtComponent self = newtForm(NULL, NULL, 0);
142 if (self)
143 newt_form__set_exit_keys(self);
144 return self;
145}
146
147static int popup_menu(int argc, char * const argv[])
148{
149 struct newtExitStruct es;
150 int i, rc = -1, max_len = 5;
151 newtComponent listbox, form = newt_form__new();
152
153 if (form == NULL)
154 return -1;
155
156 listbox = newtListbox(0, 0, argc, NEWT_FLAG_RETURNEXIT);
157 if (listbox == NULL)
158 goto out_destroy_form;
159
160 newtFormAddComponent(form, listbox);
161
162 for (i = 0; i < argc; ++i) {
163 int len = strlen(argv[i]);
164 if (len > max_len)
165 max_len = len;
166 if (newtListboxAddEntry(listbox, argv[i], (void *)(long)i))
167 goto out_destroy_form;
168 }
169
170 newtCenteredWindow(max_len, argc, NULL);
171 newtFormRun(form, &es);
172 rc = newtListboxGetCurrent(listbox) - NULL;
173 if (es.reason == NEWT_EXIT_HOTKEY)
174 rc = -1;
175 newtPopWindow();
176out_destroy_form:
177 newtFormDestroy(form);
178 return rc;
179}
180
181static int ui__help_window(const char *text)
182{
183 struct newtExitStruct es;
184 newtComponent tb, form = newt_form__new();
185 int rc = -1;
186 int max_len = 0, nr_lines = 0;
187 const char *t;
188
189 if (form == NULL)
190 return -1;
191
192 t = text;
193 while (1) {
194 const char *sep = strchr(t, '\n');
195 int len;
196
197 if (sep == NULL)
198 sep = strchr(t, '\0');
199 len = sep - t;
200 if (max_len < len)
201 max_len = len;
202 ++nr_lines;
203 if (*sep == '\0')
204 break;
205 t = sep + 1;
206 }
207
208 tb = newtTextbox(0, 0, max_len, nr_lines, 0);
209 if (tb == NULL)
210 goto out_destroy_form;
211
212 newtTextboxSetText(tb, text);
213 newtFormAddComponent(form, tb);
214 newtCenteredWindow(max_len, nr_lines, NULL);
215 newtFormRun(form, &es);
216 newtPopWindow();
217 rc = 0;
218out_destroy_form:
219 newtFormDestroy(form);
220 return rc;
221}
222
223static bool dialog_yesno(const char *msg)
224{
225 /* newtWinChoice should really be accepting const char pointers... */
226 char yes[] = "Yes", no[] = "No";
227 return newtWinChoice(NULL, yes, no, (char *)msg) == 1;
228}
229
230#define HE_COLORSET_TOP 50
231#define HE_COLORSET_MEDIUM 51
232#define HE_COLORSET_NORMAL 52
233#define HE_COLORSET_SELECTED 53
234#define HE_COLORSET_CODE 54
235
236static int ui_browser__percent_color(double percent, bool current)
237{
238 if (current)
239 return HE_COLORSET_SELECTED;
240 if (percent >= MIN_RED)
241 return HE_COLORSET_TOP;
242 if (percent >= MIN_GREEN)
243 return HE_COLORSET_MEDIUM;
244 return HE_COLORSET_NORMAL;
245}
246
247struct ui_browser {
248 newtComponent form, sb;
249 u64 index, first_visible_entry_idx;
250 void *first_visible_entry, *entries;
251 u16 top, left, width, height;
252 void *priv;
253 u32 nr_entries;
254};
255
256static void ui_browser__refresh_dimensions(struct ui_browser *self)
257{
258 int cols, rows;
259 newtGetScreenSize(&cols, &rows);
260
261 if (self->width > cols - 4)
262 self->width = cols - 4;
263 self->height = rows - 5;
264 if (self->height > self->nr_entries)
265 self->height = self->nr_entries;
266 self->top = (rows - self->height) / 2;
267 self->left = (cols - self->width) / 2;
268}
269
270static void ui_browser__reset_index(struct ui_browser *self)
271{
272 self->index = self->first_visible_entry_idx = 0;
273 self->first_visible_entry = NULL;
274}
275
276static int objdump_line__show(struct objdump_line *self, struct list_head *head,
277 int width, struct hist_entry *he, int len,
278 bool current_entry)
279{
280 if (self->offset != -1) {
281 struct symbol *sym = he->ms.sym;
282 unsigned int hits = 0;
283 double percent = 0.0;
284 int color;
285 struct sym_priv *priv = symbol__priv(sym);
286 struct sym_ext *sym_ext = priv->ext;
287 struct sym_hist *h = priv->hist;
288 s64 offset = self->offset;
289 struct objdump_line *next = objdump__get_next_ip_line(head, self);
290
291 while (offset < (s64)len &&
292 (next == NULL || offset < next->offset)) {
293 if (sym_ext) {
294 percent += sym_ext[offset].percent;
295 } else
296 hits += h->ip[offset];
297
298 ++offset;
299 }
300
301 if (sym_ext == NULL && h->sum)
302 percent = 100.0 * hits / h->sum;
303
304 color = ui_browser__percent_color(percent, current_entry);
305 SLsmg_set_color(color);
306 slsmg_printf(" %7.2f ", percent);
307 if (!current_entry)
308 SLsmg_set_color(HE_COLORSET_CODE);
309 } else {
310 int color = ui_browser__percent_color(0, current_entry);
311 SLsmg_set_color(color);
312 slsmg_write_nstring(" ", 9);
313 }
314
315 SLsmg_write_char(':');
316 slsmg_write_nstring(" ", 8);
317 if (!*self->line)
318 slsmg_write_nstring(" ", width - 18);
319 else
320 slsmg_write_nstring(self->line, width - 18);
321
322 return 0;
323}
324
325static int ui_browser__refresh_entries(struct ui_browser *self)
326{
327 struct objdump_line *pos;
328 struct list_head *head = self->entries;
329 struct hist_entry *he = self->priv;
330 int row = 0;
331 int len = he->ms.sym->end - he->ms.sym->start;
332
333 if (self->first_visible_entry == NULL || self->first_visible_entry == self->entries)
334 self->first_visible_entry = head->next;
335
336 pos = list_entry(self->first_visible_entry, struct objdump_line, node);
337
338 list_for_each_entry_from(pos, head, node) {
339 bool current_entry = (self->first_visible_entry_idx + row) == self->index;
340 SLsmg_gotorc(self->top + row, self->left);
341 objdump_line__show(pos, head, self->width,
342 he, len, current_entry);
343 if (++row == self->height)
344 break;
345 }
346
347 SLsmg_set_color(HE_COLORSET_NORMAL);
348 SLsmg_fill_region(self->top + row, self->left,
349 self->height - row, self->width, ' ');
350
351 return 0;
352}
353
354static int ui_browser__run(struct ui_browser *self, const char *title,
355 struct newtExitStruct *es)
356{
357 if (self->form) {
358 newtFormDestroy(self->form);
359 newtPopWindow();
360 }
361
362 ui_browser__refresh_dimensions(self);
363 newtCenteredWindow(self->width + 2, self->height, title);
364 self->form = newt_form__new();
365 if (self->form == NULL)
366 return -1;
367
368 self->sb = newtVerticalScrollbar(self->width + 1, 0, self->height,
369 HE_COLORSET_NORMAL,
370 HE_COLORSET_SELECTED);
371 if (self->sb == NULL)
372 return -1;
373
374 newtFormAddHotKey(self->form, NEWT_KEY_UP);
375 newtFormAddHotKey(self->form, NEWT_KEY_DOWN);
376 newtFormAddHotKey(self->form, NEWT_KEY_PGUP);
377 newtFormAddHotKey(self->form, NEWT_KEY_PGDN);
378 newtFormAddHotKey(self->form, NEWT_KEY_HOME);
379 newtFormAddHotKey(self->form, NEWT_KEY_END);
380
381 if (ui_browser__refresh_entries(self) < 0)
382 return -1;
383 newtFormAddComponent(self->form, self->sb);
384
385 while (1) {
386 unsigned int offset;
387
388 newtFormRun(self->form, es);
389
390 if (es->reason != NEWT_EXIT_HOTKEY)
391 break;
392 switch (es->u.key) {
393 case NEWT_KEY_DOWN:
394 if (self->index == self->nr_entries - 1)
395 break;
396 ++self->index;
397 if (self->index == self->first_visible_entry_idx + self->height) {
398 struct list_head *pos = self->first_visible_entry;
399 ++self->first_visible_entry_idx;
400 self->first_visible_entry = pos->next;
401 }
402 break;
403 case NEWT_KEY_UP:
404 if (self->index == 0)
405 break;
406 --self->index;
407 if (self->index < self->first_visible_entry_idx) {
408 struct list_head *pos = self->first_visible_entry;
409 --self->first_visible_entry_idx;
410 self->first_visible_entry = pos->prev;
411 }
412 break;
413 case NEWT_KEY_PGDN:
414 if (self->first_visible_entry_idx + self->height > self->nr_entries - 1)
415 break;
416
417 offset = self->height;
418 if (self->index + offset > self->nr_entries - 1)
419 offset = self->nr_entries - 1 - self->index;
420 self->index += offset;
421 self->first_visible_entry_idx += offset;
422
423 while (offset--) {
424 struct list_head *pos = self->first_visible_entry;
425 self->first_visible_entry = pos->next;
426 }
427
428 break;
429 case NEWT_KEY_PGUP:
430 if (self->first_visible_entry_idx == 0)
431 break;
432
433 if (self->first_visible_entry_idx < self->height)
434 offset = self->first_visible_entry_idx;
435 else
436 offset = self->height;
437
438 self->index -= offset;
439 self->first_visible_entry_idx -= offset;
440
441 while (offset--) {
442 struct list_head *pos = self->first_visible_entry;
443 self->first_visible_entry = pos->prev;
444 }
445 break;
446 case NEWT_KEY_HOME:
447 ui_browser__reset_index(self);
448 break;
449 case NEWT_KEY_END: {
450 struct list_head *head = self->entries;
451 offset = self->height - 1;
452
453 if (offset > self->nr_entries)
454 offset = self->nr_entries;
455
456 self->index = self->first_visible_entry_idx = self->nr_entries - 1 - offset;
457 self->first_visible_entry = head->prev;
458 while (offset-- != 0) {
459 struct list_head *pos = self->first_visible_entry;
460 self->first_visible_entry = pos->prev;
461 }
462 }
463 break;
464 case NEWT_KEY_ESCAPE:
465 case NEWT_KEY_LEFT:
466 case CTRL('c'):
467 case 'Q':
468 case 'q':
469 return 0;
470 default:
471 continue;
472 }
473 if (ui_browser__refresh_entries(self) < 0)
474 return -1;
475 }
476 return 0;
477}
478
479/*
480 * When debugging newt problems it was useful to be able to "unroll"
481 * the calls to newtCheckBoxTreeAdd{Array,Item}, so that we can generate
482 * a source file with the sequence of calls to these methods, to then
483 * tweak the arrays to get the intended results, so I'm keeping this code
484 * here; it may be useful again in the future.
485 */
486#undef NEWT_DEBUG
487
488static void newt_checkbox_tree__add(newtComponent tree, const char *str,
489 void *priv, int *indexes)
490{
491#ifdef NEWT_DEBUG
492 /* Print the newtCheckboxTreeAddArray to tinker with its index arrays */
493 int i = 0, len = 40 - strlen(str);
494
495 fprintf(stderr,
496 "\tnewtCheckboxTreeAddItem(tree, %*.*s\"%s\", (void *)%p, 0, ",
497 len, len, " ", str, priv);
498 while (indexes[i] != NEWT_ARG_LAST) {
499 if (indexes[i] != NEWT_ARG_APPEND)
500 fprintf(stderr, " %d,", indexes[i]);
501 else
502 fprintf(stderr, " %s,", "NEWT_ARG_APPEND");
503 ++i;
504 }
505 fprintf(stderr, " %s", " NEWT_ARG_LAST);\n");
506 fflush(stderr);
507#endif
508 newtCheckboxTreeAddArray(tree, str, priv, 0, indexes);
509}
510
511static char *callchain_list__sym_name(struct callchain_list *self,
512 char *bf, size_t bfsize)
513{
514 if (self->ms.sym)
515 return self->ms.sym->name;
516
517 snprintf(bf, bfsize, "%#Lx", self->ip);
518 return bf;
519}
520
521static void __callchain__append_graph_browser(struct callchain_node *self,
522 newtComponent tree, u64 total,
523 int *indexes, int depth)
524{
525 struct rb_node *node;
526 u64 new_total, remaining;
527 int idx = 0;
528
529 if (callchain_param.mode == CHAIN_GRAPH_REL)
530 new_total = self->children_hit;
531 else
532 new_total = total;
533
534 remaining = new_total;
535 node = rb_first(&self->rb_root);
536 while (node) {
537 struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
538 struct rb_node *next = rb_next(node);
539 u64 cumul = cumul_hits(child);
540 struct callchain_list *chain;
541 int first = true, printed = 0;
542 int chain_idx = -1;
543 remaining -= cumul;
544
545 indexes[depth] = NEWT_ARG_APPEND;
546 indexes[depth + 1] = NEWT_ARG_LAST;
547
548 list_for_each_entry(chain, &child->val, list) {
549 char ipstr[BITS_PER_LONG / 4 + 1],
550 *alloc_str = NULL;
551 const char *str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
552
553 if (first) {
554 double percent = cumul * 100.0 / new_total;
555
556 first = false;
557 if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
558 str = "Not enough memory!";
559 else
560 str = alloc_str;
561 } else {
562 indexes[depth] = idx;
563 indexes[depth + 1] = NEWT_ARG_APPEND;
564 indexes[depth + 2] = NEWT_ARG_LAST;
565 ++chain_idx;
566 }
567 newt_checkbox_tree__add(tree, str, &chain->ms, indexes);
568 free(alloc_str);
569 ++printed;
570 }
571
572 indexes[depth] = idx;
573 if (chain_idx != -1)
574 indexes[depth + 1] = chain_idx;
575 if (printed != 0)
576 ++idx;
577 __callchain__append_graph_browser(child, tree, new_total, indexes,
578 depth + (chain_idx != -1 ? 2 : 1));
579 node = next;
580 }
581}
582
583static void callchain__append_graph_browser(struct callchain_node *self,
584 newtComponent tree, u64 total,
585 int *indexes, int parent_idx)
586{
587 struct callchain_list *chain;
588 int i = 0;
589
590 indexes[1] = NEWT_ARG_APPEND;
591 indexes[2] = NEWT_ARG_LAST;
592
593 list_for_each_entry(chain, &self->val, list) {
594 char ipstr[BITS_PER_LONG / 4 + 1], *str;
595
596 if (chain->ip >= PERF_CONTEXT_MAX)
597 continue;
598
599 if (!i++ && sort__first_dimension == SORT_SYM)
600 continue;
601
602 str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
603 newt_checkbox_tree__add(tree, str, &chain->ms, indexes);
604 }
605
606 indexes[1] = parent_idx;
607 indexes[2] = NEWT_ARG_APPEND;
608 indexes[3] = NEWT_ARG_LAST;
609 __callchain__append_graph_browser(self, tree, total, indexes, 2);
610}
611
612static void hist_entry__append_callchain_browser(struct hist_entry *self,
613 newtComponent tree, u64 total, int parent_idx)
614{
615 struct rb_node *rb_node;
616 int indexes[1024] = { [0] = parent_idx, };
617 int idx = 0;
618 struct callchain_node *chain;
619
620 rb_node = rb_first(&self->sorted_chain);
621 while (rb_node) {
622 chain = rb_entry(rb_node, struct callchain_node, rb_node);
623 switch (callchain_param.mode) {
624 case CHAIN_FLAT:
625 break;
626 case CHAIN_GRAPH_ABS: /* fall through */
627 case CHAIN_GRAPH_REL:
628 callchain__append_graph_browser(chain, tree, total, indexes, idx++);
629 break;
630 case CHAIN_NONE:
631 default:
632 break;
633 }
634 rb_node = rb_next(rb_node);
635 }
636}
637
638static size_t hist_entry__append_browser(struct hist_entry *self,
639 newtComponent tree, u64 total)
640{
641 char s[256];
642 size_t ret;
643
644 if (symbol_conf.exclude_other && !self->parent)
645 return 0;
646
647 ret = hist_entry__snprintf(self, s, sizeof(s), NULL,
648 false, 0, false, total);
649 if (symbol_conf.use_callchain) {
650 int indexes[2];
651
652 indexes[0] = NEWT_ARG_APPEND;
653 indexes[1] = NEWT_ARG_LAST;
654 newt_checkbox_tree__add(tree, s, &self->ms, indexes);
655 } else
656 newtListboxAppendEntry(tree, s, &self->ms);
657
658 return ret;
659}
660
661static void hist_entry__annotate_browser(struct hist_entry *self)
662{
663 struct ui_browser browser;
664 struct newtExitStruct es;
665 struct objdump_line *pos, *n;
666 LIST_HEAD(head);
667
668 if (self->ms.sym == NULL)
669 return;
670
671 if (hist_entry__annotate(self, &head) < 0)
672 return;
673
674 ui_helpline__push("Press <- or ESC to exit");
675
676 memset(&browser, 0, sizeof(browser));
677 browser.entries = &head;
678 browser.priv = self;
679 list_for_each_entry(pos, &head, node) {
680 size_t line_len = strlen(pos->line);
681 if (browser.width < line_len)
682 browser.width = line_len;
683 ++browser.nr_entries;
684 }
685
686 browser.width += 18; /* Percentage */
687 ui_browser__run(&browser, self->ms.sym->name, &es);
688 newtFormDestroy(browser.form);
689 newtPopWindow();
690 list_for_each_entry_safe(pos, n, &head, node) {
691 list_del(&pos->node);
692 objdump_line__free(pos);
693 }
694 ui_helpline__pop();
695}
696
697static const void *newt__symbol_tree_get_current(newtComponent self)
698{
699 if (symbol_conf.use_callchain)
700 return newtCheckboxTreeGetCurrent(self);
701 return newtListboxGetCurrent(self);
702}
703
704static void hist_browser__selection(newtComponent self, void *data)
705{
706 const struct map_symbol **symbol_ptr = data;
707 *symbol_ptr = newt__symbol_tree_get_current(self);
708}
709
710struct hist_browser {
711 newtComponent form, tree;
712 const struct map_symbol *selection;
713};
714
715static struct hist_browser *hist_browser__new(void)
716{
717 struct hist_browser *self = malloc(sizeof(*self));
718
719 if (self != NULL)
720 self->form = NULL;
721
722 return self;
723}
724
725static void hist_browser__delete(struct hist_browser *self)
726{
727 newtFormDestroy(self->form);
728 newtPopWindow();
729 free(self);
730}
731
732static int hist_browser__populate(struct hist_browser *self, struct hists *hists,
733 const char *title)
734{
735 int max_len = 0, idx, cols, rows;
736 struct ui_progress *progress;
737 struct rb_node *nd;
738 u64 curr_hist = 0;
739 char seq[] = ".", unit;
740 char str[256];
741 unsigned long nr_events = hists->stats.nr_events[PERF_RECORD_SAMPLE];
742
743 if (self->form) {
744 newtFormDestroy(self->form);
745 newtPopWindow();
746 }
747
748 nr_events = convert_unit(nr_events, &unit);
749 snprintf(str, sizeof(str), "Events: %lu%c ",
750 nr_events, unit);
751 newtDrawRootText(0, 0, str);
752
753 newtGetScreenSize(NULL, &rows);
754
755 if (symbol_conf.use_callchain)
756 self->tree = newtCheckboxTreeMulti(0, 0, rows - 5, seq,
757 NEWT_FLAG_SCROLL);
758 else
759 self->tree = newtListbox(0, 0, rows - 5,
760 (NEWT_FLAG_SCROLL |
761 NEWT_FLAG_RETURNEXIT));
762
763 newtComponentAddCallback(self->tree, hist_browser__selection,
764 &self->selection);
765
766 progress = ui_progress__new("Adding entries to the browser...",
767 hists->nr_entries);
768 if (progress == NULL)
769 return -1;
770
771 idx = 0;
772 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
773 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
774 int len;
775
776 if (h->filtered)
777 continue;
778
779 len = hist_entry__append_browser(h, self->tree, hists->stats.total_period);
780 if (len > max_len)
781 max_len = len;
782 if (symbol_conf.use_callchain)
783 hist_entry__append_callchain_browser(h, self->tree,
784 hists->stats.total_period, idx++);
785 ++curr_hist;
786 if (curr_hist % 5)
787 ui_progress__update(progress, curr_hist);
788 }
789
790 ui_progress__delete(progress);
791
792 newtGetScreenSize(&cols, &rows);
793
794 if (max_len > cols)
795 max_len = cols - 3;
796
797 if (!symbol_conf.use_callchain)
798 newtListboxSetWidth(self->tree, max_len);
799
800 newtCenteredWindow(max_len + (symbol_conf.use_callchain ? 5 : 0),
801 rows - 5, title);
802 self->form = newt_form__new();
803 if (self->form == NULL)
804 return -1;
805
806 newtFormAddHotKey(self->form, 'A');
807 newtFormAddHotKey(self->form, 'a');
808 newtFormAddHotKey(self->form, 'D');
809 newtFormAddHotKey(self->form, 'd');
810 newtFormAddHotKey(self->form, 'T');
811 newtFormAddHotKey(self->form, 't');
812 newtFormAddHotKey(self->form, '?');
813 newtFormAddHotKey(self->form, 'H');
814 newtFormAddHotKey(self->form, 'h');
815 newtFormAddHotKey(self->form, NEWT_KEY_F1);
816 newtFormAddHotKey(self->form, NEWT_KEY_RIGHT);
817 newtFormAddComponents(self->form, self->tree, NULL);
818 self->selection = newt__symbol_tree_get_current(self->tree);
819
820 return 0;
821}
822
823static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self)
824{
825 int *indexes;
826
827 if (!symbol_conf.use_callchain)
828 goto out;
829
830 indexes = newtCheckboxTreeFindItem(self->tree, (void *)self->selection);
831 if (indexes) {
832 bool is_hist_entry = indexes[1] == NEWT_ARG_LAST;
833 free(indexes);
834 if (is_hist_entry)
835 goto out;
836 }
837 return NULL;
838out:
839 return container_of(self->selection, struct hist_entry, ms);
840}
841
842static struct thread *hist_browser__selected_thread(struct hist_browser *self)
843{
844 struct hist_entry *he = hist_browser__selected_entry(self);
845 return he ? he->thread : NULL;
846}
847
848static int hist_browser__title(char *bf, size_t size, const char *input_name,
849 const struct dso *dso, const struct thread *thread)
850{
851 int printed = 0;
852
853 if (thread)
854 printed += snprintf(bf + printed, size - printed,
855 "Thread: %s(%d)",
856 (thread->comm_set ? thread->comm : ""),
857 thread->pid);
858 if (dso)
859 printed += snprintf(bf + printed, size - printed,
860 "%sDSO: %s", thread ? " " : "",
861 dso->short_name);
862 return printed ?: snprintf(bf, size, "Report: %s", input_name);
863}
864
865int hists__browse(struct hists *self, const char *helpline, const char *input_name)
866{
867 struct hist_browser *browser = hist_browser__new();
868 struct pstack *fstack;
869 const struct thread *thread_filter = NULL;
870 const struct dso *dso_filter = NULL;
871 struct newtExitStruct es;
872 char msg[160];
873 int err = -1;
874
875 if (browser == NULL)
876 return -1;
877
878 fstack = pstack__new(2);
879 if (fstack == NULL)
880 goto out;
881
882 ui_helpline__push(helpline);
883
884 hist_browser__title(msg, sizeof(msg), input_name,
885 dso_filter, thread_filter);
886 if (hist_browser__populate(browser, self, msg) < 0)
887 goto out_free_stack;
888
889 while (1) {
890 const struct thread *thread;
891 const struct dso *dso;
892 char *options[16];
893 int nr_options = 0, choice = 0, i,
894 annotate = -2, zoom_dso = -2, zoom_thread = -2;
895
896 newtFormRun(browser->form, &es);
897
898 thread = hist_browser__selected_thread(browser);
899 dso = browser->selection->map ? browser->selection->map->dso : NULL;
900
901 if (es.reason == NEWT_EXIT_HOTKEY) {
902 if (es.u.key == NEWT_KEY_F1)
903 goto do_help;
904
905 switch (toupper(es.u.key)) {
906 case 'A':
907 goto do_annotate;
908 case 'D':
909 goto zoom_dso;
910 case 'T':
911 goto zoom_thread;
912 case 'H':
913 case '?':
914do_help:
915 ui__help_window("-> Zoom into DSO/Threads & Annotate current symbol\n"
916 "<- Zoom out\n"
917 "a Annotate current symbol\n"
918 "h/?/F1 Show this window\n"
919 "d Zoom into current DSO\n"
920 "t Zoom into current Thread\n"
921 "q/CTRL+C Exit browser");
922 continue;
923 default:;
924 }
925 if (toupper(es.u.key) == 'Q' ||
926 es.u.key == CTRL('c'))
927 break;
928 if (es.u.key == NEWT_KEY_ESCAPE) {
929 if (dialog_yesno("Do you really want to exit?"))
930 break;
931 else
932 continue;
933 }
934
935 if (es.u.key == NEWT_KEY_LEFT) {
936 const void *top;
937
938 if (pstack__empty(fstack))
939 continue;
940 top = pstack__pop(fstack);
941 if (top == &dso_filter)
942 goto zoom_out_dso;
943 if (top == &thread_filter)
944 goto zoom_out_thread;
945 continue;
946 }
947 }
948
949 if (browser->selection->sym != NULL &&
950 asprintf(&options[nr_options], "Annotate %s",
951 browser->selection->sym->name) > 0)
952 annotate = nr_options++;
953
954 if (thread != NULL &&
955 asprintf(&options[nr_options], "Zoom %s %s(%d) thread",
956 (thread_filter ? "out of" : "into"),
957 (thread->comm_set ? thread->comm : ""),
958 thread->pid) > 0)
959 zoom_thread = nr_options++;
960
961 if (dso != NULL &&
962 asprintf(&options[nr_options], "Zoom %s %s DSO",
963 (dso_filter ? "out of" : "into"),
964 (dso->kernel ? "the Kernel" : dso->short_name)) > 0)
965 zoom_dso = nr_options++;
966
967 options[nr_options++] = (char *)"Exit";
968
969 choice = popup_menu(nr_options, options);
970
971 for (i = 0; i < nr_options - 1; ++i)
972 free(options[i]);
973
974 if (choice == nr_options - 1)
975 break;
976
977 if (choice == -1)
978 continue;
979
980 if (choice == annotate) {
981 struct hist_entry *he;
982do_annotate:
983 if (browser->selection->map->dso->origin == DSO__ORIG_KERNEL) {
984 ui_helpline__puts("No vmlinux file found, can't "
985 "annotate with just a "
986 "kallsyms file");
987 continue;
988 }
989
990 he = hist_browser__selected_entry(browser);
991 if (he == NULL)
992 continue;
993
994 hist_entry__annotate_browser(he);
995 } else if (choice == zoom_dso) {
996zoom_dso:
997 if (dso_filter) {
998 pstack__remove(fstack, &dso_filter);
999zoom_out_dso:
1000 ui_helpline__pop();
1001 dso_filter = NULL;
1002 } else {
1003 if (dso == NULL)
1004 continue;
1005 ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"",
1006 dso->kernel ? "the Kernel" : dso->short_name);
1007 dso_filter = dso;
1008 pstack__push(fstack, &dso_filter);
1009 }
1010 hists__filter_by_dso(self, dso_filter);
1011 hist_browser__title(msg, sizeof(msg), input_name,
1012 dso_filter, thread_filter);
1013 if (hist_browser__populate(browser, self, msg) < 0)
1014 goto out;
1015 } else if (choice == zoom_thread) {
1016zoom_thread:
1017 if (thread_filter) {
1018 pstack__remove(fstack, &thread_filter);
1019zoom_out_thread:
1020 ui_helpline__pop();
1021 thread_filter = NULL;
1022 } else {
1023 ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
1024 thread->comm_set ? thread->comm : "",
1025 thread->pid);
1026 thread_filter = thread;
1027 pstack__push(fstack, &thread_filter);
1028 }
1029 hists__filter_by_thread(self, thread_filter);
1030 hist_browser__title(msg, sizeof(msg), input_name,
1031 dso_filter, thread_filter);
1032 if (hist_browser__populate(browser, self, msg) < 0)
1033 goto out;
1034 }
1035 }
1036 err = 0;
1037out_free_stack:
1038 pstack__delete(fstack);
1039out:
1040 hist_browser__delete(browser);
1041 return err;
1042}
1043
1044static struct newtPercentTreeColors {
1045 const char *topColorFg, *topColorBg;
1046 const char *mediumColorFg, *mediumColorBg;
1047 const char *normalColorFg, *normalColorBg;
1048 const char *selColorFg, *selColorBg;
1049 const char *codeColorFg, *codeColorBg;
1050} defaultPercentTreeColors = {
1051 "red", "lightgray",
1052 "green", "lightgray",
1053 "black", "lightgray",
1054 "lightgray", "magenta",
1055 "blue", "lightgray",
1056};
1057
1058void setup_browser(void)
1059{
1060 struct newtPercentTreeColors *c = &defaultPercentTreeColors;
1061 if (!isatty(1))
1062 return;
1063
1064 use_browser = true;
1065 newtInit();
1066 newtCls();
1067 ui_helpline__puts(" ");
1068 sltt_set_color(HE_COLORSET_TOP, NULL, c->topColorFg, c->topColorBg);
1069 sltt_set_color(HE_COLORSET_MEDIUM, NULL, c->mediumColorFg, c->mediumColorBg);
1070 sltt_set_color(HE_COLORSET_NORMAL, NULL, c->normalColorFg, c->normalColorBg);
1071 sltt_set_color(HE_COLORSET_SELECTED, NULL, c->selColorFg, c->selColorBg);
1072 sltt_set_color(HE_COLORSET_CODE, NULL, c->codeColorFg, c->codeColorBg);
1073}
1074
1075void exit_browser(bool wait_for_ok)
1076{
1077 if (use_browser) {
1078 if (wait_for_ok) {
1079 char title[] = "Fatal Error", ok[] = "Ok";
1080 newtWinMessage(title, ok, browser__last_msg);
1081 }
1082 newtFinished();
1083 }
1084}
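
setup_browser() only switches to the newt TUI when stdout is a terminal, so redirected output stays plain text. The gist of that check, reduced to a runnable sketch (use_browser here is a local stand-in for perf's global):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool use_browser;

static void setup_output(void)
{
	if (!isatty(STDOUT_FILENO))	/* e.g. "perf report > out.txt" */
		return;
	use_browser = true;		/* newtInit()/newtCls() would follow here */
}

int main(void)
{
	setup_output();
	printf("TUI enabled: %s\n", use_browser ? "yes" : "no");
	return 0;
}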
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 05d0c5c2030c..9bf0f402ca73 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -5,6 +5,7 @@
5#include "parse-events.h" 5#include "parse-events.h"
6#include "exec_cmd.h" 6#include "exec_cmd.h"
7#include "string.h" 7#include "string.h"
8#include "symbol.h"
8#include "cache.h" 9#include "cache.h"
9#include "header.h" 10#include "header.h"
10#include "debugfs.h" 11#include "debugfs.h"
@@ -409,7 +410,6 @@ static enum event_result
409parse_single_tracepoint_event(char *sys_name, 410parse_single_tracepoint_event(char *sys_name,
410 const char *evt_name, 411 const char *evt_name,
411 unsigned int evt_length, 412 unsigned int evt_length,
412 char *flags,
413 struct perf_event_attr *attr, 413 struct perf_event_attr *attr,
414 const char **strp) 414 const char **strp)
415{ 415{
@@ -418,14 +418,6 @@ parse_single_tracepoint_event(char *sys_name,
418 u64 id; 418 u64 id;
419 int fd; 419 int fd;
420 420
421 if (flags) {
422 if (!strncmp(flags, "record", strlen(flags))) {
423 attr->sample_type |= PERF_SAMPLE_RAW;
424 attr->sample_type |= PERF_SAMPLE_TIME;
425 attr->sample_type |= PERF_SAMPLE_CPU;
426 }
427 }
428
429 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path, 421 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
430 sys_name, evt_name); 422 sys_name, evt_name);
431 423
@@ -444,6 +436,13 @@ parse_single_tracepoint_event(char *sys_name,
444 attr->type = PERF_TYPE_TRACEPOINT; 436 attr->type = PERF_TYPE_TRACEPOINT;
445 *strp = evt_name + evt_length; 437 *strp = evt_name + evt_length;
446 438
439 attr->sample_type |= PERF_SAMPLE_RAW;
440 attr->sample_type |= PERF_SAMPLE_TIME;
441 attr->sample_type |= PERF_SAMPLE_CPU;
442
443 attr->sample_period = 1;
444
445
447 return EVT_HANDLED; 446 return EVT_HANDLED;
448} 447}
449 448
@@ -532,8 +531,7 @@ static enum event_result parse_tracepoint_event(const char **strp,
532 flags); 531 flags);
533 } else 532 } else
534 return parse_single_tracepoint_event(sys_name, evt_name, 533 return parse_single_tracepoint_event(sys_name, evt_name,
535 evt_length, flags, 534 evt_length, attr, strp);
536 attr, strp);
537} 535}
538 536
539static enum event_result 537static enum event_result
@@ -690,19 +688,29 @@ static enum event_result
690parse_event_modifier(const char **strp, struct perf_event_attr *attr) 688parse_event_modifier(const char **strp, struct perf_event_attr *attr)
691{ 689{
692 const char *str = *strp; 690 const char *str = *strp;
693 int eu = 1, ek = 1, eh = 1; 691 int exclude = 0;
692 int eu = 0, ek = 0, eh = 0, precise = 0;
694 693
695 if (*str++ != ':') 694 if (*str++ != ':')
696 return 0; 695 return 0;
697 while (*str) { 696 while (*str) {
698 if (*str == 'u') 697 if (*str == 'u') {
698 if (!exclude)
699 exclude = eu = ek = eh = 1;
699 eu = 0; 700 eu = 0;
700 else if (*str == 'k') 701 } else if (*str == 'k') {
702 if (!exclude)
703 exclude = eu = ek = eh = 1;
701 ek = 0; 704 ek = 0;
702 else if (*str == 'h') 705 } else if (*str == 'h') {
706 if (!exclude)
707 exclude = eu = ek = eh = 1;
703 eh = 0; 708 eh = 0;
704 else 709 } else if (*str == 'p') {
710 precise++;
711 } else
705 break; 712 break;
713
706 ++str; 714 ++str;
707 } 715 }
708 if (str >= *strp + 2) { 716 if (str >= *strp + 2) {
@@ -710,6 +718,7 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr)
710 attr->exclude_user = eu; 718 attr->exclude_user = eu;
711 attr->exclude_kernel = ek; 719 attr->exclude_kernel = ek;
712 attr->exclude_hv = eh; 720 attr->exclude_hv = eh;
721 attr->precise_ip = precise;
713 return 1; 722 return 1;
714 } 723 }
715 return 0; 724 return 0;
@@ -934,7 +943,8 @@ void print_events(void)
934 943
935 printf("\n"); 944 printf("\n");
936 printf(" %-42s [%s]\n", 945 printf(" %-42s [%s]\n",
937 "rNNN", event_type_descriptors[PERF_TYPE_RAW]); 946 "rNNN (see 'perf list --help' on how to encode it)",
947 event_type_descriptors[PERF_TYPE_RAW]);
938 printf("\n"); 948 printf("\n");
939 949
940 printf(" %-42s [%s]\n", 950 printf(" %-42s [%s]\n",
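
parse_event_modifier() now treats the first u/k/h seen as "exclude everything, then re-enable what is listed", and counts each 'p' into precise_ip. The same logic restated as a small self-contained program (struct attr is a stand-in for perf_event_attr):

#include <stdio.h>
#include <string.h>

struct attr { int exclude_user, exclude_kernel, exclude_hv, precise_ip; };

static void parse_mod(const char *mod, struct attr *a)
{
	int exclude = 0;

	memset(a, 0, sizeof(*a));
	for (; *mod; mod++) {
		if (*mod == 'u') {
			if (!exclude)
				exclude = a->exclude_user = a->exclude_kernel = a->exclude_hv = 1;
			a->exclude_user = 0;
		} else if (*mod == 'k') {
			if (!exclude)
				exclude = a->exclude_user = a->exclude_kernel = a->exclude_hv = 1;
			a->exclude_kernel = 0;
		} else if (*mod == 'h') {
			if (!exclude)
				exclude = a->exclude_user = a->exclude_kernel = a->exclude_hv = 1;
			a->exclude_hv = 0;
		} else if (*mod == 'p') {
			a->precise_ip++;
		}
	}
}

int main(void)
{
	struct attr a;

	parse_mod("upp", &a);	/* as in "-e cycles:upp" */
	printf("user=%d kernel=%d hv=%d precise=%d\n",
	       !a.exclude_user, !a.exclude_kernel, !a.exclude_hv, a.precise_ip);
	return 0;		/* prints user=1 kernel=0 hv=0 precise=2 */
}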
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index b8c1f64bc935..fc4ab3fe877a 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -13,6 +13,7 @@ struct tracepoint_path {
13}; 13};
14 14
15extern struct tracepoint_path *tracepoint_id_to_path(u64 config); 15extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
16extern bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events);
16 17
17extern int nr_counters; 18extern int nr_counters;
18 19
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index efebd5b476b3..99d02aa57dbf 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -49,8 +49,9 @@ static int get_value(struct parse_opt_ctx_t *p,
49 break; 49 break;
50 /* FALLTHROUGH */ 50 /* FALLTHROUGH */
51 case OPTION_BOOLEAN: 51 case OPTION_BOOLEAN:
52 case OPTION_INCR:
52 case OPTION_BIT: 53 case OPTION_BIT:
53 case OPTION_SET_INT: 54 case OPTION_SET_UINT:
54 case OPTION_SET_PTR: 55 case OPTION_SET_PTR:
55 return opterror(opt, "takes no value", flags); 56 return opterror(opt, "takes no value", flags);
56 case OPTION_END: 57 case OPTION_END:
@@ -58,7 +59,9 @@ static int get_value(struct parse_opt_ctx_t *p,
58 case OPTION_GROUP: 59 case OPTION_GROUP:
59 case OPTION_STRING: 60 case OPTION_STRING:
60 case OPTION_INTEGER: 61 case OPTION_INTEGER:
62 case OPTION_UINTEGER:
61 case OPTION_LONG: 63 case OPTION_LONG:
64 case OPTION_U64:
62 default: 65 default:
63 break; 66 break;
64 } 67 }
@@ -73,11 +76,15 @@ static int get_value(struct parse_opt_ctx_t *p,
73 return 0; 76 return 0;
74 77
75 case OPTION_BOOLEAN: 78 case OPTION_BOOLEAN:
79 *(bool *)opt->value = unset ? false : true;
80 return 0;
81
82 case OPTION_INCR:
76 *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1; 83 *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1;
77 return 0; 84 return 0;
78 85
79 case OPTION_SET_INT: 86 case OPTION_SET_UINT:
80 *(int *)opt->value = unset ? 0 : opt->defval; 87 *(unsigned int *)opt->value = unset ? 0 : opt->defval;
81 return 0; 88 return 0;
82 89
83 case OPTION_SET_PTR: 90 case OPTION_SET_PTR:
@@ -120,6 +127,22 @@ static int get_value(struct parse_opt_ctx_t *p,
120 return opterror(opt, "expects a numerical value", flags); 127 return opterror(opt, "expects a numerical value", flags);
121 return 0; 128 return 0;
122 129
130 case OPTION_UINTEGER:
131 if (unset) {
132 *(unsigned int *)opt->value = 0;
133 return 0;
134 }
135 if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
136 *(unsigned int *)opt->value = opt->defval;
137 return 0;
138 }
139 if (get_arg(p, opt, flags, &arg))
140 return -1;
141 *(unsigned int *)opt->value = strtol(arg, (char **)&s, 10);
142 if (*s)
143 return opterror(opt, "expects a numerical value", flags);
144 return 0;
145
123 case OPTION_LONG: 146 case OPTION_LONG:
124 if (unset) { 147 if (unset) {
125 *(long *)opt->value = 0; 148 *(long *)opt->value = 0;
@@ -136,6 +159,22 @@ static int get_value(struct parse_opt_ctx_t *p,
136 return opterror(opt, "expects a numerical value", flags); 159 return opterror(opt, "expects a numerical value", flags);
137 return 0; 160 return 0;
138 161
162 case OPTION_U64:
163 if (unset) {
164 *(u64 *)opt->value = 0;
165 return 0;
166 }
167 if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
168 *(u64 *)opt->value = opt->defval;
169 return 0;
170 }
171 if (get_arg(p, opt, flags, &arg))
172 return -1;
173 *(u64 *)opt->value = strtoull(arg, (char **)&s, 10);
174 if (*s)
175 return opterror(opt, "expects a numerical value", flags);
176 return 0;
177
139 case OPTION_END: 178 case OPTION_END:
140 case OPTION_ARGUMENT: 179 case OPTION_ARGUMENT:
141 case OPTION_GROUP: 180 case OPTION_GROUP:
@@ -441,7 +480,10 @@ int usage_with_options_internal(const char * const *usagestr,
441 switch (opts->type) { 480 switch (opts->type) {
442 case OPTION_ARGUMENT: 481 case OPTION_ARGUMENT:
443 break; 482 break;
483 case OPTION_LONG:
484 case OPTION_U64:
444 case OPTION_INTEGER: 485 case OPTION_INTEGER:
486 case OPTION_UINTEGER:
445 if (opts->flags & PARSE_OPT_OPTARG) 487 if (opts->flags & PARSE_OPT_OPTARG)
446 if (opts->long_name) 488 if (opts->long_name)
447 pos += fprintf(stderr, "[=<n>]"); 489 pos += fprintf(stderr, "[=<n>]");
@@ -473,14 +515,14 @@ int usage_with_options_internal(const char * const *usagestr,
473 pos += fprintf(stderr, " ..."); 515 pos += fprintf(stderr, " ...");
474 } 516 }
475 break; 517 break;
476 default: /* OPTION_{BIT,BOOLEAN,SET_INT,SET_PTR} */ 518 default: /* OPTION_{BIT,BOOLEAN,SET_UINT,SET_PTR} */
477 case OPTION_END: 519 case OPTION_END:
478 case OPTION_GROUP: 520 case OPTION_GROUP:
479 case OPTION_BIT: 521 case OPTION_BIT:
480 case OPTION_BOOLEAN: 522 case OPTION_BOOLEAN:
481 case OPTION_SET_INT: 523 case OPTION_INCR:
524 case OPTION_SET_UINT:
482 case OPTION_SET_PTR: 525 case OPTION_SET_PTR:
483 case OPTION_LONG:
484 break; 526 break;
485 } 527 }
486 528
@@ -500,6 +542,7 @@ int usage_with_options_internal(const char * const *usagestr,
500void usage_with_options(const char * const *usagestr, 542void usage_with_options(const char * const *usagestr,
501 const struct option *opts) 543 const struct option *opts)
502{ 544{
545 exit_browser(false);
503 usage_with_options_internal(usagestr, opts, 0); 546 usage_with_options_internal(usagestr, opts, 0);
504 exit(129); 547 exit(129);
505} 548}
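As an aside (illustrative, not from the patch): the new OPTION_UINTEGER and OPTION_U64 branches above use the usual end-pointer idiom to reject trailing junk after the number. A standalone equivalent of the OPTION_U64 case:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper mirroring the OPTION_U64 branch: parse a u64-style
 * value and reject anything that is not pure digits. */
static int parse_u64_arg(const char *arg, unsigned long long *val)
{
        char *end;

        *val = strtoull(arg, &end, 10);
        if (*end != '\0')
                return -1;      /* e.g. "100k" -> "expects a numerical value" */
        return 0;
}

int main(void)
{
        unsigned long long v;

        printf("%d\n", parse_u64_arg("4096", &v));      /* 0, v == 4096 */
        printf("%d\n", parse_u64_arg("4k", &v));        /* -1 */
        return 0;
}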
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index 948805af43c2..c7d72dce54b2 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -1,6 +1,9 @@
1#ifndef __PERF_PARSE_OPTIONS_H 1#ifndef __PERF_PARSE_OPTIONS_H
2#define __PERF_PARSE_OPTIONS_H 2#define __PERF_PARSE_OPTIONS_H
3 3
4#include <linux/kernel.h>
5#include <stdbool.h>
6
4enum parse_opt_type { 7enum parse_opt_type {
5 /* special types */ 8 /* special types */
6 OPTION_END, 9 OPTION_END,
@@ -8,14 +11,17 @@ enum parse_opt_type {
8 OPTION_GROUP, 11 OPTION_GROUP,
9 /* options with no arguments */ 12 /* options with no arguments */
10 OPTION_BIT, 13 OPTION_BIT,
11 OPTION_BOOLEAN, /* _INCR would have been a better name */ 14 OPTION_BOOLEAN,
12 OPTION_SET_INT, 15 OPTION_INCR,
16 OPTION_SET_UINT,
13 OPTION_SET_PTR, 17 OPTION_SET_PTR,
14 /* options with arguments (usually) */ 18 /* options with arguments (usually) */
15 OPTION_STRING, 19 OPTION_STRING,
16 OPTION_INTEGER, 20 OPTION_INTEGER,
17 OPTION_LONG, 21 OPTION_LONG,
18 OPTION_CALLBACK, 22 OPTION_CALLBACK,
23 OPTION_U64,
24 OPTION_UINTEGER,
19}; 25};
20 26
21enum parse_opt_flags { 27enum parse_opt_flags {
@@ -73,7 +79,7 @@ typedef int parse_opt_cb(const struct option *, const char *arg, int unset);
73 * 79 *
74 * `defval`:: 80 * `defval`::
75 * default value to fill (*->value) with for PARSE_OPT_OPTARG. 81 * default value to fill (*->value) with for PARSE_OPT_OPTARG.
76 * OPTION_{BIT,SET_INT,SET_PTR} store the {mask,integer,pointer} to put in 82 * OPTION_{BIT,SET_UINT,SET_PTR} store the {mask,integer,pointer} to put in
77 * the value when met. 83 * the value when met.
78 * CALLBACKS can use it like they want. 84 * CALLBACKS can use it like they want.
79 */ 85 */
@@ -90,16 +96,21 @@ struct option {
90 intptr_t defval; 96 intptr_t defval;
91}; 97};
92 98
99#define check_vtype(v, type) ( BUILD_BUG_ON_ZERO(!__builtin_types_compatible_p(typeof(v), type)) + v )
100
93#define OPT_END() { .type = OPTION_END } 101#define OPT_END() { .type = OPTION_END }
94#define OPT_ARGUMENT(l, h) { .type = OPTION_ARGUMENT, .long_name = (l), .help = (h) } 102#define OPT_ARGUMENT(l, h) { .type = OPTION_ARGUMENT, .long_name = (l), .help = (h) }
95#define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) } 103#define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) }
96#define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (b) } 104#define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) }
97#define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = (v), .help = (h) } 105#define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h) }
98#define OPT_SET_INT(s, l, v, h, i) { .type = OPTION_SET_INT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (i) } 106#define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) }
107#define OPT_SET_UINT(s, l, v, h, i) { .type = OPTION_SET_UINT, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h), .defval = (i) }
99#define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) } 108#define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) }
100#define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = (v), .help = (h) } 109#define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) }
101#define OPT_LONG(s, l, v, h) { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = (v), .help = (h) } 110#define OPT_UINTEGER(s, l, v, h) { .type = OPTION_UINTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h) }
102#define OPT_STRING(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h) } 111#define OPT_LONG(s, l, v, h) { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) }
112#define OPT_U64(s, l, v, h) { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) }
113#define OPT_STRING(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h) }
103#define OPT_DATE(s, l, v, h) \ 114#define OPT_DATE(s, l, v, h) \
104 { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb } 115 { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
105#define OPT_CALLBACK(s, l, v, a, h, f) \ 116#define OPT_CALLBACK(s, l, v, a, h, f) \
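A standalone sketch (identifiers below are hypothetical, kernel-style GCC extensions assumed) of what check_vtype() buys: it adds BUILD_BUG_ON_ZERO(!__builtin_types_compatible_p(...)) to the value pointer (presumably why <linux/kernel.h> is now included), so handing an OPT_* macro a pointer of the wrong type fails at compile time instead of being silently cast. BUILD_BUG_ON_ZERO is redefined locally here, mirroring the kernel definition, so the example builds on its own:

#include <stdbool.h>

#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
#define check_vtype(v, type) \
        (BUILD_BUG_ON_ZERO(!__builtin_types_compatible_p(typeof(v), type)) + (v))

static bool dry_run;
static int verbose;

int main(void)
{
        bool *ok = check_vtype(&dry_run, bool *);       /* type matches: compiles */
        /* int *bad = check_vtype(&verbose, bool *); */ /* mismatch: compile error */
        (void)ok;
        (void)verbose;
        return 0;
}

This is also why OPT_BOOLEAN now takes a bool * while the old counting behaviour moves to the new OPT_INCR, which keeps an int *.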
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 7c004b6ef24f..914c67095d96 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -33,20 +33,27 @@
33#include <limits.h> 33#include <limits.h>
34 34
35#undef _GNU_SOURCE 35#undef _GNU_SOURCE
36#include "util.h"
36#include "event.h" 37#include "event.h"
37#include "string.h" 38#include "string.h"
38#include "strlist.h" 39#include "strlist.h"
39#include "debug.h" 40#include "debug.h"
40#include "cache.h" 41#include "cache.h"
41#include "color.h" 42#include "color.h"
42#include "parse-events.h" /* For debugfs_path */ 43#include "symbol.h"
44#include "thread.h"
45#include "debugfs.h"
46#include "trace-event.h" /* For __unused */
43#include "probe-event.h" 47#include "probe-event.h"
48#include "probe-finder.h"
44 49
45#define MAX_CMDLEN 256 50#define MAX_CMDLEN 256
46#define MAX_PROBE_ARGS 128 51#define MAX_PROBE_ARGS 128
47#define PERFPROBE_GROUP "probe" 52#define PERFPROBE_GROUP "probe"
48 53
49#define semantic_error(msg ...) die("Semantic error :" msg) 54bool probe_event_dry_run; /* Dry run flag */
55
56#define semantic_error(msg ...) pr_err("Semantic error :" msg)
50 57
51/* If there is no space to write, returns -E2BIG. */ 58/* If there is no space to write, returns -E2BIG. */
52static int e_snprintf(char *str, size_t size, const char *format, ...) 59static int e_snprintf(char *str, size_t size, const char *format, ...)
@@ -64,7 +71,275 @@ static int e_snprintf(char *str, size_t size, const char *format, ...)
64 return ret; 71 return ret;
65} 72}
66 73
67void parse_line_range_desc(const char *arg, struct line_range *lr) 74static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
75static struct machine machine;
76
77/* Initialize symbol maps and path of vmlinux */
78static int init_vmlinux(void)
79{
80 struct dso *kernel;
81 int ret;
82
83 symbol_conf.sort_by_name = true;
84 if (symbol_conf.vmlinux_name == NULL)
85 symbol_conf.try_vmlinux_path = true;
86 else
87 pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
88 ret = symbol__init();
89 if (ret < 0) {
90 pr_debug("Failed to init symbol map.\n");
91 goto out;
92 }
93
94 ret = machine__init(&machine, "/", 0);
95 if (ret < 0)
96 goto out;
97
98 kernel = dso__new_kernel(symbol_conf.vmlinux_name);
99 if (kernel == NULL)
100 die("Failed to create kernel dso.");
101
102 ret = __machine__create_kernel_maps(&machine, kernel);
103 if (ret < 0)
104 pr_debug("Failed to create kernel maps.\n");
105
106out:
107 if (ret < 0)
108 pr_warning("Failed to init vmlinux path.\n");
109 return ret;
110}
111
112#ifdef DWARF_SUPPORT
113static int open_vmlinux(void)
114{
115 if (map__load(machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) {
116 pr_debug("Failed to load kernel map.\n");
117 return -EINVAL;
118 }
119 pr_debug("Try to open %s\n", machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name);
120 return open(machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name, O_RDONLY);
121}
122
123/* Convert trace point to probe point with debuginfo */
124static int convert_to_perf_probe_point(struct kprobe_trace_point *tp,
125 struct perf_probe_point *pp)
126{
127 struct symbol *sym;
128 int fd, ret = -ENOENT;
129
130 sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION],
131 tp->symbol, NULL);
132 if (sym) {
133 fd = open_vmlinux();
134 if (fd >= 0) {
135 ret = find_perf_probe_point(fd,
136 sym->start + tp->offset, pp);
137 close(fd);
138 }
139 }
140 if (ret <= 0) {
141 pr_debug("Failed to find corresponding probes from "
142 "debuginfo. Use kprobe event information.\n");
143 pp->function = strdup(tp->symbol);
144 if (pp->function == NULL)
145 return -ENOMEM;
146 pp->offset = tp->offset;
147 }
148 pp->retprobe = tp->retprobe;
149
150 return 0;
151}
152
153/* Try to find perf_probe_event with debuginfo */
154static int try_to_find_kprobe_trace_events(struct perf_probe_event *pev,
155 struct kprobe_trace_event **tevs,
156 int max_tevs)
157{
158 bool need_dwarf = perf_probe_event_need_dwarf(pev);
159 int fd, ntevs;
160
161 fd = open_vmlinux();
162 if (fd < 0) {
163 if (need_dwarf) {
164 pr_warning("Failed to open debuginfo file.\n");
165 return fd;
166 }
167 pr_debug("Could not open vmlinux. Try to use symbols.\n");
168 return 0;
169 }
170
171 /* Searching trace events corresponding to probe event */
172 ntevs = find_kprobe_trace_events(fd, pev, tevs, max_tevs);
173 close(fd);
174
175 if (ntevs > 0) { /* Succeeded to find trace events */
176 pr_debug("find %d kprobe_trace_events.\n", ntevs);
177 return ntevs;
178 }
179
180 if (ntevs == 0) { /* No error but failed to find probe point. */
181 pr_warning("Probe point '%s' not found.\n",
182 synthesize_perf_probe_point(&pev->point));
183 return -ENOENT;
184 }
185 /* Error path : ntevs < 0 */
186 pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
187 if (ntevs == -EBADF) {
188 pr_warning("Warning: No dwarf info found in the vmlinux - "
189 "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
190 if (!need_dwarf) {
191 pr_debug("Trying to use symbols.\nn");
192 return 0;
193 }
194 }
195 return ntevs;
196}
197
198#define LINEBUF_SIZE 256
199#define NR_ADDITIONAL_LINES 2
200
201static int show_one_line(FILE *fp, int l, bool skip, bool show_num)
202{
203 char buf[LINEBUF_SIZE];
204 const char *color = PERF_COLOR_BLUE;
205
206 if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
207 goto error;
208 if (!skip) {
209 if (show_num)
210 fprintf(stdout, "%7d %s", l, buf);
211 else
212 color_fprintf(stdout, color, " %s", buf);
213 }
214
215 while (strlen(buf) == LINEBUF_SIZE - 1 &&
216 buf[LINEBUF_SIZE - 2] != '\n') {
217 if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
218 goto error;
219 if (!skip) {
220 if (show_num)
221 fprintf(stdout, "%s", buf);
222 else
223 color_fprintf(stdout, color, "%s", buf);
224 }
225 }
226
227 return 0;
228error:
229 if (feof(fp))
230 pr_warning("Source file is shorter than expected.\n");
231 else
232 pr_warning("File read error: %s\n", strerror(errno));
233
234 return -1;
235}
236
237/*
238 * Show line-range always requires debuginfo to find source file and
239 * line number.
240 */
241int show_line_range(struct line_range *lr)
242{
243 int l = 1;
244 struct line_node *ln;
245 FILE *fp;
246 int fd, ret;
247
248 /* Search a line range */
249 ret = init_vmlinux();
250 if (ret < 0)
251 return ret;
252
253 fd = open_vmlinux();
254 if (fd < 0) {
255 pr_warning("Failed to open debuginfo file.\n");
256 return fd;
257 }
258
259 ret = find_line_range(fd, lr);
260 close(fd);
261 if (ret == 0) {
262 pr_warning("Specified source line is not found.\n");
263 return -ENOENT;
264 } else if (ret < 0) {
265 pr_warning("Debuginfo analysis failed. (%d)\n", ret);
266 return ret;
267 }
268
269 setup_pager();
270
271 if (lr->function)
272 fprintf(stdout, "<%s:%d>\n", lr->function,
273 lr->start - lr->offset);
274 else
275 fprintf(stdout, "<%s:%d>\n", lr->file, lr->start);
276
277 fp = fopen(lr->path, "r");
278 if (fp == NULL) {
279 pr_warning("Failed to open %s: %s\n", lr->path,
280 strerror(errno));
281 return -errno;
282 }
283 /* Skip to starting line number */
284 while (l < lr->start && ret >= 0)
285 ret = show_one_line(fp, l++, true, false);
286 if (ret < 0)
287 goto end;
288
289 list_for_each_entry(ln, &lr->line_list, list) {
290 while (ln->line > l && ret >= 0)
291 ret = show_one_line(fp, (l++) - lr->offset,
292 false, false);
293 if (ret >= 0)
294 ret = show_one_line(fp, (l++) - lr->offset,
295 false, true);
296 if (ret < 0)
297 goto end;
298 }
299
300 if (lr->end == INT_MAX)
301 lr->end = l + NR_ADDITIONAL_LINES;
302 while (l <= lr->end && !feof(fp) && ret >= 0)
303 ret = show_one_line(fp, (l++) - lr->offset, false, false);
304end:
305 fclose(fp);
306 return ret;
307}
308
309#else /* !DWARF_SUPPORT */
310
311static int convert_to_perf_probe_point(struct kprobe_trace_point *tp,
312 struct perf_probe_point *pp)
313{
314 pp->function = strdup(tp->symbol);
315 if (pp->function == NULL)
316 return -ENOMEM;
317 pp->offset = tp->offset;
318 pp->retprobe = tp->retprobe;
319
320 return 0;
321}
322
323static int try_to_find_kprobe_trace_events(struct perf_probe_event *pev,
324 struct kprobe_trace_event **tevs __unused,
325 int max_tevs __unused)
326{
327 if (perf_probe_event_need_dwarf(pev)) {
328 pr_warning("Debuginfo-analysis is not supported.\n");
329 return -ENOSYS;
330 }
331 return 0;
332}
333
334int show_line_range(struct line_range *lr __unused)
335{
336 pr_warning("Debuginfo-analysis is not supported.\n");
337 return -ENOSYS;
338}
339
340#endif
341
342int parse_line_range_desc(const char *arg, struct line_range *lr)
68{ 343{
69 const char *ptr; 344 const char *ptr;
70 char *tmp; 345 char *tmp;
@@ -75,29 +350,45 @@ void parse_line_range_desc(const char *arg, struct line_range *lr)
75 */ 350 */
76 ptr = strchr(arg, ':'); 351 ptr = strchr(arg, ':');
77 if (ptr) { 352 if (ptr) {
78 lr->start = (unsigned int)strtoul(ptr + 1, &tmp, 0); 353 lr->start = (int)strtoul(ptr + 1, &tmp, 0);
79 if (*tmp == '+') 354 if (*tmp == '+') {
80 lr->end = lr->start + (unsigned int)strtoul(tmp + 1, 355 lr->end = lr->start + (int)strtoul(tmp + 1, &tmp, 0);
81 &tmp, 0); 356 lr->end--; /*
82 else if (*tmp == '-') 357 * Adjust the number of lines here.
83 lr->end = (unsigned int)strtoul(tmp + 1, &tmp, 0); 358 * If the number of lines == 1, the
359 * end of line should be equal to
360 * the start of line.
361 */
362 } else if (*tmp == '-')
363 lr->end = (int)strtoul(tmp + 1, &tmp, 0);
84 else 364 else
85 lr->end = 0; 365 lr->end = INT_MAX;
86 pr_debug("Line range is %u to %u\n", lr->start, lr->end); 366 pr_debug("Line range is %d to %d\n", lr->start, lr->end);
87 if (lr->end && lr->start > lr->end) 367 if (lr->start > lr->end) {
88 semantic_error("Start line must be smaller" 368 semantic_error("Start line must be smaller"
89 " than end line."); 369 " than end line.\n");
90 if (*tmp != '\0') 370 return -EINVAL;
91 semantic_error("Tailing with invalid character '%d'.", 371 }
372 if (*tmp != '\0') {
373 semantic_error("Tailing with invalid character '%d'.\n",
92 *tmp); 374 *tmp);
375 return -EINVAL;
376 }
93 tmp = strndup(arg, (ptr - arg)); 377 tmp = strndup(arg, (ptr - arg));
94 } else 378 } else {
95 tmp = strdup(arg); 379 tmp = strdup(arg);
380 lr->end = INT_MAX;
381 }
382
383 if (tmp == NULL)
384 return -ENOMEM;
96 385
97 if (strchr(tmp, '.')) 386 if (strchr(tmp, '.'))
98 lr->file = tmp; 387 lr->file = tmp;
99 else 388 else
100 lr->function = tmp; 389 lr->function = tmp;
390
391 return 0;
101} 392}
102 393
103/* Check the name is good for event/group */ 394/* Check the name is good for event/group */
@@ -113,8 +404,9 @@ static bool check_event_name(const char *name)
113} 404}
114 405
115/* Parse probepoint definition. */ 406/* Parse probepoint definition. */
116static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp) 407static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
117{ 408{
409 struct perf_probe_point *pp = &pev->point;
118 char *ptr, *tmp; 410 char *ptr, *tmp;
119 char c, nc = 0; 411 char c, nc = 0;
120 /* 412 /*
@@ -129,13 +421,19 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
129 if (ptr && *ptr == '=') { /* Event name */ 421 if (ptr && *ptr == '=') { /* Event name */
130 *ptr = '\0'; 422 *ptr = '\0';
131 tmp = ptr + 1; 423 tmp = ptr + 1;
132 ptr = strchr(arg, ':'); 424 if (strchr(arg, ':')) {
133 if (ptr) /* Group name is not supported yet. */ 425 semantic_error("Group name is not supported yet.\n");
134 semantic_error("Group name is not supported yet."); 426 return -ENOTSUP;
135 if (!check_event_name(arg)) 427 }
428 if (!check_event_name(arg)) {
136 semantic_error("%s is bad for event name -it must " 429 semantic_error("%s is bad for event name -it must "
137 "follow C symbol-naming rule.", arg); 430 "follow C symbol-naming rule.\n", arg);
138 pp->event = strdup(arg); 431 return -EINVAL;
432 }
433 pev->event = strdup(arg);
434 if (pev->event == NULL)
435 return -ENOMEM;
436 pev->group = NULL;
139 arg = tmp; 437 arg = tmp;
140 } 438 }
141 439
@@ -145,12 +443,15 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
145 *ptr++ = '\0'; 443 *ptr++ = '\0';
146 } 444 }
147 445
446 tmp = strdup(arg);
447 if (tmp == NULL)
448 return -ENOMEM;
449
148 /* Check arg is function or file and copy it */ 450 /* Check arg is function or file and copy it */
149 if (strchr(arg, '.')) /* File */ 451 if (strchr(tmp, '.')) /* File */
150 pp->file = strdup(arg); 452 pp->file = tmp;
151 else /* Function */ 453 else /* Function */
152 pp->function = strdup(arg); 454 pp->function = tmp;
153 DIE_IF(pp->file == NULL && pp->function == NULL);
154 455
155 /* Parse other options */ 456 /* Parse other options */
156 while (ptr) { 457 while (ptr) {
@@ -158,6 +459,8 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
158 c = nc; 459 c = nc;
159 if (c == ';') { /* Lazy pattern must be the last part */ 460 if (c == ';') { /* Lazy pattern must be the last part */
160 pp->lazy_line = strdup(arg); 461 pp->lazy_line = strdup(arg);
462 if (pp->lazy_line == NULL)
463 return -ENOMEM;
161 break; 464 break;
162 } 465 }
163 ptr = strpbrk(arg, ";:+@%"); 466 ptr = strpbrk(arg, ";:+@%");
@@ -168,266 +471,658 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
168 switch (c) { 471 switch (c) {
169 case ':': /* Line number */ 472 case ':': /* Line number */
170 pp->line = strtoul(arg, &tmp, 0); 473 pp->line = strtoul(arg, &tmp, 0);
171 if (*tmp != '\0') 474 if (*tmp != '\0') {
172 semantic_error("There is non-digit char" 475 semantic_error("There is non-digit char"
173 " in line number."); 476 " in line number.\n");
477 return -EINVAL;
478 }
174 break; 479 break;
175 case '+': /* Byte offset from a symbol */ 480 case '+': /* Byte offset from a symbol */
176 pp->offset = strtoul(arg, &tmp, 0); 481 pp->offset = strtoul(arg, &tmp, 0);
177 if (*tmp != '\0') 482 if (*tmp != '\0') {
178 semantic_error("There is non-digit character" 483 semantic_error("There is non-digit character"
179 " in offset."); 484 " in offset.\n");
485 return -EINVAL;
486 }
180 break; 487 break;
181 case '@': /* File name */ 488 case '@': /* File name */
182 if (pp->file) 489 if (pp->file) {
183 semantic_error("SRC@SRC is not allowed."); 490 semantic_error("SRC@SRC is not allowed.\n");
491 return -EINVAL;
492 }
184 pp->file = strdup(arg); 493 pp->file = strdup(arg);
185 DIE_IF(pp->file == NULL); 494 if (pp->file == NULL)
495 return -ENOMEM;
186 break; 496 break;
187 case '%': /* Probe places */ 497 case '%': /* Probe places */
188 if (strcmp(arg, "return") == 0) { 498 if (strcmp(arg, "return") == 0) {
189 pp->retprobe = 1; 499 pp->retprobe = 1;
190 } else /* Others not supported yet */ 500 } else { /* Others not supported yet */
191 semantic_error("%%%s is not supported.", arg); 501 semantic_error("%%%s is not supported.\n", arg);
502 return -ENOTSUP;
503 }
192 break; 504 break;
193 default: 505 default: /* Buggy case */
194 DIE_IF("Program has a bug."); 506 pr_err("This program has a bug at %s:%d.\n",
507 __FILE__, __LINE__);
508 return -ENOTSUP;
195 break; 509 break;
196 } 510 }
197 } 511 }
198 512
199 /* Exclusion check */ 513 /* Exclusion check */
200 if (pp->lazy_line && pp->line) 514 if (pp->lazy_line && pp->line) {
201 semantic_error("Lazy pattern can't be used with line number."); 515 semantic_error("Lazy pattern can't be used with line number.");
516 return -EINVAL;
517 }
202 518
203 if (pp->lazy_line && pp->offset) 519 if (pp->lazy_line && pp->offset) {
204 semantic_error("Lazy pattern can't be used with offset."); 520 semantic_error("Lazy pattern can't be used with offset.");
521 return -EINVAL;
522 }
205 523
206 if (pp->line && pp->offset) 524 if (pp->line && pp->offset) {
207 semantic_error("Offset can't be used with line number."); 525 semantic_error("Offset can't be used with line number.");
526 return -EINVAL;
527 }
208 528
209 if (!pp->line && !pp->lazy_line && pp->file && !pp->function) 529 if (!pp->line && !pp->lazy_line && pp->file && !pp->function) {
210 semantic_error("File always requires line number or " 530 semantic_error("File always requires line number or "
211 "lazy pattern."); 531 "lazy pattern.");
532 return -EINVAL;
533 }
212 534
213 if (pp->offset && !pp->function) 535 if (pp->offset && !pp->function) {
214 semantic_error("Offset requires an entry function."); 536 semantic_error("Offset requires an entry function.");
537 return -EINVAL;
538 }
215 539
216 if (pp->retprobe && !pp->function) 540 if (pp->retprobe && !pp->function) {
217 semantic_error("Return probe requires an entry function."); 541 semantic_error("Return probe requires an entry function.");
542 return -EINVAL;
543 }
218 544
219 if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe) 545 if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe) {
220 semantic_error("Offset/Line/Lazy pattern can't be used with " 546 semantic_error("Offset/Line/Lazy pattern can't be used with "
221 "return probe."); 547 "return probe.");
548 return -EINVAL;
549 }
222 550
223 pr_debug("symbol:%s file:%s line:%d offset:%d return:%d lazy:%s\n", 551 pr_debug("symbol:%s file:%s line:%d offset:%lu return:%d lazy:%s\n",
224 pp->function, pp->file, pp->line, pp->offset, pp->retprobe, 552 pp->function, pp->file, pp->line, pp->offset, pp->retprobe,
225 pp->lazy_line); 553 pp->lazy_line);
554 return 0;
226} 555}
227 556
228/* Parse perf-probe event definition */ 557/* Parse perf-probe event argument */
229void parse_perf_probe_event(const char *str, struct probe_point *pp, 558static int parse_perf_probe_arg(char *str, struct perf_probe_arg *arg)
230 bool *need_dwarf)
231{ 559{
232 char **argv; 560 char *tmp;
233 int argc, i; 561 struct perf_probe_arg_field **fieldp;
562
563 pr_debug("parsing arg: %s into ", str);
234 564
235 *need_dwarf = false; 565 tmp = strchr(str, '=');
566 if (tmp) {
567 arg->name = strndup(str, tmp - str);
568 if (arg->name == NULL)
569 return -ENOMEM;
570 pr_debug("name:%s ", arg->name);
571 str = tmp + 1;
572 }
236 573
237 argv = argv_split(str, &argc); 574 tmp = strchr(str, ':');
238 if (!argv) 575 if (tmp) { /* Type setting */
239 die("argv_split failed."); 576 *tmp = '\0';
240 if (argc > MAX_PROBE_ARGS + 1) 577 arg->type = strdup(tmp + 1);
241 semantic_error("Too many arguments"); 578 if (arg->type == NULL)
579 return -ENOMEM;
580 pr_debug("type:%s ", arg->type);
581 }
242 582
583 tmp = strpbrk(str, "-.");
584 if (!is_c_varname(str) || !tmp) {
585 /* A variable, register, symbol or special value */
586 arg->var = strdup(str);
587 if (arg->var == NULL)
588 return -ENOMEM;
589 pr_debug("%s\n", arg->var);
590 return 0;
591 }
592
593 /* Structure fields */
594 arg->var = strndup(str, tmp - str);
595 if (arg->var == NULL)
596 return -ENOMEM;
597 pr_debug("%s, ", arg->var);
598 fieldp = &arg->field;
599
600 do {
601 *fieldp = zalloc(sizeof(struct perf_probe_arg_field));
602 if (*fieldp == NULL)
603 return -ENOMEM;
604 if (*tmp == '.') {
605 str = tmp + 1;
606 (*fieldp)->ref = false;
607 } else if (tmp[1] == '>') {
608 str = tmp + 2;
609 (*fieldp)->ref = true;
610 } else {
611 semantic_error("Argument parse error: %s\n", str);
612 return -EINVAL;
613 }
614
615 tmp = strpbrk(str, "-.");
616 if (tmp) {
617 (*fieldp)->name = strndup(str, tmp - str);
618 if ((*fieldp)->name == NULL)
619 return -ENOMEM;
620 pr_debug("%s(%d), ", (*fieldp)->name, (*fieldp)->ref);
621 fieldp = &(*fieldp)->next;
622 }
623 } while (tmp);
624 (*fieldp)->name = strdup(str);
625 if ((*fieldp)->name == NULL)
626 return -ENOMEM;
627 pr_debug("%s(%d)\n", (*fieldp)->name, (*fieldp)->ref);
628
629 /* If no name is specified, set the last field name */
630 if (!arg->name) {
631 arg->name = strdup((*fieldp)->name);
632 if (arg->name == NULL)
633 return -ENOMEM;
634 }
635 return 0;
636}
637
638/* Parse perf-probe event command */
639int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev)
640{
641 char **argv;
642 int argc, i, ret = 0;
643
644 argv = argv_split(cmd, &argc);
645 if (!argv) {
646 pr_debug("Failed to split arguments.\n");
647 return -ENOMEM;
648 }
649 if (argc - 1 > MAX_PROBE_ARGS) {
650 semantic_error("Too many probe arguments (%d).\n", argc - 1);
651 ret = -ERANGE;
652 goto out;
653 }
243 /* Parse probe point */ 654 /* Parse probe point */
244 parse_perf_probe_probepoint(argv[0], pp); 655 ret = parse_perf_probe_point(argv[0], pev);
245 if (pp->file || pp->line || pp->lazy_line) 656 if (ret < 0)
246 *need_dwarf = true; 657 goto out;
247 658
248 /* Copy arguments and ensure return probe has no C argument */ 659 /* Copy arguments and ensure return probe has no C argument */
249 pp->nr_args = argc - 1; 660 pev->nargs = argc - 1;
250 pp->args = zalloc(sizeof(char *) * pp->nr_args); 661 pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs);
251 for (i = 0; i < pp->nr_args; i++) { 662 if (pev->args == NULL) {
252 pp->args[i] = strdup(argv[i + 1]); 663 ret = -ENOMEM;
253 if (!pp->args[i]) 664 goto out;
254 die("Failed to copy argument."); 665 }
255 if (is_c_varname(pp->args[i])) { 666 for (i = 0; i < pev->nargs && ret >= 0; i++) {
256 if (pp->retprobe) 667 ret = parse_perf_probe_arg(argv[i + 1], &pev->args[i]);
257 semantic_error("You can't specify local" 668 if (ret >= 0 &&
258 " variable for kretprobe"); 669 is_c_varname(pev->args[i].var) && pev->point.retprobe) {
259 *need_dwarf = true; 670 semantic_error("You can't specify local variable for"
671 " kretprobe.\n");
672 ret = -EINVAL;
260 } 673 }
261 } 674 }
262 675out:
263 argv_free(argv); 676 argv_free(argv);
677
678 return ret;
679}
680
681/* Return true if this perf_probe_event requires debuginfo */
682bool perf_probe_event_need_dwarf(struct perf_probe_event *pev)
683{
684 int i;
685
686 if (pev->point.file || pev->point.line || pev->point.lazy_line)
687 return true;
688
689 for (i = 0; i < pev->nargs; i++)
690 if (is_c_varname(pev->args[i].var))
691 return true;
692
693 return false;
264} 694}
265 695
266/* Parse kprobe_events event into struct probe_point */ 696/* Parse kprobe_events event into struct probe_point */
267void parse_trace_kprobe_event(const char *str, struct probe_point *pp) 697int parse_kprobe_trace_command(const char *cmd, struct kprobe_trace_event *tev)
268{ 698{
699 struct kprobe_trace_point *tp = &tev->point;
269 char pr; 700 char pr;
270 char *p; 701 char *p;
271 int ret, i, argc; 702 int ret, i, argc;
272 char **argv; 703 char **argv;
273 704
274 pr_debug("Parsing kprobe_events: %s\n", str); 705 pr_debug("Parsing kprobe_events: %s\n", cmd);
275 argv = argv_split(str, &argc); 706 argv = argv_split(cmd, &argc);
276 if (!argv) 707 if (!argv) {
277 die("argv_split failed."); 708 pr_debug("Failed to split arguments.\n");
278 if (argc < 2) 709 return -ENOMEM;
279 semantic_error("Too less arguments."); 710 }
711 if (argc < 2) {
712 semantic_error("Too few probe arguments.\n");
713 ret = -ERANGE;
714 goto out;
715 }
280 716
281 /* Scan event and group name. */ 717 /* Scan event and group name. */
282 ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]", 718 ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]",
283 &pr, (float *)(void *)&pp->group, 719 &pr, (float *)(void *)&tev->group,
284 (float *)(void *)&pp->event); 720 (float *)(void *)&tev->event);
285 if (ret != 3) 721 if (ret != 3) {
286 semantic_error("Failed to parse event name: %s", argv[0]); 722 semantic_error("Failed to parse event name: %s\n", argv[0]);
287 pr_debug("Group:%s Event:%s probe:%c\n", pp->group, pp->event, pr); 723 ret = -EINVAL;
724 goto out;
725 }
726 pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr);
288 727
289 pp->retprobe = (pr == 'r'); 728 tp->retprobe = (pr == 'r');
290 729
291 /* Scan function name and offset */ 730 /* Scan function name and offset */
292 ret = sscanf(argv[1], "%a[^+]+%d", (float *)(void *)&pp->function, 731 ret = sscanf(argv[1], "%a[^+]+%lu", (float *)(void *)&tp->symbol,
293 &pp->offset); 732 &tp->offset);
294 if (ret == 1) 733 if (ret == 1)
295 pp->offset = 0; 734 tp->offset = 0;
296
297 /* kprobe_events doesn't have this information */
298 pp->line = 0;
299 pp->file = NULL;
300 735
301 pp->nr_args = argc - 2; 736 tev->nargs = argc - 2;
302 pp->args = zalloc(sizeof(char *) * pp->nr_args); 737 tev->args = zalloc(sizeof(struct kprobe_trace_arg) * tev->nargs);
303 for (i = 0; i < pp->nr_args; i++) { 738 if (tev->args == NULL) {
739 ret = -ENOMEM;
740 goto out;
741 }
742 for (i = 0; i < tev->nargs; i++) {
304 p = strchr(argv[i + 2], '='); 743 p = strchr(argv[i + 2], '=');
305 if (p) /* We don't need which register is assigned. */ 744 if (p) /* We don't need which register is assigned. */
306 *p = '\0'; 745 *p++ = '\0';
307 pp->args[i] = strdup(argv[i + 2]); 746 else
308 if (!pp->args[i]) 747 p = argv[i + 2];
309 die("Failed to copy argument."); 748 tev->args[i].name = strdup(argv[i + 2]);
749 /* TODO: parse regs and offset */
750 tev->args[i].value = strdup(p);
751 if (tev->args[i].name == NULL || tev->args[i].value == NULL) {
752 ret = -ENOMEM;
753 goto out;
754 }
310 } 755 }
311 756 ret = 0;
757out:
312 argv_free(argv); 758 argv_free(argv);
759 return ret;
313} 760}
314 761
315/* Synthesize only probe point (not argument) */ 762/* Compose only probe arg */
316int synthesize_perf_probe_point(struct probe_point *pp) 763int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len)
317{ 764{
318 char *buf; 765 struct perf_probe_arg_field *field = pa->field;
319 char offs[64] = "", line[64] = "";
320 int ret; 766 int ret;
767 char *tmp = buf;
321 768
322 pp->probes[0] = buf = zalloc(MAX_CMDLEN); 769 if (pa->name && pa->var)
323 pp->found = 1; 770 ret = e_snprintf(tmp, len, "%s=%s", pa->name, pa->var);
324 if (!buf) 771 else
325 die("Failed to allocate memory by zalloc."); 772 ret = e_snprintf(tmp, len, "%s", pa->name ? pa->name : pa->var);
773 if (ret <= 0)
774 goto error;
775 tmp += ret;
776 len -= ret;
777
778 while (field) {
779 ret = e_snprintf(tmp, len, "%s%s", field->ref ? "->" : ".",
780 field->name);
781 if (ret <= 0)
782 goto error;
783 tmp += ret;
784 len -= ret;
785 field = field->next;
786 }
787
788 if (pa->type) {
789 ret = e_snprintf(tmp, len, ":%s", pa->type);
790 if (ret <= 0)
791 goto error;
792 tmp += ret;
793 len -= ret;
794 }
795
796 return tmp - buf;
797error:
798 pr_debug("Failed to synthesize perf probe argument: %s",
799 strerror(-ret));
800 return ret;
801}
802
803/* Compose only probe point (not argument) */
804static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
805{
806 char *buf, *tmp;
807 char offs[32] = "", line[32] = "", file[32] = "";
808 int ret, len;
809
810 buf = zalloc(MAX_CMDLEN);
811 if (buf == NULL) {
812 ret = -ENOMEM;
813 goto error;
814 }
326 if (pp->offset) { 815 if (pp->offset) {
327 ret = e_snprintf(offs, 64, "+%d", pp->offset); 816 ret = e_snprintf(offs, 32, "+%lu", pp->offset);
328 if (ret <= 0) 817 if (ret <= 0)
329 goto error; 818 goto error;
330 } 819 }
331 if (pp->line) { 820 if (pp->line) {
332 ret = e_snprintf(line, 64, ":%d", pp->line); 821 ret = e_snprintf(line, 32, ":%d", pp->line);
822 if (ret <= 0)
823 goto error;
824 }
825 if (pp->file) {
826 len = strlen(pp->file) - 31;
827 if (len < 0)
828 len = 0;
829 tmp = strchr(pp->file + len, '/');
830 if (!tmp)
831 tmp = pp->file + len;
832 ret = e_snprintf(file, 32, "@%s", tmp + 1);
333 if (ret <= 0) 833 if (ret <= 0)
334 goto error; 834 goto error;
335 } 835 }
336 836
337 if (pp->function) 837 if (pp->function)
338 ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s", pp->function, 838 ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s%s", pp->function,
339 offs, pp->retprobe ? "%return" : "", line); 839 offs, pp->retprobe ? "%return" : "", line,
840 file);
340 else 841 else
341 ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", pp->file, line); 842 ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", file, line);
342 if (ret <= 0) { 843 if (ret <= 0)
844 goto error;
845
846 return buf;
343error: 847error:
344 free(pp->probes[0]); 848 pr_debug("Failed to synthesize perf probe point: %s",
345 pp->probes[0] = NULL; 849 strerror(-ret));
346 pp->found = 0; 850 if (buf)
347 } 851 free(buf);
348 return ret; 852 return NULL;
349} 853}
350 854
351int synthesize_perf_probe_event(struct probe_point *pp) 855#if 0
856char *synthesize_perf_probe_command(struct perf_probe_event *pev)
352{ 857{
353 char *buf; 858 char *buf;
354 int i, len, ret; 859 int i, len, ret;
355 860
356 len = synthesize_perf_probe_point(pp); 861 buf = synthesize_perf_probe_point(&pev->point);
357 if (len < 0) 862 if (!buf)
358 return 0; 863 return NULL;
359 864
360 buf = pp->probes[0]; 865 len = strlen(buf);
361 for (i = 0; i < pp->nr_args; i++) { 866 for (i = 0; i < pev->nargs; i++) {
362 ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s", 867 ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s",
363 pp->args[i]); 868 pev->args[i].name);
364 if (ret <= 0) 869 if (ret <= 0) {
365 goto error; 870 free(buf);
871 return NULL;
872 }
366 len += ret; 873 len += ret;
367 } 874 }
368 pp->found = 1;
369 875
370 return pp->found; 876 return buf;
371error: 877}
372 free(pp->probes[0]); 878#endif
373 pp->probes[0] = NULL; 879
880static int __synthesize_kprobe_trace_arg_ref(struct kprobe_trace_arg_ref *ref,
881 char **buf, size_t *buflen,
882 int depth)
883{
884 int ret;
885 if (ref->next) {
886 depth = __synthesize_kprobe_trace_arg_ref(ref->next, buf,
887 buflen, depth + 1);
888 if (depth < 0)
889 goto out;
890 }
891
892 ret = e_snprintf(*buf, *buflen, "%+ld(", ref->offset);
893 if (ret < 0)
894 depth = ret;
895 else {
896 *buf += ret;
897 *buflen -= ret;
898 }
899out:
900 return depth;
374 901
375 return ret;
376} 902}
377 903
378int synthesize_trace_kprobe_event(struct probe_point *pp) 904static int synthesize_kprobe_trace_arg(struct kprobe_trace_arg *arg,
905 char *buf, size_t buflen)
379{ 906{
907 int ret, depth = 0;
908 char *tmp = buf;
909
910 /* Argument name or separator */
911 if (arg->name)
912 ret = e_snprintf(buf, buflen, " %s=", arg->name);
913 else
914 ret = e_snprintf(buf, buflen, " ");
915 if (ret < 0)
916 return ret;
917 buf += ret;
918 buflen -= ret;
919
920 /* Dereferencing arguments */
921 if (arg->ref) {
922 depth = __synthesize_kprobe_trace_arg_ref(arg->ref, &buf,
923 &buflen, 1);
924 if (depth < 0)
925 return depth;
926 }
927
928 /* Print argument value */
929 ret = e_snprintf(buf, buflen, "%s", arg->value);
930 if (ret < 0)
931 return ret;
932 buf += ret;
933 buflen -= ret;
934
935 /* Closing */
936 while (depth--) {
937 ret = e_snprintf(buf, buflen, ")");
938 if (ret < 0)
939 return ret;
940 buf += ret;
941 buflen -= ret;
942 }
943 /* Print argument type */
944 if (arg->type) {
945 ret = e_snprintf(buf, buflen, ":%s", arg->type);
946 if (ret <= 0)
947 return ret;
948 buf += ret;
949 }
950
951 return buf - tmp;
952}
953
954char *synthesize_kprobe_trace_command(struct kprobe_trace_event *tev)
955{
956 struct kprobe_trace_point *tp = &tev->point;
380 char *buf; 957 char *buf;
381 int i, len, ret; 958 int i, len, ret;
382 959
383 pp->probes[0] = buf = zalloc(MAX_CMDLEN); 960 buf = zalloc(MAX_CMDLEN);
384 if (!buf) 961 if (buf == NULL)
385 die("Failed to allocate memory by zalloc."); 962 return NULL;
386 ret = e_snprintf(buf, MAX_CMDLEN, "%s+%d", pp->function, pp->offset); 963
387 if (ret <= 0) 964 len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s+%lu",
965 tp->retprobe ? 'r' : 'p',
966 tev->group, tev->event,
967 tp->symbol, tp->offset);
968 if (len <= 0)
388 goto error; 969 goto error;
389 len = ret;
390 970
391 for (i = 0; i < pp->nr_args; i++) { 971 for (i = 0; i < tev->nargs; i++) {
392 ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s", 972 ret = synthesize_kprobe_trace_arg(&tev->args[i], buf + len,
393 pp->args[i]); 973 MAX_CMDLEN - len);
394 if (ret <= 0) 974 if (ret <= 0)
395 goto error; 975 goto error;
396 len += ret; 976 len += ret;
397 } 977 }
398 pp->found = 1;
399 978
400 return pp->found; 979 return buf;
401error: 980error:
402 free(pp->probes[0]); 981 free(buf);
403 pp->probes[0] = NULL; 982 return NULL;
983}
984
985int convert_to_perf_probe_event(struct kprobe_trace_event *tev,
986 struct perf_probe_event *pev)
987{
988 char buf[64] = "";
989 int i, ret;
990
991 /* Convert event/group name */
992 pev->event = strdup(tev->event);
993 pev->group = strdup(tev->group);
994 if (pev->event == NULL || pev->group == NULL)
995 return -ENOMEM;
996
997 /* Convert trace_point to probe_point */
998 ret = convert_to_perf_probe_point(&tev->point, &pev->point);
999 if (ret < 0)
1000 return ret;
1001
1002 /* Convert trace_arg to probe_arg */
1003 pev->nargs = tev->nargs;
1004 pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs);
1005 if (pev->args == NULL)
1006 return -ENOMEM;
1007 for (i = 0; i < tev->nargs && ret >= 0; i++) {
1008 if (tev->args[i].name)
1009 pev->args[i].name = strdup(tev->args[i].name);
1010 else {
1011 ret = synthesize_kprobe_trace_arg(&tev->args[i],
1012 buf, 64);
1013 pev->args[i].name = strdup(buf);
1014 }
1015 if (pev->args[i].name == NULL && ret >= 0)
1016 ret = -ENOMEM;
1017 }
1018
1019 if (ret < 0)
1020 clear_perf_probe_event(pev);
404 1021
405 return ret; 1022 return ret;
406} 1023}
407 1024
408static int open_kprobe_events(int flags, int mode) 1025void clear_perf_probe_event(struct perf_probe_event *pev)
1026{
1027 struct perf_probe_point *pp = &pev->point;
1028 struct perf_probe_arg_field *field, *next;
1029 int i;
1030
1031 if (pev->event)
1032 free(pev->event);
1033 if (pev->group)
1034 free(pev->group);
1035 if (pp->file)
1036 free(pp->file);
1037 if (pp->function)
1038 free(pp->function);
1039 if (pp->lazy_line)
1040 free(pp->lazy_line);
1041 for (i = 0; i < pev->nargs; i++) {
1042 if (pev->args[i].name)
1043 free(pev->args[i].name);
1044 if (pev->args[i].var)
1045 free(pev->args[i].var);
1046 if (pev->args[i].type)
1047 free(pev->args[i].type);
1048 field = pev->args[i].field;
1049 while (field) {
1050 next = field->next;
1051 if (field->name)
1052 free(field->name);
1053 free(field);
1054 field = next;
1055 }
1056 }
1057 if (pev->args)
1058 free(pev->args);
1059 memset(pev, 0, sizeof(*pev));
1060}
1061
1062void clear_kprobe_trace_event(struct kprobe_trace_event *tev)
1063{
1064 struct kprobe_trace_arg_ref *ref, *next;
1065 int i;
1066
1067 if (tev->event)
1068 free(tev->event);
1069 if (tev->group)
1070 free(tev->group);
1071 if (tev->point.symbol)
1072 free(tev->point.symbol);
1073 for (i = 0; i < tev->nargs; i++) {
1074 if (tev->args[i].name)
1075 free(tev->args[i].name);
1076 if (tev->args[i].value)
1077 free(tev->args[i].value);
1078 if (tev->args[i].type)
1079 free(tev->args[i].type);
1080 ref = tev->args[i].ref;
1081 while (ref) {
1082 next = ref->next;
1083 free(ref);
1084 ref = next;
1085 }
1086 }
1087 if (tev->args)
1088 free(tev->args);
1089 memset(tev, 0, sizeof(*tev));
1090}
1091
1092static int open_kprobe_events(bool readwrite)
409{ 1093{
410 char buf[PATH_MAX]; 1094 char buf[PATH_MAX];
1095 const char *__debugfs;
411 int ret; 1096 int ret;
412 1097
413 ret = e_snprintf(buf, PATH_MAX, "%s/../kprobe_events", debugfs_path); 1098 __debugfs = debugfs_find_mountpoint();
414 if (ret < 0) 1099 if (__debugfs == NULL) {
415 die("Failed to make kprobe_events path."); 1100 pr_warning("Debugfs is not mounted.\n");
1101 return -ENOENT;
1102 }
1103
1104 ret = e_snprintf(buf, PATH_MAX, "%stracing/kprobe_events", __debugfs);
1105 if (ret >= 0) {
1106 pr_debug("Opening %s write=%d\n", buf, readwrite);
1107 if (readwrite && !probe_event_dry_run)
1108 ret = open(buf, O_RDWR, O_APPEND);
1109 else
1110 ret = open(buf, O_RDONLY, 0);
1111 }
416 1112
417 ret = open(buf, flags, mode);
418 if (ret < 0) { 1113 if (ret < 0) {
419 if (errno == ENOENT) 1114 if (errno == ENOENT)
420 die("kprobe_events file does not exist -" 1115 pr_warning("kprobe_events file does not exist - please"
421 " please rebuild with CONFIG_KPROBE_EVENT."); 1116 " rebuild kernel with CONFIG_KPROBE_EVENT.\n");
422 else 1117 else
423 die("Could not open kprobe_events file: %s", 1118 pr_warning("Failed to open kprobe_events file: %s\n",
424 strerror(errno)); 1119 strerror(errno));
425 } 1120 }
426 return ret; 1121 return ret;
427} 1122}
428 1123
429/* Get raw string list of current kprobe_events */ 1124/* Get raw string list of current kprobe_events */
430static struct strlist *get_trace_kprobe_event_rawlist(int fd) 1125static struct strlist *get_kprobe_trace_command_rawlist(int fd)
431{ 1126{
432 int ret, idx; 1127 int ret, idx;
433 FILE *fp; 1128 FILE *fp;
@@ -447,271 +1142,486 @@ static struct strlist *get_trace_kprobe_event_rawlist(int fd)
447 if (p[idx] == '\n') 1142 if (p[idx] == '\n')
448 p[idx] = '\0'; 1143 p[idx] = '\0';
449 ret = strlist__add(sl, buf); 1144 ret = strlist__add(sl, buf);
450 if (ret < 0) 1145 if (ret < 0) {
451 die("strlist__add failed: %s", strerror(-ret)); 1146 pr_debug("strlist__add failed: %s\n", strerror(-ret));
1147 strlist__delete(sl);
1148 return NULL;
1149 }
452 } 1150 }
453 fclose(fp); 1151 fclose(fp);
454 1152
455 return sl; 1153 return sl;
456} 1154}
457 1155
458/* Free and zero clear probe_point */
459static void clear_probe_point(struct probe_point *pp)
460{
461 int i;
462
463 if (pp->event)
464 free(pp->event);
465 if (pp->group)
466 free(pp->group);
467 if (pp->function)
468 free(pp->function);
469 if (pp->file)
470 free(pp->file);
471 if (pp->lazy_line)
472 free(pp->lazy_line);
473 for (i = 0; i < pp->nr_args; i++)
474 free(pp->args[i]);
475 if (pp->args)
476 free(pp->args);
477 for (i = 0; i < pp->found; i++)
478 free(pp->probes[i]);
479 memset(pp, 0, sizeof(*pp));
480}
481
482/* Show an event */ 1156/* Show an event */
483static void show_perf_probe_event(const char *event, const char *place, 1157static int show_perf_probe_event(struct perf_probe_event *pev)
484 struct probe_point *pp)
485{ 1158{
486 int i, ret; 1159 int i, ret;
487 char buf[128]; 1160 char buf[128];
1161 char *place;
1162
1163 /* Synthesize only event probe point */
1164 place = synthesize_perf_probe_point(&pev->point);
1165 if (!place)
1166 return -EINVAL;
488 1167
489 ret = e_snprintf(buf, 128, "%s:%s", pp->group, event); 1168 ret = e_snprintf(buf, 128, "%s:%s", pev->group, pev->event);
490 if (ret < 0) 1169 if (ret < 0)
491 die("Failed to copy event: %s", strerror(-ret)); 1170 return ret;
492 printf(" %-40s (on %s", buf, place); 1171
1172 printf(" %-20s (on %s", buf, place);
493 1173
494 if (pp->nr_args > 0) { 1174 if (pev->nargs > 0) {
495 printf(" with"); 1175 printf(" with");
496 for (i = 0; i < pp->nr_args; i++) 1176 for (i = 0; i < pev->nargs; i++) {
497 printf(" %s", pp->args[i]); 1177 ret = synthesize_perf_probe_arg(&pev->args[i],
1178 buf, 128);
1179 if (ret < 0)
1180 break;
1181 printf(" %s", buf);
1182 }
498 } 1183 }
499 printf(")\n"); 1184 printf(")\n");
1185 free(place);
1186 return ret;
500} 1187}
501 1188
502/* List up current perf-probe events */ 1189/* List up current perf-probe events */
503void show_perf_probe_events(void) 1190int show_perf_probe_events(void)
504{ 1191{
505 int fd; 1192 int fd, ret;
506 struct probe_point pp; 1193 struct kprobe_trace_event tev;
1194 struct perf_probe_event pev;
507 struct strlist *rawlist; 1195 struct strlist *rawlist;
508 struct str_node *ent; 1196 struct str_node *ent;
509 1197
510 setup_pager(); 1198 setup_pager();
511 memset(&pp, 0, sizeof(pp)); 1199 ret = init_vmlinux();
1200 if (ret < 0)
1201 return ret;
1202
1203 memset(&tev, 0, sizeof(tev));
1204 memset(&pev, 0, sizeof(pev));
512 1205
513 fd = open_kprobe_events(O_RDONLY, 0); 1206 fd = open_kprobe_events(false);
514 rawlist = get_trace_kprobe_event_rawlist(fd); 1207 if (fd < 0)
1208 return fd;
1209
1210 rawlist = get_kprobe_trace_command_rawlist(fd);
515 close(fd); 1211 close(fd);
1212 if (!rawlist)
1213 return -ENOENT;
516 1214
517 strlist__for_each(ent, rawlist) { 1215 strlist__for_each(ent, rawlist) {
518 parse_trace_kprobe_event(ent->s, &pp); 1216 ret = parse_kprobe_trace_command(ent->s, &tev);
519 /* Synthesize only event probe point */ 1217 if (ret >= 0) {
520 synthesize_perf_probe_point(&pp); 1218 ret = convert_to_perf_probe_event(&tev, &pev);
521 /* Show an event */ 1219 if (ret >= 0)
522 show_perf_probe_event(pp.event, pp.probes[0], &pp); 1220 ret = show_perf_probe_event(&pev);
523 clear_probe_point(&pp); 1221 }
1222 clear_perf_probe_event(&pev);
1223 clear_kprobe_trace_event(&tev);
1224 if (ret < 0)
1225 break;
524 } 1226 }
525
526 strlist__delete(rawlist); 1227 strlist__delete(rawlist);
1228
1229 return ret;
527} 1230}
528 1231
529/* Get current perf-probe event names */ 1232/* Get current perf-probe event names */
530static struct strlist *get_perf_event_names(int fd, bool include_group) 1233static struct strlist *get_kprobe_trace_event_names(int fd, bool include_group)
531{ 1234{
532 char buf[128]; 1235 char buf[128];
533 struct strlist *sl, *rawlist; 1236 struct strlist *sl, *rawlist;
534 struct str_node *ent; 1237 struct str_node *ent;
535 struct probe_point pp; 1238 struct kprobe_trace_event tev;
1239 int ret = 0;
536 1240
537 memset(&pp, 0, sizeof(pp)); 1241 memset(&tev, 0, sizeof(tev));
538 rawlist = get_trace_kprobe_event_rawlist(fd);
539 1242
1243 rawlist = get_kprobe_trace_command_rawlist(fd);
540 sl = strlist__new(true, NULL); 1244 sl = strlist__new(true, NULL);
541 strlist__for_each(ent, rawlist) { 1245 strlist__for_each(ent, rawlist) {
542 parse_trace_kprobe_event(ent->s, &pp); 1246 ret = parse_kprobe_trace_command(ent->s, &tev);
1247 if (ret < 0)
1248 break;
543 if (include_group) { 1249 if (include_group) {
544 if (e_snprintf(buf, 128, "%s:%s", pp.group, 1250 ret = e_snprintf(buf, 128, "%s:%s", tev.group,
545 pp.event) < 0) 1251 tev.event);
546 die("Failed to copy group:event name."); 1252 if (ret >= 0)
547 strlist__add(sl, buf); 1253 ret = strlist__add(sl, buf);
548 } else 1254 } else
549 strlist__add(sl, pp.event); 1255 ret = strlist__add(sl, tev.event);
550 clear_probe_point(&pp); 1256 clear_kprobe_trace_event(&tev);
1257 if (ret < 0)
1258 break;
551 } 1259 }
552
553 strlist__delete(rawlist); 1260 strlist__delete(rawlist);
554 1261
1262 if (ret < 0) {
1263 strlist__delete(sl);
1264 return NULL;
1265 }
555 return sl; 1266 return sl;
556} 1267}
557 1268
558static void write_trace_kprobe_event(int fd, const char *buf) 1269static int write_kprobe_trace_event(int fd, struct kprobe_trace_event *tev)
559{ 1270{
560 int ret; 1271 int ret = 0;
1272 char *buf = synthesize_kprobe_trace_command(tev);
1273
1274 if (!buf) {
1275 pr_debug("Failed to synthesize kprobe trace event.\n");
1276 return -EINVAL;
1277 }
561 1278
562 pr_debug("Writing event: %s\n", buf); 1279 pr_debug("Writing event: %s\n", buf);
563 ret = write(fd, buf, strlen(buf)); 1280 if (!probe_event_dry_run) {
564 if (ret <= 0) 1281 ret = write(fd, buf, strlen(buf));
565 die("Failed to write event: %s", strerror(errno)); 1282 if (ret <= 0)
1283 pr_warning("Failed to write event: %s\n",
1284 strerror(errno));
1285 }
1286 free(buf);
1287 return ret;
566} 1288}
567 1289
568static void get_new_event_name(char *buf, size_t len, const char *base, 1290static int get_new_event_name(char *buf, size_t len, const char *base,
569 struct strlist *namelist, bool allow_suffix) 1291 struct strlist *namelist, bool allow_suffix)
570{ 1292{
571 int i, ret; 1293 int i, ret;
572 1294
573 /* Try no suffix */ 1295 /* Try no suffix */
574 ret = e_snprintf(buf, len, "%s", base); 1296 ret = e_snprintf(buf, len, "%s", base);
575 if (ret < 0) 1297 if (ret < 0) {
576 die("snprintf() failed: %s", strerror(-ret)); 1298 pr_debug("snprintf() failed: %s\n", strerror(-ret));
1299 return ret;
1300 }
577 if (!strlist__has_entry(namelist, buf)) 1301 if (!strlist__has_entry(namelist, buf))
578 return; 1302 return 0;
579 1303
580 if (!allow_suffix) { 1304 if (!allow_suffix) {
581 pr_warning("Error: event \"%s\" already exists. " 1305 pr_warning("Error: event \"%s\" already exists. "
582 "(Use -f to force duplicates.)\n", base); 1306 "(Use -f to force duplicates.)\n", base);
583 die("Can't add new event."); 1307 return -EEXIST;
584 } 1308 }
585 1309
586 /* Try to add suffix */ 1310 /* Try to add suffix */
587 for (i = 1; i < MAX_EVENT_INDEX; i++) { 1311 for (i = 1; i < MAX_EVENT_INDEX; i++) {
588 ret = e_snprintf(buf, len, "%s_%d", base, i); 1312 ret = e_snprintf(buf, len, "%s_%d", base, i);
589 if (ret < 0) 1313 if (ret < 0) {
590 die("snprintf() failed: %s", strerror(-ret)); 1314 pr_debug("snprintf() failed: %s\n", strerror(-ret));
1315 return ret;
1316 }
591 if (!strlist__has_entry(namelist, buf)) 1317 if (!strlist__has_entry(namelist, buf))
592 break; 1318 break;
593 } 1319 }
594 if (i == MAX_EVENT_INDEX) 1320 if (i == MAX_EVENT_INDEX) {
595 die("Too many events are on the same function."); 1321 pr_warning("Too many events are on the same function.\n");
1322 ret = -ERANGE;
1323 }
1324
1325 return ret;
596} 1326}
597 1327
598void add_trace_kprobe_events(struct probe_point *probes, int nr_probes, 1328static int __add_kprobe_trace_events(struct perf_probe_event *pev,
599 bool force_add) 1329 struct kprobe_trace_event *tevs,
1330 int ntevs, bool allow_suffix)
600{ 1331{
601 int i, j, fd; 1332 int i, fd, ret;
602 struct probe_point *pp; 1333 struct kprobe_trace_event *tev = NULL;
603 char buf[MAX_CMDLEN]; 1334 char buf[64];
604 char event[64]; 1335 const char *event, *group;
605 struct strlist *namelist; 1336 struct strlist *namelist;
606 bool allow_suffix;
607 1337
608 fd = open_kprobe_events(O_RDWR, O_APPEND); 1338 fd = open_kprobe_events(true);
1339 if (fd < 0)
1340 return fd;
609 /* Get current event names */ 1341 /* Get current event names */
610 namelist = get_perf_event_names(fd, false); 1342 namelist = get_kprobe_trace_event_names(fd, false);
611 1343 if (!namelist) {
612 for (j = 0; j < nr_probes; j++) { 1344 pr_debug("Failed to get current event list.\n");
613 pp = probes + j; 1345 return -EIO;
614 if (!pp->event) 1346 }
615 pp->event = strdup(pp->function); 1347
616 if (!pp->group) 1348 ret = 0;
617 pp->group = strdup(PERFPROBE_GROUP); 1349 printf("Add new event%s\n", (ntevs > 1) ? "s:" : ":");
618 DIE_IF(!pp->event || !pp->group); 1350 for (i = 0; i < ntevs; i++) {
619 /* If force_add is true, suffix search is allowed */ 1351 tev = &tevs[i];
620 allow_suffix = force_add; 1352 if (pev->event)
621 for (i = 0; i < pp->found; i++) { 1353 event = pev->event;
622 /* Get an unused new event name */ 1354 else
623 get_new_event_name(event, 64, pp->event, namelist, 1355 if (pev->point.function)
624 allow_suffix); 1356 event = pev->point.function;
625 snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s\n", 1357 else
626 pp->retprobe ? 'r' : 'p', 1358 event = tev->point.symbol;
627 pp->group, event, 1359 if (pev->group)
628 pp->probes[i]); 1360 group = pev->group;
629 write_trace_kprobe_event(fd, buf); 1361 else
630 printf("Added new event:\n"); 1362 group = PERFPROBE_GROUP;
631 /* Get the first parameter (probe-point) */ 1363
632 sscanf(pp->probes[i], "%s", buf); 1364 /* Get an unused new event name */
633 show_perf_probe_event(event, buf, pp); 1365 ret = get_new_event_name(buf, 64, event,
634 /* Add added event name to namelist */ 1366 namelist, allow_suffix);
635 strlist__add(namelist, event); 1367 if (ret < 0)
636 /* 1368 break;
637 * Probes after the first probe which comes from same 1369 event = buf;
638 * user input are always allowed to add suffix, because 1370
639 * there might be several addresses corresponding to 1371 tev->event = strdup(event);
640 * one code line. 1372 tev->group = strdup(group);
641 */ 1373 if (tev->event == NULL || tev->group == NULL) {
642 allow_suffix = true; 1374 ret = -ENOMEM;
1375 break;
643 } 1376 }
1377 ret = write_kprobe_trace_event(fd, tev);
1378 if (ret < 0)
1379 break;
1380 /* Add added event name to namelist */
1381 strlist__add(namelist, event);
1382
1383 /* Trick here - save current event/group */
1384 event = pev->event;
1385 group = pev->group;
1386 pev->event = tev->event;
1387 pev->group = tev->group;
1388 show_perf_probe_event(pev);
1389 /* Trick here - restore current event/group */
1390 pev->event = (char *)event;
1391 pev->group = (char *)group;
1392
1393 /*
1394 * Probes after the first probe which comes from same
1395 * user input are always allowed to add suffix, because
1396 * there might be several addresses corresponding to
1397 * one code line.
1398 */
1399 allow_suffix = true;
1400 }
1401
1402 if (ret >= 0) {
1403 /* Show how to use the event. */
1404 printf("\nYou can now use it on all perf tools, such as:\n\n");
1405 printf("\tperf record -e %s:%s -aR sleep 1\n\n", tev->group,
1406 tev->event);
644 } 1407 }
645 /* Show how to use the event. */
646 printf("\nYou can now use it on all perf tools, such as:\n\n");
647 printf("\tperf record -e %s:%s -a sleep 1\n\n", PERFPROBE_GROUP, event);
648 1408
649 strlist__delete(namelist); 1409 strlist__delete(namelist);
650 close(fd); 1410 close(fd);
1411 return ret;
1412}
1413
1414static int convert_to_kprobe_trace_events(struct perf_probe_event *pev,
1415 struct kprobe_trace_event **tevs,
1416 int max_tevs)
1417{
1418 struct symbol *sym;
1419 int ret = 0, i;
1420 struct kprobe_trace_event *tev;
1421
1422 /* Convert perf_probe_event with debuginfo */
1423 ret = try_to_find_kprobe_trace_events(pev, tevs, max_tevs);
1424 if (ret != 0)
1425 return ret;
1426
1427 /* Allocate trace event buffer */
1428 tev = *tevs = zalloc(sizeof(struct kprobe_trace_event));
1429 if (tev == NULL)
1430 return -ENOMEM;
1431
1432 /* Copy parameters */
1433 tev->point.symbol = strdup(pev->point.function);
1434 if (tev->point.symbol == NULL) {
1435 ret = -ENOMEM;
1436 goto error;
1437 }
1438 tev->point.offset = pev->point.offset;
1439 tev->nargs = pev->nargs;
1440 if (tev->nargs) {
1441 tev->args = zalloc(sizeof(struct kprobe_trace_arg)
1442 * tev->nargs);
1443 if (tev->args == NULL) {
1444 ret = -ENOMEM;
1445 goto error;
1446 }
1447 for (i = 0; i < tev->nargs; i++) {
1448 if (pev->args[i].name) {
1449 tev->args[i].name = strdup(pev->args[i].name);
1450 if (tev->args[i].name == NULL) {
1451 ret = -ENOMEM;
1452 goto error;
1453 }
1454 }
1455 tev->args[i].value = strdup(pev->args[i].var);
1456 if (tev->args[i].value == NULL) {
1457 ret = -ENOMEM;
1458 goto error;
1459 }
1460 if (pev->args[i].type) {
1461 tev->args[i].type = strdup(pev->args[i].type);
1462 if (tev->args[i].type == NULL) {
1463 ret = -ENOMEM;
1464 goto error;
1465 }
1466 }
1467 }
1468 }
1469
1470 /* Currently just checking function name from symbol map */
1471 sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION],
1472 tev->point.symbol, NULL);
1473 if (!sym) {
1474 pr_warning("Kernel symbol \'%s\' not found.\n",
1475 tev->point.symbol);
1476 ret = -ENOENT;
1477 goto error;
1478 }
1479
1480 return 1;
1481error:
1482 clear_kprobe_trace_event(tev);
1483 free(tev);
1484 *tevs = NULL;
1485 return ret;
1486}
1487
1488struct __event_package {
1489 struct perf_probe_event *pev;
1490 struct kprobe_trace_event *tevs;
1491 int ntevs;
1492};
1493
1494int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
1495 bool force_add, int max_tevs)
1496{
1497 int i, j, ret;
1498 struct __event_package *pkgs;
1499
1500 pkgs = zalloc(sizeof(struct __event_package) * npevs);
1501 if (pkgs == NULL)
1502 return -ENOMEM;
1503
1504 /* Init vmlinux path */
1505 ret = init_vmlinux();
1506 if (ret < 0)
1507 return ret;
1508
1509 /* Loop 1: convert all events */
1510 for (i = 0; i < npevs; i++) {
1511 pkgs[i].pev = &pevs[i];
1512 /* Convert with or without debuginfo */
1513 ret = convert_to_kprobe_trace_events(pkgs[i].pev,
1514 &pkgs[i].tevs, max_tevs);
1515 if (ret < 0)
1516 goto end;
1517 pkgs[i].ntevs = ret;
1518 }
1519
1520 /* Loop 2: add all events */
1521 for (i = 0; i < npevs && ret >= 0; i++)
1522 ret = __add_kprobe_trace_events(pkgs[i].pev, pkgs[i].tevs,
1523 pkgs[i].ntevs, force_add);
1524end:
1525 /* Loop 3: cleanup trace events */
1526 for (i = 0; i < npevs; i++)
1527 for (j = 0; j < pkgs[i].ntevs; j++)
1528 clear_kprobe_trace_event(&pkgs[i].tevs[j]);
1529
1530 return ret;
651} 1531}
652 1532
653static void __del_trace_kprobe_event(int fd, struct str_node *ent) 1533static int __del_trace_kprobe_event(int fd, struct str_node *ent)
654{ 1534{
655 char *p; 1535 char *p;
656 char buf[128]; 1536 char buf[128];
1537 int ret;
657 1538
658 /* Convert from perf-probe event to trace-kprobe event */ 1539 /* Convert from perf-probe event to trace-kprobe event */
659 if (e_snprintf(buf, 128, "-:%s", ent->s) < 0) 1540 ret = e_snprintf(buf, 128, "-:%s", ent->s);
660 die("Failed to copy event."); 1541 if (ret < 0)
1542 goto error;
1543
661 p = strchr(buf + 2, ':'); 1544 p = strchr(buf + 2, ':');
662 if (!p) 1545 if (!p) {
663 die("Internal error: %s should have ':' but not.", ent->s); 1546 pr_debug("Internal error: %s should have ':' but does not.\n",
1547 ent->s);
1548 ret = -ENOTSUP;
1549 goto error;
1550 }
664 *p = '/'; 1551 *p = '/';
665 1552
666 write_trace_kprobe_event(fd, buf); 1553 pr_debug("Writing event: %s\n", buf);
1554 ret = write(fd, buf, strlen(buf));
1555 if (ret < 0)
1556 goto error;
1557
667 printf("Remove event: %s\n", ent->s); 1558 printf("Remove event: %s\n", ent->s);
1559 return 0;
1560error:
1561 pr_warning("Failed to delete event: %s\n", strerror(-ret));
1562 return ret;
668} 1563}
669 1564
670static void del_trace_kprobe_event(int fd, const char *group, 1565static int del_trace_kprobe_event(int fd, const char *group,
671 const char *event, struct strlist *namelist) 1566 const char *event, struct strlist *namelist)
672{ 1567{
673 char buf[128]; 1568 char buf[128];
674 struct str_node *ent, *n; 1569 struct str_node *ent, *n;
675 int found = 0; 1570 int found = 0, ret = 0;
676 1571
677 if (e_snprintf(buf, 128, "%s:%s", group, event) < 0) 1572 ret = e_snprintf(buf, 128, "%s:%s", group, event);
678 die("Failed to copy event."); 1573 if (ret < 0) {
1574 pr_err("Failed to copy event.");
1575 return ret;
1576 }
679 1577
680 if (strpbrk(buf, "*?")) { /* Glob-exp */ 1578 if (strpbrk(buf, "*?")) { /* Glob-exp */
681 strlist__for_each_safe(ent, n, namelist) 1579 strlist__for_each_safe(ent, n, namelist)
682 if (strglobmatch(ent->s, buf)) { 1580 if (strglobmatch(ent->s, buf)) {
683 found++; 1581 found++;
684 __del_trace_kprobe_event(fd, ent); 1582 ret = __del_trace_kprobe_event(fd, ent);
1583 if (ret < 0)
1584 break;
685 strlist__remove(namelist, ent); 1585 strlist__remove(namelist, ent);
686 } 1586 }
687 } else { 1587 } else {
688 ent = strlist__find(namelist, buf); 1588 ent = strlist__find(namelist, buf);
689 if (ent) { 1589 if (ent) {
690 found++; 1590 found++;
691 __del_trace_kprobe_event(fd, ent); 1591 ret = __del_trace_kprobe_event(fd, ent);
692 strlist__remove(namelist, ent); 1592 if (ret >= 0)
1593 strlist__remove(namelist, ent);
693 } 1594 }
694 } 1595 }
695 if (found == 0) 1596 if (found == 0 && ret >= 0)
696 pr_info("Info: event \"%s\" does not exist, could not remove it.\n", buf); 1597 pr_info("Info: Event \"%s\" does not exist.\n", buf);
1598
1599 return ret;
697} 1600}
698 1601
699void del_trace_kprobe_events(struct strlist *dellist) 1602int del_perf_probe_events(struct strlist *dellist)
700{ 1603{
701 int fd; 1604 int fd, ret = 0;
702 const char *group, *event; 1605 const char *group, *event;
703 char *p, *str; 1606 char *p, *str;
704 struct str_node *ent; 1607 struct str_node *ent;
705 struct strlist *namelist; 1608 struct strlist *namelist;
706 1609
707 fd = open_kprobe_events(O_RDWR, O_APPEND); 1610 fd = open_kprobe_events(true);
1611 if (fd < 0)
1612 return fd;
1613
708 /* Get current event names */ 1614 /* Get current event names */
709 namelist = get_perf_event_names(fd, true); 1615 namelist = get_kprobe_trace_event_names(fd, true);
1616 if (namelist == NULL)
1617 return -EINVAL;
710 1618
711 strlist__for_each(ent, dellist) { 1619 strlist__for_each(ent, dellist) {
712 str = strdup(ent->s); 1620 str = strdup(ent->s);
713 if (!str) 1621 if (str == NULL) {
714 die("Failed to copy event."); 1622 ret = -ENOMEM;
1623 break;
1624 }
715 pr_debug("Parsing: %s\n", str); 1625 pr_debug("Parsing: %s\n", str);
716 p = strchr(str, ':'); 1626 p = strchr(str, ':');
717 if (p) { 1627 if (p) {
@@ -723,80 +1633,14 @@ void del_trace_kprobe_events(struct strlist *dellist)
723 event = str; 1633 event = str;
724 } 1634 }
725 pr_debug("Group: %s, Event: %s\n", group, event); 1635 pr_debug("Group: %s, Event: %s\n", group, event);
726 del_trace_kprobe_event(fd, group, event, namelist); 1636 ret = del_trace_kprobe_event(fd, group, event, namelist);
727 free(str); 1637 free(str);
1638 if (ret < 0)
1639 break;
728 } 1640 }
729 strlist__delete(namelist); 1641 strlist__delete(namelist);
730 close(fd); 1642 close(fd);
731}
732 1643
733#define LINEBUF_SIZE 256 1644 return ret;
734#define NR_ADDITIONAL_LINES 2
735
736static void show_one_line(FILE *fp, unsigned int l, bool skip, bool show_num)
737{
738 char buf[LINEBUF_SIZE];
739 const char *color = PERF_COLOR_BLUE;
740
741 if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
742 goto error;
743 if (!skip) {
744 if (show_num)
745 fprintf(stdout, "%7u %s", l, buf);
746 else
747 color_fprintf(stdout, color, " %s", buf);
748 }
749
750 while (strlen(buf) == LINEBUF_SIZE - 1 &&
751 buf[LINEBUF_SIZE - 2] != '\n') {
752 if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
753 goto error;
754 if (!skip) {
755 if (show_num)
756 fprintf(stdout, "%s", buf);
757 else
758 color_fprintf(stdout, color, "%s", buf);
759 }
760 }
761 return;
762error:
763 if (feof(fp))
764 die("Source file is shorter than expected.");
765 else
766 die("File read error: %s", strerror(errno));
767} 1645}
768 1646
769void show_line_range(struct line_range *lr)
770{
771 unsigned int l = 1;
772 struct line_node *ln;
773 FILE *fp;
774
775 setup_pager();
776
777 if (lr->function)
778 fprintf(stdout, "<%s:%d>\n", lr->function,
779 lr->start - lr->offset);
780 else
781 fprintf(stdout, "<%s:%d>\n", lr->file, lr->start);
782
783 fp = fopen(lr->path, "r");
784 if (fp == NULL)
785 die("Failed to open %s: %s", lr->path, strerror(errno));
786 /* Skip to starting line number */
787 while (l < lr->start)
788 show_one_line(fp, l++, true, false);
789
790 list_for_each_entry(ln, &lr->line_list, list) {
791 while (ln->line > l)
792 show_one_line(fp, (l++) - lr->offset, false, false);
793 show_one_line(fp, (l++) - lr->offset, false, true);
794 }
795
796 if (lr->end == INT_MAX)
797 lr->end = l + NR_ADDITIONAL_LINES;
798 while (l < lr->end && !feof(fp))
799 show_one_line(fp, (l++) - lr->offset, false, false);
800
801 fclose(fp);
802}
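Note on the add path above: the converted events are appended to the kprobe_events interface in the same "p:group/event ..." text syntax that the removed snprintf() used to build by hand. The following is a minimal, self-contained sketch (not part of the patch) of that text format; the "probe" group, the do_sys_open symbol and the %di/%si operands are illustrative assumptions, not values taken from this change.

/* sketch.c - approximate one line written to
 * /sys/kernel/debug/tracing/kprobe_events for a converted event.
 */
#include <stdio.h>

int main(void)
{
	const char *group = "probe";        /* group name, assumed default */
	const char *event = "do_sys_open";  /* picked so it does not clash with existing events */
	const char *symbol = "do_sys_open"; /* kprobe_trace_point.symbol */
	unsigned long offset = 0;           /* kprobe_trace_point.offset */

	/* 'p' adds a kprobe; a return probe would use 'r' instead */
	printf("p:%s/%s %s+%lu dfd=%%di filename=%%si\n",
	       group, event, symbol, offset);
	return 0;
}

After a successful write the tool prints the "perf record -e group:event -aR sleep 1" hint shown above, which is how the freshly added event is consumed.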
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 711287d4baea..e9db1a214ca4 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -2,21 +2,125 @@
2#define _PROBE_EVENT_H 2#define _PROBE_EVENT_H
3 3
4#include <stdbool.h> 4#include <stdbool.h>
5#include "probe-finder.h"
6#include "strlist.h" 5#include "strlist.h"
7 6
8extern void parse_line_range_desc(const char *arg, struct line_range *lr); 7extern bool probe_event_dry_run;
9extern void parse_perf_probe_event(const char *str, struct probe_point *pp, 8
10 bool *need_dwarf); 9/* kprobe-tracer tracing point */
11extern int synthesize_perf_probe_point(struct probe_point *pp); 10struct kprobe_trace_point {
12extern int synthesize_perf_probe_event(struct probe_point *pp); 11 char *symbol; /* Base symbol */
13extern void parse_trace_kprobe_event(const char *str, struct probe_point *pp); 12 unsigned long offset; /* Offset from symbol */
14extern int synthesize_trace_kprobe_event(struct probe_point *pp); 13 bool retprobe; /* Return probe flag */
15extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes, 14};
16 bool force_add); 15
17extern void del_trace_kprobe_events(struct strlist *dellist); 16/* kprobe-tracer tracing argument referencing offset */
18extern void show_perf_probe_events(void); 17struct kprobe_trace_arg_ref {
19extern void show_line_range(struct line_range *lr); 18 struct kprobe_trace_arg_ref *next; /* Next reference */
19 long offset; /* Offset value */
20};
21
22/* kprobe-tracer tracing argument */
23struct kprobe_trace_arg {
24 char *name; /* Argument name */
25 char *value; /* Base value */
26 char *type; /* Type name */
27 struct kprobe_trace_arg_ref *ref; /* Referencing offset */
28};
29
30/* kprobe-tracer tracing event (point + arg) */
31struct kprobe_trace_event {
32 char *event; /* Event name */
33 char *group; /* Group name */
34 struct kprobe_trace_point point; /* Trace point */
35 int nargs; /* Number of args */
36 struct kprobe_trace_arg *args; /* Arguments */
37};
38
39/* Perf probe probing point */
40struct perf_probe_point {
41 char *file; /* File path */
42 char *function; /* Function name */
43 int line; /* Line number */
44 bool retprobe; /* Return probe flag */
45 char *lazy_line; /* Lazy matching pattern */
46 unsigned long offset; /* Offset from function entry */
47};
48
49/* Perf probe probing argument field chain */
50struct perf_probe_arg_field {
51 struct perf_probe_arg_field *next; /* Next field */
52 char *name; /* Name of the field */
53 bool ref; /* Referencing flag */
54};
55
56/* Perf probe probing argument */
57struct perf_probe_arg {
58 char *name; /* Argument name */
59 char *var; /* Variable name */
60 char *type; /* Type name */
61 struct perf_probe_arg_field *field; /* Structure fields */
62};
63
64/* Perf probe probing event (point + arg) */
65struct perf_probe_event {
66 char *event; /* Event name */
67 char *group; /* Group name */
68 struct perf_probe_point point; /* Probe point */
69 int nargs; /* Number of arguments */
70 struct perf_probe_arg *args; /* Arguments */
71};
72
73
74/* Line number container */
75struct line_node {
76 struct list_head list;
77 int line;
78};
79
80/* Line range */
81struct line_range {
82 char *file; /* File name */
83 char *function; /* Function name */
84 int start; /* Start line number */
85 int end; /* End line number */
86 int offset; /* Start line offset */
87 char *path; /* Real path name */
88 struct list_head line_list; /* Visible lines */
89};
90
91/* Command string to events */
92extern int parse_perf_probe_command(const char *cmd,
93 struct perf_probe_event *pev);
94extern int parse_kprobe_trace_command(const char *cmd,
95 struct kprobe_trace_event *tev);
96
97/* Events to command string */
98extern char *synthesize_perf_probe_command(struct perf_probe_event *pev);
99extern char *synthesize_kprobe_trace_command(struct kprobe_trace_event *tev);
100extern int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf,
101 size_t len);
102
103/* Check the perf_probe_event needs debuginfo */
104extern bool perf_probe_event_need_dwarf(struct perf_probe_event *pev);
105
106/* Convert from kprobe_trace_event to perf_probe_event */
107extern int convert_to_perf_probe_event(struct kprobe_trace_event *tev,
108 struct perf_probe_event *pev);
109
110/* Release event contents */
111extern void clear_perf_probe_event(struct perf_probe_event *pev);
112extern void clear_kprobe_trace_event(struct kprobe_trace_event *tev);
113
114/* Command string to line-range */
115extern int parse_line_range_desc(const char *cmd, struct line_range *lr);
116
117
118extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
119 bool force_add, int max_probe_points);
120extern int del_perf_probe_events(struct strlist *dellist);
121extern int show_perf_probe_events(void);
122extern int show_line_range(struct line_range *lr);
123
20 124
21/* Maximum index number of event-name postfix */ 125/* Maximum index number of event-name postfix */
22#define MAX_EVENT_INDEX 1024 126#define MAX_EVENT_INDEX 1024
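The header above splits the two representations: perf_probe_event describes what the user asked for, kprobe_trace_event describes what kprobe-tracer accepts, and add_perf_probe_events() drives the conversion between them. Below is a hedged sketch of a caller using only the declarations in this header; the helper name, the "schedule" target and the literal limit of 128 trace events are illustrative assumptions, not code from the patch.

/* sketch: define one probe at a function entry and register it */
#include <string.h>
#include "probe-event.h"

int add_one_probe(void)
{
	struct perf_probe_event pev;

	memset(&pev, 0, sizeof(pev));
	pev.point.function = (char *)"schedule"; /* probe the function entry */
	pev.point.offset = 0;
	pev.nargs = 0;                           /* record no arguments */

	/*
	 * force_add=false: do not fall back to a suffixed event name when
	 * the name is already taken; 128 caps how many kprobe_trace_events
	 * this one definition may expand to (e.g. for inlined functions).
	 */
	return add_perf_probe_events(&pev, 1, false, 128);
}

del_perf_probe_events() takes a strlist of "group:event" names (globs allowed, as the '*?' check in del_trace_kprobe_event shows) to remove such events again.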
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index c171a243d05b..562b1443e785 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -31,6 +31,7 @@
31#include <string.h> 31#include <string.h>
32#include <stdarg.h> 32#include <stdarg.h>
33#include <ctype.h> 33#include <ctype.h>
34#include <dwarf-regs.h>
34 35
35#include "string.h" 36#include "string.h"
36#include "event.h" 37#include "event.h"
@@ -38,57 +39,8 @@
38#include "util.h" 39#include "util.h"
39#include "probe-finder.h" 40#include "probe-finder.h"
40 41
41 42/* Kprobe tracer basic type is up to u64 */
42/* 43#define MAX_BASIC_TYPE_BITS 64
43 * Generic dwarf analysis helpers
44 */
45
46#define X86_32_MAX_REGS 8
47const char *x86_32_regs_table[X86_32_MAX_REGS] = {
48 "%ax",
49 "%cx",
50 "%dx",
51 "%bx",
52 "$stack", /* Stack address instead of %sp */
53 "%bp",
54 "%si",
55 "%di",
56};
57
58#define X86_64_MAX_REGS 16
59const char *x86_64_regs_table[X86_64_MAX_REGS] = {
60 "%ax",
61 "%dx",
62 "%cx",
63 "%bx",
64 "%si",
65 "%di",
66 "%bp",
67 "%sp",
68 "%r8",
69 "%r9",
70 "%r10",
71 "%r11",
72 "%r12",
73 "%r13",
74 "%r14",
75 "%r15",
76};
77
78/* TODO: switching by dwarf address size */
79#ifdef __x86_64__
80#define ARCH_MAX_REGS X86_64_MAX_REGS
81#define arch_regs_table x86_64_regs_table
82#else
83#define ARCH_MAX_REGS X86_32_MAX_REGS
84#define arch_regs_table x86_32_regs_table
85#endif
86
87/* Return architecture dependent register string (for kprobe-tracer) */
88static const char *get_arch_regstr(unsigned int n)
89{
90 return (n <= ARCH_MAX_REGS) ? arch_regs_table[n] : NULL;
91}
92 44
93/* 45/*
94 * Compare the tail of two strings. 46 * Compare the tail of two strings.
@@ -108,7 +60,7 @@ static int strtailcmp(const char *s1, const char *s2)
108/* Line number list operations */ 60/* Line number list operations */
109 61
110/* Add a line to line number list */ 62/* Add a line to line number list */
111static void line_list__add_line(struct list_head *head, unsigned int line) 63static int line_list__add_line(struct list_head *head, int line)
112{ 64{
113 struct line_node *ln; 65 struct line_node *ln;
114 struct list_head *p; 66 struct list_head *p;
@@ -119,21 +71,23 @@ static void line_list__add_line(struct list_head *head, unsigned int line)
119 p = &ln->list; 71 p = &ln->list;
120 goto found; 72 goto found;
121 } else if (ln->line == line) /* Already exist */ 73 } else if (ln->line == line) /* Already exist */
122 return ; 74 return 1;
123 } 75 }
124 /* List is empty, or the smallest entry */ 76 /* List is empty, or the smallest entry */
125 p = head; 77 p = head;
126found: 78found:
127 pr_debug("line list: add a line %u\n", line); 79 pr_debug("line list: add a line %u\n", line);
128 ln = zalloc(sizeof(struct line_node)); 80 ln = zalloc(sizeof(struct line_node));
129 DIE_IF(ln == NULL); 81 if (ln == NULL)
82 return -ENOMEM;
130 ln->line = line; 83 ln->line = line;
131 INIT_LIST_HEAD(&ln->list); 84 INIT_LIST_HEAD(&ln->list);
132 list_add(&ln->list, p); 85 list_add(&ln->list, p);
86 return 0;
133} 87}
134 88
135/* Check if the line in line number list */ 89/* Check if the line in line number list */
136static int line_list__has_line(struct list_head *head, unsigned int line) 90static int line_list__has_line(struct list_head *head, int line)
137{ 91{
138 struct line_node *ln; 92 struct line_node *ln;
139 93
@@ -184,9 +138,129 @@ static const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname)
184 if (strtailcmp(src, fname) == 0) 138 if (strtailcmp(src, fname) == 0)
185 break; 139 break;
186 } 140 }
141 if (i == nfiles)
142 return NULL;
187 return src; 143 return src;
188} 144}
189 145
146/* Compare diename and tname */
147static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
148{
149 const char *name;
150 name = dwarf_diename(dw_die);
151 return name ? strcmp(tname, name) : -1;
152}
153
154/* Get type die, but skip qualifiers and typedef */
155static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
156{
157 Dwarf_Attribute attr;
158 int tag;
159
160 do {
161 if (dwarf_attr(vr_die, DW_AT_type, &attr) == NULL ||
162 dwarf_formref_die(&attr, die_mem) == NULL)
163 return NULL;
164
165 tag = dwarf_tag(die_mem);
166 vr_die = die_mem;
167 } while (tag == DW_TAG_const_type ||
168 tag == DW_TAG_restrict_type ||
169 tag == DW_TAG_volatile_type ||
170 tag == DW_TAG_shared_type ||
171 tag == DW_TAG_typedef);
172
173 return die_mem;
174}
175
176static bool die_is_signed_type(Dwarf_Die *tp_die)
177{
178 Dwarf_Attribute attr;
179 Dwarf_Word ret;
180
181 if (dwarf_attr(tp_die, DW_AT_encoding, &attr) == NULL ||
182 dwarf_formudata(&attr, &ret) != 0)
183 return false;
184
185 return (ret == DW_ATE_signed_char || ret == DW_ATE_signed ||
186 ret == DW_ATE_signed_fixed);
187}
188
189static int die_get_byte_size(Dwarf_Die *tp_die)
190{
191 Dwarf_Attribute attr;
192 Dwarf_Word ret;
193
194 if (dwarf_attr(tp_die, DW_AT_byte_size, &attr) == NULL ||
195 dwarf_formudata(&attr, &ret) != 0)
196 return 0;
197
198 return (int)ret;
199}
200
201/* Get data_member_location offset */
202static int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs)
203{
204 Dwarf_Attribute attr;
205 Dwarf_Op *expr;
206 size_t nexpr;
207 int ret;
208
209 if (dwarf_attr(mb_die, DW_AT_data_member_location, &attr) == NULL)
210 return -ENOENT;
211
212 if (dwarf_formudata(&attr, offs) != 0) {
213 /* DW_AT_data_member_location should be DW_OP_plus_uconst */
214 ret = dwarf_getlocation(&attr, &expr, &nexpr);
215 if (ret < 0 || nexpr == 0)
216 return -ENOENT;
217
218 if (expr[0].atom != DW_OP_plus_uconst || nexpr != 1) {
219 pr_debug("Unable to get offset: Unexpected OP %x (%zd)\n",
220 expr[0].atom, nexpr);
221 return -ENOTSUP;
222 }
223 *offs = (Dwarf_Word)expr[0].number;
224 }
225 return 0;
226}
227
228/* Return values for die_find callbacks */
229enum {
230 DIE_FIND_CB_FOUND = 0, /* End of Search */
231 DIE_FIND_CB_CHILD = 1, /* Search only children */
232 DIE_FIND_CB_SIBLING = 2, /* Search only siblings */
233 DIE_FIND_CB_CONTINUE = 3, /* Search children and siblings */
234};
235
236/* Search a child die */
237static Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
238 int (*callback)(Dwarf_Die *, void *),
239 void *data, Dwarf_Die *die_mem)
240{
241 Dwarf_Die child_die;
242 int ret;
243
244 ret = dwarf_child(rt_die, die_mem);
245 if (ret != 0)
246 return NULL;
247
248 do {
249 ret = callback(die_mem, data);
250 if (ret == DIE_FIND_CB_FOUND)
251 return die_mem;
252
253 if ((ret & DIE_FIND_CB_CHILD) &&
254 die_find_child(die_mem, callback, data, &child_die)) {
255 memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
256 return die_mem;
257 }
258 } while ((ret & DIE_FIND_CB_SIBLING) &&
259 dwarf_siblingof(die_mem, die_mem) == 0);
260
261 return NULL;
262}
263
190struct __addr_die_search_param { 264struct __addr_die_search_param {
191 Dwarf_Addr addr; 265 Dwarf_Addr addr;
192 Dwarf_Die *die_mem; 266 Dwarf_Die *die_mem;
@@ -205,8 +279,8 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
205} 279}
206 280
207/* Search a real subprogram including this line, */ 281/* Search a real subprogram including this line, */
208static Dwarf_Die *die_get_real_subprogram(Dwarf_Die *cu_die, Dwarf_Addr addr, 282static Dwarf_Die *die_find_real_subprogram(Dwarf_Die *cu_die, Dwarf_Addr addr,
209 Dwarf_Die *die_mem) 283 Dwarf_Die *die_mem)
210{ 284{
211 struct __addr_die_search_param ad; 285 struct __addr_die_search_param ad;
212 ad.addr = addr; 286 ad.addr = addr;
@@ -218,77 +292,64 @@ static Dwarf_Die *die_get_real_subprogram(Dwarf_Die *cu_die, Dwarf_Addr addr,
218 return die_mem; 292 return die_mem;
219} 293}
220 294
221/* Similar to dwarf_getfuncs, but returns inlined_subroutine if exists. */ 295/* die_find callback for inline function search */
222static Dwarf_Die *die_get_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, 296static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
223 Dwarf_Die *die_mem)
224{ 297{
225 Dwarf_Die child_die; 298 Dwarf_Addr *addr = data;
226 int ret;
227 299
228 ret = dwarf_child(sp_die, die_mem); 300 if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine &&
229 if (ret != 0) 301 dwarf_haspc(die_mem, *addr))
230 return NULL; 302 return DIE_FIND_CB_FOUND;
231 303
232 do { 304 return DIE_FIND_CB_CONTINUE;
233 if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine &&
234 dwarf_haspc(die_mem, addr))
235 return die_mem;
236
237 if (die_get_inlinefunc(die_mem, addr, &child_die)) {
238 memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
239 return die_mem;
240 }
241 } while (dwarf_siblingof(die_mem, die_mem) == 0);
242
243 return NULL;
244} 305}
245 306
246/* Compare diename and tname */ 307/* Similar to dwarf_getfuncs, but returns inlined_subroutine if exists. */
247static bool die_compare_name(Dwarf_Die *dw_die, const char *tname) 308static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
309 Dwarf_Die *die_mem)
248{ 310{
249 const char *name; 311 return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
250 name = dwarf_diename(dw_die);
251 DIE_IF(name == NULL);
252 return strcmp(tname, name);
253} 312}
254 313
255/* Get entry pc(or low pc, 1st entry of ranges) of the die */ 314static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
256static Dwarf_Addr die_get_entrypc(Dwarf_Die *dw_die)
257{ 315{
258 Dwarf_Addr epc; 316 const char *name = data;
259 int ret; 317 int tag;
260 318
261 ret = dwarf_entrypc(dw_die, &epc); 319 tag = dwarf_tag(die_mem);
262 DIE_IF(ret == -1); 320 if ((tag == DW_TAG_formal_parameter ||
263 return epc; 321 tag == DW_TAG_variable) &&
322 (die_compare_name(die_mem, name) == 0))
323 return DIE_FIND_CB_FOUND;
324
325 return DIE_FIND_CB_CONTINUE;
264} 326}
265 327
266/* Get a variable die */ 328/* Find a variable called 'name' */
267static Dwarf_Die *die_find_variable(Dwarf_Die *sp_die, const char *name, 329static Dwarf_Die *die_find_variable(Dwarf_Die *sp_die, const char *name,
268 Dwarf_Die *die_mem) 330 Dwarf_Die *die_mem)
269{ 331{
270 Dwarf_Die child_die; 332 return die_find_child(sp_die, __die_find_variable_cb, (void *)name,
271 int tag; 333 die_mem);
272 int ret; 334}
273 335
274 ret = dwarf_child(sp_die, die_mem); 336static int __die_find_member_cb(Dwarf_Die *die_mem, void *data)
275 if (ret != 0) 337{
276 return NULL; 338 const char *name = data;
277 339
278 do { 340 if ((dwarf_tag(die_mem) == DW_TAG_member) &&
279 tag = dwarf_tag(die_mem); 341 (die_compare_name(die_mem, name) == 0))
280 if ((tag == DW_TAG_formal_parameter || 342 return DIE_FIND_CB_FOUND;
281 tag == DW_TAG_variable) &&
282 (die_compare_name(die_mem, name) == 0))
283 return die_mem;
284 343
285 if (die_find_variable(die_mem, name, &child_die)) { 344 return DIE_FIND_CB_SIBLING;
286 memcpy(die_mem, &child_die, sizeof(Dwarf_Die)); 345}
287 return die_mem;
288 }
289 } while (dwarf_siblingof(die_mem, die_mem) == 0);
290 346
291 return NULL; 347/* Find a member called 'name' */
348static Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
349 Dwarf_Die *die_mem)
350{
351 return die_find_child(st_die, __die_find_member_cb, (void *)name,
352 die_mem);
292} 353}
293 354
294/* 355/*
@@ -296,19 +357,22 @@ static Dwarf_Die *die_find_variable(Dwarf_Die *sp_die, const char *name,
296 */ 357 */
297 358
298/* Show a location */ 359/* Show a location */
299static void show_location(Dwarf_Op *op, struct probe_finder *pf) 360static int convert_location(Dwarf_Op *op, struct probe_finder *pf)
300{ 361{
301 unsigned int regn; 362 unsigned int regn;
302 Dwarf_Word offs = 0; 363 Dwarf_Word offs = 0;
303 int deref = 0, ret; 364 bool ref = false;
304 const char *regs; 365 const char *regs;
366 struct kprobe_trace_arg *tvar = pf->tvar;
305 367
306 /* TODO: support CFA */
307 /* If this is based on frame buffer, set the offset */ 368 /* If this is based on frame buffer, set the offset */
308 if (op->atom == DW_OP_fbreg) { 369 if (op->atom == DW_OP_fbreg) {
309 if (pf->fb_ops == NULL) 370 if (pf->fb_ops == NULL) {
310 die("The attribute of frame base is not supported.\n"); 371 pr_warning("The attribute of frame base is not "
311 deref = 1; 372 "supported.\n");
373 return -ENOTSUP;
374 }
375 ref = true;
312 offs = op->number; 376 offs = op->number;
313 op = &pf->fb_ops[0]; 377 op = &pf->fb_ops[0];
314 } 378 }
@@ -316,35 +380,164 @@ static void show_location(Dwarf_Op *op, struct probe_finder *pf)
316 if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) { 380 if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) {
317 regn = op->atom - DW_OP_breg0; 381 regn = op->atom - DW_OP_breg0;
318 offs += op->number; 382 offs += op->number;
319 deref = 1; 383 ref = true;
320 } else if (op->atom >= DW_OP_reg0 && op->atom <= DW_OP_reg31) { 384 } else if (op->atom >= DW_OP_reg0 && op->atom <= DW_OP_reg31) {
321 regn = op->atom - DW_OP_reg0; 385 regn = op->atom - DW_OP_reg0;
322 } else if (op->atom == DW_OP_bregx) { 386 } else if (op->atom == DW_OP_bregx) {
323 regn = op->number; 387 regn = op->number;
324 offs += op->number2; 388 offs += op->number2;
325 deref = 1; 389 ref = true;
326 } else if (op->atom == DW_OP_regx) { 390 } else if (op->atom == DW_OP_regx) {
327 regn = op->number; 391 regn = op->number;
328 } else 392 } else {
329 die("DW_OP %d is not supported.", op->atom); 393 pr_warning("DW_OP %x is not supported.\n", op->atom);
394 return -ENOTSUP;
395 }
330 396
331 regs = get_arch_regstr(regn); 397 regs = get_arch_regstr(regn);
332 if (!regs) 398 if (!regs) {
333 die("%u exceeds max register number.", regn); 399 pr_warning("Mapping for DWARF register number %u missing on this architecture.\n", regn);
400 return -ERANGE;
401 }
402
403 tvar->value = strdup(regs);
404 if (tvar->value == NULL)
405 return -ENOMEM;
406
407 if (ref) {
408 tvar->ref = zalloc(sizeof(struct kprobe_trace_arg_ref));
409 if (tvar->ref == NULL)
410 return -ENOMEM;
411 tvar->ref->offset = (long)offs;
412 }
413 return 0;
414}
415
416static int convert_variable_type(Dwarf_Die *vr_die,
417 struct kprobe_trace_arg *targ)
418{
419 Dwarf_Die type;
420 char buf[16];
421 int ret;
422
423 if (die_get_real_type(vr_die, &type) == NULL) {
424 pr_warning("Failed to get a type information of %s.\n",
425 dwarf_diename(vr_die));
426 return -ENOENT;
427 }
428
429 ret = die_get_byte_size(&type) * 8;
430 if (ret) {
431 /* Check the bitwidth */
432 if (ret > MAX_BASIC_TYPE_BITS) {
433 pr_info("%s exceeds max-bitwidth."
434 " Cutting down to %d bits.\n",
435 dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
436 ret = MAX_BASIC_TYPE_BITS;
437 }
438
439 ret = snprintf(buf, 16, "%c%d",
440 die_is_signed_type(&type) ? 's' : 'u', ret);
441 if (ret < 0 || ret >= 16) {
442 if (ret >= 16)
443 ret = -E2BIG;
444 pr_warning("Failed to convert variable type: %s\n",
445 strerror(-ret));
446 return ret;
447 }
448 targ->type = strdup(buf);
449 if (targ->type == NULL)
450 return -ENOMEM;
451 }
452 return 0;
453}
454
455static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
456 struct perf_probe_arg_field *field,
457 struct kprobe_trace_arg_ref **ref_ptr,
458 Dwarf_Die *die_mem)
459{
460 struct kprobe_trace_arg_ref *ref = *ref_ptr;
461 Dwarf_Die type;
462 Dwarf_Word offs;
463 int ret;
464
465 pr_debug("converting %s in %s\n", field->name, varname);
466 if (die_get_real_type(vr_die, &type) == NULL) {
467 pr_warning("Failed to get the type of %s.\n", varname);
468 return -ENOENT;
469 }
470
471 /* Check the pointer and dereference */
472 if (dwarf_tag(&type) == DW_TAG_pointer_type) {
473 if (!field->ref) {
474 pr_err("Semantic error: %s must be referred by '->'\n",
475 field->name);
476 return -EINVAL;
477 }
478 /* Get the type pointed by this pointer */
479 if (die_get_real_type(&type, &type) == NULL) {
480 pr_warning("Failed to get the type of %s.\n", varname);
481 return -ENOENT;
482 }
483 /* Verify it is a data structure */
484 if (dwarf_tag(&type) != DW_TAG_structure_type) {
485 pr_warning("%s is not a data structure.\n", varname);
486 return -EINVAL;
487 }
488
489 ref = zalloc(sizeof(struct kprobe_trace_arg_ref));
490 if (ref == NULL)
491 return -ENOMEM;
492 if (*ref_ptr)
493 (*ref_ptr)->next = ref;
494 else
495 *ref_ptr = ref;
496 } else {
497 /* Verify it is a data structure */
498 if (dwarf_tag(&type) != DW_TAG_structure_type) {
499 pr_warning("%s is not a data structure.\n", varname);
500 return -EINVAL;
501 }
502 if (field->ref) {
503 pr_err("Semantic error: %s must be referred by '.'\n",
504 field->name);
505 return -EINVAL;
506 }
507 if (!ref) {
508 pr_warning("Structure on a register is not "
509 "supported yet.\n");
510 return -ENOTSUP;
511 }
512 }
513
514 if (die_find_member(&type, field->name, die_mem) == NULL) {
515 pr_warning("%s(type:%s) has no member %s.\n", varname,
516 dwarf_diename(&type), field->name);
517 return -EINVAL;
518 }
334 519
335 if (deref) 520 /* Get the offset of the field */
336 ret = snprintf(pf->buf, pf->len, " %s=%+jd(%s)", 521 ret = die_get_data_member_location(die_mem, &offs);
337 pf->var, (intmax_t)offs, regs); 522 if (ret < 0) {
523 pr_warning("Failed to get the offset of %s.\n", field->name);
524 return ret;
525 }
526 ref->offset += (long)offs;
527
528 /* Converting next field */
529 if (field->next)
530 return convert_variable_fields(die_mem, field->name,
531 field->next, &ref, die_mem);
338 else 532 else
339 ret = snprintf(pf->buf, pf->len, " %s=%s", pf->var, regs); 533 return 0;
340 DIE_IF(ret < 0);
341 DIE_IF(ret >= pf->len);
342} 534}
343 535
344/* Show a variables in kprobe event format */ 536/* Show a variables in kprobe event format */
345static void show_variable(Dwarf_Die *vr_die, struct probe_finder *pf) 537static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
346{ 538{
347 Dwarf_Attribute attr; 539 Dwarf_Attribute attr;
540 Dwarf_Die die_mem;
348 Dwarf_Op *expr; 541 Dwarf_Op *expr;
349 size_t nexpr; 542 size_t nexpr;
350 int ret; 543 int ret;
@@ -356,142 +549,191 @@ static void show_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
356 if (ret <= 0 || nexpr == 0) 549 if (ret <= 0 || nexpr == 0)
357 goto error; 550 goto error;
358 551
359 show_location(expr, pf); 552 ret = convert_location(expr, pf);
553 if (ret == 0 && pf->pvar->field) {
554 ret = convert_variable_fields(vr_die, pf->pvar->var,
555 pf->pvar->field, &pf->tvar->ref,
556 &die_mem);
557 vr_die = &die_mem;
558 }
559 if (ret == 0) {
560 if (pf->pvar->type) {
561 pf->tvar->type = strdup(pf->pvar->type);
562 if (pf->tvar->type == NULL)
563 ret = -ENOMEM;
564 } else
565 ret = convert_variable_type(vr_die, pf->tvar);
566 }
360 /* *expr will be cached in libdw. Don't free it. */ 567 /* *expr will be cached in libdw. Don't free it. */
361 return ; 568 return ret;
362error: 569error:
363 /* TODO: Support const_value */ 570 /* TODO: Support const_value */
364 die("Failed to find the location of %s at this address.\n" 571 pr_err("Failed to find the location of %s at this address.\n"
365 " Perhaps, it has been optimized out.", pf->var); 572 " Perhaps, it has been optimized out.\n", pf->pvar->var);
573 return -ENOENT;
366} 574}
367 575
368/* Find a variable in a subprogram die */ 576/* Find a variable in a subprogram die */
369static void find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) 577static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
370{ 578{
371 int ret;
372 Dwarf_Die vr_die; 579 Dwarf_Die vr_die;
580 char buf[32], *ptr;
581 int ret;
373 582
374 /* TODO: Support struct members and arrays */ 583 /* TODO: Support arrays */
375 if (!is_c_varname(pf->var)) { 584 if (pf->pvar->name)
376 /* Output raw parameters */ 585 pf->tvar->name = strdup(pf->pvar->name);
377 ret = snprintf(pf->buf, pf->len, " %s", pf->var); 586 else {
378 DIE_IF(ret < 0); 587 ret = synthesize_perf_probe_arg(pf->pvar, buf, 32);
379 DIE_IF(ret >= pf->len); 588 if (ret < 0)
380 return ; 589 return ret;
590 ptr = strchr(buf, ':'); /* Change type separator to _ */
591 if (ptr)
592 *ptr = '_';
593 pf->tvar->name = strdup(buf);
594 }
595 if (pf->tvar->name == NULL)
596 return -ENOMEM;
597
598 if (!is_c_varname(pf->pvar->var)) {
599 /* Copy raw parameters */
600 pf->tvar->value = strdup(pf->pvar->var);
601 if (pf->tvar->value == NULL)
602 return -ENOMEM;
603 else
604 return 0;
381 } 605 }
382 606
383 pr_debug("Searching '%s' variable in context.\n", pf->var); 607 pr_debug("Searching '%s' variable in context.\n",
608 pf->pvar->var);
384 /* Search child die for local variables and parameters. */ 609 /* Search child die for local variables and parameters. */
385 if (!die_find_variable(sp_die, pf->var, &vr_die)) 610 if (!die_find_variable(sp_die, pf->pvar->var, &vr_die)) {
386 die("Failed to find '%s' in this function.", pf->var); 611 pr_warning("Failed to find '%s' in this function.\n",
387 612 pf->pvar->var);
388 show_variable(&vr_die, pf); 613 return -ENOENT;
614 }
615 return convert_variable(&vr_die, pf);
389} 616}
390 617
391/* Show a probe point to output buffer */ 618/* Show a probe point to output buffer */
392static void show_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf) 619static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
393{ 620{
394 struct probe_point *pp = pf->pp; 621 struct kprobe_trace_event *tev;
395 Dwarf_Addr eaddr; 622 Dwarf_Addr eaddr;
396 Dwarf_Die die_mem; 623 Dwarf_Die die_mem;
397 const char *name; 624 const char *name;
398 char tmp[MAX_PROBE_BUFFER]; 625 int ret, i;
399 int ret, i, len;
400 Dwarf_Attribute fb_attr; 626 Dwarf_Attribute fb_attr;
401 size_t nops; 627 size_t nops;
402 628
629 if (pf->ntevs == pf->max_tevs) {
630 pr_warning("Too many (> %d) probe points found.\n",
631 pf->max_tevs);
632 return -ERANGE;
633 }
634 tev = &pf->tevs[pf->ntevs++];
635
403 /* If no real subprogram, find a real one */ 636 /* If no real subprogram, find a real one */
404 if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) { 637 if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) {
405 sp_die = die_get_real_subprogram(&pf->cu_die, 638 sp_die = die_find_real_subprogram(&pf->cu_die,
406 pf->addr, &die_mem); 639 pf->addr, &die_mem);
407 if (!sp_die) 640 if (!sp_die) {
408 die("Probe point is not found in subprograms."); 641 pr_warning("Failed to find probe point in any "
642 "function.\n");
643 return -ENOENT;
644 }
409 } 645 }
410 646
411 /* Output name of probe point */ 647 /* Copy the name of probe point */
412 name = dwarf_diename(sp_die); 648 name = dwarf_diename(sp_die);
413 if (name) { 649 if (name) {
414 dwarf_entrypc(sp_die, &eaddr); 650 if (dwarf_entrypc(sp_die, &eaddr) != 0) {
415 ret = snprintf(tmp, MAX_PROBE_BUFFER, "%s+%lu", name, 651 pr_warning("Failed to get entry pc of %s\n",
416 (unsigned long)(pf->addr - eaddr)); 652 dwarf_diename(sp_die));
417 /* Copy the function name if possible */ 653 return -ENOENT;
418 if (!pp->function) {
419 pp->function = strdup(name);
420 pp->offset = (size_t)(pf->addr - eaddr);
421 } 654 }
422 } else { 655 tev->point.symbol = strdup(name);
656 if (tev->point.symbol == NULL)
657 return -ENOMEM;
658 tev->point.offset = (unsigned long)(pf->addr - eaddr);
659 } else
423 /* This function has no name. */ 660 /* This function has no name. */
424 ret = snprintf(tmp, MAX_PROBE_BUFFER, "0x%jx", 661 tev->point.offset = (unsigned long)pf->addr;
425 (uintmax_t)pf->addr); 662
426 if (!pp->function) { 663 pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
427 /* TODO: Use _stext */ 664 tev->point.offset);
428 pp->function = strdup("");
429 pp->offset = (size_t)pf->addr;
430 }
431 }
432 DIE_IF(ret < 0);
433 DIE_IF(ret >= MAX_PROBE_BUFFER);
434 len = ret;
435 pr_debug("Probe point found: %s\n", tmp);
436 665
437 /* Get the frame base attribute/ops */ 666 /* Get the frame base attribute/ops */
438 dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr); 667 dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr);
439 ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1); 668 ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1);
440 if (ret <= 0 || nops == 0) 669 if (ret <= 0 || nops == 0) {
441 pf->fb_ops = NULL; 670 pf->fb_ops = NULL;
671 } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
672 pf->cfi != NULL) {
673 Dwarf_Frame *frame;
674 if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 ||
675 dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) {
676 pr_warning("Failed to get CFA on 0x%jx\n",
677 (uintmax_t)pf->addr);
678 return -ENOENT;
679 }
680 }
442 681
443 /* Find each argument */ 682 /* Find each argument */
444 /* TODO: use dwarf_cfi_addrframe */ 683 tev->nargs = pf->pev->nargs;
445 for (i = 0; i < pp->nr_args; i++) { 684 tev->args = zalloc(sizeof(struct kprobe_trace_arg) * tev->nargs);
446 pf->var = pp->args[i]; 685 if (tev->args == NULL)
447 pf->buf = &tmp[len]; 686 return -ENOMEM;
448 pf->len = MAX_PROBE_BUFFER - len; 687 for (i = 0; i < pf->pev->nargs; i++) {
449 find_variable(sp_die, pf); 688 pf->pvar = &pf->pev->args[i];
450 len += strlen(pf->buf); 689 pf->tvar = &tev->args[i];
690 ret = find_variable(sp_die, pf);
691 if (ret != 0)
692 return ret;
451 } 693 }
452 694
453 /* *pf->fb_ops will be cached in libdw. Don't free it. */ 695 /* *pf->fb_ops will be cached in libdw. Don't free it. */
454 pf->fb_ops = NULL; 696 pf->fb_ops = NULL;
455 697 return 0;
456 if (pp->found == MAX_PROBES)
457 die("Too many( > %d) probe point found.\n", MAX_PROBES);
458
459 pp->probes[pp->found] = strdup(tmp);
460 pp->found++;
461} 698}
462 699
463/* Find probe point from its line number */ 700/* Find probe point from its line number */
464static void find_probe_point_by_line(struct probe_finder *pf) 701static int find_probe_point_by_line(struct probe_finder *pf)
465{ 702{
466 Dwarf_Lines *lines; 703 Dwarf_Lines *lines;
467 Dwarf_Line *line; 704 Dwarf_Line *line;
468 size_t nlines, i; 705 size_t nlines, i;
469 Dwarf_Addr addr; 706 Dwarf_Addr addr;
470 int lineno; 707 int lineno;
471 int ret; 708 int ret = 0;
472 709
473 ret = dwarf_getsrclines(&pf->cu_die, &lines, &nlines); 710 if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
474 DIE_IF(ret != 0); 711 pr_warning("No source lines found in this CU.\n");
712 return -ENOENT;
713 }
475 714
476 for (i = 0; i < nlines; i++) { 715 for (i = 0; i < nlines && ret == 0; i++) {
477 line = dwarf_onesrcline(lines, i); 716 line = dwarf_onesrcline(lines, i);
478 dwarf_lineno(line, &lineno); 717 if (dwarf_lineno(line, &lineno) != 0 ||
479 if (lineno != pf->lno) 718 lineno != pf->lno)
480 continue; 719 continue;
481 720
482 /* TODO: Get fileno from line, but how? */ 721 /* TODO: Get fileno from line, but how? */
483 if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0) 722 if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
484 continue; 723 continue;
485 724
486 ret = dwarf_lineaddr(line, &addr); 725 if (dwarf_lineaddr(line, &addr) != 0) {
487 DIE_IF(ret != 0); 726 pr_warning("Failed to get the address of the line.\n");
727 return -ENOENT;
728 }
488 pr_debug("Probe line found: line[%d]:%d addr:0x%jx\n", 729 pr_debug("Probe line found: line[%d]:%d addr:0x%jx\n",
489 (int)i, lineno, (uintmax_t)addr); 730 (int)i, lineno, (uintmax_t)addr);
490 pf->addr = addr; 731 pf->addr = addr;
491 732
492 show_probe_point(NULL, pf); 733 ret = convert_probe_point(NULL, pf);
493 /* Continuing, because target line might be inlined. */ 734 /* Continuing, because target line might be inlined. */
494 } 735 }
736 return ret;
495} 737}
496 738
497/* Find lines which match lazy pattern */ 739/* Find lines which match lazy pattern */
@@ -499,16 +741,27 @@ static int find_lazy_match_lines(struct list_head *head,
499 const char *fname, const char *pat) 741 const char *fname, const char *pat)
500{ 742{
501 char *fbuf, *p1, *p2; 743 char *fbuf, *p1, *p2;
502 int fd, line, nlines = 0; 744 int fd, ret, line, nlines = 0;
503 struct stat st; 745 struct stat st;
504 746
505 fd = open(fname, O_RDONLY); 747 fd = open(fname, O_RDONLY);
506 if (fd < 0) 748 if (fd < 0) {
507 die("failed to open %s", fname); 749 pr_warning("Failed to open %s: %s\n", fname, strerror(-fd));
508 DIE_IF(fstat(fd, &st) < 0); 750 return fd;
509 fbuf = malloc(st.st_size + 2); 751 }
510 DIE_IF(fbuf == NULL); 752
511 DIE_IF(read(fd, fbuf, st.st_size) < 0); 753 ret = fstat(fd, &st);
754 if (ret < 0) {
755 pr_warning("Failed to get the size of %s: %s\n",
756 fname, strerror(errno));
757 return ret;
758 }
759 fbuf = xmalloc(st.st_size + 2);
760 ret = read(fd, fbuf, st.st_size);
761 if (ret < 0) {
762 pr_warning("Failed to read %s: %s\n", fname, strerror(errno));
763 return ret;
764 }
512 close(fd); 765 close(fd);
513 fbuf[st.st_size] = '\n'; /* Dummy line */ 766 fbuf[st.st_size] = '\n'; /* Dummy line */
514 fbuf[st.st_size + 1] = '\0'; 767 fbuf[st.st_size + 1] = '\0';
@@ -528,7 +781,7 @@ static int find_lazy_match_lines(struct list_head *head,
528} 781}
529 782
530/* Find probe points from lazy pattern */ 783/* Find probe points from lazy pattern */
531static void find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf) 784static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
532{ 785{
533 Dwarf_Lines *lines; 786 Dwarf_Lines *lines;
534 Dwarf_Line *line; 787 Dwarf_Line *line;
@@ -536,37 +789,46 @@ static void find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
536 Dwarf_Addr addr; 789 Dwarf_Addr addr;
537 Dwarf_Die die_mem; 790 Dwarf_Die die_mem;
538 int lineno; 791 int lineno;
539 int ret; 792 int ret = 0;
540 793
541 if (list_empty(&pf->lcache)) { 794 if (list_empty(&pf->lcache)) {
542 /* Matching lazy line pattern */ 795 /* Matching lazy line pattern */
543 ret = find_lazy_match_lines(&pf->lcache, pf->fname, 796 ret = find_lazy_match_lines(&pf->lcache, pf->fname,
544 pf->pp->lazy_line); 797 pf->pev->point.lazy_line);
545 if (ret <= 0) 798 if (ret == 0) {
546 die("No matched lines found in %s.", pf->fname); 799 pr_debug("No matched lines found in %s.\n", pf->fname);
800 return 0;
801 } else if (ret < 0)
802 return ret;
547 } 803 }
548 804
549 ret = dwarf_getsrclines(&pf->cu_die, &lines, &nlines); 805 if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
550 DIE_IF(ret != 0); 806 pr_warning("No source lines found in this CU.\n");
551 for (i = 0; i < nlines; i++) { 807 return -ENOENT;
808 }
809
810 for (i = 0; i < nlines && ret >= 0; i++) {
552 line = dwarf_onesrcline(lines, i); 811 line = dwarf_onesrcline(lines, i);
553 812
554 dwarf_lineno(line, &lineno); 813 if (dwarf_lineno(line, &lineno) != 0 ||
555 if (!line_list__has_line(&pf->lcache, lineno)) 814 !line_list__has_line(&pf->lcache, lineno))
556 continue; 815 continue;
557 816
558 /* TODO: Get fileno from line, but how? */ 817 /* TODO: Get fileno from line, but how? */
559 if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0) 818 if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
560 continue; 819 continue;
561 820
562 ret = dwarf_lineaddr(line, &addr); 821 if (dwarf_lineaddr(line, &addr) != 0) {
563 DIE_IF(ret != 0); 822 pr_debug("Failed to get the address of line %d.\n",
823 lineno);
824 continue;
825 }
564 if (sp_die) { 826 if (sp_die) {
565 /* Address filtering 1: does sp_die include addr? */ 827 /* Address filtering 1: does sp_die include addr? */
566 if (!dwarf_haspc(sp_die, addr)) 828 if (!dwarf_haspc(sp_die, addr))
567 continue; 829 continue;
568 /* Address filtering 2: No child include addr? */ 830 /* Address filtering 2: No child include addr? */
569 if (die_get_inlinefunc(sp_die, addr, &die_mem)) 831 if (die_find_inlinefunc(sp_die, addr, &die_mem))
570 continue; 832 continue;
571 } 833 }
572 834
@@ -574,27 +836,44 @@ static void find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
574 (int)i, lineno, (unsigned long long)addr); 836 (int)i, lineno, (unsigned long long)addr);
575 pf->addr = addr; 837 pf->addr = addr;
576 838
577 show_probe_point(sp_die, pf); 839 ret = convert_probe_point(sp_die, pf);
578 /* Continuing, because target line might be inlined. */ 840 /* Continuing, because target line might be inlined. */
579 } 841 }
580 /* TODO: deallocate lines, but how? */ 842 /* TODO: deallocate lines, but how? */
843 return ret;
581} 844}
582 845
846/* Callback parameter with return value */
847struct dwarf_callback_param {
848 void *data;
849 int retval;
850};
851
583static int probe_point_inline_cb(Dwarf_Die *in_die, void *data) 852static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
584{ 853{
585 struct probe_finder *pf = (struct probe_finder *)data; 854 struct dwarf_callback_param *param = data;
586 struct probe_point *pp = pf->pp; 855 struct probe_finder *pf = param->data;
856 struct perf_probe_point *pp = &pf->pev->point;
857 Dwarf_Addr addr;
587 858
588 if (pp->lazy_line) 859 if (pp->lazy_line)
589 find_probe_point_lazy(in_die, pf); 860 param->retval = find_probe_point_lazy(in_die, pf);
590 else { 861 else {
591 /* Get probe address */ 862 /* Get probe address */
592 pf->addr = die_get_entrypc(in_die); 863 if (dwarf_entrypc(in_die, &addr) != 0) {
864 pr_warning("Failed to get entry pc of %s.\n",
865 dwarf_diename(in_die));
866 param->retval = -ENOENT;
867 return DWARF_CB_ABORT;
868 }
869 pf->addr = addr;
593 pf->addr += pp->offset; 870 pf->addr += pp->offset;
594 pr_debug("found inline addr: 0x%jx\n", 871 pr_debug("found inline addr: 0x%jx\n",
595 (uintmax_t)pf->addr); 872 (uintmax_t)pf->addr);
596 873
597 show_probe_point(in_die, pf); 874 param->retval = convert_probe_point(in_die, pf);
875 if (param->retval < 0)
876 return DWARF_CB_ABORT;
598 } 877 }
599 878
600 return DWARF_CB_OK; 879 return DWARF_CB_OK;
@@ -603,59 +882,88 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
603/* Search function from function name */ 882/* Search function from function name */
604static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) 883static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
605{ 884{
606 struct probe_finder *pf = (struct probe_finder *)data; 885 struct dwarf_callback_param *param = data;
607 struct probe_point *pp = pf->pp; 886 struct probe_finder *pf = param->data;
887 struct perf_probe_point *pp = &pf->pev->point;
608 888
609 /* Check tag and diename */ 889 /* Check tag and diename */
610 if (dwarf_tag(sp_die) != DW_TAG_subprogram || 890 if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
611 die_compare_name(sp_die, pp->function) != 0) 891 die_compare_name(sp_die, pp->function) != 0)
612 return 0; 892 return DWARF_CB_OK;
613 893
614 pf->fname = dwarf_decl_file(sp_die); 894 pf->fname = dwarf_decl_file(sp_die);
615 if (pp->line) { /* Function relative line */ 895 if (pp->line) { /* Function relative line */
616 dwarf_decl_line(sp_die, &pf->lno); 896 dwarf_decl_line(sp_die, &pf->lno);
617 pf->lno += pp->line; 897 pf->lno += pp->line;
618 find_probe_point_by_line(pf); 898 param->retval = find_probe_point_by_line(pf);
619 } else if (!dwarf_func_inline(sp_die)) { 899 } else if (!dwarf_func_inline(sp_die)) {
620 /* Real function */ 900 /* Real function */
621 if (pp->lazy_line) 901 if (pp->lazy_line)
622 find_probe_point_lazy(sp_die, pf); 902 param->retval = find_probe_point_lazy(sp_die, pf);
623 else { 903 else {
624 pf->addr = die_get_entrypc(sp_die); 904 if (dwarf_entrypc(sp_die, &pf->addr) != 0) {
905 pr_warning("Failed to get entry pc of %s.\n",
906 dwarf_diename(sp_die));
907 param->retval = -ENOENT;
908 return DWARF_CB_ABORT;
909 }
625 pf->addr += pp->offset; 910 pf->addr += pp->offset;
626 /* TODO: Check the address in this function */ 911 /* TODO: Check the address in this function */
627 show_probe_point(sp_die, pf); 912 param->retval = convert_probe_point(sp_die, pf);
628 } 913 }
629 } else 914 } else {
915 struct dwarf_callback_param _param = {.data = (void *)pf,
916 .retval = 0};
630 /* Inlined function: search instances */ 917 /* Inlined function: search instances */
631 dwarf_func_inline_instances(sp_die, probe_point_inline_cb, pf); 918 dwarf_func_inline_instances(sp_die, probe_point_inline_cb,
919 &_param);
920 param->retval = _param.retval;
921 }
632 922
633 return 1; /* Exit; no same symbol in this CU. */ 923 return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */
634} 924}
635 925
636static void find_probe_point_by_func(struct probe_finder *pf) 926static int find_probe_point_by_func(struct probe_finder *pf)
637{ 927{
638 dwarf_getfuncs(&pf->cu_die, probe_point_search_cb, pf, 0); 928 struct dwarf_callback_param _param = {.data = (void *)pf,
929 .retval = 0};
930 dwarf_getfuncs(&pf->cu_die, probe_point_search_cb, &_param, 0);
931 return _param.retval;
639} 932}
640 933
641/* Find a probe point */ 934/* Find kprobe_trace_events specified by perf_probe_event from debuginfo */
642int find_probe_point(int fd, struct probe_point *pp) 935int find_kprobe_trace_events(int fd, struct perf_probe_event *pev,
936 struct kprobe_trace_event **tevs, int max_tevs)
643{ 937{
644 struct probe_finder pf = {.pp = pp}; 938 struct probe_finder pf = {.pev = pev, .max_tevs = max_tevs};
939 struct perf_probe_point *pp = &pev->point;
645 Dwarf_Off off, noff; 940 Dwarf_Off off, noff;
646 size_t cuhl; 941 size_t cuhl;
647 Dwarf_Die *diep; 942 Dwarf_Die *diep;
648 Dwarf *dbg; 943 Dwarf *dbg;
944 int ret = 0;
945
946 pf.tevs = zalloc(sizeof(struct kprobe_trace_event) * max_tevs);
947 if (pf.tevs == NULL)
948 return -ENOMEM;
949 *tevs = pf.tevs;
950 pf.ntevs = 0;
649 951
650 dbg = dwarf_begin(fd, DWARF_C_READ); 952 dbg = dwarf_begin(fd, DWARF_C_READ);
651 if (!dbg) 953 if (!dbg) {
652 return -ENOENT; 954 pr_warning("No dwarf info found in the vmlinux - "
955 "please rebuild with CONFIG_DEBUG_INFO=y.\n");
956 return -EBADF;
957 }
958
959 /* Get the call frame information from this dwarf */
960 pf.cfi = dwarf_getcfi(dbg);
653 961
654 pp->found = 0;
655 off = 0; 962 off = 0;
656 line_list__init(&pf.lcache); 963 line_list__init(&pf.lcache);
657 /* Loop on CUs (Compilation Unit) */ 964 /* Loop on CUs (Compilation Unit) */
658 while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) { 965 while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) &&
966 ret >= 0) {
659 /* Get the DIE(Debugging Information Entry) of this CU */ 967 /* Get the DIE(Debugging Information Entry) of this CU */
660 diep = dwarf_offdie(dbg, off + cuhl, &pf.cu_die); 968 diep = dwarf_offdie(dbg, off + cuhl, &pf.cu_die);
661 if (!diep) 969 if (!diep)
@@ -669,12 +977,12 @@ int find_probe_point(int fd, struct probe_point *pp)
669 977
670 if (!pp->file || pf.fname) { 978 if (!pp->file || pf.fname) {
671 if (pp->function) 979 if (pp->function)
672 find_probe_point_by_func(&pf); 980 ret = find_probe_point_by_func(&pf);
673 else if (pp->lazy_line) 981 else if (pp->lazy_line)
674 find_probe_point_lazy(NULL, &pf); 982 ret = find_probe_point_lazy(NULL, &pf);
675 else { 983 else {
676 pf.lno = pp->line; 984 pf.lno = pp->line;
677 find_probe_point_by_line(&pf); 985 ret = find_probe_point_by_line(&pf);
678 } 986 }
679 } 987 }
680 off = noff; 988 off = noff;
@@ -682,41 +990,169 @@ int find_probe_point(int fd, struct probe_point *pp)
682 line_list__free(&pf.lcache); 990 line_list__free(&pf.lcache);
683 dwarf_end(dbg); 991 dwarf_end(dbg);
684 992
685 return pp->found; 993 return (ret < 0) ? ret : pf.ntevs;
994}
995
996/* Reverse search */
997int find_perf_probe_point(int fd, unsigned long addr,
998 struct perf_probe_point *ppt)
999{
1000 Dwarf_Die cudie, spdie, indie;
1001 Dwarf *dbg;
1002 Dwarf_Line *line;
1003 Dwarf_Addr laddr, eaddr;
1004 const char *tmp;
1005 int lineno, ret = 0;
1006 bool found = false;
1007
1008 dbg = dwarf_begin(fd, DWARF_C_READ);
1009 if (!dbg)
1010 return -EBADF;
1011
1012 /* Find cu die */
1013 if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr, &cudie)) {
1014 ret = -EINVAL;
1015 goto end;
1016 }
1017
1018 /* Find a corresponding line */
1019 line = dwarf_getsrc_die(&cudie, (Dwarf_Addr)addr);
1020 if (line) {
1021 if (dwarf_lineaddr(line, &laddr) == 0 &&
1022 (Dwarf_Addr)addr == laddr &&
1023 dwarf_lineno(line, &lineno) == 0) {
1024 tmp = dwarf_linesrc(line, NULL, NULL);
1025 if (tmp) {
1026 ppt->line = lineno;
1027 ppt->file = strdup(tmp);
1028 if (ppt->file == NULL) {
1029 ret = -ENOMEM;
1030 goto end;
1031 }
1032 found = true;
1033 }
1034 }
1035 }
1036
1037 /* Find a corresponding function */
1038 if (die_find_real_subprogram(&cudie, (Dwarf_Addr)addr, &spdie)) {
1039 tmp = dwarf_diename(&spdie);
1040 if (!tmp || dwarf_entrypc(&spdie, &eaddr) != 0)
1041 goto end;
1042
1043 if (ppt->line) {
1044 if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr,
1045 &indie)) {
1046 /* addr in an inline function */
1047 tmp = dwarf_diename(&indie);
1048 if (!tmp)
1049 goto end;
1050 ret = dwarf_decl_line(&indie, &lineno);
1051 } else {
1052 if (eaddr == addr) { /* Function entry */
1053 lineno = ppt->line;
1054 ret = 0;
1055 } else
1056 ret = dwarf_decl_line(&spdie, &lineno);
1057 }
1058 if (ret == 0) {
1059 /* Make a relative line number */
1060 ppt->line -= lineno;
1061 goto found;
1062 }
1063 }
1064 /* We don't have a line number, let's use offset */
1065 ppt->offset = addr - (unsigned long)eaddr;
1066found:
1067 ppt->function = strdup(tmp);
1068 if (ppt->function == NULL) {
1069 ret = -ENOMEM;
1070 goto end;
1071 }
1072 found = true;
1073 }
1074
1075end:
1076 dwarf_end(dbg);
1077 if (ret >= 0)
1078 ret = found ? 1 : 0;
1079 return ret;
1080}
1081
1082/* Add a line and store the src path */
1083static int line_range_add_line(const char *src, unsigned int lineno,
1084 struct line_range *lr)
1085{
1086 /* Copy real path */
1087 if (!lr->path) {
1088 lr->path = strdup(src);
1089 if (lr->path == NULL)
1090 return -ENOMEM;
1091 }
1092 return line_list__add_line(&lr->line_list, lineno);
1093}
1094
1095/* Search function declaration lines */
1096static int line_range_funcdecl_cb(Dwarf_Die *sp_die, void *data)
1097{
1098 struct dwarf_callback_param *param = data;
1099 struct line_finder *lf = param->data;
1100 const char *src;
1101 int lineno;
1102
1103 src = dwarf_decl_file(sp_die);
1104 if (src && strtailcmp(src, lf->fname) != 0)
1105 return DWARF_CB_OK;
1106
1107 if (dwarf_decl_line(sp_die, &lineno) != 0 ||
1108 (lf->lno_s > lineno || lf->lno_e < lineno))
1109 return DWARF_CB_OK;
1110
1111 param->retval = line_range_add_line(src, lineno, lf->lr);
1112 if (param->retval < 0)
1113 return DWARF_CB_ABORT;
1114 return DWARF_CB_OK;
1115}
1116
1117static int find_line_range_func_decl_lines(struct line_finder *lf)
1118{
1119 struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0};
1120 dwarf_getfuncs(&lf->cu_die, line_range_funcdecl_cb, &param, 0);
1121 return param.retval;
686} 1122}
687 1123
688/* Find line range from its line number */ 1124/* Find line range from its line number */
689static void find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf) 1125static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
690{ 1126{
691 Dwarf_Lines *lines; 1127 Dwarf_Lines *lines;
692 Dwarf_Line *line; 1128 Dwarf_Line *line;
693 size_t nlines, i; 1129 size_t nlines, i;
694 Dwarf_Addr addr; 1130 Dwarf_Addr addr;
695 int lineno; 1131 int lineno, ret = 0;
696 int ret;
697 const char *src; 1132 const char *src;
698 Dwarf_Die die_mem; 1133 Dwarf_Die die_mem;
699 1134
700 line_list__init(&lf->lr->line_list); 1135 line_list__init(&lf->lr->line_list);
701 ret = dwarf_getsrclines(&lf->cu_die, &lines, &nlines); 1136 if (dwarf_getsrclines(&lf->cu_die, &lines, &nlines) != 0) {
702 DIE_IF(ret != 0); 1137 pr_warning("No source lines found in this CU.\n");
1138 return -ENOENT;
1139 }
703 1140
 1141	/* Search probable lines in the line list */
704 for (i = 0; i < nlines; i++) { 1142 for (i = 0; i < nlines; i++) {
705 line = dwarf_onesrcline(lines, i); 1143 line = dwarf_onesrcline(lines, i);
706 ret = dwarf_lineno(line, &lineno); 1144 if (dwarf_lineno(line, &lineno) != 0 ||
707 DIE_IF(ret != 0); 1145 (lf->lno_s > lineno || lf->lno_e < lineno))
708 if (lf->lno_s > lineno || lf->lno_e < lineno)
709 continue; 1146 continue;
710 1147
711 if (sp_die) { 1148 if (sp_die) {
712 /* Address filtering 1: does sp_die include addr? */ 1149 /* Address filtering 1: does sp_die include addr? */
713 ret = dwarf_lineaddr(line, &addr); 1150 if (dwarf_lineaddr(line, &addr) != 0 ||
714 DIE_IF(ret != 0); 1151 !dwarf_haspc(sp_die, addr))
715 if (!dwarf_haspc(sp_die, addr))
716 continue; 1152 continue;
717 1153
718 /* Address filtering 2: No child include addr? */ 1154 /* Address filtering 2: No child include addr? */
719 if (die_get_inlinefunc(sp_die, addr, &die_mem)) 1155 if (die_find_inlinefunc(sp_die, addr, &die_mem))
720 continue; 1156 continue;
721 } 1157 }
722 1158
@@ -725,30 +1161,49 @@ static void find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
725 if (strtailcmp(src, lf->fname) != 0) 1161 if (strtailcmp(src, lf->fname) != 0)
726 continue; 1162 continue;
727 1163
728 /* Copy real path */ 1164 ret = line_range_add_line(src, lineno, lf->lr);
729 if (!lf->lr->path) 1165 if (ret < 0)
730 lf->lr->path = strdup(src); 1166 return ret;
731 line_list__add_line(&lf->lr->line_list, (unsigned int)lineno);
732 } 1167 }
1168
1169 /*
 1170	 * DWARF lines don't include function declarations. We have to
 1171	 * check the function list or the given function.
1172 */
1173 if (sp_die) {
1174 src = dwarf_decl_file(sp_die);
1175 if (src && dwarf_decl_line(sp_die, &lineno) == 0 &&
1176 (lf->lno_s <= lineno && lf->lno_e >= lineno))
1177 ret = line_range_add_line(src, lineno, lf->lr);
1178 } else
1179 ret = find_line_range_func_decl_lines(lf);
1180
733 /* Update status */ 1181 /* Update status */
734 if (!list_empty(&lf->lr->line_list)) 1182 if (ret >= 0)
735 lf->found = 1; 1183 if (!list_empty(&lf->lr->line_list))
1184 ret = lf->found = 1;
1185 else
1186 ret = 0; /* Lines are not found */
736 else { 1187 else {
737 free(lf->lr->path); 1188 free(lf->lr->path);
738 lf->lr->path = NULL; 1189 lf->lr->path = NULL;
739 } 1190 }
1191 return ret;
740} 1192}
741 1193
742static int line_range_inline_cb(Dwarf_Die *in_die, void *data) 1194static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
743{ 1195{
744 find_line_range_by_line(in_die, (struct line_finder *)data); 1196 struct dwarf_callback_param *param = data;
1197
1198 param->retval = find_line_range_by_line(in_die, param->data);
745 return DWARF_CB_ABORT; /* No need to find other instances */ 1199 return DWARF_CB_ABORT; /* No need to find other instances */
746} 1200}
747 1201
748/* Search function from function name */ 1202/* Search function from function name */
749static int line_range_search_cb(Dwarf_Die *sp_die, void *data) 1203static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
750{ 1204{
751 struct line_finder *lf = (struct line_finder *)data; 1205 struct dwarf_callback_param *param = data;
1206 struct line_finder *lf = param->data;
752 struct line_range *lr = lf->lr; 1207 struct line_range *lr = lf->lr;
753 1208
754 if (dwarf_tag(sp_die) == DW_TAG_subprogram && 1209 if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
@@ -757,44 +1212,55 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
757 dwarf_decl_line(sp_die, &lr->offset); 1212 dwarf_decl_line(sp_die, &lr->offset);
758 pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset); 1213 pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset);
759 lf->lno_s = lr->offset + lr->start; 1214 lf->lno_s = lr->offset + lr->start;
760 if (!lr->end) 1215 if (lf->lno_s < 0) /* Overflow */
1216 lf->lno_s = INT_MAX;
1217 lf->lno_e = lr->offset + lr->end;
1218 if (lf->lno_e < 0) /* Overflow */
761 lf->lno_e = INT_MAX; 1219 lf->lno_e = INT_MAX;
762 else 1220 pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e);
763 lf->lno_e = lr->offset + lr->end;
764 lr->start = lf->lno_s; 1221 lr->start = lf->lno_s;
765 lr->end = lf->lno_e; 1222 lr->end = lf->lno_e;
766 if (dwarf_func_inline(sp_die)) 1223 if (dwarf_func_inline(sp_die)) {
1224 struct dwarf_callback_param _param;
1225 _param.data = (void *)lf;
1226 _param.retval = 0;
767 dwarf_func_inline_instances(sp_die, 1227 dwarf_func_inline_instances(sp_die,
768 line_range_inline_cb, lf); 1228 line_range_inline_cb,
769 else 1229 &_param);
770 find_line_range_by_line(sp_die, lf); 1230 param->retval = _param.retval;
771 return 1; 1231 } else
1232 param->retval = find_line_range_by_line(sp_die, lf);
1233 return DWARF_CB_ABORT;
772 } 1234 }
773 return 0; 1235 return DWARF_CB_OK;
774} 1236}
775 1237
776static void find_line_range_by_func(struct line_finder *lf) 1238static int find_line_range_by_func(struct line_finder *lf)
777{ 1239{
778 dwarf_getfuncs(&lf->cu_die, line_range_search_cb, lf, 0); 1240 struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0};
1241 dwarf_getfuncs(&lf->cu_die, line_range_search_cb, &param, 0);
1242 return param.retval;
779} 1243}
780 1244
781int find_line_range(int fd, struct line_range *lr) 1245int find_line_range(int fd, struct line_range *lr)
782{ 1246{
783 struct line_finder lf = {.lr = lr, .found = 0}; 1247 struct line_finder lf = {.lr = lr, .found = 0};
784 int ret; 1248 int ret = 0;
785 Dwarf_Off off = 0, noff; 1249 Dwarf_Off off = 0, noff;
786 size_t cuhl; 1250 size_t cuhl;
787 Dwarf_Die *diep; 1251 Dwarf_Die *diep;
788 Dwarf *dbg; 1252 Dwarf *dbg;
789 1253
790 dbg = dwarf_begin(fd, DWARF_C_READ); 1254 dbg = dwarf_begin(fd, DWARF_C_READ);
791 if (!dbg) 1255 if (!dbg) {
792 return -ENOENT; 1256 pr_warning("No dwarf info found in the vmlinux - "
1257 "please rebuild with CONFIG_DEBUG_INFO=y.\n");
1258 return -EBADF;
1259 }
793 1260
794 /* Loop on CUs (Compilation Unit) */ 1261 /* Loop on CUs (Compilation Unit) */
795 while (!lf.found) { 1262 while (!lf.found && ret >= 0) {
796 ret = dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL); 1263 if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0)
797 if (ret != 0)
798 break; 1264 break;
799 1265
800 /* Get the DIE(Debugging Information Entry) of this CU */ 1266 /* Get the DIE(Debugging Information Entry) of this CU */
@@ -810,20 +1276,18 @@ int find_line_range(int fd, struct line_range *lr)
810 1276
811 if (!lr->file || lf.fname) { 1277 if (!lr->file || lf.fname) {
812 if (lr->function) 1278 if (lr->function)
813 find_line_range_by_func(&lf); 1279 ret = find_line_range_by_func(&lf);
814 else { 1280 else {
815 lf.lno_s = lr->start; 1281 lf.lno_s = lr->start;
816 if (!lr->end) 1282 lf.lno_e = lr->end;
817 lf.lno_e = INT_MAX; 1283 ret = find_line_range_by_line(NULL, &lf);
818 else
819 lf.lno_e = lr->end;
820 find_line_range_by_line(NULL, &lf);
821 } 1284 }
822 } 1285 }
823 off = noff; 1286 off = noff;
824 } 1287 }
825 pr_debug("path: %lx\n", (unsigned long)lr->path); 1288 pr_debug("path: %lx\n", (unsigned long)lr->path);
826 dwarf_end(dbg); 1289 dwarf_end(dbg);
827 return lf.found; 1290
1291 return (ret < 0) ? ret : lf.found;
828} 1292}
829 1293
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 21f7354397b4..66f1980e3855 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -3,6 +3,7 @@
3 3
4#include <stdbool.h> 4#include <stdbool.h>
5#include "util.h" 5#include "util.h"
6#include "probe-event.h"
6 7
7#define MAX_PATH_LEN 256 8#define MAX_PATH_LEN 256
8#define MAX_PROBE_BUFFER 1024 9#define MAX_PROBE_BUFFER 1024
@@ -14,67 +15,39 @@ static inline int is_c_varname(const char *name)
14 return isalpha(name[0]) || name[0] == '_'; 15 return isalpha(name[0]) || name[0] == '_';
15} 16}
16 17
17struct probe_point { 18#ifdef DWARF_SUPPORT
18 char *event; /* Event name */ 19/* Find kprobe_trace_events specified by perf_probe_event from debuginfo */
19 char *group; /* Event group */ 20extern int find_kprobe_trace_events(int fd, struct perf_probe_event *pev,
21 struct kprobe_trace_event **tevs,
22 int max_tevs);
20 23
21 /* Inputs */ 24/* Find a perf_probe_point from debuginfo */
22 char *file; /* File name */ 25extern int find_perf_probe_point(int fd, unsigned long addr,
23 int line; /* Line number */ 26 struct perf_probe_point *ppt);
24 char *lazy_line; /* Lazy line pattern */
25 27
26 char *function; /* Function name */
27 int offset; /* Offset bytes */
28
29 int nr_args; /* Number of arguments */
30 char **args; /* Arguments */
31
32 int retprobe; /* Return probe */
33
34 /* Output */
35 int found; /* Number of found probe points */
36 char *probes[MAX_PROBES]; /* Output buffers (will be allocated)*/
37};
38
39/* Line number container */
40struct line_node {
41 struct list_head list;
42 unsigned int line;
43};
44
45/* Line range */
46struct line_range {
47 char *file; /* File name */
48 char *function; /* Function name */
49 unsigned int start; /* Start line number */
50 unsigned int end; /* End line number */
51 int offset; /* Start line offset */
52 char *path; /* Real path name */
53 struct list_head line_list; /* Visible lines */
54};
55
56#ifndef NO_DWARF_SUPPORT
57extern int find_probe_point(int fd, struct probe_point *pp);
58extern int find_line_range(int fd, struct line_range *lr); 28extern int find_line_range(int fd, struct line_range *lr);
59 29
60#include <dwarf.h> 30#include <dwarf.h>
61#include <libdw.h> 31#include <libdw.h>
62 32
63struct probe_finder { 33struct probe_finder {
64 struct probe_point *pp; /* Target probe point */ 34 struct perf_probe_event *pev; /* Target probe event */
35 struct kprobe_trace_event *tevs; /* Result trace events */
36 int ntevs; /* Number of trace events */
37 int max_tevs; /* Max number of trace events */
65 38
66 /* For function searching */ 39 /* For function searching */
67 Dwarf_Addr addr; /* Address */
68 const char *fname; /* File name */
69 int lno; /* Line number */ 40 int lno; /* Line number */
41 Dwarf_Addr addr; /* Address */
42 const char *fname; /* Real file name */
70 Dwarf_Die cu_die; /* Current CU */ 43 Dwarf_Die cu_die; /* Current CU */
44 struct list_head lcache; /* Line cache for lazy match */
71 45
72 /* For variable searching */ 46 /* For variable searching */
47 Dwarf_CFI *cfi; /* Call Frame Information */
73 Dwarf_Op *fb_ops; /* Frame base attribute */ 48 Dwarf_Op *fb_ops; /* Frame base attribute */
74 const char *var; /* Current variable name */ 49 struct perf_probe_arg *pvar; /* Current target variable */
75 char *buf; /* Current output buffer */ 50 struct kprobe_trace_arg *tvar; /* Current result variable */
76 int len; /* Length of output buffer */
77 struct list_head lcache; /* Line cache for lazy match */
78}; 51};
79 52
80struct line_finder { 53struct line_finder {
@@ -87,6 +60,6 @@ struct line_finder {
87 int found; 60 int found;
88}; 61};
89 62
90#endif /* NO_DWARF_SUPPORT */ 63#endif /* DWARF_SUPPORT */
91 64
92#endif /*_PROBE_FINDER_H */ 65#endif /*_PROBE_FINDER_H */
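
For orientation, a hedged sketch of how the reworked debuginfo API above might be driven; fd (an open vmlinux with DWARF data) and pev (an already parsed perf_probe_event) are assumed to exist, and the return convention (number of trace events found, or a negative errno) is inferred from the probe-finder.c changes in this same commit:

	struct kprobe_trace_event *tevs = NULL;
	int ntevs;

	/* Expand one probe spec into kprobe trace events via DWARF,
	 * capping the result at MAX_PROBES entries. */
	ntevs = find_kprobe_trace_events(fd, &pev, &tevs, MAX_PROBES);
	if (ntevs < 0)
		pr_err("failed to analyze debuginfo: %d\n", ntevs);
	else
		pr_debug("found %d kprobe trace events\n", ntevs);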
diff --git a/tools/perf/util/pstack.c b/tools/perf/util/pstack.c
new file mode 100644
index 000000000000..13d36faf64eb
--- /dev/null
+++ b/tools/perf/util/pstack.c
@@ -0,0 +1,75 @@
1/*
2 * Simple pointer stack
3 *
4 * (c) 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
5 */
6
7#include "util.h"
8#include "pstack.h"
9#include <linux/kernel.h>
10#include <stdlib.h>
11
12struct pstack {
13 unsigned short top;
14 unsigned short max_nr_entries;
15 void *entries[0];
16};
17
18struct pstack *pstack__new(unsigned short max_nr_entries)
19{
20 struct pstack *self = zalloc((sizeof(*self) +
21 max_nr_entries * sizeof(void *)));
22 if (self != NULL)
23 self->max_nr_entries = max_nr_entries;
24 return self;
25}
26
27void pstack__delete(struct pstack *self)
28{
29 free(self);
30}
31
32bool pstack__empty(const struct pstack *self)
33{
34 return self->top == 0;
35}
36
37void pstack__remove(struct pstack *self, void *key)
38{
39 unsigned short i = self->top, last_index = self->top - 1;
40
41 while (i-- != 0) {
42 if (self->entries[i] == key) {
43 if (i < last_index)
44 memmove(self->entries + i,
45 self->entries + i + 1,
46 (last_index - i) * sizeof(void *));
47 --self->top;
48 return;
49 }
50 }
51 pr_err("%s: %p not on the pstack!\n", __func__, key);
52}
53
54void pstack__push(struct pstack *self, void *key)
55{
56 if (self->top == self->max_nr_entries) {
57 pr_err("%s: top=%d, overflow!\n", __func__, self->top);
58 return;
59 }
60 self->entries[self->top++] = key;
61}
62
63void *pstack__pop(struct pstack *self)
64{
65 void *ret;
66
67 if (self->top == 0) {
68 pr_err("%s: underflow!\n", __func__);
69 return NULL;
70 }
71
72 ret = self->entries[--self->top];
73 self->entries[self->top] = NULL;
74 return ret;
75}
diff --git a/tools/perf/util/pstack.h b/tools/perf/util/pstack.h
new file mode 100644
index 000000000000..5ad07023504b
--- /dev/null
+++ b/tools/perf/util/pstack.h
@@ -0,0 +1,12 @@
1#ifndef _PERF_PSTACK_
2#define _PERF_PSTACK_
3
4struct pstack;
5struct pstack *pstack__new(unsigned short max_nr_entries);
6void pstack__delete(struct pstack *self);
7bool pstack__empty(const struct pstack *self);
8void pstack__remove(struct pstack *self, void *key);
9void pstack__push(struct pstack *self, void *key);
10void *pstack__pop(struct pstack *self);
11
12#endif /* _PERF_PSTACK_ */
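
The pstack helper added above is self-contained; a minimal, hypothetical usage (the real callers are the TUI zoom filters in newt.c) could look like this:

	#include <stdbool.h>
	#include "pstack.h"

	static void pstack_demo(void)
	{
		int dso_filter, thread_filter;
		struct pstack *fstack = pstack__new(2);	/* room for two pointers */

		if (fstack == NULL)
			return;

		pstack__push(fstack, &dso_filter);	/* remember applied filters */
		pstack__push(fstack, &thread_filter);

		while (!pstack__empty(fstack))		/* undo them in LIFO order */
			pstack__pop(fstack);

		pstack__delete(fstack);
	}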
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 5376378e0cfc..b059dc50cc2d 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -371,7 +371,6 @@ static int perl_start_script(const char *script, int argc, const char **argv)
371 run_start_sub(); 371 run_start_sub();
372 372
373 free(command_line); 373 free(command_line);
374 fprintf(stderr, "perf trace started with Perl script %s\n\n", script);
375 return 0; 374 return 0;
376error: 375error:
377 perl_free(my_perl); 376 perl_free(my_perl);
@@ -394,8 +393,6 @@ static int perl_stop_script(void)
394 perl_destruct(my_perl); 393 perl_destruct(my_perl);
395 perl_free(my_perl); 394 perl_free(my_perl);
396 395
397 fprintf(stderr, "\nperf trace Perl script stopped\n");
398
399 return 0; 396 return 0;
400} 397}
401 398
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 6a72f14c5986..81f39cab3aaa 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -374,8 +374,6 @@ static int python_start_script(const char *script, int argc, const char **argv)
374 } 374 }
375 375
376 free(command_line); 376 free(command_line);
377 fprintf(stderr, "perf trace started with Python script %s\n\n",
378 script);
379 377
380 return err; 378 return err;
381error: 379error:
@@ -407,8 +405,6 @@ out:
407 Py_XDECREF(main_module); 405 Py_XDECREF(main_module);
408 Py_Finalize(); 406 Py_Finalize();
409 407
410 fprintf(stderr, "\nperf trace Python script stopped\n");
411
412 return err; 408 return err;
413} 409}
414 410
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index eed1cb889008..25bfca4f10f0 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -14,6 +14,16 @@ static int perf_session__open(struct perf_session *self, bool force)
14{ 14{
15 struct stat input_stat; 15 struct stat input_stat;
16 16
17 if (!strcmp(self->filename, "-")) {
18 self->fd_pipe = true;
19 self->fd = STDIN_FILENO;
20
21 if (perf_header__read(self, self->fd) < 0)
22 pr_err("incompatible file format");
23
24 return 0;
25 }
26
17 self->fd = open(self->filename, O_RDONLY); 27 self->fd = open(self->filename, O_RDONLY);
18 if (self->fd < 0) { 28 if (self->fd < 0) {
19 pr_err("failed to open file: %s", self->filename); 29 pr_err("failed to open file: %s", self->filename);
@@ -38,7 +48,7 @@ static int perf_session__open(struct perf_session *self, bool force)
38 goto out_close; 48 goto out_close;
39 } 49 }
40 50
41 if (perf_header__read(&self->header, self->fd) < 0) { 51 if (perf_header__read(self, self->fd) < 0) {
42 pr_err("incompatible file format"); 52 pr_err("incompatible file format");
43 goto out_close; 53 goto out_close;
44 } 54 }
@@ -52,12 +62,21 @@ out_close:
52 return -1; 62 return -1;
53} 63}
54 64
55static inline int perf_session__create_kernel_maps(struct perf_session *self) 65void perf_session__update_sample_type(struct perf_session *self)
66{
67 self->sample_type = perf_header__sample_type(&self->header);
68}
69
70int perf_session__create_kernel_maps(struct perf_session *self)
56{ 71{
57 return map_groups__create_kernel_maps(&self->kmaps, self->vmlinux_maps); 72 int ret = machine__create_kernel_maps(&self->host_machine);
73
74 if (ret >= 0)
75 ret = machines__create_guest_kernel_maps(&self->machines);
76 return ret;
58} 77}
59 78
60struct perf_session *perf_session__new(const char *filename, int mode, bool force) 79struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
61{ 80{
62 size_t len = filename ? strlen(filename) + 1 : 0; 81 size_t len = filename ? strlen(filename) + 1 : 0;
63 struct perf_session *self = zalloc(sizeof(*self) + len); 82 struct perf_session *self = zalloc(sizeof(*self) + len);
@@ -70,13 +89,15 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc
70 89
71 memcpy(self->filename, filename, len); 90 memcpy(self->filename, filename, len);
72 self->threads = RB_ROOT; 91 self->threads = RB_ROOT;
73 self->stats_by_id = RB_ROOT; 92 self->hists_tree = RB_ROOT;
74 self->last_match = NULL; 93 self->last_match = NULL;
75 self->mmap_window = 32; 94 self->mmap_window = 32;
76 self->cwd = NULL; 95 self->cwd = NULL;
77 self->cwdlen = 0; 96 self->cwdlen = 0;
78 self->unknown_events = 0; 97 self->machines = RB_ROOT;
79 map_groups__init(&self->kmaps); 98 self->repipe = repipe;
99 INIT_LIST_HEAD(&self->ordered_samples.samples_head);
100 machine__init(&self->host_machine, "", HOST_KERNEL_ID);
80 101
81 if (mode == O_RDONLY) { 102 if (mode == O_RDONLY) {
82 if (perf_session__open(self, force) < 0) 103 if (perf_session__open(self, force) < 0)
@@ -90,7 +111,7 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc
90 goto out_delete; 111 goto out_delete;
91 } 112 }
92 113
93 self->sample_type = perf_header__sample_type(&self->header); 114 perf_session__update_sample_type(self);
94out: 115out:
95 return self; 116 return self;
96out_free: 117out_free:
@@ -117,22 +138,17 @@ static bool symbol__match_parent_regex(struct symbol *sym)
117 return 0; 138 return 0;
118} 139}
119 140
120struct symbol **perf_session__resolve_callchain(struct perf_session *self, 141struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
121 struct thread *thread, 142 struct thread *thread,
122 struct ip_callchain *chain, 143 struct ip_callchain *chain,
123 struct symbol **parent) 144 struct symbol **parent)
124{ 145{
125 u8 cpumode = PERF_RECORD_MISC_USER; 146 u8 cpumode = PERF_RECORD_MISC_USER;
126 struct symbol **syms = NULL;
127 unsigned int i; 147 unsigned int i;
148 struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));
128 149
129 if (symbol_conf.use_callchain) { 150 if (!syms)
130 syms = calloc(chain->nr, sizeof(*syms)); 151 return NULL;
131 if (!syms) {
132 fprintf(stderr, "Can't allocate memory for symbols\n");
133 exit(-1);
134 }
135 }
136 152
137 for (i = 0; i < chain->nr; i++) { 153 for (i = 0; i < chain->nr; i++) {
138 u64 ip = chain->ips[i]; 154 u64 ip = chain->ips[i];
@@ -152,15 +168,17 @@ struct symbol **perf_session__resolve_callchain(struct perf_session *self,
152 continue; 168 continue;
153 } 169 }
154 170
171 al.filtered = false;
155 thread__find_addr_location(thread, self, cpumode, 172 thread__find_addr_location(thread, self, cpumode,
156 MAP__FUNCTION, ip, &al, NULL); 173 MAP__FUNCTION, thread->pid, ip, &al, NULL);
157 if (al.sym != NULL) { 174 if (al.sym != NULL) {
158 if (sort__has_parent && !*parent && 175 if (sort__has_parent && !*parent &&
159 symbol__match_parent_regex(al.sym)) 176 symbol__match_parent_regex(al.sym))
160 *parent = al.sym; 177 *parent = al.sym;
161 if (!symbol_conf.use_callchain) 178 if (!symbol_conf.use_callchain)
162 break; 179 break;
163 syms[i] = al.sym; 180 syms[i].map = al.map;
181 syms[i].sym = al.sym;
164 } 182 }
165 } 183 }
166 184
@@ -174,6 +192,18 @@ static int process_event_stub(event_t *event __used,
174 return 0; 192 return 0;
175} 193}
176 194
195static int process_finished_round_stub(event_t *event __used,
196 struct perf_session *session __used,
197 struct perf_event_ops *ops __used)
198{
199 dump_printf(": unhandled!\n");
200 return 0;
201}
202
203static int process_finished_round(event_t *event,
204 struct perf_session *session,
205 struct perf_event_ops *ops);
206
177static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) 207static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
178{ 208{
179 if (handler->sample == NULL) 209 if (handler->sample == NULL)
@@ -194,29 +224,20 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
194 handler->throttle = process_event_stub; 224 handler->throttle = process_event_stub;
195 if (handler->unthrottle == NULL) 225 if (handler->unthrottle == NULL)
196 handler->unthrottle = process_event_stub; 226 handler->unthrottle = process_event_stub;
197} 227 if (handler->attr == NULL)
198 228 handler->attr = process_event_stub;
199static const char *event__name[] = { 229 if (handler->event_type == NULL)
200 [0] = "TOTAL", 230 handler->event_type = process_event_stub;
201 [PERF_RECORD_MMAP] = "MMAP", 231 if (handler->tracing_data == NULL)
202 [PERF_RECORD_LOST] = "LOST", 232 handler->tracing_data = process_event_stub;
203 [PERF_RECORD_COMM] = "COMM", 233 if (handler->build_id == NULL)
204 [PERF_RECORD_EXIT] = "EXIT", 234 handler->build_id = process_event_stub;
205 [PERF_RECORD_THROTTLE] = "THROTTLE", 235 if (handler->finished_round == NULL) {
206 [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE", 236 if (handler->ordered_samples)
207 [PERF_RECORD_FORK] = "FORK", 237 handler->finished_round = process_finished_round;
208 [PERF_RECORD_READ] = "READ", 238 else
209 [PERF_RECORD_SAMPLE] = "SAMPLE", 239 handler->finished_round = process_finished_round_stub;
210}; 240 }
211
212unsigned long event__total[PERF_RECORD_MAX];
213
214void event__print_totals(void)
215{
216 int i;
217 for (i = 0; i < PERF_RECORD_MAX; ++i)
218 pr_info("%10s events: %10ld\n",
219 event__name[i], event__total[i]);
220} 241}
221 242
222void mem_bswap_64(void *src, int byte_size) 243void mem_bswap_64(void *src, int byte_size)
@@ -270,6 +291,37 @@ static void event__read_swap(event_t *self)
270 self->read.id = bswap_64(self->read.id); 291 self->read.id = bswap_64(self->read.id);
271} 292}
272 293
294static void event__attr_swap(event_t *self)
295{
296 size_t size;
297
298 self->attr.attr.type = bswap_32(self->attr.attr.type);
299 self->attr.attr.size = bswap_32(self->attr.attr.size);
300 self->attr.attr.config = bswap_64(self->attr.attr.config);
301 self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
302 self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
303 self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
304 self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
305 self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
306 self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
307 self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);
308
309 size = self->header.size;
310 size -= (void *)&self->attr.id - (void *)self;
311 mem_bswap_64(self->attr.id, size);
312}
313
314static void event__event_type_swap(event_t *self)
315{
316 self->event_type.event_type.event_id =
317 bswap_64(self->event_type.event_type.event_id);
318}
319
320static void event__tracing_data_swap(event_t *self)
321{
322 self->tracing_data.size = bswap_32(self->tracing_data.size);
323}
324
273typedef void (*event__swap_op)(event_t *self); 325typedef void (*event__swap_op)(event_t *self);
274 326
275static event__swap_op event__swap_ops[] = { 327static event__swap_op event__swap_ops[] = {
@@ -280,9 +332,212 @@ static event__swap_op event__swap_ops[] = {
280 [PERF_RECORD_LOST] = event__all64_swap, 332 [PERF_RECORD_LOST] = event__all64_swap,
281 [PERF_RECORD_READ] = event__read_swap, 333 [PERF_RECORD_READ] = event__read_swap,
282 [PERF_RECORD_SAMPLE] = event__all64_swap, 334 [PERF_RECORD_SAMPLE] = event__all64_swap,
283 [PERF_RECORD_MAX] = NULL, 335 [PERF_RECORD_HEADER_ATTR] = event__attr_swap,
336 [PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap,
337 [PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
338 [PERF_RECORD_HEADER_BUILD_ID] = NULL,
339 [PERF_RECORD_HEADER_MAX] = NULL,
284}; 340};
285 341
342struct sample_queue {
343 u64 timestamp;
344 struct sample_event *event;
345 struct list_head list;
346};
347
348static void flush_sample_queue(struct perf_session *s,
349 struct perf_event_ops *ops)
350{
351 struct list_head *head = &s->ordered_samples.samples_head;
352 u64 limit = s->ordered_samples.next_flush;
353 struct sample_queue *tmp, *iter;
354
355 if (!ops->ordered_samples || !limit)
356 return;
357
358 list_for_each_entry_safe(iter, tmp, head, list) {
359 if (iter->timestamp > limit)
360 return;
361
362 if (iter == s->ordered_samples.last_inserted)
363 s->ordered_samples.last_inserted = NULL;
364
365 ops->sample((event_t *)iter->event, s);
366
367 s->ordered_samples.last_flush = iter->timestamp;
368 list_del(&iter->list);
369 free(iter->event);
370 free(iter);
371 }
372}
373
374/*
 375 * When perf record finishes a pass over every buffer, it records this pseudo
376 * event.
377 * We record the max timestamp t found in the pass n.
378 * Assuming these timestamps are monotonic across cpus, we know that if
 379 * a buffer still has events with timestamps below t, they will all be
 380 * available and then read in pass n + 1.
 381 * Hence when we start to read pass n + 2, we can safely flush every
 382 * event with a timestamp below t.
383 *
384 * ============ PASS n =================
385 * CPU 0 | CPU 1
386 * |
387 * cnt1 timestamps | cnt2 timestamps
388 * 1 | 2
389 * 2 | 3
390 * - | 4 <--- max recorded
391 *
392 * ============ PASS n + 1 ==============
393 * CPU 0 | CPU 1
394 * |
395 * cnt1 timestamps | cnt2 timestamps
396 * 3 | 5
397 * 4 | 6
398 * 5 | 7 <---- max recorded
399 *
 400 * Flush every event below timestamp 4
401 *
402 * ============ PASS n + 2 ==============
403 * CPU 0 | CPU 1
404 * |
405 * cnt1 timestamps | cnt2 timestamps
406 * 6 | 8
407 * 7 | 9
408 * - | 10
409 *
 410 * Flush every event below timestamp 7
411 * etc...
412 */
413static int process_finished_round(event_t *event __used,
414 struct perf_session *session,
415 struct perf_event_ops *ops)
416{
417 flush_sample_queue(session, ops);
418 session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
419
420 return 0;
421}
422
423static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
424{
425 struct sample_queue *iter;
426
427 list_for_each_entry_reverse(iter, head, list) {
428 if (iter->timestamp < new->timestamp) {
429 list_add(&new->list, &iter->list);
430 return;
431 }
432 }
433
434 list_add(&new->list, head);
435}
436
437static void __queue_sample_before(struct sample_queue *new,
438 struct sample_queue *iter,
439 struct list_head *head)
440{
441 list_for_each_entry_continue_reverse(iter, head, list) {
442 if (iter->timestamp < new->timestamp) {
443 list_add(&new->list, &iter->list);
444 return;
445 }
446 }
447
448 list_add(&new->list, head);
449}
450
451static void __queue_sample_after(struct sample_queue *new,
452 struct sample_queue *iter,
453 struct list_head *head)
454{
455 list_for_each_entry_continue(iter, head, list) {
456 if (iter->timestamp > new->timestamp) {
457 list_add_tail(&new->list, &iter->list);
458 return;
459 }
460 }
461 list_add_tail(&new->list, head);
462}
463
464/* The queue is ordered by time */
465static void __queue_sample_event(struct sample_queue *new,
466 struct perf_session *s)
467{
468 struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
469 struct list_head *head = &s->ordered_samples.samples_head;
470
471
472 if (!last_inserted) {
473 __queue_sample_end(new, head);
474 return;
475 }
476
477 /*
478 * Most of the time the current event has a timestamp
479 * very close to the last event inserted, unless we just switched
 480 * to another event buffer. Keeping the queue as a list and searching
 481 * from the last inserted event, which is usually close to the current
 482 * one, is probably more efficient than an rbtree-based sort.
483 */
484 if (last_inserted->timestamp >= new->timestamp)
485 __queue_sample_before(new, last_inserted, head);
486 else
487 __queue_sample_after(new, last_inserted, head);
488}
489
490static int queue_sample_event(event_t *event, struct sample_data *data,
491 struct perf_session *s)
492{
493 u64 timestamp = data->time;
494 struct sample_queue *new;
495
496
497 if (timestamp < s->ordered_samples.last_flush) {
498 printf("Warning: Timestamp below last timeslice flush\n");
499 return -EINVAL;
500 }
501
502 new = malloc(sizeof(*new));
503 if (!new)
504 return -ENOMEM;
505
506 new->timestamp = timestamp;
507
508 new->event = malloc(event->header.size);
509 if (!new->event) {
510 free(new);
511 return -ENOMEM;
512 }
513
514 memcpy(new->event, event, event->header.size);
515
516 __queue_sample_event(new, s);
517 s->ordered_samples.last_inserted = new;
518
519 if (new->timestamp > s->ordered_samples.max_timestamp)
520 s->ordered_samples.max_timestamp = new->timestamp;
521
522 return 0;
523}
524
525static int perf_session__process_sample(event_t *event, struct perf_session *s,
526 struct perf_event_ops *ops)
527{
528 struct sample_data data;
529
530 if (!ops->ordered_samples)
531 return ops->sample(event, s);
532
533 bzero(&data, sizeof(struct sample_data));
534 event__parse_sample(event, s->sample_type, &data);
535
536 queue_sample_event(event, &data, s);
537
538 return 0;
539}
540
286static int perf_session__process_event(struct perf_session *self, 541static int perf_session__process_event(struct perf_session *self,
287 event_t *event, 542 event_t *event,
288 struct perf_event_ops *ops, 543 struct perf_event_ops *ops,
@@ -290,12 +545,11 @@ static int perf_session__process_event(struct perf_session *self,
290{ 545{
291 trace_event(event); 546 trace_event(event);
292 547
293 if (event->header.type < PERF_RECORD_MAX) { 548 if (event->header.type < PERF_RECORD_HEADER_MAX) {
294 dump_printf("%#Lx [%#x]: PERF_RECORD_%s", 549 dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
295 offset + head, event->header.size, 550 offset + head, event->header.size,
296 event__name[event->header.type]); 551 event__name[event->header.type]);
297 ++event__total[0]; 552 hists__inc_nr_events(&self->hists, event->header.type);
298 ++event__total[event->header.type];
299 } 553 }
300 554
301 if (self->header.needs_swap && event__swap_ops[event->header.type]) 555 if (self->header.needs_swap && event__swap_ops[event->header.type])
@@ -303,7 +557,7 @@ static int perf_session__process_event(struct perf_session *self,
303 557
304 switch (event->header.type) { 558 switch (event->header.type) {
305 case PERF_RECORD_SAMPLE: 559 case PERF_RECORD_SAMPLE:
306 return ops->sample(event, self); 560 return perf_session__process_sample(event, self, ops);
307 case PERF_RECORD_MMAP: 561 case PERF_RECORD_MMAP:
308 return ops->mmap(event, self); 562 return ops->mmap(event, self);
309 case PERF_RECORD_COMM: 563 case PERF_RECORD_COMM:
@@ -320,8 +574,20 @@ static int perf_session__process_event(struct perf_session *self,
320 return ops->throttle(event, self); 574 return ops->throttle(event, self);
321 case PERF_RECORD_UNTHROTTLE: 575 case PERF_RECORD_UNTHROTTLE:
322 return ops->unthrottle(event, self); 576 return ops->unthrottle(event, self);
577 case PERF_RECORD_HEADER_ATTR:
578 return ops->attr(event, self);
579 case PERF_RECORD_HEADER_EVENT_TYPE:
580 return ops->event_type(event, self);
581 case PERF_RECORD_HEADER_TRACING_DATA:
582 /* setup for reading amidst mmap */
583 lseek(self->fd, offset + head, SEEK_SET);
584 return ops->tracing_data(event, self);
585 case PERF_RECORD_HEADER_BUILD_ID:
586 return ops->build_id(event, self);
587 case PERF_RECORD_FINISHED_ROUND:
588 return ops->finished_round(event, self, ops);
323 default: 589 default:
324 self->unknown_events++; 590 ++self->hists.stats.nr_unknown_events;
325 return -1; 591 return -1;
326 } 592 }
327} 593}
@@ -333,56 +599,114 @@ void perf_event_header__bswap(struct perf_event_header *self)
333 self->size = bswap_16(self->size); 599 self->size = bswap_16(self->size);
334} 600}
335 601
336int perf_header__read_build_ids(struct perf_header *self, 602static struct thread *perf_session__register_idle_thread(struct perf_session *self)
337 int input, u64 offset, u64 size)
338{ 603{
339 struct build_id_event bev; 604 struct thread *thread = perf_session__findnew(self, 0);
340 char filename[PATH_MAX];
341 u64 limit = offset + size;
342 int err = -1;
343
344 while (offset < limit) {
345 struct dso *dso;
346 ssize_t len;
347 struct list_head *head = &dsos__user;
348 605
349 if (read(input, &bev, sizeof(bev)) != sizeof(bev)) 606 if (thread == NULL || thread__set_comm(thread, "swapper")) {
350 goto out; 607 pr_err("problem inserting idle task.\n");
608 thread = NULL;
609 }
351 610
352 if (self->needs_swap) 611 return thread;
353 perf_event_header__bswap(&bev.header); 612}
354 613
355 len = bev.header.size - sizeof(bev); 614int do_read(int fd, void *buf, size_t size)
356 if (read(input, filename, len) != len) 615{
357 goto out; 616 void *buf_start = buf;
358 617
359 if (bev.header.misc & PERF_RECORD_MISC_KERNEL) 618 while (size) {
360 head = &dsos__kernel; 619 int ret = read(fd, buf, size);
361 620
362 dso = __dsos__findnew(head, filename); 621 if (ret <= 0)
363 if (dso != NULL) { 622 return ret;
364 dso__set_build_id(dso, &bev.build_id);
365 if (head == &dsos__kernel && filename[0] == '[')
366 dso->kernel = 1;
367 }
368 623
369 offset += bev.header.size; 624 size -= ret;
625 buf += ret;
370 } 626 }
371 err = 0; 627
372out: 628 return buf - buf_start;
373 return err;
374} 629}
375 630
376static struct thread *perf_session__register_idle_thread(struct perf_session *self) 631#define session_done() (*(volatile int *)(&session_done))
632volatile int session_done;
633
634static int __perf_session__process_pipe_events(struct perf_session *self,
635 struct perf_event_ops *ops)
377{ 636{
378 struct thread *thread = perf_session__findnew(self, 0); 637 event_t event;
638 uint32_t size;
639 int skip = 0;
640 u64 head;
641 int err;
642 void *p;
379 643
380 if (thread == NULL || thread__set_comm(thread, "swapper")) { 644 perf_event_ops__fill_defaults(ops);
381 pr_err("problem inserting idle task.\n"); 645
382 thread = NULL; 646 head = 0;
647more:
648 err = do_read(self->fd, &event, sizeof(struct perf_event_header));
649 if (err <= 0) {
650 if (err == 0)
651 goto done;
652
653 pr_err("failed to read event header\n");
654 goto out_err;
383 } 655 }
384 656
385 return thread; 657 if (self->header.needs_swap)
658 perf_event_header__bswap(&event.header);
659
660 size = event.header.size;
661 if (size == 0)
662 size = 8;
663
664 p = &event;
665 p += sizeof(struct perf_event_header);
666
667 if (size - sizeof(struct perf_event_header)) {
668 err = do_read(self->fd, p,
669 size - sizeof(struct perf_event_header));
670 if (err <= 0) {
671 if (err == 0) {
672 pr_err("unexpected end of event stream\n");
673 goto done;
674 }
675
676 pr_err("failed to read event data\n");
677 goto out_err;
678 }
679 }
680
681 if (size == 0 ||
682 (skip = perf_session__process_event(self, &event, ops,
683 0, head)) < 0) {
684 dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
685 head, event.header.size, event.header.type);
686 /*
687 * assume we lost track of the stream, check alignment, and
 688 * increment a single u64 in the hope of catching up again 'soon'.
689 */
690 if (unlikely(head & 7))
691 head &= ~7ULL;
692
693 size = 8;
694 }
695
696 head += size;
697
698 dump_printf("\n%#Lx [%#x]: event: %d\n",
699 head, event.header.size, event.header.type);
700
701 if (skip > 0)
702 head += skip;
703
704 if (!session_done())
705 goto more;
706done:
707 err = 0;
708out_err:
709 return err;
386} 710}
387 711
388int __perf_session__process_events(struct perf_session *self, 712int __perf_session__process_events(struct perf_session *self,
@@ -396,6 +720,10 @@ int __perf_session__process_events(struct perf_session *self,
396 event_t *event; 720 event_t *event;
397 uint32_t size; 721 uint32_t size;
398 char *buf; 722 char *buf;
723 struct ui_progress *progress = ui_progress__new("Processing events...",
724 self->size);
725 if (progress == NULL)
726 return -1;
399 727
400 perf_event_ops__fill_defaults(ops); 728 perf_event_ops__fill_defaults(ops);
401 729
@@ -424,6 +752,7 @@ remap:
424 752
425more: 753more:
426 event = (event_t *)(buf + head); 754 event = (event_t *)(buf + head);
755 ui_progress__update(progress, offset);
427 756
428 if (self->header.needs_swap) 757 if (self->header.needs_swap)
429 perf_event_header__bswap(&event->header); 758 perf_event_header__bswap(&event->header);
@@ -473,7 +802,11 @@ more:
473 goto more; 802 goto more;
474done: 803done:
475 err = 0; 804 err = 0;
805 /* do the final flush for ordered samples */
806 self->ordered_samples.next_flush = ULLONG_MAX;
807 flush_sample_queue(self, ops);
476out_err: 808out_err:
809 ui_progress__delete(progress);
477 return err; 810 return err;
478} 811}
479 812
@@ -502,9 +835,13 @@ out_getcwd_err:
502 self->cwdlen = strlen(self->cwd); 835 self->cwdlen = strlen(self->cwd);
503 } 836 }
504 837
505 err = __perf_session__process_events(self, self->header.data_offset, 838 if (!self->fd_pipe)
506 self->header.data_size, 839 err = __perf_session__process_events(self,
507 self->size, ops); 840 self->header.data_offset,
841 self->header.data_size,
842 self->size, ops);
843 else
844 err = __perf_session__process_pipe_events(self, ops);
508out_err: 845out_err:
509 return err; 846 return err;
510} 847}
@@ -519,56 +856,41 @@ bool perf_session__has_traces(struct perf_session *self, const char *msg)
519 return true; 856 return true;
520} 857}
521 858
522int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self, 859int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
523 const char *symbol_name, 860 const char *symbol_name,
524 u64 addr) 861 u64 addr)
525{ 862{
526 char *bracket; 863 char *bracket;
527 enum map_type i; 864 enum map_type i;
865 struct ref_reloc_sym *ref;
528 866
529 self->ref_reloc_sym.name = strdup(symbol_name); 867 ref = zalloc(sizeof(struct ref_reloc_sym));
530 if (self->ref_reloc_sym.name == NULL) 868 if (ref == NULL)
531 return -ENOMEM; 869 return -ENOMEM;
532 870
533 bracket = strchr(self->ref_reloc_sym.name, ']'); 871 ref->name = strdup(symbol_name);
872 if (ref->name == NULL) {
873 free(ref);
874 return -ENOMEM;
875 }
876
877 bracket = strchr(ref->name, ']');
534 if (bracket) 878 if (bracket)
535 *bracket = '\0'; 879 *bracket = '\0';
536 880
537 self->ref_reloc_sym.addr = addr; 881 ref->addr = addr;
538 882
539 for (i = 0; i < MAP__NR_TYPES; ++i) { 883 for (i = 0; i < MAP__NR_TYPES; ++i) {
540 struct kmap *kmap = map__kmap(self->vmlinux_maps[i]); 884 struct kmap *kmap = map__kmap(maps[i]);
541 kmap->ref_reloc_sym = &self->ref_reloc_sym; 885 kmap->ref_reloc_sym = ref;
542 } 886 }
543 887
544 return 0; 888 return 0;
545} 889}
546 890
547static u64 map__reloc_map_ip(struct map *map, u64 ip) 891size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
548{
549 return ip + (s64)map->pgoff;
550}
551
552static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
553{
554 return ip - (s64)map->pgoff;
555}
556
557void map__reloc_vmlinux(struct map *self)
558{ 892{
559 struct kmap *kmap = map__kmap(self); 893 return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
560 s64 reloc; 894 __dsos__fprintf(&self->host_machine.user_dsos, fp) +
561 895 machines__fprintf_dsos(&self->machines, fp);
562 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
563 return;
564
565 reloc = (kmap->ref_reloc_sym->unrelocated_addr -
566 kmap->ref_reloc_sym->addr);
567
568 if (!reloc)
569 return;
570
571 self->map_ip = map__reloc_map_ip;
572 self->unmap_ip = map__reloc_unmap_ip;
573 self->pgoff = reloc;
574} 896}
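
A hedged sketch of the pipe mode wired up above: passing "-" as the file name puts the session into fd_pipe mode, so events are pulled from stdin with do_read() instead of mmap()ing a perf.data file. The empty ops struct relies on perf_event_ops__fill_defaults() to install stubs; all names other than the session API are illustrative.

	static struct perf_event_ops demo_ops;	/* NULL handlers become stubs */

	static int read_events_from_pipe(void)
	{
		struct perf_session *s;

		/* "-" selects fd_pipe mode; no repipe of the stream */
		s = perf_session__new("-", O_RDONLY, false, false);
		if (s == NULL)
			return -1;

		perf_session__process_events(s, &demo_ops);
		perf_session__delete(s);
		return 0;
	}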
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 5c33417eebb3..e7fce486ebe2 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -1,6 +1,7 @@
1#ifndef __PERF_SESSION_H 1#ifndef __PERF_SESSION_H
2#define __PERF_SESSION_H 2#define __PERF_SESSION_H
3 3
4#include "hist.h"
4#include "event.h" 5#include "event.h"
5#include "header.h" 6#include "header.h"
6#include "symbol.h" 7#include "symbol.h"
@@ -8,45 +9,69 @@
8#include <linux/rbtree.h> 9#include <linux/rbtree.h>
9#include "../../../include/linux/perf_event.h" 10#include "../../../include/linux/perf_event.h"
10 11
12struct sample_queue;
11struct ip_callchain; 13struct ip_callchain;
12struct thread; 14struct thread;
13 15
16struct ordered_samples {
17 u64 last_flush;
18 u64 next_flush;
19 u64 max_timestamp;
20 struct list_head samples_head;
21 struct sample_queue *last_inserted;
22};
23
14struct perf_session { 24struct perf_session {
15 struct perf_header header; 25 struct perf_header header;
16 unsigned long size; 26 unsigned long size;
17 unsigned long mmap_window; 27 unsigned long mmap_window;
18 struct map_groups kmaps;
19 struct rb_root threads; 28 struct rb_root threads;
20 struct thread *last_match; 29 struct thread *last_match;
21 struct map *vmlinux_maps[MAP__NR_TYPES]; 30 struct machine host_machine;
22 struct events_stats events_stats; 31 struct rb_root machines;
23 struct rb_root stats_by_id; 32 struct rb_root hists_tree;
24 unsigned long event_total[PERF_RECORD_MAX]; 33 /*
25 unsigned long unknown_events; 34 * FIXME: should point to the first entry in hists_tree and
 26 struct rb_root hists; 35 * be a hists instance. Right now it's only 'report'
36 * that is using ->hists_tree while all the rest use
37 * ->hists.
38 */
39 struct hists hists;
27 u64 sample_type; 40 u64 sample_type;
28 struct ref_reloc_sym ref_reloc_sym;
29 int fd; 41 int fd;
42 bool fd_pipe;
43 bool repipe;
30 int cwdlen; 44 int cwdlen;
31 char *cwd; 45 char *cwd;
46 struct ordered_samples ordered_samples;
32 char filename[0]; 47 char filename[0];
33}; 48};
34 49
50struct perf_event_ops;
51
35typedef int (*event_op)(event_t *self, struct perf_session *session); 52typedef int (*event_op)(event_t *self, struct perf_session *session);
53typedef int (*event_op2)(event_t *self, struct perf_session *session,
54 struct perf_event_ops *ops);
36 55
37struct perf_event_ops { 56struct perf_event_ops {
38 event_op sample, 57 event_op sample,
39 mmap, 58 mmap,
40 comm, 59 comm,
41 fork, 60 fork,
42 exit, 61 exit,
43 lost, 62 lost,
44 read, 63 read,
45 throttle, 64 throttle,
46 unthrottle; 65 unthrottle,
66 attr,
67 event_type,
68 tracing_data,
69 build_id;
70 event_op2 finished_round;
71 bool ordered_samples;
47}; 72};
48 73
49struct perf_session *perf_session__new(const char *filename, int mode, bool force); 74struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe);
50void perf_session__delete(struct perf_session *self); 75void perf_session__delete(struct perf_session *self);
51 76
52void perf_event_header__bswap(struct perf_event_header *self); 77void perf_event_header__bswap(struct perf_event_header *self);
@@ -57,33 +82,66 @@ int __perf_session__process_events(struct perf_session *self,
57int perf_session__process_events(struct perf_session *self, 82int perf_session__process_events(struct perf_session *self,
58 struct perf_event_ops *event_ops); 83 struct perf_event_ops *event_ops);
59 84
60struct symbol **perf_session__resolve_callchain(struct perf_session *self, 85struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
61 struct thread *thread, 86 struct thread *thread,
62 struct ip_callchain *chain, 87 struct ip_callchain *chain,
63 struct symbol **parent); 88 struct symbol **parent);
64 89
65bool perf_session__has_traces(struct perf_session *self, const char *msg); 90bool perf_session__has_traces(struct perf_session *self, const char *msg);
66 91
67int perf_header__read_build_ids(struct perf_header *self, int input, 92int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
68 u64 offset, u64 file_size);
69
70int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
71 const char *symbol_name, 93 const char *symbol_name,
72 u64 addr); 94 u64 addr);
73 95
74void mem_bswap_64(void *src, int byte_size); 96void mem_bswap_64(void *src, int byte_size);
75 97
76static inline int __perf_session__create_kernel_maps(struct perf_session *self, 98int perf_session__create_kernel_maps(struct perf_session *self);
77 struct dso *kernel) 99
100int do_read(int fd, void *buf, size_t size);
101void perf_session__update_sample_type(struct perf_session *self);
102
103static inline
104struct machine *perf_session__find_host_machine(struct perf_session *self)
105{
106 return &self->host_machine;
107}
108
109static inline
110struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid)
111{
112 if (pid == HOST_KERNEL_ID)
113 return &self->host_machine;
114 return machines__find(&self->machines, pid);
115}
116
117static inline
118struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t pid)
119{
120 if (pid == HOST_KERNEL_ID)
121 return &self->host_machine;
122 return machines__findnew(&self->machines, pid);
123}
124
125static inline
126void perf_session__process_machines(struct perf_session *self,
127 machine__process_t process)
128{
129 process(&self->host_machine, self);
130 return machines__process(&self->machines, process, self);
131}
132
133size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
134
135static inline
136size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
137 bool with_hits)
78{ 138{
79 return __map_groups__create_kernel_maps(&self->kmaps, 139 return machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
80 self->vmlinux_maps, kernel);
81} 140}
82 141
83static inline struct map * 142static inline
84 perf_session__new_module_map(struct perf_session *self, 143size_t perf_session__fprintf_nr_events(struct perf_session *self, FILE *fp)
85 u64 start, const char *filename)
86{ 144{
87 return map_groups__new_module(&self->kmaps, start, filename); 145 return hists__fprintf_nr_events(&self->hists, fp);
88} 146}
89#endif /* __PERF_SESSION_H */ 147#endif /* __PERF_SESSION_H */
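
To illustrate the new ordered_samples flag, a hypothetical tool could declare its ops as below; process_sample is an assumed callback name, and because ordered_samples is set, perf_event_ops__fill_defaults() installs the real process_finished_round handler so queued samples are flushed at each PERF_RECORD_FINISHED_ROUND boundary.

	static int process_sample(event_t *event __used,
				  struct perf_session *session __used)
	{
		/* with ordered_samples, samples arrive here already sorted
		 * by timestamp, flushed round by round */
		return 0;
	}

	static struct perf_event_ops demo_ordered_ops = {
		.sample		 = process_sample,
		.ordered_samples = true,	/* queue and reorder samples */
	};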
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index cb0f327de9e8..2316cb5a4116 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1,10 +1,10 @@
1#include "sort.h" 1#include "sort.h"
2 2
3regex_t parent_regex; 3regex_t parent_regex;
4char default_parent_pattern[] = "^sys_|^do_page_fault"; 4const char default_parent_pattern[] = "^sys_|^do_page_fault";
5char *parent_pattern = default_parent_pattern; 5const char *parent_pattern = default_parent_pattern;
6char default_sort_order[] = "comm,dso,symbol"; 6const char default_sort_order[] = "comm,dso,symbol";
7char *sort_order = default_sort_order; 7const char *sort_order = default_sort_order;
8int sort__need_collapse = 0; 8int sort__need_collapse = 0;
9int sort__has_parent = 0; 9int sort__has_parent = 0;
10 10
@@ -18,39 +18,50 @@ char * field_sep;
18 18
19LIST_HEAD(hist_entry__sort_list); 19LIST_HEAD(hist_entry__sort_list);
20 20
21static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
22 size_t size, unsigned int width);
23static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
24 size_t size, unsigned int width);
25static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
26 size_t size, unsigned int width);
27static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
28 size_t size, unsigned int width);
29static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf,
30 size_t size, unsigned int width);
31
21struct sort_entry sort_thread = { 32struct sort_entry sort_thread = {
22 .header = "Command: Pid", 33 .se_header = "Command: Pid",
23 .cmp = sort__thread_cmp, 34 .se_cmp = sort__thread_cmp,
24 .print = sort__thread_print, 35 .se_snprintf = hist_entry__thread_snprintf,
25 .width = &threads__col_width, 36 .se_width = &threads__col_width,
26}; 37};
27 38
28struct sort_entry sort_comm = { 39struct sort_entry sort_comm = {
29 .header = "Command", 40 .se_header = "Command",
30 .cmp = sort__comm_cmp, 41 .se_cmp = sort__comm_cmp,
31 .collapse = sort__comm_collapse, 42 .se_collapse = sort__comm_collapse,
32 .print = sort__comm_print, 43 .se_snprintf = hist_entry__comm_snprintf,
33 .width = &comms__col_width, 44 .se_width = &comms__col_width,
34}; 45};
35 46
36struct sort_entry sort_dso = { 47struct sort_entry sort_dso = {
37 .header = "Shared Object", 48 .se_header = "Shared Object",
38 .cmp = sort__dso_cmp, 49 .se_cmp = sort__dso_cmp,
39 .print = sort__dso_print, 50 .se_snprintf = hist_entry__dso_snprintf,
40 .width = &dsos__col_width, 51 .se_width = &dsos__col_width,
41}; 52};
42 53
43struct sort_entry sort_sym = { 54struct sort_entry sort_sym = {
44 .header = "Symbol", 55 .se_header = "Symbol",
45 .cmp = sort__sym_cmp, 56 .se_cmp = sort__sym_cmp,
46 .print = sort__sym_print, 57 .se_snprintf = hist_entry__sym_snprintf,
47}; 58};
48 59
49struct sort_entry sort_parent = { 60struct sort_entry sort_parent = {
50 .header = "Parent symbol", 61 .se_header = "Parent symbol",
51 .cmp = sort__parent_cmp, 62 .se_cmp = sort__parent_cmp,
52 .print = sort__parent_print, 63 .se_snprintf = hist_entry__parent_snprintf,
53 .width = &parent_symbol__col_width, 64 .se_width = &parent_symbol__col_width,
54}; 65};
55 66
56struct sort_dimension { 67struct sort_dimension {
@@ -85,45 +96,38 @@ sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
85 return right->thread->pid - left->thread->pid; 96 return right->thread->pid - left->thread->pid;
86} 97}
87 98
88int repsep_fprintf(FILE *fp, const char *fmt, ...) 99static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
89{ 100{
90 int n; 101 int n;
91 va_list ap; 102 va_list ap;
92 103
93 va_start(ap, fmt); 104 va_start(ap, fmt);
94 if (!field_sep) 105 n = vsnprintf(bf, size, fmt, ap);
95 n = vfprintf(fp, fmt, ap); 106 if (field_sep && n > 0) {
96 else { 107 char *sep = bf;
97 char *bf = NULL; 108
98 n = vasprintf(&bf, fmt, ap); 109 while (1) {
99 if (n > 0) { 110 sep = strchr(sep, *field_sep);
100 char *sep = bf; 111 if (sep == NULL)
101 112 break;
102 while (1) { 113 *sep = '.';
103 sep = strchr(sep, *field_sep);
104 if (sep == NULL)
105 break;
106 *sep = '.';
107 }
108 } 114 }
109 fputs(bf, fp);
110 free(bf);
111 } 115 }
112 va_end(ap); 116 va_end(ap);
113 return n; 117 return n;
114} 118}
115 119
116size_t 120static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
117sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width) 121 size_t size, unsigned int width)
118{ 122{
119 return repsep_fprintf(fp, "%*s:%5d", width - 6, 123 return repsep_snprintf(bf, size, "%*s:%5d", width,
120 self->thread->comm ?: "", self->thread->pid); 124 self->thread->comm ?: "", self->thread->pid);
121} 125}
122 126
123size_t 127static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
124sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width) 128 size_t size, unsigned int width)
125{ 129{
126 return repsep_fprintf(fp, "%*s", width, self->thread->comm); 130 return repsep_snprintf(bf, size, "%*s", width, self->thread->comm);
127} 131}
128 132
129/* --sort dso */ 133/* --sort dso */
@@ -131,8 +135,8 @@ sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
131int64_t 135int64_t
132sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 136sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
133{ 137{
134 struct dso *dso_l = left->map ? left->map->dso : NULL; 138 struct dso *dso_l = left->ms.map ? left->ms.map->dso : NULL;
135 struct dso *dso_r = right->map ? right->map->dso : NULL; 139 struct dso *dso_r = right->ms.map ? right->ms.map->dso : NULL;
136 const char *dso_name_l, *dso_name_r; 140 const char *dso_name_l, *dso_name_r;
137 141
138 if (!dso_l || !dso_r) 142 if (!dso_l || !dso_r)
@@ -149,16 +153,16 @@ sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
149 return strcmp(dso_name_l, dso_name_r); 153 return strcmp(dso_name_l, dso_name_r);
150} 154}
151 155
152size_t 156static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
153sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width) 157 size_t size, unsigned int width)
154{ 158{
155 if (self->map && self->map->dso) { 159 if (self->ms.map && self->ms.map->dso) {
156 const char *dso_name = !verbose ? self->map->dso->short_name : 160 const char *dso_name = !verbose ? self->ms.map->dso->short_name :
157 self->map->dso->long_name; 161 self->ms.map->dso->long_name;
158 return repsep_fprintf(fp, "%-*s", width, dso_name); 162 return repsep_snprintf(bf, size, "%-*s", width, dso_name);
159 } 163 }
160 164
161 return repsep_fprintf(fp, "%*llx", width, (u64)self->ip); 165 return repsep_snprintf(bf, size, "%*Lx", width, self->ip);
162} 166}
163 167
164/* --sort symbol */ 168/* --sort symbol */
@@ -168,31 +172,31 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
168{ 172{
169 u64 ip_l, ip_r; 173 u64 ip_l, ip_r;
170 174
171 if (left->sym == right->sym) 175 if (left->ms.sym == right->ms.sym)
172 return 0; 176 return 0;
173 177
174 ip_l = left->sym ? left->sym->start : left->ip; 178 ip_l = left->ms.sym ? left->ms.sym->start : left->ip;
175 ip_r = right->sym ? right->sym->start : right->ip; 179 ip_r = right->ms.sym ? right->ms.sym->start : right->ip;
176 180
177 return (int64_t)(ip_r - ip_l); 181 return (int64_t)(ip_r - ip_l);
178} 182}
179 183
180 184static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
181size_t 185 size_t size, unsigned int width __used)
182sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
183{ 186{
184 size_t ret = 0; 187 size_t ret = 0;
185 188
186 if (verbose) { 189 if (verbose) {
187 char o = self->map ? dso__symtab_origin(self->map->dso) : '!'; 190 char o = self->ms.map ? dso__symtab_origin(self->ms.map->dso) : '!';
188 ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, o); 191 ret += repsep_snprintf(bf, size, "%#018llx %c ", self->ip, o);
189 } 192 }
190 193
191 ret += repsep_fprintf(fp, "[%c] ", self->level); 194 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", self->level);
192 if (self->sym) 195 if (self->ms.sym)
193 ret += repsep_fprintf(fp, "%s", self->sym->name); 196 ret += repsep_snprintf(bf + ret, size - ret, "%s",
197 self->ms.sym->name);
194 else 198 else
195 ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip); 199 ret += repsep_snprintf(bf + ret, size - ret, "%#016llx", self->ip);
196 200
197 return ret; 201 return ret;
198} 202}
@@ -231,10 +235,10 @@ sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
231 return strcmp(sym_l->name, sym_r->name); 235 return strcmp(sym_l->name, sym_r->name);
232} 236}
233 237
234size_t 238static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf,
235sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width) 239 size_t size, unsigned int width)
236{ 240{
237 return repsep_fprintf(fp, "%-*s", width, 241 return repsep_snprintf(bf, size, "%-*s", width,
238 self->parent ? self->parent->name : "[other]"); 242 self->parent ? self->parent->name : "[other]");
239} 243}
240 244
@@ -251,7 +255,7 @@ int sort_dimension__add(const char *tok)
251 if (strncasecmp(tok, sd->name, strlen(tok))) 255 if (strncasecmp(tok, sd->name, strlen(tok)))
252 continue; 256 continue;
253 257
254 if (sd->entry->collapse) 258 if (sd->entry->se_collapse)
255 sort__need_collapse = 1; 259 sort__need_collapse = 1;
256 260
257 if (sd->entry == &sort_parent) { 261 if (sd->entry == &sort_parent) {
@@ -260,9 +264,8 @@ int sort_dimension__add(const char *tok)
260 char err[BUFSIZ]; 264 char err[BUFSIZ];
261 265
262 regerror(ret, &parent_regex, err, sizeof(err)); 266 regerror(ret, &parent_regex, err, sizeof(err));
263 fprintf(stderr, "Invalid regex: %s\n%s", 267 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
264 parent_pattern, err); 268 return -EINVAL;
265 exit(-1);
266 } 269 }
267 sort__has_parent = 1; 270 sort__has_parent = 1;
268 } 271 }
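The sort.c hunks above convert every dimension from a FILE*-based print callback to a buffer-based se_snprintf callback, presumably so the same formatting code can feed both the stdio output and the newt browser added elsewhere in this series. For illustration only, a minimal sketch of how a new dimension could be wired to that interface; sort_pid, sort__pid_cmp and hist_entry__pid_snprintf are made-up names, while repsep_snprintf and the se_* fields are the ones introduced by this diff:

/* hypothetical "--sort pid" dimension, not part of this patch */
static int64_t
sort__pid_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static int hist_entry__pid_snprintf(struct hist_entry *self, char *bf,
				    size_t size, unsigned int width)
{
	/* format into the caller's buffer instead of a FILE* */
	return repsep_snprintf(bf, size, "%*d", width, self->thread->pid);
}

struct sort_entry sort_pid = {
	.se_header   = "Pid",
	.se_cmp      = sort__pid_cmp,
	.se_snprintf = hist_entry__pid_snprintf,
};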
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 753f9ea99fb0..0d61c4082f43 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -25,10 +25,10 @@
25#include "sort.h" 25#include "sort.h"
26 26
27extern regex_t parent_regex; 27extern regex_t parent_regex;
28extern char *sort_order; 28extern const char *sort_order;
29extern char default_parent_pattern[]; 29extern const char default_parent_pattern[];
30extern char *parent_pattern; 30extern const char *parent_pattern;
31extern char default_sort_order[]; 31extern const char default_sort_order[];
32extern int sort__need_collapse; 32extern int sort__need_collapse;
33extern int sort__has_parent; 33extern int sort__has_parent;
34extern char *field_sep; 34extern char *field_sep;
@@ -43,19 +43,24 @@ extern enum sort_type sort__first_dimension;
43 43
44struct hist_entry { 44struct hist_entry {
45 struct rb_node rb_node; 45 struct rb_node rb_node;
46 u64 count; 46 u64 period;
47 u64 period_sys;
48 u64 period_us;
49 u64 period_guest_sys;
50 u64 period_guest_us;
51 struct map_symbol ms;
47 struct thread *thread; 52 struct thread *thread;
48 struct map *map;
49 struct symbol *sym;
50 u64 ip; 53 u64 ip;
54 u32 nr_events;
51 char level; 55 char level;
52 struct symbol *parent; 56 u8 filtered;
53 struct callchain_node callchain; 57 struct symbol *parent;
54 union { 58 union {
55 unsigned long position; 59 unsigned long position;
56 struct hist_entry *pair; 60 struct hist_entry *pair;
57 struct rb_root sorted_chain; 61 struct rb_root sorted_chain;
58 }; 62 };
63 struct callchain_node callchain[0];
59}; 64};
60 65
61enum sort_type { 66enum sort_type {
@@ -73,12 +78,13 @@ enum sort_type {
73struct sort_entry { 78struct sort_entry {
74 struct list_head list; 79 struct list_head list;
75 80
76 const char *header; 81 const char *se_header;
77 82
78 int64_t (*cmp)(struct hist_entry *, struct hist_entry *); 83 int64_t (*se_cmp)(struct hist_entry *, struct hist_entry *);
79 int64_t (*collapse)(struct hist_entry *, struct hist_entry *); 84 int64_t (*se_collapse)(struct hist_entry *, struct hist_entry *);
80 size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width); 85 int (*se_snprintf)(struct hist_entry *self, char *bf, size_t size,
81 unsigned int *width; 86 unsigned int width);
87 unsigned int *se_width;
82 bool elide; 88 bool elide;
83}; 89};
84 90
@@ -87,7 +93,6 @@ extern struct list_head hist_entry__sort_list;
87 93
88void setup_sorting(const char * const usagestr[], const struct option *opts); 94void setup_sorting(const char * const usagestr[], const struct option *opts);
89 95
90extern int repsep_fprintf(FILE *fp, const char *fmt, ...);
91extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int); 96extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int);
92extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int); 97extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int);
93extern size_t sort__dso_print(FILE *, struct hist_entry *, unsigned int); 98extern size_t sort__dso_print(FILE *, struct hist_entry *, unsigned int);
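In the reworked hist_entry the callchain member becomes a trailing zero-length array, so a callchain node is only paid for when callchains are actually collected. Below is a sketch of the allocation pattern this enables, assuming the usual hist.c includes and the callchain_init() helper from callchain.h; the real constructor lives in hist.c (not shown in this hunk) and may differ in detail:

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	/* only allocate the callchain node when callchains are enabled */
	size_t callchain_size = symbol_conf.use_callchain ?
				sizeof(struct callchain_node) : 0;
	struct hist_entry *self = malloc(sizeof(*self) + callchain_size);

	if (self != NULL) {
		*self = *template;
		if (symbol_conf.use_callchain)
			callchain_init(self->callchain);
	}

	return self;
}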
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index a175949ed216..0409fc7c0058 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -1,48 +1,5 @@
1#include "string.h"
2#include "util.h" 1#include "util.h"
3 2#include "string.h"
4static int hex(char ch)
5{
6 if ((ch >= '0') && (ch <= '9'))
7 return ch - '0';
8 if ((ch >= 'a') && (ch <= 'f'))
9 return ch - 'a' + 10;
10 if ((ch >= 'A') && (ch <= 'F'))
11 return ch - 'A' + 10;
12 return -1;
13}
14
15/*
16 * While we find nice hex chars, build a long_val.
17 * Return number of chars processed.
18 */
19int hex2u64(const char *ptr, u64 *long_val)
20{
21 const char *p = ptr;
22 *long_val = 0;
23
24 while (*p) {
25 const int hex_val = hex(*p);
26
27 if (hex_val < 0)
28 break;
29
30 *long_val = (*long_val << 4) | hex_val;
31 p++;
32 }
33
34 return p - ptr;
35}
36
37char *strxfrchar(char *s, char from, char to)
38{
39 char *p = s;
40
41 while ((p = strchr(p, from)) != NULL)
42 *p++ = to;
43
44 return s;
45}
46 3
47#define K 1024LL 4#define K 1024LL
48/* 5/*
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h
deleted file mode 100644
index 542e44de3719..000000000000
--- a/tools/perf/util/string.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef __PERF_STRING_H_
2#define __PERF_STRING_H_
3
4#include <stdbool.h>
5#include "types.h"
6
7int hex2u64(const char *ptr, u64 *val);
8char *strxfrchar(char *s, char from, char to);
9s64 perf_atoll(const char *str);
10char **argv_split(const char *str, int *argcp);
11void argv_free(char **argv);
12bool strglobmatch(const char *str, const char *pat);
13bool strlazymatch(const char *str, const char *pat);
14
15#define _STR(x) #x
16#define STR(x) _STR(x)
17
18#endif /* __PERF_STRING_H */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index c458c4a371d1..a06131f6259a 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1,13 +1,19 @@
1#include "util.h" 1#define _GNU_SOURCE
2#include "../perf.h" 2#include <ctype.h>
3#include "sort.h" 3#include <dirent.h>
4#include "string.h" 4#include <errno.h>
5#include <libgen.h>
6#include <stdlib.h>
7#include <stdio.h>
8#include <string.h>
9#include <sys/types.h>
10#include <sys/stat.h>
11#include <sys/param.h>
12#include <fcntl.h>
13#include <unistd.h>
5#include "symbol.h" 14#include "symbol.h"
6#include "thread.h" 15#include "strlist.h"
7 16
8#include "debug.h"
9
10#include <asm/bug.h>
11#include <libelf.h> 17#include <libelf.h>
12#include <gelf.h> 18#include <gelf.h>
13#include <elf.h> 19#include <elf.h>
@@ -18,22 +24,12 @@
18#define NT_GNU_BUILD_ID 3 24#define NT_GNU_BUILD_ID 3
19#endif 25#endif
20 26
21enum dso_origin {
22 DSO__ORIG_KERNEL = 0,
23 DSO__ORIG_JAVA_JIT,
24 DSO__ORIG_BUILD_ID_CACHE,
25 DSO__ORIG_FEDORA,
26 DSO__ORIG_UBUNTU,
27 DSO__ORIG_BUILDID,
28 DSO__ORIG_DSO,
29 DSO__ORIG_KMODULE,
30 DSO__ORIG_NOT_FOUND,
31};
32
33static void dsos__add(struct list_head *head, struct dso *dso); 27static void dsos__add(struct list_head *head, struct dso *dso);
34static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); 28static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
35static int dso__load_kernel_sym(struct dso *self, struct map *map, 29static int dso__load_kernel_sym(struct dso *self, struct map *map,
36 symbol_filter_t filter); 30 symbol_filter_t filter);
31static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
32 symbol_filter_t filter);
37static int vmlinux_path__nr_entries; 33static int vmlinux_path__nr_entries;
38static char **vmlinux_path; 34static char **vmlinux_path;
39 35
@@ -126,16 +122,17 @@ static void map_groups__fixup_end(struct map_groups *self)
126static struct symbol *symbol__new(u64 start, u64 len, const char *name) 122static struct symbol *symbol__new(u64 start, u64 len, const char *name)
127{ 123{
128 size_t namelen = strlen(name) + 1; 124 size_t namelen = strlen(name) + 1;
129 struct symbol *self = zalloc(symbol_conf.priv_size + 125 struct symbol *self = calloc(1, (symbol_conf.priv_size +
130 sizeof(*self) + namelen); 126 sizeof(*self) + namelen));
131 if (self == NULL) 127 if (self == NULL)
132 return NULL; 128 return NULL;
133 129
134 if (symbol_conf.priv_size) 130 if (symbol_conf.priv_size)
135 self = ((void *)self) + symbol_conf.priv_size; 131 self = ((void *)self) + symbol_conf.priv_size;
136 132
137 self->start = start; 133 self->start = start;
138 self->end = len ? start + len - 1 : start; 134 self->end = len ? start + len - 1 : start;
135 self->namelen = namelen - 1;
139 136
140 pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); 137 pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
141 138
@@ -178,7 +175,7 @@ static void dso__set_basename(struct dso *self)
178 175
179struct dso *dso__new(const char *name) 176struct dso *dso__new(const char *name)
180{ 177{
181 struct dso *self = zalloc(sizeof(*self) + strlen(name) + 1); 178 struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1);
182 179
183 if (self != NULL) { 180 if (self != NULL) {
184 int i; 181 int i;
@@ -192,6 +189,8 @@ struct dso *dso__new(const char *name)
192 self->loaded = 0; 189 self->loaded = 0;
193 self->sorted_by_name = 0; 190 self->sorted_by_name = 0;
194 self->has_build_id = 0; 191 self->has_build_id = 0;
192 self->kernel = DSO_TYPE_USER;
193 INIT_LIST_HEAD(&self->node);
195 } 194 }
196 195
197 return self; 196 return self;
@@ -408,12 +407,9 @@ int kallsyms__parse(const char *filename, void *arg,
408 char *symbol_name; 407 char *symbol_name;
409 408
410 line_len = getline(&line, &n, file); 409 line_len = getline(&line, &n, file);
411 if (line_len < 0) 410 if (line_len < 0 || !line)
412 break; 411 break;
413 412
414 if (!line)
415 goto out_failure;
416
417 line[--line_len] = '\0'; /* \n */ 413 line[--line_len] = '\0'; /* \n */
418 414
419 len = hex2u64(line, &start); 415 len = hex2u64(line, &start);
@@ -465,6 +461,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
465 * map__split_kallsyms, when we have split the maps per module 461 * map__split_kallsyms, when we have split the maps per module
466 */ 462 */
467 symbols__insert(root, sym); 463 symbols__insert(root, sym);
464
468 return 0; 465 return 0;
469} 466}
470 467
@@ -489,6 +486,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
489 symbol_filter_t filter) 486 symbol_filter_t filter)
490{ 487{
491 struct map_groups *kmaps = map__kmap(map)->kmaps; 488 struct map_groups *kmaps = map__kmap(map)->kmaps;
489 struct machine *machine = kmaps->machine;
492 struct map *curr_map = map; 490 struct map *curr_map = map;
493 struct symbol *pos; 491 struct symbol *pos;
494 int count = 0; 492 int count = 0;
@@ -510,15 +508,33 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
510 *module++ = '\0'; 508 *module++ = '\0';
511 509
512 if (strcmp(curr_map->dso->short_name, module)) { 510 if (strcmp(curr_map->dso->short_name, module)) {
513 curr_map = map_groups__find_by_name(kmaps, map->type, module); 511 if (curr_map != map &&
512 self->kernel == DSO_TYPE_GUEST_KERNEL &&
513 machine__is_default_guest(machine)) {
514 /*
515 * We assume all symbols of a module are
516 * continuous in kallsyms, so curr_map
517 * points to a module and all its
518 * symbols are in its kmap. Mark it as
519 * loaded.
520 */
521 dso__set_loaded(curr_map->dso,
522 curr_map->type);
523 }
524
525 curr_map = map_groups__find_by_name(kmaps,
526 map->type, module);
514 if (curr_map == NULL) { 527 if (curr_map == NULL) {
515 pr_debug("/proc/{kallsyms,modules} " 528 pr_debug("%s/proc/{kallsyms,modules} "
516 "inconsistency while looking " 529 "inconsistency while looking "
517 "for \"%s\" module!\n", module); 530 "for \"%s\" module!\n",
518 return -1; 531 machine->root_dir, module);
532 curr_map = map;
533 goto discard_symbol;
519 } 534 }
520 535
521 if (curr_map->dso->loaded) 536 if (curr_map->dso->loaded &&
537 !machine__is_default_guest(machine))
522 goto discard_symbol; 538 goto discard_symbol;
523 } 539 }
524 /* 540 /*
@@ -531,13 +547,21 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
531 char dso_name[PATH_MAX]; 547 char dso_name[PATH_MAX];
532 struct dso *dso; 548 struct dso *dso;
533 549
534 snprintf(dso_name, sizeof(dso_name), "[kernel].%d", 550 if (self->kernel == DSO_TYPE_GUEST_KERNEL)
535 kernel_range++); 551 snprintf(dso_name, sizeof(dso_name),
552 "[guest.kernel].%d",
553 kernel_range++);
554 else
555 snprintf(dso_name, sizeof(dso_name),
556 "[kernel].%d",
557 kernel_range++);
536 558
537 dso = dso__new(dso_name); 559 dso = dso__new(dso_name);
538 if (dso == NULL) 560 if (dso == NULL)
539 return -1; 561 return -1;
540 562
563 dso->kernel = self->kernel;
564
541 curr_map = map__new2(pos->start, dso, map->type); 565 curr_map = map__new2(pos->start, dso, map->type);
542 if (curr_map == NULL) { 566 if (curr_map == NULL) {
543 dso__delete(dso); 567 dso__delete(dso);
@@ -561,6 +585,12 @@ discard_symbol: rb_erase(&pos->rb_node, root);
561 } 585 }
562 } 586 }
563 587
588 if (curr_map != map &&
589 self->kernel == DSO_TYPE_GUEST_KERNEL &&
590 machine__is_default_guest(kmaps->machine)) {
591 dso__set_loaded(curr_map->dso, curr_map->type);
592 }
593
564 return count; 594 return count;
565} 595}
566 596
@@ -571,7 +601,10 @@ int dso__load_kallsyms(struct dso *self, const char *filename,
571 return -1; 601 return -1;
572 602
573 symbols__fixup_end(&self->symbols[map->type]); 603 symbols__fixup_end(&self->symbols[map->type]);
574 self->origin = DSO__ORIG_KERNEL; 604 if (self->kernel == DSO_TYPE_GUEST_KERNEL)
605 self->origin = DSO__ORIG_GUEST_KERNEL;
606 else
607 self->origin = DSO__ORIG_KERNEL;
575 608
576 return dso__split_kallsyms(self, map, filter); 609 return dso__split_kallsyms(self, map, filter);
577} 610}
@@ -870,8 +903,8 @@ out_close:
870 if (err == 0) 903 if (err == 0)
871 return nr; 904 return nr;
872out: 905out:
873 pr_warning("%s: problems reading %s PLT info.\n", 906 pr_debug("%s: problems reading %s PLT info.\n",
874 __func__, self->long_name); 907 __func__, self->long_name);
875 return 0; 908 return 0;
876} 909}
877 910
@@ -958,7 +991,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
958 nr_syms = shdr.sh_size / shdr.sh_entsize; 991 nr_syms = shdr.sh_size / shdr.sh_entsize;
959 992
960 memset(&sym, 0, sizeof(sym)); 993 memset(&sym, 0, sizeof(sym));
961 if (!self->kernel) { 994 if (self->kernel == DSO_TYPE_USER) {
962 self->adjust_symbols = (ehdr.e_type == ET_EXEC || 995 self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
963 elf_section_by_name(elf, &ehdr, &shdr, 996 elf_section_by_name(elf, &ehdr, &shdr,
964 ".gnu.prelink_undo", 997 ".gnu.prelink_undo",
@@ -990,7 +1023,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
990 1023
991 section_name = elf_sec__name(&shdr, secstrs); 1024 section_name = elf_sec__name(&shdr, secstrs);
992 1025
993 if (self->kernel || kmodule) { 1026 if (self->kernel != DSO_TYPE_USER || kmodule) {
994 char dso_name[PATH_MAX]; 1027 char dso_name[PATH_MAX];
995 1028
996 if (strcmp(section_name, 1029 if (strcmp(section_name,
@@ -1017,6 +1050,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
1017 curr_dso = dso__new(dso_name); 1050 curr_dso = dso__new(dso_name);
1018 if (curr_dso == NULL) 1051 if (curr_dso == NULL)
1019 goto out_elf_end; 1052 goto out_elf_end;
1053 curr_dso->kernel = self->kernel;
1020 curr_map = map__new2(start, curr_dso, 1054 curr_map = map__new2(start, curr_dso,
1021 map->type); 1055 map->type);
1022 if (curr_map == NULL) { 1056 if (curr_map == NULL) {
@@ -1025,9 +1059,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
1025 } 1059 }
1026 curr_map->map_ip = identity__map_ip; 1060 curr_map->map_ip = identity__map_ip;
1027 curr_map->unmap_ip = identity__map_ip; 1061 curr_map->unmap_ip = identity__map_ip;
1028 curr_dso->origin = DSO__ORIG_KERNEL; 1062 curr_dso->origin = self->origin;
1029 map_groups__insert(kmap->kmaps, curr_map); 1063 map_groups__insert(kmap->kmaps, curr_map);
1030 dsos__add(&dsos__kernel, curr_dso); 1064 dsos__add(&self->node, curr_dso);
1031 dso__set_loaded(curr_dso, map->type); 1065 dso__set_loaded(curr_dso, map->type);
1032 } else 1066 } else
1033 curr_dso = curr_map->dso; 1067 curr_dso = curr_map->dso;
@@ -1089,7 +1123,7 @@ static bool dso__build_id_equal(const struct dso *self, u8 *build_id)
1089 return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0; 1123 return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0;
1090} 1124}
1091 1125
1092static bool __dsos__read_build_ids(struct list_head *head, bool with_hits) 1126bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
1093{ 1127{
1094 bool have_build_id = false; 1128 bool have_build_id = false;
1095 struct dso *pos; 1129 struct dso *pos;
@@ -1107,13 +1141,6 @@ static bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
1107 return have_build_id; 1141 return have_build_id;
1108} 1142}
1109 1143
1110bool dsos__read_build_ids(bool with_hits)
1111{
1112 bool kbuildids = __dsos__read_build_ids(&dsos__kernel, with_hits),
1113 ubuildids = __dsos__read_build_ids(&dsos__user, with_hits);
1114 return kbuildids || ubuildids;
1115}
1116
1117/* 1144/*
1118 * Align offset to 4 bytes as needed for note name and descriptor data. 1145 * Align offset to 4 bytes as needed for note name and descriptor data.
1119 */ 1146 */
@@ -1248,6 +1275,8 @@ char dso__symtab_origin(const struct dso *self)
1248 [DSO__ORIG_BUILDID] = 'b', 1275 [DSO__ORIG_BUILDID] = 'b',
1249 [DSO__ORIG_DSO] = 'd', 1276 [DSO__ORIG_DSO] = 'd',
1250 [DSO__ORIG_KMODULE] = 'K', 1277 [DSO__ORIG_KMODULE] = 'K',
1278 [DSO__ORIG_GUEST_KERNEL] = 'g',
1279 [DSO__ORIG_GUEST_KMODULE] = 'G',
1251 }; 1280 };
1252 1281
1253 if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND) 1282 if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND)
@@ -1263,11 +1292,20 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
1263 char build_id_hex[BUILD_ID_SIZE * 2 + 1]; 1292 char build_id_hex[BUILD_ID_SIZE * 2 + 1];
1264 int ret = -1; 1293 int ret = -1;
1265 int fd; 1294 int fd;
1295 struct machine *machine;
1296 const char *root_dir;
1266 1297
1267 dso__set_loaded(self, map->type); 1298 dso__set_loaded(self, map->type);
1268 1299
1269 if (self->kernel) 1300 if (self->kernel == DSO_TYPE_KERNEL)
1270 return dso__load_kernel_sym(self, map, filter); 1301 return dso__load_kernel_sym(self, map, filter);
1302 else if (self->kernel == DSO_TYPE_GUEST_KERNEL)
1303 return dso__load_guest_kernel_sym(self, map, filter);
1304
1305 if (map->groups && map->groups->machine)
1306 machine = map->groups->machine;
1307 else
1308 machine = NULL;
1271 1309
1272 name = malloc(size); 1310 name = malloc(size);
1273 if (!name) 1311 if (!name)
@@ -1321,6 +1359,13 @@ more:
1321 case DSO__ORIG_DSO: 1359 case DSO__ORIG_DSO:
1322 snprintf(name, size, "%s", self->long_name); 1360 snprintf(name, size, "%s", self->long_name);
1323 break; 1361 break;
1362 case DSO__ORIG_GUEST_KMODULE:
1363 if (map->groups && map->groups->machine)
1364 root_dir = map->groups->machine->root_dir;
1365 else
1366 root_dir = "";
1367 snprintf(name, size, "%s%s", root_dir, self->long_name);
1368 break;
1324 1369
1325 default: 1370 default:
1326 goto out; 1371 goto out;
@@ -1374,7 +1419,8 @@ struct map *map_groups__find_by_name(struct map_groups *self,
1374 return NULL; 1419 return NULL;
1375} 1420}
1376 1421
1377static int dso__kernel_module_get_build_id(struct dso *self) 1422static int dso__kernel_module_get_build_id(struct dso *self,
1423 const char *root_dir)
1378{ 1424{
1379 char filename[PATH_MAX]; 1425 char filename[PATH_MAX];
1380 /* 1426 /*
@@ -1384,8 +1430,8 @@ static int dso__kernel_module_get_build_id(struct dso *self)
1384 const char *name = self->short_name + 1; 1430 const char *name = self->short_name + 1;
1385 1431
1386 snprintf(filename, sizeof(filename), 1432 snprintf(filename, sizeof(filename),
1387 "/sys/module/%.*s/notes/.note.gnu.build-id", 1433 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1388 (int)strlen(name - 1), name); 1434 root_dir, (int)strlen(name) - 1, name);
1389 1435
1390 if (sysfs__read_build_id(filename, self->build_id, 1436 if (sysfs__read_build_id(filename, self->build_id,
1391 sizeof(self->build_id)) == 0) 1437 sizeof(self->build_id)) == 0)
@@ -1394,26 +1440,33 @@ static int dso__kernel_module_get_build_id(struct dso *self)
1394 return 0; 1440 return 0;
1395} 1441}
1396 1442
1397static int map_groups__set_modules_path_dir(struct map_groups *self, char *dirname) 1443static int map_groups__set_modules_path_dir(struct map_groups *self,
1444 const char *dir_name)
1398{ 1445{
1399 struct dirent *dent; 1446 struct dirent *dent;
1400 DIR *dir = opendir(dirname); 1447 DIR *dir = opendir(dir_name);
1401 1448
1402 if (!dir) { 1449 if (!dir) {
1403 pr_debug("%s: cannot open %s dir\n", __func__, dirname); 1450 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1404 return -1; 1451 return -1;
1405 } 1452 }
1406 1453
1407 while ((dent = readdir(dir)) != NULL) { 1454 while ((dent = readdir(dir)) != NULL) {
1408 char path[PATH_MAX]; 1455 char path[PATH_MAX];
1456 struct stat st;
1409 1457
1410 if (dent->d_type == DT_DIR) { 1458 /*sshfs might return bad dent->d_type, so we have to stat*/
1459 sprintf(path, "%s/%s", dir_name, dent->d_name);
1460 if (stat(path, &st))
1461 continue;
1462
1463 if (S_ISDIR(st.st_mode)) {
1411 if (!strcmp(dent->d_name, ".") || 1464 if (!strcmp(dent->d_name, ".") ||
1412 !strcmp(dent->d_name, "..")) 1465 !strcmp(dent->d_name, ".."))
1413 continue; 1466 continue;
1414 1467
1415 snprintf(path, sizeof(path), "%s/%s", 1468 snprintf(path, sizeof(path), "%s/%s",
1416 dirname, dent->d_name); 1469 dir_name, dent->d_name);
1417 if (map_groups__set_modules_path_dir(self, path) < 0) 1470 if (map_groups__set_modules_path_dir(self, path) < 0)
1418 goto failure; 1471 goto failure;
1419 } else { 1472 } else {
@@ -1433,13 +1486,13 @@ static int map_groups__set_modules_path_dir(struct map_groups *self, char *dirna
1433 continue; 1486 continue;
1434 1487
1435 snprintf(path, sizeof(path), "%s/%s", 1488 snprintf(path, sizeof(path), "%s/%s",
1436 dirname, dent->d_name); 1489 dir_name, dent->d_name);
1437 1490
1438 long_name = strdup(path); 1491 long_name = strdup(path);
1439 if (long_name == NULL) 1492 if (long_name == NULL)
1440 goto failure; 1493 goto failure;
1441 dso__set_long_name(map->dso, long_name); 1494 dso__set_long_name(map->dso, long_name);
1442 dso__kernel_module_get_build_id(map->dso); 1495 dso__kernel_module_get_build_id(map->dso, "");
1443 } 1496 }
1444 } 1497 }
1445 1498
@@ -1449,18 +1502,47 @@ failure:
1449 return -1; 1502 return -1;
1450} 1503}
1451 1504
1452static int map_groups__set_modules_path(struct map_groups *self) 1505static char *get_kernel_version(const char *root_dir)
1453{ 1506{
1454 struct utsname uts; 1507 char version[PATH_MAX];
1508 FILE *file;
1509 char *name, *tmp;
1510 const char *prefix = "Linux version ";
1511
1512 sprintf(version, "%s/proc/version", root_dir);
1513 file = fopen(version, "r");
1514 if (!file)
1515 return NULL;
1516
1517 version[0] = '\0';
1518 tmp = fgets(version, sizeof(version), file);
1519 fclose(file);
1520
1521 name = strstr(version, prefix);
1522 if (!name)
1523 return NULL;
1524 name += strlen(prefix);
1525 tmp = strchr(name, ' ');
1526 if (tmp)
1527 *tmp = '\0';
1528
1529 return strdup(name);
1530}
1531
1532static int machine__set_modules_path(struct machine *self)
1533{
1534 char *version;
1455 char modules_path[PATH_MAX]; 1535 char modules_path[PATH_MAX];
1456 1536
1457 if (uname(&uts) < 0) 1537 version = get_kernel_version(self->root_dir);
1538 if (!version)
1458 return -1; 1539 return -1;
1459 1540
1460 snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel", 1541 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
1461 uts.release); 1542 self->root_dir, version);
1543 free(version);
1462 1544
1463 return map_groups__set_modules_path_dir(self, modules_path); 1545 return map_groups__set_modules_path_dir(&self->kmaps, modules_path);
1464} 1546}
1465 1547
1466/* 1548/*
@@ -1470,8 +1552,8 @@ static int map_groups__set_modules_path(struct map_groups *self)
1470 */ 1552 */
1471static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) 1553static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
1472{ 1554{
1473 struct map *self = zalloc(sizeof(*self) + 1555 struct map *self = calloc(1, (sizeof(*self) +
1474 (dso->kernel ? sizeof(struct kmap) : 0)); 1556 (dso->kernel ? sizeof(struct kmap) : 0)));
1475 if (self != NULL) { 1557 if (self != NULL) {
1476 /* 1558 /*
1477 * ->end will be filled after we load all the symbols 1559 * ->end will be filled after we load all the symbols
@@ -1482,11 +1564,11 @@ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
1482 return self; 1564 return self;
1483} 1565}
1484 1566
1485struct map *map_groups__new_module(struct map_groups *self, u64 start, 1567struct map *machine__new_module(struct machine *self, u64 start,
1486 const char *filename) 1568 const char *filename)
1487{ 1569{
1488 struct map *map; 1570 struct map *map;
1489 struct dso *dso = __dsos__findnew(&dsos__kernel, filename); 1571 struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename);
1490 1572
1491 if (dso == NULL) 1573 if (dso == NULL)
1492 return NULL; 1574 return NULL;
@@ -1495,18 +1577,31 @@ struct map *map_groups__new_module(struct map_groups *self, u64 start,
1495 if (map == NULL) 1577 if (map == NULL)
1496 return NULL; 1578 return NULL;
1497 1579
1498 dso->origin = DSO__ORIG_KMODULE; 1580 if (machine__is_host(self))
1499 map_groups__insert(self, map); 1581 dso->origin = DSO__ORIG_KMODULE;
1582 else
1583 dso->origin = DSO__ORIG_GUEST_KMODULE;
1584 map_groups__insert(&self->kmaps, map);
1500 return map; 1585 return map;
1501} 1586}
1502 1587
1503static int map_groups__create_modules(struct map_groups *self) 1588static int machine__create_modules(struct machine *self)
1504{ 1589{
1505 char *line = NULL; 1590 char *line = NULL;
1506 size_t n; 1591 size_t n;
1507 FILE *file = fopen("/proc/modules", "r"); 1592 FILE *file;
1508 struct map *map; 1593 struct map *map;
1594 const char *modules;
1595 char path[PATH_MAX];
1596
1597 if (machine__is_default_guest(self))
1598 modules = symbol_conf.default_guest_modules;
1599 else {
1600 sprintf(path, "%s/proc/modules", self->root_dir);
1601 modules = path;
1602 }
1509 1603
1604 file = fopen(modules, "r");
1510 if (file == NULL) 1605 if (file == NULL)
1511 return -1; 1606 return -1;
1512 1607
@@ -1538,16 +1633,16 @@ static int map_groups__create_modules(struct map_groups *self)
1538 *sep = '\0'; 1633 *sep = '\0';
1539 1634
1540 snprintf(name, sizeof(name), "[%s]", line); 1635 snprintf(name, sizeof(name), "[%s]", line);
1541 map = map_groups__new_module(self, start, name); 1636 map = machine__new_module(self, start, name);
1542 if (map == NULL) 1637 if (map == NULL)
1543 goto out_delete_line; 1638 goto out_delete_line;
1544 dso__kernel_module_get_build_id(map->dso); 1639 dso__kernel_module_get_build_id(map->dso, self->root_dir);
1545 } 1640 }
1546 1641
1547 free(line); 1642 free(line);
1548 fclose(file); 1643 fclose(file);
1549 1644
1550 return map_groups__set_modules_path(self); 1645 return machine__set_modules_path(self);
1551 1646
1552out_delete_line: 1647out_delete_line:
1553 free(line); 1648 free(line);
@@ -1714,8 +1809,56 @@ out_fixup:
1714 return err; 1809 return err;
1715} 1810}
1716 1811
1717LIST_HEAD(dsos__user); 1812static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
1718LIST_HEAD(dsos__kernel); 1813 symbol_filter_t filter)
1814{
1815 int err;
1816 const char *kallsyms_filename = NULL;
1817 struct machine *machine;
1818 char path[PATH_MAX];
1819
1820 if (!map->groups) {
1821 pr_debug("Guest kernel map hasn't the point to groups\n");
1822 return -1;
1823 }
1824 machine = map->groups->machine;
1825
1826 if (machine__is_default_guest(machine)) {
1827 /*
1828 * if the user specified a vmlinux filename, use it and only
1829 * it, reporting errors to the user if it cannot be used.
1830 * Or use the guest kallsyms file the user gave on the command line
1831 */
1832 if (symbol_conf.default_guest_vmlinux_name != NULL) {
1833 err = dso__load_vmlinux(self, map,
1834 symbol_conf.default_guest_vmlinux_name, filter);
1835 goto out_try_fixup;
1836 }
1837
1838 kallsyms_filename = symbol_conf.default_guest_kallsyms;
1839 if (!kallsyms_filename)
1840 return -1;
1841 } else {
1842 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
1843 kallsyms_filename = path;
1844 }
1845
1846 err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
1847 if (err > 0)
1848 pr_debug("Using %s for symbols\n", kallsyms_filename);
1849
1850out_try_fixup:
1851 if (err > 0) {
1852 if (kallsyms_filename != NULL) {
1853 machine__mmap_name(machine, path, sizeof(path));
1854 dso__set_long_name(self, strdup(path));
1855 }
1856 map__fixup_start(map);
1857 map__fixup_end(map);
1858 }
1859
1860 return err;
1861}
1719 1862
1720static void dsos__add(struct list_head *head, struct dso *dso) 1863static void dsos__add(struct list_head *head, struct dso *dso)
1721{ 1864{
@@ -1747,21 +1890,32 @@ struct dso *__dsos__findnew(struct list_head *head, const char *name)
1747 return dso; 1890 return dso;
1748} 1891}
1749 1892
1750static void __dsos__fprintf(struct list_head *head, FILE *fp) 1893size_t __dsos__fprintf(struct list_head *head, FILE *fp)
1751{ 1894{
1752 struct dso *pos; 1895 struct dso *pos;
1896 size_t ret = 0;
1753 1897
1754 list_for_each_entry(pos, head, node) { 1898 list_for_each_entry(pos, head, node) {
1755 int i; 1899 int i;
1756 for (i = 0; i < MAP__NR_TYPES; ++i) 1900 for (i = 0; i < MAP__NR_TYPES; ++i)
1757 dso__fprintf(pos, i, fp); 1901 ret += dso__fprintf(pos, i, fp);
1758 } 1902 }
1903
1904 return ret;
1759} 1905}
1760 1906
1761void dsos__fprintf(FILE *fp) 1907size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp)
1762{ 1908{
1763 __dsos__fprintf(&dsos__kernel, fp); 1909 struct rb_node *nd;
1764 __dsos__fprintf(&dsos__user, fp); 1910 size_t ret = 0;
1911
1912 for (nd = rb_first(self); nd; nd = rb_next(nd)) {
1913 struct machine *pos = rb_entry(nd, struct machine, rb_node);
1914 ret += __dsos__fprintf(&pos->kernel_dsos, fp);
1915 ret += __dsos__fprintf(&pos->user_dsos, fp);
1916 }
1917
1918 return ret;
1765} 1919}
1766 1920
1767static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, 1921static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
@@ -1779,10 +1933,17 @@ static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
1779 return ret; 1933 return ret;
1780} 1934}
1781 1935
1782size_t dsos__fprintf_buildid(FILE *fp, bool with_hits) 1936size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits)
1783{ 1937{
1784 return (__dsos__fprintf_buildid(&dsos__kernel, fp, with_hits) + 1938 struct rb_node *nd;
1785 __dsos__fprintf_buildid(&dsos__user, fp, with_hits)); 1939 size_t ret = 0;
1940
1941 for (nd = rb_first(self); nd; nd = rb_next(nd)) {
1942 struct machine *pos = rb_entry(nd, struct machine, rb_node);
1943 ret += __dsos__fprintf_buildid(&pos->kernel_dsos, fp, with_hits);
1944 ret += __dsos__fprintf_buildid(&pos->user_dsos, fp, with_hits);
1945 }
1946 return ret;
1786} 1947}
1787 1948
1788struct dso *dso__new_kernel(const char *name) 1949struct dso *dso__new_kernel(const char *name)
@@ -1791,55 +1952,98 @@ struct dso *dso__new_kernel(const char *name)
1791 1952
1792 if (self != NULL) { 1953 if (self != NULL) {
1793 dso__set_short_name(self, "[kernel]"); 1954 dso__set_short_name(self, "[kernel]");
1794 self->kernel = 1; 1955 self->kernel = DSO_TYPE_KERNEL;
1795 } 1956 }
1796 1957
1797 return self; 1958 return self;
1798} 1959}
1799 1960
1800void dso__read_running_kernel_build_id(struct dso *self) 1961static struct dso *dso__new_guest_kernel(struct machine *machine,
1962 const char *name)
1801{ 1963{
1802 if (sysfs__read_build_id("/sys/kernel/notes", self->build_id, 1964 char bf[PATH_MAX];
1965 struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf)));
1966
1967 if (self != NULL) {
1968 dso__set_short_name(self, "[guest.kernel]");
1969 self->kernel = DSO_TYPE_GUEST_KERNEL;
1970 }
1971
1972 return self;
1973}
1974
1975void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine)
1976{
1977 char path[PATH_MAX];
1978
1979 if (machine__is_default_guest(machine))
1980 return;
1981 sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1982 if (sysfs__read_build_id(path, self->build_id,
1803 sizeof(self->build_id)) == 0) 1983 sizeof(self->build_id)) == 0)
1804 self->has_build_id = true; 1984 self->has_build_id = true;
1805} 1985}
1806 1986
1807static struct dso *dsos__create_kernel(const char *vmlinux) 1987static struct dso *machine__create_kernel(struct machine *self)
1808{ 1988{
1809 struct dso *kernel = dso__new_kernel(vmlinux); 1989 const char *vmlinux_name = NULL;
1990 struct dso *kernel;
1810 1991
1811 if (kernel != NULL) { 1992 if (machine__is_host(self)) {
1812 dso__read_running_kernel_build_id(kernel); 1993 vmlinux_name = symbol_conf.vmlinux_name;
1813 dsos__add(&dsos__kernel, kernel); 1994 kernel = dso__new_kernel(vmlinux_name);
1995 } else {
1996 if (machine__is_default_guest(self))
1997 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
1998 kernel = dso__new_guest_kernel(self, vmlinux_name);
1814 } 1999 }
1815 2000
2001 if (kernel != NULL) {
2002 dso__read_running_kernel_build_id(kernel, self);
2003 dsos__add(&self->kernel_dsos, kernel);
2004 }
1816 return kernel; 2005 return kernel;
1817} 2006}
1818 2007
1819int __map_groups__create_kernel_maps(struct map_groups *self, 2008int __machine__create_kernel_maps(struct machine *self, struct dso *kernel)
1820 struct map *vmlinux_maps[MAP__NR_TYPES],
1821 struct dso *kernel)
1822{ 2009{
1823 enum map_type type; 2010 enum map_type type;
1824 2011
1825 for (type = 0; type < MAP__NR_TYPES; ++type) { 2012 for (type = 0; type < MAP__NR_TYPES; ++type) {
1826 struct kmap *kmap; 2013 struct kmap *kmap;
1827 2014
1828 vmlinux_maps[type] = map__new2(0, kernel, type); 2015 self->vmlinux_maps[type] = map__new2(0, kernel, type);
1829 if (vmlinux_maps[type] == NULL) 2016 if (self->vmlinux_maps[type] == NULL)
1830 return -1; 2017 return -1;
1831 2018
1832 vmlinux_maps[type]->map_ip = 2019 self->vmlinux_maps[type]->map_ip =
1833 vmlinux_maps[type]->unmap_ip = identity__map_ip; 2020 self->vmlinux_maps[type]->unmap_ip = identity__map_ip;
1834 2021
1835 kmap = map__kmap(vmlinux_maps[type]); 2022 kmap = map__kmap(self->vmlinux_maps[type]);
1836 kmap->kmaps = self; 2023 kmap->kmaps = &self->kmaps;
1837 map_groups__insert(self, vmlinux_maps[type]); 2024 map_groups__insert(&self->kmaps, self->vmlinux_maps[type]);
1838 } 2025 }
1839 2026
1840 return 0; 2027 return 0;
1841} 2028}
1842 2029
2030int machine__create_kernel_maps(struct machine *self)
2031{
2032 struct dso *kernel = machine__create_kernel(self);
2033
2034 if (kernel == NULL ||
2035 __machine__create_kernel_maps(self, kernel) < 0)
2036 return -1;
2037
2038 if (symbol_conf.use_modules && machine__create_modules(self) < 0)
2039 pr_debug("Problems creating module maps, continuing anyway...\n");
2040 /*
2041 * Now that we have all the maps created, just set the ->end of them:
2042 */
2043 map_groups__fixup_end(&self->kmaps);
2044 return 0;
2045}
2046
1843static void vmlinux_path__exit(void) 2047static void vmlinux_path__exit(void)
1844{ 2048{
1845 while (--vmlinux_path__nr_entries >= 0) { 2049 while (--vmlinux_path__nr_entries >= 0) {
@@ -1895,6 +2099,17 @@ out_fail:
1895 return -1; 2099 return -1;
1896} 2100}
1897 2101
2102size_t vmlinux_path__fprintf(FILE *fp)
2103{
2104 int i;
2105 size_t printed = 0;
2106
2107 for (i = 0; i < vmlinux_path__nr_entries; ++i)
2108 printed += fprintf(fp, "[%d] %s\n", i, vmlinux_path[i]);
2109
2110 return printed;
2111}
2112
1898static int setup_list(struct strlist **list, const char *list_str, 2113static int setup_list(struct strlist **list, const char *list_str,
1899 const char *list_name) 2114 const char *list_name)
1900{ 2115{
@@ -1945,22 +2160,129 @@ out_free_comm_list:
1945 return -1; 2160 return -1;
1946} 2161}
1947 2162
1948int map_groups__create_kernel_maps(struct map_groups *self, 2163int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
1949 struct map *vmlinux_maps[MAP__NR_TYPES])
1950{ 2164{
1951 struct dso *kernel = dsos__create_kernel(symbol_conf.vmlinux_name); 2165 struct machine *machine = machines__findnew(self, pid);
1952 2166
1953 if (kernel == NULL) 2167 if (machine == NULL)
1954 return -1; 2168 return -1;
1955 2169
1956 if (__map_groups__create_kernel_maps(self, vmlinux_maps, kernel) < 0) 2170 return machine__create_kernel_maps(machine);
1957 return -1; 2171}
1958 2172
1959 if (symbol_conf.use_modules && map_groups__create_modules(self) < 0) 2173static int hex(char ch)
1960 pr_debug("Problems creating module maps, continuing anyway...\n"); 2174{
1961 /* 2175 if ((ch >= '0') && (ch <= '9'))
1962 * Now that we have all the maps created, just set the ->end of them: 2176 return ch - '0';
1963 */ 2177 if ((ch >= 'a') && (ch <= 'f'))
1964 map_groups__fixup_end(self); 2178 return ch - 'a' + 10;
1965 return 0; 2179 if ((ch >= 'A') && (ch <= 'F'))
2180 return ch - 'A' + 10;
2181 return -1;
2182}
2183
2184/*
2185 * While we find nice hex chars, build a long_val.
2186 * Return number of chars processed.
2187 */
2188int hex2u64(const char *ptr, u64 *long_val)
2189{
2190 const char *p = ptr;
2191 *long_val = 0;
2192
2193 while (*p) {
2194 const int hex_val = hex(*p);
2195
2196 if (hex_val < 0)
2197 break;
2198
2199 *long_val = (*long_val << 4) | hex_val;
2200 p++;
2201 }
2202
2203 return p - ptr;
2204}
2205
2206char *strxfrchar(char *s, char from, char to)
2207{
2208 char *p = s;
2209
2210 while ((p = strchr(p, from)) != NULL)
2211 *p++ = to;
2212
2213 return s;
2214}
2215
2216int machines__create_guest_kernel_maps(struct rb_root *self)
2217{
2218 int ret = 0;
2219 struct dirent **namelist = NULL;
2220 int i, items = 0;
2221 char path[PATH_MAX];
2222 pid_t pid;
2223
2224 if (symbol_conf.default_guest_vmlinux_name ||
2225 symbol_conf.default_guest_modules ||
2226 symbol_conf.default_guest_kallsyms) {
2227 machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID);
2228 }
2229
2230 if (symbol_conf.guestmount) {
2231 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
2232 if (items <= 0)
2233 return -ENOENT;
2234 for (i = 0; i < items; i++) {
2235 if (!isdigit(namelist[i]->d_name[0])) {
2236 /* Filter out . and .. */
2237 continue;
2238 }
2239 pid = atoi(namelist[i]->d_name);
2240 sprintf(path, "%s/%s/proc/kallsyms",
2241 symbol_conf.guestmount,
2242 namelist[i]->d_name);
2243 ret = access(path, R_OK);
2244 if (ret) {
2245 pr_debug("Can't access file %s\n", path);
2246 goto failure;
2247 }
2248 machines__create_kernel_maps(self, pid);
2249 }
2250failure:
2251 free(namelist);
2252 }
2253
2254 return ret;
2255}
2256
2257int machine__load_kallsyms(struct machine *self, const char *filename,
2258 enum map_type type, symbol_filter_t filter)
2259{
2260 struct map *map = self->vmlinux_maps[type];
2261 int ret = dso__load_kallsyms(map->dso, filename, map, filter);
2262
2263 if (ret > 0) {
2264 dso__set_loaded(map->dso, type);
2265 /*
2266 * Since /proc/kallsyms will have multiple sessions for the
2267 * kernel, with modules between them, fixup the end of all
2268 * sections.
2269 */
2270 __map_groups__fixup_end(&self->kmaps, type);
2271 }
2272
2273 return ret;
2274}
2275
2276int machine__load_vmlinux_path(struct machine *self, enum map_type type,
2277 symbol_filter_t filter)
2278{
2279 struct map *map = self->vmlinux_maps[type];
2280 int ret = dso__load_vmlinux_path(map->dso, map, filter);
2281
2282 if (ret > 0) {
2283 dso__set_loaded(map->dso, type);
2284 map__reloc_vmlinux(map);
2285 }
2286
2287 return ret;
1966} 2288}
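Taken together, the symbol.c changes move kernel map and module handling off the global dsos__kernel/dsos__user lists and onto per-machine state, so host and guest kernels can be resolved side by side. A rough usage sketch follows; it is not code from this patch, and it assumes the HOST_KERNEL_ID constant and the machines__findnew() helper introduced elsewhere in this series:

static int setup_kernel_symbols(struct rb_root *machines,
				symbol_filter_t filter)
{
	struct machine *host;

	/* host kernel maps first */
	if (machines__create_kernel_maps(machines, HOST_KERNEL_ID) < 0)
		return -1;

	/* guest kernels too, if any guest options were given (see above) */
	machines__create_guest_kernel_maps(machines);

	host = machines__findnew(machines, HOST_KERNEL_ID);
	if (host == NULL)
		return -1;

	/* resolve host kernel functions from /proc/kallsyms */
	return machine__load_kallsyms(host, "/proc/kallsyms",
				      MAP__FUNCTION, filter) > 0 ? 0 : -1;
}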
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index f30a37428919..032469e41876 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -3,10 +3,11 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <stdbool.h> 5#include <stdbool.h>
6#include "types.h" 6#include <stdint.h>
7#include "map.h"
7#include <linux/list.h> 8#include <linux/list.h>
8#include <linux/rbtree.h> 9#include <linux/rbtree.h>
9#include "event.h" 10#include <stdio.h>
10 11
11#define DEBUG_CACHE_DIR ".debug" 12#define DEBUG_CACHE_DIR ".debug"
12 13
@@ -29,6 +30,9 @@ static inline char *bfd_demangle(void __used *v, const char __used *c,
29#endif 30#endif
30#endif 31#endif
31 32
33int hex2u64(const char *ptr, u64 *val);
34char *strxfrchar(char *s, char from, char to);
35
32/* 36/*
33 * libelf 0.8.x and earlier do not support ELF_C_READ_MMAP; 37 * libelf 0.8.x and earlier do not support ELF_C_READ_MMAP;
34 * for newer versions we can use mmap to reduce memory usage: 38 * for newer versions we can use mmap to reduce memory usage:
@@ -44,10 +48,13 @@ static inline char *bfd_demangle(void __used *v, const char __used *c,
44#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */ 48#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
45#endif 49#endif
46 50
51#define BUILD_ID_SIZE 20
52
47struct symbol { 53struct symbol {
48 struct rb_node rb_node; 54 struct rb_node rb_node;
49 u64 start; 55 u64 start;
50 u64 end; 56 u64 end;
57 u16 namelen;
51 char name[0]; 58 char name[0];
52}; 59};
53 60
@@ -63,10 +70,15 @@ struct symbol_conf {
63 show_nr_samples, 70 show_nr_samples,
64 use_callchain, 71 use_callchain,
65 exclude_other, 72 exclude_other,
66 full_paths; 73 full_paths,
74 show_cpu_utilization;
67 const char *vmlinux_name, 75 const char *vmlinux_name,
68 *field_sep; 76 *field_sep;
69 char *dso_list_str, 77 const char *default_guest_vmlinux_name,
78 *default_guest_kallsyms,
79 *default_guest_modules;
80 const char *guestmount;
81 const char *dso_list_str,
70 *comm_list_str, 82 *comm_list_str,
71 *sym_list_str, 83 *sym_list_str,
72 *col_width_list_str; 84 *col_width_list_str;
@@ -88,6 +100,11 @@ struct ref_reloc_sym {
88 u64 unrelocated_addr; 100 u64 unrelocated_addr;
89}; 101};
90 102
103struct map_symbol {
104 struct map *map;
105 struct symbol *sym;
106};
107
91struct addr_location { 108struct addr_location {
92 struct thread *thread; 109 struct thread *thread;
93 struct map *map; 110 struct map *map;
@@ -95,6 +112,13 @@ struct addr_location {
95 u64 addr; 112 u64 addr;
96 char level; 113 char level;
97 bool filtered; 114 bool filtered;
115 unsigned int cpumode;
116};
117
118enum dso_kernel_type {
119 DSO_TYPE_USER = 0,
120 DSO_TYPE_KERNEL,
121 DSO_TYPE_GUEST_KERNEL
98}; 122};
99 123
100struct dso { 124struct dso {
@@ -104,8 +128,9 @@ struct dso {
104 u8 adjust_symbols:1; 128 u8 adjust_symbols:1;
105 u8 slen_calculated:1; 129 u8 slen_calculated:1;
106 u8 has_build_id:1; 130 u8 has_build_id:1;
107 u8 kernel:1; 131 enum dso_kernel_type kernel;
108 u8 hit:1; 132 u8 hit:1;
133 u8 annotate_warned:1;
109 unsigned char origin; 134 unsigned char origin;
110 u8 sorted_by_name; 135 u8 sorted_by_name;
111 u8 loaded; 136 u8 loaded;
@@ -131,42 +156,65 @@ static inline void dso__set_loaded(struct dso *self, enum map_type type)
131 156
132void dso__sort_by_name(struct dso *self, enum map_type type); 157void dso__sort_by_name(struct dso *self, enum map_type type);
133 158
134extern struct list_head dsos__user, dsos__kernel;
135
136struct dso *__dsos__findnew(struct list_head *head, const char *name); 159struct dso *__dsos__findnew(struct list_head *head, const char *name);
137 160
138static inline struct dso *dsos__findnew(const char *name)
139{
140 return __dsos__findnew(&dsos__user, name);
141}
142
143int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); 161int dso__load(struct dso *self, struct map *map, symbol_filter_t filter);
144int dso__load_vmlinux_path(struct dso *self, struct map *map, 162int dso__load_vmlinux_path(struct dso *self, struct map *map,
145 symbol_filter_t filter); 163 symbol_filter_t filter);
146int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map, 164int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map,
147 symbol_filter_t filter); 165 symbol_filter_t filter);
148void dsos__fprintf(FILE *fp); 166int machine__load_kallsyms(struct machine *self, const char *filename,
149size_t dsos__fprintf_buildid(FILE *fp, bool with_hits); 167 enum map_type type, symbol_filter_t filter);
168int machine__load_vmlinux_path(struct machine *self, enum map_type type,
169 symbol_filter_t filter);
170
171size_t __dsos__fprintf(struct list_head *head, FILE *fp);
172
173size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp);
174size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits);
150 175
151size_t dso__fprintf_buildid(struct dso *self, FILE *fp); 176size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
152size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); 177size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
178
179enum dso_origin {
180 DSO__ORIG_KERNEL = 0,
181 DSO__ORIG_GUEST_KERNEL,
182 DSO__ORIG_JAVA_JIT,
183 DSO__ORIG_BUILD_ID_CACHE,
184 DSO__ORIG_FEDORA,
185 DSO__ORIG_UBUNTU,
186 DSO__ORIG_BUILDID,
187 DSO__ORIG_DSO,
188 DSO__ORIG_GUEST_KMODULE,
189 DSO__ORIG_KMODULE,
190 DSO__ORIG_NOT_FOUND,
191};
192
153char dso__symtab_origin(const struct dso *self); 193char dso__symtab_origin(const struct dso *self);
154void dso__set_long_name(struct dso *self, char *name); 194void dso__set_long_name(struct dso *self, char *name);
155void dso__set_build_id(struct dso *self, void *build_id); 195void dso__set_build_id(struct dso *self, void *build_id);
156void dso__read_running_kernel_build_id(struct dso *self); 196void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine);
157struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); 197struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr);
158struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, 198struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
159 const char *name); 199 const char *name);
160 200
161int filename__read_build_id(const char *filename, void *bf, size_t size); 201int filename__read_build_id(const char *filename, void *bf, size_t size);
162int sysfs__read_build_id(const char *filename, void *bf, size_t size); 202int sysfs__read_build_id(const char *filename, void *bf, size_t size);
163bool dsos__read_build_ids(bool with_hits); 203bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
164int build_id__sprintf(const u8 *self, int len, char *bf); 204int build_id__sprintf(const u8 *self, int len, char *bf);
165int kallsyms__parse(const char *filename, void *arg, 205int kallsyms__parse(const char *filename, void *arg,
166 int (*process_symbol)(void *arg, const char *name, 206 int (*process_symbol)(void *arg, const char *name,
167 char type, u64 start)); 207 char type, u64 start));
168 208
209int __machine__create_kernel_maps(struct machine *self, struct dso *kernel);
210int machine__create_kernel_maps(struct machine *self);
211
212int machines__create_kernel_maps(struct rb_root *self, pid_t pid);
213int machines__create_guest_kernel_maps(struct rb_root *self);
214
169int symbol__init(void); 215int symbol__init(void);
170bool symbol_type__is_a(char symbol_type, enum map_type map_type); 216bool symbol_type__is_a(char symbol_type, enum map_type map_type);
171 217
218size_t vmlinux_path__fprintf(FILE *fp);
219
172#endif /* __PERF_SYMBOL */ 220#endif /* __PERF_SYMBOL */
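hex2u64() and strxfrchar() now live in symbol.c and are declared here instead of in the removed string.h. A small usage sketch, with made-up buffer contents:

#include "symbol.h"

static void helpers_example(void)
{
	u64 start;
	char line[] = "c1000000 T _text";	/* fake kallsyms-style line */
	char name[] = "built-in-mod";

	/* consumes the leading hex run: returns 8, start == 0xc1000000 */
	int used = hex2u64(line, &start);

	/* rewrites every '-' to '_' in place: "built_in_mod" */
	strxfrchar(name, '-', '_');
	(void)used;
}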
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index fa968312ee7d..1f7ecd47f499 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -7,13 +7,35 @@
7#include "util.h" 7#include "util.h"
8#include "debug.h" 8#include "debug.h"
9 9
10void map_groups__init(struct map_groups *self) 10int find_all_tid(int pid, pid_t **all_tid)
11{ 11{
12 char name[256];
13 int items;
14 struct dirent **namelist = NULL;
15 int ret = 0;
12 int i; 16 int i;
13 for (i = 0; i < MAP__NR_TYPES; ++i) { 17
14 self->maps[i] = RB_ROOT; 18 sprintf(name, "/proc/%d/task", pid);
15 INIT_LIST_HEAD(&self->removed_maps[i]); 19 items = scandir(name, &namelist, NULL, NULL);
20 if (items <= 0)
21 return -ENOENT;
22 *all_tid = malloc(sizeof(pid_t) * items);
23 if (!*all_tid) {
24 ret = -ENOMEM;
25 goto failure;
16 } 26 }
27
28 for (i = 0; i < items; i++)
29 (*all_tid)[i] = atoi(namelist[i]->d_name);
30
31 ret = items;
32
33failure:
34 for (i=0; i<items; i++)
35 free(namelist[i]);
36 free(namelist);
37
38 return ret;
17} 39}
18 40
19static struct thread *thread__new(pid_t pid) 41static struct thread *thread__new(pid_t pid)
@@ -31,28 +53,6 @@ static struct thread *thread__new(pid_t pid)
31 return self; 53 return self;
32} 54}
33 55
34static void map_groups__flush(struct map_groups *self)
35{
36 int type;
37
38 for (type = 0; type < MAP__NR_TYPES; type++) {
39 struct rb_root *root = &self->maps[type];
40 struct rb_node *next = rb_first(root);
41
42 while (next) {
43 struct map *pos = rb_entry(next, struct map, rb_node);
44 next = rb_next(&pos->rb_node);
45 rb_erase(&pos->rb_node, root);
46 /*
47 * We may have references to this map, for
48 * instance in some hist_entry instances, so
49 * just move them to a separate list.
50 */
51 list_add_tail(&pos->node, &self->removed_maps[pos->type]);
52 }
53 }
54}
55
56int thread__set_comm(struct thread *self, const char *comm) 56int thread__set_comm(struct thread *self, const char *comm)
57{ 57{
58 int err; 58 int err;
@@ -79,69 +79,10 @@ int thread__comm_len(struct thread *self)
79 return self->comm_len; 79 return self->comm_len;
80} 80}
81 81
82size_t __map_groups__fprintf_maps(struct map_groups *self,
83 enum map_type type, FILE *fp)
84{
85 size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
86 struct rb_node *nd;
87
88 for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
89 struct map *pos = rb_entry(nd, struct map, rb_node);
90 printed += fprintf(fp, "Map:");
91 printed += map__fprintf(pos, fp);
92 if (verbose > 2) {
93 printed += dso__fprintf(pos->dso, type, fp);
94 printed += fprintf(fp, "--\n");
95 }
96 }
97
98 return printed;
99}
100
101size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp)
102{
103 size_t printed = 0, i;
104 for (i = 0; i < MAP__NR_TYPES; ++i)
105 printed += __map_groups__fprintf_maps(self, i, fp);
106 return printed;
107}
108
109static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
110 enum map_type type, FILE *fp)
111{
112 struct map *pos;
113 size_t printed = 0;
114
115 list_for_each_entry(pos, &self->removed_maps[type], node) {
116 printed += fprintf(fp, "Map:");
117 printed += map__fprintf(pos, fp);
118 if (verbose > 1) {
119 printed += dso__fprintf(pos->dso, type, fp);
120 printed += fprintf(fp, "--\n");
121 }
122 }
123 return printed;
124}
125
126static size_t map_groups__fprintf_removed_maps(struct map_groups *self, FILE *fp)
127{
128 size_t printed = 0, i;
129 for (i = 0; i < MAP__NR_TYPES; ++i)
130 printed += __map_groups__fprintf_removed_maps(self, i, fp);
131 return printed;
132}
133
134static size_t map_groups__fprintf(struct map_groups *self, FILE *fp)
135{
136 size_t printed = map_groups__fprintf_maps(self, fp);
137 printed += fprintf(fp, "Removed maps:\n");
138 return printed + map_groups__fprintf_removed_maps(self, fp);
139}
140
141static size_t thread__fprintf(struct thread *self, FILE *fp) 82static size_t thread__fprintf(struct thread *self, FILE *fp)
142{ 83{
143 return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) + 84 return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
144 map_groups__fprintf(&self->mg, fp); 85 map_groups__fprintf(&self->mg, verbose, fp);
145} 86}
146 87
147struct thread *perf_session__findnew(struct perf_session *self, pid_t pid) 88struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
@@ -183,127 +124,12 @@ struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
183 return th; 124 return th;
184} 125}
185 126
186static int map_groups__fixup_overlappings(struct map_groups *self,
187 struct map *map)
188{
189 struct rb_root *root = &self->maps[map->type];
190 struct rb_node *next = rb_first(root);
191
192 while (next) {
193 struct map *pos = rb_entry(next, struct map, rb_node);
194 next = rb_next(&pos->rb_node);
195
196 if (!map__overlap(pos, map))
197 continue;
198
199 if (verbose >= 2) {
200 fputs("overlapping maps:\n", stderr);
201 map__fprintf(map, stderr);
202 map__fprintf(pos, stderr);
203 }
204
205 rb_erase(&pos->rb_node, root);
206 /*
207 * We may have references to this map, for instance in some
208 * hist_entry instances, so just move them to a separate
209 * list.
210 */
211 list_add_tail(&pos->node, &self->removed_maps[map->type]);
212 /*
213 * Now check if we need to create new maps for areas not
214 * overlapped by the new map:
215 */
216 if (map->start > pos->start) {
217 struct map *before = map__clone(pos);
218
219 if (before == NULL)
220 return -ENOMEM;
221
222 before->end = map->start - 1;
223 map_groups__insert(self, before);
224 if (verbose >= 2)
225 map__fprintf(before, stderr);
226 }
227
228 if (map->end < pos->end) {
229 struct map *after = map__clone(pos);
230
231 if (after == NULL)
232 return -ENOMEM;
233
234 after->start = map->end + 1;
235 map_groups__insert(self, after);
236 if (verbose >= 2)
237 map__fprintf(after, stderr);
238 }
239 }
240
241 return 0;
242}
243
244void maps__insert(struct rb_root *maps, struct map *map)
245{
246 struct rb_node **p = &maps->rb_node;
247 struct rb_node *parent = NULL;
248 const u64 ip = map->start;
249 struct map *m;
250
251 while (*p != NULL) {
252 parent = *p;
253 m = rb_entry(parent, struct map, rb_node);
254 if (ip < m->start)
255 p = &(*p)->rb_left;
256 else
257 p = &(*p)->rb_right;
258 }
259
260 rb_link_node(&map->rb_node, parent, p);
261 rb_insert_color(&map->rb_node, maps);
262}
263
264struct map *maps__find(struct rb_root *maps, u64 ip)
265{
266 struct rb_node **p = &maps->rb_node;
267 struct rb_node *parent = NULL;
268 struct map *m;
269
270 while (*p != NULL) {
271 parent = *p;
272 m = rb_entry(parent, struct map, rb_node);
273 if (ip < m->start)
274 p = &(*p)->rb_left;
275 else if (ip > m->end)
276 p = &(*p)->rb_right;
277 else
278 return m;
279 }
280
281 return NULL;
282}
283
284void thread__insert_map(struct thread *self, struct map *map) 127void thread__insert_map(struct thread *self, struct map *map)
285{ 128{
286 map_groups__fixup_overlappings(&self->mg, map); 129 map_groups__fixup_overlappings(&self->mg, map, verbose, stderr);
287 map_groups__insert(&self->mg, map); 130 map_groups__insert(&self->mg, map);
288} 131}
289 132
290/*
291 * XXX This should not really _copy_ te maps, but refcount them.
292 */
293static int map_groups__clone(struct map_groups *self,
294 struct map_groups *parent, enum map_type type)
295{
296 struct rb_node *nd;
297 for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
298 struct map *map = rb_entry(nd, struct map, rb_node);
299 struct map *new = map__clone(map);
300 if (new == NULL)
301 return -ENOMEM;
302 map_groups__insert(self, new);
303 }
304 return 0;
305}
306
307int thread__fork(struct thread *self, struct thread *parent) 133int thread__fork(struct thread *self, struct thread *parent)
308{ 134{
309 int i; 135 int i;
@@ -336,15 +162,3 @@ size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
336 162
337 return ret; 163 return ret;
338} 164}
339
340struct symbol *map_groups__find_symbol(struct map_groups *self,
341 enum map_type type, u64 addr,
342 symbol_filter_t filter)
343{
344 struct map *map = map_groups__find(self, type, addr);
345
346 if (map != NULL)
347 return map__find_symbol(map, map->map_ip(map, addr), filter);
348
349 return NULL;
350}
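thread.c also gains find_all_tid(), which scans /proc/<pid>/task and hands back a malloc()ed array of thread ids; the caller owns that array. A usage sketch (the caller name is made up, and it assumes the usual stdio/stdlib includes):

static void print_tasks_of(pid_t pid)
{
	pid_t *tids = NULL;
	int i, nr = find_all_tid(pid, &tids);	/* returns count or -errno */

	if (nr <= 0)
		return;

	for (i = 0; i < nr; i++)
		printf("tid %d\n", tids[i]);

	free(tids);	/* caller frees the array */
}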
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index dcf70303e58e..1dfd9ff8bdcd 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -5,14 +5,6 @@
5#include <unistd.h> 5#include <unistd.h>
6#include "symbol.h" 6#include "symbol.h"
7 7
8struct map_groups {
9 struct rb_root maps[MAP__NR_TYPES];
10 struct list_head removed_maps[MAP__NR_TYPES];
11};
12
13size_t __map_groups__fprintf_maps(struct map_groups *self,
14 enum map_type type, FILE *fp);
15
16struct thread { 8struct thread {
17 struct rb_node rb_node; 9 struct rb_node rb_node;
18 struct map_groups mg; 10 struct map_groups mg;
@@ -23,29 +15,16 @@ struct thread {
23 int comm_len; 15 int comm_len;
24}; 16};
25 17
26void map_groups__init(struct map_groups *self); 18struct perf_session;
19
20int find_all_tid(int pid, pid_t ** all_tid);
27int thread__set_comm(struct thread *self, const char *comm); 21int thread__set_comm(struct thread *self, const char *comm);
28int thread__comm_len(struct thread *self); 22int thread__comm_len(struct thread *self);
29struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); 23struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
30void thread__insert_map(struct thread *self, struct map *map); 24void thread__insert_map(struct thread *self, struct map *map);
31int thread__fork(struct thread *self, struct thread *parent); 25int thread__fork(struct thread *self, struct thread *parent);
32size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp);
33size_t perf_session__fprintf(struct perf_session *self, FILE *fp); 26size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
34 27
35void maps__insert(struct rb_root *maps, struct map *map);
36struct map *maps__find(struct rb_root *maps, u64 addr);
37
38static inline void map_groups__insert(struct map_groups *self, struct map *map)
39{
40 maps__insert(&self->maps[map->type], map);
41}
42
43static inline struct map *map_groups__find(struct map_groups *self,
44 enum map_type type, u64 addr)
45{
46 return maps__find(&self->maps[type], addr);
47}
48
49static inline struct map *thread__find_map(struct thread *self, 28static inline struct map *thread__find_map(struct thread *self,
50 enum map_type type, u64 addr) 29 enum map_type type, u64 addr)
51{ 30{
@@ -54,34 +33,12 @@ static inline struct map *thread__find_map(struct thread *self,
54 33
55void thread__find_addr_map(struct thread *self, 34void thread__find_addr_map(struct thread *self,
56 struct perf_session *session, u8 cpumode, 35 struct perf_session *session, u8 cpumode,
57 enum map_type type, u64 addr, 36 enum map_type type, pid_t pid, u64 addr,
58 struct addr_location *al); 37 struct addr_location *al);
59 38
60void thread__find_addr_location(struct thread *self, 39void thread__find_addr_location(struct thread *self,
61 struct perf_session *session, u8 cpumode, 40 struct perf_session *session, u8 cpumode,
62 enum map_type type, u64 addr, 41 enum map_type type, pid_t pid, u64 addr,
63 struct addr_location *al, 42 struct addr_location *al,
64 symbol_filter_t filter); 43 symbol_filter_t filter);
65struct symbol *map_groups__find_symbol(struct map_groups *self,
66 enum map_type type, u64 addr,
67 symbol_filter_t filter);
68
69static inline struct symbol *map_groups__find_function(struct map_groups *self,
70 u64 addr,
71 symbol_filter_t filter)
72{
73 return map_groups__find_symbol(self, MAP__FUNCTION, addr, filter);
74}
75
76struct map *map_groups__find_by_name(struct map_groups *self,
77 enum map_type type, const char *name);
78
79int __map_groups__create_kernel_maps(struct map_groups *self,
80 struct map *vmlinux_maps[MAP__NR_TYPES],
81 struct dso *kernel);
82int map_groups__create_kernel_maps(struct map_groups *self,
83 struct map *vmlinux_maps[MAP__NR_TYPES]);
84
85struct map *map_groups__new_module(struct map_groups *self, u64 start,
86 const char *filename);
87#endif /* __PERF_THREAD_H */ 44#endif /* __PERF_THREAD_H */
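
The new find_all_tid() line above is only a declaration; its implementation is not part of this hunk. A plausible shape for such a helper (an assumption for illustration, not the perf code) is to walk /proc/<pid>/task and collect every thread id:

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/* Return the number of thread ids found for 'pid', allocating *all_tid. */
static int list_tids(pid_t pid, pid_t **all_tid)
{
	char path[64];
	struct dirent *dent;
	DIR *dir;
	pid_t *tids = NULL, *tmp;
	int nr = 0;

	snprintf(path, sizeof(path), "/proc/%d/task", (int)pid);
	dir = opendir(path);
	if (dir == NULL)
		return -1;

	while ((dent = readdir(dir)) != NULL) {
		if (dent->d_name[0] == '.')
			continue;			/* skip "." and ".." */
		tmp = realloc(tids, (nr + 1) * sizeof(*tids));
		if (tmp == NULL) {
			free(tids);
			closedir(dir);
			return -1;
		}
		tids = tmp;
		tids[nr++] = (pid_t)atoi(dent->d_name);
	}

	closedir(dir);
	*all_tid = tids;
	return nr;
}

int main(void)
{
	pid_t *tids = NULL;
	int i, nr = list_tids(getpid(), &tids);

	for (i = 0; i < nr; i++)
		printf("tid %d\n", (int)tids[i]);
	free(tids);
	return 0;
}
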
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 5ea8973ad331..b1572601286c 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -154,10 +154,17 @@ static void put_tracing_file(char *file)
154 free(file); 154 free(file);
155} 155}
156 156
157static ssize_t calc_data_size;
158
157static ssize_t write_or_die(const void *buf, size_t len) 159static ssize_t write_or_die(const void *buf, size_t len)
158{ 160{
159 int ret; 161 int ret;
160 162
163 if (calc_data_size) {
164 calc_data_size += len;
165 return len;
166 }
167
161 ret = write(output_fd, buf, len); 168 ret = write(output_fd, buf, len);
162 if (ret < 0) 169 if (ret < 0)
163 die("writing to '%s'", output_file); 170 die("writing to '%s'", output_file);
@@ -480,6 +487,17 @@ get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events)
480 return nr_tracepoints > 0 ? path.next : NULL; 487 return nr_tracepoints > 0 ? path.next : NULL;
481} 488}
482 489
490bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events)
491{
492 int i;
493
494 for (i = 0; i < nb_events; i++)
495 if (pattrs[i].type == PERF_TYPE_TRACEPOINT)
496 return true;
497
498 return false;
499}
500
483int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) 501int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events)
484{ 502{
485 char buf[BUFSIZ]; 503 char buf[BUFSIZ];
@@ -526,3 +544,20 @@ int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events)
526 544
527 return 0; 545 return 0;
528} 546}
547
548ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs,
549 int nb_events)
550{
551 ssize_t size;
552 int err = 0;
553
554 calc_data_size = 1;
555 err = read_tracing_data(fd, pattrs, nb_events);
556 size = calc_data_size - 1;
557 calc_data_size = 0;
558
559 if (err < 0)
560 return err;
561
562 return size;
563}
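
read_tracing_data_size() above reuses the normal write path in a counting mode: calc_data_size is armed, write_or_die() then accumulates lengths instead of writing, and the total (minus the initial 1) is returned. A runnable sketch of that pattern, with illustrative names:

#include <stdio.h>
#include <unistd.h>

static ssize_t calc_data_size;	/* 0 = really write, >0 = only count */

static ssize_t write_or_count(int fd, const void *buf, size_t len)
{
	if (calc_data_size) {
		calc_data_size += len;		/* dry run: just tally */
		return (ssize_t)len;
	}
	return write(fd, buf, len);
}

static void emit_payload(int fd)
{
	write_or_count(fd, "header", 6);
	write_or_count(fd, "payload", 7);
}

int main(void)
{
	calc_data_size = 1;			/* armed; starts at 1, hence -1 below */
	emit_payload(-1);			/* fd unused while counting */
	printf("would write %zd bytes\n", (ssize_t)(calc_data_size - 1));
	calc_data_size = 0;
	return 0;
}
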
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 613c9cc90570..73a02223c629 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -37,10 +37,12 @@ int header_page_ts_offset;
37int header_page_ts_size; 37int header_page_ts_size;
38int header_page_size_offset; 38int header_page_size_offset;
39int header_page_size_size; 39int header_page_size_size;
40int header_page_overwrite_offset;
41int header_page_overwrite_size;
40int header_page_data_offset; 42int header_page_data_offset;
41int header_page_data_size; 43int header_page_data_size;
42 44
43int latency_format; 45bool latency_format;
44 46
45static char *input_buf; 47static char *input_buf;
46static unsigned long long input_buf_ptr; 48static unsigned long long input_buf_ptr;
@@ -628,23 +630,32 @@ static int test_type(enum event_type type, enum event_type expect)
628 return 0; 630 return 0;
629} 631}
630 632
631static int test_type_token(enum event_type type, char *token, 633static int __test_type_token(enum event_type type, char *token,
632 enum event_type expect, const char *expect_tok) 634 enum event_type expect, const char *expect_tok,
635 bool warn)
633{ 636{
634 if (type != expect) { 637 if (type != expect) {
635 warning("Error: expected type %d but read %d", 638 if (warn)
636 expect, type); 639 warning("Error: expected type %d but read %d",
640 expect, type);
637 return -1; 641 return -1;
638 } 642 }
639 643
640 if (strcmp(token, expect_tok) != 0) { 644 if (strcmp(token, expect_tok) != 0) {
641 warning("Error: expected '%s' but read '%s'", 645 if (warn)
642 expect_tok, token); 646 warning("Error: expected '%s' but read '%s'",
647 expect_tok, token);
643 return -1; 648 return -1;
644 } 649 }
645 return 0; 650 return 0;
646} 651}
647 652
653static int test_type_token(enum event_type type, char *token,
654 enum event_type expect, const char *expect_tok)
655{
656 return __test_type_token(type, token, expect, expect_tok, true);
657}
658
648static int __read_expect_type(enum event_type expect, char **tok, int newline_ok) 659static int __read_expect_type(enum event_type expect, char **tok, int newline_ok)
649{ 660{
650 enum event_type type; 661 enum event_type type;
@@ -661,7 +672,8 @@ static int read_expect_type(enum event_type expect, char **tok)
661 return __read_expect_type(expect, tok, 1); 672 return __read_expect_type(expect, tok, 1);
662} 673}
663 674
664static int __read_expected(enum event_type expect, const char *str, int newline_ok) 675static int __read_expected(enum event_type expect, const char *str,
676 int newline_ok, bool warn)
665{ 677{
666 enum event_type type; 678 enum event_type type;
667 char *token; 679 char *token;
@@ -672,7 +684,7 @@ static int __read_expected(enum event_type expect, const char *str, int newline_
672 else 684 else
673 type = read_token_item(&token); 685 type = read_token_item(&token);
674 686
675 ret = test_type_token(type, token, expect, str); 687 ret = __test_type_token(type, token, expect, str, warn);
676 688
677 free_token(token); 689 free_token(token);
678 690
@@ -681,12 +693,12 @@ static int __read_expected(enum event_type expect, const char *str, int newline_
681 693
682static int read_expected(enum event_type expect, const char *str) 694static int read_expected(enum event_type expect, const char *str)
683{ 695{
684 return __read_expected(expect, str, 1); 696 return __read_expected(expect, str, 1, true);
685} 697}
686 698
687static int read_expected_item(enum event_type expect, const char *str) 699static int read_expected_item(enum event_type expect, const char *str)
688{ 700{
689 return __read_expected(expect, str, 0); 701 return __read_expected(expect, str, 0, true);
690} 702}
691 703
692static char *event_read_name(void) 704static char *event_read_name(void)
@@ -744,7 +756,7 @@ static int field_is_string(struct format_field *field)
744 756
745static int field_is_dynamic(struct format_field *field) 757static int field_is_dynamic(struct format_field *field)
746{ 758{
747 if (!strcmp(field->type, "__data_loc")) 759 if (!strncmp(field->type, "__data_loc", 10))
748 return 1; 760 return 1;
749 761
750 return 0; 762 return 0;
@@ -3087,88 +3099,6 @@ static void print_args(struct print_arg *args)
3087 } 3099 }
3088} 3100}
3089 3101
3090static void parse_header_field(const char *field,
3091 int *offset, int *size)
3092{
3093 char *token;
3094 int type;
3095
3096 if (read_expected(EVENT_ITEM, "field") < 0)
3097 return;
3098 if (read_expected(EVENT_OP, ":") < 0)
3099 return;
3100
3101 /* type */
3102 if (read_expect_type(EVENT_ITEM, &token) < 0)
3103 goto fail;
3104 free_token(token);
3105
3106 if (read_expected(EVENT_ITEM, field) < 0)
3107 return;
3108 if (read_expected(EVENT_OP, ";") < 0)
3109 return;
3110 if (read_expected(EVENT_ITEM, "offset") < 0)
3111 return;
3112 if (read_expected(EVENT_OP, ":") < 0)
3113 return;
3114 if (read_expect_type(EVENT_ITEM, &token) < 0)
3115 goto fail;
3116 *offset = atoi(token);
3117 free_token(token);
3118 if (read_expected(EVENT_OP, ";") < 0)
3119 return;
3120 if (read_expected(EVENT_ITEM, "size") < 0)
3121 return;
3122 if (read_expected(EVENT_OP, ":") < 0)
3123 return;
3124 if (read_expect_type(EVENT_ITEM, &token) < 0)
3125 goto fail;
3126 *size = atoi(token);
3127 free_token(token);
3128 if (read_expected(EVENT_OP, ";") < 0)
3129 return;
3130 type = read_token(&token);
3131 if (type != EVENT_NEWLINE) {
3132 /* newer versions of the kernel have a "signed" type */
3133 if (type != EVENT_ITEM)
3134 goto fail;
3135
3136 if (strcmp(token, "signed") != 0)
3137 goto fail;
3138
3139 free_token(token);
3140
3141 if (read_expected(EVENT_OP, ":") < 0)
3142 return;
3143
3144 if (read_expect_type(EVENT_ITEM, &token))
3145 goto fail;
3146
3147 free_token(token);
3148 if (read_expected(EVENT_OP, ";") < 0)
3149 return;
3150
3151 if (read_expect_type(EVENT_NEWLINE, &token))
3152 goto fail;
3153 }
3154 fail:
3155 free_token(token);
3156}
3157
3158int parse_header_page(char *buf, unsigned long size)
3159{
3160 init_input_buf(buf, size);
3161
3162 parse_header_field("timestamp", &header_page_ts_offset,
3163 &header_page_ts_size);
3164 parse_header_field("commit", &header_page_size_offset,
3165 &header_page_size_size);
3166 parse_header_field("data", &header_page_data_offset,
3167 &header_page_data_size);
3168
3169 return 0;
3170}
3171
3172int parse_ftrace_file(char *buf, unsigned long size) 3102int parse_ftrace_file(char *buf, unsigned long size)
3173{ 3103{
3174 struct format_field *field; 3104 struct format_field *field;
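
The field_is_dynamic() change above switches from an exact strcmp() to a 10-character strncmp() prefix match. A small demonstration of why: once the field type string carries a trailing element type (for example "__data_loc char[]"), only the prefix comparison still recognizes it:

#include <stdio.h>
#include <string.h>

static int is_dynamic(const char *type)
{
	/* match the "__data_loc" prefix, ignoring whatever follows */
	return strncmp(type, "__data_loc", 10) == 0;
}

int main(void)
{
	printf("%d\n", is_dynamic("__data_loc"));			/* 1 */
	printf("%d\n", is_dynamic("__data_loc char[]"));		/* 1: prefix matches */
	printf("%d\n", strcmp("__data_loc char[]", "__data_loc") == 0);	/* 0: exact match fails */
	return 0;
}
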
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 7cd1193918c7..cb54cd002f49 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -50,14 +50,51 @@ static int long_size;
50 50
51static unsigned long page_size; 51static unsigned long page_size;
52 52
53static ssize_t calc_data_size;
54static bool repipe;
55
56/* If it fails, the next read will report it */
57static void skip(int size)
58{
59 lseek(input_fd, size, SEEK_CUR);
60}
61
62static int do_read(int fd, void *buf, int size)
63{
64 int rsize = size;
65
66 while (size) {
67 int ret = read(fd, buf, size);
68
69 if (ret <= 0)
70 return -1;
71
72 if (repipe) {
73 int retw = write(STDOUT_FILENO, buf, ret);
74
75 if (retw <= 0 || retw != ret)
76 die("repiping input file");
77 }
78
79 size -= ret;
80 buf += ret;
81 }
82
83 return rsize;
84}
85
53static int read_or_die(void *data, int size) 86static int read_or_die(void *data, int size)
54{ 87{
55 int r; 88 int r;
56 89
57 r = read(input_fd, data, size); 90 r = do_read(input_fd, data, size);
58 if (r != size) 91 if (r <= 0)
59 die("reading input file (size expected=%d received=%d)", 92 die("reading input file (size expected=%d received=%d)",
60 size, r); 93 size, r);
94
95 if (calc_data_size)
96 calc_data_size += r;
97
61 return r; 98 return r;
62} 99}
63 100
@@ -82,57 +119,36 @@ static char *read_string(void)
82 char buf[BUFSIZ]; 119 char buf[BUFSIZ];
83 char *str = NULL; 120 char *str = NULL;
84 int size = 0; 121 int size = 0;
85 int i;
86 off_t r; 122 off_t r;
123 char c;
87 124
88 for (;;) { 125 for (;;) {
89 r = read(input_fd, buf, BUFSIZ); 126 r = read(input_fd, &c, 1);
90 if (r < 0) 127 if (r < 0)
91 die("reading input file"); 128 die("reading input file");
92 129
93 if (!r) 130 if (!r)
94 die("no data"); 131 die("no data");
95 132
96 for (i = 0; i < r; i++) { 133 if (repipe) {
97 if (!buf[i]) 134 int retw = write(STDOUT_FILENO, &c, 1);
98 break;
99 }
100 if (i < r)
101 break;
102 135
103 if (str) { 136 if (retw <= 0 || retw != r)
104 size += BUFSIZ; 137 die("repiping input file string");
105 str = realloc(str, size);
106 if (!str)
107 die("malloc of size %d", size);
108 memcpy(str + (size - BUFSIZ), buf, BUFSIZ);
109 } else {
110 size = BUFSIZ;
111 str = malloc_or_die(size);
112 memcpy(str, buf, size);
113 } 138 }
114 }
115 139
116 /* trailing \0: */ 140 buf[size++] = c;
117 i++; 141
118 142 if (!c)
119 /* move the file descriptor to the end of the string */ 143 break;
120 r = lseek(input_fd, -(r - i), SEEK_CUR);
121 if (r == (off_t)-1)
122 die("lseek");
123
124 if (str) {
125 size += i;
126 str = realloc(str, size);
127 if (!str)
128 die("malloc of size %d", size);
129 memcpy(str + (size - i), buf, i);
130 } else {
131 size = i;
132 str = malloc_or_die(i);
133 memcpy(str, buf, i);
134 } 144 }
135 145
146 if (calc_data_size)
147 calc_data_size += size;
148
149 str = malloc_or_die(size);
150 memcpy(str, buf, size);
151
136 return str; 152 return str;
137} 153}
138 154
@@ -174,7 +190,6 @@ static void read_ftrace_printk(void)
174static void read_header_files(void) 190static void read_header_files(void)
175{ 191{
176 unsigned long long size; 192 unsigned long long size;
177 char *header_page;
178 char *header_event; 193 char *header_event;
179 char buf[BUFSIZ]; 194 char buf[BUFSIZ];
180 195
@@ -184,10 +199,7 @@ static void read_header_files(void)
184 die("did not read header page"); 199 die("did not read header page");
185 200
186 size = read8(); 201 size = read8();
187 header_page = malloc_or_die(size); 202 skip(size);
188 read_or_die(header_page, size);
189 parse_header_page(header_page, size);
190 free(header_page);
191 203
192 /* 204 /*
193 * The size field in the page is of type long, 205 * The size field in the page is of type long,
@@ -459,7 +471,7 @@ struct record *trace_read_data(int cpu)
459 return data; 471 return data;
460} 472}
461 473
462void trace_report(int fd) 474ssize_t trace_report(int fd, bool __repipe)
463{ 475{
464 char buf[BUFSIZ]; 476 char buf[BUFSIZ];
465 char test[] = { 23, 8, 68 }; 477 char test[] = { 23, 8, 68 };
@@ -467,6 +479,10 @@ void trace_report(int fd)
467 int show_version = 0; 479 int show_version = 0;
468 int show_funcs = 0; 480 int show_funcs = 0;
469 int show_printk = 0; 481 int show_printk = 0;
482 ssize_t size;
483
484 calc_data_size = 1;
485 repipe = __repipe;
470 486
471 input_fd = fd; 487 input_fd = fd;
472 488
@@ -499,14 +515,18 @@ void trace_report(int fd)
499 read_proc_kallsyms(); 515 read_proc_kallsyms();
500 read_ftrace_printk(); 516 read_ftrace_printk();
501 517
518 size = calc_data_size - 1;
519 calc_data_size = 0;
520 repipe = false;
521
502 if (show_funcs) { 522 if (show_funcs) {
503 print_funcs(); 523 print_funcs();
504 return; 524 return size;
505 } 525 }
506 if (show_printk) { 526 if (show_printk) {
507 print_printk(); 527 print_printk();
508 return; 528 return size;
509 } 529 }
510 530
511 return; 531 return size;
512} 532}
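
The do_read() helper added above retries short reads until the full size arrives and, when repipe is set, re-emits every chunk on stdout so the input stream can be forwarded through a pipeline. A self-contained sketch of the same loop (illustrative, not the perf code):

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

static bool repipe;

/* Read exactly 'size' bytes from 'fd', or return -1 on EOF/error. */
static int read_full(int fd, void *buf, int size)
{
	int total = size;
	char *p = buf;

	while (size) {
		int ret = read(fd, p, size);

		if (ret <= 0)
			return -1;
		if (repipe && write(STDOUT_FILENO, p, ret) != ret)
			return -1;		/* forwarding the stream failed */
		size -= ret;
		p += ret;
	}
	return total;
}

int main(void)
{
	char buf[16];

	repipe = false;
	if (read_full(STDIN_FILENO, buf, sizeof(buf)) == (int)sizeof(buf))
		fprintf(stderr, "read %zu bytes\n", sizeof(buf));
	return 0;
}
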
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index c3269b937db4..406d452956db 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -1,6 +1,7 @@
1#ifndef __PERF_TRACE_EVENTS_H 1#ifndef __PERF_TRACE_EVENTS_H
2#define __PERF_TRACE_EVENTS_H 2#define __PERF_TRACE_EVENTS_H
3 3
4#include <stdbool.h>
4#include "parse-events.h" 5#include "parse-events.h"
5 6
6#define __unused __attribute__((unused)) 7#define __unused __attribute__((unused))
@@ -162,7 +163,7 @@ struct record *trace_read_data(int cpu);
162 163
163void parse_set_info(int nr_cpus, int long_sz); 164void parse_set_info(int nr_cpus, int long_sz);
164 165
165void trace_report(int fd); 166ssize_t trace_report(int fd, bool repipe);
166 167
167void *malloc_or_die(unsigned int size); 168void *malloc_or_die(unsigned int size);
168 169
@@ -241,9 +242,8 @@ extern int header_page_size_size;
241extern int header_page_data_offset; 242extern int header_page_data_offset;
242extern int header_page_data_size; 243extern int header_page_data_size;
243 244
244extern int latency_format; 245extern bool latency_format;
245 246
246int parse_header_page(char *buf, unsigned long size);
247int trace_parse_common_type(void *data); 247int trace_parse_common_type(void *data);
248int trace_parse_common_pid(void *data); 248int trace_parse_common_pid(void *data);
249int parse_common_pc(void *data); 249int parse_common_pc(void *data);
@@ -258,6 +258,8 @@ void *raw_field_ptr(struct event *event, const char *name, void *data);
258unsigned long long eval_flag(const char *flag); 258unsigned long long eval_flag(const char *flag);
259 259
260int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); 260int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events);
261ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs,
262 int nb_events);
261 263
262/* taken from kernel/trace/trace.h */ 264/* taken from kernel/trace/trace.h */
263enum trace_flag_type { 265enum trace_flag_type {
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index f9b890fde681..214265674ddd 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -92,3 +92,25 @@ out_close_from:
92out: 92out:
93 return err; 93 return err;
94} 94}
95
96unsigned long convert_unit(unsigned long value, char *unit)
97{
98 *unit = ' ';
99
100 if (value > 1000) {
101 value /= 1000;
102 *unit = 'K';
103 }
104
105 if (value > 1000) {
106 value /= 1000;
107 *unit = 'M';
108 }
109
110 if (value > 1000) {
111 value /= 1000;
112 *unit = 'G';
113 }
114
115 return value;
116}
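
convert_unit() above scales a raw count into K/M/G by dividing by 1000 up to three times while tracking the unit suffix. The same logic, reproduced as a runnable snippet:

#include <stdio.h>

static unsigned long scale(unsigned long value, char *unit)
{
	*unit = ' ';
	if (value > 1000) { value /= 1000; *unit = 'K'; }
	if (value > 1000) { value /= 1000; *unit = 'M'; }
	if (value > 1000) { value /= 1000; *unit = 'G'; }
	return value;
}

int main(void)
{
	char unit;
	unsigned long v = scale(2345678UL, &unit);

	printf("%lu%c\n", v, unit);	/* prints "2M" */
	return 0;
}
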
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 0f5b2a6f1080..0795bf304b19 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -42,12 +42,14 @@
42#define _ALL_SOURCE 1 42#define _ALL_SOURCE 1
43#define _GNU_SOURCE 1 43#define _GNU_SOURCE 1
44#define _BSD_SOURCE 1 44#define _BSD_SOURCE 1
45#define HAS_BOOL
45 46
46#include <unistd.h> 47#include <unistd.h>
47#include <stdio.h> 48#include <stdio.h>
48#include <sys/stat.h> 49#include <sys/stat.h>
49#include <sys/statfs.h> 50#include <sys/statfs.h>
50#include <fcntl.h> 51#include <fcntl.h>
52#include <stdbool.h>
51#include <stddef.h> 53#include <stddef.h>
52#include <stdlib.h> 54#include <stdlib.h>
53#include <stdarg.h> 55#include <stdarg.h>
@@ -78,6 +80,7 @@
78#include <pwd.h> 80#include <pwd.h>
79#include <inttypes.h> 81#include <inttypes.h>
80#include "../../../include/linux/magic.h" 82#include "../../../include/linux/magic.h"
83#include "types.h"
81 84
82 85
83#ifndef NO_ICONV 86#ifndef NO_ICONV
@@ -295,6 +298,13 @@ extern void *xmemdupz(const void *data, size_t len);
295extern char *xstrndup(const char *str, size_t len); 298extern char *xstrndup(const char *str, size_t len);
296extern void *xrealloc(void *ptr, size_t size) __attribute__((weak)); 299extern void *xrealloc(void *ptr, size_t size) __attribute__((weak));
297 300
301static inline void *xzalloc(size_t size)
302{
303 void *buf = xmalloc(size);
304
305 return memset(buf, 0, size);
306}
307
298static inline void *zalloc(size_t size) 308static inline void *zalloc(size_t size)
299{ 309{
300 return calloc(1, size); 310 return calloc(1, size);
@@ -309,6 +319,7 @@ static inline int has_extension(const char *filename, const char *ext)
309{ 319{
310 size_t len = strlen(filename); 320 size_t len = strlen(filename);
311 size_t extlen = strlen(ext); 321 size_t extlen = strlen(ext);
322
312 return len > extlen && !memcmp(filename + len - extlen, ext, extlen); 323 return len > extlen && !memcmp(filename + len - extlen, ext, extlen);
313} 324}
314 325
@@ -322,6 +333,7 @@ static inline int has_extension(const char *filename, const char *ext)
322#undef isalnum 333#undef isalnum
323#undef tolower 334#undef tolower
324#undef toupper 335#undef toupper
336
325extern unsigned char sane_ctype[256]; 337extern unsigned char sane_ctype[256];
326#define GIT_SPACE 0x01 338#define GIT_SPACE 0x01
327#define GIT_DIGIT 0x02 339#define GIT_DIGIT 0x02
@@ -406,4 +418,14 @@ void git_qsort(void *base, size_t nmemb, size_t size,
406int mkdir_p(char *path, mode_t mode); 418int mkdir_p(char *path, mode_t mode);
407int copyfile(const char *from, const char *to); 419int copyfile(const char *from, const char *to);
408 420
421s64 perf_atoll(const char *str);
422char **argv_split(const char *str, int *argcp);
423void argv_free(char **argv);
424bool strglobmatch(const char *str, const char *pat);
425bool strlazymatch(const char *str, const char *pat);
426unsigned long convert_unit(unsigned long value, char *unit);
427
428#define _STR(x) #x
429#define STR(x) _STR(x)
430
409#endif 431#endif
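
The _STR/STR pair added above uses two macro levels so the argument is macro-expanded before '#' stringizes it. A minimal demonstration of the difference:

#include <stdio.h>

#define _STR(x) #x
#define STR(x) _STR(x)
#define VERSION 42

int main(void)
{
	printf("%s\n", _STR(VERSION));	/* "VERSION": stringized before expansion */
	printf("%s\n", STR(VERSION));	/* "42": expanded first, then stringized */
	return 0;
}
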