Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/Build | 1
-rw-r--r--  tools/perf/util/annotate.c | 49
-rw-r--r--  tools/perf/util/annotate.h | 5
-rw-r--r--  tools/perf/util/auxtrace.c | 11
-rw-r--r--  tools/perf/util/bpf-loader.c | 4
-rw-r--r--  tools/perf/util/config.c | 8
-rw-r--r--  tools/perf/util/cs-etm-decoder/cs-etm-decoder.c | 102
-rw-r--r--  tools/perf/util/cs-etm-decoder/cs-etm-decoder.h | 29
-rw-r--r--  tools/perf/util/cs-etm.c | 216
-rw-r--r--  tools/perf/util/dso.c | 8
-rw-r--r--  tools/perf/util/dso.h | 1
-rw-r--r--  tools/perf/util/env.c | 2
-rw-r--r--  tools/perf/util/event.c | 61
-rw-r--r--  tools/perf/util/event.h | 8
-rw-r--r--  tools/perf/util/evlist.c | 20
-rw-r--r--  tools/perf/util/evlist.h | 8
-rw-r--r--  tools/perf/util/evsel.h | 4
-rw-r--r--  tools/perf/util/evsel_fprintf.c | 1
-rw-r--r--  tools/perf/util/header.c | 51
-rw-r--r--  tools/perf/util/hist.c | 2
-rw-r--r--  tools/perf/util/hist.h | 1
-rw-r--r--  tools/perf/util/jitdump.c | 2
-rw-r--r--  tools/perf/util/machine.c | 33
-rw-r--r--  tools/perf/util/machine.h | 6
-rw-r--r--  tools/perf/util/map.c | 89
-rw-r--r--  tools/perf/util/map.h | 18
-rw-r--r--  tools/perf/util/mmap.c | 152
-rw-r--r--  tools/perf/util/mmap.h | 26
-rw-r--r--  tools/perf/util/ordered-events.c | 44
-rw-r--r--  tools/perf/util/ordered-events.h | 8
-rw-r--r--  tools/perf/util/parse-events.c | 2
-rw-r--r--  tools/perf/util/pmu.c | 47
-rw-r--r--  tools/perf/util/probe-event.c | 4
-rw-r--r--  tools/perf/util/probe-file.c | 2
-rw-r--r--  tools/perf/util/python.c | 4
-rw-r--r--  tools/perf/util/s390-cpumsf.c | 2
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c | 6
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 24
-rw-r--r--  tools/perf/util/session.c | 7
-rw-r--r--  tools/perf/util/sort.c | 63
-rw-r--r--  tools/perf/util/sort.h | 2
-rw-r--r--  tools/perf/util/srccode.c | 186
-rw-r--r--  tools/perf/util/srccode.h | 7
-rw-r--r--  tools/perf/util/srcline.c | 28
-rw-r--r--  tools/perf/util/srcline.h | 1
-rw-r--r--  tools/perf/util/stat-display.c | 16
-rw-r--r--  tools/perf/util/stat-shadow.c | 6
-rw-r--r--  tools/perf/util/svghelper.c | 2
-rw-r--r--  tools/perf/util/symbol.c | 26
-rw-r--r--  tools/perf/util/symbol.h | 1
-rw-r--r--  tools/perf/util/thread.c | 2
-rw-r--r--  tools/perf/util/thread.h | 6
-rw-r--r--  tools/perf/util/top.c | 8
-rw-r--r--  tools/perf/util/top.h | 10
-rw-r--r--  tools/perf/util/trace-event-parse.c | 16
-rw-r--r--  tools/perf/util/trace-event-read.c | 4
-rw-r--r--  tools/perf/util/trace-event.c | 8
-rw-r--r--  tools/perf/util/trace-event.h | 16
58 files changed, 1192 insertions(+), 284 deletions(-)
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index b7bf201fe8a8..af72be7f5b3b 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -77,6 +77,7 @@ libperf-y += stat-shadow.o
 libperf-y += stat-display.o
 libperf-y += record.o
 libperf-y += srcline.o
+libperf-y += srccode.o
 libperf-y += data.o
 libperf-y += tsc.o
 libperf-y += cloexec.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 6936daf89ddd..ac9805e0bc76 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -134,6 +134,7 @@ static int arch__associate_ins_ops(struct arch* arch, const char *name, struct i
 	return 0;
 }
 
+#include "arch/arc/annotate/instructions.c"
 #include "arch/arm/annotate/instructions.c"
 #include "arch/arm64/annotate/instructions.c"
 #include "arch/x86/annotate/instructions.c"
@@ -143,6 +144,10 @@ static int arch__associate_ins_ops(struct arch* arch, const char *name, struct i
 
 static struct arch architectures[] = {
 	{
+		.name = "arc",
+		.init = arc__annotate_init,
+	},
+	{
 		.name = "arm",
 		.init = arm__annotate_init,
 	},
@@ -1000,6 +1005,7 @@ static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64
 static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
 {
 	unsigned n_insn;
+	unsigned int cover_insn = 0;
 	u64 offset;
 
 	n_insn = annotation__count_insn(notes, start, end);
@@ -1013,21 +1019,34 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
 		for (offset = start; offset <= end; offset++) {
 			struct annotation_line *al = notes->offsets[offset];
 
-			if (al)
+			if (al && al->ipc == 0.0) {
 				al->ipc = ipc;
+				cover_insn++;
+			}
+		}
+
+		if (cover_insn) {
+			notes->hit_cycles += ch->cycles;
+			notes->hit_insn += n_insn * ch->num;
+			notes->cover_insn += cover_insn;
 		}
 	}
 }
 
 void annotation__compute_ipc(struct annotation *notes, size_t size)
 {
-	u64 offset;
+	s64 offset;
 
 	if (!notes->src || !notes->src->cycles_hist)
 		return;
 
+	notes->total_insn = annotation__count_insn(notes, 0, size - 1);
+	notes->hit_cycles = 0;
+	notes->hit_insn = 0;
+	notes->cover_insn = 0;
+
 	pthread_mutex_lock(&notes->lock);
-	for (offset = 0; offset < size; ++offset) {
+	for (offset = size - 1; offset >= 0; --offset) {
 		struct cyc_hist *ch;
 
 		ch = &notes->src->cycles_hist[offset];
@@ -1758,7 +1777,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
 	while (!feof(file)) {
 		/*
 		 * The source code line number (lineno) needs to be kept in
-		 * accross calls to symbol__parse_objdump_line(), so that it
+		 * across calls to symbol__parse_objdump_line(), so that it
 		 * can associate it with the instructions till the next one.
 		 * See disasm_line__new() and struct disasm_line::line_nr.
 		 */
@@ -2563,6 +2582,22 @@ call_like:
 	disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset);
 }
 
+static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
+{
+	double ipc = 0.0, coverage = 0.0;
+
+	if (notes->hit_cycles)
+		ipc = notes->hit_insn / ((double)notes->hit_cycles);
+
+	if (notes->total_insn) {
+		coverage = notes->cover_insn * 100.0 /
+			((double)notes->total_insn);
+	}
+
+	scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
+		  ipc, coverage);
+}
+
 static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
 				     bool first_line, bool current_entry, bool change_color, int width,
 				     void *obj, unsigned int percent_type,
@@ -2658,6 +2693,11 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
 					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
 					    "Cycle(min/max)");
 		}
+
+		if (show_title && !*al->line) {
+			ipc_coverage_string(bf, sizeof(bf), notes);
+			obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
+		}
 	}
 
 	obj__printf(obj, " ");
@@ -2763,6 +2803,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev
 	notes->nr_events = nr_pcnt;
 
 	annotation__update_column_widths(notes);
+	sym->annotate2 = true;
 
 	return 0;
 
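The annotate.c hunks above accumulate three new per-symbol counters (hit_cycles, hit_insn, cover_insn) plus total_insn, and print a summary line from them. The arithmetic is small enough to show on its own; the standalone sketch below uses made-up counter values purely for illustration and is not part of the patch.

#include <stdio.h>

int main(void)
{
	/* Hypothetical counters, standing in for the struct annotation fields. */
	unsigned long long hit_insn = 1200, hit_cycles = 800;
	unsigned int cover_insn = 90, total_insn = 120;
	double ipc = 0.0, coverage = 0.0;

	if (hit_cycles)
		ipc = hit_insn / (double)hit_cycles;	/* average IPC */
	if (total_insn)
		coverage = cover_insn * 100.0 / (double)total_insn;

	/* Mirrors the summary printed by ipc_coverage_string(). */
	printf("(Average IPC: %.2f, IPC Coverage: %.1f%%)\n", ipc, coverage);
	return 0;
}

With the values above this prints "(Average IPC: 1.50, IPC Coverage: 75.0%)".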
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 5399ba2321bb..fb6463730ba4 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -64,6 +64,7 @@ bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
 #define ANNOTATION__IPC_WIDTH 6
 #define ANNOTATION__CYCLES_WIDTH 6
 #define ANNOTATION__MINMAX_CYCLES_WIDTH 19
+#define ANNOTATION__AVG_IPC_WIDTH 36
 
 struct annotation_options {
 	bool hide_src_code,
@@ -262,6 +263,10 @@ struct annotation {
 	pthread_mutex_t lock;
 	u64 max_coverage;
 	u64 start;
+	u64 hit_cycles;
+	u64 hit_insn;
+	unsigned int total_insn;
+	unsigned int cover_insn;
 	struct annotation_options *options;
 	struct annotation_line **offsets;
 	int nr_events;
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 72d5ba2479bf..f69961c4a4f3 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1983,17 +1983,14 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
 
 static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
 {
-	struct symbol *first_sym = dso__first_symbol(dso);
-	struct symbol *last_sym = dso__last_symbol(dso);
-
-	if (!first_sym || !last_sym) {
-		pr_err("Failed to determine filter for %s\nNo symbols found.\n",
+	if (dso__data_file_size(dso, NULL)) {
+		pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
 		       filt->filename);
 		return -EINVAL;
 	}
 
-	filt->addr = first_sym->start;
-	filt->size = last_sym->end - first_sym->start;
+	filt->addr = 0;
+	filt->size = dso->data.file_size;
 
 	return 0;
 }
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index f9ae1a993806..2f3eb6d293ee 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -99,7 +99,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
 		if (err)
 			return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
 	} else
-		pr_debug("bpf: successfull builtin compilation\n");
+		pr_debug("bpf: successful builtin compilation\n");
 	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
 
 	if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
@@ -1603,7 +1603,7 @@ struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const cha
 
 		op = bpf_map__add_newop(map, NULL);
 		if (IS_ERR(op))
-			return ERR_PTR(PTR_ERR(op));
+			return ERR_CAST(op);
 		op->op_type = BPF_MAP_OP_SET_EVSEL;
 		op->v.evsel = evsel;
 	}
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 5ac157056cdf..1ea8f898f1a1 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -14,6 +14,7 @@
 #include "util.h"
 #include "cache.h"
 #include <subcmd/exec-cmd.h>
+#include "util/event.h"  /* proc_map_timeout */
 #include "util/hist.h"  /* perf_hist_config */
 #include "util/llvm-utils.h"   /* perf_llvm_config */
 #include "config.h"
@@ -419,6 +420,9 @@ static int perf_buildid_config(const char *var, const char *value)
 static int perf_default_core_config(const char *var __maybe_unused,
 				    const char *value __maybe_unused)
 {
+	if (!strcmp(var, "core.proc-map-timeout"))
+		proc_map_timeout = strtoul(value, NULL, 10);
+
 	/* Add other config variables here. */
 	return 0;
 }
@@ -811,14 +815,14 @@ int config_error_nonbool(const char *var)
 void set_buildid_dir(const char *dir)
 {
 	if (dir)
-		scnprintf(buildid_dir, MAXPATHLEN-1, "%s", dir);
+		scnprintf(buildid_dir, MAXPATHLEN, "%s", dir);
 
 	/* default to $HOME/.debug */
 	if (buildid_dir[0] == '\0') {
 		char *home = getenv("HOME");
 
 		if (home) {
-			snprintf(buildid_dir, MAXPATHLEN-1, "%s/%s",
+			snprintf(buildid_dir, MAXPATHLEN, "%s/%s",
 				 home, DEBUG_CACHE_DIR);
 		} else {
 			strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1);
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 938def6d0bb9..8c155575c6c5 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -36,7 +36,6 @@
 struct cs_etm_decoder {
 	void *data;
 	void (*packet_printer)(const char *msg);
-	bool trace_on;
 	dcd_tree_handle_t dcd_tree;
 	cs_etm_mem_cb_type mem_access;
 	ocsd_datapath_resp_t prev_return;
@@ -116,6 +115,19 @@ int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
 	return 1;
 }
 
+static int cs_etm_decoder__gen_etmv3_config(struct cs_etm_trace_params *params,
+					    ocsd_etmv3_cfg *config)
+{
+	config->reg_idr = params->etmv3.reg_idr;
+	config->reg_ctrl = params->etmv3.reg_ctrl;
+	config->reg_ccer = params->etmv3.reg_ccer;
+	config->reg_trc_id = params->etmv3.reg_trc_id;
+	config->arch_ver = ARCH_V7;
+	config->core_prof = profile_CortexA;
+
+	return 0;
+}
+
 static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
 					     ocsd_etmv4_cfg *config)
 {
@@ -237,10 +249,19 @@ cs_etm_decoder__create_etm_packet_printer(struct cs_etm_trace_params *t_params,
 					  struct cs_etm_decoder *decoder)
 {
 	const char *decoder_name;
+	ocsd_etmv3_cfg config_etmv3;
 	ocsd_etmv4_cfg trace_config_etmv4;
 	void *trace_config;
 
 	switch (t_params->protocol) {
+	case CS_ETM_PROTO_ETMV3:
+	case CS_ETM_PROTO_PTM:
+		cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
+		decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
+							OCSD_BUILTIN_DCD_ETMV3 :
+							OCSD_BUILTIN_DCD_PTM;
+		trace_config = &config_etmv3;
+		break;
 	case CS_ETM_PROTO_ETMV4i:
 		cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
 		decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
@@ -263,11 +284,12 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
 	decoder->tail = 0;
 	decoder->packet_count = 0;
 	for (i = 0; i < MAX_BUFFER; i++) {
+		decoder->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
 		decoder->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
 		decoder->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
+		decoder->packet_buffer[i].instr_count = 0;
 		decoder->packet_buffer[i].last_instr_taken_branch = false;
-		decoder->packet_buffer[i].exc = false;
-		decoder->packet_buffer[i].exc_ret = false;
+		decoder->packet_buffer[i].last_instr_size = 0;
 		decoder->packet_buffer[i].cpu = INT_MIN;
 	}
 }
@@ -294,11 +316,13 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
 	decoder->packet_count++;
 
 	decoder->packet_buffer[et].sample_type = sample_type;
-	decoder->packet_buffer[et].exc = false;
-	decoder->packet_buffer[et].exc_ret = false;
+	decoder->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
 	decoder->packet_buffer[et].cpu = *((int *)inode->priv);
 	decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
 	decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
+	decoder->packet_buffer[et].instr_count = 0;
+	decoder->packet_buffer[et].last_instr_taken_branch = false;
+	decoder->packet_buffer[et].last_instr_size = 0;
 
 	if (decoder->packet_count == MAX_BUFFER - 1)
 		return OCSD_RESP_WAIT;
@@ -321,8 +345,28 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
 
 	packet = &decoder->packet_buffer[decoder->tail];
 
+	switch (elem->isa) {
+	case ocsd_isa_aarch64:
+		packet->isa = CS_ETM_ISA_A64;
+		break;
+	case ocsd_isa_arm:
+		packet->isa = CS_ETM_ISA_A32;
+		break;
+	case ocsd_isa_thumb2:
+		packet->isa = CS_ETM_ISA_T32;
+		break;
+	case ocsd_isa_tee:
+	case ocsd_isa_jazelle:
+	case ocsd_isa_custom:
+	case ocsd_isa_unknown:
+	default:
+		packet->isa = CS_ETM_ISA_UNKNOWN;
+	}
+
 	packet->start_addr = elem->st_addr;
 	packet->end_addr = elem->en_addr;
+	packet->instr_count = elem->num_instr_range;
+
 	switch (elem->last_i_type) {
 	case OCSD_INSTR_BR:
 	case OCSD_INSTR_BR_INDIRECT:
@@ -336,15 +380,33 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
 		break;
 	}
 
+	packet->last_instr_size = elem->last_instr_sz;
+
 	return ret;
 }
 
 static ocsd_datapath_resp_t
-cs_etm_decoder__buffer_trace_on(struct cs_etm_decoder *decoder,
-				const uint8_t trace_chan_id)
+cs_etm_decoder__buffer_discontinuity(struct cs_etm_decoder *decoder,
+				     const uint8_t trace_chan_id)
 {
 	return cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
-					     CS_ETM_TRACE_ON);
+					     CS_ETM_DISCONTINUITY);
+}
+
+static ocsd_datapath_resp_t
+cs_etm_decoder__buffer_exception(struct cs_etm_decoder *decoder,
+				 const uint8_t trace_chan_id)
+{
+	return cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+					     CS_ETM_EXCEPTION);
+}
+
+static ocsd_datapath_resp_t
+cs_etm_decoder__buffer_exception_ret(struct cs_etm_decoder *decoder,
+				     const uint8_t trace_chan_id)
+{
+	return cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+					     CS_ETM_EXCEPTION_RET);
 }
 
 static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
@@ -359,26 +421,25 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
 	switch (elem->elem_type) {
 	case OCSD_GEN_TRC_ELEM_UNKNOWN:
 		break;
+	case OCSD_GEN_TRC_ELEM_EO_TRACE:
 	case OCSD_GEN_TRC_ELEM_NO_SYNC:
-		decoder->trace_on = false;
-		break;
 	case OCSD_GEN_TRC_ELEM_TRACE_ON:
-		resp = cs_etm_decoder__buffer_trace_on(decoder,
-						       trace_chan_id);
-		decoder->trace_on = true;
+		resp = cs_etm_decoder__buffer_discontinuity(decoder,
+							    trace_chan_id);
 		break;
 	case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
 		resp = cs_etm_decoder__buffer_range(decoder, elem,
 						    trace_chan_id);
 		break;
 	case OCSD_GEN_TRC_ELEM_EXCEPTION:
-		decoder->packet_buffer[decoder->tail].exc = true;
+		resp = cs_etm_decoder__buffer_exception(decoder,
+							trace_chan_id);
 		break;
 	case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
-		decoder->packet_buffer[decoder->tail].exc_ret = true;
+		resp = cs_etm_decoder__buffer_exception_ret(decoder,
+							    trace_chan_id);
 		break;
 	case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
-	case OCSD_GEN_TRC_ELEM_EO_TRACE:
 	case OCSD_GEN_TRC_ELEM_ADDR_NACC:
 	case OCSD_GEN_TRC_ELEM_TIMESTAMP:
 	case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
@@ -398,11 +459,20 @@ static int cs_etm_decoder__create_etm_packet_decoder(
 	struct cs_etm_decoder *decoder)
 {
 	const char *decoder_name;
+	ocsd_etmv3_cfg config_etmv3;
 	ocsd_etmv4_cfg trace_config_etmv4;
 	void *trace_config;
 	u8 csid;
 
 	switch (t_params->protocol) {
+	case CS_ETM_PROTO_ETMV3:
+	case CS_ETM_PROTO_PTM:
+		cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
+		decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
+							OCSD_BUILTIN_DCD_ETMV3 :
+							OCSD_BUILTIN_DCD_PTM;
+		trace_config = &config_etmv3;
+		break;
 	case CS_ETM_PROTO_ETMV4i:
 		cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
 		decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
index 612b5755f742..a6407d41598f 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
@@ -23,18 +23,28 @@ struct cs_etm_buffer {
 };
 
 enum cs_etm_sample_type {
-	CS_ETM_EMPTY = 0,
-	CS_ETM_RANGE = 1 << 0,
-	CS_ETM_TRACE_ON = 1 << 1,
+	CS_ETM_EMPTY,
+	CS_ETM_RANGE,
+	CS_ETM_DISCONTINUITY,
+	CS_ETM_EXCEPTION,
+	CS_ETM_EXCEPTION_RET,
+};
+
+enum cs_etm_isa {
+	CS_ETM_ISA_UNKNOWN,
+	CS_ETM_ISA_A64,
+	CS_ETM_ISA_A32,
+	CS_ETM_ISA_T32,
 };
 
 struct cs_etm_packet {
 	enum cs_etm_sample_type sample_type;
+	enum cs_etm_isa isa;
 	u64 start_addr;
 	u64 end_addr;
+	u32 instr_count;
 	u8 last_instr_taken_branch;
-	u8 exc;
-	u8 exc_ret;
+	u8 last_instr_size;
 	int cpu;
 };
 
@@ -43,6 +53,13 @@ struct cs_etm_queue;
 typedef u32 (*cs_etm_mem_cb_type)(struct cs_etm_queue *, u64,
 				  size_t, u8 *);
 
+struct cs_etmv3_trace_params {
+	u32 reg_ctrl;
+	u32 reg_trc_id;
+	u32 reg_ccer;
+	u32 reg_idr;
+};
+
 struct cs_etmv4_trace_params {
 	u32 reg_idr0;
 	u32 reg_idr1;
@@ -55,6 +72,7 @@ struct cs_etmv4_trace_params {
 struct cs_etm_trace_params {
 	int protocol;
 	union {
+		struct cs_etmv3_trace_params etmv3;
 		struct cs_etmv4_trace_params etmv4;
 	};
 };
@@ -78,6 +96,7 @@ enum {
 	CS_ETM_PROTO_ETMV3 = 1,
 	CS_ETM_PROTO_ETMV4i,
 	CS_ETM_PROTO_ETMV4d,
+	CS_ETM_PROTO_PTM,
 };
 
 enum {
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 73430b73570d..27a374ddf661 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -31,14 +31,6 @@
 
 #define MAX_TIMESTAMP (~0ULL)
 
-/*
- * A64 instructions are always 4 bytes
- *
- * Only A64 is supported, so can use this constant for converting between
- * addresses and instruction counts, calculting offsets etc
- */
-#define A64_INSTR_SIZE 4
-
 struct cs_etm_auxtrace {
 	struct auxtrace auxtrace;
 	struct auxtrace_queues queues;
@@ -91,6 +83,19 @@ static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
 					   pid_t tid, u64 time_);
 
+/* PTMs ETMIDR [11:8] set to b0011 */
+#define ETMIDR_PTM_VERSION 0x00000300
+
+static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
+{
+	etmidr &= ETMIDR_PTM_VERSION;
+
+	if (etmidr == ETMIDR_PTM_VERSION)
+		return CS_ETM_PROTO_PTM;
+
+	return CS_ETM_PROTO_ETMV3;
+}
+
 static void cs_etm__packet_dump(const char *pkt_string)
 {
 	const char *color = PERF_COLOR_BLUE;
@@ -122,15 +127,31 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
 	/* Use metadata to fill in trace parameters for trace decoder */
 	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
 	for (i = 0; i < etm->num_cpu; i++) {
-		t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
-		t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
-		t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
-		t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
-		t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
-		t_params[i].etmv4.reg_configr =
+		if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
+			u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
+
+			t_params[i].protocol =
+				cs_etm__get_v7_protocol_version(etmidr);
+			t_params[i].etmv3.reg_ctrl =
+				etm->metadata[i][CS_ETM_ETMCR];
+			t_params[i].etmv3.reg_trc_id =
+				etm->metadata[i][CS_ETM_ETMTRACEIDR];
+		} else if (etm->metadata[i][CS_ETM_MAGIC] ==
+			   __perf_cs_etmv4_magic) {
+			t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
+			t_params[i].etmv4.reg_idr0 =
+				etm->metadata[i][CS_ETMV4_TRCIDR0];
+			t_params[i].etmv4.reg_idr1 =
+				etm->metadata[i][CS_ETMV4_TRCIDR1];
+			t_params[i].etmv4.reg_idr2 =
+				etm->metadata[i][CS_ETMV4_TRCIDR2];
+			t_params[i].etmv4.reg_idr8 =
+				etm->metadata[i][CS_ETMV4_TRCIDR8];
+			t_params[i].etmv4.reg_configr =
 				etm->metadata[i][CS_ETMV4_TRCCONFIGR];
 			t_params[i].etmv4.reg_traceidr =
 				etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
+		}
 	}
 
 	/* Set decoder parameters to simply print the trace packets */
@@ -360,15 +381,31 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
 		goto out_free;
 
 	for (i = 0; i < etm->num_cpu; i++) {
-		t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
-		t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
-		t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
-		t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
-		t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
-		t_params[i].etmv4.reg_configr =
+		if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
+			u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
+
+			t_params[i].protocol =
+				cs_etm__get_v7_protocol_version(etmidr);
+			t_params[i].etmv3.reg_ctrl =
+				etm->metadata[i][CS_ETM_ETMCR];
+			t_params[i].etmv3.reg_trc_id =
+				etm->metadata[i][CS_ETM_ETMTRACEIDR];
+		} else if (etm->metadata[i][CS_ETM_MAGIC] ==
+			   __perf_cs_etmv4_magic) {
+			t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
+			t_params[i].etmv4.reg_idr0 =
+				etm->metadata[i][CS_ETMV4_TRCIDR0];
+			t_params[i].etmv4.reg_idr1 =
+				etm->metadata[i][CS_ETMV4_TRCIDR1];
+			t_params[i].etmv4.reg_idr2 =
+				etm->metadata[i][CS_ETMV4_TRCIDR2];
+			t_params[i].etmv4.reg_idr8 =
+				etm->metadata[i][CS_ETMV4_TRCIDR8];
+			t_params[i].etmv4.reg_configr =
 				etm->metadata[i][CS_ETMV4_TRCCONFIGR];
 			t_params[i].etmv4.reg_traceidr =
 				etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
+		}
 	}
 
 	/* Set decoder parameters to simply print the trace packets */
@@ -510,53 +547,54 @@ static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
 	etmq->last_branch_rb->nr = 0;
 }
 
-static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
-{
-	/* Returns 0 for the CS_ETM_TRACE_ON packet */
-	if (packet->sample_type == CS_ETM_TRACE_ON)
-		return 0;
+static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
+					 u64 addr) {
+	u8 instrBytes[2];
 
+	cs_etm__mem_access(etmq, addr, ARRAY_SIZE(instrBytes), instrBytes);
 	/*
-	 * The packet records the execution range with an exclusive end address
-	 *
-	 * A64 instructions are constant size, so the last executed
-	 * instruction is A64_INSTR_SIZE before the end address
-	 * Will need to do instruction level decode for T32 instructions as
-	 * they can be variable size (not yet supported).
+	 * T32 instruction size is indicated by bits[15:11] of the first
+	 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
+	 * denote a 32-bit instruction.
 	 */
-	return packet->end_addr - A64_INSTR_SIZE;
+	return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
 }
 
 static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
 {
-	/* Returns 0 for the CS_ETM_TRACE_ON packet */
-	if (packet->sample_type == CS_ETM_TRACE_ON)
+	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
+	if (packet->sample_type == CS_ETM_DISCONTINUITY)
 		return 0;
 
 	return packet->start_addr;
 }
 
-static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet)
+static inline
+u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
 {
-	/*
-	 * Only A64 instructions are currently supported, so can get
-	 * instruction count by dividing.
-	 * Will need to do instruction level decode for T32 instructions as
-	 * they can be variable size (not yet supported).
-	 */
-	return (packet->end_addr - packet->start_addr) / A64_INSTR_SIZE;
+	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
+	if (packet->sample_type == CS_ETM_DISCONTINUITY)
+		return 0;
+
+	return packet->end_addr - packet->last_instr_size;
 }
 
-static inline u64 cs_etm__instr_addr(const struct cs_etm_packet *packet,
+static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
+				     const struct cs_etm_packet *packet,
 				     u64 offset)
 {
-	/*
-	 * Only A64 instructions are currently supported, so can get
-	 * instruction address by muliplying.
-	 * Will need to do instruction level decode for T32 instructions as
-	 * they can be variable size (not yet supported).
-	 */
-	return packet->start_addr + offset * A64_INSTR_SIZE;
+	if (packet->isa == CS_ETM_ISA_T32) {
+		u64 addr = packet->start_addr;
+
+		while (offset > 0) {
+			addr += cs_etm__t32_instr_size(etmq, addr);
+			offset--;
+		}
+		return addr;
+	}
+
+	/* Assume a 4 byte instruction size (A32/A64) */
+	return packet->start_addr + offset * 4;
 }
 
 static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq)
@@ -888,9 +926,8 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
 	struct cs_etm_auxtrace *etm = etmq->etm;
 	struct cs_etm_packet *tmp;
 	int ret;
-	u64 instrs_executed;
+	u64 instrs_executed = etmq->packet->instr_count;
 
-	instrs_executed = cs_etm__instr_count(etmq->packet);
 	etmq->period_instructions += instrs_executed;
 
 	/*
@@ -920,7 +957,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
 		 * executed, but PC has not advanced to next instruction)
 		 */
 		u64 offset = (instrs_executed - instrs_over - 1);
-		u64 addr = cs_etm__instr_addr(etmq->packet, offset);
+		u64 addr = cs_etm__instr_addr(etmq, etmq->packet, offset);
 
 		ret = cs_etm__synth_instruction_sample(
 			etmq, addr, etm->instructions_sample_period);
@@ -935,7 +972,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
 		bool generate_sample = false;
 
 		/* Generate sample for tracing on packet */
-		if (etmq->prev_packet->sample_type == CS_ETM_TRACE_ON)
+		if (etmq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
 			generate_sample = true;
 
 		/* Generate sample for branch taken packet */
@@ -963,6 +1000,25 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
 	return 0;
 }
 
+static int cs_etm__exception(struct cs_etm_queue *etmq)
+{
+	/*
+	 * When the exception packet is inserted, whether the last instruction
+	 * in previous range packet is taken branch or not, we need to force
+	 * to set 'prev_packet->last_instr_taken_branch' to true. This ensures
+	 * to generate branch sample for the instruction range before the
+	 * exception is trapped to kernel or before the exception returning.
+	 *
+	 * The exception packet includes the dummy address values, so don't
+	 * swap PACKET with PREV_PACKET. This keeps PREV_PACKET to be useful
+	 * for generating instruction and branch samples.
+	 */
+	if (etmq->prev_packet->sample_type == CS_ETM_RANGE)
+		etmq->prev_packet->last_instr_taken_branch = true;
+
+	return 0;
+}
+
 static int cs_etm__flush(struct cs_etm_queue *etmq)
 {
 	int err = 0;
@@ -1005,7 +1061,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq)
 	}
 
 swap_packet:
-	if (etmq->etm->synth_opts.last_branch) {
+	if (etm->sample_branches || etm->synth_opts.last_branch) {
 		/*
 		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
 		 * the next incoming packet.
@@ -1018,6 +1074,39 @@ swap_packet:
 	return err;
 }
 
+static int cs_etm__end_block(struct cs_etm_queue *etmq)
+{
+	int err;
+
+	/*
+	 * It has no new packet coming and 'etmq->packet' contains the stale
+	 * packet which was set at the previous time with packets swapping;
+	 * so skip to generate branch sample to avoid stale packet.
+	 *
+	 * For this case only flush branch stack and generate a last branch
+	 * event for the branches left in the circular buffer at the end of
+	 * the trace.
+	 */
+	if (etmq->etm->synth_opts.last_branch &&
+	    etmq->prev_packet->sample_type == CS_ETM_RANGE) {
+		/*
+		 * Use the address of the end of the last reported execution
+		 * range.
+		 */
+		u64 addr = cs_etm__last_executed_instr(etmq->prev_packet);
+
+		err = cs_etm__synth_instruction_sample(
+			etmq, addr,
+			etmq->period_instructions);
+		if (err)
+			return err;
+
+		etmq->period_instructions = 0;
+	}
+
+	return 0;
+}
+
 static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
 {
 	struct cs_etm_auxtrace *etm = etmq->etm;
@@ -1078,7 +1167,16 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
 				 */
 				cs_etm__sample(etmq);
 				break;
-			case CS_ETM_TRACE_ON:
+			case CS_ETM_EXCEPTION:
+			case CS_ETM_EXCEPTION_RET:
+				/*
+				 * If the exception packet is coming,
+				 * make sure the previous instruction
+				 * range packet to be handled properly.
+				 */
+				cs_etm__exception(etmq);
+				break;
+			case CS_ETM_DISCONTINUITY:
 				/*
 				 * Discontinuity in trace, flush
 				 * previous branch stack
@@ -1100,7 +1198,7 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
 
 		if (err == 0)
 			/* Flush any remaining branch stack entries */
-			err = cs_etm__flush(etmq);
+			err = cs_etm__end_block(etmq);
 	}
 
 	return err;
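The cs-etm.c changes above stop assuming a fixed 4-byte instruction size and instead size T32 instructions from their first halfword: if bits [15:11] are 0b11101, 0b11110 or 0b11111 the encoding is 32-bit, otherwise 16-bit. A standalone sketch of that rule follows; the sample halfwords are made up for illustration and the helper name is hypothetical, not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Same rule as cs_etm__t32_instr_size(): inspect bits [15:11] of the
 * first 16-bit word of the instruction. */
static int t32_instr_size(uint16_t first_halfword)
{
	uint16_t op = first_halfword >> 11;	/* bits [15:11] */

	return (op == 0x1d || op == 0x1e || op == 0x1f) ? 4 : 2;
}

int main(void)
{
	printf("%d\n", t32_instr_size(0x4770));	/* BX LR: 16-bit -> 2 */
	printf("%d\n", t32_instr_size(0xf000));	/* BL first halfword: 32-bit -> 4 */
	return 0;
}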
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index bbed90e5d9bb..62c8cf622607 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -295,7 +295,7 @@ static int decompress_kmodule(struct dso *dso, const char *name,
 	unlink(tmpbuf);
 
 	if (pathname && (fd >= 0))
-		strncpy(pathname, tmpbuf, len);
+		strlcpy(pathname, tmpbuf, len);
 
 	return fd;
 }
@@ -894,7 +894,7 @@ static ssize_t cached_read(struct dso *dso, struct machine *machine,
 	return r;
 }
 
-static int data_file_size(struct dso *dso, struct machine *machine)
+int dso__data_file_size(struct dso *dso, struct machine *machine)
 {
 	int ret = 0;
 	struct stat st;
@@ -943,7 +943,7 @@ out:
  */
 off_t dso__data_size(struct dso *dso, struct machine *machine)
 {
-	if (data_file_size(dso, machine))
+	if (dso__data_file_size(dso, machine))
 		return -1;
 
 	/* For now just estimate dso data size is close to file size */
@@ -953,7 +953,7 @@ off_t dso__data_size(struct dso *dso, struct machine *machine)
 static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
 				u64 offset, u8 *data, ssize_t size)
 {
-	if (data_file_size(dso, machine))
+	if (dso__data_file_size(dso, machine))
 		return -1;
 
 	/* Check the offset sanity. */
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index c5380500bed4..8c8a7abe809d 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -322,6 +322,7 @@ int dso__data_get_fd(struct dso *dso, struct machine *machine);
 void dso__data_put_fd(struct dso *dso);
 void dso__data_close(struct dso *dso);
 
+int dso__data_file_size(struct dso *dso, struct machine *machine);
 off_t dso__data_size(struct dso *dso, struct machine *machine);
 ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
 			      u64 offset, u8 *data, ssize_t size);
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 59f38c7693f8..4c23779e271a 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -166,7 +166,7 @@ const char *perf_env__arch(struct perf_env *env)
 	struct utsname uts;
 	char *arch_name;
 
-	if (!env) { /* Assume local operation */
+	if (!env || !env->arch) { /* Assume local operation */
 		if (uname(&uts) < 0)
 			return NULL;
 		arch_name = uts.machine;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index e9c108a6b1c3..937a5a4f71cc 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -25,6 +25,8 @@
 #include "asm/bug.h"
 #include "stat.h"
 
+#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
+
 static const char *perf_event__names[] = {
 	[0] = "TOTAL",
 	[PERF_RECORD_MMAP] = "MMAP",
@@ -72,6 +74,8 @@ static const char *perf_ns__names[] = {
 	[CGROUP_NS_INDEX] = "cgroup",
 };
 
+unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
+
 const char *perf_event__name(unsigned int id)
 {
 	if (id >= ARRAY_SIZE(perf_event__names))
@@ -323,8 +327,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
 				       pid_t pid, pid_t tgid,
 				       perf_event__handler_t process,
 				       struct machine *machine,
-				       bool mmap_data,
-				       unsigned int proc_map_timeout)
+				       bool mmap_data)
 {
 	char filename[PATH_MAX];
 	FILE *fp;
@@ -521,8 +524,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
 				      perf_event__handler_t process,
 				      struct perf_tool *tool,
 				      struct machine *machine,
-				      bool mmap_data,
-				      unsigned int proc_map_timeout)
+				      bool mmap_data)
 {
 	char filename[PATH_MAX];
 	DIR *tasks;
@@ -548,8 +550,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
 	 */
 	if (pid == tgid &&
 	    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
-					       process, machine, mmap_data,
-					       proc_map_timeout))
+					       process, machine, mmap_data))
 		return -1;
 
 	return 0;
@@ -598,7 +599,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
 		if (_pid == pid) {
 			/* process the parent's maps too */
 			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
-						process, machine, mmap_data, proc_map_timeout);
+						process, machine, mmap_data);
 			if (rc)
 				break;
 		}
@@ -612,8 +613,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
 				      struct thread_map *threads,
 				      perf_event__handler_t process,
 				      struct machine *machine,
-				      bool mmap_data,
-				      unsigned int proc_map_timeout)
+				      bool mmap_data)
 {
 	union perf_event *comm_event, *mmap_event, *fork_event;
 	union perf_event *namespaces_event;
@@ -643,7 +643,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
 					       fork_event, namespaces_event,
 					       thread_map__pid(threads, thread), 0,
 					       process, tool, machine,
-					       mmap_data, proc_map_timeout)) {
+					       mmap_data)) {
 			err = -1;
 			break;
 		}
@@ -669,7 +669,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
 						 fork_event, namespaces_event,
 						 comm_event->comm.pid, 0,
 						 process, tool, machine,
-						 mmap_data, proc_map_timeout)) {
+						 mmap_data)) {
 			err = -1;
 			break;
 		}
@@ -690,7 +690,6 @@ static int __perf_event__synthesize_threads(struct perf_tool *tool,
 					    perf_event__handler_t process,
 					    struct machine *machine,
 					    bool mmap_data,
-					    unsigned int proc_map_timeout,
 					    struct dirent **dirent,
 					    int start,
 					    int num)
@@ -734,8 +733,7 @@ static int __perf_event__synthesize_threads(struct perf_tool *tool,
 		 */
 		__event__synthesize_thread(comm_event, mmap_event, fork_event,
 					   namespaces_event, pid, 1, process,
-					   tool, machine, mmap_data,
-					   proc_map_timeout);
+					   tool, machine, mmap_data);
 	}
 	err = 0;
 
@@ -755,7 +753,6 @@ struct synthesize_threads_arg {
 	perf_event__handler_t process;
 	struct machine *machine;
 	bool mmap_data;
-	unsigned int proc_map_timeout;
 	struct dirent **dirent;
 	int num;
 	int start;
@@ -767,7 +764,7 @@ static void *synthesize_threads_worker(void *arg)
 
 	__perf_event__synthesize_threads(args->tool, args->process,
 					 args->machine, args->mmap_data,
-					 args->proc_map_timeout, args->dirent,
+					 args->dirent,
 					 args->start, args->num);
 	return NULL;
 }
@@ -776,7 +773,6 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
 				   perf_event__handler_t process,
 				   struct machine *machine,
 				   bool mmap_data,
-				   unsigned int proc_map_timeout,
 				   unsigned int nr_threads_synthesize)
 {
 	struct synthesize_threads_arg *args = NULL;
@@ -806,7 +802,6 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
 	if (thread_nr <= 1) {
 		err = __perf_event__synthesize_threads(tool, process,
 						       machine, mmap_data,
-						       proc_map_timeout,
 						       dirent, base, n);
 		goto free_dirent;
 	}
@@ -828,7 +823,6 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
 		args[i].process = process;
 		args[i].machine = machine;
 		args[i].mmap_data = mmap_data;
-		args[i].proc_map_timeout = proc_map_timeout;
 		args[i].dirent = dirent;
 	}
 	for (i = 0; i < m; i++) {
@@ -1577,6 +1571,24 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
 	return al->map;
 }
 
+/*
+ * For branch stacks or branch samples, the sample cpumode might not be correct
+ * because it applies only to the sample 'ip' and not necessary to 'addr' or
+ * branch stack addresses. If possible, use a fallback to deal with those cases.
+ */
+struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
+				struct addr_location *al)
+{
+	struct map *map = thread__find_map(thread, cpumode, addr, al);
+	struct machine *machine = thread->mg->machine;
+	u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
+
+	if (map || addr_cpumode == cpumode)
+		return map;
+
+	return thread__find_map(thread, addr_cpumode, addr, al);
+}
+
 struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
 				   u64 addr, struct addr_location *al)
 {
@@ -1586,6 +1598,15 @@ struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
 	return al->sym;
 }
 
+struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
+				      u64 addr, struct addr_location *al)
+{
+	al->sym = NULL;
+	if (thread__find_map_fb(thread, cpumode, addr, al))
+		al->sym = map__find_symbol(al->map, al->addr);
+	return al->sym;
+}
+
 /*
  * Callers need to drop the reference to al->thread, obtained in
  * machine__findnew_thread()
@@ -1679,7 +1700,7 @@ bool sample_addr_correlates_sym(struct perf_event_attr *attr)
 void thread__resolve(struct thread *thread, struct addr_location *al,
 		     struct perf_sample *sample)
 {
-	thread__find_map(thread, sample->cpumode, sample->addr, al);
+	thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
 
 	al->cpu = sample->cpu;
 	al->sym = NULL;
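thread__find_map_fb() above retries a failed lookup with a cpumode derived from the address itself, because for branch-stack entries the sample cpumode only describes the sampled ip. The conceptual sketch below illustrates that fallback in isolation; lookup() and cpumode_of_addr() are hypothetical stand-ins for the real map lookup and machine__addr_cpumode(), and the address split is made up.

#include <stdio.h>

enum cpumode { MODE_USER, MODE_KERNEL };

/* Hypothetical lookup: pretend the address only resolves as a kernel address. */
static const char *lookup(enum cpumode mode, unsigned long long addr)
{
	(void)addr;
	return mode == MODE_KERNEL ? "kernel map" : NULL;
}

/* Hypothetical split: treat top-half addresses as kernel addresses. */
static enum cpumode cpumode_of_addr(unsigned long long addr)
{
	return addr >= 0xffff000000000000ULL ? MODE_KERNEL : MODE_USER;
}

static const char *lookup_fb(enum cpumode sample_mode, unsigned long long addr)
{
	const char *map = lookup(sample_mode, addr);
	enum cpumode addr_mode = cpumode_of_addr(addr);

	if (map || addr_mode == sample_mode)
		return map;

	return lookup(addr_mode, addr);	/* retry with the derived cpumode */
}

int main(void)
{
	/* The sample said "user", but the branch address is a kernel address. */
	puts(lookup_fb(MODE_USER, 0xffffffff81000000ULL));
	return 0;
}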
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index bfa60bcafbde..eb95f3384958 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -669,8 +669,7 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool,
 int perf_event__synthesize_thread_map(struct perf_tool *tool,
 				      struct thread_map *threads,
 				      perf_event__handler_t process,
-				      struct machine *machine, bool mmap_data,
-				      unsigned int proc_map_timeout);
+				      struct machine *machine, bool mmap_data);
 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
 				      struct thread_map *threads,
 				      perf_event__handler_t process,
@@ -682,7 +681,6 @@ int perf_event__synthesize_cpu_map(struct perf_tool *tool,
 int perf_event__synthesize_threads(struct perf_tool *tool,
 				   perf_event__handler_t process,
 				   struct machine *machine, bool mmap_data,
-				   unsigned int proc_map_timeout,
 				   unsigned int nr_threads_synthesize);
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 				       perf_event__handler_t process,
@@ -797,8 +795,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
 				       pid_t pid, pid_t tgid,
 				       perf_event__handler_t process,
 				       struct machine *machine,
-				       bool mmap_data,
-				       unsigned int proc_map_timeout);
+				       bool mmap_data);
 
 int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
 				       perf_event__handler_t process,
@@ -829,5 +826,6 @@ int perf_event_paranoid(void);
 
 extern int sysctl_perf_event_max_stack;
 extern int sysctl_perf_event_max_contexts_per_stack;
+extern unsigned int proc_map_timeout;
 
 #endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 668d2a9ef0f4..8c902276d4b4 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -34,6 +34,10 @@
34#include <linux/log2.h> 34#include <linux/log2.h>
35#include <linux/err.h> 35#include <linux/err.h>
36 36
37#ifdef LACKS_SIGQUEUE_PROTOTYPE
38int sigqueue(pid_t pid, int sig, const union sigval value);
39#endif
40
37#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 41#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
38#define SID(e, x, y) xyarray__entry(e->sample_id, x, y) 42#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
39 43
@@ -1018,7 +1022,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
1018 */ 1022 */
1019int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, 1023int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1020 unsigned int auxtrace_pages, 1024 unsigned int auxtrace_pages,
1021 bool auxtrace_overwrite) 1025 bool auxtrace_overwrite, int nr_cblocks)
1022{ 1026{
1023 struct perf_evsel *evsel; 1027 struct perf_evsel *evsel;
1024 const struct cpu_map *cpus = evlist->cpus; 1028 const struct cpu_map *cpus = evlist->cpus;
@@ -1028,7 +1032,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1028 * Its value is decided by evsel's write_backward. 1032 * Its value is decided by evsel's write_backward.
1029 * So &mp should not be passed through const pointer. 1033 * So &mp should not be passed through const pointer.
1030 */ 1034 */
1031 struct mmap_params mp; 1035 struct mmap_params mp = { .nr_cblocks = nr_cblocks };
1032 1036
1033 if (!evlist->mmap) 1037 if (!evlist->mmap)
1034 evlist->mmap = perf_evlist__alloc_mmap(evlist, false); 1038 evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
@@ -1060,7 +1064,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1060 1064
1061int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages) 1065int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
1062{ 1066{
1063 return perf_evlist__mmap_ex(evlist, pages, 0, false); 1067 return perf_evlist__mmap_ex(evlist, pages, 0, false, 0);
1064} 1068}
1065 1069
1066int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) 1070int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
@@ -1176,7 +1180,7 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **e
1176 return err; 1180 return err;
1177} 1181}
1178 1182
1179int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) 1183int perf_evlist__set_tp_filter(struct perf_evlist *evlist, const char *filter)
1180{ 1184{
1181 struct perf_evsel *evsel; 1185 struct perf_evsel *evsel;
1182 int err = 0; 1186 int err = 0;
@@ -1193,7 +1197,7 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1193 return err; 1197 return err;
1194} 1198}
1195 1199
1196int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids) 1200int perf_evlist__set_tp_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
1197{ 1201{
1198 char *filter; 1202 char *filter;
1199 int ret = -1; 1203 int ret = -1;
@@ -1214,15 +1218,15 @@ int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t
1214 } 1218 }
1215 } 1219 }
1216 1220
1217 ret = perf_evlist__set_filter(evlist, filter); 1221 ret = perf_evlist__set_tp_filter(evlist, filter);
1218out_free: 1222out_free:
1219 free(filter); 1223 free(filter);
1220 return ret; 1224 return ret;
1221} 1225}
1222 1226
1223int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid) 1227int perf_evlist__set_tp_filter_pid(struct perf_evlist *evlist, pid_t pid)
1224{ 1228{
1225 return perf_evlist__set_filter_pids(evlist, 1, &pid); 1229 return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
1226} 1230}
1227 1231
1228bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) 1232bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
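The renamed helpers make it explicit that these are tracepoint ("tp") filters: perf_evlist__set_tp_filter_pids() builds a "common_pid != N && common_pid != M" expression from the pid list and applies it to every tracepoint event in the evlist. Below is a minimal standalone sketch of how such a filter string can be assembled; build_pid_filter() is an illustrative helper name, not a perf API.

/*
 * Minimal sketch (not perf code): build a tracepoint filter string of the
 * form "common_pid != A && common_pid != B", which is the shape of filter
 * that perf_evlist__set_tp_filter_pids() applies per tracepoint event.
 */
#define _GNU_SOURCE		/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

static char *build_pid_filter(size_t npids, const pid_t *pids)
{
	char *filter = NULL, *tmp = NULL;
	size_t i;

	for (i = 0; i < npids; i++) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", (int)pids[i]) < 0)
				return NULL;
		} else {
			if (asprintf(&tmp, "%s && common_pid != %d", filter, (int)pids[i]) < 0) {
				free(filter);
				return NULL;
			}
			free(filter);
			filter = tmp;
		}
	}
	return filter;
}

int main(void)
{
	pid_t pids[] = { 1234, 5678 };
	char *filter = build_pid_filter(2, pids);

	if (filter) {
		printf("%s\n", filter);	/* common_pid != 1234 && common_pid != 5678 */
		free(filter);
	}
	return 0;
}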
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 9919eed6d15b..868294491194 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -98,9 +98,9 @@ void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
98#define perf_evlist__reset_sample_bit(evlist, bit) \ 98#define perf_evlist__reset_sample_bit(evlist, bit) \
99 __perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit) 99 __perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit)
100 100
101int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter); 101int perf_evlist__set_tp_filter(struct perf_evlist *evlist, const char *filter);
102int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid); 102int perf_evlist__set_tp_filter_pid(struct perf_evlist *evlist, pid_t pid);
103int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids); 103int perf_evlist__set_tp_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);
104 104
105struct perf_evsel * 105struct perf_evsel *
106perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id); 106perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
@@ -162,7 +162,7 @@ unsigned long perf_event_mlock_kb_in_pages(void);
162 162
163int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, 163int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
164 unsigned int auxtrace_pages, 164 unsigned int auxtrace_pages,
165 bool auxtrace_overwrite); 165 bool auxtrace_overwrite, int nr_cblocks);
166int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages); 166int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
167void perf_evlist__munmap(struct perf_evlist *evlist); 167void perf_evlist__munmap(struct perf_evlist *evlist);
168 168
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 3147ca76c6fc..82a289ce8b0c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -106,7 +106,7 @@ struct perf_evsel {
106 char *name; 106 char *name;
107 double scale; 107 double scale;
108 const char *unit; 108 const char *unit;
109 struct tep_event_format *tp_format; 109 struct tep_event *tp_format;
110 off_t id_offset; 110 off_t id_offset;
111 struct perf_stat_evsel *stats; 111 struct perf_stat_evsel *stats;
112 void *priv; 112 void *priv;
@@ -216,7 +216,7 @@ static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *
216 216
217struct perf_evsel *perf_evsel__new_cycles(bool precise); 217struct perf_evsel *perf_evsel__new_cycles(bool precise);
218 218
219struct tep_event_format *event_format__new(const char *sys, const char *name); 219struct tep_event *event_format__new(const char *sys, const char *name);
220 220
221void perf_evsel__init(struct perf_evsel *evsel, 221void perf_evsel__init(struct perf_evsel *evsel,
222 struct perf_event_attr *attr, int idx); 222 struct perf_event_attr *attr, int idx);
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 0d0a4c6f368b..95ea147f9e18 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -173,6 +173,7 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
173 if (!print_oneline) 173 if (!print_oneline)
174 printed += fprintf(fp, "\n"); 174 printed += fprintf(fp, "\n");
175 175
176 /* Add srccode here too? */
176 if (symbol_conf.bt_stop_list && 177 if (symbol_conf.bt_stop_list &&
177 node->sym && 178 node->sym &&
178 strlist__has_entry(symbol_conf.bt_stop_list, 179 strlist__has_entry(symbol_conf.bt_stop_list,
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 4fd45be95a43..dec6d218c31c 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -988,6 +988,45 @@ static int write_group_desc(struct feat_fd *ff,
988} 988}
989 989
990/* 990/*
991 * Return the CPU id as a raw string.
992 *
993 * Each architecture should provide a more precise id string that
994 * can be used to match the architecture's "mapfile".
995 */
996char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
997{
998 return NULL;
999}
1000
1001/* Return zero when the cpuid from the mapfile.csv matches the
1002 * cpuid string generated on this platform.
1003 * Otherwise return non-zero.
1004 */
1005int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
1006{
1007 regex_t re;
1008 regmatch_t pmatch[1];
1009 int match;
1010
1011 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
1012 /* Warn when the match string cannot be compiled as a regex. */
1013 pr_info("Invalid regular expression %s\n", mapcpuid);
1014 return 1;
1015 }
1016
1017 match = !regexec(&re, cpuid, 1, pmatch, 0);
1018 regfree(&re);
1019 if (match) {
1020 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
1021
1022 /* Verify the entire string matched. */
1023 if (match_len == strlen(cpuid))
1024 return 0;
1025 }
1026 return 1;
1027}
1028
1029/*
991 * default get_cpuid(): nothing gets recorded 1030 * default get_cpuid(): nothing gets recorded
992 * actual implementation must be in arch/$(SRCARCH)/util/header.c 1031 * actual implementation must be in arch/$(SRCARCH)/util/header.c
993 */ 1032 */
@@ -2659,6 +2698,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2659 struct perf_header *header = &session->header; 2698 struct perf_header *header = &session->header;
2660 int fd = perf_data__fd(session->data); 2699 int fd = perf_data__fd(session->data);
2661 struct stat st; 2700 struct stat st;
2701 time_t stctime;
2662 int ret, bit; 2702 int ret, bit;
2663 2703
2664 hd.fp = fp; 2704 hd.fp = fp;
@@ -2668,7 +2708,8 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2668 if (ret == -1) 2708 if (ret == -1)
2669 return -1; 2709 return -1;
2670 2710
2671 fprintf(fp, "# captured on : %s", ctime(&st.st_ctime)); 2711 stctime = st.st_ctime;
2712 fprintf(fp, "# captured on : %s", ctime(&stctime));
2672 2713
2673 fprintf(fp, "# header version : %u\n", header->version); 2714 fprintf(fp, "# header version : %u\n", header->version);
2674 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset); 2715 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
@@ -2759,7 +2800,7 @@ static int perf_header__adds_write(struct perf_header *header,
2759 lseek(fd, sec_start, SEEK_SET); 2800 lseek(fd, sec_start, SEEK_SET);
2760 /* 2801 /*
2761 * may write more than needed due to dropped feature, but 2802 * may write more than needed due to dropped feature, but
2762 * this is okay, reader will skip the mising entries 2803 * this is okay, reader will skip the missing entries
2763 */ 2804 */
2764 err = do_write(&ff, feat_sec, sec_size); 2805 err = do_write(&ff, feat_sec, sec_size);
2765 if (err < 0) 2806 if (err < 0)
@@ -3229,7 +3270,7 @@ static int read_attr(int fd, struct perf_header *ph,
3229static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, 3270static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
3230 struct tep_handle *pevent) 3271 struct tep_handle *pevent)
3231{ 3272{
3232 struct tep_event_format *event; 3273 struct tep_event *event;
3233 char bf[128]; 3274 char bf[128];
3234 3275
3235 /* already prepared */ 3276 /* already prepared */
@@ -3544,7 +3585,7 @@ perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3544 if (ev == NULL) 3585 if (ev == NULL)
3545 return -ENOMEM; 3586 return -ENOMEM;
3546 3587
3547 strncpy(ev->data, evsel->unit, size); 3588 strlcpy(ev->data, evsel->unit, size + 1);
3548 err = process(tool, (union perf_event *)ev, NULL, NULL); 3589 err = process(tool, (union perf_event *)ev, NULL, NULL);
3549 free(ev); 3590 free(ev);
3550 return err; 3591 return err;
@@ -3583,7 +3624,7 @@ perf_event__synthesize_event_update_name(struct perf_tool *tool,
3583 if (ev == NULL) 3624 if (ev == NULL)
3584 return -ENOMEM; 3625 return -ENOMEM;
3585 3626
3586 strncpy(ev->data, evsel->name, len); 3627 strlcpy(ev->data, evsel->name, len + 1);
3587 err = process(tool, (union perf_event*) ev, NULL, NULL); 3628 err = process(tool, (union perf_event*) ev, NULL, NULL);
3588 free(ev); 3629 free(ev);
3589 return err; 3630 return err;
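The __weak strcmp_cpuid_str() moved into header.c above treats the mapfile entry as an extended regex and only accepts a match that spans the entire cpuid string generated for this platform. A standalone sketch of that whole-string check follows; cpuid_regex_matches() and the example strings are illustrative, not part of perf.

/*
 * Minimal standalone sketch of the whole-string regex match used by the
 * __weak strcmp_cpuid_str() above: the mapfile entry is an extended regex,
 * and it only counts as a match if it covers the full cpuid string.
 * Returns 0 on match, 1 otherwise, like the perf helper.
 */
#include <regex.h>
#include <stdio.h>
#include <string.h>

static int cpuid_regex_matches(const char *mapcpuid, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int match, ret = 1;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0)
		return 1;	/* invalid pattern: treat as no match */

	match = !regexec(&re, cpuid, 1, pmatch, 0);
	if (match && (size_t)(pmatch[0].rm_eo - pmatch[0].rm_so) == strlen(cpuid))
		ret = 0;	/* the entire cpuid string matched */

	regfree(&re);
	return ret;
}

int main(void)
{
	/* Example strings only; real mapfile entries are arch specific. */
	printf("%d\n", cpuid_regex_matches("GenuineIntel-6-[4-9A-F].*", "GenuineIntel-6-55-4"));
	printf("%d\n", cpuid_regex_matches("GenuineIntel-6-55", "GenuineIntel-6-55-4"));
	return 0;
}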
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 828cb9794c76..8aad8330e392 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1160,7 +1160,7 @@ void hist_entry__delete(struct hist_entry *he)
1160 1160
1161/* 1161/*
1162 * If this is not the last column, then we need to pad it according to the 1162 * If this is not the last column, then we need to pad it according to the
1163 * pre-calculated max lenght for this column, otherwise don't bother adding 1163 * pre-calculated max length for this column, otherwise don't bother adding
1164 * spaces because that would break viewing this with, for instance, 'less', 1164 * spaces because that would break viewing this with, for instance, 'less',
1165 * that would show tons of trailing spaces when a long C++ demangled method 1165 * that would show tons of trailing spaces when a long C++ demangled method
1166 * names is sampled. 1166 * names is sampled.
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 3badd7f1e1b8..664b5eda8d51 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -62,6 +62,7 @@ enum hist_column {
62 HISTC_TRACE, 62 HISTC_TRACE,
63 HISTC_SYM_SIZE, 63 HISTC_SYM_SIZE,
64 HISTC_DSO_SIZE, 64 HISTC_DSO_SIZE,
65 HISTC_SYMBOL_IPC,
65 HISTC_NR_COLS, /* Last entry */ 66 HISTC_NR_COLS, /* Last entry */
66}; 67};
67 68
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index a1863000e972..bf249552a9b0 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -38,7 +38,7 @@ struct jit_buf_desc {
38 uint64_t sample_type; 38 uint64_t sample_type;
39 size_t bufsize; 39 size_t bufsize;
40 FILE *in; 40 FILE *in;
41 bool needs_bswap; /* handles cross-endianess */ 41 bool needs_bswap; /* handles cross-endianness */
42 bool use_arch_timestamp; 42 bool use_arch_timestamp;
43 void *debug_data; 43 void *debug_data;
44 void *unwinding_data; 44 void *unwinding_data;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 8f36ce813bc5..6fcb3bce0442 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -137,7 +137,7 @@ struct machine *machine__new_kallsyms(void)
137 struct machine *machine = machine__new_host(); 137 struct machine *machine = machine__new_host();
138 /* 138 /*
139 * FIXME: 139 * FIXME:
140 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitely 140 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
141 * ask for not using the kcore parsing code, once this one is fixed 141 * ask for not using the kcore parsing code, once this one is fixed
142 * to create a map per module. 142 * to create a map per module.
143 */ 143 */
@@ -2493,15 +2493,13 @@ int machines__for_each_thread(struct machines *machines,
2493int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 2493int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
2494 struct target *target, struct thread_map *threads, 2494 struct target *target, struct thread_map *threads,
2495 perf_event__handler_t process, bool data_mmap, 2495 perf_event__handler_t process, bool data_mmap,
2496 unsigned int proc_map_timeout,
2497 unsigned int nr_threads_synthesize) 2496 unsigned int nr_threads_synthesize)
2498{ 2497{
2499 if (target__has_task(target)) 2498 if (target__has_task(target))
2500 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout); 2499 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
2501 else if (target__has_cpu(target)) 2500 else if (target__has_cpu(target))
2502 return perf_event__synthesize_threads(tool, process, 2501 return perf_event__synthesize_threads(tool, process,
2503 machine, data_mmap, 2502 machine, data_mmap,
2504 proc_map_timeout,
2505 nr_threads_synthesize); 2503 nr_threads_synthesize);
2506 /* command specified */ 2504 /* command specified */
2507 return 0; 2505 return 0;
@@ -2592,6 +2590,33 @@ int machine__get_kernel_start(struct machine *machine)
2592 return err; 2590 return err;
2593} 2591}
2594 2592
2593u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
2594{
2595 u8 addr_cpumode = cpumode;
2596 bool kernel_ip;
2597
2598 if (!machine->single_address_space)
2599 goto out;
2600
2601 kernel_ip = machine__kernel_ip(machine, addr);
2602 switch (cpumode) {
2603 case PERF_RECORD_MISC_KERNEL:
2604 case PERF_RECORD_MISC_USER:
2605 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
2606 PERF_RECORD_MISC_USER;
2607 break;
2608 case PERF_RECORD_MISC_GUEST_KERNEL:
2609 case PERF_RECORD_MISC_GUEST_USER:
2610 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
2611 PERF_RECORD_MISC_GUEST_USER;
2612 break;
2613 default:
2614 break;
2615 }
2616out:
2617 return addr_cpumode;
2618}
2619
2595struct dso *machine__findnew_dso(struct machine *machine, const char *filename) 2620struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
2596{ 2621{
2597 return dsos__findnew(&machine->dsos, filename); 2622 return dsos__findnew(&machine->dsos, filename);
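machine__addr_cpumode() only adjusts the cpumode when the machine reports a single address space, classifying the address as kernel or user purely by comparing it against kernel_start. A minimal sketch of that classification follows; addr_cpumode() and the kernel_start value are assumptions for the example, and only the PERF_RECORD_MISC_* constants come from the UAPI header.

/*
 * Sketch (not the perf implementation) of the cpumode fixup idea from
 * machine__addr_cpumode(): when kernel and user share one address space,
 * derive kernel vs. user purely from the address relative to kernel_start.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t addr_cpumode(uint64_t kernel_start, uint8_t cpumode, uint64_t addr)
{
	int kernel_ip = addr >= kernel_start;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
	case PERF_RECORD_MISC_USER:
		return kernel_ip ? PERF_RECORD_MISC_KERNEL : PERF_RECORD_MISC_USER;
	case PERF_RECORD_MISC_GUEST_KERNEL:
	case PERF_RECORD_MISC_GUEST_USER:
		return kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL : PERF_RECORD_MISC_GUEST_USER;
	default:
		return cpumode;	/* hypervisor etc.: leave untouched */
	}
}

int main(void)
{
	uint64_t kernel_start = 0xffff800000000000ULL;	/* example value only */

	printf("%d\n", addr_cpumode(kernel_start, PERF_RECORD_MISC_USER, 0xffff800000001000ULL));
	printf("%d\n", addr_cpumode(kernel_start, PERF_RECORD_MISC_KERNEL, 0x400000ULL));
	return 0;
}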
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index d856b85862e2..a5d1da60f751 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -42,6 +42,7 @@ struct machine {
42 u16 id_hdr_size; 42 u16 id_hdr_size;
43 bool comm_exec; 43 bool comm_exec;
44 bool kptr_restrict_warned; 44 bool kptr_restrict_warned;
45 bool single_address_space;
45 char *root_dir; 46 char *root_dir;
46 char *mmap_name; 47 char *mmap_name;
47 struct threads threads[THREADS__TABLE_SIZE]; 48 struct threads threads[THREADS__TABLE_SIZE];
@@ -99,6 +100,8 @@ static inline bool machine__kernel_ip(struct machine *machine, u64 ip)
99 return ip >= kernel_start; 100 return ip >= kernel_start;
100} 101}
101 102
103u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr);
104
102struct thread *machine__find_thread(struct machine *machine, pid_t pid, 105struct thread *machine__find_thread(struct machine *machine, pid_t pid,
103 pid_t tid); 106 pid_t tid);
104struct comm *machine__thread_exec_comm(struct machine *machine, 107struct comm *machine__thread_exec_comm(struct machine *machine,
@@ -247,17 +250,14 @@ int machines__for_each_thread(struct machines *machines,
247int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 250int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
248 struct target *target, struct thread_map *threads, 251 struct target *target, struct thread_map *threads,
249 perf_event__handler_t process, bool data_mmap, 252 perf_event__handler_t process, bool data_mmap,
250 unsigned int proc_map_timeout,
251 unsigned int nr_threads_synthesize); 253 unsigned int nr_threads_synthesize);
252static inline 254static inline
253int machine__synthesize_threads(struct machine *machine, struct target *target, 255int machine__synthesize_threads(struct machine *machine, struct target *target,
254 struct thread_map *threads, bool data_mmap, 256 struct thread_map *threads, bool data_mmap,
255 unsigned int proc_map_timeout,
256 unsigned int nr_threads_synthesize) 257 unsigned int nr_threads_synthesize)
257{ 258{
258 return __machine__synthesize_threads(machine, NULL, target, threads, 259 return __machine__synthesize_threads(machine, NULL, target, threads,
259 perf_event__process, data_mmap, 260 perf_event__process, data_mmap,
260 proc_map_timeout,
261 nr_threads_synthesize); 261 nr_threads_synthesize);
262} 262}
263 263
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 354e54550d2b..6751301a755c 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -19,8 +19,10 @@
19#include "srcline.h" 19#include "srcline.h"
20#include "namespaces.h" 20#include "namespaces.h"
21#include "unwind.h" 21#include "unwind.h"
22#include "srccode.h"
22 23
23static void __maps__insert(struct maps *maps, struct map *map); 24static void __maps__insert(struct maps *maps, struct map *map);
25static void __maps__insert_name(struct maps *maps, struct map *map);
24 26
25static inline int is_anon_memory(const char *filename, u32 flags) 27static inline int is_anon_memory(const char *filename, u32 flags)
26{ 28{
@@ -420,6 +422,54 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
420 return ret; 422 return ret;
421} 423}
422 424
425int map__fprintf_srccode(struct map *map, u64 addr,
426 FILE *fp,
427 struct srccode_state *state)
428{
429 char *srcfile;
430 int ret = 0;
431 unsigned line;
432 int len;
433 char *srccode;
434
435 if (!map || !map->dso)
436 return 0;
437 srcfile = get_srcline_split(map->dso,
438 map__rip_2objdump(map, addr),
439 &line);
440 if (!srcfile)
441 return 0;
442
443 /* Avoid redundant printing */
444 if (state &&
445 state->srcfile &&
446 !strcmp(state->srcfile, srcfile) &&
447 state->line == line) {
448 free(srcfile);
449 return 0;
450 }
451
452 srccode = find_sourceline(srcfile, line, &len);
453 if (!srccode)
454 goto out_free_line;
455
456 ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
457 state->srcfile = srcfile;
458 state->line = line;
459 return ret;
460
461out_free_line:
462 free(srcfile);
463 return ret;
464}
465
466
467void srccode_state_free(struct srccode_state *state)
468{
469 zfree(&state->srcfile);
470 state->line = 0;
471}
472
423/** 473/**
424 * map__rip_2objdump - convert symbol start address to objdump address. 474 * map__rip_2objdump - convert symbol start address to objdump address.
425 * @map: memory map 475 * @map: memory map
@@ -496,6 +546,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
496static void maps__init(struct maps *maps) 546static void maps__init(struct maps *maps)
497{ 547{
498 maps->entries = RB_ROOT; 548 maps->entries = RB_ROOT;
549 maps->names = RB_ROOT;
499 init_rwsem(&maps->lock); 550 init_rwsem(&maps->lock);
500} 551}
501 552
@@ -664,6 +715,7 @@ size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
664static void __map_groups__insert(struct map_groups *mg, struct map *map) 715static void __map_groups__insert(struct map_groups *mg, struct map *map)
665{ 716{
666 __maps__insert(&mg->maps, map); 717 __maps__insert(&mg->maps, map);
718 __maps__insert_name(&mg->maps, map);
667 map->groups = mg; 719 map->groups = mg;
668} 720}
669 721
@@ -824,10 +876,34 @@ static void __maps__insert(struct maps *maps, struct map *map)
824 map__get(map); 876 map__get(map);
825} 877}
826 878
879static void __maps__insert_name(struct maps *maps, struct map *map)
880{
881 struct rb_node **p = &maps->names.rb_node;
882 struct rb_node *parent = NULL;
883 struct map *m;
884 int rc;
885
886 while (*p != NULL) {
887 parent = *p;
888 m = rb_entry(parent, struct map, rb_node_name);
889 rc = strcmp(m->dso->short_name, map->dso->short_name);
890 if (rc < 0)
891 p = &(*p)->rb_left;
892 else if (rc > 0)
893 p = &(*p)->rb_right;
894 else
895 return;
896 }
897 rb_link_node(&map->rb_node_name, parent, p);
898 rb_insert_color(&map->rb_node_name, &maps->names);
899 map__get(map);
900}
901
827void maps__insert(struct maps *maps, struct map *map) 902void maps__insert(struct maps *maps, struct map *map)
828{ 903{
829 down_write(&maps->lock); 904 down_write(&maps->lock);
830 __maps__insert(maps, map); 905 __maps__insert(maps, map);
906 __maps__insert_name(maps, map);
831 up_write(&maps->lock); 907 up_write(&maps->lock);
832} 908}
833 909
@@ -846,19 +922,18 @@ void maps__remove(struct maps *maps, struct map *map)
846 922
847struct map *maps__find(struct maps *maps, u64 ip) 923struct map *maps__find(struct maps *maps, u64 ip)
848{ 924{
849 struct rb_node **p, *parent = NULL; 925 struct rb_node *p;
850 struct map *m; 926 struct map *m;
851 927
852 down_read(&maps->lock); 928 down_read(&maps->lock);
853 929
854 p = &maps->entries.rb_node; 930 p = maps->entries.rb_node;
855 while (*p != NULL) { 931 while (p != NULL) {
856 parent = *p; 932 m = rb_entry(p, struct map, rb_node);
857 m = rb_entry(parent, struct map, rb_node);
858 if (ip < m->start) 933 if (ip < m->start)
859 p = &(*p)->rb_left; 934 p = p->rb_left;
860 else if (ip >= m->end) 935 else if (ip >= m->end)
861 p = &(*p)->rb_right; 936 p = p->rb_right;
862 else 937 else
863 goto out; 938 goto out;
864 } 939 }
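__maps__insert_name() adds a second tree, keyed by the dso short name, next to the address-keyed entries tree, and silently skips a name that is already present. The sketch below shows the same ordered insert/skip-duplicate pattern with a plain (unbalanced) binary search tree standing in for the kernel rbtree library; struct name_node and its helpers are illustrative only.

/*
 * Sketch of the name-keyed insert/lookup pattern behind __maps__insert_name():
 * an ordered tree keyed by strcmp(), where an already-present name is simply
 * skipped (the first map with that short name wins).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct name_node {
	char *name;
	struct name_node *left, *right;
};

/* Insert @name unless it is already present (mirrors the "else return"). */
static void insert_name(struct name_node **root, const char *name)
{
	while (*root) {
		int rc = strcmp((*root)->name, name);

		if (rc < 0)
			root = &(*root)->left;
		else if (rc > 0)
			root = &(*root)->right;
		else
			return;		/* duplicate short name: keep the first entry */
	}
	*root = calloc(1, sizeof(**root));
	if (*root)
		(*root)->name = strdup(name);
}

static struct name_node *find_name(struct name_node *root, const char *name)
{
	while (root) {
		int rc = strcmp(root->name, name);

		if (rc < 0)
			root = root->left;
		else if (rc > 0)
			root = root->right;
		else
			return root;
	}
	return NULL;
}

int main(void)
{
	struct name_node *root = NULL;

	insert_name(&root, "libc-2.28.so");
	insert_name(&root, "[kernel.kallsyms]");
	insert_name(&root, "libc-2.28.so");	/* skipped as a duplicate */
	printf("%s\n", find_name(root, "libc-2.28.so") ? "found" : "missing");
	return 0;
}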
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index e0f327b51e66..09282aa45c80 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -25,6 +25,7 @@ struct map {
25 struct rb_node rb_node; 25 struct rb_node rb_node;
26 struct list_head node; 26 struct list_head node;
27 }; 27 };
28 struct rb_node rb_node_name;
28 u64 start; 29 u64 start;
29 u64 end; 30 u64 end;
30 bool erange_warned; 31 bool erange_warned;
@@ -57,6 +58,7 @@ struct kmap {
57 58
58struct maps { 59struct maps {
59 struct rb_root entries; 60 struct rb_root entries;
61 struct rb_root names;
60 struct rw_semaphore lock; 62 struct rw_semaphore lock;
61}; 63};
62 64
@@ -172,6 +174,22 @@ char *map__srcline(struct map *map, u64 addr, struct symbol *sym);
172int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, 174int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
173 FILE *fp); 175 FILE *fp);
174 176
177struct srccode_state {
178 char *srcfile;
179 unsigned line;
180};
181
182static inline void srccode_state_init(struct srccode_state *state)
183{
184 state->srcfile = NULL;
185 state->line = 0;
186}
187
188void srccode_state_free(struct srccode_state *state);
189
190int map__fprintf_srccode(struct map *map, u64 addr,
191 FILE *fp, struct srccode_state *state);
192
175int map__load(struct map *map); 193int map__load(struct map *map);
176struct symbol *map__find_symbol(struct map *map, u64 addr); 194struct symbol *map__find_symbol(struct map *map, u64 addr);
177struct symbol *map__find_symbol_by_name(struct map *map, const char *name); 195struct symbol *map__find_symbol_by_name(struct map *map, const char *name);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index cdb95b3a1213..8fc39311a30d 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -153,8 +153,158 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
153{ 153{
154} 154}
155 155
156#ifdef HAVE_AIO_SUPPORT
157static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
158{
159 int delta_max, i, prio;
160
161 map->aio.nr_cblocks = mp->nr_cblocks;
162 if (map->aio.nr_cblocks) {
163 map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
164 if (!map->aio.aiocb) {
165 pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
166 return -1;
167 }
168 map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
169 if (!map->aio.cblocks) {
170 pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
171 return -1;
172 }
173 map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
174 if (!map->aio.data) {
175 pr_debug2("failed to allocate data buffer, error %m\n");
176 return -1;
177 }
178 delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
179 for (i = 0; i < map->aio.nr_cblocks; ++i) {
180 map->aio.data[i] = malloc(perf_mmap__mmap_len(map));
181 if (!map->aio.data[i]) {
182 pr_debug2("failed to allocate data buffer area, error %m");
183 return -1;
184 }
185 /*
186 * Use a cblock.aio_fildes value different from -1
187 * to denote a started aio write operation on the
188 * cblock, so an explicit record__aio_sync() call
189 * is required before the cblock may be reused again.
190 */
191 map->aio.cblocks[i].aio_fildes = -1;
192 /*
193 * Allocate cblocks with priority delta to have
194 * faster aio write system calls because queued requests
195 * are kept in separate per-prio queues and adding
196 * a new request will iterate through a shorter per-prio
197 * list. Blocks with numbers higher than
198 * _SC_AIO_PRIO_DELTA_MAX go with priority 0.
199 */
200 prio = delta_max - i;
201 map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
202 }
203 }
204
205 return 0;
206}
207
208static void perf_mmap__aio_munmap(struct perf_mmap *map)
209{
210 int i;
211
212 for (i = 0; i < map->aio.nr_cblocks; ++i)
213 zfree(&map->aio.data[i]);
214 if (map->aio.data)
215 zfree(&map->aio.data);
216 zfree(&map->aio.cblocks);
217 zfree(&map->aio.aiocb);
218}
219
220int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
221 int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
222 off_t *off)
223{
224 u64 head = perf_mmap__read_head(md);
225 unsigned char *data = md->base + page_size;
226 unsigned long size, size0 = 0;
227 void *buf;
228 int rc = 0;
229
230 rc = perf_mmap__read_init(md);
231 if (rc < 0)
232 return (rc == -EAGAIN) ? 0 : -1;
233
234 /*
235 * md->base data is copied into md->data[idx] buffer to
236 * release space in the kernel buffer as fast as possible,
237 * through perf_mmap__consume() below.
238 *
239 * That lets the kernel proceed with storing more
240 * profiling data into the kernel buffer earlier than other
241 * per-cpu kernel buffers are handled.
242 *
243 * Copying can be done in two steps in case the chunk of
244 * profiling data crosses the upper bound of the kernel buffer.
245 * In this case we first move part of the data from md->start
246 * till the upper bound and then the remainder from the
247 * beginning of the kernel buffer till the end of
248 * the data chunk.
249 */
250
251 size = md->end - md->start;
252
253 if ((md->start & md->mask) + size != (md->end & md->mask)) {
254 buf = &data[md->start & md->mask];
255 size = md->mask + 1 - (md->start & md->mask);
256 md->start += size;
257 memcpy(md->aio.data[idx], buf, size);
258 size0 = size;
259 }
260
261 buf = &data[md->start & md->mask];
262 size = md->end - md->start;
263 md->start += size;
264 memcpy(md->aio.data[idx] + size0, buf, size);
265
266 /*
267 * Increment md->refcount to guard md->data[idx] buffer
268 * from premature deallocation, because the md object can be
269 * released before the aio write request started
270 * on mmap->data[idx] has completed.
271 *
272 * perf_mmap__put() is done at record__aio_complete()
273 * after the started request completes.
274 */
275 perf_mmap__get(md);
276
277 md->prev = head;
278 perf_mmap__consume(md);
279
280 rc = push(to, &md->aio.cblocks[idx], md->aio.data[idx], size0 + size, *off);
281 if (!rc) {
282 *off += size0 + size;
283 } else {
284 /*
285 * Decrement md->refcount back if aio write
286 * operation failed to start.
287 */
288 perf_mmap__put(md);
289 }
290
291 return rc;
292}
293#else
294static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
295 struct mmap_params *mp __maybe_unused)
296{
297 return 0;
298}
299
300static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused)
301{
302}
303#endif
304
156void perf_mmap__munmap(struct perf_mmap *map) 305void perf_mmap__munmap(struct perf_mmap *map)
157{ 306{
307 perf_mmap__aio_munmap(map);
158 if (map->base != NULL) { 308 if (map->base != NULL) {
159 munmap(map->base, perf_mmap__mmap_len(map)); 309 munmap(map->base, perf_mmap__mmap_len(map));
160 map->base = NULL; 310 map->base = NULL;
@@ -197,7 +347,7 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
197 &mp->auxtrace_mp, map->base, fd)) 347 &mp->auxtrace_mp, map->base, fd))
198 return -1; 348 return -1;
199 349
200 return 0; 350 return perf_mmap__aio_mmap(map, mp);
201} 351}
202 352
203static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end) 353static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
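perf_mmap__aio_push() copies the ring-buffer chunk into a private buffer and then hands it to an aio_write() started on a pre-allocated cblock, with aio_fildes == -1 serving as the "cblock is free" marker and aio_reqprio set when the cblocks are allocated. The standalone sketch below exercises that POSIX AIO pattern against a temporary file (link with -lrt on glibc); the file name and sizes are arbitrary and nothing here is perf code.

/*
 * Standalone sketch of the POSIX AIO pattern used by perf_mmap__aio_push():
 * start an aio_write() on a cblock with a priority delta, and use
 * aio_fildes == -1 as the "no write in flight" sentinel.
 */
#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct aiocb cb;
	char data[] = "profiling chunk\n";
	int fd = open("/tmp/aio-sketch.out", O_CREAT | O_WRONLY | O_TRUNC, 0600);
	int prio_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);

	if (fd < 0)
		return 1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = -1;			/* free: no write in flight */
	cb.aio_reqprio = prio_max > 0 ? prio_max : 0;

	/* Start the asynchronous write; the cblock is now "busy". */
	cb.aio_fildes = fd;
	cb.aio_buf = data;
	cb.aio_nbytes = sizeof(data) - 1;
	cb.aio_offset = 0;
	if (aio_write(&cb) < 0)
		return 1;

	/* Wait for completion before the cblock (and buffer) may be reused. */
	while (aio_error(&cb) == EINPROGRESS) {
		const struct aiocb *list[1] = { &cb };
		aio_suspend(list, 1, NULL);
	}
	printf("wrote %zd bytes\n", aio_return(&cb));

	cb.aio_fildes = -1;			/* mark the cblock free again */
	close(fd);
	return 0;
}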
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index cc5e2d6d17a9..aeb6942fdb00 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -6,9 +6,13 @@
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/ring_buffer.h> 7#include <linux/ring_buffer.h>
8#include <stdbool.h> 8#include <stdbool.h>
9#ifdef HAVE_AIO_SUPPORT
10#include <aio.h>
11#endif
9#include "auxtrace.h" 12#include "auxtrace.h"
10#include "event.h" 13#include "event.h"
11 14
15struct aiocb;
12/** 16/**
13 * struct perf_mmap - perf's ring buffer mmap details 17 * struct perf_mmap - perf's ring buffer mmap details
14 * 18 *
@@ -26,6 +30,14 @@ struct perf_mmap {
26 bool overwrite; 30 bool overwrite;
27 struct auxtrace_mmap auxtrace_mmap; 31 struct auxtrace_mmap auxtrace_mmap;
28 char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8); 32 char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
33#ifdef HAVE_AIO_SUPPORT
34 struct {
35 void **data;
36 struct aiocb *cblocks;
37 struct aiocb **aiocb;
38 int nr_cblocks;
39 } aio;
40#endif
29}; 41};
30 42
31/* 43/*
@@ -57,7 +69,7 @@ enum bkw_mmap_state {
57}; 69};
58 70
59struct mmap_params { 71struct mmap_params {
60 int prot, mask; 72 int prot, mask, nr_cblocks;
61 struct auxtrace_mmap_params auxtrace_mp; 73 struct auxtrace_mmap_params auxtrace_mp;
62}; 74};
63 75
@@ -85,6 +97,18 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map);
85 97
86int perf_mmap__push(struct perf_mmap *md, void *to, 98int perf_mmap__push(struct perf_mmap *md, void *to,
87 int push(struct perf_mmap *map, void *to, void *buf, size_t size)); 99 int push(struct perf_mmap *map, void *to, void *buf, size_t size));
100#ifdef HAVE_AIO_SUPPORT
101int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
102 int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
103 off_t *off);
104#else
105static inline int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused, int idx __maybe_unused,
106 int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off) __maybe_unused,
107 off_t *off __maybe_unused)
108{
109 return 0;
110}
111#endif
88 112
89size_t perf_mmap__mmap_len(struct perf_mmap *map); 113size_t perf_mmap__mmap_len(struct perf_mmap *map);
90 114
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index 1904e7f6ec84..897589507d97 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -219,13 +219,12 @@ int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
219 return 0; 219 return 0;
220} 220}
221 221
222static int __ordered_events__flush(struct ordered_events *oe) 222static int do_flush(struct ordered_events *oe, bool show_progress)
223{ 223{
224 struct list_head *head = &oe->events; 224 struct list_head *head = &oe->events;
225 struct ordered_event *tmp, *iter; 225 struct ordered_event *tmp, *iter;
226 u64 limit = oe->next_flush; 226 u64 limit = oe->next_flush;
227 u64 last_ts = oe->last ? oe->last->timestamp : 0ULL; 227 u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
228 bool show_progress = limit == ULLONG_MAX;
229 struct ui_progress prog; 228 struct ui_progress prog;
230 int ret; 229 int ret;
231 230
@@ -263,7 +262,8 @@ static int __ordered_events__flush(struct ordered_events *oe)
263 return 0; 262 return 0;
264} 263}
265 264
266int ordered_events__flush(struct ordered_events *oe, enum oe_flush how) 265static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
266 u64 timestamp)
267{ 267{
268 static const char * const str[] = { 268 static const char * const str[] = {
269 "NONE", 269 "NONE",
@@ -272,12 +272,16 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
272 "HALF ", 272 "HALF ",
273 }; 273 };
274 int err; 274 int err;
275 bool show_progress = false;
275 276
276 if (oe->nr_events == 0) 277 if (oe->nr_events == 0)
277 return 0; 278 return 0;
278 279
279 switch (how) { 280 switch (how) {
280 case OE_FLUSH__FINAL: 281 case OE_FLUSH__FINAL:
282 show_progress = true;
283 __fallthrough;
284 case OE_FLUSH__TOP:
281 oe->next_flush = ULLONG_MAX; 285 oe->next_flush = ULLONG_MAX;
282 break; 286 break;
283 287
@@ -298,6 +302,11 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
298 break; 302 break;
299 } 303 }
300 304
305 case OE_FLUSH__TIME:
306 oe->next_flush = timestamp;
307 show_progress = false;
308 break;
309
301 case OE_FLUSH__ROUND: 310 case OE_FLUSH__ROUND:
302 case OE_FLUSH__NONE: 311 case OE_FLUSH__NONE:
303 default: 312 default:
@@ -308,7 +317,7 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
308 str[how], oe->nr_events); 317 str[how], oe->nr_events);
309 pr_oe_time(oe->max_timestamp, "max_timestamp\n"); 318 pr_oe_time(oe->max_timestamp, "max_timestamp\n");
310 319
311 err = __ordered_events__flush(oe); 320 err = do_flush(oe, show_progress);
312 321
313 if (!err) { 322 if (!err) {
314 if (how == OE_FLUSH__ROUND) 323 if (how == OE_FLUSH__ROUND)
@@ -324,7 +333,29 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
324 return err; 333 return err;
325} 334}
326 335
327void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver) 336int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
337{
338 return __ordered_events__flush(oe, how, 0);
339}
340
341int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
342{
343 return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
344}
345
346u64 ordered_events__first_time(struct ordered_events *oe)
347{
348 struct ordered_event *event;
349
350 if (list_empty(&oe->events))
351 return 0;
352
353 event = list_first_entry(&oe->events, struct ordered_event, list);
354 return event->timestamp;
355}
356
357void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
358 void *data)
328{ 359{
329 INIT_LIST_HEAD(&oe->events); 360 INIT_LIST_HEAD(&oe->events);
330 INIT_LIST_HEAD(&oe->cache); 361 INIT_LIST_HEAD(&oe->cache);
@@ -332,6 +363,7 @@ void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t d
332 oe->max_alloc_size = (u64) -1; 363 oe->max_alloc_size = (u64) -1;
333 oe->cur_alloc_size = 0; 364 oe->cur_alloc_size = 0;
334 oe->deliver = deliver; 365 oe->deliver = deliver;
366 oe->data = data;
335} 367}
336 368
337static void 369static void
@@ -375,5 +407,5 @@ void ordered_events__reinit(struct ordered_events *oe)
375 407
376 ordered_events__free(oe); 408 ordered_events__free(oe);
377 memset(oe, '\0', sizeof(*oe)); 409 memset(oe, '\0', sizeof(*oe));
378 ordered_events__init(oe, old_deliver); 410 ordered_events__init(oe, old_deliver, oe->data);
379} 411}
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
index 1338d5c345dc..0920fb0ec6cc 100644
--- a/tools/perf/util/ordered-events.h
+++ b/tools/perf/util/ordered-events.h
@@ -18,6 +18,8 @@ enum oe_flush {
18 OE_FLUSH__FINAL, 18 OE_FLUSH__FINAL,
19 OE_FLUSH__ROUND, 19 OE_FLUSH__ROUND,
20 OE_FLUSH__HALF, 20 OE_FLUSH__HALF,
21 OE_FLUSH__TOP,
22 OE_FLUSH__TIME,
21}; 23};
22 24
23struct ordered_events; 25struct ordered_events;
@@ -47,15 +49,19 @@ struct ordered_events {
47 enum oe_flush last_flush_type; 49 enum oe_flush last_flush_type;
48 u32 nr_unordered_events; 50 u32 nr_unordered_events;
49 bool copy_on_queue; 51 bool copy_on_queue;
52 void *data;
50}; 53};
51 54
52int ordered_events__queue(struct ordered_events *oe, union perf_event *event, 55int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
53 u64 timestamp, u64 file_offset); 56 u64 timestamp, u64 file_offset);
54void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event); 57void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
55int ordered_events__flush(struct ordered_events *oe, enum oe_flush how); 58int ordered_events__flush(struct ordered_events *oe, enum oe_flush how);
56void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver); 59int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp);
60void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
61 void *data);
57void ordered_events__free(struct ordered_events *oe); 62void ordered_events__free(struct ordered_events *oe);
58void ordered_events__reinit(struct ordered_events *oe); 63void ordered_events__reinit(struct ordered_events *oe);
64u64 ordered_events__first_time(struct ordered_events *oe);
59 65
60static inline 66static inline
61void ordered_events__set_alloc_size(struct ordered_events *oe, u64 size) 67void ordered_events__set_alloc_size(struct ordered_events *oe, u64 size)
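The new OE_FLUSH__TIME mode and ordered_events__flush_time() deliver only the queued events whose timestamp is not newer than the given limit, while ordered_events__first_time() peeks at the oldest queued timestamp. A small standalone sketch of that flush-up-to-a-timestamp idea is below; the fixed-size queue and the flush_time()/first_time() names are illustrative stand-ins, not perf APIs.

/*
 * Sketch of the "flush up to a timestamp" idea behind OE_FLUSH__TIME /
 * ordered_events__flush_time(): deliver queued events whose timestamp is
 * not newer than the limit, keep the rest queued.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct queue {
	uint64_t ts[16];	/* kept sorted, oldest first */
	size_t nr;
};

static uint64_t first_time(const struct queue *q)
{
	return q->nr ? q->ts[0] : 0;	/* 0 means "queue is empty" */
}

static size_t flush_time(struct queue *q, uint64_t limit)
{
	size_t i, delivered = 0;

	for (i = 0; i < q->nr && q->ts[i] <= limit; i++) {
		printf("deliver event @ %" PRIu64 "\n", q->ts[i]);
		delivered++;
	}
	/* Shift the still-queued (newer) events to the front. */
	for (size_t j = i; j < q->nr; j++)
		q->ts[j - delivered] = q->ts[j];
	q->nr -= delivered;
	return delivered;
}

int main(void)
{
	struct queue q = { .ts = { 100, 200, 300, 400 }, .nr = 4 };

	printf("first ts: %" PRIu64 "\n", first_time(&q));
	flush_time(&q, 250);			/* delivers 100 and 200 */
	printf("left: %zu, next ts: %" PRIu64 "\n", q.nr, first_time(&q));
	return 0;
}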
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 59be3466d64d..920e1e6551dd 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2462,7 +2462,7 @@ restart:
2462 if (!name_only && strlen(syms->alias)) 2462 if (!name_only && strlen(syms->alias))
2463 snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); 2463 snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
2464 else 2464 else
2465 strncpy(name, syms->symbol, MAX_NAME_LEN); 2465 strlcpy(name, syms->symbol, MAX_NAME_LEN);
2466 2466
2467 evt_list[evt_i] = strdup(name); 2467 evt_list[evt_i] = strdup(name);
2468 if (evt_list[evt_i] == NULL) 2468 if (evt_list[evt_i] == NULL)
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 7e49baad304d..11a234740632 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -145,7 +145,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
145 int fd, ret = -1; 145 int fd, ret = -1;
146 char path[PATH_MAX]; 146 char path[PATH_MAX];
147 147
148 snprintf(path, PATH_MAX, "%s/%s.scale", dir, name); 148 scnprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
149 149
150 fd = open(path, O_RDONLY); 150 fd = open(path, O_RDONLY);
151 if (fd == -1) 151 if (fd == -1)
@@ -175,7 +175,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
175 ssize_t sret; 175 ssize_t sret;
176 int fd; 176 int fd;
177 177
178 snprintf(path, PATH_MAX, "%s/%s.unit", dir, name); 178 scnprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
179 179
180 fd = open(path, O_RDONLY); 180 fd = open(path, O_RDONLY);
181 if (fd == -1) 181 if (fd == -1)
@@ -205,7 +205,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, char *dir, char *name)
205 char path[PATH_MAX]; 205 char path[PATH_MAX];
206 int fd; 206 int fd;
207 207
208 snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name); 208 scnprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
209 209
210 fd = open(path, O_RDONLY); 210 fd = open(path, O_RDONLY);
211 if (fd == -1) 211 if (fd == -1)
@@ -223,7 +223,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
223 char path[PATH_MAX]; 223 char path[PATH_MAX];
224 int fd; 224 int fd;
225 225
226 snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name); 226 scnprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
227 227
228 fd = open(path, O_RDONLY); 228 fd = open(path, O_RDONLY);
229 if (fd == -1) 229 if (fd == -1)
@@ -655,45 +655,6 @@ static int is_arm_pmu_core(const char *name)
655 return 0; 655 return 0;
656} 656}
657 657
658/*
659 * Return the CPU id as a raw string.
660 *
661 * Each architecture should provide a more precise id string that
662 * can be use to match the architecture's "mapfile".
663 */
664char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
665{
666 return NULL;
667}
668
669/* Return zero when the cpuid from the mapfile.csv matches the
670 * cpuid string generated on this platform.
671 * Otherwise return non-zero.
672 */
673int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
674{
675 regex_t re;
676 regmatch_t pmatch[1];
677 int match;
678
679 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
680 /* Warn unable to generate match particular string. */
681 pr_info("Invalid regular expression %s\n", mapcpuid);
682 return 1;
683 }
684
685 match = !regexec(&re, cpuid, 1, pmatch, 0);
686 regfree(&re);
687 if (match) {
688 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
689
690 /* Verify the entire string matched. */
691 if (match_len == strlen(cpuid))
692 return 0;
693 }
694 return 1;
695}
696
697static char *perf_pmu__getcpuid(struct perf_pmu *pmu) 658static char *perf_pmu__getcpuid(struct perf_pmu *pmu)
698{ 659{
699 char *cpuid; 660 char *cpuid;
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index e86f8be89157..18a59fba97ff 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -692,7 +692,7 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
692 return ret; 692 return ret;
693 693
694 for (i = 0; i < ntevs && ret >= 0; i++) { 694 for (i = 0; i < ntevs && ret >= 0; i++) {
695 /* point.address is the addres of point.symbol + point.offset */ 695 /* point.address is the address of point.symbol + point.offset */
696 tevs[i].point.address -= stext; 696 tevs[i].point.address -= stext;
697 tevs[i].point.module = strdup(exec); 697 tevs[i].point.module = strdup(exec);
698 if (!tevs[i].point.module) { 698 if (!tevs[i].point.module) {
@@ -3062,7 +3062,7 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev,
3062 /* 3062 /*
3063 * Give it a '0x' leading symbol name. 3063 * Give it a '0x' leading symbol name.
3064 * In __add_probe_trace_events, a NULL symbol is interpreted as 3064 * In __add_probe_trace_events, a NULL symbol is interpreted as
3065 * invalud. 3065 * invalid.
3066 */ 3066 */
3067 if (asprintf(&tp->symbol, "0x%lx", tp->address) < 0) 3067 if (asprintf(&tp->symbol, "0x%lx", tp->address) < 0)
3068 goto errout; 3068 goto errout;
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index aac7817d9e14..0b1195cad0e5 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -424,7 +424,7 @@ static int probe_cache__open(struct probe_cache *pcache, const char *target,
424 424
425 if (target && build_id_cache__cached(target)) { 425 if (target && build_id_cache__cached(target)) {
426 /* This is a cached buildid */ 426 /* This is a cached buildid */
427 strncpy(sbuildid, target, SBUILD_ID_SIZE); 427 strlcpy(sbuildid, target, SBUILD_ID_SIZE);
428 dir_name = build_id_cache__linkname(sbuildid, NULL, 0); 428 dir_name = build_id_cache__linkname(sbuildid, NULL, 0);
429 goto found; 429 goto found;
430 } 430 }
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 50150dfc0cdf..47628e85c5eb 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -386,7 +386,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
386 struct tep_format_field *field; 386 struct tep_format_field *field;
387 387
388 if (!evsel->tp_format) { 388 if (!evsel->tp_format) {
389 struct tep_event_format *tp_format; 389 struct tep_event *tp_format;
390 390
391 tp_format = trace_event__tp_format_id(evsel->attr.config); 391 tp_format = trace_event__tp_format_id(evsel->attr.config);
392 if (!tp_format) 392 if (!tp_format)
@@ -1240,7 +1240,7 @@ static struct {
1240static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel, 1240static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
1241 PyObject *args, PyObject *kwargs) 1241 PyObject *args, PyObject *kwargs)
1242{ 1242{
1243 struct tep_event_format *tp_format; 1243 struct tep_event *tp_format;
1244 static char *kwlist[] = { "sys", "name", NULL }; 1244 static char *kwlist[] = { "sys", "name", NULL };
1245 char *sys = NULL; 1245 char *sys = NULL;
1246 char *name = NULL; 1246 char *name = NULL;
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index a2eeebbfb25f..68b2570304ec 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -506,7 +506,7 @@ static int s390_cpumsf_samples(struct s390_cpumsf_queue *sfq, u64 *ts)
506 aux_ts = get_trailer_time(buf); 506 aux_ts = get_trailer_time(buf);
507 if (!aux_ts) { 507 if (!aux_ts) {
508 pr_err("[%#08" PRIx64 "] Invalid AUX trailer entry TOD clock base\n", 508 pr_err("[%#08" PRIx64 "] Invalid AUX trailer entry TOD clock base\n",
509 sfq->buffer->data_offset); 509 (s64)sfq->buffer->data_offset);
510 aux_ts = ~0ULL; 510 aux_ts = ~0ULL;
511 goto out; 511 goto out;
512 } 512 }
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 89cb887648f9..b93f36b887b5 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -189,7 +189,7 @@ static void define_flag_field(const char *ev_name,
189 LEAVE; 189 LEAVE;
190} 190}
191 191
192static void define_event_symbols(struct tep_event_format *event, 192static void define_event_symbols(struct tep_event *event,
193 const char *ev_name, 193 const char *ev_name,
194 struct tep_print_arg *args) 194 struct tep_print_arg *args)
195{ 195{
@@ -338,7 +338,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
338 struct addr_location *al) 338 struct addr_location *al)
339{ 339{
340 struct thread *thread = al->thread; 340 struct thread *thread = al->thread;
341 struct tep_event_format *event = evsel->tp_format; 341 struct tep_event *event = evsel->tp_format;
342 struct tep_format_field *field; 342 struct tep_format_field *field;
343 static char handler[256]; 343 static char handler[256];
344 unsigned long long val; 344 unsigned long long val;
@@ -537,7 +537,7 @@ static int perl_stop_script(void)
537 537
538static int perl_generate_script(struct tep_handle *pevent, const char *outfile) 538static int perl_generate_script(struct tep_handle *pevent, const char *outfile)
539{ 539{
540 struct tep_event_format *event = NULL; 540 struct tep_event *event = NULL;
541 struct tep_format_field *f; 541 struct tep_format_field *f;
542 char fname[PATH_MAX]; 542 char fname[PATH_MAX];
543 int not_first, count; 543 int not_first, count;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 69aa93d4ee99..87ef16a1b17e 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -264,7 +264,7 @@ static void define_field(enum tep_print_arg_type field_type,
264 Py_DECREF(t); 264 Py_DECREF(t);
265} 265}
266 266
267static void define_event_symbols(struct tep_event_format *event, 267static void define_event_symbols(struct tep_event *event,
268 const char *ev_name, 268 const char *ev_name,
269 struct tep_print_arg *args) 269 struct tep_print_arg *args)
270{ 270{
@@ -332,7 +332,7 @@ static void define_event_symbols(struct tep_event_format *event,
332 define_event_symbols(event, ev_name, args->next); 332 define_event_symbols(event, ev_name, args->next);
333} 333}
334 334
335static PyObject *get_field_numeric_entry(struct tep_event_format *event, 335static PyObject *get_field_numeric_entry(struct tep_event *event,
336 struct tep_format_field *field, void *data) 336 struct tep_format_field *field, void *data)
337{ 337{
338 bool is_array = field->flags & TEP_FIELD_IS_ARRAY; 338 bool is_array = field->flags & TEP_FIELD_IS_ARRAY;
@@ -494,14 +494,14 @@ static PyObject *python_process_brstack(struct perf_sample *sample,
494 pydict_set_item_string_decref(pyelem, "cycles", 494 pydict_set_item_string_decref(pyelem, "cycles",
495 PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles)); 495 PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles));
496 496
497 thread__find_map(thread, sample->cpumode, 497 thread__find_map_fb(thread, sample->cpumode,
498 br->entries[i].from, &al); 498 br->entries[i].from, &al);
499 dsoname = get_dsoname(al.map); 499 dsoname = get_dsoname(al.map);
500 pydict_set_item_string_decref(pyelem, "from_dsoname", 500 pydict_set_item_string_decref(pyelem, "from_dsoname",
501 _PyUnicode_FromString(dsoname)); 501 _PyUnicode_FromString(dsoname));
502 502
503 thread__find_map(thread, sample->cpumode, 503 thread__find_map_fb(thread, sample->cpumode,
504 br->entries[i].to, &al); 504 br->entries[i].to, &al);
505 dsoname = get_dsoname(al.map); 505 dsoname = get_dsoname(al.map);
506 pydict_set_item_string_decref(pyelem, "to_dsoname", 506 pydict_set_item_string_decref(pyelem, "to_dsoname",
507 _PyUnicode_FromString(dsoname)); 507 _PyUnicode_FromString(dsoname));
@@ -576,14 +576,14 @@ static PyObject *python_process_brstacksym(struct perf_sample *sample,
576 if (!pyelem) 576 if (!pyelem)
577 Py_FatalError("couldn't create Python dictionary"); 577 Py_FatalError("couldn't create Python dictionary");
578 578
579 thread__find_symbol(thread, sample->cpumode, 579 thread__find_symbol_fb(thread, sample->cpumode,
580 br->entries[i].from, &al); 580 br->entries[i].from, &al);
581 get_symoff(al.sym, &al, true, bf, sizeof(bf)); 581 get_symoff(al.sym, &al, true, bf, sizeof(bf));
582 pydict_set_item_string_decref(pyelem, "from", 582 pydict_set_item_string_decref(pyelem, "from",
583 _PyUnicode_FromString(bf)); 583 _PyUnicode_FromString(bf));
584 584
585 thread__find_symbol(thread, sample->cpumode, 585 thread__find_symbol_fb(thread, sample->cpumode,
586 br->entries[i].to, &al); 586 br->entries[i].to, &al);
587 get_symoff(al.sym, &al, true, bf, sizeof(bf)); 587 get_symoff(al.sym, &al, true, bf, sizeof(bf));
588 pydict_set_item_string_decref(pyelem, "to", 588 pydict_set_item_string_decref(pyelem, "to",
589 _PyUnicode_FromString(bf)); 589 _PyUnicode_FromString(bf));
@@ -790,7 +790,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
790 struct perf_evsel *evsel, 790 struct perf_evsel *evsel,
791 struct addr_location *al) 791 struct addr_location *al)
792{ 792{
793 struct tep_event_format *event = evsel->tp_format; 793 struct tep_event *event = evsel->tp_format;
794 PyObject *handler, *context, *t, *obj = NULL, *callchain; 794 PyObject *handler, *context, *t, *obj = NULL, *callchain;
795 PyObject *dict = NULL, *all_entries_dict = NULL; 795 PyObject *dict = NULL, *all_entries_dict = NULL;
796 static char handler_name[256]; 796 static char handler_name[256];
@@ -1590,7 +1590,7 @@ static int python_stop_script(void)
1590 1590
1591static int python_generate_script(struct tep_handle *pevent, const char *outfile) 1591static int python_generate_script(struct tep_handle *pevent, const char *outfile)
1592{ 1592{
1593 struct tep_event_format *event = NULL; 1593 struct tep_event *event = NULL;
1594 struct tep_format_field *f; 1594 struct tep_format_field *f;
1595 char fname[PATH_MAX]; 1595 char fname[PATH_MAX];
1596 int not_first, count; 1596 int not_first, count;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 7d2c8ce6cfad..78a067777144 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -24,6 +24,7 @@
24#include "thread.h" 24#include "thread.h"
25#include "thread-stack.h" 25#include "thread-stack.h"
26#include "stat.h" 26#include "stat.h"
27#include "arch/common.h"
27 28
28static int perf_session__deliver_event(struct perf_session *session, 29static int perf_session__deliver_event(struct perf_session *session,
29 union perf_event *event, 30 union perf_event *event,
@@ -125,7 +126,8 @@ struct perf_session *perf_session__new(struct perf_data *data,
125 session->tool = tool; 126 session->tool = tool;
126 INIT_LIST_HEAD(&session->auxtrace_index); 127 INIT_LIST_HEAD(&session->auxtrace_index);
127 machines__init(&session->machines); 128 machines__init(&session->machines);
128 ordered_events__init(&session->ordered_events, ordered_events__deliver_event); 129 ordered_events__init(&session->ordered_events,
130 ordered_events__deliver_event, NULL);
129 131
130 if (data) { 132 if (data) {
131 if (perf_data__open(data)) 133 if (perf_data__open(data))
@@ -150,6 +152,9 @@ struct perf_session *perf_session__new(struct perf_data *data,
150 session->machines.host.env = &perf_env; 152 session->machines.host.env = &perf_env;
151 } 153 }
152 154
155 session->machines.host.single_address_space =
156 perf_env__single_address_space(session->machines.host.env);
157
153 if (!data || perf_data__is_write(data)) { 158 if (!data || perf_data__is_write(data)) {
154 /* 159 /*
155 * In O_RDONLY mode this will be performed when reading the 160 * In O_RDONLY mode this will be performed when reading the
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index f96c005b3c41..6c1a83768eb0 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -13,6 +13,7 @@
13#include "strlist.h" 13#include "strlist.h"
14#include <traceevent/event-parse.h> 14#include <traceevent/event-parse.h>
15#include "mem-events.h" 15#include "mem-events.h"
16#include "annotate.h"
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17 18
18regex_t parent_regex; 19regex_t parent_regex;
@@ -36,7 +37,7 @@ enum sort_mode sort__mode = SORT_MODE__NORMAL;
36 * -t, --field-separator 37 * -t, --field-separator
37 * 38 *
38 * option, that uses a special separator character and don't pad with spaces, 39 * option, that uses a special separator character and don't pad with spaces,
39 * replacing all occurances of this separator in symbol names (and other 40 * replacing all occurrences of this separator in symbol names (and other
40 * output) with a '.' character, that thus it's the only non valid separator. 41 * output) with a '.' character, that thus it's the only non valid separator.
41*/ 42*/
42static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) 43static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
@@ -422,6 +423,64 @@ struct sort_entry sort_srcline_to = {
422 .se_width_idx = HISTC_SRCLINE_TO, 423 .se_width_idx = HISTC_SRCLINE_TO,
423}; 424};
424 425
426static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
427 size_t size, unsigned int width)
428{
429
430 struct symbol *sym = he->ms.sym;
431 struct map *map = he->ms.map;
432 struct perf_evsel *evsel = hists_to_evsel(he->hists);
433 struct annotation *notes;
434 double ipc = 0.0, coverage = 0.0;
435 char tmp[64];
436
437 if (!sym)
438 return repsep_snprintf(bf, size, "%-*s", width, "-");
439
440 if (!sym->annotate2 && symbol__annotate2(sym, map, evsel,
441 &annotation__default_options, NULL) < 0) {
442 return 0;
443 }
444
445 notes = symbol__annotation(sym);
446
447 if (notes->hit_cycles)
448 ipc = notes->hit_insn / ((double)notes->hit_cycles);
449
450 if (notes->total_insn) {
451 coverage = notes->cover_insn * 100.0 /
452 ((double)notes->total_insn);
453 }
454
455 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
456 return repsep_snprintf(bf, size, "%-*s", width, tmp);
457}
458
459struct sort_entry sort_sym_ipc = {
460 .se_header = "IPC [IPC Coverage]",
461 .se_cmp = sort__sym_cmp,
462 .se_snprintf = hist_entry__sym_ipc_snprintf,
463 .se_width_idx = HISTC_SYMBOL_IPC,
464};
465
466static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
467 __maybe_unused,
468 char *bf, size_t size,
469 unsigned int width)
470{
471 char tmp[64];
472
473 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
474 return repsep_snprintf(bf, size, "%-*s", width, tmp);
475}
476
477struct sort_entry sort_sym_ipc_null = {
478 .se_header = "IPC [IPC Coverage]",
479 .se_cmp = sort__sym_cmp,
480 .se_snprintf = hist_entry__sym_ipc_null_snprintf,
481 .se_width_idx = HISTC_SYMBOL_IPC,
482};
483
425/* --sort srcfile */ 484/* --sort srcfile */
426 485
427static char no_srcfile[1]; 486static char no_srcfile[1];
@@ -1574,6 +1633,7 @@ static struct sort_dimension common_sort_dimensions[] = {
1574 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size), 1633 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
1575 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size), 1634 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
1576 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), 1635 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
1636 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
1577}; 1637};
1578 1638
1579#undef DIM 1639#undef DIM
@@ -1591,6 +1651,7 @@ static struct sort_dimension bstack_sort_dimensions[] = {
1591 DIM(SORT_CYCLES, "cycles", sort_cycles), 1651 DIM(SORT_CYCLES, "cycles", sort_cycles),
1592 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 1652 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
1593 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 1653 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
1654 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
1594}; 1655};
1595 1656
1596#undef DIM 1657#undef DIM
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index a97cf8e6be86..130fe37fe2df 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -229,6 +229,7 @@ enum sort_type {
229 SORT_SYM_SIZE, 229 SORT_SYM_SIZE,
230 SORT_DSO_SIZE, 230 SORT_DSO_SIZE,
231 SORT_CGROUP_ID, 231 SORT_CGROUP_ID,
232 SORT_SYM_IPC_NULL,
232 233
233 /* branch stack specific sort keys */ 234 /* branch stack specific sort keys */
234 __SORT_BRANCH_STACK, 235 __SORT_BRANCH_STACK,
@@ -242,6 +243,7 @@ enum sort_type {
242 SORT_CYCLES, 243 SORT_CYCLES,
243 SORT_SRCLINE_FROM, 244 SORT_SRCLINE_FROM,
244 SORT_SRCLINE_TO, 245 SORT_SRCLINE_TO,
246 SORT_SYM_IPC,
245 247
246 /* memory mode specific sort keys */ 248 /* memory mode specific sort keys */
247 __SORT_MEMORY_MODE, 249 __SORT_MEMORY_MODE,
diff --git a/tools/perf/util/srccode.c b/tools/perf/util/srccode.c
new file mode 100644
index 000000000000..fcc8630f6dff
--- /dev/null
+++ b/tools/perf/util/srccode.c
@@ -0,0 +1,186 @@
1/*
2 * Manage printing of source lines
3 * Copyright (c) 2017, Intel Corporation.
4 * Author: Andi Kleen
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#include "linux/list.h"
16#include <stdlib.h>
17#include <sys/mman.h>
18#include <sys/stat.h>
19#include <fcntl.h>
20#include <unistd.h>
21#include <assert.h>
22#include <string.h>
23#include "srccode.h"
24#include "debug.h"
25#include "util.h"
26
27#define MAXSRCCACHE (32*1024*1024)
28#define MAXSRCFILES 64
29#define SRC_HTAB_SZ 64
30
31struct srcfile {
32 struct hlist_node hash_nd;
33 struct list_head nd;
34 char *fn;
35 char **lines;
36 char *map;
37 unsigned numlines;
38 size_t maplen;
39};
40
41static struct hlist_head srcfile_htab[SRC_HTAB_SZ];
42static LIST_HEAD(srcfile_list);
43static long map_total_sz;
44static int num_srcfiles;
45
46static unsigned shash(unsigned char *s)
47{
48 unsigned h = 0;
49 while (*s)
50 h = 65599 * h + *s++;
51 return h ^ (h >> 16);
52}
53
54static int countlines(char *map, int maplen)
55{
56 int numl;
57 char *end = map + maplen;
58 char *p = map;
59
60 if (maplen == 0)
61 return 0;
62 numl = 0;
63 while (p < end && (p = memchr(p, '\n', end - p)) != NULL) {
64 numl++;
65 p++;
66 }
67 if (p < end)
68 numl++;
69 return numl;
70}
71
72static void fill_lines(char **lines, int maxline, char *map, int maplen)
73{
74 int l;
75 char *end = map + maplen;
76 char *p = map;
77
78 if (maplen == 0 || maxline == 0)
79 return;
80 l = 0;
81 lines[l++] = map;
82 while (p < end && (p = memchr(p, '\n', end - p)) != NULL) {
83 if (l >= maxline)
84 return;
85 lines[l++] = ++p;
86 }
87 if (p < end)
88 lines[l] = p;
89}
90
91static void free_srcfile(struct srcfile *sf)
92{
93 list_del(&sf->nd);
94 hlist_del(&sf->hash_nd);
95 map_total_sz -= sf->maplen;
96 munmap(sf->map, sf->maplen);
97 free(sf->lines);
98 free(sf->fn);
99 free(sf);
100 num_srcfiles--;
101}
102
103static struct srcfile *find_srcfile(char *fn)
104{
105 struct stat st;
106 struct srcfile *h;
107 int fd;
108 unsigned long sz;
109 unsigned hval = shash((unsigned char *)fn) % SRC_HTAB_SZ;
110
111 hlist_for_each_entry (h, &srcfile_htab[hval], hash_nd) {
112 if (!strcmp(fn, h->fn)) {
113 /* Move to front */
114 list_del(&h->nd);
115 list_add(&h->nd, &srcfile_list);
116 return h;
117 }
118 }
119
120 /* Only prune if there is more than one entry */
121 while ((num_srcfiles > MAXSRCFILES || map_total_sz > MAXSRCCACHE) &&
122 srcfile_list.next != &srcfile_list) {
123 assert(!list_empty(&srcfile_list));
124 h = list_entry(srcfile_list.prev, struct srcfile, nd);
125 free_srcfile(h);
126 }
127
128 fd = open(fn, O_RDONLY);
129 if (fd < 0 || fstat(fd, &st) < 0) {
130 pr_debug("cannot open source file %s\n", fn);
131 return NULL;
132 }
133
134 h = malloc(sizeof(struct srcfile));
135 if (!h)
136 return NULL;
137
138 h->fn = strdup(fn);
139 if (!h->fn)
140 goto out_h;
141
142 h->maplen = st.st_size;
143 sz = (h->maplen + page_size - 1) & ~(page_size - 1);
144 h->map = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
145 close(fd);
146 if (h->map == (char *)-1) {
147 pr_debug("cannot mmap source file %s\n", fn);
148 goto out_fn;
149 }
150 h->numlines = countlines(h->map, h->maplen);
151 h->lines = calloc(h->numlines, sizeof(char *));
152 if (!h->lines)
153 goto out_map;
154 fill_lines(h->lines, h->numlines, h->map, h->maplen);
155 list_add(&h->nd, &srcfile_list);
156 hlist_add_head(&h->hash_nd, &srcfile_htab[hval]);
157 map_total_sz += h->maplen;
158 num_srcfiles++;
159 return h;
160
161out_map:
162 munmap(h->map, sz);
163out_fn:
164 free(h->fn);
165out_h:
166 free(h);
167 return NULL;
168}
169
170/* Result is not 0 terminated */
171char *find_sourceline(char *fn, unsigned line, int *lenp)
172{
173 char *l, *p;
174 struct srcfile *sf = find_srcfile(fn);
175 if (!sf)
176 return NULL;
177 line--;
178 if (line >= sf->numlines)
179 return NULL;
180 l = sf->lines[line];
181 if (!l)
182 return NULL;
183 p = memchr(l, '\n', sf->map + sf->maplen - l);
184 *lenp = p - l;
185 return l;
186}
diff --git a/tools/perf/util/srccode.h b/tools/perf/util/srccode.h
new file mode 100644
index 000000000000..e500a746d5f1
--- /dev/null
+++ b/tools/perf/util/srccode.h
@@ -0,0 +1,7 @@
1#ifndef SRCCODE_H
2#define SRCCODE_H 1
3
4/* Result is not 0 terminated */
5char *find_sourceline(char *fn, unsigned line, int *lenp);
6
7#endif
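find_sourceline() hands back a pointer into the mmap'ed source file together with the line length, and as the comment says the result is not NUL terminated, so callers must print it with an explicit length. A small usage sketch; the wrapper function and its arguments are assumptions, not part of the patch:

#include <stdio.h>
#include "srccode.h"

/* Print one line of source text; %.*s is needed because the returned
 * pointer aims into the mmap'ed file and carries no terminating NUL. */
static void print_one_srcline(char *file, unsigned line)
{
	int len;
	char *src = find_sourceline(file, line, &len);

	if (src)
		printf("%6u  %.*s\n", line, len, src);
	else
		printf("%6u  <source unavailable>\n", line);
}

Repeated lookups against the same file stay cheap: find_srcfile() keeps the mapped files on an LRU list and only prunes once it exceeds MAXSRCFILES entries or MAXSRCCACHE bytes of mapped text.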
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index e767c4a9d4d2..dc86597d0cc4 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -548,6 +548,34 @@ out:
548 return srcline; 548 return srcline;
549} 549}
550 550
551/* Returns filename and fills in line number in line */
552char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line)
553{
554 char *file = NULL;
555 const char *dso_name;
556
557 if (!dso->has_srcline)
558 goto out;
559
560 dso_name = dso__name(dso);
561 if (dso_name == NULL)
562 goto out;
563
564 if (!addr2line(dso_name, addr, &file, line, dso, true, NULL, NULL))
565 goto out;
566
567 dso->a2l_fails = 0;
568 return file;
569
570out:
571 if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) {
572 dso->has_srcline = 0;
573 dso__free_a2l(dso);
574 }
575
576 return NULL;
577}
578
551void free_srcline(char *srcline) 579void free_srcline(char *srcline)
552{ 580{
553 if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0) 581 if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0)
diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h
index b2bb5502fd62..5762212dc342 100644
--- a/tools/perf/util/srcline.h
+++ b/tools/perf/util/srcline.h
@@ -16,6 +16,7 @@ char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
16 bool show_sym, bool show_addr, bool unwind_inlines, 16 bool show_sym, bool show_addr, bool unwind_inlines,
17 u64 ip); 17 u64 ip);
18void free_srcline(char *srcline); 18void free_srcline(char *srcline);
19char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line);
19 20
20/* insert the srcline into the DSO, which will take ownership */ 21/* insert the srcline into the DSO, which will take ownership */
21void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline); 22void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline);
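get_srcline_split() is what makes the srccode cache usable for source display: unlike __get_srcline() it returns the bare file name and reports the line number through the out parameter, so the result can be fed straight into find_sourceline(). A hedged sketch of that pairing; the wrapper, its caller and the assumption that the returned file name is heap-allocated and owned by the caller are mine, not the patch's:

#include <stdio.h>
#include <stdlib.h>
#include "srcline.h"
#include "srccode.h"

/* Resolve addr to file:line via addr2line, then show the source text. */
static void fprintf_srccode(FILE *fp, struct dso *dso, u64 addr)
{
	unsigned line = 0;
	int len;
	char *srcfile = get_srcline_split(dso, addr, &line);
	char *src;

	if (!srcfile)
		return;

	src = find_sourceline(srcfile, line, &len);
	if (src)
		fprintf(fp, "%s:%u\t%.*s\n", srcfile, line, len, src);

	free(srcfile);	/* assumed caller-owned, as with other srcline results */
}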
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index e7b4c44ebb62..665ee374fc01 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -59,6 +59,15 @@ static void print_noise(struct perf_stat_config *config,
59 print_noise_pct(config, stddev_stats(&ps->res_stats[0]), avg); 59 print_noise_pct(config, stddev_stats(&ps->res_stats[0]), avg);
60} 60}
61 61
62static void print_cgroup(struct perf_stat_config *config, struct perf_evsel *evsel)
63{
64 if (nr_cgroups) {
65 const char *cgrp_name = evsel->cgrp ? evsel->cgrp->name : "";
66 fprintf(config->output, "%s%s", config->csv_sep, cgrp_name);
67 }
68}
69
70
62static void aggr_printout(struct perf_stat_config *config, 71static void aggr_printout(struct perf_stat_config *config,
63 struct perf_evsel *evsel, int id, int nr) 72 struct perf_evsel *evsel, int id, int nr)
64{ 73{
@@ -336,8 +345,7 @@ static void abs_printout(struct perf_stat_config *config,
336 345
337 fprintf(output, "%-*s", config->csv_output ? 0 : 25, perf_evsel__name(evsel)); 346 fprintf(output, "%-*s", config->csv_output ? 0 : 25, perf_evsel__name(evsel));
338 347
339 if (evsel->cgrp) 348 print_cgroup(config, evsel);
340 fprintf(output, "%s%s", config->csv_sep, evsel->cgrp->name);
341} 349}
342 350
343static bool is_mixed_hw_group(struct perf_evsel *counter) 351static bool is_mixed_hw_group(struct perf_evsel *counter)
@@ -431,9 +439,7 @@ static void printout(struct perf_stat_config *config, int id, int nr,
431 config->csv_output ? 0 : -25, 439 config->csv_output ? 0 : -25,
432 perf_evsel__name(counter)); 440 perf_evsel__name(counter));
433 441
434 if (counter->cgrp) 442 print_cgroup(config, counter);
435 fprintf(config->output, "%s%s",
436 config->csv_sep, counter->cgrp->name);
437 443
438 if (!config->csv_output) 444 if (!config->csv_output)
439 pm(config, &os, NULL, NULL, "", 0); 445 pm(config, &os, NULL, NULL, "", 0);
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 8ad32763cfff..3c22c58b3e90 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -209,12 +209,12 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
209 int cpu, struct runtime_stat *st) 209 int cpu, struct runtime_stat *st)
210{ 210{
211 int ctx = evsel_context(counter); 211 int ctx = evsel_context(counter);
212 u64 count_ns = count;
212 213
213 count *= counter->scale; 214 count *= counter->scale;
214 215
215 if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) || 216 if (perf_evsel__is_clock(counter))
216 perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK)) 217 update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns);
217 update_runtime_stat(st, STAT_NSECS, 0, cpu, count);
218 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) 218 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
219 update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count); 219 update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
220 else if (perf_stat_evsel__is(counter, CYCLES_IN_TX)) 220 else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
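The count_ns copy matters because perf_evsel__is_clock() also matches the clock events whose scale converts the raw nanosecond count into the displayed unit (msec via a 1e-6 scale is the usual setup, stated here as an assumption), while the STAT_NSECS slot that feeds metrics such as "CPUs utilized" still expects plain nanoseconds; hence the value is captured before the `count *= counter->scale` line. A toy calculation under that assumed scale:

#include <stdio.h>

/* Illustrative numbers only: 2.5 s of task-clock over a 1 s wall clock. */
int main(void)
{
	unsigned long long count_ns = 2500000000ULL; /* raw counter value  */
	double scale   = 1e-6;                       /* assumed msec scale */
	double wall_ns = 1000000000.0;

	printf("displayed: %.2f msec\n", count_ns * scale);                      /* 2500.00 */
	printf("CPUs utilized from ns:     %.3f\n", count_ns / wall_ns);         /* 2.500   */
	printf("CPUs utilized from scaled: %.9f\n", count_ns * scale / wall_ns); /* bogus   */
	return 0;
}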
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 1cbada2dc6be..f735ee038713 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -334,7 +334,7 @@ static char *cpu_model(void)
334 if (file) { 334 if (file) {
335 while (fgets(buf, 255, file)) { 335 while (fgets(buf, 255, file)) {
336 if (strstr(buf, "model name")) { 336 if (strstr(buf, "model name")) {
337 strncpy(cpu_m, &buf[13], 255); 337 strlcpy(cpu_m, &buf[13], 255);
338 break; 338 break;
339 } 339 }
340 } 340 }
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index d188b7588152..01f2c7385e38 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1537,17 +1537,6 @@ int dso__load(struct dso *dso, struct map *map)
1537 dso->adjust_symbols = 0; 1537 dso->adjust_symbols = 0;
1538 1538
1539 if (perfmap) { 1539 if (perfmap) {
1540 struct stat st;
1541
1542 if (lstat(map_path, &st) < 0)
1543 goto out;
1544
1545 if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) {
1546 pr_warning("File %s not owned by current user or root, "
1547 "ignoring it (use -f to override).\n", map_path);
1548 goto out;
1549 }
1550
1551 ret = dso__load_perf_map(map_path, dso); 1540 ret = dso__load_perf_map(map_path, dso);
1552 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT : 1541 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1553 DSO_BINARY_TYPE__NOT_FOUND; 1542 DSO_BINARY_TYPE__NOT_FOUND;
@@ -1680,11 +1669,22 @@ struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
1680{ 1669{
1681 struct maps *maps = &mg->maps; 1670 struct maps *maps = &mg->maps;
1682 struct map *map; 1671 struct map *map;
1672 struct rb_node *node;
1683 1673
1684 down_read(&maps->lock); 1674 down_read(&maps->lock);
1685 1675
1686 for (map = maps__first(maps); map; map = map__next(map)) { 1676 for (node = maps->names.rb_node; node; ) {
1687 if (map->dso && strcmp(map->dso->short_name, name) == 0) 1677 int rc;
1678
1679 map = rb_entry(node, struct map, rb_node_name);
1680
1681 rc = strcmp(map->dso->short_name, name);
1682 if (rc < 0)
1683 node = node->rb_left;
1684 else if (rc > 0)
1685 node = node->rb_right;
1686 else
1687
1688 goto out_unlock; 1688 goto out_unlock;
1689 } 1689 }
1690 1690
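map_groups__find_by_name() now binary-searches a second rbtree (maps->names) keyed on the dso short name instead of walking every map, turning repeated lookups from O(n) into O(log n). Note the comparison convention: the walk goes rb_left when the node's name compares less than the target, so whatever inserts into maps->names (the map.c side of this series, not shown in this hunk) has to use the same orientation. A hedged sketch of that insertion counterpart, with the function name and placement assumed:

#include <string.h>
#include <linux/rbtree.h>
#include "map.h"
#include "dso.h"

/* Assumed shape of the insert side, which would live next to the other
 * map-group insert helpers in map.c.  The left/right choice mirrors the
 * lookup above so the two stay consistent. */
static void __maps__insert_name(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->names.rb_node;
	struct rb_node *parent = NULL;
	struct map *m;
	int rc;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node_name);
		rc = strcmp(m->dso->short_name, map->dso->short_name);
		if (rc < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&map->rb_node_name, parent, p);
	rb_insert_color(&map->rb_node_name, &maps->names);
}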
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index d026d215bdc6..14d9d438e7e2 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -63,6 +63,7 @@ struct symbol {
63 u8 ignore:1; 63 u8 ignore:1;
64 u8 inlined:1; 64 u8 inlined:1;
65 u8 arch_sym; 65 u8 arch_sym;
66 bool annotate2;
66 char name[0]; 67 char name[0];
67}; 68};
68 69
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 3d9ed7d0e281..c83372329f89 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -64,6 +64,7 @@ struct thread *thread__new(pid_t pid, pid_t tid)
64 RB_CLEAR_NODE(&thread->rb_node); 64 RB_CLEAR_NODE(&thread->rb_node);
65 /* Thread holds first ref to nsdata. */ 65 /* Thread holds first ref to nsdata. */
66 thread->nsinfo = nsinfo__new(pid); 66 thread->nsinfo = nsinfo__new(pid);
67 srccode_state_init(&thread->srccode_state);
67 } 68 }
68 69
69 return thread; 70 return thread;
@@ -103,6 +104,7 @@ void thread__delete(struct thread *thread)
103 104
104 unwind__finish_access(thread); 105 unwind__finish_access(thread);
105 nsinfo__zput(thread->nsinfo); 106 nsinfo__zput(thread->nsinfo);
107 srccode_state_free(&thread->srccode_state);
106 108
107 exit_rwsem(&thread->namespaces_lock); 109 exit_rwsem(&thread->namespaces_lock);
108 exit_rwsem(&thread->comm_lock); 110 exit_rwsem(&thread->comm_lock);
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 30e2b4c165fe..712dd48cc0ca 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -8,6 +8,7 @@
8#include <unistd.h> 8#include <unistd.h>
9#include <sys/types.h> 9#include <sys/types.h>
10#include "symbol.h" 10#include "symbol.h"
11#include "map.h"
11#include <strlist.h> 12#include <strlist.h>
12#include <intlist.h> 13#include <intlist.h>
13#include "rwsem.h" 14#include "rwsem.h"
@@ -38,6 +39,7 @@ struct thread {
38 void *priv; 39 void *priv;
39 struct thread_stack *ts; 40 struct thread_stack *ts;
40 struct nsinfo *nsinfo; 41 struct nsinfo *nsinfo;
42 struct srccode_state srccode_state;
41#ifdef HAVE_LIBUNWIND_SUPPORT 43#ifdef HAVE_LIBUNWIND_SUPPORT
42 void *addr_space; 44 void *addr_space;
43 struct unwind_libunwind_ops *unwind_libunwind_ops; 45 struct unwind_libunwind_ops *unwind_libunwind_ops;
@@ -96,9 +98,13 @@ struct thread *thread__main_thread(struct machine *machine, struct thread *threa
96 98
97struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr, 99struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
98 struct addr_location *al); 100 struct addr_location *al);
101struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
102 struct addr_location *al);
99 103
100struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode, 104struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
101 u64 addr, struct addr_location *al); 105 u64 addr, struct addr_location *al);
106struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
107 u64 addr, struct addr_location *al);
102 108
103void thread__find_cpumode_addr_location(struct thread *thread, u64 addr, 109void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
104 struct addr_location *al); 110 struct addr_location *al);
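Alongside the per-thread srccode_state (presumably so the source-code output can remember the last file/line printed for a thread and avoid repeating it), thread.h gains _fb variants of the resolvers. They keep the same signatures; the suffix is assumed to mean "with fallback", i.e. retry the other address spaces when the given cpumode does not resolve. A call-shape sketch only, with that semantics stated as an assumption:

#include "thread.h"

/* Assumed behaviour: resolve like thread__find_symbol(), falling back to
 * the other address spaces if the sample's cpumode yields no map. */
static struct symbol *resolve_addr_fb(struct thread *thread, u8 cpumode,
				      u64 addr, struct addr_location *al)
{
	return thread__find_symbol_fb(thread, cpumode, addr, al);
}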
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index 8e517def925b..4c8da8c4435f 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -46,8 +46,9 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
46 samples_per_sec; 46 samples_per_sec;
47 ret = SNPRINTF(bf, size, 47 ret = SNPRINTF(bf, size,
48 " PerfTop:%8.0f irqs/sec kernel:%4.1f%%" 48 " PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
49 " exact: %4.1f%% [", samples_per_sec, 49 " exact: %4.1f%% lost: %" PRIu64 "/%" PRIu64 " drop: %" PRIu64 "/%" PRIu64 " [",
50 ksamples_percent, esamples_percent); 50 samples_per_sec, ksamples_percent, esamples_percent,
51 top->lost, top->lost_total, top->drop, top->drop_total);
51 } else { 52 } else {
52 float us_samples_per_sec = top->us_samples / top->delay_secs; 53 float us_samples_per_sec = top->us_samples / top->delay_secs;
53 float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs; 54 float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
@@ -106,6 +107,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
106 top->evlist->cpus->nr > 1 ? "s" : ""); 107 top->evlist->cpus->nr > 1 ? "s" : "");
107 } 108 }
108 109
110 perf_top__reset_sample_counters(top);
109 return ret; 111 return ret;
110} 112}
111 113
@@ -113,5 +115,5 @@ void perf_top__reset_sample_counters(struct perf_top *top)
113{ 115{
114 top->samples = top->us_samples = top->kernel_samples = 116 top->samples = top->us_samples = top->kernel_samples =
115 top->exact_samples = top->guest_kernel_samples = 117 top->exact_samples = top->guest_kernel_samples =
116 top->guest_us_samples = 0; 118 top->guest_us_samples = top->lost = top->drop = 0;
117} 119}
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 9add1f72ce95..19f95eaf75c8 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -22,7 +22,7 @@ struct perf_top {
22 * Symbols will be added here in perf_event__process_sample and will 22 * Symbols will be added here in perf_event__process_sample and will
23 * get out after decayed. 23 * get out after decayed.
24 */ 24 */
25 u64 samples; 25 u64 samples, lost, lost_total, drop, drop_total;
26 u64 kernel_samples, us_samples; 26 u64 kernel_samples, us_samples;
27 u64 exact_samples; 27 u64 exact_samples;
28 u64 guest_us_samples, guest_kernel_samples; 28 u64 guest_us_samples, guest_kernel_samples;
@@ -40,6 +40,14 @@ struct perf_top {
40 const char *sym_filter; 40 const char *sym_filter;
41 float min_percent; 41 float min_percent;
42 unsigned int nr_threads_synthesize; 42 unsigned int nr_threads_synthesize;
43
44 struct {
45 struct ordered_events *in;
46 struct ordered_events data[2];
47 bool rotate;
48 pthread_mutex_t mutex;
49 pthread_cond_t cond;
50 } qe;
43}; 51};
44 52
45#define CONSOLE_CLEAR "" 53#define CONSOLE_CLEAR ""
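The new qe block gives perf top two ordered_events queues: the mmap reader appends to qe.in while a separate thread drains the other queue, and the mutex/cond/rotate trio coordinates swapping them. The builtin-top side is not in this directory, so the sketch below is only an assumed shape of that rotation; the helper name and locking details are guesses consistent with the fields added here:

#include <pthread.h>
#include "top.h"
#include "ordered-events.h"

/* Assumed rotation helper: redirect new events to the other queue, wait for
 * the reader to acknowledge (it signals qe.cond once it finishes its current
 * round and sees qe.rotate), then hand the old queue back for flushing. */
static struct ordered_events *perf_top__rotate_queues(struct perf_top *top)
{
	struct ordered_events *out = top->qe.in;

	top->qe.in = (out == &top->qe.data[0]) ? &top->qe.data[1]
					       : &top->qe.data[0];

	pthread_mutex_lock(&top->qe.mutex);
	top->qe.rotate = true;
	pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
	pthread_mutex_unlock(&top->qe.mutex);

	return out;	/* now safe to ordered_events__flush() */
}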
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 32e558a65af3..ad74be1f0e42 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -33,7 +33,7 @@ static int get_common_field(struct scripting_context *context,
33 int *offset, int *size, const char *type) 33 int *offset, int *size, const char *type)
34{ 34{
35 struct tep_handle *pevent = context->pevent; 35 struct tep_handle *pevent = context->pevent;
36 struct tep_event_format *event; 36 struct tep_event *event;
37 struct tep_format_field *field; 37 struct tep_format_field *field;
38 38
39 if (!*size) { 39 if (!*size) {
@@ -95,7 +95,7 @@ int common_pc(struct scripting_context *context)
95} 95}
96 96
97unsigned long long 97unsigned long long
98raw_field_value(struct tep_event_format *event, const char *name, void *data) 98raw_field_value(struct tep_event *event, const char *name, void *data)
99{ 99{
100 struct tep_format_field *field; 100 struct tep_format_field *field;
101 unsigned long long val; 101 unsigned long long val;
@@ -109,12 +109,12 @@ raw_field_value(struct tep_event_format *event, const char *name, void *data)
109 return val; 109 return val;
110} 110}
111 111
112unsigned long long read_size(struct tep_event_format *event, void *ptr, int size) 112unsigned long long read_size(struct tep_event *event, void *ptr, int size)
113{ 113{
114 return tep_read_number(event->pevent, ptr, size); 114 return tep_read_number(event->pevent, ptr, size);
115} 115}
116 116
117void event_format__fprintf(struct tep_event_format *event, 117void event_format__fprintf(struct tep_event *event,
118 int cpu, void *data, int size, FILE *fp) 118 int cpu, void *data, int size, FILE *fp)
119{ 119{
120 struct tep_record record; 120 struct tep_record record;
@@ -131,7 +131,7 @@ void event_format__fprintf(struct tep_event_format *event,
131 trace_seq_destroy(&s); 131 trace_seq_destroy(&s);
132} 132}
133 133
134void event_format__print(struct tep_event_format *event, 134void event_format__print(struct tep_event *event,
135 int cpu, void *data, int size) 135 int cpu, void *data, int size)
136{ 136{
137 return event_format__fprintf(event, cpu, data, size, stdout); 137 return event_format__fprintf(event, cpu, data, size, stdout);
@@ -190,12 +190,12 @@ int parse_event_file(struct tep_handle *pevent,
190 return tep_parse_event(pevent, buf, size, sys); 190 return tep_parse_event(pevent, buf, size, sys);
191} 191}
192 192
193struct tep_event_format *trace_find_next_event(struct tep_handle *pevent, 193struct tep_event *trace_find_next_event(struct tep_handle *pevent,
194 struct tep_event_format *event) 194 struct tep_event *event)
195{ 195{
196 static int idx; 196 static int idx;
197 int events_count; 197 int events_count;
198 struct tep_event_format *all_events; 198 struct tep_event *all_events;
199 199
200 all_events = tep_get_first_event(pevent); 200 all_events = tep_get_first_event(pevent);
201 events_count = tep_get_events_count(pevent); 201 events_count = tep_get_events_count(pevent);
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 76f12c705ef9..efe2f58cff4e 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -102,7 +102,7 @@ static unsigned int read4(struct tep_handle *pevent)
102 102
103 if (do_read(&data, 4) < 0) 103 if (do_read(&data, 4) < 0)
104 return 0; 104 return 0;
105 return __tep_data2host4(pevent, data); 105 return tep_read_number(pevent, &data, 4);
106} 106}
107 107
108static unsigned long long read8(struct tep_handle *pevent) 108static unsigned long long read8(struct tep_handle *pevent)
@@ -111,7 +111,7 @@ static unsigned long long read8(struct tep_handle *pevent)
111 111
112 if (do_read(&data, 8) < 0) 112 if (do_read(&data, 8) < 0)
113 return 0; 113 return 0;
114 return __tep_data2host8(pevent, data); 114 return tep_read_number(pevent, &data, 8);
115} 115}
116 116
117static char *read_string(void) 117static char *read_string(void)
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
index 95664b2f771e..cbe0dd758e3a 100644
--- a/tools/perf/util/trace-event.c
+++ b/tools/perf/util/trace-event.c
@@ -72,12 +72,12 @@ void trace_event__cleanup(struct trace_event *t)
72/* 72/*
73 * Returns pointer with encoded error via <linux/err.h> interface. 73 * Returns pointer with encoded error via <linux/err.h> interface.
74 */ 74 */
75static struct tep_event_format* 75static struct tep_event*
76tp_format(const char *sys, const char *name) 76tp_format(const char *sys, const char *name)
77{ 77{
78 char *tp_dir = get_events_file(sys); 78 char *tp_dir = get_events_file(sys);
79 struct tep_handle *pevent = tevent.pevent; 79 struct tep_handle *pevent = tevent.pevent;
80 struct tep_event_format *event = NULL; 80 struct tep_event *event = NULL;
81 char path[PATH_MAX]; 81 char path[PATH_MAX];
82 size_t size; 82 size_t size;
83 char *data; 83 char *data;
@@ -102,7 +102,7 @@ tp_format(const char *sys, const char *name)
102/* 102/*
103 * Returns pointer with encoded error via <linux/err.h> interface. 103 * Returns pointer with encoded error via <linux/err.h> interface.
104 */ 104 */
105struct tep_event_format* 105struct tep_event*
106trace_event__tp_format(const char *sys, const char *name) 106trace_event__tp_format(const char *sys, const char *name)
107{ 107{
108 if (!tevent_initialized && trace_event__init2()) 108 if (!tevent_initialized && trace_event__init2())
@@ -111,7 +111,7 @@ trace_event__tp_format(const char *sys, const char *name)
111 return tp_format(sys, name); 111 return tp_format(sys, name);
112} 112}
113 113
114struct tep_event_format *trace_event__tp_format_id(int id) 114struct tep_event *trace_event__tp_format_id(int id)
115{ 115{
116 if (!tevent_initialized && trace_event__init2()) 116 if (!tevent_initialized && trace_event__init2())
117 return ERR_PTR(-ENOMEM); 117 return ERR_PTR(-ENOMEM);
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index f024d73bfc40..d9b0a942090a 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -22,17 +22,17 @@ int trace_event__init(struct trace_event *t);
22void trace_event__cleanup(struct trace_event *t); 22void trace_event__cleanup(struct trace_event *t);
23int trace_event__register_resolver(struct machine *machine, 23int trace_event__register_resolver(struct machine *machine,
24 tep_func_resolver_t *func); 24 tep_func_resolver_t *func);
25struct tep_event_format* 25struct tep_event*
26trace_event__tp_format(const char *sys, const char *name); 26trace_event__tp_format(const char *sys, const char *name);
27 27
28struct tep_event_format *trace_event__tp_format_id(int id); 28struct tep_event *trace_event__tp_format_id(int id);
29 29
30int bigendian(void); 30int bigendian(void);
31 31
32void event_format__fprintf(struct tep_event_format *event, 32void event_format__fprintf(struct tep_event *event,
33 int cpu, void *data, int size, FILE *fp); 33 int cpu, void *data, int size, FILE *fp);
34 34
35void event_format__print(struct tep_event_format *event, 35void event_format__print(struct tep_event *event,
36 int cpu, void *data, int size); 36 int cpu, void *data, int size);
37 37
38int parse_ftrace_file(struct tep_handle *pevent, char *buf, unsigned long size); 38int parse_ftrace_file(struct tep_handle *pevent, char *buf, unsigned long size);
@@ -40,7 +40,7 @@ int parse_event_file(struct tep_handle *pevent,
40 char *buf, unsigned long size, char *sys); 40 char *buf, unsigned long size, char *sys);
41 41
42unsigned long long 42unsigned long long
43raw_field_value(struct tep_event_format *event, const char *name, void *data); 43raw_field_value(struct tep_event *event, const char *name, void *data);
44 44
45void parse_proc_kallsyms(struct tep_handle *pevent, char *file, unsigned int size); 45void parse_proc_kallsyms(struct tep_handle *pevent, char *file, unsigned int size);
46void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size); 46void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size);
@@ -48,9 +48,9 @@ void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int siz
48 48
49ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe); 49ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe);
50 50
51struct tep_event_format *trace_find_next_event(struct tep_handle *pevent, 51struct tep_event *trace_find_next_event(struct tep_handle *pevent,
52 struct tep_event_format *event); 52 struct tep_event *event);
53unsigned long long read_size(struct tep_event_format *event, void *ptr, int size); 53unsigned long long read_size(struct tep_event *event, void *ptr, int size);
54unsigned long long eval_flag(const char *flag); 54unsigned long long eval_flag(const char *flag);
55 55
56int read_tracing_data(int fd, struct list_head *pattrs); 56int read_tracing_data(int fd, struct list_head *pattrs);
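The remaining hunks are the mechanical libtraceevent rename from struct tep_event_format to struct tep_event (plus the switch from the __tep_data2host helpers to tep_read_number); callers only change their type names. A small sketch of the post-rename API, using an example tracepoint:

#include <stdio.h>
#include <linux/err.h>
#include "trace-event.h"

/* 'data'/'size' are assumed to be a raw tracepoint payload taken from a
 * sample; sched:sched_switch is just an example tracepoint. */
static void dump_sched_switch(void *data, int size, int cpu)
{
	struct tep_event *event = trace_event__tp_format("sched", "sched_switch");

	if (IS_ERR(event) || event == NULL)
		return;

	event_format__fprintf(event, cpu, data, size, stdout);
}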