author     Michal Marek <mmarek@suse.cz>	2011-06-07 09:37:51 -0400
committer  Michal Marek <mmarek@suse.cz>	2011-06-07 09:37:51 -0400
commit     2e483528cebad089d0bb3f9aebb0ada22d968ffa (patch)
tree       d701405826b271e819a9a8500838cebd37b1364a /tools/perf
parent     163d3fe6a2357aba7b18b938d6ae6ce9570324e4 (diff)
parent     55922c9d1b84b89cb946c777fddccb3247e7df2c (diff)
Merge commit 'v3.0-rc1' into kbuild/kbuild
Diffstat (limited to 'tools/perf')
-rw-r--r--  tools/perf/Documentation/perf-script-perl.txt            |   1
-rw-r--r--  tools/perf/Documentation/perf-script-python.txt          |   1
-rw-r--r--  tools/perf/Documentation/perf-script.txt                 |  52
-rw-r--r--  tools/perf/Makefile                                      | 121
-rw-r--r--  tools/perf/builtin-annotate.c                            |   2
-rw-r--r--  tools/perf/builtin-record.c                              |  44
-rw-r--r--  tools/perf/builtin-report.c                              |  23
-rw-r--r--  tools/perf/builtin-script.c                              | 297
-rw-r--r--  tools/perf/builtin-stat.c                                | 580
-rw-r--r--  tools/perf/builtin-test.c                                |  21
-rw-r--r--  tools/perf/builtin-top.c                                 |  99
-rw-r--r--  tools/perf/config/feature-tests.mak (renamed from tools/perf/feature-tests.mak) | 16
-rw-r--r--  tools/perf/config/utilities.mak                          | 188
-rw-r--r--  tools/perf/util/cgroup.c                                 |   2
-rw-r--r--  tools/perf/util/debug.c                                  |  10
-rw-r--r--  tools/perf/util/debug.h                                  |   1
-rw-r--r--  tools/perf/util/event.c                                  |  63
-rw-r--r--  tools/perf/util/event.h                                  |  12
-rw-r--r--  tools/perf/util/evlist.c                                 | 188
-rw-r--r--  tools/perf/util/evlist.h                                 |   6
-rw-r--r--  tools/perf/util/evsel.c                                  |  69
-rw-r--r--  tools/perf/util/evsel.h                                  |   6
-rw-r--r--  tools/perf/util/header.c                                 |  39
-rw-r--r--  tools/perf/util/header.h                                 |   2
-rw-r--r--  tools/perf/util/include/asm/alternative-asm.h            |   8
-rw-r--r--  tools/perf/util/include/linux/const.h                    |   1
-rw-r--r--  tools/perf/util/include/linux/list.h                     |   4
-rw-r--r--  tools/perf/util/parse-events.c                           | 123
-rw-r--r--  tools/perf/util/probe-event.c                            |  19
-rw-r--r--  tools/perf/util/probe-finder.c                           | 232
-rw-r--r--  tools/perf/util/probe-finder.h                           |   2
-rw-r--r--  tools/perf/util/python.c                                 |  27
-rw-r--r--  tools/perf/util/session.c                                |  62
-rw-r--r--  tools/perf/util/session.h                                |   5
-rw-r--r--  tools/perf/util/setup.py                                 |   7
-rw-r--r--  tools/perf/util/string.c                                 |   2
-rw-r--r--  tools/perf/util/symbol.c                                 | 676
-rw-r--r--  tools/perf/util/symbol.h                                 |  81
-rw-r--r--  tools/perf/util/trace-event-parse.c                      |   1
-rw-r--r--  tools/perf/util/ui/browsers/annotate.c                   |   7
-rw-r--r--  tools/perf/util/ui/browsers/hists.c                      |   2
41 files changed, 2266 insertions, 836 deletions
diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt
index 5bb41e55a3ac..3152cca15501 100644
--- a/tools/perf/Documentation/perf-script-perl.txt
+++ b/tools/perf/Documentation/perf-script-perl.txt
@@ -63,7 +63,6 @@ The format file for the sched_wakep event defines the following fields
63 field:unsigned char common_flags; 63 field:unsigned char common_flags;
64 field:unsigned char common_preempt_count; 64 field:unsigned char common_preempt_count;
65 field:int common_pid; 65 field:int common_pid;
66 field:int common_lock_depth;
67 66
68 field:char comm[TASK_COMM_LEN]; 67 field:char comm[TASK_COMM_LEN];
69 field:pid_t pid; 68 field:pid_t pid;
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 36b38277422c..471022069119 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -463,7 +463,6 @@ The format file for the sched_wakep event defines the following fields
463 field:unsigned char common_flags; 463 field:unsigned char common_flags;
464 field:unsigned char common_preempt_count; 464 field:unsigned char common_preempt_count;
465 field:int common_pid; 465 field:int common_pid;
466 field:int common_lock_depth;
467 466
468 field:char comm[TASK_COMM_LEN]; 467 field:char comm[TASK_COMM_LEN];
469 field:pid_t pid; 468 field:pid_t pid;
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 66f040b30729..86c87e214b11 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -113,13 +113,61 @@ OPTIONS
113 Do various checks like samples ordering and lost events. 113 Do various checks like samples ordering and lost events.
114 114
115-f:: 115-f::
116--fields 116--fields::
117 Comma separated list of fields to print. Options are: 117 Comma separated list of fields to print. Options are:
118 comm, tid, pid, time, cpu, event, trace, sym. Field 118 comm, tid, pid, time, cpu, event, trace, sym. Field
119 list must be prepended with the type, trace, sw or hw, 119 list can be prepended with the type, trace, sw or hw,
120 to indicate to which event type the field list applies. 120 to indicate to which event type the field list applies.
121 e.g., -f sw:comm,tid,time,sym and -f trace:time,cpu,trace 121 e.g., -f sw:comm,tid,time,sym and -f trace:time,cpu,trace
122 122
123 perf script -f <fields>
124
125 is equivalent to:
126
127 perf script -f trace:<fields> -f sw:<fields> -f hw:<fields>
128
129 i.e., the specified fields apply to all event types if the type string
130 is not given.
131
132 The arguments are processed in the order received. A later usage can
133 reset a prior request. e.g.:
134
135 -f trace: -f comm,tid,time,sym
136
137 The first -f suppresses trace events (field list is ""), but then the
138 second invocation sets the fields to comm,tid,time,sym. In this case a
139 warning is given to the user:
140
141 "Overriding previous field request for all events."
142
143 Alternatively, consider the order:
144
145 -f comm,tid,time,sym -f trace:
146
147 The first -f sets the fields for all events and the second -f
148 suppresses trace events. The user is given a warning message about
149 the override, and the result of the above is that only S/W and H/W
150 events are displayed with the given fields.
151
152 For the 'wildcard' option, if a user-selected field is invalid for an
153 event type, a message is displayed to the user that the option is
154 ignored for that type. For example:
155
156 $ perf script -f comm,tid,trace
157 'trace' not valid for hardware events. Ignoring.
158 'trace' not valid for software events. Ignoring.
159
160 Alternatively, if the type is given and an invalid field is specified, it
161 is an error. For example:
162
163 perf script -v -f sw:comm,tid,trace
164 'trace' not valid for software events.
165
166 At this point usage is displayed, and perf-script exits.
167
168 Finally, a user may not set fields to none for all event types.
169 i.e., -f "" is not allowed.
170
123-k:: 171-k::
124--vmlinux=<file>:: 172--vmlinux=<file>::
125 vmlinux pathname 173 vmlinux pathname
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 158c30e8210c..032ba6398a5c 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -5,6 +5,8 @@ endif
5# The default target of this Makefile is... 5# The default target of this Makefile is...
6all: 6all:
7 7
8include config/utilities.mak
9
8ifneq ($(OUTPUT),) 10ifneq ($(OUTPUT),)
9# check that the output directory actually exists 11# check that the output directory actually exists
10OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) 12OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
@@ -13,6 +15,12 @@ endif
13 15
14# Define V to have a more verbose compile. 16# Define V to have a more verbose compile.
15# 17#
18# Define PYTHON to point to the python binary if the default
19# `python' is not correct; for example: PYTHON=python2
20#
21# Define PYTHON_CONFIG to point to the python-config binary if
22# the default `$(PYTHON)-config' is not correct.
23#
16# Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8 24# Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8
17# 25#
18# Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72. 26# Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72.
@@ -35,15 +43,21 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
35 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ 43 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
36 -e s/sh[234].*/sh/ ) 44 -e s/sh[234].*/sh/ )
37 45
46CC = $(CROSS_COMPILE)gcc
47AR = $(CROSS_COMPILE)ar
48
38# Additional ARCH settings for x86 49# Additional ARCH settings for x86
39ifeq ($(ARCH),i386) 50ifeq ($(ARCH),i386)
40 ARCH := x86 51 ARCH := x86
41endif 52endif
42ifeq ($(ARCH),x86_64) 53ifeq ($(ARCH),x86_64)
43 RAW_ARCH := x86_64 54 ARCH := x86
44 ARCH := x86 55 IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
45 ARCH_CFLAGS := -DARCH_X86_64 56 ifeq (${IS_X86_64}, 1)
46 ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S 57 RAW_ARCH := x86_64
58 ARCH_CFLAGS := -DARCH_X86_64
59 ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S
60 endif
47endif 61endif
48 62
49# 63#
@@ -119,8 +133,6 @@ lib = lib
119 133
120export prefix bindir sharedir sysconfdir 134export prefix bindir sharedir sysconfdir
121 135
122CC = $(CROSS_COMPILE)gcc
123AR = $(CROSS_COMPILE)ar
124RM = rm -f 136RM = rm -f
125MKDIR = mkdir 137MKDIR = mkdir
126FIND = find 138FIND = find
@@ -130,7 +142,7 @@ INSTALL = install
130# explicitly what architecture to check for. Fix this up for yours.. 142# explicitly what architecture to check for. Fix this up for yours..
131SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ 143SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
132 144
133-include feature-tests.mak 145-include config/feature-tests.mak
134 146
135ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y) 147ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y)
136 CFLAGS := $(CFLAGS) -fstack-protector-all 148 CFLAGS := $(CFLAGS) -fstack-protector-all
@@ -165,8 +177,10 @@ grep-libs = $(filter -l%,$(1))
165strip-libs = $(filter-out -l%,$(1)) 177strip-libs = $(filter-out -l%,$(1))
166 178
167$(OUTPUT)python/perf.so: $(PYRF_OBJS) 179$(OUTPUT)python/perf.so: $(PYRF_OBJS)
168 $(QUIET_GEN)python util/setup.py --quiet build_ext --build-lib='$(OUTPUT)python' \ 180 $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \
169 --build-temp='$(OUTPUT)python/temp' 181 --quiet build_ext \
182 --build-lib='$(OUTPUT)python' \
183 --build-temp='$(OUTPUT)python/temp'
170# 184#
171# No Perl scripts right now: 185# No Perl scripts right now:
172# 186#
@@ -201,11 +215,13 @@ LIB_FILE=$(OUTPUT)libperf.a
201LIB_H += ../../include/linux/perf_event.h 215LIB_H += ../../include/linux/perf_event.h
202LIB_H += ../../include/linux/rbtree.h 216LIB_H += ../../include/linux/rbtree.h
203LIB_H += ../../include/linux/list.h 217LIB_H += ../../include/linux/list.h
218LIB_H += ../../include/linux/const.h
204LIB_H += ../../include/linux/hash.h 219LIB_H += ../../include/linux/hash.h
205LIB_H += ../../include/linux/stringify.h 220LIB_H += ../../include/linux/stringify.h
206LIB_H += util/include/linux/bitmap.h 221LIB_H += util/include/linux/bitmap.h
207LIB_H += util/include/linux/bitops.h 222LIB_H += util/include/linux/bitops.h
208LIB_H += util/include/linux/compiler.h 223LIB_H += util/include/linux/compiler.h
224LIB_H += util/include/linux/const.h
209LIB_H += util/include/linux/ctype.h 225LIB_H += util/include/linux/ctype.h
210LIB_H += util/include/linux/kernel.h 226LIB_H += util/include/linux/kernel.h
211LIB_H += util/include/linux/list.h 227LIB_H += util/include/linux/list.h
@@ -471,24 +487,74 @@ else
471 endif 487 endif
472endif 488endif
473 489
474ifdef NO_LIBPYTHON 490disable-python = $(eval $(disable-python_code))
475 BASIC_CFLAGS += -DNO_LIBPYTHON 491define disable-python_code
492 BASIC_CFLAGS += -DNO_LIBPYTHON
493 $(if $(1),$(warning No $(1) was found))
494 $(warning Python support won't be built)
495endef
496
497override PYTHON := \
498 $(call get-executable-or-default,PYTHON,python)
499
500ifndef PYTHON
501 $(call disable-python,python interpreter)
502 python-clean :=
476else 503else
477 PYTHON_EMBED_LDOPTS = $(shell python-config --ldflags 2>/dev/null) 504
478 PYTHON_EMBED_LDFLAGS = $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) 505 PYTHON_WORD := $(call shell-wordify,$(PYTHON))
479 PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) 506
480 PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null` 507 python-clean := $(PYTHON_WORD) util/setup.py clean \
481 FLAGS_PYTHON_EMBED=$(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) 508 --build-lib='$(OUTPUT)python' \
482 ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y) 509 --build-temp='$(OUTPUT)python/temp'
483 msg := $(warning No Python.h found, install python-dev[el] to have python support in 'perf script' and to build the python bindings) 510
484 BASIC_CFLAGS += -DNO_LIBPYTHON 511 ifdef NO_LIBPYTHON
485 else 512 $(call disable-python)
486 ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS) 513 else
487 EXTLIBS += $(PYTHON_EMBED_LIBADD) 514
488 LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o 515 override PYTHON_CONFIG := \
489 LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o 516 $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON)-config)
490 LANG_BINDINGS += $(OUTPUT)python/perf.so 517
491 endif 518 ifndef PYTHON_CONFIG
519 $(call disable-python,python-config tool)
520 else
521
522 PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
523
524 PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
525 PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
526 PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS))
527 PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
528 FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
529
530 ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y)
531 $(call disable-python,Python.h (for Python 2.x))
532 else
533
534 ifneq ($(call try-cc,$(SOURCE_PYTHON_VERSION),$(FLAGS_PYTHON_EMBED)),y)
535 $(warning Python 3 is not yet supported; please set)
536 $(warning PYTHON and/or PYTHON_CONFIG appropriately.)
537 $(warning If you also have Python 2 installed, then)
538 $(warning try something like:)
539 $(warning $(and ,))
540 $(warning $(and ,) make PYTHON=python2)
541 $(warning $(and ,))
542 $(warning Otherwise, disable Python support entirely:)
543 $(warning $(and ,))
544 $(warning $(and ,) make NO_LIBPYTHON=1)
545 $(warning $(and ,))
546 $(error $(and ,))
547 else
548 ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS)
549 EXTLIBS += $(PYTHON_EMBED_LIBADD)
550 LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
551 LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
552 LANG_BINDINGS += $(OUTPUT)python/perf.so
553 endif
554
555 endif
556 endif
557 endif
492endif 558endif
493 559
494ifdef NO_DEMANGLE 560ifdef NO_DEMANGLE
@@ -829,8 +895,7 @@ clean:
829 $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* 895 $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
830 $(MAKE) -C Documentation/ clean 896 $(MAKE) -C Documentation/ clean
831 $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS 897 $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
832 @python util/setup.py clean --build-lib='$(OUTPUT)python' \ 898 $(python-clean)
833 --build-temp='$(OUTPUT)python/temp'
834 899
835.PHONY: all install clean strip 900.PHONY: all install clean strip
836.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell 901.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
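
Note on the Python detection added above: the SOURCE_PYTHON_VERSION probe comes from
config/feature-tests.mak, try-cc compiles it with $(FLAGS_PYTHON_EMBED), and the
"Python 3 is not yet supported" branch fires when that compile fails. A minimal sketch
of such a probe, assuming it is written to build cleanly against Python 2 headers and
to break deliberately on Python 3 (the exact source shipped in feature-tests.mak may differ):

/*
 * Hypothetical stand-in for SOURCE_PYTHON_VERSION. try-cc builds this with
 * the flags reported by $(PYTHON_CONFIG); the #error makes the build fail on
 * a Python 3 installation, which is what triggers the warning block above.
 */
#include <Python.h>

#if PY_VERSION_HEX >= 0x03000000
#error "Python 3 headers detected"
#endif

int main(void)
{
	return 0;
}
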
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index e18eb7ed30ae..7b139e1e7e86 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -8,8 +8,6 @@
8#include "builtin.h" 8#include "builtin.h"
9 9
10#include "util/util.h" 10#include "util/util.h"
11
12#include "util/util.h"
13#include "util/color.h" 11#include "util/color.h"
14#include <linux/list.h> 12#include <linux/list.h>
15#include "util/cache.h" 13#include "util/cache.h"
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 6febcc168a8c..8e2c85798185 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -41,7 +41,7 @@ static u64 user_interval = ULLONG_MAX;
41static u64 default_interval = 0; 41static u64 default_interval = 0;
42 42
43static unsigned int page_size; 43static unsigned int page_size;
44static unsigned int mmap_pages = 128; 44static unsigned int mmap_pages = UINT_MAX;
45static unsigned int user_freq = UINT_MAX; 45static unsigned int user_freq = UINT_MAX;
46static int freq = 1000; 46static int freq = 1000;
47static int output; 47static int output;
@@ -163,6 +163,7 @@ static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
163 struct perf_event_attr *attr = &evsel->attr; 163 struct perf_event_attr *attr = &evsel->attr;
164 int track = !evsel->idx; /* only the first counter needs these */ 164 int track = !evsel->idx; /* only the first counter needs these */
165 165
166 attr->inherit = !no_inherit;
166 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 167 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
167 PERF_FORMAT_TOTAL_TIME_RUNNING | 168 PERF_FORMAT_TOTAL_TIME_RUNNING |
168 PERF_FORMAT_ID; 169 PERF_FORMAT_ID;
@@ -251,6 +252,9 @@ static void open_counters(struct perf_evlist *evlist)
251{ 252{
252 struct perf_evsel *pos; 253 struct perf_evsel *pos;
253 254
255 if (evlist->cpus->map[0] < 0)
256 no_inherit = true;
257
254 list_for_each_entry(pos, &evlist->entries, node) { 258 list_for_each_entry(pos, &evlist->entries, node) {
255 struct perf_event_attr *attr = &pos->attr; 259 struct perf_event_attr *attr = &pos->attr;
256 /* 260 /*
@@ -271,15 +275,13 @@ static void open_counters(struct perf_evlist *evlist)
271retry_sample_id: 275retry_sample_id:
272 attr->sample_id_all = sample_id_all_avail ? 1 : 0; 276 attr->sample_id_all = sample_id_all_avail ? 1 : 0;
273try_again: 277try_again:
274 if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group, 278 if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group) < 0) {
275 !no_inherit) < 0) {
276 int err = errno; 279 int err = errno;
277 280
278 if (err == EPERM || err == EACCES) 281 if (err == EPERM || err == EACCES) {
279 die("Permission error - are you root?\n" 282 ui__warning_paranoid();
280 "\t Consider tweaking" 283 exit(EXIT_FAILURE);
281 " /proc/sys/kernel/perf_event_paranoid.\n"); 284 } else if (err == ENODEV && cpu_list) {
282 else if (err == ENODEV && cpu_list) {
283 die("No such device - did you specify" 285 die("No such device - did you specify"
284 " an out-of-range profile CPU?\n"); 286 " an out-of-range profile CPU?\n");
285 } else if (err == EINVAL && sample_id_all_avail) { 287 } else if (err == EINVAL && sample_id_all_avail) {
@@ -302,11 +304,19 @@ try_again:
302 && attr->config == PERF_COUNT_HW_CPU_CYCLES) { 304 && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
303 305
304 if (verbose) 306 if (verbose)
305 warning(" ... trying to fall back to cpu-clock-ticks\n"); 307 ui__warning("The cycles event is not supported, "
308 "trying to fall back to cpu-clock-ticks\n");
306 attr->type = PERF_TYPE_SOFTWARE; 309 attr->type = PERF_TYPE_SOFTWARE;
307 attr->config = PERF_COUNT_SW_CPU_CLOCK; 310 attr->config = PERF_COUNT_SW_CPU_CLOCK;
308 goto try_again; 311 goto try_again;
309 } 312 }
313
314 if (err == ENOENT) {
315 ui__warning("The %s event is not supported.\n",
316 event_name(pos));
317 exit(EXIT_FAILURE);
318 }
319
310 printf("\n"); 320 printf("\n");
311 error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n", 321 error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
312 err, strerror(err)); 322 err, strerror(err));
@@ -417,7 +427,7 @@ static void mmap_read_all(void)
417{ 427{
418 int i; 428 int i;
419 429
420 for (i = 0; i < evsel_list->cpus->nr; i++) { 430 for (i = 0; i < evsel_list->nr_mmaps; i++) {
421 if (evsel_list->mmap[i].base) 431 if (evsel_list->mmap[i].base)
422 mmap_read(&evsel_list->mmap[i]); 432 mmap_read(&evsel_list->mmap[i]);
423 } 433 }
@@ -506,6 +516,10 @@ static int __cmd_record(int argc, const char **argv)
506 if (have_tracepoints(&evsel_list->entries)) 516 if (have_tracepoints(&evsel_list->entries))
507 perf_header__set_feat(&session->header, HEADER_TRACE_INFO); 517 perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
508 518
519 /* 512 kiB: default amount of unprivileged mlocked memory */
520 if (mmap_pages == UINT_MAX)
521 mmap_pages = (512 * 1024) / page_size;
522
509 if (forks) { 523 if (forks) {
510 child_pid = fork(); 524 child_pid = fork();
511 if (child_pid < 0) { 525 if (child_pid < 0) {
@@ -809,6 +823,16 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
809 823
810 symbol__init(); 824 symbol__init();
811 825
826 if (symbol_conf.kptr_restrict)
827 pr_warning(
828"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
829"check /proc/sys/kernel/kptr_restrict.\n\n"
830"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
831"file is not found in the buildid cache or in the vmlinux path.\n\n"
832"Samples in kernel modules won't be resolved at all.\n\n"
833"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
834"even with a suitable vmlinux or kallsyms file.\n\n");
835
812 if (no_buildid_cache || no_buildid) 836 if (no_buildid_cache || no_buildid)
813 disable_buildid_cache(); 837 disable_buildid_cache();
814 838
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 498c6f70a747..287a173523a7 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -116,6 +116,9 @@ static int process_sample_event(union perf_event *event,
116 if (al.filtered || (hide_unresolved && al.sym == NULL)) 116 if (al.filtered || (hide_unresolved && al.sym == NULL))
117 return 0; 117 return 0;
118 118
119 if (al.map != NULL)
120 al.map->dso->hit = 1;
121
119 if (perf_session__add_hist_entry(session, &al, sample, evsel)) { 122 if (perf_session__add_hist_entry(session, &al, sample, evsel)) {
120 pr_debug("problem incrementing symbol period, skipping event\n"); 123 pr_debug("problem incrementing symbol period, skipping event\n");
121 return -1; 124 return -1;
@@ -249,6 +252,8 @@ static int __cmd_report(void)
249 u64 nr_samples; 252 u64 nr_samples;
250 struct perf_session *session; 253 struct perf_session *session;
251 struct perf_evsel *pos; 254 struct perf_evsel *pos;
255 struct map *kernel_map;
256 struct kmap *kernel_kmap;
252 const char *help = "For a higher level overview, try: perf report --sort comm,dso"; 257 const char *help = "For a higher level overview, try: perf report --sort comm,dso";
253 258
254 signal(SIGINT, sig_handler); 259 signal(SIGINT, sig_handler);
@@ -268,6 +273,24 @@ static int __cmd_report(void)
268 if (ret) 273 if (ret)
269 goto out_delete; 274 goto out_delete;
270 275
276 kernel_map = session->host_machine.vmlinux_maps[MAP__FUNCTION];
277 kernel_kmap = map__kmap(kernel_map);
278 if (kernel_map == NULL ||
279 (kernel_map->dso->hit &&
280 (kernel_kmap->ref_reloc_sym == NULL ||
281 kernel_kmap->ref_reloc_sym->addr == 0))) {
282 const struct dso *kdso = kernel_map->dso;
283
284 ui__warning(
285"Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
286"Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
287"Samples in kernel modules can't be resolved as well.\n\n",
288 RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION]) ?
289"As no suitable kallsyms nor vmlinux was found, kernel samples\n"
290"can't be resolved." :
291"If some relocation was applied (e.g. kexec) symbols may be misresolved.");
292 }
293
271 if (dump_trace) { 294 if (dump_trace) {
272 perf_session__fprintf_nr_events(session, stdout); 295 perf_session__fprintf_nr_events(session, stdout);
273 goto out_delete; 296 goto out_delete;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index ac574ea23917..22747de7234b 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -10,7 +10,6 @@
10#include "util/symbol.h" 10#include "util/symbol.h"
11#include "util/thread.h" 11#include "util/thread.h"
12#include "util/trace-event.h" 12#include "util/trace-event.h"
13#include "util/parse-options.h"
14#include "util/util.h" 13#include "util/util.h"
15#include "util/evlist.h" 14#include "util/evlist.h"
16#include "util/evsel.h" 15#include "util/evsel.h"
@@ -49,57 +48,169 @@ struct output_option {
49}; 48};
50 49
51/* default set to maintain compatibility with current format */ 50/* default set to maintain compatibility with current format */
52static u64 output_fields[PERF_TYPE_MAX] = { 51static struct {
53 [PERF_TYPE_HARDWARE] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ 52 bool user_set;
54 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ 53 bool wildcard_set;
55 PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, 54 u64 fields;
56 55 u64 invalid_fields;
57 [PERF_TYPE_SOFTWARE] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ 56} output[PERF_TYPE_MAX] = {
58 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ 57
59 PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, 58 [PERF_TYPE_HARDWARE] = {
60 59 .user_set = false,
61 [PERF_TYPE_TRACEPOINT] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ 60
62 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ 61 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
63 PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE, 62 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
63 PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM,
64
65 .invalid_fields = PERF_OUTPUT_TRACE,
66 },
67
68 [PERF_TYPE_SOFTWARE] = {
69 .user_set = false,
70
71 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
72 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
73 PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM,
74
75 .invalid_fields = PERF_OUTPUT_TRACE,
76 },
77
78 [PERF_TYPE_TRACEPOINT] = {
79 .user_set = false,
80
81 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
82 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
83 PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE,
84 },
85
86 [PERF_TYPE_RAW] = {
87 .user_set = false,
88
89 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
90 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
91 PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM,
92
93 .invalid_fields = PERF_OUTPUT_TRACE,
94 },
64}; 95};
65 96
66static bool output_set_by_user; 97static bool output_set_by_user(void)
98{
99 int j;
100 for (j = 0; j < PERF_TYPE_MAX; ++j) {
101 if (output[j].user_set)
102 return true;
103 }
104 return false;
105}
106
107static const char *output_field2str(enum perf_output_field field)
108{
109 int i, imax = ARRAY_SIZE(all_output_options);
110 const char *str = "";
111
112 for (i = 0; i < imax; ++i) {
113 if (all_output_options[i].field == field) {
114 str = all_output_options[i].str;
115 break;
116 }
117 }
118 return str;
119}
120
121#define PRINT_FIELD(x) (output[attr->type].fields & PERF_OUTPUT_##x)
122
123static int perf_event_attr__check_stype(struct perf_event_attr *attr,
124 u64 sample_type, const char *sample_msg,
125 enum perf_output_field field)
126{
127 int type = attr->type;
128 const char *evname;
129
130 if (attr->sample_type & sample_type)
131 return 0;
67 132
68#define PRINT_FIELD(x) (output_fields[attr->type] & PERF_OUTPUT_##x) 133 if (output[type].user_set) {
134 evname = __event_name(attr->type, attr->config);
135 pr_err("Samples for '%s' event do not have %s attribute set. "
136 "Cannot print '%s' field.\n",
137 evname, sample_msg, output_field2str(field));
138 return -1;
139 }
140
141 /* user did not ask for it explicitly so remove from the default list */
142 output[type].fields &= ~field;
143 evname = __event_name(attr->type, attr->config);
144 pr_debug("Samples for '%s' event do not have %s attribute set. "
145 "Skipping '%s' field.\n",
146 evname, sample_msg, output_field2str(field));
147
148 return 0;
149}
69 150
70static int perf_session__check_attr(struct perf_session *session, 151static int perf_evsel__check_attr(struct perf_evsel *evsel,
71 struct perf_event_attr *attr) 152 struct perf_session *session)
72{ 153{
154 struct perf_event_attr *attr = &evsel->attr;
155
73 if (PRINT_FIELD(TRACE) && 156 if (PRINT_FIELD(TRACE) &&
74 !perf_session__has_traces(session, "record -R")) 157 !perf_session__has_traces(session, "record -R"))
75 return -EINVAL; 158 return -EINVAL;
76 159
77 if (PRINT_FIELD(SYM)) { 160 if (PRINT_FIELD(SYM)) {
78 if (!(session->sample_type & PERF_SAMPLE_IP)) { 161 if (perf_event_attr__check_stype(attr, PERF_SAMPLE_IP, "IP",
79 pr_err("Samples do not contain IP data.\n"); 162 PERF_OUTPUT_SYM))
80 return -EINVAL; 163 return -EINVAL;
81 } 164
82 if (!no_callchain && 165 if (!no_callchain &&
83 !(session->sample_type & PERF_SAMPLE_CALLCHAIN)) 166 !(attr->sample_type & PERF_SAMPLE_CALLCHAIN))
84 symbol_conf.use_callchain = false; 167 symbol_conf.use_callchain = false;
85 } 168 }
86 169
87 if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) && 170 if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) &&
88 !(session->sample_type & PERF_SAMPLE_TID)) { 171 perf_event_attr__check_stype(attr, PERF_SAMPLE_TID, "TID",
89 pr_err("Samples do not contain TID/PID data.\n"); 172 PERF_OUTPUT_TID|PERF_OUTPUT_PID))
90 return -EINVAL; 173 return -EINVAL;
91 }
92 174
93 if (PRINT_FIELD(TIME) && 175 if (PRINT_FIELD(TIME) &&
94 !(session->sample_type & PERF_SAMPLE_TIME)) { 176 perf_event_attr__check_stype(attr, PERF_SAMPLE_TIME, "TIME",
95 pr_err("Samples do not contain timestamps.\n"); 177 PERF_OUTPUT_TIME))
96 return -EINVAL; 178 return -EINVAL;
97 }
98 179
99 if (PRINT_FIELD(CPU) && 180 if (PRINT_FIELD(CPU) &&
100 !(session->sample_type & PERF_SAMPLE_CPU)) { 181 perf_event_attr__check_stype(attr, PERF_SAMPLE_CPU, "CPU",
101 pr_err("Samples do not contain cpu.\n"); 182 PERF_OUTPUT_CPU))
102 return -EINVAL; 183 return -EINVAL;
184
185 return 0;
186}
187
188/*
189 * verify all user requested events exist and the samples
190 * have the expected data
191 */
192static int perf_session__check_output_opt(struct perf_session *session)
193{
194 int j;
195 struct perf_evsel *evsel;
196
197 for (j = 0; j < PERF_TYPE_MAX; ++j) {
198 evsel = perf_session__find_first_evtype(session, j);
199
200 /*
201 * even if fields is set to 0 (ie., show nothing) event must
202 * exist if user explicitly includes it on the command line
203 */
204 if (!evsel && output[j].user_set && !output[j].wildcard_set) {
205 pr_err("%s events do not exist. "
206 "Remove corresponding -f option to proceed.\n",
207 event_type(j));
208 return -1;
209 }
210
211 if (evsel && output[j].fields &&
212 perf_evsel__check_attr(evsel, session))
213 return -1;
103 } 214 }
104 215
105 return 0; 216 return 0;
@@ -168,10 +279,7 @@ static void process_event(union perf_event *event __unused,
168{ 279{
169 struct perf_event_attr *attr = &evsel->attr; 280 struct perf_event_attr *attr = &evsel->attr;
170 281
171 if (output_fields[attr->type] == 0) 282 if (output[attr->type].fields == 0)
172 return;
173
174 if (perf_session__check_attr(session, attr) < 0)
175 return; 283 return;
176 284
177 print_sample_start(sample, thread, attr); 285 print_sample_start(sample, thread, attr);
@@ -451,6 +559,7 @@ static int parse_output_fields(const struct option *opt __used,
451{ 559{
452 char *tok; 560 char *tok;
453 int i, imax = sizeof(all_output_options) / sizeof(struct output_option); 561 int i, imax = sizeof(all_output_options) / sizeof(struct output_option);
562 int j;
454 int rc = 0; 563 int rc = 0;
455 char *str = strdup(arg); 564 char *str = strdup(arg);
456 int type = -1; 565 int type = -1;
@@ -458,52 +567,99 @@ static int parse_output_fields(const struct option *opt __used,
458 if (!str) 567 if (!str)
459 return -ENOMEM; 568 return -ENOMEM;
460 569
461 tok = strtok(str, ":"); 570 /* first word can state for which event type the user is specifying
462 if (!tok) { 571 * the fields. If no type exists, the specified fields apply to all
463 fprintf(stderr, 572 * event types found in the file minus the invalid fields for a type.
464 "Invalid field string - not prepended with type.");
465 return -EINVAL;
466 }
467
468 /* first word should state which event type user
469 * is specifying the fields
470 */ 573 */
471 if (!strcmp(tok, "hw")) 574 tok = strchr(str, ':');
472 type = PERF_TYPE_HARDWARE; 575 if (tok) {
473 else if (!strcmp(tok, "sw")) 576 *tok = '\0';
474 type = PERF_TYPE_SOFTWARE; 577 tok++;
475 else if (!strcmp(tok, "trace")) 578 if (!strcmp(str, "hw"))
476 type = PERF_TYPE_TRACEPOINT; 579 type = PERF_TYPE_HARDWARE;
477 else { 580 else if (!strcmp(str, "sw"))
478 fprintf(stderr, "Invalid event type in field string."); 581 type = PERF_TYPE_SOFTWARE;
479 return -EINVAL; 582 else if (!strcmp(str, "trace"))
583 type = PERF_TYPE_TRACEPOINT;
584 else if (!strcmp(str, "raw"))
585 type = PERF_TYPE_RAW;
586 else {
587 fprintf(stderr, "Invalid event type in field string.\n");
588 return -EINVAL;
589 }
590
591 if (output[type].user_set)
592 pr_warning("Overriding previous field request for %s events.\n",
593 event_type(type));
594
595 output[type].fields = 0;
596 output[type].user_set = true;
597 output[type].wildcard_set = false;
598
599 } else {
600 tok = str;
601 if (strlen(str) == 0) {
602 fprintf(stderr,
603 "Cannot set fields to 'none' for all event types.\n");
604 rc = -EINVAL;
605 goto out;
606 }
607
608 if (output_set_by_user())
609 pr_warning("Overriding previous field request for all events.\n");
610
611 for (j = 0; j < PERF_TYPE_MAX; ++j) {
612 output[j].fields = 0;
613 output[j].user_set = true;
614 output[j].wildcard_set = true;
615 }
480 } 616 }
481 617
482 output_fields[type] = 0; 618 tok = strtok(tok, ",");
483 while (1) { 619 while (tok) {
484 tok = strtok(NULL, ",");
485 if (!tok)
486 break;
487 for (i = 0; i < imax; ++i) { 620 for (i = 0; i < imax; ++i) {
488 if (strcmp(tok, all_output_options[i].str) == 0) { 621 if (strcmp(tok, all_output_options[i].str) == 0)
489 output_fields[type] |= all_output_options[i].field;
490 break; 622 break;
491 }
492 } 623 }
493 if (i == imax) { 624 if (i == imax) {
494 fprintf(stderr, "Invalid field requested."); 625 fprintf(stderr, "Invalid field requested.\n");
495 rc = -EINVAL; 626 rc = -EINVAL;
496 break; 627 goto out;
497 } 628 }
498 }
499 629
500 if (output_fields[type] == 0) { 630 if (type == -1) {
501 pr_debug("No fields requested for %s type. " 631 /* add user option to all events types for
502 "Events will not be displayed\n", event_type(type)); 632 * which it is valid
633 */
634 for (j = 0; j < PERF_TYPE_MAX; ++j) {
635 if (output[j].invalid_fields & all_output_options[i].field) {
636 pr_warning("\'%s\' not valid for %s events. Ignoring.\n",
637 all_output_options[i].str, event_type(j));
638 } else
639 output[j].fields |= all_output_options[i].field;
640 }
641 } else {
642 if (output[type].invalid_fields & all_output_options[i].field) {
643 fprintf(stderr, "\'%s\' not valid for %s events.\n",
644 all_output_options[i].str, event_type(type));
645
646 rc = -EINVAL;
647 goto out;
648 }
649 output[type].fields |= all_output_options[i].field;
650 }
651
652 tok = strtok(NULL, ",");
503 } 653 }
504 654
505 output_set_by_user = true; 655 if (type >= 0) {
656 if (output[type].fields == 0) {
657 pr_debug("No fields requested for %s type. "
658 "Events will not be displayed.\n", event_type(type));
659 }
660 }
506 661
662out:
507 free(str); 663 free(str);
508 return rc; 664 return rc;
509} 665}
@@ -829,7 +985,7 @@ static const struct option options[] = {
829 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 985 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
830 "Look for files with symbols relative to this directory"), 986 "Look for files with symbols relative to this directory"),
831 OPT_CALLBACK('f', "fields", NULL, "str", 987 OPT_CALLBACK('f', "fields", NULL, "str",
832 "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace. Fields: comm,tid,pid,time,cpu,event,trace,sym", 988 "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,sym",
833 parse_output_fields), 989 parse_output_fields),
834 990
835 OPT_END() 991 OPT_END()
@@ -1020,7 +1176,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
1020 struct stat perf_stat; 1176 struct stat perf_stat;
1021 int input; 1177 int input;
1022 1178
1023 if (output_set_by_user) { 1179 if (output_set_by_user()) {
1024 fprintf(stderr, 1180 fprintf(stderr,
1025 "custom fields not supported for generated scripts"); 1181 "custom fields not supported for generated scripts");
1026 return -1; 1182 return -1;
@@ -1060,6 +1216,11 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
1060 pr_debug("perf script started with script %s\n\n", script_name); 1216 pr_debug("perf script started with script %s\n\n", script_name);
1061 } 1217 }
1062 1218
1219
1220 err = perf_session__check_output_opt(session);
1221 if (err < 0)
1222 goto out;
1223
1063 err = __cmd_script(session); 1224 err = __cmd_script(session);
1064 1225
1065 perf_session__delete(session); 1226 perf_session__delete(session);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e2109f9b43eb..a9f06715e44d 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -6,24 +6,28 @@
6 * 6 *
7 * Sample output: 7 * Sample output:
8 8
9 $ perf stat ~/hackbench 10 9 $ perf stat ./hackbench 10
10 Time: 0.104
11 10
12 Performance counter stats for '/home/mingo/hackbench': 11 Time: 0.118
13 12
14 1255.538611 task clock ticks # 10.143 CPU utilization factor 13 Performance counter stats for './hackbench 10':
15 54011 context switches # 0.043 M/sec
16 385 CPU migrations # 0.000 M/sec
17 17755 pagefaults # 0.014 M/sec
18 3808323185 CPU cycles # 3033.219 M/sec
19 1575111190 instructions # 1254.530 M/sec
20 17367895 cache references # 13.833 M/sec
21 7674421 cache misses # 6.112 M/sec
22 14
23 Wall-clock time elapsed: 123.786620 msecs 15 1708.761321 task-clock # 11.037 CPUs utilized
16 41,190 context-switches # 0.024 M/sec
17 6,735 CPU-migrations # 0.004 M/sec
18 17,318 page-faults # 0.010 M/sec
19 5,205,202,243 cycles # 3.046 GHz
20 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle
21 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle
22 2,603,501,247 instructions # 0.50 insns per cycle
23 # 1.48 stalled cycles per insn
24 484,357,498 branches # 283.455 M/sec
25 6,388,934 branch-misses # 1.32% of all branches
26
27 0.154822978 seconds time elapsed
24 28
25 * 29 *
26 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com> 30 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
27 * 31 *
28 * Improvements and fixes by: 32 * Improvements and fixes by:
29 * 33 *
@@ -46,6 +50,7 @@
46#include "util/evlist.h" 50#include "util/evlist.h"
47#include "util/evsel.h" 51#include "util/evsel.h"
48#include "util/debug.h" 52#include "util/debug.h"
53#include "util/color.h"
49#include "util/header.h" 54#include "util/header.h"
50#include "util/cpumap.h" 55#include "util/cpumap.h"
51#include "util/thread.h" 56#include "util/thread.h"
@@ -65,14 +70,107 @@ static struct perf_event_attr default_attrs[] = {
65 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 70 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
66 71
67 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, 72 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
73 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
74 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
68 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, 75 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
69 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 76 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
70 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, 77 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
71 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES },
72 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES },
73 78
74}; 79};
75 80
81/*
82 * Detailed stats (-d), covering the L1 and last level data caches:
83 */
84static struct perf_event_attr detailed_attrs[] = {
85
86 { .type = PERF_TYPE_HW_CACHE,
87 .config =
88 PERF_COUNT_HW_CACHE_L1D << 0 |
89 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
90 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
91
92 { .type = PERF_TYPE_HW_CACHE,
93 .config =
94 PERF_COUNT_HW_CACHE_L1D << 0 |
95 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
96 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
97
98 { .type = PERF_TYPE_HW_CACHE,
99 .config =
100 PERF_COUNT_HW_CACHE_LL << 0 |
101 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
102 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
103
104 { .type = PERF_TYPE_HW_CACHE,
105 .config =
106 PERF_COUNT_HW_CACHE_LL << 0 |
107 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
108 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
109};
110
111/*
112 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
113 */
114static struct perf_event_attr very_detailed_attrs[] = {
115
116 { .type = PERF_TYPE_HW_CACHE,
117 .config =
118 PERF_COUNT_HW_CACHE_L1I << 0 |
119 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
120 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
121
122 { .type = PERF_TYPE_HW_CACHE,
123 .config =
124 PERF_COUNT_HW_CACHE_L1I << 0 |
125 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
126 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
127
128 { .type = PERF_TYPE_HW_CACHE,
129 .config =
130 PERF_COUNT_HW_CACHE_DTLB << 0 |
131 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
132 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
133
134 { .type = PERF_TYPE_HW_CACHE,
135 .config =
136 PERF_COUNT_HW_CACHE_DTLB << 0 |
137 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
138 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
139
140 { .type = PERF_TYPE_HW_CACHE,
141 .config =
142 PERF_COUNT_HW_CACHE_ITLB << 0 |
143 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
144 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
145
146 { .type = PERF_TYPE_HW_CACHE,
147 .config =
148 PERF_COUNT_HW_CACHE_ITLB << 0 |
149 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
150 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
151
152};
153
154/*
155 * Very, very detailed stats (-d -d -d), adding prefetch events:
156 */
157static struct perf_event_attr very_very_detailed_attrs[] = {
158
159 { .type = PERF_TYPE_HW_CACHE,
160 .config =
161 PERF_COUNT_HW_CACHE_L1D << 0 |
162 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
163 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
164
165 { .type = PERF_TYPE_HW_CACHE,
166 .config =
167 PERF_COUNT_HW_CACHE_L1D << 0 |
168 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
169 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
170};
171
172
173
76struct perf_evlist *evsel_list; 174struct perf_evlist *evsel_list;
77 175
78static bool system_wide = false; 176static bool system_wide = false;
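
The .config values in the detailed_attrs, very_detailed_attrs and very_very_detailed_attrs
tables above follow the generic hardware-cache encoding of the perf_event ABI: the cache id
sits in the low byte, the operation in bits 8-15, and the result (access or miss) in bits
16-23. A small helper spelling out that encoding, offered only as a sketch (the patch itself
composes the constants inline):

#include <linux/perf_event.h>

/* Sketch only, not part of this patch: build a PERF_TYPE_HW_CACHE config word. */
static __u64 hw_cache_config(__u64 cache, __u64 op, __u64 result)
{
	return cache | (op << 8) | (result << 16);
}

/*
 * Example: L1D read misses, as listed in detailed_attrs, would be
 * hw_cache_config(PERF_COUNT_HW_CACHE_L1D, PERF_COUNT_HW_CACHE_OP_READ,
 *                 PERF_COUNT_HW_CACHE_RESULT_MISS).
 */
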
@@ -86,6 +184,8 @@ static pid_t target_pid = -1;
86static pid_t target_tid = -1; 184static pid_t target_tid = -1;
87static pid_t child_pid = -1; 185static pid_t child_pid = -1;
88static bool null_run = false; 186static bool null_run = false;
187static int detailed_run = 0;
188static bool sync_run = false;
89static bool big_num = true; 189static bool big_num = true;
90static int big_num_opt = -1; 190static int big_num_opt = -1;
91static const char *cpu_list; 191static const char *cpu_list;
@@ -156,7 +256,15 @@ static double stddev_stats(struct stats *stats)
156 256
157struct stats runtime_nsecs_stats[MAX_NR_CPUS]; 257struct stats runtime_nsecs_stats[MAX_NR_CPUS];
158struct stats runtime_cycles_stats[MAX_NR_CPUS]; 258struct stats runtime_cycles_stats[MAX_NR_CPUS];
259struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
260struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
159struct stats runtime_branches_stats[MAX_NR_CPUS]; 261struct stats runtime_branches_stats[MAX_NR_CPUS];
262struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
263struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
264struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
265struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
266struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
267struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
160struct stats walltime_nsecs_stats; 268struct stats walltime_nsecs_stats;
161 269
162static int create_perf_stat_counter(struct perf_evsel *evsel) 270static int create_perf_stat_counter(struct perf_evsel *evsel)
@@ -167,16 +275,17 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
167 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 275 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
168 PERF_FORMAT_TOTAL_TIME_RUNNING; 276 PERF_FORMAT_TOTAL_TIME_RUNNING;
169 277
278 attr->inherit = !no_inherit;
279
170 if (system_wide) 280 if (system_wide)
171 return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false, false); 281 return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false);
172 282
173 attr->inherit = !no_inherit;
174 if (target_pid == -1 && target_tid == -1) { 283 if (target_pid == -1 && target_tid == -1) {
175 attr->disabled = 1; 284 attr->disabled = 1;
176 attr->enable_on_exec = 1; 285 attr->enable_on_exec = 1;
177 } 286 }
178 287
179 return perf_evsel__open_per_thread(evsel, evsel_list->threads, false, false); 288 return perf_evsel__open_per_thread(evsel, evsel_list->threads, false);
180} 289}
181 290
182/* 291/*
@@ -192,6 +301,37 @@ static inline int nsec_counter(struct perf_evsel *evsel)
192} 301}
193 302
194/* 303/*
304 * Update various tracking values we maintain to print
305 * more semantic information such as miss/hit ratios,
306 * instruction rates, etc:
307 */
308static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
309{
310 if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
311 update_stats(&runtime_nsecs_stats[0], count[0]);
312 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
313 update_stats(&runtime_cycles_stats[0], count[0]);
314 else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
315 update_stats(&runtime_stalled_cycles_front_stats[0], count[0]);
316 else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
317 update_stats(&runtime_stalled_cycles_back_stats[0], count[0]);
318 else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
319 update_stats(&runtime_branches_stats[0], count[0]);
320 else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
321 update_stats(&runtime_cacherefs_stats[0], count[0]);
322 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
323 update_stats(&runtime_l1_dcache_stats[0], count[0]);
324 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
325 update_stats(&runtime_l1_icache_stats[0], count[0]);
326 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
327 update_stats(&runtime_ll_cache_stats[0], count[0]);
328 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
329 update_stats(&runtime_dtlb_cache_stats[0], count[0]);
330 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
331 update_stats(&runtime_itlb_cache_stats[0], count[0]);
332}
333
334/*
195 * Read out the results of a single counter: 335 * Read out the results of a single counter:
196 * aggregate counts across CPUs in system-wide mode 336 * aggregate counts across CPUs in system-wide mode
197 */ 337 */
@@ -216,12 +356,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
216 /* 356 /*
217 * Save the full runtime - to allow normalization during printout: 357 * Save the full runtime - to allow normalization during printout:
218 */ 358 */
219 if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) 359 update_shadow_stats(counter, count);
220 update_stats(&runtime_nsecs_stats[0], count[0]);
221 if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
222 update_stats(&runtime_cycles_stats[0], count[0]);
223 if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
224 update_stats(&runtime_branches_stats[0], count[0]);
225 360
226 return 0; 361 return 0;
227} 362}
@@ -241,12 +376,7 @@ static int read_counter(struct perf_evsel *counter)
241 376
242 count = counter->counts->cpu[cpu].values; 377 count = counter->counts->cpu[cpu].values;
243 378
244 if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) 379 update_shadow_stats(counter, count);
245 update_stats(&runtime_nsecs_stats[cpu], count[0]);
246 if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
247 update_stats(&runtime_cycles_stats[cpu], count[0]);
248 if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
249 update_stats(&runtime_branches_stats[cpu], count[0]);
250 } 380 }
251 381
252 return 0; 382 return 0;
@@ -314,13 +444,18 @@ static int run_perf_stat(int argc __used, const char **argv)
314 444
315 list_for_each_entry(counter, &evsel_list->entries, node) { 445 list_for_each_entry(counter, &evsel_list->entries, node) {
316 if (create_perf_stat_counter(counter) < 0) { 446 if (create_perf_stat_counter(counter) < 0) {
317 if (errno == -EPERM || errno == -EACCES) { 447 if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
448 if (verbose)
449 ui__warning("%s event is not supported by the kernel.\n",
450 event_name(counter));
451 continue;
452 }
453
454 if (errno == EPERM || errno == EACCES) {
318 error("You may not have permission to collect %sstats.\n" 455 error("You may not have permission to collect %sstats.\n"
319 "\t Consider tweaking" 456 "\t Consider tweaking"
320 " /proc/sys/kernel/perf_event_paranoid or running as root.", 457 " /proc/sys/kernel/perf_event_paranoid or running as root.",
321 system_wide ? "system-wide " : ""); 458 system_wide ? "system-wide " : "");
322 } else if (errno == ENOENT) {
323 error("%s event is not supported. ", event_name(counter));
324 } else { 459 } else {
325 error("open_counter returned with %d (%s). " 460 error("open_counter returned with %d (%s). "
326 "/bin/dmesg may provide additional information.\n", 461 "/bin/dmesg may provide additional information.\n",
@@ -371,6 +506,16 @@ static int run_perf_stat(int argc __used, const char **argv)
371 return WEXITSTATUS(status); 506 return WEXITSTATUS(status);
372} 507}
373 508
509static void print_noise_pct(double total, double avg)
510{
511 double pct = 0.0;
512
513 if (avg)
514 pct = 100.0*total/avg;
515
516 fprintf(stderr, " ( +-%6.2f%% )", pct);
517}
518
374static void print_noise(struct perf_evsel *evsel, double avg) 519static void print_noise(struct perf_evsel *evsel, double avg)
375{ 520{
376 struct perf_stat *ps; 521 struct perf_stat *ps;
@@ -379,15 +524,14 @@ static void print_noise(struct perf_evsel *evsel, double avg)
379 return; 524 return;
380 525
381 ps = evsel->priv; 526 ps = evsel->priv;
382 fprintf(stderr, " ( +- %7.3f%% )", 527 print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
383 100 * stddev_stats(&ps->res_stats[0]) / avg);
384} 528}
385 529
386static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) 530static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
387{ 531{
388 double msecs = avg / 1e6; 532 double msecs = avg / 1e6;
389 char cpustr[16] = { '\0', }; 533 char cpustr[16] = { '\0', };
390 const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-24s"; 534 const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-25s";
391 535
392 if (no_aggr) 536 if (no_aggr)
393 sprintf(cpustr, "CPU%*d%s", 537 sprintf(cpustr, "CPU%*d%s",
@@ -403,8 +547,191 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
403 return; 547 return;
404 548
405 if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) 549 if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
406 fprintf(stderr, " # %10.3f CPUs ", 550 fprintf(stderr, " # %8.3f CPUs utilized ", avg / avg_stats(&walltime_nsecs_stats));
407 avg / avg_stats(&walltime_nsecs_stats)); 551}
552
553static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg)
554{
555 double total, ratio = 0.0;
556 const char *color;
557
558 total = avg_stats(&runtime_cycles_stats[cpu]);
559
560 if (total)
561 ratio = avg / total * 100.0;
562
563 color = PERF_COLOR_NORMAL;
564 if (ratio > 50.0)
565 color = PERF_COLOR_RED;
566 else if (ratio > 30.0)
567 color = PERF_COLOR_MAGENTA;
568 else if (ratio > 10.0)
569 color = PERF_COLOR_YELLOW;
570
571 fprintf(stderr, " # ");
572 color_fprintf(stderr, color, "%6.2f%%", ratio);
573 fprintf(stderr, " frontend cycles idle ");
574}
575
576static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg)
577{
578 double total, ratio = 0.0;
579 const char *color;
580
581 total = avg_stats(&runtime_cycles_stats[cpu]);
582
583 if (total)
584 ratio = avg / total * 100.0;
585
586 color = PERF_COLOR_NORMAL;
587 if (ratio > 75.0)
588 color = PERF_COLOR_RED;
589 else if (ratio > 50.0)
590 color = PERF_COLOR_MAGENTA;
591 else if (ratio > 20.0)
592 color = PERF_COLOR_YELLOW;
593
594 fprintf(stderr, " # ");
595 color_fprintf(stderr, color, "%6.2f%%", ratio);
596 fprintf(stderr, " backend cycles idle ");
597}
598
599static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg)
600{
601 double total, ratio = 0.0;
602 const char *color;
603
604 total = avg_stats(&runtime_branches_stats[cpu]);
605
606 if (total)
607 ratio = avg / total * 100.0;
608
609 color = PERF_COLOR_NORMAL;
610 if (ratio > 20.0)
611 color = PERF_COLOR_RED;
612 else if (ratio > 10.0)
613 color = PERF_COLOR_MAGENTA;
614 else if (ratio > 5.0)
615 color = PERF_COLOR_YELLOW;
616
617 fprintf(stderr, " # ");
618 color_fprintf(stderr, color, "%6.2f%%", ratio);
619 fprintf(stderr, " of all branches ");
620}
621
622static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
623{
624 double total, ratio = 0.0;
625 const char *color;
626
627 total = avg_stats(&runtime_l1_dcache_stats[cpu]);
628
629 if (total)
630 ratio = avg / total * 100.0;
631
632 color = PERF_COLOR_NORMAL;
633 if (ratio > 20.0)
634 color = PERF_COLOR_RED;
635 else if (ratio > 10.0)
636 color = PERF_COLOR_MAGENTA;
637 else if (ratio > 5.0)
638 color = PERF_COLOR_YELLOW;
639
640 fprintf(stderr, " # ");
641 color_fprintf(stderr, color, "%6.2f%%", ratio);
642 fprintf(stderr, " of all L1-dcache hits ");
643}
644
645static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
646{
647 double total, ratio = 0.0;
648 const char *color;
649
650 total = avg_stats(&runtime_l1_icache_stats[cpu]);
651
652 if (total)
653 ratio = avg / total * 100.0;
654
655 color = PERF_COLOR_NORMAL;
656 if (ratio > 20.0)
657 color = PERF_COLOR_RED;
658 else if (ratio > 10.0)
659 color = PERF_COLOR_MAGENTA;
660 else if (ratio > 5.0)
661 color = PERF_COLOR_YELLOW;
662
663 fprintf(stderr, " # ");
664 color_fprintf(stderr, color, "%6.2f%%", ratio);
665 fprintf(stderr, " of all L1-icache hits ");
666}
667
668static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
669{
670 double total, ratio = 0.0;
671 const char *color;
672
673 total = avg_stats(&runtime_dtlb_cache_stats[cpu]);
674
675 if (total)
676 ratio = avg / total * 100.0;
677
678 color = PERF_COLOR_NORMAL;
679 if (ratio > 20.0)
680 color = PERF_COLOR_RED;
681 else if (ratio > 10.0)
682 color = PERF_COLOR_MAGENTA;
683 else if (ratio > 5.0)
684 color = PERF_COLOR_YELLOW;
685
686 fprintf(stderr, " # ");
687 color_fprintf(stderr, color, "%6.2f%%", ratio);
688 fprintf(stderr, " of all dTLB cache hits ");
689}
690
691static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
692{
693 double total, ratio = 0.0;
694 const char *color;
695
696 total = avg_stats(&runtime_itlb_cache_stats[cpu]);
697
698 if (total)
699 ratio = avg / total * 100.0;
700
701 color = PERF_COLOR_NORMAL;
702 if (ratio > 20.0)
703 color = PERF_COLOR_RED;
704 else if (ratio > 10.0)
705 color = PERF_COLOR_MAGENTA;
706 else if (ratio > 5.0)
707 color = PERF_COLOR_YELLOW;
708
709 fprintf(stderr, " # ");
710 color_fprintf(stderr, color, "%6.2f%%", ratio);
711 fprintf(stderr, " of all iTLB cache hits ");
712}
713
714static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
715{
716 double total, ratio = 0.0;
717 const char *color;
718
719 total = avg_stats(&runtime_ll_cache_stats[cpu]);
720
721 if (total)
722 ratio = avg / total * 100.0;
723
724 color = PERF_COLOR_NORMAL;
725 if (ratio > 20.0)
726 color = PERF_COLOR_RED;
727 else if (ratio > 10.0)
728 color = PERF_COLOR_MAGENTA;
729 else if (ratio > 5.0)
730 color = PERF_COLOR_YELLOW;
731
732 fprintf(stderr, " # ");
733 color_fprintf(stderr, color, "%6.2f%%", ratio);
734 fprintf(stderr, " of all LL-cache hits ");
408} 735}
409 736
410static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) 737static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
@@ -416,9 +743,9 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
416 if (csv_output) 743 if (csv_output)
417 fmt = "%s%.0f%s%s"; 744 fmt = "%s%.0f%s%s";
418 else if (big_num) 745 else if (big_num)
419 fmt = "%s%'18.0f%s%-24s"; 746 fmt = "%s%'18.0f%s%-25s";
420 else 747 else
421 fmt = "%s%18.0f%s%-24s"; 748 fmt = "%s%18.0f%s%-25s";
422 749
423 if (no_aggr) 750 if (no_aggr)
424 sprintf(cpustr, "CPU%*d%s", 751 sprintf(cpustr, "CPU%*d%s",
@@ -441,23 +768,83 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
441 if (total) 768 if (total)
442 ratio = avg / total; 769 ratio = avg / total;
443 770
444 fprintf(stderr, " # %10.3f IPC ", ratio); 771 fprintf(stderr, " # %5.2f insns per cycle ", ratio);
772
773 total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]);
774 total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu]));
775
776 if (total && avg) {
777 ratio = total / avg;
778 fprintf(stderr, "\n # %5.2f stalled cycles per insn", ratio);
779 }
780
445 } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && 781 } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
446 runtime_branches_stats[cpu].n != 0) { 782 runtime_branches_stats[cpu].n != 0) {
447 total = avg_stats(&runtime_branches_stats[cpu]); 783 print_branch_misses(cpu, evsel, avg);
784 } else if (
785 evsel->attr.type == PERF_TYPE_HW_CACHE &&
786 evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
787 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
788 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
789 runtime_l1_dcache_stats[cpu].n != 0) {
790 print_l1_dcache_misses(cpu, evsel, avg);
791 } else if (
792 evsel->attr.type == PERF_TYPE_HW_CACHE &&
793 evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
794 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
795 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
796 runtime_l1_icache_stats[cpu].n != 0) {
797 print_l1_icache_misses(cpu, evsel, avg);
798 } else if (
799 evsel->attr.type == PERF_TYPE_HW_CACHE &&
800 evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
801 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
802 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
803 runtime_dtlb_cache_stats[cpu].n != 0) {
804 print_dtlb_cache_misses(cpu, evsel, avg);
805 } else if (
806 evsel->attr.type == PERF_TYPE_HW_CACHE &&
807 evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
808 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
809 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
810 runtime_itlb_cache_stats[cpu].n != 0) {
811 print_itlb_cache_misses(cpu, evsel, avg);
812 } else if (
813 evsel->attr.type == PERF_TYPE_HW_CACHE &&
814 evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
815 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
816 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
817 runtime_ll_cache_stats[cpu].n != 0) {
818 print_ll_cache_misses(cpu, evsel, avg);
819 } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) &&
820 runtime_cacherefs_stats[cpu].n != 0) {
821 total = avg_stats(&runtime_cacherefs_stats[cpu]);
448 822
449 if (total) 823 if (total)
450 ratio = avg * 100 / total; 824 ratio = avg * 100 / total;
451 825
452 fprintf(stderr, " # %10.3f %% ", ratio); 826 fprintf(stderr, " # %8.3f %% of all cache refs ", ratio);
453 827
828 } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
829 print_stalled_cycles_frontend(cpu, evsel, avg);
830 } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
831 print_stalled_cycles_backend(cpu, evsel, avg);
832 } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
833 total = avg_stats(&runtime_nsecs_stats[cpu]);
834
835 if (total)
836 ratio = 1.0 * avg / total;
837
838 fprintf(stderr, " # %8.3f GHz ", ratio);
454 } else if (runtime_nsecs_stats[cpu].n != 0) { 839 } else if (runtime_nsecs_stats[cpu].n != 0) {
455 total = avg_stats(&runtime_nsecs_stats[cpu]); 840 total = avg_stats(&runtime_nsecs_stats[cpu]);
456 841
457 if (total) 842 if (total)
458 ratio = 1000.0 * avg / total; 843 ratio = 1000.0 * avg / total;
459 844
460 fprintf(stderr, " # %10.3f M/sec", ratio); 845 fprintf(stderr, " # %8.3f M/sec ", ratio);
846 } else {
847 fprintf(stderr, " ");
461 } 848 }
462} 849}
463 850
@@ -504,8 +891,7 @@ static void print_counter_aggr(struct perf_evsel *counter)
504 avg_enabled = avg_stats(&ps->res_stats[1]); 891 avg_enabled = avg_stats(&ps->res_stats[1]);
505 avg_running = avg_stats(&ps->res_stats[2]); 892 avg_running = avg_stats(&ps->res_stats[2]);
506 893
507 fprintf(stderr, " (scaled from %.2f%%)", 894 fprintf(stderr, " [%5.2f%%]", 100 * avg_running / avg_enabled);
508 100 * avg_running / avg_enabled);
509 } 895 }
510 fprintf(stderr, "\n"); 896 fprintf(stderr, "\n");
511} 897}
@@ -547,10 +933,8 @@ static void print_counter(struct perf_evsel *counter)
547 if (!csv_output) { 933 if (!csv_output) {
548 print_noise(counter, 1.0); 934 print_noise(counter, 1.0);
549 935
550 if (run != ena) { 936 if (run != ena)
551 fprintf(stderr, " (scaled from %.2f%%)", 937 fprintf(stderr, " (%.2f%%)", 100.0 * run / ena);
552 100.0 * run / ena);
553 }
554 } 938 }
555 fputc('\n', stderr); 939 fputc('\n', stderr);
556 } 940 }
@@ -590,13 +974,14 @@ static void print_stat(int argc, const char **argv)
590 } 974 }
591 975
592 if (!csv_output) { 976 if (!csv_output) {
593 fprintf(stderr, "\n"); 977 if (!null_run)
594 fprintf(stderr, " %18.9f seconds time elapsed", 978 fprintf(stderr, "\n");
979 fprintf(stderr, " %17.9f seconds time elapsed",
595 avg_stats(&walltime_nsecs_stats)/1e9); 980 avg_stats(&walltime_nsecs_stats)/1e9);
596 if (run_count > 1) { 981 if (run_count > 1) {
597 fprintf(stderr, " ( +- %7.3f%% )", 982 fprintf(stderr, " ");
598 100*stddev_stats(&walltime_nsecs_stats) / 983 print_noise_pct(stddev_stats(&walltime_nsecs_stats),
599 avg_stats(&walltime_nsecs_stats)); 984 avg_stats(&walltime_nsecs_stats));
600 } 985 }
601 fprintf(stderr, "\n\n"); 986 fprintf(stderr, "\n\n");
602 } 987 }
@@ -658,6 +1043,10 @@ static const struct option options[] = {
658 "repeat command and print average + stddev (max: 100)"), 1043 "repeat command and print average + stddev (max: 100)"),
659 OPT_BOOLEAN('n', "null", &null_run, 1044 OPT_BOOLEAN('n', "null", &null_run,
 660 "null run - don't start any counters"), 1045 "null run - don't start any counters"),
1046 OPT_INCR('d', "detailed", &detailed_run,
1047 "detailed run - start a lot of events"),
1048 OPT_BOOLEAN('S', "sync", &sync_run,
1049 "call sync() before starting a run"),
661 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1050 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
662 "print large numbers with thousands\' separators", 1051 "print large numbers with thousands\' separators",
663 stat__set_big_num), 1052 stat__set_big_num),
@@ -673,6 +1062,70 @@ static const struct option options[] = {
673 OPT_END() 1062 OPT_END()
674}; 1063};
675 1064
1065/*
1066 * Add default attributes, if there were no attributes specified or
1067 * if -d/--detailed, -d -d or -d -d -d is used:
1068 */
1069static int add_default_attributes(void)
1070{
1071 struct perf_evsel *pos;
1072 size_t attr_nr = 0;
1073 size_t c;
1074
1075 /* Set attrs if no event is selected and !null_run: */
1076 if (null_run)
1077 return 0;
1078
1079 if (!evsel_list->nr_entries) {
1080 for (c = 0; c < ARRAY_SIZE(default_attrs); c++) {
1081 pos = perf_evsel__new(default_attrs + c, c + attr_nr);
1082 if (pos == NULL)
1083 return -1;
1084 perf_evlist__add(evsel_list, pos);
1085 }
1086 attr_nr += c;
1087 }
1088
1089 /* Detailed events get appended to the event list: */
1090
1091 if (detailed_run < 1)
1092 return 0;
1093
1094 /* Append detailed run extra attributes: */
1095 for (c = 0; c < ARRAY_SIZE(detailed_attrs); c++) {
1096 pos = perf_evsel__new(detailed_attrs + c, c + attr_nr);
1097 if (pos == NULL)
1098 return -1;
1099 perf_evlist__add(evsel_list, pos);
1100 }
1101 attr_nr += c;
1102
1103 if (detailed_run < 2)
1104 return 0;
1105
1106 /* Append very detailed run extra attributes: */
1107 for (c = 0; c < ARRAY_SIZE(very_detailed_attrs); c++) {
1108 pos = perf_evsel__new(very_detailed_attrs + c, c + attr_nr);
1109 if (pos == NULL)
1110 return -1;
1111 perf_evlist__add(evsel_list, pos);
1112 }
1113
1114 if (detailed_run < 3)
1115 return 0;
1116
1117 /* Append very, very detailed run extra attributes: */
1118 for (c = 0; c < ARRAY_SIZE(very_very_detailed_attrs); c++) {
1119 pos = perf_evsel__new(very_very_detailed_attrs + c, c + attr_nr);
1120 if (pos == NULL)
1121 return -1;
1122 perf_evlist__add(evsel_list, pos);
1123 }
1124
1125
1126 return 0;
1127}
1128
676int cmd_stat(int argc, const char **argv, const char *prefix __used) 1129int cmd_stat(int argc, const char **argv, const char *prefix __used)
677{ 1130{
678 struct perf_evsel *pos; 1131 struct perf_evsel *pos;
@@ -718,17 +1171,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
718 usage_with_options(stat_usage, options); 1171 usage_with_options(stat_usage, options);
719 } 1172 }
720 1173
721 /* Set attrs and nr_counters if no event is selected and !null_run */ 1174 if (add_default_attributes())
722 if (!null_run && !evsel_list->nr_entries) { 1175 goto out;
723 size_t c;
724
725 for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
726 pos = perf_evsel__new(&default_attrs[c], c);
727 if (pos == NULL)
728 goto out;
729 perf_evlist__add(evsel_list, pos);
730 }
731 }
732 1176
733 if (target_pid != -1) 1177 if (target_pid != -1)
734 target_tid = target_pid; 1178 target_tid = target_pid;
@@ -772,6 +1216,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
772 for (run_idx = 0; run_idx < run_count; run_idx++) { 1216 for (run_idx = 0; run_idx < run_count; run_idx++) {
773 if (run_count != 1 && verbose) 1217 if (run_count != 1 && verbose)
774 fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1); 1218 fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);
1219
1220 if (sync_run)
1221 sync();
1222
775 status = run_perf_stat(argc, argv); 1223 status = run_perf_stat(argc, argv);
776 } 1224 }
777 1225
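
The new abs_printout() branches above match hardware-cache events by rebuilding attr.config inline: the cache unit sits in the low byte, the operation in bits 8-15 and the result in bits 16-23. A minimal standalone sketch of that encoding (the helper name is illustrative; the PERF_COUNT_HW_CACHE_* constants are the real <linux/perf_event.h> ones):

#include <stdio.h>
#include <linux/perf_event.h>

/* Illustrative helper: builds the attr.config value that, e.g.,
 * the print_l1_dcache_misses() branch in abs_printout() compares against. */
static unsigned long long hw_cache_config(unsigned long long cache,
					  unsigned long long op,
					  unsigned long long result)
{
	return cache | (op << 8) | (result << 16);
}

int main(void)
{
	unsigned long long config =
		hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
				PERF_COUNT_HW_CACHE_OP_READ,
				PERF_COUNT_HW_CACHE_RESULT_MISS);

	/* L1D (0) | READ (0) << 8 | MISS (1) << 16: prints 0x10000. */
	printf("%#llx\n", config);
	return 0;
}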
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1b2106c58f66..b67186228c89 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -290,7 +290,7 @@ static int test__open_syscall_event(void)
290 goto out_thread_map_delete; 290 goto out_thread_map_delete;
291 } 291 }
292 292
293 if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0) { 293 if (perf_evsel__open_per_thread(evsel, threads, false) < 0) {
294 pr_debug("failed to open counter: %s, " 294 pr_debug("failed to open counter: %s, "
295 "tweak /proc/sys/kernel/perf_event_paranoid?\n", 295 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
296 strerror(errno)); 296 strerror(errno));
@@ -303,7 +303,7 @@ static int test__open_syscall_event(void)
303 } 303 }
304 304
305 if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) { 305 if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
306 pr_debug("perf_evsel__open_read_on_cpu\n"); 306 pr_debug("perf_evsel__read_on_cpu\n");
307 goto out_close_fd; 307 goto out_close_fd;
308 } 308 }
309 309
@@ -365,7 +365,7 @@ static int test__open_syscall_event_on_all_cpus(void)
365 goto out_thread_map_delete; 365 goto out_thread_map_delete;
366 } 366 }
367 367
368 if (perf_evsel__open(evsel, cpus, threads, false, false) < 0) { 368 if (perf_evsel__open(evsel, cpus, threads, false) < 0) {
369 pr_debug("failed to open counter: %s, " 369 pr_debug("failed to open counter: %s, "
370 "tweak /proc/sys/kernel/perf_event_paranoid?\n", 370 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
371 strerror(errno)); 371 strerror(errno));
@@ -418,7 +418,7 @@ static int test__open_syscall_event_on_all_cpus(void)
418 continue; 418 continue;
419 419
420 if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) { 420 if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
421 pr_debug("perf_evsel__open_read_on_cpu\n"); 421 pr_debug("perf_evsel__read_on_cpu\n");
422 err = -1; 422 err = -1;
423 break; 423 break;
424 } 424 }
@@ -474,6 +474,7 @@ static int test__basic_mmap(void)
474 unsigned int nr_events[nsyscalls], 474 unsigned int nr_events[nsyscalls],
475 expected_nr_events[nsyscalls], i, j; 475 expected_nr_events[nsyscalls], i, j;
476 struct perf_evsel *evsels[nsyscalls], *evsel; 476 struct perf_evsel *evsels[nsyscalls], *evsel;
477 int sample_size = perf_sample_size(attr.sample_type);
477 478
478 for (i = 0; i < nsyscalls; ++i) { 479 for (i = 0; i < nsyscalls; ++i) {
479 char name[64]; 480 char name[64];
@@ -529,7 +530,7 @@ static int test__basic_mmap(void)
529 530
530 perf_evlist__add(evlist, evsels[i]); 531 perf_evlist__add(evlist, evsels[i]);
531 532
532 if (perf_evsel__open(evsels[i], cpus, threads, false, false) < 0) { 533 if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) {
533 pr_debug("failed to open counter: %s, " 534 pr_debug("failed to open counter: %s, "
534 "tweak /proc/sys/kernel/perf_event_paranoid?\n", 535 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
535 strerror(errno)); 536 strerror(errno));
@@ -549,7 +550,7 @@ static int test__basic_mmap(void)
549 ++foo; 550 ++foo;
550 } 551 }
551 552
552 while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) { 553 while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
553 struct perf_sample sample; 554 struct perf_sample sample;
554 555
555 if (event->header.type != PERF_RECORD_SAMPLE) { 556 if (event->header.type != PERF_RECORD_SAMPLE) {
@@ -558,7 +559,13 @@ static int test__basic_mmap(void)
558 goto out_munmap; 559 goto out_munmap;
559 } 560 }
560 561
561 perf_event__parse_sample(event, attr.sample_type, false, &sample); 562 err = perf_event__parse_sample(event, attr.sample_type, sample_size,
563 false, &sample);
564 if (err) {
565 pr_err("Can't parse sample, err = %d\n", err);
566 goto out_munmap;
567 }
568
562 evsel = perf_evlist__id2evsel(evlist, sample.id); 569 evsel = perf_evlist__id2evsel(evlist, sample.id);
563 if (evsel == NULL) { 570 if (evsel == NULL) {
564 pr_debug("event with id %" PRIu64 571 pr_debug("event with id %" PRIu64
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 676b4fb0070f..f2f3f4937aa2 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -62,8 +62,6 @@
62#include <linux/unistd.h> 62#include <linux/unistd.h>
63#include <linux/types.h> 63#include <linux/types.h>
64 64
65#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
66
67static struct perf_top top = { 65static struct perf_top top = {
68 .count_filter = 5, 66 .count_filter = 5,
69 .delay_secs = 2, 67 .delay_secs = 2,
@@ -82,6 +80,8 @@ static bool use_tui, use_stdio;
82 80
83static int default_interval = 0; 81static int default_interval = 0;
84 82
83static bool kptr_restrict_warned;
84static bool vmlinux_warned;
85static bool inherit = false; 85static bool inherit = false;
86static int realtime_prio = 0; 86static int realtime_prio = 0;
87static bool group = false; 87static bool group = false;
@@ -740,7 +740,22 @@ static void perf_event__process_sample(const union perf_event *event,
740 al.filtered) 740 al.filtered)
741 return; 741 return;
742 742
743 if (!kptr_restrict_warned &&
744 symbol_conf.kptr_restrict &&
745 al.cpumode == PERF_RECORD_MISC_KERNEL) {
746 ui__warning(
747"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
748"Check /proc/sys/kernel/kptr_restrict.\n\n"
749"Kernel%s samples will not be resolved.\n",
750 !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
751 " modules" : "");
752 if (use_browser <= 0)
753 sleep(5);
754 kptr_restrict_warned = true;
755 }
756
743 if (al.sym == NULL) { 757 if (al.sym == NULL) {
758 const char *msg = "Kernel samples will not be resolved.\n";
744 /* 759 /*
745 * As we do lazy loading of symtabs we only will know if the 760 * As we do lazy loading of symtabs we only will know if the
746 * specified vmlinux file is invalid when we actually have a 761 * specified vmlinux file is invalid when we actually have a
@@ -752,12 +767,20 @@ static void perf_event__process_sample(const union perf_event *event,
752 * --hide-kernel-symbols, even if the user specifies an 767 * --hide-kernel-symbols, even if the user specifies an
753 * invalid --vmlinux ;-) 768 * invalid --vmlinux ;-)
754 */ 769 */
755 if (al.map == machine->vmlinux_maps[MAP__FUNCTION] && 770 if (!kptr_restrict_warned && !vmlinux_warned &&
771 al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
756 RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) { 772 RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
757 ui__warning("The %s file can't be used\n", 773 if (symbol_conf.vmlinux_name) {
758 symbol_conf.vmlinux_name); 774 ui__warning("The %s file can't be used.\n%s",
759 exit_browser(0); 775 symbol_conf.vmlinux_name, msg);
760 exit(1); 776 } else {
777 ui__warning("A vmlinux file was not found.\n%s",
778 msg);
779 }
780
781 if (use_browser <= 0)
782 sleep(5);
783 vmlinux_warned = true;
761 } 784 }
762 785
763 return; 786 return;
@@ -801,13 +824,18 @@ static void perf_event__process_sample(const union perf_event *event,
801 } 824 }
802} 825}
803 826
804static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu) 827static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
805{ 828{
806 struct perf_sample sample; 829 struct perf_sample sample;
807 union perf_event *event; 830 union perf_event *event;
831 int ret;
808 832
809 while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) { 833 while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) {
810 perf_session__parse_sample(self, event, &sample); 834 ret = perf_session__parse_sample(self, event, &sample);
835 if (ret) {
836 pr_err("Can't parse sample, err = %d\n", ret);
837 continue;
838 }
811 839
812 if (event->header.type == PERF_RECORD_SAMPLE) 840 if (event->header.type == PERF_RECORD_SAMPLE)
813 perf_event__process_sample(event, &sample, self); 841 perf_event__process_sample(event, &sample, self);
@@ -820,8 +848,8 @@ static void perf_session__mmap_read(struct perf_session *self)
820{ 848{
821 int i; 849 int i;
822 850
823 for (i = 0; i < top.evlist->cpus->nr; i++) 851 for (i = 0; i < top.evlist->nr_mmaps; i++)
824 perf_session__mmap_read_cpu(self, i); 852 perf_session__mmap_read_idx(self, i);
825} 853}
826 854
827static void start_counters(struct perf_evlist *evlist) 855static void start_counters(struct perf_evlist *evlist)
@@ -845,15 +873,16 @@ static void start_counters(struct perf_evlist *evlist)
845 } 873 }
846 874
847 attr->mmap = 1; 875 attr->mmap = 1;
876 attr->inherit = inherit;
848try_again: 877try_again:
849 if (perf_evsel__open(counter, top.evlist->cpus, 878 if (perf_evsel__open(counter, top.evlist->cpus,
850 top.evlist->threads, group, inherit) < 0) { 879 top.evlist->threads, group) < 0) {
851 int err = errno; 880 int err = errno;
852 881
853 if (err == EPERM || err == EACCES) 882 if (err == EPERM || err == EACCES) {
854 die("Permission error - are you root?\n" 883 ui__warning_paranoid();
855 "\t Consider tweaking" 884 goto out_err;
856 " /proc/sys/kernel/perf_event_paranoid.\n"); 885 }
857 /* 886 /*
858 * If it's cycles then fall back to hrtimer 887 * If it's cycles then fall back to hrtimer
859 * based cpu-clock-tick sw counter, which 888 * based cpu-clock-tick sw counter, which
@@ -861,25 +890,41 @@ try_again:
861 */ 890 */
862 if (attr->type == PERF_TYPE_HARDWARE && 891 if (attr->type == PERF_TYPE_HARDWARE &&
863 attr->config == PERF_COUNT_HW_CPU_CYCLES) { 892 attr->config == PERF_COUNT_HW_CPU_CYCLES) {
864
865 if (verbose) 893 if (verbose)
866 warning(" ... trying to fall back to cpu-clock-ticks\n"); 894 ui__warning("Cycles event not supported,\n"
895 "trying to fall back to cpu-clock-ticks\n");
867 896
868 attr->type = PERF_TYPE_SOFTWARE; 897 attr->type = PERF_TYPE_SOFTWARE;
869 attr->config = PERF_COUNT_SW_CPU_CLOCK; 898 attr->config = PERF_COUNT_SW_CPU_CLOCK;
870 goto try_again; 899 goto try_again;
871 } 900 }
872 printf("\n"); 901
873 error("sys_perf_event_open() syscall returned with %d " 902 if (err == ENOENT) {
874 "(%s). /bin/dmesg may provide additional information.\n", 903 ui__warning("The %s event is not supported.\n",
875 err, strerror(err)); 904 event_name(counter));
876 die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); 905 goto out_err;
877 exit(-1); 906 }
907
908 ui__warning("The sys_perf_event_open() syscall "
909 "returned with %d (%s). /bin/dmesg "
910 "may provide additional information.\n"
911 "No CONFIG_PERF_EVENTS=y kernel support "
912 "configured?\n", err, strerror(err));
913 goto out_err;
878 } 914 }
879 } 915 }
880 916
881 if (perf_evlist__mmap(evlist, mmap_pages, false) < 0) 917 if (perf_evlist__mmap(evlist, mmap_pages, false) < 0) {
882 die("failed to mmap with %d (%s)\n", errno, strerror(errno)); 918 ui__warning("Failed to mmap with %d (%s)\n",
919 errno, strerror(errno));
920 goto out_err;
921 }
922
923 return;
924
925out_err:
926 exit_browser(0);
927 exit(0);
883} 928}
884 929
885static int __cmd_top(void) 930static int __cmd_top(void)
diff --git a/tools/perf/feature-tests.mak b/tools/perf/config/feature-tests.mak
index b041ca67a2cb..6170fd2531b5 100644
--- a/tools/perf/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -79,9 +79,15 @@ endef
79endif 79endif
80 80
81ifndef NO_LIBPYTHON 81ifndef NO_LIBPYTHON
82define SOURCE_PYTHON_VERSION
83#include <Python.h>
84#if PY_VERSION_HEX >= 0x03000000
85 #error
86#endif
87int main(void){}
88endef
82define SOURCE_PYTHON_EMBED 89define SOURCE_PYTHON_EMBED
83#include <Python.h> 90#include <Python.h>
84
85int main(void) 91int main(void)
86{ 92{
87 Py_Initialize(); 93 Py_Initialize();
@@ -120,11 +126,3 @@ int main(void)
120 return 0; 126 return 0;
121} 127}
122endef 128endef
123
124# try-cc
125# Usage: option = $(call try-cc, source-to-build, cc-options)
126try-cc = $(shell sh -c \
127 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \
128 echo "$(1)" | \
129 $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \
130 rm -f "$$TMP"')
diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak
new file mode 100644
index 000000000000..8046182a19eb
--- /dev/null
+++ b/tools/perf/config/utilities.mak
@@ -0,0 +1,188 @@
1# This allows us to work with the newline character:
2define newline
3
4
5endef
6newline := $(newline)
7
8# nl-escape
9#
10# Usage: escape = $(call nl-escape[,escape])
11#
12# This is used as the common way to specify
13# what should replace a newline when escaping
14# newlines; the default is a bizarre string.
15#
16nl-escape = $(or $(1),m822df3020w6a44id34bt574ctac44eb9f4n)
17
18# escape-nl
19#
20# Usage: escaped-text = $(call escape-nl,text[,escape])
21#
22# GNU make's $(shell ...) function converts to a
23# single space each newline character in the output
24# produced during the expansion; this may not be
25# desirable.
26#
27# The only solution is to change each newline into
28# something that won't be converted, so that the
29# information can be recovered later with
30# $(call unescape-nl...)
31#
32escape-nl = $(subst $(newline),$(call nl-escape,$(2)),$(1))
33
34# unescape-nl
35#
36# Usage: text = $(call unescape-nl,escaped-text[,escape])
37#
38# See escape-nl.
39#
40unescape-nl = $(subst $(call nl-escape,$(2)),$(newline),$(1))
41
42# shell-escape-nl
43#
44# Usage: $(shell some-command | $(call shell-escape-nl[,escape]))
45#
46# Use this to escape newlines from within a shell call;
47# the default escape is a bizarre string.
48#
49# NOTE: The escape is used directly as a string constant
50# in an `awk' program that is delimited by shell
51# single-quotes, so be wary of the characters
52# that are chosen.
53#
54define shell-escape-nl
55awk 'NR==1 {t=$$0} NR>1 {t=t "$(nl-escape)" $$0} END {printf t}'
56endef
57
58# shell-unescape-nl
59#
60# Usage: $(shell some-command | $(call shell-unescape-nl[,escape]))
61#
62# Use this to unescape newlines from within a shell call;
63# the default escape is a bizarre string.
64#
65# NOTE: The escape is used directly as an extended regular
66# expression constant in an `awk' program that is
67# delimited by shell single-quotes, so be wary
68# of the characters that are chosen.
69#
70# (The bash shell has a bug where `{gsub(...),...}' is
71# misinterpreted as a brace expansion; this can be
72# overcome by putting a space between `{' and `gsub').
73#
74define shell-unescape-nl
75awk 'NR==1 {t=$$0} NR>1 {t=t "\n" $$0} END { gsub(/$(nl-escape)/,"\n",t); printf t }'
76endef
77
78# escape-for-shell-sq
79#
80# Usage: embeddable-text = $(call escape-for-shell-sq,text)
81#
82# This function produces text that is suitable for
83# embedding in a shell string that is delimited by
84# single-quotes.
85#
86escape-for-shell-sq = $(subst ','\'',$(1))
87
88# shell-sq
89#
90# Usage: single-quoted-and-escaped-text = $(call shell-sq,text)
91#
92shell-sq = '$(escape-for-shell-sq)'
93
94# shell-wordify
95#
96# Usage: wordified-text = $(call shell-wordify,text)
97#
98# For instance:
99#
100# |define text
101# |hello
102# |world
103# |endef
104# |
105# |target:
106# | echo $(call shell-wordify,$(text))
107#
108# At least GNU make gets confused by expanding a newline
109# within the context of a command line of a makefile rule
110# (this is in contrast to a `$(shell ...)' function call,
111# which can handle it just fine).
112#
113# This function avoids the problem by producing a string
114# that works as a shell word, regardless of whether or
115# not it contains a newline.
116#
117# If the text to be wordified contains a newline, then
118# an intricate shell command substitution is constructed
119# to render the text as a single line; when the shell
120# processes the resulting escaped text, it transforms
121# it into the original unescaped text.
122#
123# If the text does not contain a newline, then this function
124# produces the same results as the `$(shell-sq)' function.
125#
126shell-wordify = $(if $(findstring $(newline),$(1)),$(_sw-esc-nl),$(shell-sq))
127define _sw-esc-nl
128"$$(echo $(call escape-nl,$(shell-sq),$(2)) | $(call shell-unescape-nl,$(2)))"
129endef
130
131# is-absolute
132#
133# Usage: bool-value = $(call is-absolute,path)
134#
135is-absolute = $(shell echo $(shell-sq) | grep ^/ -q && echo y)
136
137# lookup
138#
139# Usage: absolute-executable-path-or-empty = $(call lookup,path)
140#
141# (It's necessary to use `sh -c' because GNU make messes up by
142# trying too hard and getting things wrong).
143#
144lookup = $(call unescape-nl,$(shell sh -c $(_l-sh)))
145_l-sh = $(call shell-sq,command -v $(shell-sq) | $(call shell-escape-nl,))
146
147# is-executable
148#
149# Usage: bool-value = $(call is-executable,path)
150#
151# (It's necessary to use `sh -c' because GNU make messes up by
152# trying too hard and getting things wrong).
153#
154is-executable = $(call _is-executable-helper,$(shell-sq))
155_is-executable-helper = $(shell sh -c $(_is-executable-sh))
156_is-executable-sh = $(call shell-sq,test -f $(1) -a -x $(1) && echo y)
157
158# get-executable
159#
160# Usage: absolute-executable-path-or-empty = $(call get-executable,path)
161#
162# The goal is to get an absolute path for an executable;
163# the `command -v' is defined by POSIX, but it's not
164# necessarily very portable, so it's only used if
165# relative path resolution is requested, as determined
166# by the presence of a leading `/'.
167#
168get-executable = $(if $(1),$(if $(is-absolute),$(_ge-abspath),$(lookup)))
169_ge-abspath = $(if $(is-executable),$(1))
170
171# get-executable-or-default
172#
173# Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default)
174#
175define get-executable-or-default
176$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
177endef
178_ge_attempt = $(or $(get-executable),$(_gea_warn),$(call _gea_err,$(2)))
179_gea_warn = $(warning The path '$(1)' is not executable.)
180_gea_err = $(if $(1),$(error Please set '$(1)' appropriately))
181
182# try-cc
183# Usage: option = $(call try-cc, source-to-build, cc-options)
184try-cc = $(shell sh -c \
185 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \
186 echo "$(1)" | \
187 $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \
188 rm -f "$$TMP"')
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index 9fea75535221..96bee5c46008 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -13,7 +13,7 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen)
13{ 13{
14 FILE *fp; 14 FILE *fp;
15 char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1]; 15 char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1];
16 char *token, *saved_ptr; 16 char *token, *saved_ptr = NULL;
17 int found = 0; 17 int found = 0;
18 18
19 fp = fopen("/proc/mounts", "r"); 19 fp = fopen("/proc/mounts", "r");
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index d4536a9e0d8c..155749d74350 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -57,6 +57,16 @@ void ui__warning(const char *format, ...)
57} 57}
58#endif 58#endif
59 59
60void ui__warning_paranoid(void)
61{
62 ui__warning("Permission error - are you root?\n"
63 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
64 " -1 - Not paranoid at all\n"
65 " 0 - Disallow raw tracepoint access for unpriv\n"
66 " 1 - Disallow cpu events for unpriv\n"
67 " 2 - Disallow kernel profiling for unpriv\n");
68}
69
60void trace_event(union perf_event *event) 70void trace_event(union perf_event *event)
61{ 71{
62 unsigned char *raw_event = (void *)event; 72 unsigned char *raw_event = (void *)event;
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 93516cf4682c..fd53db47e3de 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -36,5 +36,6 @@ int ui_helpline__show_help(const char *format, va_list ap);
36#endif 36#endif
37 37
38void ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2))); 38void ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
39void ui__warning_paranoid(void);
39 40
40#endif /* __PERF_DEBUG_H */ 41#endif /* __PERF_DEBUG_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 2b15c362ef56..0fe9adf76379 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -9,21 +9,21 @@
9#include "thread_map.h" 9#include "thread_map.h"
10 10
11static const char *perf_event__names[] = { 11static const char *perf_event__names[] = {
12 [0] = "TOTAL", 12 [0] = "TOTAL",
13 [PERF_RECORD_MMAP] = "MMAP", 13 [PERF_RECORD_MMAP] = "MMAP",
14 [PERF_RECORD_LOST] = "LOST", 14 [PERF_RECORD_LOST] = "LOST",
15 [PERF_RECORD_COMM] = "COMM", 15 [PERF_RECORD_COMM] = "COMM",
16 [PERF_RECORD_EXIT] = "EXIT", 16 [PERF_RECORD_EXIT] = "EXIT",
17 [PERF_RECORD_THROTTLE] = "THROTTLE", 17 [PERF_RECORD_THROTTLE] = "THROTTLE",
18 [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE", 18 [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
19 [PERF_RECORD_FORK] = "FORK", 19 [PERF_RECORD_FORK] = "FORK",
20 [PERF_RECORD_READ] = "READ", 20 [PERF_RECORD_READ] = "READ",
21 [PERF_RECORD_SAMPLE] = "SAMPLE", 21 [PERF_RECORD_SAMPLE] = "SAMPLE",
22 [PERF_RECORD_HEADER_ATTR] = "ATTR", 22 [PERF_RECORD_HEADER_ATTR] = "ATTR",
23 [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE", 23 [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
24 [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA", 24 [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
25 [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID", 25 [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
26 [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND", 26 [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
27}; 27};
28 28
29const char *perf_event__name(unsigned int id) 29const char *perf_event__name(unsigned int id)
@@ -35,6 +35,22 @@ const char *perf_event__name(unsigned int id)
35 return perf_event__names[id]; 35 return perf_event__names[id];
36} 36}
37 37
38int perf_sample_size(u64 sample_type)
39{
40 u64 mask = sample_type & PERF_SAMPLE_MASK;
41 int size = 0;
42 int i;
43
44 for (i = 0; i < 64; i++) {
45 if (mask & (1ULL << i))
46 size++;
47 }
48
49 size *= sizeof(u64);
50
51 return size;
52}
53
38static struct perf_sample synth_sample = { 54static struct perf_sample synth_sample = {
39 .pid = -1, 55 .pid = -1,
40 .tid = -1, 56 .tid = -1,
@@ -537,9 +553,18 @@ static int perf_event__process_kernel_mmap(union perf_event *event,
537 goto out_problem; 553 goto out_problem;
538 554
539 perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps); 555 perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);
540 perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, 556
541 symbol_name, 557 /*
542 event->mmap.pgoff); 558 * Avoid using a zero address (kptr_restrict) for the ref reloc
559 * symbol. Effectively having zero here means that at record
 560 * time /proc/sys/kernel/kptr_restrict was non-zero.
561 */
562 if (event->mmap.pgoff != 0) {
563 perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
564 symbol_name,
565 event->mmap.pgoff);
566 }
567
543 if (machine__is_default_guest(machine)) { 568 if (machine__is_default_guest(machine)) {
544 /* 569 /*
545 * preload dso of guest kernel and modules 570 * preload dso of guest kernel and modules
@@ -710,7 +735,7 @@ try_again:
710 * in the whole kernel symbol list. 735 * in the whole kernel symbol list.
711 */ 736 */
712 if ((long long)al->addr < 0 && 737 if ((long long)al->addr < 0 &&
713 cpumode == PERF_RECORD_MISC_KERNEL && 738 cpumode == PERF_RECORD_MISC_USER &&
714 machine && mg != &machine->kmaps) { 739 machine && mg != &machine->kmaps) {
715 mg = &machine->kmaps; 740 mg = &machine->kmaps;
716 goto try_again; 741 goto try_again;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 9c35170fb379..c08332871408 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -56,6 +56,13 @@ struct read_event {
56 u64 id; 56 u64 id;
57}; 57};
58 58
59
60#define PERF_SAMPLE_MASK \
61 (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
62 PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
63 PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
64 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
65
59struct sample_event { 66struct sample_event {
60 struct perf_event_header header; 67 struct perf_event_header header;
61 u64 array[]; 68 u64 array[];
@@ -75,6 +82,8 @@ struct perf_sample {
75 struct ip_callchain *callchain; 82 struct ip_callchain *callchain;
76}; 83};
77 84
85int perf_sample_size(u64 sample_type);
86
78#define BUILD_ID_SIZE 20 87#define BUILD_ID_SIZE 20
79 88
80struct build_id_event { 89struct build_id_event {
@@ -178,6 +187,7 @@ int perf_event__preprocess_sample(const union perf_event *self,
178const char *perf_event__name(unsigned int id); 187const char *perf_event__name(unsigned int id);
179 188
180int perf_event__parse_sample(const union perf_event *event, u64 type, 189int perf_event__parse_sample(const union perf_event *event, u64 type,
181 bool sample_id_all, struct perf_sample *sample); 190 int sample_size, bool sample_id_all,
191 struct perf_sample *sample);
182 192
183#endif /* __PERF_RECORD_H */ 193#endif /* __PERF_RECORD_H */
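
The PERF_SAMPLE_MASK / perf_sample_size() pair introduced above simply counts the fixed-size u64 fields selected in sample_type, so callers (see the builtin-test.c hunk earlier) can reject records that are too short before parsing them. A standalone sketch of that arithmetic, using the real PERF_SAMPLE_* flags from <linux/perf_event.h>:

#include <stdio.h>
#include <stdint.h>
#include <linux/perf_event.h>

int main(void)
{
	uint64_t sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
	/* The same fixed-size fields PERF_SAMPLE_MASK covers in event.h above. */
	uint64_t mask = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
			PERF_SAMPLE_ADDR | PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID |
			PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD;
	int i, size = 0;

	for (i = 0; i < 64; i++)
		if ((sample_type & mask) & (1ULL << i))
			size++;
	size *= sizeof(uint64_t);

	/* Three selected fields * 8 bytes: prints 24. A sample event whose
	 * header.size is smaller than this plus the header itself is now
	 * rejected with -EFAULT by perf_event__parse_sample(). */
	printf("%d\n", size);
	return 0;
}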
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index d852cefa20de..50aa34879c33 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -12,6 +12,7 @@
12#include "evlist.h" 12#include "evlist.h"
13#include "evsel.h" 13#include "evsel.h"
14#include "util.h" 14#include "util.h"
15#include "debug.h"
15 16
16#include <sys/mman.h> 17#include <sys/mman.h>
17 18
@@ -165,11 +166,11 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
165 return NULL; 166 return NULL;
166} 167}
167 168
168union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu) 169union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
169{ 170{
170 /* XXX Move this to perf.c, making it generally available */ 171 /* XXX Move this to perf.c, making it generally available */
171 unsigned int page_size = sysconf(_SC_PAGE_SIZE); 172 unsigned int page_size = sysconf(_SC_PAGE_SIZE);
172 struct perf_mmap *md = &evlist->mmap[cpu]; 173 struct perf_mmap *md = &evlist->mmap[idx];
173 unsigned int head = perf_mmap__read_head(md); 174 unsigned int head = perf_mmap__read_head(md);
174 unsigned int old = md->prev; 175 unsigned int old = md->prev;
175 unsigned char *data = md->base + page_size; 176 unsigned char *data = md->base + page_size;
@@ -234,36 +235,126 @@ union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
234 235
235void perf_evlist__munmap(struct perf_evlist *evlist) 236void perf_evlist__munmap(struct perf_evlist *evlist)
236{ 237{
237 int cpu; 238 int i;
238 239
239 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 240 for (i = 0; i < evlist->nr_mmaps; i++) {
240 if (evlist->mmap[cpu].base != NULL) { 241 if (evlist->mmap[i].base != NULL) {
241 munmap(evlist->mmap[cpu].base, evlist->mmap_len); 242 munmap(evlist->mmap[i].base, evlist->mmap_len);
242 evlist->mmap[cpu].base = NULL; 243 evlist->mmap[i].base = NULL;
243 } 244 }
244 } 245 }
246
247 free(evlist->mmap);
248 evlist->mmap = NULL;
245} 249}
246 250
247int perf_evlist__alloc_mmap(struct perf_evlist *evlist) 251int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
248{ 252{
249 evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap)); 253 evlist->nr_mmaps = evlist->cpus->nr;
254 if (evlist->cpus->map[0] == -1)
255 evlist->nr_mmaps = evlist->threads->nr;
256 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
250 return evlist->mmap != NULL ? 0 : -ENOMEM; 257 return evlist->mmap != NULL ? 0 : -ENOMEM;
251} 258}
252 259
253static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot, 260static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
254 int mask, int fd) 261 int idx, int prot, int mask, int fd)
255{ 262{
256 evlist->mmap[cpu].prev = 0; 263 evlist->mmap[idx].prev = 0;
257 evlist->mmap[cpu].mask = mask; 264 evlist->mmap[idx].mask = mask;
258 evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot, 265 evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
259 MAP_SHARED, fd, 0); 266 MAP_SHARED, fd, 0);
260 if (evlist->mmap[cpu].base == MAP_FAILED) 267 if (evlist->mmap[idx].base == MAP_FAILED) {
268 if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit)
269 ui__warning("Inherit is not allowed on per-task "
270 "events using mmap.\n");
261 return -1; 271 return -1;
272 }
262 273
263 perf_evlist__add_pollfd(evlist, fd); 274 perf_evlist__add_pollfd(evlist, fd);
264 return 0; 275 return 0;
265} 276}
266 277
278static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
279{
280 struct perf_evsel *evsel;
281 int cpu, thread;
282
283 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
284 int output = -1;
285
286 for (thread = 0; thread < evlist->threads->nr; thread++) {
287 list_for_each_entry(evsel, &evlist->entries, node) {
288 int fd = FD(evsel, cpu, thread);
289
290 if (output == -1) {
291 output = fd;
292 if (__perf_evlist__mmap(evlist, evsel, cpu,
293 prot, mask, output) < 0)
294 goto out_unmap;
295 } else {
296 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
297 goto out_unmap;
298 }
299
300 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
301 perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
302 goto out_unmap;
303 }
304 }
305 }
306
307 return 0;
308
309out_unmap:
310 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
311 if (evlist->mmap[cpu].base != NULL) {
312 munmap(evlist->mmap[cpu].base, evlist->mmap_len);
313 evlist->mmap[cpu].base = NULL;
314 }
315 }
316 return -1;
317}
318
319static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
320{
321 struct perf_evsel *evsel;
322 int thread;
323
324 for (thread = 0; thread < evlist->threads->nr; thread++) {
325 int output = -1;
326
327 list_for_each_entry(evsel, &evlist->entries, node) {
328 int fd = FD(evsel, 0, thread);
329
330 if (output == -1) {
331 output = fd;
332 if (__perf_evlist__mmap(evlist, evsel, thread,
333 prot, mask, output) < 0)
334 goto out_unmap;
335 } else {
336 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
337 goto out_unmap;
338 }
339
340 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
341 perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
342 goto out_unmap;
343 }
344 }
345
346 return 0;
347
348out_unmap:
349 for (thread = 0; thread < evlist->threads->nr; thread++) {
350 if (evlist->mmap[thread].base != NULL) {
351 munmap(evlist->mmap[thread].base, evlist->mmap_len);
352 evlist->mmap[thread].base = NULL;
353 }
354 }
355 return -1;
356}
357
267/** perf_evlist__mmap - Create per cpu maps to receive events 358/** perf_evlist__mmap - Create per cpu maps to receive events
268 * 359 *
269 * @evlist - list of events 360 * @evlist - list of events
@@ -282,11 +373,11 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
282int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) 373int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
283{ 374{
284 unsigned int page_size = sysconf(_SC_PAGE_SIZE); 375 unsigned int page_size = sysconf(_SC_PAGE_SIZE);
285 int mask = pages * page_size - 1, cpu; 376 int mask = pages * page_size - 1;
286 struct perf_evsel *first_evsel, *evsel; 377 struct perf_evsel *evsel;
287 const struct cpu_map *cpus = evlist->cpus; 378 const struct cpu_map *cpus = evlist->cpus;
288 const struct thread_map *threads = evlist->threads; 379 const struct thread_map *threads = evlist->threads;
289 int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); 380 int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
290 381
291 if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) 382 if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
292 return -ENOMEM; 383 return -ENOMEM;
@@ -296,42 +387,18 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
296 387
297 evlist->overwrite = overwrite; 388 evlist->overwrite = overwrite;
298 evlist->mmap_len = (pages + 1) * page_size; 389 evlist->mmap_len = (pages + 1) * page_size;
299 first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
300 390
301 list_for_each_entry(evsel, &evlist->entries, node) { 391 list_for_each_entry(evsel, &evlist->entries, node) {
302 if ((evsel->attr.read_format & PERF_FORMAT_ID) && 392 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
303 evsel->sample_id == NULL && 393 evsel->sample_id == NULL &&
304 perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0) 394 perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
305 return -ENOMEM; 395 return -ENOMEM;
306
307 for (cpu = 0; cpu < cpus->nr; cpu++) {
308 for (thread = 0; thread < threads->nr; thread++) {
309 int fd = FD(evsel, cpu, thread);
310
311 if (evsel->idx || thread) {
312 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
313 FD(first_evsel, cpu, 0)) != 0)
314 goto out_unmap;
315 } else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
316 goto out_unmap;
317
318 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
319 perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
320 goto out_unmap;
321 }
322 }
323 } 396 }
324 397
325 return 0; 398 if (evlist->cpus->map[0] == -1)
399 return perf_evlist__mmap_per_thread(evlist, prot, mask);
326 400
327out_unmap: 401 return perf_evlist__mmap_per_cpu(evlist, prot, mask);
328 for (cpu = 0; cpu < cpus->nr; cpu++) {
329 if (evlist->mmap[cpu].base != NULL) {
330 munmap(evlist->mmap[cpu].base, evlist->mmap_len);
331 evlist->mmap[cpu].base = NULL;
332 }
333 }
334 return -1;
335} 402}
336 403
337int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, 404int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
@@ -342,7 +409,7 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
342 if (evlist->threads == NULL) 409 if (evlist->threads == NULL)
343 return -1; 410 return -1;
344 411
345 if (target_tid != -1) 412 if (cpu_list == NULL && target_tid != -1)
346 evlist->cpus = cpu_map__dummy_new(); 413 evlist->cpus = cpu_map__dummy_new();
347 else 414 else
348 evlist->cpus = cpu_map__new(cpu_list); 415 evlist->cpus = cpu_map__new(cpu_list);
@@ -392,3 +459,34 @@ int perf_evlist__set_filters(struct perf_evlist *evlist)
392 459
393 return 0; 460 return 0;
394} 461}
462
463u64 perf_evlist__sample_type(struct perf_evlist *evlist)
464{
465 struct perf_evsel *pos;
466 u64 type = 0;
467
468 list_for_each_entry(pos, &evlist->entries, node) {
469 if (!type)
470 type = pos->attr.sample_type;
471 else if (type != pos->attr.sample_type)
472 die("non matching sample_type");
473 }
474
475 return type;
476}
477
478bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
479{
480 bool value = false, first = true;
481 struct perf_evsel *pos;
482
483 list_for_each_entry(pos, &evlist->entries, node) {
484 if (first) {
485 value = pos->attr.sample_id_all;
486 first = false;
487 } else if (value != pos->attr.sample_id_all)
488 die("non matching sample_id_all");
489 }
490
491 return value;
492}
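
Both new helpers above rely on the same idiom: only the first fd per cpu (or per thread) gets its own ring buffer via __perf_evlist__mmap(), and every later fd is redirected into it with PERF_EVENT_IOC_SET_OUTPUT. A reduced sketch of that pattern, with the fds[] array standing in (hypothetically) for the evsel/xyarray plumbing:

#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/perf_event.h>

/* fds[0..n-1]: perf_event_open() fds for one cpu (or one thread).
 * Only fds[0] is mmapped; the remaining fds share its buffer. */
static void *mmap_shared_output(int *fds, int n, size_t len)
{
	int i;
	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
			  fds[0], 0);

	if (base == MAP_FAILED)
		return NULL;

	for (i = 1; i < n; i++) {
		if (ioctl(fds[i], PERF_EVENT_IOC_SET_OUTPUT, fds[0]) != 0) {
			munmap(base, len);
			return NULL;
		}
	}

	return base;
}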
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 8b1cb7a4c5f1..0a1ef1f051f0 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -17,6 +17,7 @@ struct perf_evlist {
17 struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; 17 struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
18 int nr_entries; 18 int nr_entries;
19 int nr_fds; 19 int nr_fds;
20 int nr_mmaps;
20 int mmap_len; 21 int mmap_len;
21 bool overwrite; 22 bool overwrite;
22 union perf_event event_copy; 23 union perf_event event_copy;
@@ -46,7 +47,7 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
46 47
47struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); 48struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
48 49
49union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu); 50union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
50 51
51int perf_evlist__alloc_mmap(struct perf_evlist *evlist); 52int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
52int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); 53int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
@@ -65,4 +66,7 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
65void perf_evlist__delete_maps(struct perf_evlist *evlist); 66void perf_evlist__delete_maps(struct perf_evlist *evlist);
66int perf_evlist__set_filters(struct perf_evlist *evlist); 67int perf_evlist__set_filters(struct perf_evlist *evlist);
67 68
69u64 perf_evlist__sample_type(struct perf_evlist *evlist);
70bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
71
68#endif /* __PERF_EVLIST_H */ 72#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 662596afd7f1..cca29ededb5b 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -35,7 +35,17 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
35 35
36int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) 36int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
37{ 37{
38 int cpu, thread;
38 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); 39 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
40
41 if (evsel->fd) {
42 for (cpu = 0; cpu < ncpus; cpu++) {
43 for (thread = 0; thread < nthreads; thread++) {
44 FD(evsel, cpu, thread) = -1;
45 }
46 }
47 }
48
39 return evsel->fd != NULL ? 0 : -ENOMEM; 49 return evsel->fd != NULL ? 0 : -ENOMEM;
40} 50}
41 51
@@ -175,7 +185,7 @@ int __perf_evsel__read(struct perf_evsel *evsel,
175} 185}
176 186
177static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 187static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
178 struct thread_map *threads, bool group, bool inherit) 188 struct thread_map *threads, bool group)
179{ 189{
180 int cpu, thread; 190 int cpu, thread;
181 unsigned long flags = 0; 191 unsigned long flags = 0;
@@ -192,19 +202,6 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
192 202
193 for (cpu = 0; cpu < cpus->nr; cpu++) { 203 for (cpu = 0; cpu < cpus->nr; cpu++) {
194 int group_fd = -1; 204 int group_fd = -1;
195 /*
196 * Don't allow mmap() of inherited per-task counters. This
197 * would create a performance issue due to all children writing
198 * to the same buffer.
199 *
200 * FIXME:
201 * Proper fix is not to pass 'inherit' to perf_evsel__open*,
202 * but a 'flags' parameter, with 'group' folded there as well,
203 * then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if
204 * O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is
205 * set. Lets go for the minimal fix first tho.
206 */
207 evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit;
208 205
209 for (thread = 0; thread < threads->nr; thread++) { 206 for (thread = 0; thread < threads->nr; thread++) {
210 207
@@ -253,7 +250,7 @@ static struct {
253}; 250};
254 251
255int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 252int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
256 struct thread_map *threads, bool group, bool inherit) 253 struct thread_map *threads, bool group)
257{ 254{
258 if (cpus == NULL) { 255 if (cpus == NULL) {
259 /* Work around old compiler warnings about strict aliasing */ 256 /* Work around old compiler warnings about strict aliasing */
@@ -263,19 +260,19 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
263 if (threads == NULL) 260 if (threads == NULL)
264 threads = &empty_thread_map.map; 261 threads = &empty_thread_map.map;
265 262
266 return __perf_evsel__open(evsel, cpus, threads, group, inherit); 263 return __perf_evsel__open(evsel, cpus, threads, group);
267} 264}
268 265
269int perf_evsel__open_per_cpu(struct perf_evsel *evsel, 266int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
270 struct cpu_map *cpus, bool group, bool inherit) 267 struct cpu_map *cpus, bool group)
271{ 268{
272 return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit); 269 return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
273} 270}
274 271
275int perf_evsel__open_per_thread(struct perf_evsel *evsel, 272int perf_evsel__open_per_thread(struct perf_evsel *evsel,
276 struct thread_map *threads, bool group, bool inherit) 273 struct thread_map *threads, bool group)
277{ 274{
278 return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit); 275 return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
279} 276}
280 277
281static int perf_event__parse_id_sample(const union perf_event *event, u64 type, 278static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
@@ -316,8 +313,20 @@ static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
316 return 0; 313 return 0;
317} 314}
318 315
316static bool sample_overlap(const union perf_event *event,
317 const void *offset, u64 size)
318{
319 const void *base = event;
320
321 if (offset + size > base + event->header.size)
322 return true;
323
324 return false;
325}
326
319int perf_event__parse_sample(const union perf_event *event, u64 type, 327int perf_event__parse_sample(const union perf_event *event, u64 type,
320 bool sample_id_all, struct perf_sample *data) 328 int sample_size, bool sample_id_all,
329 struct perf_sample *data)
321{ 330{
322 const u64 *array; 331 const u64 *array;
323 332
@@ -332,6 +341,9 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
332 341
333 array = event->sample.array; 342 array = event->sample.array;
334 343
344 if (sample_size + sizeof(event->header) > event->header.size)
345 return -EFAULT;
346
335 if (type & PERF_SAMPLE_IP) { 347 if (type & PERF_SAMPLE_IP) {
336 data->ip = event->ip.ip; 348 data->ip = event->ip.ip;
337 array++; 349 array++;
@@ -382,14 +394,29 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
382 } 394 }
383 395
384 if (type & PERF_SAMPLE_CALLCHAIN) { 396 if (type & PERF_SAMPLE_CALLCHAIN) {
397 if (sample_overlap(event, array, sizeof(data->callchain->nr)))
398 return -EFAULT;
399
385 data->callchain = (struct ip_callchain *)array; 400 data->callchain = (struct ip_callchain *)array;
401
402 if (sample_overlap(event, array, data->callchain->nr))
403 return -EFAULT;
404
386 array += 1 + data->callchain->nr; 405 array += 1 + data->callchain->nr;
387 } 406 }
388 407
389 if (type & PERF_SAMPLE_RAW) { 408 if (type & PERF_SAMPLE_RAW) {
390 u32 *p = (u32 *)array; 409 u32 *p = (u32 *)array;
410
411 if (sample_overlap(event, array, sizeof(u32)))
412 return -EFAULT;
413
391 data->raw_size = *p; 414 data->raw_size = *p;
392 p++; 415 p++;
416
417 if (sample_overlap(event, p, data->raw_size))
418 return -EFAULT;
419
393 data->raw_data = p; 420 data->raw_data = p;
394 } 421 }
395 422
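
The sample_overlap() checks added above all enforce one invariant: no field, fixed or variable length (callchain entries, raw data), may extend past the record size declared in event->header.size; if it would, parsing returns -EFAULT instead of reading past the mmap'd data. A compact restatement of that bound, with the struct reduced (hypothetically) to the one field the check needs:

#include <stdbool.h>
#include <stdint.h>

struct record {
	uint16_t size;			/* plays the role of header.size */
	unsigned char payload[];
};

/* Mirrors sample_overlap(): does [field, field + len) run past the
 * end of the record as declared by its header? */
static bool overlaps(const struct record *rec, const void *field, uint64_t len)
{
	const char *base = (const char *)rec;

	return (const char *)field + len > base + rec->size;
}

With a 64-byte record, for example, a raw_size claiming several kilobytes fails this test immediately, which is the case the PERF_SAMPLE_RAW checks above guard against.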
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 6710ab538342..f79bb2c09a6c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -81,11 +81,11 @@ void perf_evsel__free_id(struct perf_evsel *evsel);
81void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); 81void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
82 82
83int perf_evsel__open_per_cpu(struct perf_evsel *evsel, 83int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
84 struct cpu_map *cpus, bool group, bool inherit); 84 struct cpu_map *cpus, bool group);
85int perf_evsel__open_per_thread(struct perf_evsel *evsel, 85int perf_evsel__open_per_thread(struct perf_evsel *evsel,
86 struct thread_map *threads, bool group, bool inherit); 86 struct thread_map *threads, bool group);
87int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 87int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
88 struct thread_map *threads, bool group, bool inherit); 88 struct thread_map *threads, bool group);
89 89
90#define perf_evsel__match(evsel, t, c) \ 90#define perf_evsel__match(evsel, t, c) \
91 (evsel->attr.type == PERF_TYPE_##t && \ 91 (evsel->attr.type == PERF_TYPE_##t && \
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 93862a8027ea..afb0849fe530 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -193,9 +193,13 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
193 *linkname = malloc(size), *targetname; 193 *linkname = malloc(size), *targetname;
194 int len, err = -1; 194 int len, err = -1;
195 195
196 if (is_kallsyms) 196 if (is_kallsyms) {
197 if (symbol_conf.kptr_restrict) {
198 pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
199 return 0;
200 }
197 realname = (char *)name; 201 realname = (char *)name;
198 else 202 } else
199 realname = realpath(name, NULL); 203 realname = realpath(name, NULL);
200 204
201 if (realname == NULL || filename == NULL || linkname == NULL) 205 if (realname == NULL || filename == NULL || linkname == NULL)
@@ -934,37 +938,6 @@ out_delete_evlist:
934 return -ENOMEM; 938 return -ENOMEM;
935} 939}
936 940
937u64 perf_evlist__sample_type(struct perf_evlist *evlist)
938{
939 struct perf_evsel *pos;
940 u64 type = 0;
941
942 list_for_each_entry(pos, &evlist->entries, node) {
943 if (!type)
944 type = pos->attr.sample_type;
945 else if (type != pos->attr.sample_type)
946 die("non matching sample_type");
947 }
948
949 return type;
950}
951
952bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
953{
954 bool value = false, first = true;
955 struct perf_evsel *pos;
956
957 list_for_each_entry(pos, &evlist->entries, node) {
958 if (first) {
959 value = pos->attr.sample_id_all;
960 first = false;
961 } else if (value != pos->attr.sample_id_all)
962 die("non matching sample_id_all");
963 }
964
965 return value;
966}
967
968int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, 941int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
969 perf_event__handler_t process, 942 perf_event__handler_t process,
970 struct perf_session *session) 943 struct perf_session *session)
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 456661d7f10e..1886256768a1 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -64,8 +64,6 @@ int perf_header__write_pipe(int fd);
64int perf_header__push_event(u64 id, const char *name); 64int perf_header__push_event(u64 id, const char *name);
65char *perf_header__find_event(u64 id); 65char *perf_header__find_event(u64 id);
66 66
67u64 perf_evlist__sample_type(struct perf_evlist *evlist);
68bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
69void perf_header__set_feat(struct perf_header *header, int feat); 67void perf_header__set_feat(struct perf_header *header, int feat);
70void perf_header__clear_feat(struct perf_header *header, int feat); 68void perf_header__clear_feat(struct perf_header *header, int feat);
71bool perf_header__has_feat(const struct perf_header *header, int feat); 69bool perf_header__has_feat(const struct perf_header *header, int feat);
diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
new file mode 100644
index 000000000000..6789d788d494
--- /dev/null
+++ b/tools/perf/util/include/asm/alternative-asm.h
@@ -0,0 +1,8 @@
1#ifndef _PERF_ASM_ALTERNATIVE_ASM_H
2#define _PERF_ASM_ALTERNATIVE_ASM_H
3
4/* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */
5
6#define altinstruction_entry #
7
8#endif
diff --git a/tools/perf/util/include/linux/const.h b/tools/perf/util/include/linux/const.h
new file mode 100644
index 000000000000..1b476c9ae649
--- /dev/null
+++ b/tools/perf/util/include/linux/const.h
@@ -0,0 +1 @@
#include "../../../../include/linux/const.h"
diff --git a/tools/perf/util/include/linux/list.h b/tools/perf/util/include/linux/list.h
index 356c7e467b83..1d928a0ce997 100644
--- a/tools/perf/util/include/linux/list.h
+++ b/tools/perf/util/include/linux/list.h
@@ -1,4 +1,6 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/prefetch.h>
3
2#include "../../../../include/linux/list.h" 4#include "../../../../include/linux/list.h"
3 5
4#ifndef PERF_LIST_H 6#ifndef PERF_LIST_H
@@ -23,5 +25,5 @@ static inline void list_del_range(struct list_head *begin,
23 * @head: the head for your list. 25 * @head: the head for your list.
24 */ 26 */
25#define list_for_each_from(pos, head) \ 27#define list_for_each_from(pos, head) \
26 for (; prefetch(pos->next), pos != (head); pos = pos->next) 28 for (; pos != (head); pos = pos->next)
27#endif 29#endif
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 952b4ae3d954..41982c373faf 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -31,34 +31,36 @@ char debugfs_path[MAXPATHLEN];
31#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x 31#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
32 32
33static struct event_symbol event_symbols[] = { 33static struct event_symbol event_symbols[] = {
34 { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, 34 { CHW(CPU_CYCLES), "cpu-cycles", "cycles" },
35 { CHW(INSTRUCTIONS), "instructions", "" }, 35 { CHW(STALLED_CYCLES_FRONTEND), "stalled-cycles-frontend", "idle-cycles-frontend" },
36 { CHW(CACHE_REFERENCES), "cache-references", "" }, 36 { CHW(STALLED_CYCLES_BACKEND), "stalled-cycles-backend", "idle-cycles-backend" },
37 { CHW(CACHE_MISSES), "cache-misses", "" }, 37 { CHW(INSTRUCTIONS), "instructions", "" },
38 { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, 38 { CHW(CACHE_REFERENCES), "cache-references", "" },
39 { CHW(BRANCH_MISSES), "branch-misses", "" }, 39 { CHW(CACHE_MISSES), "cache-misses", "" },
40 { CHW(BUS_CYCLES), "bus-cycles", "" }, 40 { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
41 41 { CHW(BRANCH_MISSES), "branch-misses", "" },
42 { CSW(CPU_CLOCK), "cpu-clock", "" }, 42 { CHW(BUS_CYCLES), "bus-cycles", "" },
43 { CSW(TASK_CLOCK), "task-clock", "" }, 43
44 { CSW(PAGE_FAULTS), "page-faults", "faults" }, 44 { CSW(CPU_CLOCK), "cpu-clock", "" },
45 { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, 45 { CSW(TASK_CLOCK), "task-clock", "" },
46 { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, 46 { CSW(PAGE_FAULTS), "page-faults", "faults" },
47 { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, 47 { CSW(PAGE_FAULTS_MIN), "minor-faults", "" },
48 { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, 48 { CSW(PAGE_FAULTS_MAJ), "major-faults", "" },
49 { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, 49 { CSW(CONTEXT_SWITCHES), "context-switches", "cs" },
50 { CSW(EMULATION_FAULTS), "emulation-faults", "" }, 50 { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
51 { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" },
52 { CSW(EMULATION_FAULTS), "emulation-faults", "" },
51}; 53};
52 54
53#define __PERF_EVENT_FIELD(config, name) \ 55#define __PERF_EVENT_FIELD(config, name) \
54 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) 56 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
55 57
56#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) 58#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
57#define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) 59#define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
58#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) 60#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
59#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) 61#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
60 62
61static const char *hw_event_names[] = { 63static const char *hw_event_names[PERF_COUNT_HW_MAX] = {
62 "cycles", 64 "cycles",
63 "instructions", 65 "instructions",
64 "cache-references", 66 "cache-references",
@@ -66,11 +68,13 @@ static const char *hw_event_names[] = {
66 "branches", 68 "branches",
67 "branch-misses", 69 "branch-misses",
68 "bus-cycles", 70 "bus-cycles",
71 "stalled-cycles-frontend",
72 "stalled-cycles-backend",
69}; 73};
70 74
71static const char *sw_event_names[] = { 75static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
72 "cpu-clock-msecs", 76 "cpu-clock",
73 "task-clock-msecs", 77 "task-clock",
74 "page-faults", 78 "page-faults",
75 "context-switches", 79 "context-switches",
76 "CPU-migrations", 80 "CPU-migrations",
@@ -307,7 +311,7 @@ const char *__event_name(int type, u64 config)
307 311
308 switch (type) { 312 switch (type) {
309 case PERF_TYPE_HARDWARE: 313 case PERF_TYPE_HARDWARE:
310 if (config < PERF_COUNT_HW_MAX) 314 if (config < PERF_COUNT_HW_MAX && hw_event_names[config])
311 return hw_event_names[config]; 315 return hw_event_names[config];
312 return "unknown-hardware"; 316 return "unknown-hardware";
313 317
@@ -333,7 +337,7 @@ const char *__event_name(int type, u64 config)
333 } 337 }
334 338
335 case PERF_TYPE_SOFTWARE: 339 case PERF_TYPE_SOFTWARE:
336 if (config < PERF_COUNT_SW_MAX) 340 if (config < PERF_COUNT_SW_MAX && sw_event_names[config])
337 return sw_event_names[config]; 341 return sw_event_names[config];
338 return "unknown-software"; 342 return "unknown-software";
339 343
@@ -648,13 +652,15 @@ static int check_events(const char *str, unsigned int i)
648 int n; 652 int n;
649 653
650 n = strlen(event_symbols[i].symbol); 654 n = strlen(event_symbols[i].symbol);
651 if (!strncmp(str, event_symbols[i].symbol, n)) 655 if (!strncasecmp(str, event_symbols[i].symbol, n))
652 return n; 656 return n;
653 657
654 n = strlen(event_symbols[i].alias); 658 n = strlen(event_symbols[i].alias);
655 if (n) 659 if (n) {
656 if (!strncmp(str, event_symbols[i].alias, n)) 660 if (!strncasecmp(str, event_symbols[i].alias, n))
657 return n; 661 return n;
662 }
663
658 return 0; 664 return 0;
659} 665}
660 666
@@ -718,15 +724,22 @@ parse_numeric_event(const char **strp, struct perf_event_attr *attr)
718 return EVT_FAILED; 724 return EVT_FAILED;
719} 725}
720 726
721static enum event_result 727static int
722parse_event_modifier(const char **strp, struct perf_event_attr *attr) 728parse_event_modifier(const char **strp, struct perf_event_attr *attr)
723{ 729{
724 const char *str = *strp; 730 const char *str = *strp;
725 int exclude = 0; 731 int exclude = 0;
726 int eu = 0, ek = 0, eh = 0, precise = 0; 732 int eu = 0, ek = 0, eh = 0, precise = 0;
727 733
728 if (*str++ != ':') 734 if (!*str)
735 return 0;
736
737 if (*str == ',')
729 return 0; 738 return 0;
739
740 if (*str++ != ':')
741 return -1;
742
730 while (*str) { 743 while (*str) {
731 if (*str == 'u') { 744 if (*str == 'u') {
732 if (!exclude) 745 if (!exclude)
@@ -747,14 +760,16 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr)
747 760
748 ++str; 761 ++str;
749 } 762 }
750 if (str >= *strp + 2) { 763 if (str < *strp + 2)
751 *strp = str; 764 return -1;
752 attr->exclude_user = eu; 765
753 attr->exclude_kernel = ek; 766 *strp = str;
754 attr->exclude_hv = eh; 767
755 attr->precise_ip = precise; 768 attr->exclude_user = eu;
756 return 1; 769 attr->exclude_kernel = ek;
757 } 770 attr->exclude_hv = eh;
771 attr->precise_ip = precise;
772
758 return 0; 773 return 0;
759} 774}
760 775
@@ -797,7 +812,12 @@ parse_event_symbols(const struct option *opt, const char **str,
797 return EVT_FAILED; 812 return EVT_FAILED;
798 813
799modifier: 814modifier:
800 parse_event_modifier(str, attr); 815 if (parse_event_modifier(str, attr) < 0) {
816 fprintf(stderr, "invalid event modifier: '%s'\n", *str);
817 fprintf(stderr, "Run 'perf list' for a list of valid events and modifiers\n");
818
819 return EVT_FAILED;
820 }
801 821
802 return ret; 822 return ret;
803} 823}
@@ -912,7 +932,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
912 932
913 snprintf(evt_path, MAXPATHLEN, "%s:%s", 933 snprintf(evt_path, MAXPATHLEN, "%s:%s",
914 sys_dirent.d_name, evt_dirent.d_name); 934 sys_dirent.d_name, evt_dirent.d_name);
915 printf(" %-42s [%s]\n", evt_path, 935 printf(" %-50s [%s]\n", evt_path,
916 event_type_descriptors[PERF_TYPE_TRACEPOINT]); 936 event_type_descriptors[PERF_TYPE_TRACEPOINT]);
917 } 937 }
918 closedir(evt_dir); 938 closedir(evt_dir);
@@ -977,7 +997,7 @@ void print_events_type(u8 type)
977 else 997 else
978 snprintf(name, sizeof(name), "%s", syms->symbol); 998 snprintf(name, sizeof(name), "%s", syms->symbol);
979 999
980 printf(" %-42s [%s]\n", name, 1000 printf(" %-50s [%s]\n", name,
981 event_type_descriptors[type]); 1001 event_type_descriptors[type]);
982 } 1002 }
983} 1003}
@@ -995,11 +1015,10 @@ int print_hwcache_events(const char *event_glob)
995 for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { 1015 for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
996 char *name = event_cache_name(type, op, i); 1016 char *name = event_cache_name(type, op, i);
997 1017
998 if (event_glob != NULL && 1018 if (event_glob != NULL && !strglobmatch(name, event_glob))
999 !strglobmatch(name, event_glob))
1000 continue; 1019 continue;
1001 1020
1002 printf(" %-42s [%s]\n", name, 1021 printf(" %-50s [%s]\n", name,
1003 event_type_descriptors[PERF_TYPE_HW_CACHE]); 1022 event_type_descriptors[PERF_TYPE_HW_CACHE]);
1004 ++printed; 1023 ++printed;
1005 } 1024 }
@@ -1009,14 +1028,16 @@ int print_hwcache_events(const char *event_glob)
1009 return printed; 1028 return printed;
1010} 1029}
1011 1030
1031#define MAX_NAME_LEN 100
1032
1012/* 1033/*
1013 * Print the help text for the event symbols: 1034 * Print the help text for the event symbols:
1014 */ 1035 */
1015void print_events(const char *event_glob) 1036void print_events(const char *event_glob)
1016{ 1037{
1017 struct event_symbol *syms = event_symbols;
1018 unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0; 1038 unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0;
1019 char name[40]; 1039 struct event_symbol *syms = event_symbols;
1040 char name[MAX_NAME_LEN];
1020 1041
1021 printf("\n"); 1042 printf("\n");
1022 printf("List of pre-defined events (to be used in -e):\n"); 1043 printf("List of pre-defined events (to be used in -e):\n");
@@ -1036,10 +1057,10 @@ void print_events(const char *event_glob)
1036 continue; 1057 continue;
1037 1058
1038 if (strlen(syms->alias)) 1059 if (strlen(syms->alias))
1039 sprintf(name, "%s OR %s", syms->symbol, syms->alias); 1060 snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
1040 else 1061 else
1041 strcpy(name, syms->symbol); 1062 strncpy(name, syms->symbol, MAX_NAME_LEN);
1042 printf(" %-42s [%s]\n", name, 1063 printf(" %-50s [%s]\n", name,
1043 event_type_descriptors[type]); 1064 event_type_descriptors[type]);
1044 1065
1045 prev_type = type; 1066 prev_type = type;
@@ -1056,12 +1077,12 @@ void print_events(const char *event_glob)
1056 return; 1077 return;
1057 1078
1058 printf("\n"); 1079 printf("\n");
1059 printf(" %-42s [%s]\n", 1080 printf(" %-50s [%s]\n",
1060 "rNNN (see 'perf list --help' on how to encode it)", 1081 "rNNN (see 'perf list --help' on how to encode it)",
1061 event_type_descriptors[PERF_TYPE_RAW]); 1082 event_type_descriptors[PERF_TYPE_RAW]);
1062 printf("\n"); 1083 printf("\n");
1063 1084
1064 printf(" %-42s [%s]\n", 1085 printf(" %-50s [%s]\n",
1065 "mem:<addr>[:access]", 1086 "mem:<addr>[:access]",
1066 event_type_descriptors[PERF_TYPE_BREAKPOINT]); 1087 event_type_descriptors[PERF_TYPE_BREAKPOINT]);
1067 printf("\n"); 1088 printf("\n");
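Note on the parse-events.c hunks: parse_event_modifier() now distinguishes "no modifier" (end of string, or a ',' starting the next event) from a malformed suffix, and parse_event_symbols() turns the latter into EVT_FAILED with an explicit "invalid event modifier" message pointing at 'perf list'. A condensed sketch of that contract (hand-written for illustration; it elides the real code's "at least one letter after ':'" check):

struct mod_bits {
        int exclude_user, exclude_kernel, exclude_hv, precise_ip;
};

/*
 * Sketch: parse a ":ukhp"-style suffix.  Returns 0 on success (including
 * "no modifier present") and -1 on anything unrecognised.
 */
static int parse_mods(const char *s, struct mod_bits *m)
{
        int excl_set = 0;

        if (*s == '\0' || *s == ',')
                return 0;                       /* no modifier present */
        if (*s++ != ':')
                return -1;                      /* junk after the event name */

        for (; *s && *s != ','; s++) {
                switch (*s) {
                case 'u': case 'k': case 'h':
                        if (!excl_set) {        /* first domain letter excludes the rest */
                                m->exclude_user = m->exclude_kernel = m->exclude_hv = 1;
                                excl_set = 1;
                        }
                        if (*s == 'u')
                                m->exclude_user = 0;
                        else if (*s == 'k')
                                m->exclude_kernel = 0;
                        else
                                m->exclude_hv = 0;
                        break;
                case 'p':
                        m->precise_ip++;        /* each 'p' requests more precision */
                        break;
                default:
                        return -1;              /* unknown modifier letter */
                }
        }
        return 0;
}

The remaining changes in this file are additive: the stalled-cycles events join the symbol table, the hw/sw name arrays are sized to the PERF_COUNT_*_MAX enums and NULL-checked before use, event-name matching becomes case-insensitive, and the listing columns widen from 42 to 50 characters to fit the longer names.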
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 5ddee66020a7..f0223166e761 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -234,7 +234,6 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
234 234
235 /* Searching trace events corresponding to probe event */ 235 /* Searching trace events corresponding to probe event */
236 ntevs = find_probe_trace_events(fd, pev, tevs, max_tevs); 236 ntevs = find_probe_trace_events(fd, pev, tevs, max_tevs);
237 close(fd);
238 237
239 if (ntevs > 0) { /* Succeeded to find trace events */ 238 if (ntevs > 0) { /* Succeeded to find trace events */
240 pr_debug("find %d probe_trace_events.\n", ntevs); 239 pr_debug("find %d probe_trace_events.\n", ntevs);
@@ -388,7 +387,6 @@ int show_line_range(struct line_range *lr, const char *module)
388 } 387 }
389 388
390 ret = find_line_range(fd, lr); 389 ret = find_line_range(fd, lr);
391 close(fd);
392 if (ret == 0) { 390 if (ret == 0) {
393 pr_warning("Specified source line is not found.\n"); 391 pr_warning("Specified source line is not found.\n");
394 return -ENOENT; 392 return -ENOENT;
@@ -512,19 +510,18 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
512 if (ret < 0) 510 if (ret < 0)
513 return ret; 511 return ret;
514 512
515 fd = open_vmlinux(module);
516 if (fd < 0) {
517 pr_warning("Failed to open debug information file.\n");
518 return fd;
519 }
520
521 setup_pager(); 513 setup_pager();
522 514
523 for (i = 0; i < npevs && ret >= 0; i++) 515 for (i = 0; i < npevs && ret >= 0; i++) {
516 fd = open_vmlinux(module);
517 if (fd < 0) {
518 pr_warning("Failed to open debug information file.\n");
519 ret = fd;
520 break;
521 }
524 ret = show_available_vars_at(fd, &pevs[i], max_vls, _filter, 522 ret = show_available_vars_at(fd, &pevs[i], max_vls, _filter,
525 externs); 523 externs);
526 524 }
527 close(fd);
528 return ret; 525 return ret;
529} 526}
530 527
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 194f9e2a3285..3b9d0b800d5c 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -273,6 +273,25 @@ static const char *cu_get_comp_dir(Dwarf_Die *cu_die)
273 return dwarf_formstring(&attr); 273 return dwarf_formstring(&attr);
274} 274}
275 275
276/* Get a line number and file name for given address */
277static int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
278 const char **fname, int *lineno)
279{
280 Dwarf_Line *line;
281 Dwarf_Addr laddr;
282
283 line = dwarf_getsrc_die(cudie, (Dwarf_Addr)addr);
284 if (line && dwarf_lineaddr(line, &laddr) == 0 &&
285 addr == (unsigned long)laddr && dwarf_lineno(line, lineno) == 0) {
286 *fname = dwarf_linesrc(line, NULL, NULL);
287 if (!*fname)
288 /* line number is useless without filename */
289 *lineno = 0;
290 }
291
292 return *lineno ?: -ENOENT;
293}
294
276/* Compare diename and tname */ 295/* Compare diename and tname */
277static bool die_compare_name(Dwarf_Die *dw_die, const char *tname) 296static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
278{ 297{
@@ -497,7 +516,20 @@ static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
497static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, 516static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
498 Dwarf_Die *die_mem) 517 Dwarf_Die *die_mem)
499{ 518{
500 return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem); 519 Dwarf_Die tmp_die;
520
521 sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr, &tmp_die);
522 if (!sp_die)
523 return NULL;
524
525 /* Inlined function could be recursive. Trace it until fail */
526 while (sp_die) {
527 memcpy(die_mem, sp_die, sizeof(Dwarf_Die));
528 sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr,
529 &tmp_die);
530 }
531
532 return die_mem;
501} 533}
502 534
503/* Walker on lines (Note: line number will not be sorted) */ 535/* Walker on lines (Note: line number will not be sorted) */
@@ -1395,6 +1427,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
1395 !die_compare_name(sp_die, pp->function)) 1427 !die_compare_name(sp_die, pp->function))
1396 return DWARF_CB_OK; 1428 return DWARF_CB_OK;
1397 1429
1430 /* Check declared file */
1431 if (pp->file && strtailcmp(pp->file, dwarf_decl_file(sp_die)))
1432 return DWARF_CB_OK;
1433
1398 pf->fname = dwarf_decl_file(sp_die); 1434 pf->fname = dwarf_decl_file(sp_die);
1399 if (pp->line) { /* Function relative line */ 1435 if (pp->line) { /* Function relative line */
1400 dwarf_decl_line(sp_die, &pf->lno); 1436 dwarf_decl_line(sp_die, &pf->lno);
@@ -1435,6 +1471,38 @@ static int find_probe_point_by_func(struct probe_finder *pf)
1435 return _param.retval; 1471 return _param.retval;
1436} 1472}
1437 1473
1474struct pubname_callback_param {
1475 char *function;
1476 char *file;
1477 Dwarf_Die *cu_die;
1478 Dwarf_Die *sp_die;
1479 int found;
1480};
1481
1482static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data)
1483{
1484 struct pubname_callback_param *param = data;
1485
1486 if (dwarf_offdie(dbg, gl->die_offset, param->sp_die)) {
1487 if (dwarf_tag(param->sp_die) != DW_TAG_subprogram)
1488 return DWARF_CB_OK;
1489
1490 if (die_compare_name(param->sp_die, param->function)) {
1491 if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die))
1492 return DWARF_CB_OK;
1493
1494 if (param->file &&
1495 strtailcmp(param->file, dwarf_decl_file(param->sp_die)))
1496 return DWARF_CB_OK;
1497
1498 param->found = 1;
1499 return DWARF_CB_ABORT;
1500 }
1501 }
1502
1503 return DWARF_CB_OK;
1504}
1505
1438/* Find probe points from debuginfo */ 1506/* Find probe points from debuginfo */
1439static int find_probes(int fd, struct probe_finder *pf) 1507static int find_probes(int fd, struct probe_finder *pf)
1440{ 1508{
@@ -1451,6 +1519,7 @@ static int find_probes(int fd, struct probe_finder *pf)
1451 if (!dbg) { 1519 if (!dbg) {
1452 pr_warning("No debug information found in the vmlinux - " 1520 pr_warning("No debug information found in the vmlinux - "
1453 "please rebuild with CONFIG_DEBUG_INFO=y.\n"); 1521 "please rebuild with CONFIG_DEBUG_INFO=y.\n");
1522 close(fd); /* Without dwfl_end(), fd isn't closed. */
1454 return -EBADF; 1523 return -EBADF;
1455 } 1524 }
1456 1525
@@ -1461,6 +1530,28 @@ static int find_probes(int fd, struct probe_finder *pf)
1461 1530
1462 off = 0; 1531 off = 0;
1463 line_list__init(&pf->lcache); 1532 line_list__init(&pf->lcache);
1533
1534 /* Fastpath: lookup by function name from .debug_pubnames section */
1535 if (pp->function) {
1536 struct pubname_callback_param pubname_param = {
1537 .function = pp->function,
1538 .file = pp->file,
1539 .cu_die = &pf->cu_die,
1540 .sp_die = &pf->sp_die,
1541 .found = 0,
1542 };
1543 struct dwarf_callback_param probe_param = {
1544 .data = pf,
1545 };
1546
1547 dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0);
1548 if (pubname_param.found) {
1549 ret = probe_point_search_cb(&pf->sp_die, &probe_param);
1550 if (ret)
1551 goto found;
1552 }
1553 }
1554
1464 /* Loop on CUs (Compilation Unit) */ 1555 /* Loop on CUs (Compilation Unit) */
1465 while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) { 1556 while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) {
1466 /* Get the DIE(Debugging Information Entry) of this CU */ 1557 /* Get the DIE(Debugging Information Entry) of this CU */
@@ -1488,6 +1579,8 @@ static int find_probes(int fd, struct probe_finder *pf)
1488 } 1579 }
1489 off = noff; 1580 off = noff;
1490 } 1581 }
1582
1583found:
1491 line_list__free(&pf->lcache); 1584 line_list__free(&pf->lcache);
1492 if (dwfl) 1585 if (dwfl)
1493 dwfl_end(dwfl); 1586 dwfl_end(dwfl);
@@ -1686,11 +1779,9 @@ int find_perf_probe_point(unsigned long addr, struct perf_probe_point *ppt)
1686 Dwarf_Die cudie, spdie, indie; 1779 Dwarf_Die cudie, spdie, indie;
1687 Dwarf *dbg = NULL; 1780 Dwarf *dbg = NULL;
1688 Dwfl *dwfl = NULL; 1781 Dwfl *dwfl = NULL;
1689 Dwarf_Line *line; 1782 Dwarf_Addr _addr, baseaddr, bias = 0;
1690 Dwarf_Addr laddr, eaddr, bias = 0; 1783 const char *fname = NULL, *func = NULL, *tmp;
1691 const char *tmp; 1784 int baseline = 0, lineno = 0, ret = 0;
1692 int lineno, ret = 0;
1693 bool found = false;
1694 1785
1695 /* Open the live linux kernel */ 1786 /* Open the live linux kernel */
1696 dbg = dwfl_init_live_kernel_dwarf(addr, &dwfl, &bias); 1787 dbg = dwfl_init_live_kernel_dwarf(addr, &dwfl, &bias);
@@ -1711,68 +1802,79 @@ int find_perf_probe_point(unsigned long addr, struct perf_probe_point *ppt)
1711 goto end; 1802 goto end;
1712 } 1803 }
1713 1804
1714 /* Find a corresponding line */ 1805 /* Find a corresponding line (filename and lineno) */
1715 line = dwarf_getsrc_die(&cudie, (Dwarf_Addr)addr); 1806 cu_find_lineinfo(&cudie, addr, &fname, &lineno);
1716 if (line) { 1807 /* Don't care whether it failed or not */
1717 if (dwarf_lineaddr(line, &laddr) == 0 &&
1718 (Dwarf_Addr)addr == laddr &&
1719 dwarf_lineno(line, &lineno) == 0) {
1720 tmp = dwarf_linesrc(line, NULL, NULL);
1721 if (tmp) {
1722 ppt->line = lineno;
1723 ppt->file = strdup(tmp);
1724 if (ppt->file == NULL) {
1725 ret = -ENOMEM;
1726 goto end;
1727 }
1728 found = true;
1729 }
1730 }
1731 }
1732 1808
1733 /* Find a corresponding function */ 1809 /* Find a corresponding function (name, baseline and baseaddr) */
1734 if (die_find_real_subprogram(&cudie, (Dwarf_Addr)addr, &spdie)) { 1810 if (die_find_real_subprogram(&cudie, (Dwarf_Addr)addr, &spdie)) {
1811 /* Get function entry information */
1735 tmp = dwarf_diename(&spdie); 1812 tmp = dwarf_diename(&spdie);
1736 if (!tmp || dwarf_entrypc(&spdie, &eaddr) != 0) 1813 if (!tmp ||
1737 goto end; 1814 dwarf_entrypc(&spdie, &baseaddr) != 0 ||
1738 1815 dwarf_decl_line(&spdie, &baseline) != 0)
1739 if (ppt->line) { 1816 goto post;
1740 if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr, 1817 func = tmp;
1741 &indie)) { 1818
1742 /* addr in an inline function */ 1819 if (addr == (unsigned long)baseaddr)
1820 /* Function entry - Relative line number is 0 */
1821 lineno = baseline;
1822 else if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr,
1823 &indie)) {
1824 if (dwarf_entrypc(&indie, &_addr) == 0 &&
1825 _addr == addr)
1826 /*
1827 * addr is at an inline function entry.
1828 * In this case, lineno should be the call-site
1829 * line number.
1830 */
1831 lineno = die_get_call_lineno(&indie);
1832 else {
1833 /*
1834 * addr is in an inline function body.
1835 * Since lineno points one of the lines
1836 * of the inline function, baseline should
1837 * be the entry line of the inline function.
1838 */
1743 tmp = dwarf_diename(&indie); 1839 tmp = dwarf_diename(&indie);
1744 if (!tmp) 1840 if (tmp &&
1745 goto end; 1841 dwarf_decl_line(&spdie, &baseline) == 0)
1746 ret = dwarf_decl_line(&indie, &lineno); 1842 func = tmp;
1747 } else {
1748 if (eaddr == addr) { /* Function entry */
1749 lineno = ppt->line;
1750 ret = 0;
1751 } else
1752 ret = dwarf_decl_line(&spdie, &lineno);
1753 }
1754 if (ret == 0) {
1755 /* Make a relative line number */
1756 ppt->line -= lineno;
1757 goto found;
1758 } 1843 }
1759 } 1844 }
1760 /* We don't have a line number, let's use offset */ 1845 }
1761 ppt->offset = addr - (unsigned long)eaddr; 1846
1762found: 1847post:
1763 ppt->function = strdup(tmp); 1848 /* Make a relative line number or an offset */
1849 if (lineno)
1850 ppt->line = lineno - baseline;
1851 else if (func)
1852 ppt->offset = addr - (unsigned long)baseaddr;
1853
1854 /* Duplicate strings */
1855 if (func) {
1856 ppt->function = strdup(func);
1764 if (ppt->function == NULL) { 1857 if (ppt->function == NULL) {
1765 ret = -ENOMEM; 1858 ret = -ENOMEM;
1766 goto end; 1859 goto end;
1767 } 1860 }
1768 found = true;
1769 } 1861 }
1770 1862 if (fname) {
1863 ppt->file = strdup(fname);
1864 if (ppt->file == NULL) {
1865 if (ppt->function) {
1866 free(ppt->function);
1867 ppt->function = NULL;
1868 }
1869 ret = -ENOMEM;
1870 goto end;
1871 }
1872 }
1771end: 1873end:
1772 if (dwfl) 1874 if (dwfl)
1773 dwfl_end(dwfl); 1875 dwfl_end(dwfl);
1774 if (ret >= 0) 1876 if (ret == 0 && (fname || func))
1775 ret = found ? 1 : 0; 1877 ret = 1; /* Found a point */
1776 return ret; 1878 return ret;
1777} 1879}
1778 1880
@@ -1840,6 +1942,10 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
1840 struct line_finder *lf = param->data; 1942 struct line_finder *lf = param->data;
1841 struct line_range *lr = lf->lr; 1943 struct line_range *lr = lf->lr;
1842 1944
1945 /* Check declared file */
1946 if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die)))
1947 return DWARF_CB_OK;
1948
1843 if (dwarf_tag(sp_die) == DW_TAG_subprogram && 1949 if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
1844 die_compare_name(sp_die, lr->function)) { 1950 die_compare_name(sp_die, lr->function)) {
1845 lf->fname = dwarf_decl_file(sp_die); 1951 lf->fname = dwarf_decl_file(sp_die);
@@ -1892,9 +1998,26 @@ int find_line_range(int fd, struct line_range *lr)
1892 if (!dbg) { 1998 if (!dbg) {
1893 pr_warning("No debug information found in the vmlinux - " 1999 pr_warning("No debug information found in the vmlinux - "
1894 "please rebuild with CONFIG_DEBUG_INFO=y.\n"); 2000 "please rebuild with CONFIG_DEBUG_INFO=y.\n");
2001 close(fd); /* Without dwfl_end(), fd isn't closed. */
1895 return -EBADF; 2002 return -EBADF;
1896 } 2003 }
1897 2004
2005 /* Fastpath: lookup by function name from .debug_pubnames section */
2006 if (lr->function) {
2007 struct pubname_callback_param pubname_param = {
2008 .function = lr->function, .file = lr->file,
2009 .cu_die = &lf.cu_die, .sp_die = &lf.sp_die, .found = 0};
2010 struct dwarf_callback_param line_range_param = {
2011 .data = (void *)&lf, .retval = 0};
2012
2013 dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0);
2014 if (pubname_param.found) {
2015 line_range_search_cb(&lf.sp_die, &line_range_param);
2016 if (lf.found)
2017 goto found;
2018 }
2019 }
2020
1898 /* Loop on CUs (Compilation Unit) */ 2021 /* Loop on CUs (Compilation Unit) */
1899 while (!lf.found && ret >= 0) { 2022 while (!lf.found && ret >= 0) {
1900 if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0) 2023 if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0)
@@ -1923,6 +2046,7 @@ int find_line_range(int fd, struct line_range *lr)
1923 off = noff; 2046 off = noff;
1924 } 2047 }
1925 2048
2049found:
1926 /* Store comp_dir */ 2050 /* Store comp_dir */
1927 if (lf.found) { 2051 if (lf.found) {
1928 comp_dir = cu_get_comp_dir(&lf.cu_die); 2052 comp_dir = cu_get_comp_dir(&lf.cu_die);
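Note on the probe-finder.c hunks: both find_probes() and find_line_range() gain a fastpath that consults the .debug_pubnames accelerator table before falling back to the linear walk over every compilation unit, with pubname_search_cb() filtering the hits by DW_TAG_subprogram and, when given, by declaring file. Functions not listed there (static functions, for instance) still go through the existing CU loop, which is why it is kept. A standalone sketch of the same libdw calls (assumes elfutils is installed; link with -ldw), reduced to printing each global function name instead of resolving probe points:

#include <dwarf.h>
#include <elfutils/libdw.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int show_func(Dwarf *dbg, Dwarf_Global *gl, void *arg)
{
        Dwarf_Die die;
        const char *name;

        (void)arg;
        /* gl->die_offset addresses the DIE behind this public name */
        if (dwarf_offdie(dbg, gl->die_offset, &die) &&
            dwarf_tag(&die) == DW_TAG_subprogram) {
                name = dwarf_diename(&die);
                if (name)
                        printf("%s\n", name);
        }
        return DWARF_CB_OK;
}

int main(int argc, char **argv)
{
        Dwarf *dbg;
        int fd;

        if (argc < 2)
                return 1;

        fd = open(argv[1], O_RDONLY);
        dbg = fd >= 0 ? dwarf_begin(fd, DWARF_C_READ) : NULL;
        if (dbg) {
                dwarf_getpubnames(dbg, show_func, NULL, 0);
                dwarf_end(dbg);
        }
        if (fd >= 0)
                close(fd);
        return (fd >= 0 && dbg) ? 0 : 1;
}

The close(fd) additions in this file cover only the early-error path: once dwfl has taken over the descriptor, dwfl_end() is responsible for closing it, which is also why the bare close(fd) calls disappear from probe-event.c earlier in this diff.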
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index beaefc3c1223..605730a366db 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -49,6 +49,7 @@ struct probe_finder {
49 Dwarf_Addr addr; /* Address */ 49 Dwarf_Addr addr; /* Address */
50 const char *fname; /* Real file name */ 50 const char *fname; /* Real file name */
51 Dwarf_Die cu_die; /* Current CU */ 51 Dwarf_Die cu_die; /* Current CU */
52 Dwarf_Die sp_die;
52 struct list_head lcache; /* Line cache for lazy match */ 53 struct list_head lcache; /* Line cache for lazy match */
53 54
54 /* For variable searching */ 55 /* For variable searching */
@@ -83,6 +84,7 @@ struct line_finder {
83 int lno_s; /* Start line number */ 84 int lno_s; /* Start line number */
84 int lno_e; /* End line number */ 85 int lno_e; /* End line number */
85 Dwarf_Die cu_die; /* Current CU */ 86 Dwarf_Die cu_die; /* Current CU */
87 Dwarf_Die sp_die;
86 int found; 88 int found;
87}; 89};
88 90
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index a9f2d7e1204d..69436b3200a4 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -498,11 +498,11 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
498 struct cpu_map *cpus = NULL; 498 struct cpu_map *cpus = NULL;
499 struct thread_map *threads = NULL; 499 struct thread_map *threads = NULL;
500 PyObject *pcpus = NULL, *pthreads = NULL; 500 PyObject *pcpus = NULL, *pthreads = NULL;
501 int group = 0, overwrite = 0; 501 int group = 0, inherit = 0;
502 static char *kwlist[] = {"cpus", "threads", "group", "overwrite", NULL, NULL}; 502 static char *kwlist[] = {"cpus", "threads", "group", "inherit", NULL, NULL};
503 503
504 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, 504 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
505 &pcpus, &pthreads, &group, &overwrite)) 505 &pcpus, &pthreads, &group, &inherit))
506 return NULL; 506 return NULL;
507 507
508 if (pthreads != NULL) 508 if (pthreads != NULL)
@@ -511,7 +511,8 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
511 if (pcpus != NULL) 511 if (pcpus != NULL)
512 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; 512 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
513 513
514 if (perf_evsel__open(evsel, cpus, threads, group, overwrite) < 0) { 514 evsel->attr.inherit = inherit;
515 if (perf_evsel__open(evsel, cpus, threads, group) < 0) {
515 PyErr_SetFromErrno(PyExc_OSError); 516 PyErr_SetFromErrno(PyExc_OSError);
516 return NULL; 517 return NULL;
517 } 518 }
@@ -674,12 +675,13 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
674 union perf_event *event; 675 union perf_event *event;
675 int sample_id_all = 1, cpu; 676 int sample_id_all = 1, cpu;
676 static char *kwlist[] = {"sample_id_all", NULL, NULL}; 677 static char *kwlist[] = {"sample_id_all", NULL, NULL};
678 int err;
677 679
678 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist, 680 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
679 &cpu, &sample_id_all)) 681 &cpu, &sample_id_all))
680 return NULL; 682 return NULL;
681 683
682 event = perf_evlist__read_on_cpu(evlist, cpu); 684 event = perf_evlist__mmap_read(evlist, cpu);
683 if (event != NULL) { 685 if (event != NULL) {
684 struct perf_evsel *first; 686 struct perf_evsel *first;
685 PyObject *pyevent = pyrf_event__new(event); 687 PyObject *pyevent = pyrf_event__new(event);
@@ -689,11 +691,17 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
689 return PyErr_NoMemory(); 691 return PyErr_NoMemory();
690 692
691 first = list_entry(evlist->entries.next, struct perf_evsel, node); 693 first = list_entry(evlist->entries.next, struct perf_evsel, node);
692 perf_event__parse_sample(event, first->attr.sample_type, sample_id_all, 694 err = perf_event__parse_sample(event, first->attr.sample_type,
693 &pevent->sample); 695 perf_sample_size(first->attr.sample_type),
696 sample_id_all, &pevent->sample);
697 if (err) {
698 pr_err("Can't parse sample, err = %d\n", err);
699 goto end;
700 }
701
694 return pyevent; 702 return pyevent;
695 } 703 }
696 704end:
697 Py_INCREF(Py_None); 705 Py_INCREF(Py_None);
698 return Py_None; 706 return Py_None;
699} 707}
@@ -809,6 +817,9 @@ static struct {
809 { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS }, 817 { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS },
810 { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS }, 818 { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS },
811 819
820 { "COUNT_HW_STALLED_CYCLES_FRONTEND", PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
821 { "COUNT_HW_STALLED_CYCLES_BACKEND", PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
822
812 { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK }, 823 { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK },
813 { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK }, 824 { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK },
814 { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS }, 825 { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS },
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index caa224522fea..64500fc78799 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -97,6 +97,7 @@ out:
97void perf_session__update_sample_type(struct perf_session *self) 97void perf_session__update_sample_type(struct perf_session *self)
98{ 98{
99 self->sample_type = perf_evlist__sample_type(self->evlist); 99 self->sample_type = perf_evlist__sample_type(self->evlist);
100 self->sample_size = perf_sample_size(self->sample_type);
100 self->sample_id_all = perf_evlist__sample_id_all(self->evlist); 101 self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
101 perf_session__id_header_size(self); 102 perf_session__id_header_size(self);
102} 103}
@@ -479,6 +480,7 @@ static void flush_sample_queue(struct perf_session *s,
479 struct perf_sample sample; 480 struct perf_sample sample;
480 u64 limit = os->next_flush; 481 u64 limit = os->next_flush;
481 u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL; 482 u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
483 int ret;
482 484
483 if (!ops->ordered_samples || !limit) 485 if (!ops->ordered_samples || !limit)
484 return; 486 return;
@@ -487,9 +489,12 @@ static void flush_sample_queue(struct perf_session *s,
487 if (iter->timestamp > limit) 489 if (iter->timestamp > limit)
488 break; 490 break;
489 491
490 perf_session__parse_sample(s, iter->event, &sample); 492 ret = perf_session__parse_sample(s, iter->event, &sample);
491 perf_session_deliver_event(s, iter->event, &sample, ops, 493 if (ret)
492 iter->file_offset); 494 pr_err("Can't parse sample, err = %d\n", ret);
495 else
496 perf_session_deliver_event(s, iter->event, &sample, ops,
497 iter->file_offset);
493 498
494 os->last_flush = iter->timestamp; 499 os->last_flush = iter->timestamp;
495 list_del(&iter->list); 500 list_del(&iter->list);
@@ -805,7 +810,9 @@ static int perf_session__process_event(struct perf_session *session,
805 /* 810 /*
806 * For all kernel events we get the sample data 811 * For all kernel events we get the sample data
807 */ 812 */
808 perf_session__parse_sample(session, event, &sample); 813 ret = perf_session__parse_sample(session, event, &sample);
814 if (ret)
815 return ret;
809 816
810 /* Preprocess sample records - precheck callchains */ 817 /* Preprocess sample records - precheck callchains */
811 if (perf_session__preprocess_sample(session, event, &sample)) 818 if (perf_session__preprocess_sample(session, event, &sample))
@@ -953,6 +960,30 @@ out_err:
953 return err; 960 return err;
954} 961}
955 962
963static union perf_event *
964fetch_mmaped_event(struct perf_session *session,
965 u64 head, size_t mmap_size, char *buf)
966{
967 union perf_event *event;
968
969 /*
970 * Ensure we have enough space remaining to read
971 * the size of the event in the headers.
972 */
973 if (head + sizeof(event->header) > mmap_size)
974 return NULL;
975
976 event = (union perf_event *)(buf + head);
977
978 if (session->header.needs_swap)
979 perf_event_header__bswap(&event->header);
980
981 if (head + event->header.size > mmap_size)
982 return NULL;
983
984 return event;
985}
986
956int __perf_session__process_events(struct perf_session *session, 987int __perf_session__process_events(struct perf_session *session,
957 u64 data_offset, u64 data_size, 988 u64 data_offset, u64 data_size,
958 u64 file_size, struct perf_event_ops *ops) 989 u64 file_size, struct perf_event_ops *ops)
@@ -1007,15 +1038,8 @@ remap:
1007 file_pos = file_offset + head; 1038 file_pos = file_offset + head;
1008 1039
1009more: 1040more:
1010 event = (union perf_event *)(buf + head); 1041 event = fetch_mmaped_event(session, head, mmap_size, buf);
1011 1042 if (!event) {
1012 if (session->header.needs_swap)
1013 perf_event_header__bswap(&event->header);
1014 size = event->header.size;
1015 if (size == 0)
1016 size = 8;
1017
1018 if (head + event->header.size > mmap_size) {
1019 if (mmaps[map_idx]) { 1043 if (mmaps[map_idx]) {
1020 munmap(mmaps[map_idx], mmap_size); 1044 munmap(mmaps[map_idx], mmap_size);
1021 mmaps[map_idx] = NULL; 1045 mmaps[map_idx] = NULL;
@@ -1156,6 +1180,18 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
1156 return ret; 1180 return ret;
1157} 1181}
1158 1182
1183struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
1184 unsigned int type)
1185{
1186 struct perf_evsel *pos;
1187
1188 list_for_each_entry(pos, &session->evlist->entries, node) {
1189 if (pos->attr.type == type)
1190 return pos;
1191 }
1192 return NULL;
1193}
1194
1159void perf_session__print_symbols(union perf_event *event, 1195void perf_session__print_symbols(union perf_event *event,
1160 struct perf_sample *sample, 1196 struct perf_sample *sample,
1161 struct perf_session *session) 1197 struct perf_session *session)
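Note on the session.c hunks: sample parsing can now fail, so both the ordered-sample flush and perf_session__process_event() check the return of perf_session__parse_sample() instead of assuming success, fetch_mmaped_event() centralises the bounds checking that used to be open-coded in __perf_session__process_events(), and a small helper, perf_session__find_first_evtype(), is exported for callers that need the first evsel of a given attr.type. The shape of the bounds check, restated generically (illustration only, with a stand-in header type rather than struct perf_event_header):

#include <stddef.h>
#include <stdint.h>

/* Stand-in for a length-prefixed record header in an mmap'd window */
struct rec_header {
        uint32_t type;
        uint16_t misc;
        uint16_t size;  /* total record size, header included */
};

/*
 * Sketch: return the record at 'head', or NULL if either the header or the
 * full record would run past the end of the mapped window, in which case the
 * caller remaps further into the file.
 */
static const struct rec_header *next_record(const char *buf, size_t head,
                                            size_t window)
{
        const struct rec_header *h;

        if (head + sizeof(*h) > window)         /* header itself is truncated */
                return NULL;
        h = (const struct rec_header *)(buf + head);
        if (head + h->size > window)            /* payload crosses the window */
                return NULL;
        return h;
}

One detail worth noting from the real hunk: the byte swap of the header happens before the size comparison, so a file recorded on an opposite-endian machine is still measured with its true record size.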
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 1ac481fc1100..66d4e1490879 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -43,6 +43,7 @@ struct perf_session {
43 */ 43 */
44 struct hists hists; 44 struct hists hists;
45 u64 sample_type; 45 u64 sample_type;
46 int sample_size;
46 int fd; 47 int fd;
47 bool fd_pipe; 48 bool fd_pipe;
48 bool repipe; 49 bool repipe;
@@ -159,9 +160,13 @@ static inline int perf_session__parse_sample(struct perf_session *session,
159 struct perf_sample *sample) 160 struct perf_sample *sample)
160{ 161{
161 return perf_event__parse_sample(event, session->sample_type, 162 return perf_event__parse_sample(event, session->sample_type,
163 session->sample_size,
162 session->sample_id_all, sample); 164 session->sample_id_all, sample);
163} 165}
164 166
167struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
168 unsigned int type);
169
165void perf_session__print_symbols(union perf_event *event, 170void perf_session__print_symbols(union perf_event *event,
166 struct perf_sample *sample, 171 struct perf_sample *sample,
167 struct perf_session *session); 172 struct perf_session *session);
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index e24ffadb20b2..bbc982f5dd8b 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -1,13 +1,18 @@
1#!/usr/bin/python2 1#!/usr/bin/python2
2 2
3from distutils.core import setup, Extension 3from distutils.core import setup, Extension
4from os import getenv
5
6cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
7cflags += getenv('CFLAGS', '').split()
4 8
5perf = Extension('perf', 9perf = Extension('perf',
6 sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c', 10 sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c',
7 'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c', 11 'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c',
8 'util/util.c', 'util/xyarray.c', 'util/cgroup.c'], 12 'util/util.c', 'util/xyarray.c', 'util/cgroup.c'],
9 include_dirs = ['util/include'], 13 include_dirs = ['util/include'],
10 extra_compile_args = ['-fno-strict-aliasing', '-Wno-write-strings']) 14 extra_compile_args = cflags,
15 )
11 16
12setup(name='perf', 17setup(name='perf',
13 version='0.1', 18 version='0.1',
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 8fc0bd3a3a4a..b9a985dadd08 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -85,7 +85,7 @@ out:
85 85
86/* 86/*
87 * Helper function for splitting a string into an argv-like array. 87 * Helper function for splitting a string into an argv-like array.
88 * originaly copied from lib/argv_split.c 88 * originally copied from lib/argv_split.c
89 */ 89 */
90static const char *skip_sep(const char *cp) 90static const char *skip_sep(const char *cp)
91{ 91{
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 17df793c8924..eec196329fd9 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -31,13 +31,13 @@
31#define NT_GNU_BUILD_ID 3 31#define NT_GNU_BUILD_ID 3
32#endif 32#endif
33 33
34static bool dso__build_id_equal(const struct dso *self, u8 *build_id); 34static bool dso__build_id_equal(const struct dso *dso, u8 *build_id);
35static int elf_read_build_id(Elf *elf, void *bf, size_t size); 35static int elf_read_build_id(Elf *elf, void *bf, size_t size);
36static void dsos__add(struct list_head *head, struct dso *dso); 36static void dsos__add(struct list_head *head, struct dso *dso);
37static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); 37static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
38static int dso__load_kernel_sym(struct dso *self, struct map *map, 38static int dso__load_kernel_sym(struct dso *dso, struct map *map,
39 symbol_filter_t filter); 39 symbol_filter_t filter);
40static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, 40static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
41 symbol_filter_t filter); 41 symbol_filter_t filter);
42static int vmlinux_path__nr_entries; 42static int vmlinux_path__nr_entries;
43static char **vmlinux_path; 43static char **vmlinux_path;
@@ -49,27 +49,27 @@ struct symbol_conf symbol_conf = {
49 .symfs = "", 49 .symfs = "",
50}; 50};
51 51
52int dso__name_len(const struct dso *self) 52int dso__name_len(const struct dso *dso)
53{ 53{
54 if (verbose) 54 if (verbose)
55 return self->long_name_len; 55 return dso->long_name_len;
56 56
57 return self->short_name_len; 57 return dso->short_name_len;
58} 58}
59 59
60bool dso__loaded(const struct dso *self, enum map_type type) 60bool dso__loaded(const struct dso *dso, enum map_type type)
61{ 61{
62 return self->loaded & (1 << type); 62 return dso->loaded & (1 << type);
63} 63}
64 64
65bool dso__sorted_by_name(const struct dso *self, enum map_type type) 65bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
66{ 66{
67 return self->sorted_by_name & (1 << type); 67 return dso->sorted_by_name & (1 << type);
68} 68}
69 69
70static void dso__set_sorted_by_name(struct dso *self, enum map_type type) 70static void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
71{ 71{
72 self->sorted_by_name |= (1 << type); 72 dso->sorted_by_name |= (1 << type);
73} 73}
74 74
75bool symbol_type__is_a(char symbol_type, enum map_type map_type) 75bool symbol_type__is_a(char symbol_type, enum map_type map_type)
@@ -84,9 +84,9 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type)
84 } 84 }
85} 85}
86 86
87static void symbols__fixup_end(struct rb_root *self) 87static void symbols__fixup_end(struct rb_root *symbols)
88{ 88{
89 struct rb_node *nd, *prevnd = rb_first(self); 89 struct rb_node *nd, *prevnd = rb_first(symbols);
90 struct symbol *curr, *prev; 90 struct symbol *curr, *prev;
91 91
92 if (prevnd == NULL) 92 if (prevnd == NULL)
@@ -107,10 +107,10 @@ static void symbols__fixup_end(struct rb_root *self)
107 curr->end = roundup(curr->start, 4096); 107 curr->end = roundup(curr->start, 4096);
108} 108}
109 109
110static void __map_groups__fixup_end(struct map_groups *self, enum map_type type) 110static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
111{ 111{
112 struct map *prev, *curr; 112 struct map *prev, *curr;
113 struct rb_node *nd, *prevnd = rb_first(&self->maps[type]); 113 struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]);
114 114
115 if (prevnd == NULL) 115 if (prevnd == NULL)
116 return; 116 return;
@@ -130,128 +130,128 @@ static void __map_groups__fixup_end(struct map_groups *self, enum map_type type)
130 curr->end = ~0ULL; 130 curr->end = ~0ULL;
131} 131}
132 132
133static void map_groups__fixup_end(struct map_groups *self) 133static void map_groups__fixup_end(struct map_groups *mg)
134{ 134{
135 int i; 135 int i;
136 for (i = 0; i < MAP__NR_TYPES; ++i) 136 for (i = 0; i < MAP__NR_TYPES; ++i)
137 __map_groups__fixup_end(self, i); 137 __map_groups__fixup_end(mg, i);
138} 138}
139 139
140static struct symbol *symbol__new(u64 start, u64 len, u8 binding, 140static struct symbol *symbol__new(u64 start, u64 len, u8 binding,
141 const char *name) 141 const char *name)
142{ 142{
143 size_t namelen = strlen(name) + 1; 143 size_t namelen = strlen(name) + 1;
144 struct symbol *self = calloc(1, (symbol_conf.priv_size + 144 struct symbol *sym = calloc(1, (symbol_conf.priv_size +
145 sizeof(*self) + namelen)); 145 sizeof(*sym) + namelen));
146 if (self == NULL) 146 if (sym == NULL)
147 return NULL; 147 return NULL;
148 148
149 if (symbol_conf.priv_size) 149 if (symbol_conf.priv_size)
150 self = ((void *)self) + symbol_conf.priv_size; 150 sym = ((void *)sym) + symbol_conf.priv_size;
151
152 self->start = start;
153 self->end = len ? start + len - 1 : start;
154 self->binding = binding;
155 self->namelen = namelen - 1;
156 151
157 pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", __func__, name, start, self->end); 152 sym->start = start;
153 sym->end = len ? start + len - 1 : start;
154 sym->binding = binding;
155 sym->namelen = namelen - 1;
158 156
159 memcpy(self->name, name, namelen); 157 pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
158 __func__, name, start, sym->end);
159 memcpy(sym->name, name, namelen);
160 160
161 return self; 161 return sym;
162} 162}
163 163
164void symbol__delete(struct symbol *self) 164void symbol__delete(struct symbol *sym)
165{ 165{
166 free(((void *)self) - symbol_conf.priv_size); 166 free(((void *)sym) - symbol_conf.priv_size);
167} 167}
168 168
169static size_t symbol__fprintf(struct symbol *self, FILE *fp) 169static size_t symbol__fprintf(struct symbol *sym, FILE *fp)
170{ 170{
171 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n", 171 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
172 self->start, self->end, 172 sym->start, sym->end,
173 self->binding == STB_GLOBAL ? 'g' : 173 sym->binding == STB_GLOBAL ? 'g' :
174 self->binding == STB_LOCAL ? 'l' : 'w', 174 sym->binding == STB_LOCAL ? 'l' : 'w',
175 self->name); 175 sym->name);
176} 176}
177 177
178void dso__set_long_name(struct dso *self, char *name) 178void dso__set_long_name(struct dso *dso, char *name)
179{ 179{
180 if (name == NULL) 180 if (name == NULL)
181 return; 181 return;
182 self->long_name = name; 182 dso->long_name = name;
183 self->long_name_len = strlen(name); 183 dso->long_name_len = strlen(name);
184} 184}
185 185
186static void dso__set_short_name(struct dso *self, const char *name) 186static void dso__set_short_name(struct dso *dso, const char *name)
187{ 187{
188 if (name == NULL) 188 if (name == NULL)
189 return; 189 return;
190 self->short_name = name; 190 dso->short_name = name;
191 self->short_name_len = strlen(name); 191 dso->short_name_len = strlen(name);
192} 192}
193 193
194static void dso__set_basename(struct dso *self) 194static void dso__set_basename(struct dso *dso)
195{ 195{
196 dso__set_short_name(self, basename(self->long_name)); 196 dso__set_short_name(dso, basename(dso->long_name));
197} 197}
198 198
199struct dso *dso__new(const char *name) 199struct dso *dso__new(const char *name)
200{ 200{
201 struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1); 201 struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
202 202
203 if (self != NULL) { 203 if (dso != NULL) {
204 int i; 204 int i;
205 strcpy(self->name, name); 205 strcpy(dso->name, name);
206 dso__set_long_name(self, self->name); 206 dso__set_long_name(dso, dso->name);
207 dso__set_short_name(self, self->name); 207 dso__set_short_name(dso, dso->name);
208 for (i = 0; i < MAP__NR_TYPES; ++i) 208 for (i = 0; i < MAP__NR_TYPES; ++i)
209 self->symbols[i] = self->symbol_names[i] = RB_ROOT; 209 dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
210 self->symtab_type = SYMTAB__NOT_FOUND; 210 dso->symtab_type = SYMTAB__NOT_FOUND;
211 self->loaded = 0; 211 dso->loaded = 0;
212 self->sorted_by_name = 0; 212 dso->sorted_by_name = 0;
213 self->has_build_id = 0; 213 dso->has_build_id = 0;
214 self->kernel = DSO_TYPE_USER; 214 dso->kernel = DSO_TYPE_USER;
215 INIT_LIST_HEAD(&self->node); 215 INIT_LIST_HEAD(&dso->node);
216 } 216 }
217 217
218 return self; 218 return dso;
219} 219}
220 220
221static void symbols__delete(struct rb_root *self) 221static void symbols__delete(struct rb_root *symbols)
222{ 222{
223 struct symbol *pos; 223 struct symbol *pos;
224 struct rb_node *next = rb_first(self); 224 struct rb_node *next = rb_first(symbols);
225 225
226 while (next) { 226 while (next) {
227 pos = rb_entry(next, struct symbol, rb_node); 227 pos = rb_entry(next, struct symbol, rb_node);
228 next = rb_next(&pos->rb_node); 228 next = rb_next(&pos->rb_node);
229 rb_erase(&pos->rb_node, self); 229 rb_erase(&pos->rb_node, symbols);
230 symbol__delete(pos); 230 symbol__delete(pos);
231 } 231 }
232} 232}
233 233
234void dso__delete(struct dso *self) 234void dso__delete(struct dso *dso)
235{ 235{
236 int i; 236 int i;
237 for (i = 0; i < MAP__NR_TYPES; ++i) 237 for (i = 0; i < MAP__NR_TYPES; ++i)
238 symbols__delete(&self->symbols[i]); 238 symbols__delete(&dso->symbols[i]);
239 if (self->sname_alloc) 239 if (dso->sname_alloc)
240 free((char *)self->short_name); 240 free((char *)dso->short_name);
241 if (self->lname_alloc) 241 if (dso->lname_alloc)
242 free(self->long_name); 242 free(dso->long_name);
243 free(self); 243 free(dso);
244} 244}
245 245
246void dso__set_build_id(struct dso *self, void *build_id) 246void dso__set_build_id(struct dso *dso, void *build_id)
247{ 247{
248 memcpy(self->build_id, build_id, sizeof(self->build_id)); 248 memcpy(dso->build_id, build_id, sizeof(dso->build_id));
249 self->has_build_id = 1; 249 dso->has_build_id = 1;
250} 250}
251 251
252static void symbols__insert(struct rb_root *self, struct symbol *sym) 252static void symbols__insert(struct rb_root *symbols, struct symbol *sym)
253{ 253{
254 struct rb_node **p = &self->rb_node; 254 struct rb_node **p = &symbols->rb_node;
255 struct rb_node *parent = NULL; 255 struct rb_node *parent = NULL;
256 const u64 ip = sym->start; 256 const u64 ip = sym->start;
257 struct symbol *s; 257 struct symbol *s;
@@ -265,17 +265,17 @@ static void symbols__insert(struct rb_root *self, struct symbol *sym)
265 p = &(*p)->rb_right; 265 p = &(*p)->rb_right;
266 } 266 }
267 rb_link_node(&sym->rb_node, parent, p); 267 rb_link_node(&sym->rb_node, parent, p);
268 rb_insert_color(&sym->rb_node, self); 268 rb_insert_color(&sym->rb_node, symbols);
269} 269}
270 270
271static struct symbol *symbols__find(struct rb_root *self, u64 ip) 271static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
272{ 272{
273 struct rb_node *n; 273 struct rb_node *n;
274 274
275 if (self == NULL) 275 if (symbols == NULL)
276 return NULL; 276 return NULL;
277 277
278 n = self->rb_node; 278 n = symbols->rb_node;
279 279
280 while (n) { 280 while (n) {
281 struct symbol *s = rb_entry(n, struct symbol, rb_node); 281 struct symbol *s = rb_entry(n, struct symbol, rb_node);
@@ -296,9 +296,9 @@ struct symbol_name_rb_node {
296 struct symbol sym; 296 struct symbol sym;
297}; 297};
298 298
299static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym) 299static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
300{ 300{
301 struct rb_node **p = &self->rb_node; 301 struct rb_node **p = &symbols->rb_node;
302 struct rb_node *parent = NULL; 302 struct rb_node *parent = NULL;
303 struct symbol_name_rb_node *symn, *s; 303 struct symbol_name_rb_node *symn, *s;
304 304
@@ -313,27 +313,29 @@ static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym)
313 p = &(*p)->rb_right; 313 p = &(*p)->rb_right;
314 } 314 }
315 rb_link_node(&symn->rb_node, parent, p); 315 rb_link_node(&symn->rb_node, parent, p);
316 rb_insert_color(&symn->rb_node, self); 316 rb_insert_color(&symn->rb_node, symbols);
317} 317}
318 318
319static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source) 319static void symbols__sort_by_name(struct rb_root *symbols,
320 struct rb_root *source)
320{ 321{
321 struct rb_node *nd; 322 struct rb_node *nd;
322 323
323 for (nd = rb_first(source); nd; nd = rb_next(nd)) { 324 for (nd = rb_first(source); nd; nd = rb_next(nd)) {
324 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 325 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
325 symbols__insert_by_name(self, pos); 326 symbols__insert_by_name(symbols, pos);
326 } 327 }
327} 328}
328 329
329static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name) 330static struct symbol *symbols__find_by_name(struct rb_root *symbols,
331 const char *name)
330{ 332{
331 struct rb_node *n; 333 struct rb_node *n;
332 334
333 if (self == NULL) 335 if (symbols == NULL)
334 return NULL; 336 return NULL;
335 337
336 n = self->rb_node; 338 n = symbols->rb_node;
337 339
338 while (n) { 340 while (n) {
339 struct symbol_name_rb_node *s; 341 struct symbol_name_rb_node *s;
@@ -353,29 +355,29 @@ static struct symbol *symbols__find_by_name(struct rb_root *self, const char *na
353 return NULL; 355 return NULL;
354} 356}
355 357
356struct symbol *dso__find_symbol(struct dso *self, 358struct symbol *dso__find_symbol(struct dso *dso,
357 enum map_type type, u64 addr) 359 enum map_type type, u64 addr)
358{ 360{
359 return symbols__find(&self->symbols[type], addr); 361 return symbols__find(&dso->symbols[type], addr);
360} 362}
361 363
362struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, 364struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
363 const char *name) 365 const char *name)
364{ 366{
365 return symbols__find_by_name(&self->symbol_names[type], name); 367 return symbols__find_by_name(&dso->symbol_names[type], name);
366} 368}
367 369
368void dso__sort_by_name(struct dso *self, enum map_type type) 370void dso__sort_by_name(struct dso *dso, enum map_type type)
369{ 371{
370 dso__set_sorted_by_name(self, type); 372 dso__set_sorted_by_name(dso, type);
371 return symbols__sort_by_name(&self->symbol_names[type], 373 return symbols__sort_by_name(&dso->symbol_names[type],
372 &self->symbols[type]); 374 &dso->symbols[type]);
373} 375}
374 376
375int build_id__sprintf(const u8 *self, int len, char *bf) 377int build_id__sprintf(const u8 *build_id, int len, char *bf)
376{ 378{
377 char *bid = bf; 379 char *bid = bf;
378 const u8 *raw = self; 380 const u8 *raw = build_id;
379 int i; 381 int i;
380 382
381 for (i = 0; i < len; ++i) { 383 for (i = 0; i < len; ++i) {
@@ -384,24 +386,25 @@ int build_id__sprintf(const u8 *self, int len, char *bf)
384 bid += 2; 386 bid += 2;
385 } 387 }
386 388
387 return raw - self; 389 return raw - build_id;
388} 390}
389 391
390size_t dso__fprintf_buildid(struct dso *self, FILE *fp) 392size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
391{ 393{
392 char sbuild_id[BUILD_ID_SIZE * 2 + 1]; 394 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
393 395
394 build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); 396 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
395 return fprintf(fp, "%s", sbuild_id); 397 return fprintf(fp, "%s", sbuild_id);
396} 398}
397 399
398size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp) 400size_t dso__fprintf_symbols_by_name(struct dso *dso,
401 enum map_type type, FILE *fp)
399{ 402{
400 size_t ret = 0; 403 size_t ret = 0;
401 struct rb_node *nd; 404 struct rb_node *nd;
402 struct symbol_name_rb_node *pos; 405 struct symbol_name_rb_node *pos;
403 406
404 for (nd = rb_first(&self->symbol_names[type]); nd; nd = rb_next(nd)) { 407 for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
405 pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); 408 pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
406 fprintf(fp, "%s\n", pos->sym.name); 409 fprintf(fp, "%s\n", pos->sym.name);
407 } 410 }
@@ -409,18 +412,18 @@ size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *
409 return ret; 412 return ret;
410} 413}
411 414
412size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp) 415size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
413{ 416{
414 struct rb_node *nd; 417 struct rb_node *nd;
415 size_t ret = fprintf(fp, "dso: %s (", self->short_name); 418 size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
416 419
417 if (self->short_name != self->long_name) 420 if (dso->short_name != dso->long_name)
418 ret += fprintf(fp, "%s, ", self->long_name); 421 ret += fprintf(fp, "%s, ", dso->long_name);
419 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], 422 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
420 self->loaded ? "" : "NOT "); 423 dso->loaded ? "" : "NOT ");
421 ret += dso__fprintf_buildid(self, fp); 424 ret += dso__fprintf_buildid(dso, fp);
422 ret += fprintf(fp, ")\n"); 425 ret += fprintf(fp, ")\n");
423 for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) { 426 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
424 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 427 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
425 ret += symbol__fprintf(pos, fp); 428 ret += symbol__fprintf(pos, fp);
426 } 429 }
@@ -543,10 +546,10 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
543 * so that we can in the next step set the symbol ->end address and then 546 * so that we can in the next step set the symbol ->end address and then
544 * call kernel_maps__split_kallsyms. 547 * call kernel_maps__split_kallsyms.
545 */ 548 */
546static int dso__load_all_kallsyms(struct dso *self, const char *filename, 549static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
547 struct map *map) 550 struct map *map)
548{ 551{
549 struct process_kallsyms_args args = { .map = map, .dso = self, }; 552 struct process_kallsyms_args args = { .map = map, .dso = dso, };
550 return kallsyms__parse(filename, &args, map__process_kallsym_symbol); 553 return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
551} 554}
552 555
@@ -555,7 +558,7 @@ static int dso__load_all_kallsyms(struct dso *self, const char *filename,
555 * kernel range is broken in several maps, named [kernel].N, as we don't have 558 * kernel range is broken in several maps, named [kernel].N, as we don't have
556 * the original ELF section names vmlinux have. 559 * the original ELF section names vmlinux have.
557 */ 560 */
558static int dso__split_kallsyms(struct dso *self, struct map *map, 561static int dso__split_kallsyms(struct dso *dso, struct map *map,
559 symbol_filter_t filter) 562 symbol_filter_t filter)
560{ 563{
561 struct map_groups *kmaps = map__kmap(map)->kmaps; 564 struct map_groups *kmaps = map__kmap(map)->kmaps;
@@ -563,7 +566,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
563 struct map *curr_map = map; 566 struct map *curr_map = map;
564 struct symbol *pos; 567 struct symbol *pos;
565 int count = 0, moved = 0; 568 int count = 0, moved = 0;
566 struct rb_root *root = &self->symbols[map->type]; 569 struct rb_root *root = &dso->symbols[map->type];
567 struct rb_node *next = rb_first(root); 570 struct rb_node *next = rb_first(root);
568 int kernel_range = 0; 571 int kernel_range = 0;
569 572
@@ -582,7 +585,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
582 585
583 if (strcmp(curr_map->dso->short_name, module)) { 586 if (strcmp(curr_map->dso->short_name, module)) {
584 if (curr_map != map && 587 if (curr_map != map &&
585 self->kernel == DSO_TYPE_GUEST_KERNEL && 588 dso->kernel == DSO_TYPE_GUEST_KERNEL &&
586 machine__is_default_guest(machine)) { 589 machine__is_default_guest(machine)) {
587 /* 590 /*
588 * We assume all symbols of a module are 591 * We assume all symbols of a module are
@@ -618,14 +621,14 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
618 pos->end = curr_map->map_ip(curr_map, pos->end); 621 pos->end = curr_map->map_ip(curr_map, pos->end);
619 } else if (curr_map != map) { 622 } else if (curr_map != map) {
620 char dso_name[PATH_MAX]; 623 char dso_name[PATH_MAX];
621 struct dso *dso; 624 struct dso *ndso;
622 625
623 if (count == 0) { 626 if (count == 0) {
624 curr_map = map; 627 curr_map = map;
625 goto filter_symbol; 628 goto filter_symbol;
626 } 629 }
627 630
628 if (self->kernel == DSO_TYPE_GUEST_KERNEL) 631 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
629 snprintf(dso_name, sizeof(dso_name), 632 snprintf(dso_name, sizeof(dso_name),
630 "[guest.kernel].%d", 633 "[guest.kernel].%d",
631 kernel_range++); 634 kernel_range++);
@@ -634,15 +637,15 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
634 "[kernel].%d", 637 "[kernel].%d",
635 kernel_range++); 638 kernel_range++);
636 639
637 dso = dso__new(dso_name); 640 ndso = dso__new(dso_name);
638 if (dso == NULL) 641 if (ndso == NULL)
639 return -1; 642 return -1;
640 643
641 dso->kernel = self->kernel; 644 ndso->kernel = dso->kernel;
642 645
643 curr_map = map__new2(pos->start, dso, map->type); 646 curr_map = map__new2(pos->start, ndso, map->type);
644 if (curr_map == NULL) { 647 if (curr_map == NULL) {
645 dso__delete(dso); 648 dso__delete(ndso);
646 return -1; 649 return -1;
647 } 650 }
648 651
@@ -665,7 +668,7 @@ discard_symbol: rb_erase(&pos->rb_node, root);
665 } 668 }
666 669
667 if (curr_map != map && 670 if (curr_map != map &&
668 self->kernel == DSO_TYPE_GUEST_KERNEL && 671 dso->kernel == DSO_TYPE_GUEST_KERNEL &&
669 machine__is_default_guest(kmaps->machine)) { 672 machine__is_default_guest(kmaps->machine)) {
670 dso__set_loaded(curr_map->dso, curr_map->type); 673 dso__set_loaded(curr_map->dso, curr_map->type);
671 } 674 }
@@ -673,21 +676,42 @@ discard_symbol: rb_erase(&pos->rb_node, root);
673 return count + moved; 676 return count + moved;
674} 677}
675 678
676int dso__load_kallsyms(struct dso *self, const char *filename, 679static bool symbol__restricted_filename(const char *filename,
680 const char *restricted_filename)
681{
682 bool restricted = false;
683
684 if (symbol_conf.kptr_restrict) {
685 char *r = realpath(filename, NULL);
686
687 if (r != NULL) {
688 restricted = strcmp(r, restricted_filename) == 0;
689 free(r);
690 return restricted;
691 }
692 }
693
694 return restricted;
695}
696
697int dso__load_kallsyms(struct dso *dso, const char *filename,
677 struct map *map, symbol_filter_t filter) 698 struct map *map, symbol_filter_t filter)
678{ 699{
679 if (dso__load_all_kallsyms(self, filename, map) < 0) 700 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
701 return -1;
702
703 if (dso__load_all_kallsyms(dso, filename, map) < 0)
680 return -1; 704 return -1;
681 705
682 if (self->kernel == DSO_TYPE_GUEST_KERNEL) 706 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
683 self->symtab_type = SYMTAB__GUEST_KALLSYMS; 707 dso->symtab_type = SYMTAB__GUEST_KALLSYMS;
684 else 708 else
685 self->symtab_type = SYMTAB__KALLSYMS; 709 dso->symtab_type = SYMTAB__KALLSYMS;
686 710
687 return dso__split_kallsyms(self, map, filter); 711 return dso__split_kallsyms(dso, map, filter);
688} 712}
689 713
690static int dso__load_perf_map(struct dso *self, struct map *map, 714static int dso__load_perf_map(struct dso *dso, struct map *map,
691 symbol_filter_t filter) 715 symbol_filter_t filter)
692{ 716{
693 char *line = NULL; 717 char *line = NULL;
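[Editorial aside, not part of the patch] The hunk above introduces symbol__restricted_filename(), the guard that makes dso__load_kallsyms() refuse to parse /proc/kallsyms while kptr_restrict is in effect. A minimal self-contained sketch of the same idea follows; the function and parameter names are illustrative, and the kptr_restrict flag is passed in explicitly rather than read from symbol_conf:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/*
 * Return true when 'filename' resolves to 'restricted', i.e. the caller is
 * really asking for a file we must not parse while kernel pointers are
 * being hidden from us.
 */
static bool is_restricted_filename(const char *filename, const char *restricted,
				   bool kptr_restrict)
{
	bool ret = false;

	if (kptr_restrict) {
		char *r = realpath(filename, NULL);	/* malloc'ed resolved path */

		if (r != NULL) {
			ret = strcmp(r, restricted) == 0;
			free(r);
		}
	}

	return ret;
}

int main(int argc, char **argv)
{
	bool kptr_restrict = true;	/* pretend the restriction is in effect */

	return argc > 1 &&
	       is_restricted_filename(argv[1], "/proc/kallsyms", kptr_restrict);
}

Resolving the path with realpath() first matters: a symlink or an alternate spelling of /proc/kallsyms would otherwise slip past a plain string comparison.
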
@@ -695,7 +719,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map,
695 FILE *file; 719 FILE *file;
696 int nr_syms = 0; 720 int nr_syms = 0;
697 721
698 file = fopen(self->long_name, "r"); 722 file = fopen(dso->long_name, "r");
699 if (file == NULL) 723 if (file == NULL)
700 goto out_failure; 724 goto out_failure;
701 725
@@ -733,7 +757,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map,
733 if (filter && filter(map, sym)) 757 if (filter && filter(map, sym))
734 symbol__delete(sym); 758 symbol__delete(sym);
735 else { 759 else {
736 symbols__insert(&self->symbols[map->type], sym); 760 symbols__insert(&dso->symbols[map->type], sym);
737 nr_syms++; 761 nr_syms++;
738 } 762 }
739 } 763 }
@@ -752,7 +776,7 @@ out_failure:
752/** 776/**
753 * elf_symtab__for_each_symbol - iterate thru all the symbols 777 * elf_symtab__for_each_symbol - iterate thru all the symbols
754 * 778 *
755 * @self: struct elf_symtab instance to iterate 779 * @syms: struct elf_symtab instance to iterate
756 * @idx: uint32_t idx 780 * @idx: uint32_t idx
757 * @sym: GElf_Sym iterator 781 * @sym: GElf_Sym iterator
758 */ 782 */
@@ -852,7 +876,7 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
852 * And always look at the original dso, not at debuginfo packages, that 876 * And always look at the original dso, not at debuginfo packages, that
853 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). 877 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
854 */ 878 */
855static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, 879static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map,
856 symbol_filter_t filter) 880 symbol_filter_t filter)
857{ 881{
858 uint32_t nr_rel_entries, idx; 882 uint32_t nr_rel_entries, idx;
@@ -871,7 +895,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map,
871 char name[PATH_MAX]; 895 char name[PATH_MAX];
872 896
873 snprintf(name, sizeof(name), "%s%s", 897 snprintf(name, sizeof(name), "%s%s",
874 symbol_conf.symfs, self->long_name); 898 symbol_conf.symfs, dso->long_name);
875 fd = open(name, O_RDONLY); 899 fd = open(name, O_RDONLY);
876 if (fd < 0) 900 if (fd < 0)
877 goto out; 901 goto out;
@@ -947,7 +971,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map,
947 if (filter && filter(map, f)) 971 if (filter && filter(map, f))
948 symbol__delete(f); 972 symbol__delete(f);
949 else { 973 else {
950 symbols__insert(&self->symbols[map->type], f); 974 symbols__insert(&dso->symbols[map->type], f);
951 ++nr; 975 ++nr;
952 } 976 }
953 } 977 }
@@ -969,7 +993,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map,
969 if (filter && filter(map, f)) 993 if (filter && filter(map, f))
970 symbol__delete(f); 994 symbol__delete(f);
971 else { 995 else {
972 symbols__insert(&self->symbols[map->type], f); 996 symbols__insert(&dso->symbols[map->type], f);
973 ++nr; 997 ++nr;
974 } 998 }
975 } 999 }
@@ -985,29 +1009,30 @@ out_close:
985 return nr; 1009 return nr;
986out: 1010out:
987 pr_debug("%s: problems reading %s PLT info.\n", 1011 pr_debug("%s: problems reading %s PLT info.\n",
988 __func__, self->long_name); 1012 __func__, dso->long_name);
989 return 0; 1013 return 0;
990} 1014}
991 1015
992static bool elf_sym__is_a(GElf_Sym *self, enum map_type type) 1016static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
993{ 1017{
994 switch (type) { 1018 switch (type) {
995 case MAP__FUNCTION: 1019 case MAP__FUNCTION:
996 return elf_sym__is_function(self); 1020 return elf_sym__is_function(sym);
997 case MAP__VARIABLE: 1021 case MAP__VARIABLE:
998 return elf_sym__is_object(self); 1022 return elf_sym__is_object(sym);
999 default: 1023 default:
1000 return false; 1024 return false;
1001 } 1025 }
1002} 1026}
1003 1027
1004static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type) 1028static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
1029 enum map_type type)
1005{ 1030{
1006 switch (type) { 1031 switch (type) {
1007 case MAP__FUNCTION: 1032 case MAP__FUNCTION:
1008 return elf_sec__is_text(self, secstrs); 1033 return elf_sec__is_text(shdr, secstrs);
1009 case MAP__VARIABLE: 1034 case MAP__VARIABLE:
1010 return elf_sec__is_data(self, secstrs); 1035 return elf_sec__is_data(shdr, secstrs);
1011 default: 1036 default:
1012 return false; 1037 return false;
1013 } 1038 }
@@ -1032,13 +1057,13 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
1032 return -1; 1057 return -1;
1033} 1058}
1034 1059
1035static int dso__load_sym(struct dso *self, struct map *map, const char *name, 1060static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
1036 int fd, symbol_filter_t filter, int kmodule, 1061 int fd, symbol_filter_t filter, int kmodule,
1037 int want_symtab) 1062 int want_symtab)
1038{ 1063{
1039 struct kmap *kmap = self->kernel ? map__kmap(map) : NULL; 1064 struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
1040 struct map *curr_map = map; 1065 struct map *curr_map = map;
1041 struct dso *curr_dso = self; 1066 struct dso *curr_dso = dso;
1042 Elf_Data *symstrs, *secstrs; 1067 Elf_Data *symstrs, *secstrs;
1043 uint32_t nr_syms; 1068 uint32_t nr_syms;
1044 int err = -1; 1069 int err = -1;
@@ -1064,14 +1089,14 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
1064 } 1089 }
1065 1090
1066 /* Always reject images with a mismatched build-id: */ 1091 /* Always reject images with a mismatched build-id: */
1067 if (self->has_build_id) { 1092 if (dso->has_build_id) {
1068 u8 build_id[BUILD_ID_SIZE]; 1093 u8 build_id[BUILD_ID_SIZE];
1069 1094
1070 if (elf_read_build_id(elf, build_id, 1095 if (elf_read_build_id(elf, build_id,
1071 BUILD_ID_SIZE) != BUILD_ID_SIZE) 1096 BUILD_ID_SIZE) != BUILD_ID_SIZE)
1072 goto out_elf_end; 1097 goto out_elf_end;
1073 1098
1074 if (!dso__build_id_equal(self, build_id)) 1099 if (!dso__build_id_equal(dso, build_id))
1075 goto out_elf_end; 1100 goto out_elf_end;
1076 } 1101 }
1077 1102
@@ -1112,13 +1137,14 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
1112 nr_syms = shdr.sh_size / shdr.sh_entsize; 1137 nr_syms = shdr.sh_size / shdr.sh_entsize;
1113 1138
1114 memset(&sym, 0, sizeof(sym)); 1139 memset(&sym, 0, sizeof(sym));
1115 if (self->kernel == DSO_TYPE_USER) { 1140 if (dso->kernel == DSO_TYPE_USER) {
1116 self->adjust_symbols = (ehdr.e_type == ET_EXEC || 1141 dso->adjust_symbols = (ehdr.e_type == ET_EXEC ||
1117 elf_section_by_name(elf, &ehdr, &shdr, 1142 elf_section_by_name(elf, &ehdr, &shdr,
1118 ".gnu.prelink_undo", 1143 ".gnu.prelink_undo",
1119 NULL) != NULL); 1144 NULL) != NULL);
1120 } else self->adjust_symbols = 0; 1145 } else {
1121 1146 dso->adjust_symbols = 0;
1147 }
1122 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { 1148 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
1123 struct symbol *f; 1149 struct symbol *f;
1124 const char *elf_name = elf_sym__name(&sym, symstrs); 1150 const char *elf_name = elf_sym__name(&sym, symstrs);
@@ -1168,22 +1194,22 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
1168 (sym.st_value & 1)) 1194 (sym.st_value & 1))
1169 --sym.st_value; 1195 --sym.st_value;
1170 1196
1171 if (self->kernel != DSO_TYPE_USER || kmodule) { 1197 if (dso->kernel != DSO_TYPE_USER || kmodule) {
1172 char dso_name[PATH_MAX]; 1198 char dso_name[PATH_MAX];
1173 1199
1174 if (strcmp(section_name, 1200 if (strcmp(section_name,
1175 (curr_dso->short_name + 1201 (curr_dso->short_name +
1176 self->short_name_len)) == 0) 1202 dso->short_name_len)) == 0)
1177 goto new_symbol; 1203 goto new_symbol;
1178 1204
1179 if (strcmp(section_name, ".text") == 0) { 1205 if (strcmp(section_name, ".text") == 0) {
1180 curr_map = map; 1206 curr_map = map;
1181 curr_dso = self; 1207 curr_dso = dso;
1182 goto new_symbol; 1208 goto new_symbol;
1183 } 1209 }
1184 1210
1185 snprintf(dso_name, sizeof(dso_name), 1211 snprintf(dso_name, sizeof(dso_name),
1186 "%s%s", self->short_name, section_name); 1212 "%s%s", dso->short_name, section_name);
1187 1213
1188 curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); 1214 curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
1189 if (curr_map == NULL) { 1215 if (curr_map == NULL) {
@@ -1195,7 +1221,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
1195 curr_dso = dso__new(dso_name); 1221 curr_dso = dso__new(dso_name);
1196 if (curr_dso == NULL) 1222 if (curr_dso == NULL)
1197 goto out_elf_end; 1223 goto out_elf_end;
1198 curr_dso->kernel = self->kernel; 1224 curr_dso->kernel = dso->kernel;
1225 curr_dso->long_name = dso->long_name;
1226 curr_dso->long_name_len = dso->long_name_len;
1199 curr_map = map__new2(start, curr_dso, 1227 curr_map = map__new2(start, curr_dso,
1200 map->type); 1228 map->type);
1201 if (curr_map == NULL) { 1229 if (curr_map == NULL) {
@@ -1204,9 +1232,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
1204 } 1232 }
1205 curr_map->map_ip = identity__map_ip; 1233 curr_map->map_ip = identity__map_ip;
1206 curr_map->unmap_ip = identity__map_ip; 1234 curr_map->unmap_ip = identity__map_ip;
1207 curr_dso->symtab_type = self->symtab_type; 1235 curr_dso->symtab_type = dso->symtab_type;
1208 map_groups__insert(kmap->kmaps, curr_map); 1236 map_groups__insert(kmap->kmaps, curr_map);
1209 dsos__add(&self->node, curr_dso); 1237 dsos__add(&dso->node, curr_dso);
1210 dso__set_loaded(curr_dso, map->type); 1238 dso__set_loaded(curr_dso, map->type);
1211 } else 1239 } else
1212 curr_dso = curr_map->dso; 1240 curr_dso = curr_map->dso;
@@ -1248,7 +1276,7 @@ new_symbol:
1248 * For misannotated, zeroed, ASM function sizes. 1276 * For misannotated, zeroed, ASM function sizes.
1249 */ 1277 */
1250 if (nr > 0) { 1278 if (nr > 0) {
1251 symbols__fixup_end(&self->symbols[map->type]); 1279 symbols__fixup_end(&dso->symbols[map->type]);
1252 if (kmap) { 1280 if (kmap) {
1253 /* 1281 /*
1254 * We need to fixup this here too because we create new 1282 * We need to fixup this here too because we create new
@@ -1264,9 +1292,9 @@ out_close:
1264 return err; 1292 return err;
1265} 1293}
1266 1294
1267static bool dso__build_id_equal(const struct dso *self, u8 *build_id) 1295static bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
1268{ 1296{
1269 return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0; 1297 return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
1270} 1298}
1271 1299
1272bool __dsos__read_build_ids(struct list_head *head, bool with_hits) 1300bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
@@ -1427,7 +1455,7 @@ out:
1427 return err; 1455 return err;
1428} 1456}
1429 1457
1430char dso__symtab_origin(const struct dso *self) 1458char dso__symtab_origin(const struct dso *dso)
1431{ 1459{
1432 static const char origin[] = { 1460 static const char origin[] = {
1433 [SYMTAB__KALLSYMS] = 'k', 1461 [SYMTAB__KALLSYMS] = 'k',
@@ -1442,12 +1470,12 @@ char dso__symtab_origin(const struct dso *self)
1442 [SYMTAB__GUEST_KMODULE] = 'G', 1470 [SYMTAB__GUEST_KMODULE] = 'G',
1443 }; 1471 };
1444 1472
1445 if (self == NULL || self->symtab_type == SYMTAB__NOT_FOUND) 1473 if (dso == NULL || dso->symtab_type == SYMTAB__NOT_FOUND)
1446 return '!'; 1474 return '!';
1447 return origin[self->symtab_type]; 1475 return origin[dso->symtab_type];
1448} 1476}
1449 1477
1450int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) 1478int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
1451{ 1479{
1452 int size = PATH_MAX; 1480 int size = PATH_MAX;
1453 char *name; 1481 char *name;
@@ -1457,12 +1485,12 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
1457 const char *root_dir; 1485 const char *root_dir;
1458 int want_symtab; 1486 int want_symtab;
1459 1487
1460 dso__set_loaded(self, map->type); 1488 dso__set_loaded(dso, map->type);
1461 1489
1462 if (self->kernel == DSO_TYPE_KERNEL) 1490 if (dso->kernel == DSO_TYPE_KERNEL)
1463 return dso__load_kernel_sym(self, map, filter); 1491 return dso__load_kernel_sym(dso, map, filter);
1464 else if (self->kernel == DSO_TYPE_GUEST_KERNEL) 1492 else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1465 return dso__load_guest_kernel_sym(self, map, filter); 1493 return dso__load_guest_kernel_sym(dso, map, filter);
1466 1494
1467 if (map->groups && map->groups->machine) 1495 if (map->groups && map->groups->machine)
1468 machine = map->groups->machine; 1496 machine = map->groups->machine;
@@ -1473,11 +1501,11 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
1473 if (!name) 1501 if (!name)
1474 return -1; 1502 return -1;
1475 1503
1476 self->adjust_symbols = 0; 1504 dso->adjust_symbols = 0;
1477 1505
1478 if (strncmp(self->name, "/tmp/perf-", 10) == 0) { 1506 if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
1479 ret = dso__load_perf_map(self, map, filter); 1507 ret = dso__load_perf_map(dso, map, filter);
1480 self->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT : 1508 dso->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT :
1481 SYMTAB__NOT_FOUND; 1509 SYMTAB__NOT_FOUND;
1482 return ret; 1510 return ret;
1483 } 1511 }
@@ -1488,33 +1516,33 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
1488 */ 1516 */
1489 want_symtab = 1; 1517 want_symtab = 1;
1490restart: 1518restart:
1491 for (self->symtab_type = SYMTAB__BUILD_ID_CACHE; 1519 for (dso->symtab_type = SYMTAB__BUILD_ID_CACHE;
1492 self->symtab_type != SYMTAB__NOT_FOUND; 1520 dso->symtab_type != SYMTAB__NOT_FOUND;
1493 self->symtab_type++) { 1521 dso->symtab_type++) {
1494 switch (self->symtab_type) { 1522 switch (dso->symtab_type) {
1495 case SYMTAB__BUILD_ID_CACHE: 1523 case SYMTAB__BUILD_ID_CACHE:
1496 /* skip the locally configured cache if a symfs is given */ 1524 /* skip the locally configured cache if a symfs is given */
1497 if (symbol_conf.symfs[0] || 1525 if (symbol_conf.symfs[0] ||
1498 (dso__build_id_filename(self, name, size) == NULL)) { 1526 (dso__build_id_filename(dso, name, size) == NULL)) {
1499 continue; 1527 continue;
1500 } 1528 }
1501 break; 1529 break;
1502 case SYMTAB__FEDORA_DEBUGINFO: 1530 case SYMTAB__FEDORA_DEBUGINFO:
1503 snprintf(name, size, "%s/usr/lib/debug%s.debug", 1531 snprintf(name, size, "%s/usr/lib/debug%s.debug",
1504 symbol_conf.symfs, self->long_name); 1532 symbol_conf.symfs, dso->long_name);
1505 break; 1533 break;
1506 case SYMTAB__UBUNTU_DEBUGINFO: 1534 case SYMTAB__UBUNTU_DEBUGINFO:
1507 snprintf(name, size, "%s/usr/lib/debug%s", 1535 snprintf(name, size, "%s/usr/lib/debug%s",
1508 symbol_conf.symfs, self->long_name); 1536 symbol_conf.symfs, dso->long_name);
1509 break; 1537 break;
1510 case SYMTAB__BUILDID_DEBUGINFO: { 1538 case SYMTAB__BUILDID_DEBUGINFO: {
1511 char build_id_hex[BUILD_ID_SIZE * 2 + 1]; 1539 char build_id_hex[BUILD_ID_SIZE * 2 + 1];
1512 1540
1513 if (!self->has_build_id) 1541 if (!dso->has_build_id)
1514 continue; 1542 continue;
1515 1543
1516 build_id__sprintf(self->build_id, 1544 build_id__sprintf(dso->build_id,
1517 sizeof(self->build_id), 1545 sizeof(dso->build_id),
1518 build_id_hex); 1546 build_id_hex);
1519 snprintf(name, size, 1547 snprintf(name, size,
1520 "%s/usr/lib/debug/.build-id/%.2s/%s.debug", 1548 "%s/usr/lib/debug/.build-id/%.2s/%s.debug",
@@ -1523,7 +1551,7 @@ restart:
1523 break; 1551 break;
1524 case SYMTAB__SYSTEM_PATH_DSO: 1552 case SYMTAB__SYSTEM_PATH_DSO:
1525 snprintf(name, size, "%s%s", 1553 snprintf(name, size, "%s%s",
1526 symbol_conf.symfs, self->long_name); 1554 symbol_conf.symfs, dso->long_name);
1527 break; 1555 break;
1528 case SYMTAB__GUEST_KMODULE: 1556 case SYMTAB__GUEST_KMODULE:
1529 if (map->groups && machine) 1557 if (map->groups && machine)
@@ -1531,12 +1559,12 @@ restart:
1531 else 1559 else
1532 root_dir = ""; 1560 root_dir = "";
1533 snprintf(name, size, "%s%s%s", symbol_conf.symfs, 1561 snprintf(name, size, "%s%s%s", symbol_conf.symfs,
1534 root_dir, self->long_name); 1562 root_dir, dso->long_name);
1535 break; 1563 break;
1536 1564
1537 case SYMTAB__SYSTEM_PATH_KMODULE: 1565 case SYMTAB__SYSTEM_PATH_KMODULE:
1538 snprintf(name, size, "%s%s", symbol_conf.symfs, 1566 snprintf(name, size, "%s%s", symbol_conf.symfs,
1539 self->long_name); 1567 dso->long_name);
1540 break; 1568 break;
1541 default:; 1569 default:;
1542 } 1570 }
@@ -1546,7 +1574,7 @@ restart:
1546 if (fd < 0) 1574 if (fd < 0)
1547 continue; 1575 continue;
1548 1576
1549 ret = dso__load_sym(self, map, name, fd, filter, 0, 1577 ret = dso__load_sym(dso, map, name, fd, filter, 0,
1550 want_symtab); 1578 want_symtab);
1551 close(fd); 1579 close(fd);
1552 1580
@@ -1558,7 +1586,8 @@ restart:
1558 continue; 1586 continue;
1559 1587
1560 if (ret > 0) { 1588 if (ret > 0) {
1561 int nr_plt = dso__synthesize_plt_symbols(self, map, filter); 1589 int nr_plt = dso__synthesize_plt_symbols(dso, map,
1590 filter);
1562 if (nr_plt > 0) 1591 if (nr_plt > 0)
1563 ret += nr_plt; 1592 ret += nr_plt;
1564 break; 1593 break;
@@ -1575,17 +1604,17 @@ restart:
1575 } 1604 }
1576 1605
1577 free(name); 1606 free(name);
1578 if (ret < 0 && strstr(self->name, " (deleted)") != NULL) 1607 if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1579 return 0; 1608 return 0;
1580 return ret; 1609 return ret;
1581} 1610}
1582 1611
1583struct map *map_groups__find_by_name(struct map_groups *self, 1612struct map *map_groups__find_by_name(struct map_groups *mg,
1584 enum map_type type, const char *name) 1613 enum map_type type, const char *name)
1585{ 1614{
1586 struct rb_node *nd; 1615 struct rb_node *nd;
1587 1616
1588 for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) { 1617 for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
1589 struct map *map = rb_entry(nd, struct map, rb_node); 1618 struct map *map = rb_entry(nd, struct map, rb_node);
1590 1619
1591 if (map->dso && strcmp(map->dso->short_name, name) == 0) 1620 if (map->dso && strcmp(map->dso->short_name, name) == 0)
@@ -1595,28 +1624,28 @@ struct map *map_groups__find_by_name(struct map_groups *self,
1595 return NULL; 1624 return NULL;
1596} 1625}
1597 1626
1598static int dso__kernel_module_get_build_id(struct dso *self, 1627static int dso__kernel_module_get_build_id(struct dso *dso,
1599 const char *root_dir) 1628 const char *root_dir)
1600{ 1629{
1601 char filename[PATH_MAX]; 1630 char filename[PATH_MAX];
1602 /* 1631 /*
1603 * kernel module short names are of the form "[module]" and 1632 * kernel module short names are of the form "[module]" and
1604 * we need just "module" here. 1633 * we need just "module" here.
1605 */ 1634 */
1606 const char *name = self->short_name + 1; 1635 const char *name = dso->short_name + 1;
1607 1636
1608 snprintf(filename, sizeof(filename), 1637 snprintf(filename, sizeof(filename),
1609 "%s/sys/module/%.*s/notes/.note.gnu.build-id", 1638 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1610 root_dir, (int)strlen(name) - 1, name); 1639 root_dir, (int)strlen(name) - 1, name);
1611 1640
1612 if (sysfs__read_build_id(filename, self->build_id, 1641 if (sysfs__read_build_id(filename, dso->build_id,
1613 sizeof(self->build_id)) == 0) 1642 sizeof(dso->build_id)) == 0)
1614 self->has_build_id = true; 1643 dso->has_build_id = true;
1615 1644
1616 return 0; 1645 return 0;
1617} 1646}
1618 1647
1619static int map_groups__set_modules_path_dir(struct map_groups *self, 1648static int map_groups__set_modules_path_dir(struct map_groups *mg,
1620 const char *dir_name) 1649 const char *dir_name)
1621{ 1650{
1622 struct dirent *dent; 1651 struct dirent *dent;
@@ -1644,7 +1673,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *self,
1644 1673
1645 snprintf(path, sizeof(path), "%s/%s", 1674 snprintf(path, sizeof(path), "%s/%s",
1646 dir_name, dent->d_name); 1675 dir_name, dent->d_name);
1647 ret = map_groups__set_modules_path_dir(self, path); 1676 ret = map_groups__set_modules_path_dir(mg, path);
1648 if (ret < 0) 1677 if (ret < 0)
1649 goto out; 1678 goto out;
1650 } else { 1679 } else {
@@ -1659,7 +1688,8 @@ static int map_groups__set_modules_path_dir(struct map_groups *self,
1659 (int)(dot - dent->d_name), dent->d_name); 1688 (int)(dot - dent->d_name), dent->d_name);
1660 1689
1661 strxfrchar(dso_name, '-', '_'); 1690 strxfrchar(dso_name, '-', '_');
1662 map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name); 1691 map = map_groups__find_by_name(mg, MAP__FUNCTION,
1692 dso_name);
1663 if (map == NULL) 1693 if (map == NULL)
1664 continue; 1694 continue;
1665 1695
@@ -1709,20 +1739,20 @@ static char *get_kernel_version(const char *root_dir)
1709 return strdup(name); 1739 return strdup(name);
1710} 1740}
1711 1741
1712static int machine__set_modules_path(struct machine *self) 1742static int machine__set_modules_path(struct machine *machine)
1713{ 1743{
1714 char *version; 1744 char *version;
1715 char modules_path[PATH_MAX]; 1745 char modules_path[PATH_MAX];
1716 1746
1717 version = get_kernel_version(self->root_dir); 1747 version = get_kernel_version(machine->root_dir);
1718 if (!version) 1748 if (!version)
1719 return -1; 1749 return -1;
1720 1750
1721 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", 1751 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
1722 self->root_dir, version); 1752 machine->root_dir, version);
1723 free(version); 1753 free(version);
1724 1754
1725 return map_groups__set_modules_path_dir(&self->kmaps, modules_path); 1755 return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
1726} 1756}
1727 1757
1728/* 1758/*
@@ -1732,23 +1762,23 @@ static int machine__set_modules_path(struct machine *self)
1732 */ 1762 */
1733static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) 1763static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
1734{ 1764{
1735 struct map *self = calloc(1, (sizeof(*self) + 1765 struct map *map = calloc(1, (sizeof(*map) +
1736 (dso->kernel ? sizeof(struct kmap) : 0))); 1766 (dso->kernel ? sizeof(struct kmap) : 0)));
1737 if (self != NULL) { 1767 if (map != NULL) {
1738 /* 1768 /*
1739 * ->end will be filled after we load all the symbols 1769 * ->end will be filled after we load all the symbols
1740 */ 1770 */
1741 map__init(self, type, start, 0, 0, dso); 1771 map__init(map, type, start, 0, 0, dso);
1742 } 1772 }
1743 1773
1744 return self; 1774 return map;
1745} 1775}
1746 1776
1747struct map *machine__new_module(struct machine *self, u64 start, 1777struct map *machine__new_module(struct machine *machine, u64 start,
1748 const char *filename) 1778 const char *filename)
1749{ 1779{
1750 struct map *map; 1780 struct map *map;
1751 struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename); 1781 struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
1752 1782
1753 if (dso == NULL) 1783 if (dso == NULL)
1754 return NULL; 1784 return NULL;
@@ -1757,15 +1787,15 @@ struct map *machine__new_module(struct machine *self, u64 start,
1757 if (map == NULL) 1787 if (map == NULL)
1758 return NULL; 1788 return NULL;
1759 1789
1760 if (machine__is_host(self)) 1790 if (machine__is_host(machine))
1761 dso->symtab_type = SYMTAB__SYSTEM_PATH_KMODULE; 1791 dso->symtab_type = SYMTAB__SYSTEM_PATH_KMODULE;
1762 else 1792 else
1763 dso->symtab_type = SYMTAB__GUEST_KMODULE; 1793 dso->symtab_type = SYMTAB__GUEST_KMODULE;
1764 map_groups__insert(&self->kmaps, map); 1794 map_groups__insert(&machine->kmaps, map);
1765 return map; 1795 return map;
1766} 1796}
1767 1797
1768static int machine__create_modules(struct machine *self) 1798static int machine__create_modules(struct machine *machine)
1769{ 1799{
1770 char *line = NULL; 1800 char *line = NULL;
1771 size_t n; 1801 size_t n;
@@ -1774,13 +1804,16 @@ static int machine__create_modules(struct machine *self)
1774 const char *modules; 1804 const char *modules;
1775 char path[PATH_MAX]; 1805 char path[PATH_MAX];
1776 1806
1777 if (machine__is_default_guest(self)) 1807 if (machine__is_default_guest(machine))
1778 modules = symbol_conf.default_guest_modules; 1808 modules = symbol_conf.default_guest_modules;
1779 else { 1809 else {
1780 sprintf(path, "%s/proc/modules", self->root_dir); 1810 sprintf(path, "%s/proc/modules", machine->root_dir);
1781 modules = path; 1811 modules = path;
1782 } 1812 }
1783 1813
1814 if (symbol__restricted_filename(path, "/proc/modules"))
1815 return -1;
1816
1784 file = fopen(modules, "r"); 1817 file = fopen(modules, "r");
1785 if (file == NULL) 1818 if (file == NULL)
1786 return -1; 1819 return -1;
@@ -1813,16 +1846,16 @@ static int machine__create_modules(struct machine *self)
1813 *sep = '\0'; 1846 *sep = '\0';
1814 1847
1815 snprintf(name, sizeof(name), "[%s]", line); 1848 snprintf(name, sizeof(name), "[%s]", line);
1816 map = machine__new_module(self, start, name); 1849 map = machine__new_module(machine, start, name);
1817 if (map == NULL) 1850 if (map == NULL)
1818 goto out_delete_line; 1851 goto out_delete_line;
1819 dso__kernel_module_get_build_id(map->dso, self->root_dir); 1852 dso__kernel_module_get_build_id(map->dso, machine->root_dir);
1820 } 1853 }
1821 1854
1822 free(line); 1855 free(line);
1823 fclose(file); 1856 fclose(file);
1824 1857
1825 return machine__set_modules_path(self); 1858 return machine__set_modules_path(machine);
1826 1859
1827out_delete_line: 1860out_delete_line:
1828 free(line); 1861 free(line);
@@ -1830,7 +1863,7 @@ out_failure:
1830 return -1; 1863 return -1;
1831} 1864}
1832 1865
1833int dso__load_vmlinux(struct dso *self, struct map *map, 1866int dso__load_vmlinux(struct dso *dso, struct map *map,
1834 const char *vmlinux, symbol_filter_t filter) 1867 const char *vmlinux, symbol_filter_t filter)
1835{ 1868{
1836 int err = -1, fd; 1869 int err = -1, fd;
@@ -1842,8 +1875,9 @@ int dso__load_vmlinux(struct dso *self, struct map *map,
1842 if (fd < 0) 1875 if (fd < 0)
1843 return -1; 1876 return -1;
1844 1877
1845 dso__set_loaded(self, map->type); 1878 dso__set_long_name(dso, (char *)vmlinux);
1846 err = dso__load_sym(self, map, symfs_vmlinux, fd, filter, 0, 0); 1879 dso__set_loaded(dso, map->type);
1880 err = dso__load_sym(dso, map, symfs_vmlinux, fd, filter, 0, 0);
1847 close(fd); 1881 close(fd);
1848 1882
1849 if (err > 0) 1883 if (err > 0)
@@ -1852,7 +1886,7 @@ int dso__load_vmlinux(struct dso *self, struct map *map,
1852 return err; 1886 return err;
1853} 1887}
1854 1888
1855int dso__load_vmlinux_path(struct dso *self, struct map *map, 1889int dso__load_vmlinux_path(struct dso *dso, struct map *map,
1856 symbol_filter_t filter) 1890 symbol_filter_t filter)
1857{ 1891{
1858 int i, err = 0; 1892 int i, err = 0;
@@ -1861,20 +1895,20 @@ int dso__load_vmlinux_path(struct dso *self, struct map *map,
1861 pr_debug("Looking at the vmlinux_path (%d entries long)\n", 1895 pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1862 vmlinux_path__nr_entries + 1); 1896 vmlinux_path__nr_entries + 1);
1863 1897
1864 filename = dso__build_id_filename(self, NULL, 0); 1898 filename = dso__build_id_filename(dso, NULL, 0);
1865 if (filename != NULL) { 1899 if (filename != NULL) {
1866 err = dso__load_vmlinux(self, map, filename, filter); 1900 err = dso__load_vmlinux(dso, map, filename, filter);
1867 if (err > 0) { 1901 if (err > 0) {
1868 dso__set_long_name(self, filename); 1902 dso__set_long_name(dso, filename);
1869 goto out; 1903 goto out;
1870 } 1904 }
1871 free(filename); 1905 free(filename);
1872 } 1906 }
1873 1907
1874 for (i = 0; i < vmlinux_path__nr_entries; ++i) { 1908 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1875 err = dso__load_vmlinux(self, map, vmlinux_path[i], filter); 1909 err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter);
1876 if (err > 0) { 1910 if (err > 0) {
1877 dso__set_long_name(self, strdup(vmlinux_path[i])); 1911 dso__set_long_name(dso, strdup(vmlinux_path[i]));
1878 break; 1912 break;
1879 } 1913 }
1880 } 1914 }
@@ -1882,7 +1916,7 @@ out:
1882 return err; 1916 return err;
1883} 1917}
1884 1918
1885static int dso__load_kernel_sym(struct dso *self, struct map *map, 1919static int dso__load_kernel_sym(struct dso *dso, struct map *map,
1886 symbol_filter_t filter) 1920 symbol_filter_t filter)
1887{ 1921{
1888 int err; 1922 int err;
@@ -1909,10 +1943,10 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map,
1909 } 1943 }
1910 1944
1911 if (symbol_conf.vmlinux_name != NULL) { 1945 if (symbol_conf.vmlinux_name != NULL) {
1912 err = dso__load_vmlinux(self, map, 1946 err = dso__load_vmlinux(dso, map,
1913 symbol_conf.vmlinux_name, filter); 1947 symbol_conf.vmlinux_name, filter);
1914 if (err > 0) { 1948 if (err > 0) {
1915 dso__set_long_name(self, 1949 dso__set_long_name(dso,
1916 strdup(symbol_conf.vmlinux_name)); 1950 strdup(symbol_conf.vmlinux_name));
1917 goto out_fixup; 1951 goto out_fixup;
1918 } 1952 }
@@ -1920,7 +1954,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map,
1920 } 1954 }
1921 1955
1922 if (vmlinux_path != NULL) { 1956 if (vmlinux_path != NULL) {
1923 err = dso__load_vmlinux_path(self, map, filter); 1957 err = dso__load_vmlinux_path(dso, map, filter);
1924 if (err > 0) 1958 if (err > 0)
1925 goto out_fixup; 1959 goto out_fixup;
1926 } 1960 }
@@ -1934,13 +1968,13 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map,
1934 * we have a build-id, so check if it is the same as the running kernel, 1968 * we have a build-id, so check if it is the same as the running kernel,
1935 * using it if it is. 1969 * using it if it is.
1936 */ 1970 */
1937 if (self->has_build_id) { 1971 if (dso->has_build_id) {
1938 u8 kallsyms_build_id[BUILD_ID_SIZE]; 1972 u8 kallsyms_build_id[BUILD_ID_SIZE];
1939 char sbuild_id[BUILD_ID_SIZE * 2 + 1]; 1973 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1940 1974
1941 if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id, 1975 if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id,
1942 sizeof(kallsyms_build_id)) == 0) { 1976 sizeof(kallsyms_build_id)) == 0) {
1943 if (dso__build_id_equal(self, kallsyms_build_id)) { 1977 if (dso__build_id_equal(dso, kallsyms_build_id)) {
1944 kallsyms_filename = "/proc/kallsyms"; 1978 kallsyms_filename = "/proc/kallsyms";
1945 goto do_kallsyms; 1979 goto do_kallsyms;
1946 } 1980 }
@@ -1949,7 +1983,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map,
1949 * Now look if we have it on the build-id cache in 1983 * Now look if we have it on the build-id cache in
1950 * $HOME/.debug/[kernel.kallsyms]. 1984 * $HOME/.debug/[kernel.kallsyms].
1951 */ 1985 */
1952 build_id__sprintf(self->build_id, sizeof(self->build_id), 1986 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1953 sbuild_id); 1987 sbuild_id);
1954 1988
1955 if (asprintf(&kallsyms_allocated_filename, 1989 if (asprintf(&kallsyms_allocated_filename,
@@ -1976,7 +2010,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map,
1976 } 2010 }
1977 2011
1978do_kallsyms: 2012do_kallsyms:
1979 err = dso__load_kallsyms(self, kallsyms_filename, map, filter); 2013 err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
1980 if (err > 0) 2014 if (err > 0)
1981 pr_debug("Using %s for symbols\n", kallsyms_filename); 2015 pr_debug("Using %s for symbols\n", kallsyms_filename);
1982 free(kallsyms_allocated_filename); 2016 free(kallsyms_allocated_filename);
@@ -1984,7 +2018,7 @@ do_kallsyms:
1984 if (err > 0) { 2018 if (err > 0) {
1985out_fixup: 2019out_fixup:
1986 if (kallsyms_filename != NULL) 2020 if (kallsyms_filename != NULL)
1987 dso__set_long_name(self, strdup("[kernel.kallsyms]")); 2021 dso__set_long_name(dso, strdup("[kernel.kallsyms]"));
1988 map__fixup_start(map); 2022 map__fixup_start(map);
1989 map__fixup_end(map); 2023 map__fixup_end(map);
1990 } 2024 }
@@ -1992,8 +2026,8 @@ out_fixup:
1992 return err; 2026 return err;
1993} 2027}
1994 2028
1995static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, 2029static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
1996 symbol_filter_t filter) 2030 symbol_filter_t filter)
1997{ 2031{
1998 int err; 2032 int err;
1999 const char *kallsyms_filename = NULL; 2033 const char *kallsyms_filename = NULL;
@@ -2013,7 +2047,7 @@ static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
2013 * Or use file guest_kallsyms inputted by user on commandline 2047 * Or use file guest_kallsyms inputted by user on commandline
2014 */ 2048 */
2015 if (symbol_conf.default_guest_vmlinux_name != NULL) { 2049 if (symbol_conf.default_guest_vmlinux_name != NULL) {
2016 err = dso__load_vmlinux(self, map, 2050 err = dso__load_vmlinux(dso, map,
2017 symbol_conf.default_guest_vmlinux_name, filter); 2051 symbol_conf.default_guest_vmlinux_name, filter);
2018 goto out_try_fixup; 2052 goto out_try_fixup;
2019 } 2053 }
@@ -2026,7 +2060,7 @@ static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
2026 kallsyms_filename = path; 2060 kallsyms_filename = path;
2027 } 2061 }
2028 2062
2029 err = dso__load_kallsyms(self, kallsyms_filename, map, filter); 2063 err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
2030 if (err > 0) 2064 if (err > 0)
2031 pr_debug("Using %s for symbols\n", kallsyms_filename); 2065 pr_debug("Using %s for symbols\n", kallsyms_filename);
2032 2066
@@ -2034,7 +2068,7 @@ out_try_fixup:
2034 if (err > 0) { 2068 if (err > 0) {
2035 if (kallsyms_filename != NULL) { 2069 if (kallsyms_filename != NULL) {
2036 machine__mmap_name(machine, path, sizeof(path)); 2070 machine__mmap_name(machine, path, sizeof(path));
2037 dso__set_long_name(self, strdup(path)); 2071 dso__set_long_name(dso, strdup(path));
2038 } 2072 }
2039 map__fixup_start(map); 2073 map__fixup_start(map);
2040 map__fixup_end(map); 2074 map__fixup_end(map);
@@ -2087,12 +2121,12 @@ size_t __dsos__fprintf(struct list_head *head, FILE *fp)
2087 return ret; 2121 return ret;
2088} 2122}
2089 2123
2090size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp) 2124size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp)
2091{ 2125{
2092 struct rb_node *nd; 2126 struct rb_node *nd;
2093 size_t ret = 0; 2127 size_t ret = 0;
2094 2128
2095 for (nd = rb_first(self); nd; nd = rb_next(nd)) { 2129 for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
2096 struct machine *pos = rb_entry(nd, struct machine, rb_node); 2130 struct machine *pos = rb_entry(nd, struct machine, rb_node);
2097 ret += __dsos__fprintf(&pos->kernel_dsos, fp); 2131 ret += __dsos__fprintf(&pos->kernel_dsos, fp);
2098 ret += __dsos__fprintf(&pos->user_dsos, fp); 2132 ret += __dsos__fprintf(&pos->user_dsos, fp);
@@ -2116,18 +2150,20 @@ static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
2116 return ret; 2150 return ret;
2117} 2151}
2118 2152
2119size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits) 2153size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
2154 bool with_hits)
2120{ 2155{
2121 return __dsos__fprintf_buildid(&self->kernel_dsos, fp, with_hits) + 2156 return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) +
2122 __dsos__fprintf_buildid(&self->user_dsos, fp, with_hits); 2157 __dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits);
2123} 2158}
2124 2159
2125size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits) 2160size_t machines__fprintf_dsos_buildid(struct rb_root *machines,
2161 FILE *fp, bool with_hits)
2126{ 2162{
2127 struct rb_node *nd; 2163 struct rb_node *nd;
2128 size_t ret = 0; 2164 size_t ret = 0;
2129 2165
2130 for (nd = rb_first(self); nd; nd = rb_next(nd)) { 2166 for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
2131 struct machine *pos = rb_entry(nd, struct machine, rb_node); 2167 struct machine *pos = rb_entry(nd, struct machine, rb_node);
2132 ret += machine__fprintf_dsos_buildid(pos, fp, with_hits); 2168 ret += machine__fprintf_dsos_buildid(pos, fp, with_hits);
2133 } 2169 }
@@ -2136,59 +2172,59 @@ size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_
2136 2172
2137struct dso *dso__new_kernel(const char *name) 2173struct dso *dso__new_kernel(const char *name)
2138{ 2174{
2139 struct dso *self = dso__new(name ?: "[kernel.kallsyms]"); 2175 struct dso *dso = dso__new(name ?: "[kernel.kallsyms]");
2140 2176
2141 if (self != NULL) { 2177 if (dso != NULL) {
2142 dso__set_short_name(self, "[kernel]"); 2178 dso__set_short_name(dso, "[kernel]");
2143 self->kernel = DSO_TYPE_KERNEL; 2179 dso->kernel = DSO_TYPE_KERNEL;
2144 } 2180 }
2145 2181
2146 return self; 2182 return dso;
2147} 2183}
2148 2184
2149static struct dso *dso__new_guest_kernel(struct machine *machine, 2185static struct dso *dso__new_guest_kernel(struct machine *machine,
2150 const char *name) 2186 const char *name)
2151{ 2187{
2152 char bf[PATH_MAX]; 2188 char bf[PATH_MAX];
2153 struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf))); 2189 struct dso *dso = dso__new(name ?: machine__mmap_name(machine, bf,
2154 2190 sizeof(bf)));
2155 if (self != NULL) { 2191 if (dso != NULL) {
2156 dso__set_short_name(self, "[guest.kernel]"); 2192 dso__set_short_name(dso, "[guest.kernel]");
2157 self->kernel = DSO_TYPE_GUEST_KERNEL; 2193 dso->kernel = DSO_TYPE_GUEST_KERNEL;
2158 } 2194 }
2159 2195
2160 return self; 2196 return dso;
2161} 2197}
2162 2198
2163void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine) 2199void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
2164{ 2200{
2165 char path[PATH_MAX]; 2201 char path[PATH_MAX];
2166 2202
2167 if (machine__is_default_guest(machine)) 2203 if (machine__is_default_guest(machine))
2168 return; 2204 return;
2169 sprintf(path, "%s/sys/kernel/notes", machine->root_dir); 2205 sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
2170 if (sysfs__read_build_id(path, self->build_id, 2206 if (sysfs__read_build_id(path, dso->build_id,
2171 sizeof(self->build_id)) == 0) 2207 sizeof(dso->build_id)) == 0)
2172 self->has_build_id = true; 2208 dso->has_build_id = true;
2173} 2209}
2174 2210
2175static struct dso *machine__create_kernel(struct machine *self) 2211static struct dso *machine__create_kernel(struct machine *machine)
2176{ 2212{
2177 const char *vmlinux_name = NULL; 2213 const char *vmlinux_name = NULL;
2178 struct dso *kernel; 2214 struct dso *kernel;
2179 2215
2180 if (machine__is_host(self)) { 2216 if (machine__is_host(machine)) {
2181 vmlinux_name = symbol_conf.vmlinux_name; 2217 vmlinux_name = symbol_conf.vmlinux_name;
2182 kernel = dso__new_kernel(vmlinux_name); 2218 kernel = dso__new_kernel(vmlinux_name);
2183 } else { 2219 } else {
2184 if (machine__is_default_guest(self)) 2220 if (machine__is_default_guest(machine))
2185 vmlinux_name = symbol_conf.default_guest_vmlinux_name; 2221 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
2186 kernel = dso__new_guest_kernel(self, vmlinux_name); 2222 kernel = dso__new_guest_kernel(machine, vmlinux_name);
2187 } 2223 }
2188 2224
2189 if (kernel != NULL) { 2225 if (kernel != NULL) {
2190 dso__read_running_kernel_build_id(kernel, self); 2226 dso__read_running_kernel_build_id(kernel, machine);
2191 dsos__add(&self->kernel_dsos, kernel); 2227 dsos__add(&machine->kernel_dsos, kernel);
2192 } 2228 }
2193 return kernel; 2229 return kernel;
2194} 2230}
@@ -2227,47 +2263,52 @@ static u64 machine__get_kernel_start_addr(struct machine *machine)
2227 } 2263 }
2228 } 2264 }
2229 2265
2266 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
2267 return 0;
2268
2230 if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0) 2269 if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
2231 return 0; 2270 return 0;
2232 2271
2233 return args.start; 2272 return args.start;
2234} 2273}
2235 2274
2236int __machine__create_kernel_maps(struct machine *self, struct dso *kernel) 2275int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
2237{ 2276{
2238 enum map_type type; 2277 enum map_type type;
2239 u64 start = machine__get_kernel_start_addr(self); 2278 u64 start = machine__get_kernel_start_addr(machine);
2240 2279
2241 for (type = 0; type < MAP__NR_TYPES; ++type) { 2280 for (type = 0; type < MAP__NR_TYPES; ++type) {
2242 struct kmap *kmap; 2281 struct kmap *kmap;
2243 2282
2244 self->vmlinux_maps[type] = map__new2(start, kernel, type); 2283 machine->vmlinux_maps[type] = map__new2(start, kernel, type);
2245 if (self->vmlinux_maps[type] == NULL) 2284 if (machine->vmlinux_maps[type] == NULL)
2246 return -1; 2285 return -1;
2247 2286
2248 self->vmlinux_maps[type]->map_ip = 2287 machine->vmlinux_maps[type]->map_ip =
2249 self->vmlinux_maps[type]->unmap_ip = identity__map_ip; 2288 machine->vmlinux_maps[type]->unmap_ip =
2250 2289 identity__map_ip;
2251 kmap = map__kmap(self->vmlinux_maps[type]); 2290 kmap = map__kmap(machine->vmlinux_maps[type]);
2252 kmap->kmaps = &self->kmaps; 2291 kmap->kmaps = &machine->kmaps;
2253 map_groups__insert(&self->kmaps, self->vmlinux_maps[type]); 2292 map_groups__insert(&machine->kmaps,
2293 machine->vmlinux_maps[type]);
2254 } 2294 }
2255 2295
2256 return 0; 2296 return 0;
2257} 2297}
2258 2298
2259void machine__destroy_kernel_maps(struct machine *self) 2299void machine__destroy_kernel_maps(struct machine *machine)
2260{ 2300{
2261 enum map_type type; 2301 enum map_type type;
2262 2302
2263 for (type = 0; type < MAP__NR_TYPES; ++type) { 2303 for (type = 0; type < MAP__NR_TYPES; ++type) {
2264 struct kmap *kmap; 2304 struct kmap *kmap;
2265 2305
2266 if (self->vmlinux_maps[type] == NULL) 2306 if (machine->vmlinux_maps[type] == NULL)
2267 continue; 2307 continue;
2268 2308
2269 kmap = map__kmap(self->vmlinux_maps[type]); 2309 kmap = map__kmap(machine->vmlinux_maps[type]);
2270 map_groups__remove(&self->kmaps, self->vmlinux_maps[type]); 2310 map_groups__remove(&machine->kmaps,
2311 machine->vmlinux_maps[type]);
2271 if (kmap->ref_reloc_sym) { 2312 if (kmap->ref_reloc_sym) {
2272 /* 2313 /*
2273 * ref_reloc_sym is shared among all maps, so free just 2314 * ref_reloc_sym is shared among all maps, so free just
@@ -2281,25 +2322,25 @@ void machine__destroy_kernel_maps(struct machine *self)
2281 kmap->ref_reloc_sym = NULL; 2322 kmap->ref_reloc_sym = NULL;
2282 } 2323 }
2283 2324
2284 map__delete(self->vmlinux_maps[type]); 2325 map__delete(machine->vmlinux_maps[type]);
2285 self->vmlinux_maps[type] = NULL; 2326 machine->vmlinux_maps[type] = NULL;
2286 } 2327 }
2287} 2328}
2288 2329
2289int machine__create_kernel_maps(struct machine *self) 2330int machine__create_kernel_maps(struct machine *machine)
2290{ 2331{
2291 struct dso *kernel = machine__create_kernel(self); 2332 struct dso *kernel = machine__create_kernel(machine);
2292 2333
2293 if (kernel == NULL || 2334 if (kernel == NULL ||
2294 __machine__create_kernel_maps(self, kernel) < 0) 2335 __machine__create_kernel_maps(machine, kernel) < 0)
2295 return -1; 2336 return -1;
2296 2337
2297 if (symbol_conf.use_modules && machine__create_modules(self) < 0) 2338 if (symbol_conf.use_modules && machine__create_modules(machine) < 0)
2298 pr_debug("Problems creating module maps, continuing anyway...\n"); 2339 pr_debug("Problems creating module maps, continuing anyway...\n");
2299 /* 2340 /*
2300 * Now that we have all the maps created, just set the ->end of them: 2341 * Now that we have all the maps created, just set the ->end of them:
2301 */ 2342 */
2302 map_groups__fixup_end(&self->kmaps); 2343 map_groups__fixup_end(&machine->kmaps);
2303 return 0; 2344 return 0;
2304} 2345}
2305 2346
@@ -2363,11 +2404,11 @@ out_fail:
2363 return -1; 2404 return -1;
2364} 2405}
2365 2406
2366size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp) 2407size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
2367{ 2408{
2368 int i; 2409 int i;
2369 size_t printed = 0; 2410 size_t printed = 0;
2370 struct dso *kdso = self->vmlinux_maps[MAP__FUNCTION]->dso; 2411 struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
2371 2412
2372 if (kdso->has_build_id) { 2413 if (kdso->has_build_id) {
2373 char filename[PATH_MAX]; 2414 char filename[PATH_MAX];
@@ -2396,6 +2437,25 @@ static int setup_list(struct strlist **list, const char *list_str,
2396 return 0; 2437 return 0;
2397} 2438}
2398 2439
2440static bool symbol__read_kptr_restrict(void)
2441{
2442 bool value = false;
2443
2444 if (geteuid() != 0) {
2445 FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
2446 if (fp != NULL) {
2447 char line[8];
2448
2449 if (fgets(line, sizeof(line), fp) != NULL)
2450 value = atoi(line) != 0;
2451
2452 fclose(fp);
2453 }
2454 }
2455
2456 return value;
2457}
2458
2399int symbol__init(void) 2459int symbol__init(void)
2400{ 2460{
2401 const char *symfs; 2461 const char *symfs;
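[Editorial aside, not part of the patch] symbol__read_kptr_restrict(), added above, only consults the sysctl for non-root users, since root can normally see kernel addresses anyway. A self-contained sketch closely mirroring that helper (the name read_kptr_restrict and the main() wrapper are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * True when kernel addresses are expected to be hidden from this process:
 * we are not running as root and kptr_restrict is set to a non-zero value.
 */
static bool read_kptr_restrict(void)
{
	bool value = false;

	if (geteuid() != 0) {
		FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");

		if (fp != NULL) {
			char line[8];

			if (fgets(line, sizeof(line), fp) != NULL)
				value = atoi(line) != 0;

			fclose(fp);
		}
	}

	return value;
}

int main(void)
{
	printf("kptr_restrict in effect: %s\n",
	       read_kptr_restrict() ? "yes" : "no");
	return 0;
}

As the following hunks show, symbol__init() stores the result in symbol_conf.kptr_restrict, which the kallsyms and modules loaders then consult through symbol__restricted_filename().
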
@@ -2403,6 +2463,8 @@ int symbol__init(void)
2403 if (symbol_conf.initialized) 2463 if (symbol_conf.initialized)
2404 return 0; 2464 return 0;
2405 2465
2466 symbol_conf.priv_size = ALIGN(symbol_conf.priv_size, sizeof(u64));
2467
2406 elf_version(EV_CURRENT); 2468 elf_version(EV_CURRENT);
2407 if (symbol_conf.sort_by_name) 2469 if (symbol_conf.sort_by_name)
2408 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - 2470 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
@@ -2440,6 +2502,8 @@ int symbol__init(void)
2440 if (symfs != symbol_conf.symfs) 2502 if (symfs != symbol_conf.symfs)
2441 free((void *)symfs); 2503 free((void *)symfs);
2442 2504
2505 symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
2506
2443 symbol_conf.initialized = true; 2507 symbol_conf.initialized = true;
2444 return 0; 2508 return 0;
2445 2509
@@ -2462,9 +2526,9 @@ void symbol__exit(void)
2462 symbol_conf.initialized = false; 2526 symbol_conf.initialized = false;
2463} 2527}
2464 2528
2465int machines__create_kernel_maps(struct rb_root *self, pid_t pid) 2529int machines__create_kernel_maps(struct rb_root *machines, pid_t pid)
2466{ 2530{
2467 struct machine *machine = machines__findnew(self, pid); 2531 struct machine *machine = machines__findnew(machines, pid);
2468 2532
2469 if (machine == NULL) 2533 if (machine == NULL)
2470 return -1; 2534 return -1;
@@ -2515,7 +2579,7 @@ char *strxfrchar(char *s, char from, char to)
2515 return s; 2579 return s;
2516} 2580}
2517 2581
2518int machines__create_guest_kernel_maps(struct rb_root *self) 2582int machines__create_guest_kernel_maps(struct rb_root *machines)
2519{ 2583{
2520 int ret = 0; 2584 int ret = 0;
2521 struct dirent **namelist = NULL; 2585 struct dirent **namelist = NULL;
@@ -2526,7 +2590,7 @@ int machines__create_guest_kernel_maps(struct rb_root *self)
2526 if (symbol_conf.default_guest_vmlinux_name || 2590 if (symbol_conf.default_guest_vmlinux_name ||
2527 symbol_conf.default_guest_modules || 2591 symbol_conf.default_guest_modules ||
2528 symbol_conf.default_guest_kallsyms) { 2592 symbol_conf.default_guest_kallsyms) {
2529 machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID); 2593 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
2530 } 2594 }
2531 2595
2532 if (symbol_conf.guestmount) { 2596 if (symbol_conf.guestmount) {
@@ -2547,7 +2611,7 @@ int machines__create_guest_kernel_maps(struct rb_root *self)
2547 pr_debug("Can't access file %s\n", path); 2611 pr_debug("Can't access file %s\n", path);
2548 goto failure; 2612 goto failure;
2549 } 2613 }
2550 machines__create_kernel_maps(self, pid); 2614 machines__create_kernel_maps(machines, pid);
2551 } 2615 }
2552failure: 2616failure:
2553 free(namelist); 2617 free(namelist);
@@ -2556,23 +2620,23 @@ failure:
2556 return ret; 2620 return ret;
2557} 2621}
2558 2622
2559void machines__destroy_guest_kernel_maps(struct rb_root *self) 2623void machines__destroy_guest_kernel_maps(struct rb_root *machines)
2560{ 2624{
2561 struct rb_node *next = rb_first(self); 2625 struct rb_node *next = rb_first(machines);
2562 2626
2563 while (next) { 2627 while (next) {
2564 struct machine *pos = rb_entry(next, struct machine, rb_node); 2628 struct machine *pos = rb_entry(next, struct machine, rb_node);
2565 2629
2566 next = rb_next(&pos->rb_node); 2630 next = rb_next(&pos->rb_node);
2567 rb_erase(&pos->rb_node, self); 2631 rb_erase(&pos->rb_node, machines);
2568 machine__delete(pos); 2632 machine__delete(pos);
2569 } 2633 }
2570} 2634}
2571 2635
2572int machine__load_kallsyms(struct machine *self, const char *filename, 2636int machine__load_kallsyms(struct machine *machine, const char *filename,
2573 enum map_type type, symbol_filter_t filter) 2637 enum map_type type, symbol_filter_t filter)
2574{ 2638{
2575 struct map *map = self->vmlinux_maps[type]; 2639 struct map *map = machine->vmlinux_maps[type];
2576 int ret = dso__load_kallsyms(map->dso, filename, map, filter); 2640 int ret = dso__load_kallsyms(map->dso, filename, map, filter);
2577 2641
2578 if (ret > 0) { 2642 if (ret > 0) {
@@ -2582,16 +2646,16 @@ int machine__load_kallsyms(struct machine *self, const char *filename,
2582 * kernel, with modules between them, fixup the end of all 2646 * kernel, with modules between them, fixup the end of all
2583 * sections. 2647 * sections.
2584 */ 2648 */
2585 __map_groups__fixup_end(&self->kmaps, type); 2649 __map_groups__fixup_end(&machine->kmaps, type);
2586 } 2650 }
2587 2651
2588 return ret; 2652 return ret;
2589} 2653}
2590 2654
2591int machine__load_vmlinux_path(struct machine *self, enum map_type type, 2655int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
2592 symbol_filter_t filter) 2656 symbol_filter_t filter)
2593{ 2657{
2594 struct map *map = self->vmlinux_maps[type]; 2658 struct map *map = machine->vmlinux_maps[type];
2595 int ret = dso__load_vmlinux_path(map->dso, map, filter); 2659 int ret = dso__load_vmlinux_path(map->dso, map, filter);
2596 2660
2597 if (ret > 0) { 2661 if (ret > 0) {
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 713b0b40cc4a..325ee36a9d29 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -62,7 +62,7 @@ struct symbol {
62 char name[0]; 62 char name[0];
63}; 63};
64 64
65void symbol__delete(struct symbol *self); 65void symbol__delete(struct symbol *sym);
66 66
67struct strlist; 67struct strlist;
68 68
@@ -75,7 +75,8 @@ struct symbol_conf {
75 use_callchain, 75 use_callchain,
76 exclude_other, 76 exclude_other,
77 show_cpu_utilization, 77 show_cpu_utilization,
78 initialized; 78 initialized,
79 kptr_restrict;
79 const char *vmlinux_name, 80 const char *vmlinux_name,
80 *kallsyms_name, 81 *kallsyms_name,
81 *source_prefix, 82 *source_prefix,
@@ -96,9 +97,9 @@ struct symbol_conf {
96 97
97extern struct symbol_conf symbol_conf; 98extern struct symbol_conf symbol_conf;
98 99
99static inline void *symbol__priv(struct symbol *self) 100static inline void *symbol__priv(struct symbol *sym)
100{ 101{
101 return ((void *)self) - symbol_conf.priv_size; 102 return ((void *)sym) - symbol_conf.priv_size;
102} 103}
103 104
104struct ref_reloc_sym { 105struct ref_reloc_sym {
@@ -155,43 +156,45 @@ struct dso {
155 156
156struct dso *dso__new(const char *name); 157struct dso *dso__new(const char *name);
157struct dso *dso__new_kernel(const char *name); 158struct dso *dso__new_kernel(const char *name);
158void dso__delete(struct dso *self); 159void dso__delete(struct dso *dso);
159 160
160int dso__name_len(const struct dso *self); 161int dso__name_len(const struct dso *dso);
161 162
162bool dso__loaded(const struct dso *self, enum map_type type); 163bool dso__loaded(const struct dso *dso, enum map_type type);
163bool dso__sorted_by_name(const struct dso *self, enum map_type type); 164bool dso__sorted_by_name(const struct dso *dso, enum map_type type);
164 165
165static inline void dso__set_loaded(struct dso *self, enum map_type type) 166static inline void dso__set_loaded(struct dso *dso, enum map_type type)
166{ 167{
167 self->loaded |= (1 << type); 168 dso->loaded |= (1 << type);
168} 169}
169 170
170void dso__sort_by_name(struct dso *self, enum map_type type); 171void dso__sort_by_name(struct dso *dso, enum map_type type);
171 172
172struct dso *__dsos__findnew(struct list_head *head, const char *name); 173struct dso *__dsos__findnew(struct list_head *head, const char *name);
173 174
174int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); 175int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter);
175int dso__load_vmlinux(struct dso *self, struct map *map, 176int dso__load_vmlinux(struct dso *dso, struct map *map,
176 const char *vmlinux, symbol_filter_t filter); 177 const char *vmlinux, symbol_filter_t filter);
177int dso__load_vmlinux_path(struct dso *self, struct map *map, 178int dso__load_vmlinux_path(struct dso *dso, struct map *map,
178 symbol_filter_t filter); 179 symbol_filter_t filter);
179int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map, 180int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
180 symbol_filter_t filter); 181 symbol_filter_t filter);
181int machine__load_kallsyms(struct machine *self, const char *filename, 182int machine__load_kallsyms(struct machine *machine, const char *filename,
182 enum map_type type, symbol_filter_t filter); 183 enum map_type type, symbol_filter_t filter);
183int machine__load_vmlinux_path(struct machine *self, enum map_type type, 184int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
184 symbol_filter_t filter); 185 symbol_filter_t filter);
185 186
186size_t __dsos__fprintf(struct list_head *head, FILE *fp); 187size_t __dsos__fprintf(struct list_head *head, FILE *fp);
187 188
188size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits); 189size_t machine__fprintf_dsos_buildid(struct machine *machine,
189size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp); 190 FILE *fp, bool with_hits);
190size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits); 191size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp);
191 192size_t machines__fprintf_dsos_buildid(struct rb_root *machines,
192size_t dso__fprintf_buildid(struct dso *self, FILE *fp); 193 FILE *fp, bool with_hits);
193size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp); 194size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
194size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); 195size_t dso__fprintf_symbols_by_name(struct dso *dso,
196 enum map_type type, FILE *fp);
197size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp);
195 198
196enum symtab_type { 199enum symtab_type {
197 SYMTAB__KALLSYMS = 0, 200 SYMTAB__KALLSYMS = 0,
@@ -207,34 +210,36 @@ enum symtab_type {
207 SYMTAB__NOT_FOUND, 210 SYMTAB__NOT_FOUND,
208}; 211};
209 212
210char dso__symtab_origin(const struct dso *self); 213char dso__symtab_origin(const struct dso *dso);
211void dso__set_long_name(struct dso *self, char *name); 214void dso__set_long_name(struct dso *dso, char *name);
212void dso__set_build_id(struct dso *self, void *build_id); 215void dso__set_build_id(struct dso *dso, void *build_id);
213void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine); 216void dso__read_running_kernel_build_id(struct dso *dso,
214struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); 217 struct machine *machine);
215struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, 218struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
219 u64 addr);
220struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
216 const char *name); 221 const char *name);
217 222
218int filename__read_build_id(const char *filename, void *bf, size_t size); 223int filename__read_build_id(const char *filename, void *bf, size_t size);
219int sysfs__read_build_id(const char *filename, void *bf, size_t size); 224int sysfs__read_build_id(const char *filename, void *bf, size_t size);
220bool __dsos__read_build_ids(struct list_head *head, bool with_hits); 225bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
221int build_id__sprintf(const u8 *self, int len, char *bf); 226int build_id__sprintf(const u8 *build_id, int len, char *bf);
222int kallsyms__parse(const char *filename, void *arg, 227int kallsyms__parse(const char *filename, void *arg,
223 int (*process_symbol)(void *arg, const char *name, 228 int (*process_symbol)(void *arg, const char *name,
224 char type, u64 start, u64 end)); 229 char type, u64 start, u64 end));
225 230
226void machine__destroy_kernel_maps(struct machine *self); 231void machine__destroy_kernel_maps(struct machine *machine);
227int __machine__create_kernel_maps(struct machine *self, struct dso *kernel); 232int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel);
228int machine__create_kernel_maps(struct machine *self); 233int machine__create_kernel_maps(struct machine *machine);
229 234
230int machines__create_kernel_maps(struct rb_root *self, pid_t pid); 235int machines__create_kernel_maps(struct rb_root *machines, pid_t pid);
231int machines__create_guest_kernel_maps(struct rb_root *self); 236int machines__create_guest_kernel_maps(struct rb_root *machines);
232void machines__destroy_guest_kernel_maps(struct rb_root *self); 237void machines__destroy_guest_kernel_maps(struct rb_root *machines);
233 238
234int symbol__init(void); 239int symbol__init(void);
235void symbol__exit(void); 240void symbol__exit(void);
236bool symbol_type__is_a(char symbol_type, enum map_type map_type); 241bool symbol_type__is_a(char symbol_type, enum map_type map_type);
237 242
238size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp); 243size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp);
239 244
240#endif /* __PERF_SYMBOL */ 245#endif /* __PERF_SYMBOL */
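
As a usage note for the build-id helpers declared in this header, a hedged sketch follows; the 20-byte build-id length and the hex buffer sizing are assumptions, and the u8 type comes from perf's own headers.

	#include <stdio.h>

	static void sketch__print_build_id(const char *filename)
	{
		u8 build_id[20];		/* assumed build-id size */
		char sbuild_id[20 * 2 + 1];	/* hex string + NUL */

		/* assumed to return the number of bytes read, < 0 on error */
		if (filename__read_build_id(filename, build_id,
					    sizeof(build_id)) > 0) {
			build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
			printf("%s %s\n", sbuild_id, filename);
		}
	}
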
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 0a7ed5b5e281..1e88485c16a0 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -2187,7 +2187,6 @@ static const struct flag flags[] = {
2187 { "TASKLET_SOFTIRQ", 6 }, 2187 { "TASKLET_SOFTIRQ", 6 },
2188 { "SCHED_SOFTIRQ", 7 }, 2188 { "SCHED_SOFTIRQ", 7 },
2189 { "HRTIMER_SOFTIRQ", 8 }, 2189 { "HRTIMER_SOFTIRQ", 8 },
2190 { "RCU_SOFTIRQ", 9 },
2191 2190
2192 { "HRTIMER_NORESTART", 0 }, 2191 { "HRTIMER_NORESTART", 0 },
2193 { "HRTIMER_RESTART", 1 }, 2192 { "HRTIMER_RESTART", 1 },
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
index 8c17a8730e4a..0229723aceb3 100644
--- a/tools/perf/util/ui/browsers/annotate.c
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -5,7 +5,6 @@
5#include "../../hist.h" 5#include "../../hist.h"
6#include "../../sort.h" 6#include "../../sort.h"
7#include "../../symbol.h" 7#include "../../symbol.h"
8#include "../../annotate.h"
9#include <pthread.h> 8#include <pthread.h>
10 9
11static void ui__error_window(const char *fmt, ...) 10static void ui__error_window(const char *fmt, ...)
@@ -256,10 +255,9 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
256 int refresh) 255 int refresh)
257{ 256{
258 struct objdump_line *pos, *n; 257 struct objdump_line *pos, *n;
259 struct annotation *notes = symbol__annotation(sym); 258 struct annotation *notes;
260 struct annotate_browser browser = { 259 struct annotate_browser browser = {
261 .b = { 260 .b = {
262 .entries = &notes->src->source,
263 .refresh = ui_browser__list_head_refresh, 261 .refresh = ui_browser__list_head_refresh,
264 .seek = ui_browser__list_head_seek, 262 .seek = ui_browser__list_head_seek,
265 .write = annotate_browser__write, 263 .write = annotate_browser__write,
@@ -281,6 +279,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
281 279
282 ui_helpline__push("Press <- or ESC to exit"); 280 ui_helpline__push("Press <- or ESC to exit");
283 281
282 notes = symbol__annotation(sym);
283
284 list_for_each_entry(pos, &notes->src->source, node) { 284 list_for_each_entry(pos, &notes->src->source, node) {
285 struct objdump_line_rb_node *rbpos; 285 struct objdump_line_rb_node *rbpos;
286 size_t line_len = strlen(pos->line); 286 size_t line_len = strlen(pos->line);
@@ -291,6 +291,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
291 rbpos->idx = browser.b.nr_entries++; 291 rbpos->idx = browser.b.nr_entries++;
292 } 292 }
293 293
294 browser.b.entries = &notes->src->source,
294 browser.b.width += 18; /* Percentage */ 295 browser.b.width += 18; /* Percentage */
295 ret = annotate_browser__run(&browser, evidx, refresh); 296 ret = annotate_browser__run(&browser, evidx, refresh);
296 list_for_each_entry_safe(pos, n, &notes->src->source, node) { 297 list_for_each_entry_safe(pos, n, &notes->src->source, node) {
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index 798efdca3ead..5d767c622dfc 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -851,7 +851,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel,
851 goto out_free_stack; 851 goto out_free_stack;
852 case 'a': 852 case 'a':
853 if (browser->selection == NULL || 853 if (browser->selection == NULL ||
854 browser->selection->map == NULL || 854 browser->selection->sym == NULL ||
855 browser->selection->map->dso->annotate_warned) 855 browser->selection->map->dso->annotate_warned)
856 continue; 856 continue;
857 goto do_annotate; 857 goto do_annotate;
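
The hunk above switches the annotate ('a') key guard from checking selection->map to checking selection->sym, while the following line still dereferences selection->map->dso. Purely as an illustration, and not what this patch does, a defensive variant would check both pointers before annotating:

	/* hypothetical combined guard, not taken from this patch */
	if (browser->selection == NULL ||
	    browser->selection->sym == NULL ||
	    browser->selection->map == NULL ||
	    browser->selection->map->dso->annotate_warned)
		continue;
	goto do_annotate;
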