aboutsummaryrefslogtreecommitdiffstats
path: root/tools/perf
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2018-06-22 15:20:35 -0400
committerThomas Gleixner <tglx@linutronix.de>2018-06-22 15:20:35 -0400
commit7731b8bc94e599c9a79e428f3359ff2c34b7576a (patch)
tree879f18ccbe274122f2d4f095b43cbc7f953e0ada /tools/perf
parent48e315618dc4dc8904182cd221e3d395d5d97005 (diff)
parent9ffc59d57228d74809700be6f7ecb1db10292f05 (diff)
Merge branch 'linus' into x86/urgent
Required to queue a dependent fix.
Diffstat (limited to 'tools/perf')
-rw-r--r--tools/perf/Documentation/Makefile29
-rw-r--r--tools/perf/Documentation/asciidoctor-extensions.rb29
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt7
-rw-r--r--tools/perf/Documentation/perf-list.txt6
-rw-r--r--tools/perf/Documentation/perf-record.txt3
-rw-r--r--tools/perf/Documentation/perf-script-python.txt26
-rw-r--r--tools/perf/Documentation/perf-stat.txt56
-rw-r--r--tools/perf/Makefile.config14
-rw-r--r--tools/perf/Makefile.perf10
-rw-r--r--tools/perf/arch/arm/tests/dwarf-unwind.c2
-rw-r--r--tools/perf/arch/arm64/tests/dwarf-unwind.c2
-rw-r--r--tools/perf/arch/common.c4
-rw-r--r--tools/perf/arch/common.h4
-rw-r--r--tools/perf/arch/powerpc/tests/dwarf-unwind.c2
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c3
-rw-r--r--tools/perf/arch/x86/tests/dwarf-unwind.c2
-rw-r--r--tools/perf/arch/x86/util/Build2
-rw-r--r--tools/perf/arch/x86/util/event.c76
-rw-r--r--tools/perf/arch/x86/util/machine.c103
-rw-r--r--tools/perf/builtin-annotate.c45
-rw-r--r--tools/perf/builtin-buildid-cache.c81
-rw-r--r--tools/perf/builtin-c2c.c2
-rw-r--r--tools/perf/builtin-inject.c4
-rw-r--r--tools/perf/builtin-kallsyms.c2
-rw-r--r--tools/perf/builtin-kmem.c6
-rw-r--r--tools/perf/builtin-kvm.c2
-rw-r--r--tools/perf/builtin-probe.c3
-rw-r--r--tools/perf/builtin-report.c66
-rw-r--r--tools/perf/builtin-sched.c14
-rw-r--r--tools/perf/builtin-script.c68
-rw-r--r--tools/perf/builtin-stat.c120
-rw-r--r--tools/perf/builtin-timechart.c8
-rw-r--r--tools/perf/builtin-top.c57
-rw-r--r--tools/perf/builtin-trace.c13
-rwxr-xr-xtools/perf/check-headers.sh30
-rw-r--r--tools/perf/examples/bpf/5sec.c49
-rw-r--r--tools/perf/examples/bpf/empty.c3
-rw-r--r--tools/perf/include/bpf/bpf.h13
-rw-r--r--tools/perf/perf.c25
-rw-r--r--tools/perf/scripts/python/bin/powerpc-hcalls-record2
-rw-r--r--tools/perf/scripts/python/bin/powerpc-hcalls-report2
-rw-r--r--tools/perf/scripts/python/powerpc-hcalls.py200
-rw-r--r--tools/perf/tests/builtin-test.c9
-rw-r--r--tools/perf/tests/code-reading.c6
-rw-r--r--tools/perf/tests/hists_common.c6
-rw-r--r--tools/perf/tests/kmod-path.c16
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c7
-rw-r--r--tools/perf/tests/parse-events.c26
-rw-r--r--tools/perf/tests/python-use.c3
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh14
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c20
-rwxr-xr-xtools/perf/trace/beauty/prctl_option.sh2
-rw-r--r--tools/perf/ui/browsers/annotate.c29
-rw-r--r--tools/perf/ui/browsers/hists.c43
-rw-r--r--tools/perf/ui/browsers/hists.h3
-rw-r--r--tools/perf/ui/browsers/map.c2
-rw-r--r--tools/perf/ui/gtk/annotate.c2
-rw-r--r--tools/perf/ui/gtk/hists.c5
-rw-r--r--tools/perf/ui/hist.c2
-rw-r--r--tools/perf/ui/stdio/hist.c7
-rw-r--r--tools/perf/util/Build3
-rw-r--r--tools/perf/util/annotate.c222
-rw-r--r--tools/perf/util/annotate.h62
-rw-r--r--tools/perf/util/auxtrace.c12
-rw-r--r--tools/perf/util/bpf-prologue.c2
-rw-r--r--tools/perf/util/build-id.c4
-rw-r--r--tools/perf/util/cgroup.c9
-rw-r--r--tools/perf/util/config.c16
-rw-r--r--tools/perf/util/config.h1
-rw-r--r--tools/perf/util/cs-etm.c4
-rw-r--r--tools/perf/util/db-export.c7
-rw-r--r--tools/perf/util/dso.c36
-rw-r--r--tools/perf/util/dso.h37
-rw-r--r--tools/perf/util/env.c31
-rw-r--r--tools/perf/util/env.h3
-rw-r--r--tools/perf/util/event.c73
-rw-r--r--tools/perf/util/event.h8
-rw-r--r--tools/perf/util/evlist.c15
-rw-r--r--tools/perf/util/evlist.h3
-rw-r--r--tools/perf/util/evsel.c6
-rw-r--r--tools/perf/util/evsel.h5
-rw-r--r--tools/perf/util/genelf.c2
-rw-r--r--tools/perf/util/header.c24
-rw-r--r--tools/perf/util/hist.c15
-rw-r--r--tools/perf/util/hist.h26
-rw-r--r--tools/perf/util/intel-bts.c3
-rw-r--r--tools/perf/util/intel-pt-decoder/insn.h18
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c23
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.h9
-rw-r--r--tools/perf/util/intel-pt.c13
-rw-r--r--tools/perf/util/llvm-utils.c19
-rw-r--r--tools/perf/util/machine.c355
-rw-r--r--tools/perf/util/machine.h72
-rw-r--r--tools/perf/util/map.c147
-rw-r--r--tools/perf/util/map.h75
-rw-r--r--tools/perf/util/parse-events.c73
-rw-r--r--tools/perf/util/parse-events.l18
-rw-r--r--tools/perf/util/parse-events.y14
-rw-r--r--tools/perf/util/probe-event.c32
-rw-r--r--tools/perf/util/probe-file.c3
-rw-r--r--tools/perf/util/quote.c62
-rw-r--r--tools/perf/util/quote.h31
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c250
-rw-r--r--tools/perf/util/session.c15
-rw-r--r--tools/perf/util/sort.c91
-rw-r--r--tools/perf/util/sort.h11
-rw-r--r--tools/perf/util/srcline.c1
-rw-r--r--tools/perf/util/stat.h3
-rw-r--r--tools/perf/util/symbol-elf.c494
-rw-r--r--tools/perf/util/symbol-minimal.c3
-rw-r--r--tools/perf/util/symbol.c265
-rw-r--r--tools/perf/util/symbol.h27
-rw-r--r--tools/perf/util/symbol_fprintf.c4
-rw-r--r--tools/perf/util/thread.c35
-rw-r--r--tools/perf/util/thread.h13
-rw-r--r--tools/perf/util/top.h3
-rw-r--r--tools/perf/util/trace-event-info.c11
-rw-r--r--tools/perf/util/trace-event.c8
-rw-r--r--tools/perf/util/unwind-libdw.c23
-rw-r--r--tools/perf/util/unwind-libunwind-local.c19
-rw-r--r--tools/perf/util/util.c34
-rw-r--r--tools/perf/util/util.h4
-rw-r--r--tools/perf/util/vdso.c6
123 files changed, 2874 insertions, 1436 deletions
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index db11478e30b4..42261a9b280e 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -47,7 +47,8 @@ man5dir=$(mandir)/man5
47man7dir=$(mandir)/man7 47man7dir=$(mandir)/man7
48 48
49ASCIIDOC=asciidoc 49ASCIIDOC=asciidoc
50ASCIIDOC_EXTRA = --unsafe 50ASCIIDOC_EXTRA = --unsafe -f asciidoc.conf
51ASCIIDOC_HTML = xhtml11
51MANPAGE_XSL = manpage-normal.xsl 52MANPAGE_XSL = manpage-normal.xsl
52XMLTO_EXTRA = 53XMLTO_EXTRA =
53INSTALL?=install 54INSTALL?=install
@@ -55,6 +56,14 @@ RM ?= rm -f
55DOC_REF = origin/man 56DOC_REF = origin/man
56HTML_REF = origin/html 57HTML_REF = origin/html
57 58
59ifdef USE_ASCIIDOCTOR
60ASCIIDOC = asciidoctor
61ASCIIDOC_EXTRA = -a compat-mode
62ASCIIDOC_EXTRA += -I. -rasciidoctor-extensions
63ASCIIDOC_EXTRA += -a mansource="perf" -a manmanual="perf Manual"
64ASCIIDOC_HTML = xhtml5
65endif
66
58infodir?=$(prefix)/share/info 67infodir?=$(prefix)/share/info
59MAKEINFO=makeinfo 68MAKEINFO=makeinfo
60INSTALL_INFO=install-info 69INSTALL_INFO=install-info
@@ -73,10 +82,12 @@ ifeq ($(_tmp_tool_path),)
73 missing_tools = $(ASCIIDOC) 82 missing_tools = $(ASCIIDOC)
74endif 83endif
75 84
85ifndef USE_ASCIIDOCTOR
76_tmp_tool_path := $(call get-executable,$(XMLTO)) 86_tmp_tool_path := $(call get-executable,$(XMLTO))
77ifeq ($(_tmp_tool_path),) 87ifeq ($(_tmp_tool_path),)
78 missing_tools += $(XMLTO) 88 missing_tools += $(XMLTO)
79endif 89endif
90endif
80 91
81# 92#
82# For asciidoc ... 93# For asciidoc ...
@@ -264,9 +275,17 @@ clean:
264 275
265$(MAN_HTML): $(OUTPUT)%.html : %.txt 276$(MAN_HTML): $(OUTPUT)%.html : %.txt
266 $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ 277 $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
267 $(ASCIIDOC) -b xhtml11 -d manpage -f asciidoc.conf \ 278 $(ASCIIDOC) -b $(ASCIIDOC_HTML) -d manpage \
279 $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
280 mv $@+ $@
281
282ifdef USE_ASCIIDOCTOR
283$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.txt
284 $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
285 $(ASCIIDOC) -b manpage -d manpage \
268 $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ 286 $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
269 mv $@+ $@ 287 mv $@+ $@
288endif
270 289
271$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.xml 290$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.xml
272 $(QUIET_XMLTO)$(RM) $@ && \ 291 $(QUIET_XMLTO)$(RM) $@ && \
@@ -274,7 +293,7 @@ $(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.xml
274 293
275$(OUTPUT)%.xml : %.txt 294$(OUTPUT)%.xml : %.txt
276 $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ 295 $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
277 $(ASCIIDOC) -b docbook -d manpage -f asciidoc.conf \ 296 $(ASCIIDOC) -b docbook -d manpage \
278 $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ 297 $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
279 mv $@+ $@ 298 mv $@+ $@
280 299
@@ -321,13 +340,13 @@ howto-index.txt: howto-index.sh $(wildcard howto/*.txt)
321 mv $@+ $@ 340 mv $@+ $@
322 341
323$(patsubst %,%.html,$(ARTICLES)) : %.html : %.txt 342$(patsubst %,%.html,$(ARTICLES)) : %.html : %.txt
324 $(QUIET_ASCIIDOC)$(ASCIIDOC) -b xhtml11 $*.txt 343 $(QUIET_ASCIIDOC)$(ASCIIDOC) -b $(ASCIIDOC_HTML) $*.txt
325 344
326WEBDOC_DEST = /pub/software/tools/perf/docs 345WEBDOC_DEST = /pub/software/tools/perf/docs
327 346
328$(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt 347$(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt
329 $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ 348 $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
330 sed -e '1,/^$$/d' $< | $(ASCIIDOC) -b xhtml11 - >$@+ && \ 349 sed -e '1,/^$$/d' $< | $(ASCIIDOC) -b $(ASCIIDOC_HTML) - >$@+ && \
331 mv $@+ $@ 350 mv $@+ $@
332 351
333# UNIMPLEMENTED 352# UNIMPLEMENTED
diff --git a/tools/perf/Documentation/asciidoctor-extensions.rb b/tools/perf/Documentation/asciidoctor-extensions.rb
new file mode 100644
index 000000000000..d148fe95c0c4
--- /dev/null
+++ b/tools/perf/Documentation/asciidoctor-extensions.rb
@@ -0,0 +1,29 @@
1require 'asciidoctor'
2require 'asciidoctor/extensions'
3
4module Perf
5 module Documentation
6 class LinkPerfProcessor < Asciidoctor::Extensions::InlineMacroProcessor
7 use_dsl
8
9 named :chrome
10
11 def process(parent, target, attrs)
12 if parent.document.basebackend? 'html'
13 %(<a href="#{target}.html">#{target}(#{attrs[1]})</a>\n)
14 elsif parent.document.basebackend? 'manpage'
15 "#{target}(#{attrs[1]})"
16 elsif parent.document.basebackend? 'docbook'
17 "<citerefentry>\n" \
18 "<refentrytitle>#{target}</refentrytitle>" \
19 "<manvolnum>#{attrs[1]}</manvolnum>\n" \
20 "</citerefentry>\n"
21 end
22 end
23 end
24 end
25end
26
27Asciidoctor::Extensions.register do
28 inline_macro Perf::Documentation::LinkPerfProcessor, :linkperf
29end
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
index 73c2650bd0db..f6de0952ff3c 100644
--- a/tools/perf/Documentation/perf-buildid-cache.txt
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -48,6 +48,9 @@ OPTIONS
48--purge=:: 48--purge=::
49 Purge all cached binaries including older caches which have specified 49 Purge all cached binaries including older caches which have specified
50 path from the cache. 50 path from the cache.
51-P::
52--purge-all::
53 Purge all cached binaries. This will flush out entire cache.
51-M:: 54-M::
52--missing=:: 55--missing=::
53 List missing build ids in the cache for the specified file. 56 List missing build ids in the cache for the specified file.
@@ -59,7 +62,9 @@ OPTIONS
59 exactly same build-id, that is replaced by new one. It can be used 62 exactly same build-id, that is replaced by new one. It can be used
60 to update kallsyms and kernel dso to vmlinux in order to support 63 to update kallsyms and kernel dso to vmlinux in order to support
61 annotation. 64 annotation.
62 65-l::
66--list::
67 List all valid binaries from cache.
63-v:: 68-v::
64--verbose:: 69--verbose::
65 Be more verbose. 70 Be more verbose.
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 2549c34a7895..11300dbe35c5 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -124,7 +124,11 @@ The available PMUs and their raw parameters can be listed with
124For example the raw event "LSD.UOPS" core pmu event above could 124For example the raw event "LSD.UOPS" core pmu event above could
125be specified as 125be specified as
126 126
127 perf stat -e cpu/event=0xa8,umask=0x1,name=LSD.UOPS_CYCLES,cmask=1/ ... 127 perf stat -e cpu/event=0xa8,umask=0x1,name=LSD.UOPS_CYCLES,cmask=0x1/ ...
128
129 or using extended name syntax
130
131 perf stat -e cpu/event=0xa8,umask=0x1,cmask=0x1,name=\'LSD.UOPS_CYCLES:cmask=0x1\'/ ...
128 132
129PER SOCKET PMUS 133PER SOCKET PMUS
130--------------- 134---------------
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index cc37b3a4be76..04168da4268e 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -57,6 +57,9 @@ OPTIONS
57 FP mode, "dwarf" for DWARF mode, "lbr" for LBR mode and 57 FP mode, "dwarf" for DWARF mode, "lbr" for LBR mode and
58 "no" for disable callgraph. 58 "no" for disable callgraph.
59 - 'stack-size': user stack size for dwarf mode 59 - 'stack-size': user stack size for dwarf mode
60 - 'name' : User defined event name. Single quotes (') may be used to
61 escape symbols in the name from parsing by shell and tool
62 like this: name=\'CPU_CLK_UNHALTED.THREAD:cmask=0x1\'.
60 63
61 See the linkperf:perf-list[1] man page for more parameters. 64 See the linkperf:perf-list[1] man page for more parameters.
62 65
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 51ec2d20068a..0fb9eda3cbca 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -610,6 +610,32 @@ Various utility functions for use with perf script:
610 nsecs_str(nsecs) - returns printable string in the form secs.nsecs 610 nsecs_str(nsecs) - returns printable string in the form secs.nsecs
611 avg(total, n) - returns average given a sum and a total number of values 611 avg(total, n) - returns average given a sum and a total number of values
612 612
613SUPPORTED FIELDS
614----------------
615
616Currently supported fields:
617
618ev_name, comm, pid, tid, cpu, ip, time, period, phys_addr, addr,
619symbol, dso, time_enabled, time_running, values, callchain,
620brstack, brstacksym, datasrc, datasrc_decode, iregs, uregs,
621weight, transaction, raw_buf, attr.
622
623Some fields have sub items:
624
625brstack:
626 from, to, from_dsoname, to_dsoname, mispred,
627 predicted, in_tx, abort, cycles.
628
629brstacksym:
630 items: from, to, pred, in_tx, abort (converted string)
631
632For example,
633We can use this code to print brstack "from", "to", "cycles".
634
635if 'brstack' in dict:
636 for entry in dict['brstack']:
637 print "from %s, to %s, cycles %s" % (entry["from"], entry["to"], entry["cycles"])
638
613SEE ALSO 639SEE ALSO
614-------- 640--------
615linkperf:perf-script[1] 641linkperf:perf-script[1]
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index e6c3b4e555c2..5dfe102fb5b5 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -116,6 +116,22 @@ Do not aggregate counts across all monitored CPUs.
116print counts using a CSV-style output to make it easy to import directly into 116print counts using a CSV-style output to make it easy to import directly into
117spreadsheets. Columns are separated by the string specified in SEP. 117spreadsheets. Columns are separated by the string specified in SEP.
118 118
119--table:: Display time for each run (-r option), in a table format, e.g.:
120
121 $ perf stat --null -r 5 --table perf bench sched pipe
122
123 Performance counter stats for 'perf bench sched pipe' (5 runs):
124
125 # Table of individual measurements:
126 5.189 (-0.293) #
127 5.189 (-0.294) #
128 5.186 (-0.296) #
129 5.663 (+0.181) ##
130 6.186 (+0.703) ####
131
132 # Final result:
133 5.483 +- 0.198 seconds time elapsed ( +- 3.62% )
134
119-G name:: 135-G name::
120--cgroup name:: 136--cgroup name::
121monitor only in the container (cgroup) called "name". This option is available only 137monitor only in the container (cgroup) called "name". This option is available only
@@ -294,20 +310,38 @@ Users who wants to get the actual value can apply --no-metric-only.
294EXAMPLES 310EXAMPLES
295-------- 311--------
296 312
297$ perf stat -- make -j 313$ perf stat -- make
314
315 Performance counter stats for 'make':
316
317 83723.452481 task-clock:u (msec) # 1.004 CPUs utilized
318 0 context-switches:u # 0.000 K/sec
319 0 cpu-migrations:u # 0.000 K/sec
320 3,228,188 page-faults:u # 0.039 M/sec
321 229,570,665,834 cycles:u # 2.742 GHz
322 313,163,853,778 instructions:u # 1.36 insn per cycle
323 69,704,684,856 branches:u # 832.559 M/sec
324 2,078,861,393 branch-misses:u # 2.98% of all branches
325
326 83.409183620 seconds time elapsed
327
328 74.684747000 seconds user
329 8.739217000 seconds sys
330
331TIMINGS
332-------
333As displayed in the example above we can display 3 types of timings.
334We always display the time the counters were enabled/alive:
335
336 83.409183620 seconds time elapsed
298 337
299 Performance counter stats for 'make -j': 338For workload sessions we also display time the workloads spent in
339user/system lands:
300 340
301 8117.370256 task clock ticks # 11.281 CPU utilization factor 341 74.684747000 seconds user
302 678 context switches # 0.000 M/sec 342 8.739217000 seconds sys
303 133 CPU migrations # 0.000 M/sec
304 235724 pagefaults # 0.029 M/sec
305 24821162526 CPU cycles # 3057.784 M/sec
306 18687303457 instructions # 2302.138 M/sec
307 172158895 cache references # 21.209 M/sec
308 27075259 cache misses # 3.335 M/sec
309 343
310 Wall-clock time elapsed: 719.554352 msecs 344Those times are the very same as displayed by the 'time' tool.
311 345
312CSV FORMAT 346CSV FORMAT
313---------- 347----------
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index ae7dc46e8f8a..b5ac356ba323 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -885,6 +885,8 @@ endif
885 885
886# Among the variables below, these: 886# Among the variables below, these:
887# perfexecdir 887# perfexecdir
888# perf_include_dir
889# perf_examples_dir
888# template_dir 890# template_dir
889# mandir 891# mandir
890# infodir 892# infodir
@@ -904,6 +906,8 @@ bindir = $(abspath $(prefix)/$(bindir_relative))
904mandir = share/man 906mandir = share/man
905infodir = share/info 907infodir = share/info
906perfexecdir = libexec/perf-core 908perfexecdir = libexec/perf-core
909perf_include_dir = lib/include/perf
910perf_examples_dir = lib/examples/perf
907sharedir = $(prefix)/share 911sharedir = $(prefix)/share
908template_dir = share/perf-core/templates 912template_dir = share/perf-core/templates
909STRACE_GROUPS_DIR = share/perf-core/strace/groups 913STRACE_GROUPS_DIR = share/perf-core/strace/groups
@@ -934,6 +938,8 @@ bindir_SQ = $(subst ','\'',$(bindir))
934mandir_SQ = $(subst ','\'',$(mandir)) 938mandir_SQ = $(subst ','\'',$(mandir))
935infodir_SQ = $(subst ','\'',$(infodir)) 939infodir_SQ = $(subst ','\'',$(infodir))
936perfexecdir_SQ = $(subst ','\'',$(perfexecdir)) 940perfexecdir_SQ = $(subst ','\'',$(perfexecdir))
941perf_include_dir_SQ = $(subst ','\'',$(perf_include_dir))
942perf_examples_dir_SQ = $(subst ','\'',$(perf_examples_dir))
937template_dir_SQ = $(subst ','\'',$(template_dir)) 943template_dir_SQ = $(subst ','\'',$(template_dir))
938htmldir_SQ = $(subst ','\'',$(htmldir)) 944htmldir_SQ = $(subst ','\'',$(htmldir))
939tipdir_SQ = $(subst ','\'',$(tipdir)) 945tipdir_SQ = $(subst ','\'',$(tipdir))
@@ -944,14 +950,20 @@ srcdir_SQ = $(subst ','\'',$(srcdir))
944 950
945ifneq ($(filter /%,$(firstword $(perfexecdir))),) 951ifneq ($(filter /%,$(firstword $(perfexecdir))),)
946perfexec_instdir = $(perfexecdir) 952perfexec_instdir = $(perfexecdir)
953perf_include_instdir = $(perf_include_dir)
954perf_examples_instdir = $(perf_examples_dir)
947STRACE_GROUPS_INSTDIR = $(STRACE_GROUPS_DIR) 955STRACE_GROUPS_INSTDIR = $(STRACE_GROUPS_DIR)
948tip_instdir = $(tipdir) 956tip_instdir = $(tipdir)
949else 957else
950perfexec_instdir = $(prefix)/$(perfexecdir) 958perfexec_instdir = $(prefix)/$(perfexecdir)
959perf_include_instdir = $(prefix)/$(perf_include_dir)
960perf_examples_instdir = $(prefix)/$(perf_examples_dir)
951STRACE_GROUPS_INSTDIR = $(prefix)/$(STRACE_GROUPS_DIR) 961STRACE_GROUPS_INSTDIR = $(prefix)/$(STRACE_GROUPS_DIR)
952tip_instdir = $(prefix)/$(tipdir) 962tip_instdir = $(prefix)/$(tipdir)
953endif 963endif
954perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir)) 964perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
965perf_include_instdir_SQ = $(subst ','\'',$(perf_include_instdir))
966perf_examples_instdir_SQ = $(subst ','\'',$(perf_examples_instdir))
955STRACE_GROUPS_INSTDIR_SQ = $(subst ','\'',$(STRACE_GROUPS_INSTDIR)) 967STRACE_GROUPS_INSTDIR_SQ = $(subst ','\'',$(STRACE_GROUPS_INSTDIR))
956tip_instdir_SQ = $(subst ','\'',$(tip_instdir)) 968tip_instdir_SQ = $(subst ','\'',$(tip_instdir))
957 969
@@ -999,6 +1011,8 @@ $(call detected_var,ETC_PERFCONFIG_SQ)
999$(call detected_var,STRACE_GROUPS_DIR_SQ) 1011$(call detected_var,STRACE_GROUPS_DIR_SQ)
1000$(call detected_var,prefix_SQ) 1012$(call detected_var,prefix_SQ)
1001$(call detected_var,perfexecdir_SQ) 1013$(call detected_var,perfexecdir_SQ)
1014$(call detected_var,perf_include_dir_SQ)
1015$(call detected_var,perf_examples_dir_SQ)
1002$(call detected_var,tipdir_SQ) 1016$(call detected_var,tipdir_SQ)
1003$(call detected_var,srcdir_SQ) 1017$(call detected_var,srcdir_SQ)
1004$(call detected_var,LIBDIR) 1018$(call detected_var,LIBDIR)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 83e453de36f8..ecc9fc952655 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -767,6 +767,16 @@ ifndef NO_JVMTI
767endif 767endif
768 $(call QUIET_INSTALL, libexec) \ 768 $(call QUIET_INSTALL, libexec) \
769 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' 769 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
770ifndef NO_LIBBPF
771 $(call QUIET_INSTALL, lib) \
772 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
773 $(call QUIET_INSTALL, include/bpf) \
774 $(INSTALL) include/bpf/*.h '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
775 $(call QUIET_INSTALL, lib) \
776 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
777 $(call QUIET_INSTALL, examples/bpf) \
778 $(INSTALL) examples/bpf/*.c '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
779endif
770 $(call QUIET_INSTALL, perf-archive) \ 780 $(call QUIET_INSTALL, perf-archive) \
771 $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' 781 $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
772 $(call QUIET_INSTALL, perf-with-kcore) \ 782 $(call QUIET_INSTALL, perf-with-kcore) \
diff --git a/tools/perf/arch/arm/tests/dwarf-unwind.c b/tools/perf/arch/arm/tests/dwarf-unwind.c
index 8cb347760233..9a0242e74cfc 100644
--- a/tools/perf/arch/arm/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm/tests/dwarf-unwind.c
@@ -25,7 +25,7 @@ static int sample_ustack(struct perf_sample *sample,
25 25
26 sp = (unsigned long) regs[PERF_REG_ARM_SP]; 26 sp = (unsigned long) regs[PERF_REG_ARM_SP];
27 27
28 map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp); 28 map = map_groups__find(thread->mg, (u64)sp);
29 if (!map) { 29 if (!map) {
30 pr_debug("failed to get stack map\n"); 30 pr_debug("failed to get stack map\n");
31 free(buf); 31 free(buf);
diff --git a/tools/perf/arch/arm64/tests/dwarf-unwind.c b/tools/perf/arch/arm64/tests/dwarf-unwind.c
index e907f0f4c20c..5522ce384723 100644
--- a/tools/perf/arch/arm64/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm64/tests/dwarf-unwind.c
@@ -25,7 +25,7 @@ static int sample_ustack(struct perf_sample *sample,
25 25
26 sp = (unsigned long) regs[PERF_REG_ARM64_SP]; 26 sp = (unsigned long) regs[PERF_REG_ARM64_SP];
27 27
28 map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp); 28 map = map_groups__find(thread->mg, (u64)sp);
29 if (!map) { 29 if (!map) {
30 pr_debug("failed to get stack map\n"); 30 pr_debug("failed to get stack map\n");
31 free(buf); 31 free(buf);
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index c6f373508a4f..82657c01a3b8 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -189,7 +189,7 @@ out_error:
189 return -1; 189 return -1;
190} 190}
191 191
192int perf_env__lookup_objdump(struct perf_env *env) 192int perf_env__lookup_objdump(struct perf_env *env, const char **path)
193{ 193{
194 /* 194 /*
195 * For live mode, env->arch will be NULL and we can use 195 * For live mode, env->arch will be NULL and we can use
@@ -198,5 +198,5 @@ int perf_env__lookup_objdump(struct perf_env *env)
198 if (env->arch == NULL) 198 if (env->arch == NULL)
199 return 0; 199 return 0;
200 200
201 return perf_env__lookup_binutils_path(env, "objdump", &objdump_path); 201 return perf_env__lookup_binutils_path(env, "objdump", path);
202} 202}
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
index 2d875baa92e6..2167001b18c5 100644
--- a/tools/perf/arch/common.h
+++ b/tools/perf/arch/common.h
@@ -4,8 +4,6 @@
4 4
5#include "../util/env.h" 5#include "../util/env.h"
6 6
7extern const char *objdump_path; 7int perf_env__lookup_objdump(struct perf_env *env, const char **path);
8
9int perf_env__lookup_objdump(struct perf_env *env);
10 8
11#endif /* ARCH_PERF_COMMON_H */ 9#endif /* ARCH_PERF_COMMON_H */
diff --git a/tools/perf/arch/powerpc/tests/dwarf-unwind.c b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
index 30cbbd6d5be0..5f39efef0856 100644
--- a/tools/perf/arch/powerpc/tests/dwarf-unwind.c
+++ b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
@@ -26,7 +26,7 @@ static int sample_ustack(struct perf_sample *sample,
26 26
27 sp = (unsigned long) regs[PERF_REG_POWERPC_R1]; 27 sp = (unsigned long) regs[PERF_REG_POWERPC_R1];
28 28
29 map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp); 29 map = map_groups__find(thread->mg, (u64)sp);
30 if (!map) { 30 if (!map) {
31 pr_debug("failed to get stack map\n"); 31 pr_debug("failed to get stack map\n");
32 free(buf); 32 free(buf);
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 0c370f81e002..3598b8b75d27 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -248,8 +248,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
248 248
249 ip = chain->ips[2]; 249 ip = chain->ips[2];
250 250
251 thread__find_addr_location(thread, PERF_RECORD_MISC_USER, 251 thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
252 MAP__FUNCTION, ip, &al);
253 252
254 if (al.map) 253 if (al.map)
255 dso = al.map->dso; 254 dso = al.map->dso;
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index 95036c7a59e8..7879df34569a 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -26,7 +26,7 @@ static int sample_ustack(struct perf_sample *sample,
26 26
27 sp = (unsigned long) regs[PERF_REG_X86_SP]; 27 sp = (unsigned long) regs[PERF_REG_X86_SP];
28 28
29 map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp); 29 map = map_groups__find(thread->mg, (u64)sp);
30 if (!map) { 30 if (!map) {
31 pr_debug("failed to get stack map\n"); 31 pr_debug("failed to get stack map\n");
32 free(buf); 32 free(buf);
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index f95e6f46ef0d..844b8f335532 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -4,6 +4,8 @@ libperf-y += pmu.o
4libperf-y += kvm-stat.o 4libperf-y += kvm-stat.o
5libperf-y += perf_regs.o 5libperf-y += perf_regs.o
6libperf-y += group.o 6libperf-y += group.o
7libperf-y += machine.o
8libperf-y += event.o
7 9
8libperf-$(CONFIG_DWARF) += dwarf-regs.o 10libperf-$(CONFIG_DWARF) += dwarf-regs.o
9libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o 11libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c
new file mode 100644
index 000000000000..675a0213044d
--- /dev/null
+++ b/tools/perf/arch/x86/util/event.c
@@ -0,0 +1,76 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/types.h>
3#include <linux/string.h>
4
5#include "../../util/machine.h"
6#include "../../util/tool.h"
7#include "../../util/map.h"
8#include "../../util/util.h"
9#include "../../util/debug.h"
10
11#if defined(__x86_64__)
12
13int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
14 perf_event__handler_t process,
15 struct machine *machine)
16{
17 int rc = 0;
18 struct map *pos;
19 struct map_groups *kmaps = &machine->kmaps;
20 struct maps *maps = &kmaps->maps;
21 union perf_event *event = zalloc(sizeof(event->mmap) +
22 machine->id_hdr_size);
23
24 if (!event) {
25 pr_debug("Not enough memory synthesizing mmap event "
26 "for extra kernel maps\n");
27 return -1;
28 }
29
30 for (pos = maps__first(maps); pos; pos = map__next(pos)) {
31 struct kmap *kmap;
32 size_t size;
33
34 if (!__map__is_extra_kernel_map(pos))
35 continue;
36
37 kmap = map__kmap(pos);
38
39 size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
40 PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
41 machine->id_hdr_size;
42
43 memset(event, 0, size);
44
45 event->mmap.header.type = PERF_RECORD_MMAP;
46
47 /*
48 * kernel uses 0 for user space maps, see kernel/perf_event.c
49 * __perf_event_mmap
50 */
51 if (machine__is_host(machine))
52 event->header.misc = PERF_RECORD_MISC_KERNEL;
53 else
54 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
55
56 event->mmap.header.size = size;
57
58 event->mmap.start = pos->start;
59 event->mmap.len = pos->end - pos->start;
60 event->mmap.pgoff = pos->pgoff;
61 event->mmap.pid = machine->pid;
62
63 strlcpy(event->mmap.filename, kmap->name, PATH_MAX);
64
65 if (perf_tool__process_synth_event(tool, event, machine,
66 process) != 0) {
67 rc = -1;
68 break;
69 }
70 }
71
72 free(event);
73 return rc;
74}
75
76#endif
diff --git a/tools/perf/arch/x86/util/machine.c b/tools/perf/arch/x86/util/machine.c
new file mode 100644
index 000000000000..4520ac53caa9
--- /dev/null
+++ b/tools/perf/arch/x86/util/machine.c
@@ -0,0 +1,103 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/types.h>
3#include <linux/string.h>
4#include <stdlib.h>
5
6#include "../../util/machine.h"
7#include "../../util/map.h"
8#include "../../util/symbol.h"
9#include "../../util/sane_ctype.h"
10
11#include <symbol/kallsyms.h>
12
13#if defined(__x86_64__)
14
15struct extra_kernel_map_info {
16 int cnt;
17 int max_cnt;
18 struct extra_kernel_map *maps;
19 bool get_entry_trampolines;
20 u64 entry_trampoline;
21};
22
23static int add_extra_kernel_map(struct extra_kernel_map_info *mi, u64 start,
24 u64 end, u64 pgoff, const char *name)
25{
26 if (mi->cnt >= mi->max_cnt) {
27 void *buf;
28 size_t sz;
29
30 mi->max_cnt = mi->max_cnt ? mi->max_cnt * 2 : 32;
31 sz = sizeof(struct extra_kernel_map) * mi->max_cnt;
32 buf = realloc(mi->maps, sz);
33 if (!buf)
34 return -1;
35 mi->maps = buf;
36 }
37
38 mi->maps[mi->cnt].start = start;
39 mi->maps[mi->cnt].end = end;
40 mi->maps[mi->cnt].pgoff = pgoff;
41 strlcpy(mi->maps[mi->cnt].name, name, KMAP_NAME_LEN);
42
43 mi->cnt += 1;
44
45 return 0;
46}
47
48static int find_extra_kernel_maps(void *arg, const char *name, char type,
49 u64 start)
50{
51 struct extra_kernel_map_info *mi = arg;
52
53 if (!mi->entry_trampoline && kallsyms2elf_binding(type) == STB_GLOBAL &&
54 !strcmp(name, "_entry_trampoline")) {
55 mi->entry_trampoline = start;
56 return 0;
57 }
58
59 if (is_entry_trampoline(name)) {
60 u64 end = start + page_size;
61
62 return add_extra_kernel_map(mi, start, end, 0, name);
63 }
64
65 return 0;
66}
67
68int machine__create_extra_kernel_maps(struct machine *machine,
69 struct dso *kernel)
70{
71 struct extra_kernel_map_info mi = { .cnt = 0, };
72 char filename[PATH_MAX];
73 int ret;
74 int i;
75
76 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
77
78 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
79 return 0;
80
81 ret = kallsyms__parse(filename, &mi, find_extra_kernel_maps);
82 if (ret)
83 goto out_free;
84
85 if (!mi.entry_trampoline)
86 goto out_free;
87
88 for (i = 0; i < mi.cnt; i++) {
89 struct extra_kernel_map *xm = &mi.maps[i];
90
91 xm->pgoff = mi.entry_trampoline;
92 ret = machine__create_extra_kernel_map(machine, kernel, xm);
93 if (ret)
94 goto out_free;
95 }
96
97 machine->trampolines_mapped = mi.cnt;
98out_free:
99 free(mi.maps);
100 return ret;
101}
102
103#endif
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 51709a961496..5eb22cc56363 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -40,11 +40,11 @@
40struct perf_annotate { 40struct perf_annotate {
41 struct perf_tool tool; 41 struct perf_tool tool;
42 struct perf_session *session; 42 struct perf_session *session;
43 struct annotation_options opts;
43 bool use_tui, use_stdio, use_stdio2, use_gtk; 44 bool use_tui, use_stdio, use_stdio2, use_gtk;
44 bool full_paths;
45 bool print_line;
46 bool skip_missing; 45 bool skip_missing;
47 bool has_br_stack; 46 bool has_br_stack;
47 bool group_set;
48 const char *sym_hist_filter; 48 const char *sym_hist_filter;
49 const char *cpu_list; 49 const char *cpu_list;
50 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); 50 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -161,12 +161,12 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
161 hist__account_cycles(sample->branch_stack, al, sample, false); 161 hist__account_cycles(sample->branch_stack, al, sample, false);
162 162
163 bi = he->branch_info; 163 bi = he->branch_info;
164 err = addr_map_symbol__inc_samples(&bi->from, sample, evsel->idx); 164 err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
165 165
166 if (err) 166 if (err)
167 goto out; 167 goto out;
168 168
169 err = addr_map_symbol__inc_samples(&bi->to, sample, evsel->idx); 169 err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
170 170
171out: 171out:
172 return err; 172 return err;
@@ -228,7 +228,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
228 */ 228 */
229 if (al->sym != NULL) { 229 if (al->sym != NULL) {
230 rb_erase(&al->sym->rb_node, 230 rb_erase(&al->sym->rb_node,
231 &al->map->dso->symbols[al->map->type]); 231 &al->map->dso->symbols);
232 symbol__delete(al->sym); 232 symbol__delete(al->sym);
233 dso__reset_find_symbol_cache(al->map->dso); 233 dso__reset_find_symbol_cache(al->map->dso);
234 } 234 }
@@ -248,7 +248,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
248 if (he == NULL) 248 if (he == NULL)
249 return -ENOMEM; 249 return -ENOMEM;
250 250
251 ret = hist_entry__inc_addr_samples(he, sample, evsel->idx, al->addr); 251 ret = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
252 hists__inc_nr_samples(hists, true); 252 hists__inc_nr_samples(hists, true);
253 return ret; 253 return ret;
254} 254}
@@ -288,10 +288,9 @@ static int hist_entry__tty_annotate(struct hist_entry *he,
288 struct perf_annotate *ann) 288 struct perf_annotate *ann)
289{ 289{
290 if (!ann->use_stdio2) 290 if (!ann->use_stdio2)
291 return symbol__tty_annotate(he->ms.sym, he->ms.map, evsel, 291 return symbol__tty_annotate(he->ms.sym, he->ms.map, evsel, &ann->opts);
292 ann->print_line, ann->full_paths, 0, 0); 292
293 return symbol__tty_annotate2(he->ms.sym, he->ms.map, evsel, 293 return symbol__tty_annotate2(he->ms.sym, he->ms.map, evsel, &ann->opts);
294 ann->print_line, ann->full_paths);
295} 294}
296 295
297static void hists__find_annotations(struct hists *hists, 296static void hists__find_annotations(struct hists *hists,
@@ -342,7 +341,7 @@ find_next:
342 /* skip missing symbols */ 341 /* skip missing symbols */
343 nd = rb_next(nd); 342 nd = rb_next(nd);
344 } else if (use_browser == 1) { 343 } else if (use_browser == 1) {
345 key = hist_entry__tui_annotate(he, evsel, NULL); 344 key = hist_entry__tui_annotate(he, evsel, NULL, &ann->opts);
346 345
347 switch (key) { 346 switch (key) {
348 case -1: 347 case -1:
@@ -389,8 +388,9 @@ static int __cmd_annotate(struct perf_annotate *ann)
389 goto out; 388 goto out;
390 } 389 }
391 390
392 if (!objdump_path) { 391 if (!ann->opts.objdump_path) {
393 ret = perf_env__lookup_objdump(&session->header.env); 392 ret = perf_env__lookup_objdump(&session->header.env,
393 &ann->opts.objdump_path);
394 if (ret) 394 if (ret)
395 goto out; 395 goto out;
396 } 396 }
@@ -475,6 +475,7 @@ int cmd_annotate(int argc, const char **argv)
475 .ordered_events = true, 475 .ordered_events = true,
476 .ordering_requires_timestamps = true, 476 .ordering_requires_timestamps = true,
477 }, 477 },
478 .opts = annotation__default_options,
478 }; 479 };
479 struct perf_data data = { 480 struct perf_data data = {
480 .mode = PERF_DATA_MODE_READ, 481 .mode = PERF_DATA_MODE_READ,
@@ -502,23 +503,26 @@ int cmd_annotate(int argc, const char **argv)
502 "file", "vmlinux pathname"), 503 "file", "vmlinux pathname"),
503 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, 504 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
504 "load module symbols - WARNING: use only with -k and LIVE kernel"), 505 "load module symbols - WARNING: use only with -k and LIVE kernel"),
505 OPT_BOOLEAN('l', "print-line", &annotate.print_line, 506 OPT_BOOLEAN('l', "print-line", &annotate.opts.print_lines,
506 "print matching source lines (may be slow)"), 507 "print matching source lines (may be slow)"),
507 OPT_BOOLEAN('P', "full-paths", &annotate.full_paths, 508 OPT_BOOLEAN('P', "full-paths", &annotate.opts.full_path,
508 "Don't shorten the displayed pathnames"), 509 "Don't shorten the displayed pathnames"),
509 OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing, 510 OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
510 "Skip symbols that cannot be annotated"), 511 "Skip symbols that cannot be annotated"),
512 OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group,
513 &annotate.group_set,
514 "Show event group information together"),
511 OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"), 515 OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
512 OPT_CALLBACK(0, "symfs", NULL, "directory", 516 OPT_CALLBACK(0, "symfs", NULL, "directory",
513 "Look for files with symbols relative to this directory", 517 "Look for files with symbols relative to this directory",
514 symbol__config_symfs), 518 symbol__config_symfs),
515 OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src, 519 OPT_BOOLEAN(0, "source", &annotate.opts.annotate_src,
516 "Interleave source code with assembly code (default)"), 520 "Interleave source code with assembly code (default)"),
517 OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw, 521 OPT_BOOLEAN(0, "asm-raw", &annotate.opts.show_asm_raw,
518 "Display raw encoding of assembly instructions (default)"), 522 "Display raw encoding of assembly instructions (default)"),
519 OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", 523 OPT_STRING('M', "disassembler-style", &annotate.opts.disassembler_style, "disassembler style",
520 "Specify disassembler style (e.g. -M intel for intel syntax)"), 524 "Specify disassembler style (e.g. -M intel for intel syntax)"),
521 OPT_STRING(0, "objdump", &objdump_path, "path", 525 OPT_STRING(0, "objdump", &annotate.opts.objdump_path, "path",
522 "objdump binary to use for disassembly and annotations"), 526 "objdump binary to use for disassembly and annotations"),
523 OPT_BOOLEAN(0, "group", &symbol_conf.event_group, 527 OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
524 "Show event group information together"), 528 "Show event group information together"),
@@ -570,6 +574,9 @@ int cmd_annotate(int argc, const char **argv)
570 annotate.has_br_stack = perf_header__has_feat(&annotate.session->header, 574 annotate.has_br_stack = perf_header__has_feat(&annotate.session->header,
571 HEADER_BRANCH_STACK); 575 HEADER_BRANCH_STACK);
572 576
577 if (annotate.group_set)
578 perf_evlist__force_leader(annotate.session->evlist);
579
573 ret = symbol__annotation_init(); 580 ret = symbol__annotation_init();
574 if (ret < 0) 581 if (ret < 0)
575 goto out_delete; 582 goto out_delete;
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 41db2cba77eb..115110a4796a 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -25,6 +25,7 @@
25#include "util/session.h" 25#include "util/session.h"
26#include "util/symbol.h" 26#include "util/symbol.h"
27#include "util/time-utils.h" 27#include "util/time-utils.h"
28#include "util/probe-file.h"
28 29
29static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid) 30static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid)
30{ 31{
@@ -239,6 +240,34 @@ out:
239 return err; 240 return err;
240} 241}
241 242
243static int build_id_cache__purge_all(void)
244{
245 struct strlist *list;
246 struct str_node *pos;
247 int err = 0;
248 char *buf;
249
250 list = build_id_cache__list_all(false);
251 if (!list) {
252 pr_debug("Failed to get buildids: -%d\n", errno);
253 return -EINVAL;
254 }
255
256 strlist__for_each_entry(pos, list) {
257 buf = build_id_cache__origname(pos->s);
258 err = build_id_cache__remove_s(pos->s);
259 pr_debug("Removing %s (%s): %s\n", buf, pos->s,
260 err ? "FAIL" : "Ok");
261 free(buf);
262 if (err)
263 break;
264 }
265 strlist__delete(list);
266
267 pr_debug("Purged all: %s\n", err ? "FAIL" : "Ok");
268 return err;
269}
270
242static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused) 271static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
243{ 272{
244 char filename[PATH_MAX]; 273 char filename[PATH_MAX];
@@ -297,6 +326,26 @@ static int build_id_cache__update_file(const char *filename, struct nsinfo *nsi)
297 return err; 326 return err;
298} 327}
299 328
329static int build_id_cache__show_all(void)
330{
331 struct strlist *bidlist;
332 struct str_node *nd;
333 char *buf;
334
335 bidlist = build_id_cache__list_all(true);
336 if (!bidlist) {
337 pr_debug("Failed to get buildids: -%d\n", errno);
338 return -1;
339 }
340 strlist__for_each_entry(nd, bidlist) {
341 buf = build_id_cache__origname(nd->s);
342 fprintf(stdout, "%s %s\n", nd->s, buf);
343 free(buf);
344 }
345 strlist__delete(bidlist);
346 return 0;
347}
348
300int cmd_buildid_cache(int argc, const char **argv) 349int cmd_buildid_cache(int argc, const char **argv)
301{ 350{
302 struct strlist *list; 351 struct strlist *list;
@@ -304,6 +353,9 @@ int cmd_buildid_cache(int argc, const char **argv)
304 int ret = 0; 353 int ret = 0;
305 int ns_id = -1; 354 int ns_id = -1;
306 bool force = false; 355 bool force = false;
356 bool list_files = false;
357 bool opts_flag = false;
358 bool purge_all = false;
307 char const *add_name_list_str = NULL, 359 char const *add_name_list_str = NULL,
308 *remove_name_list_str = NULL, 360 *remove_name_list_str = NULL,
309 *purge_name_list_str = NULL, 361 *purge_name_list_str = NULL,
@@ -327,6 +379,8 @@ int cmd_buildid_cache(int argc, const char **argv)
327 "file(s) to remove"), 379 "file(s) to remove"),
328 OPT_STRING('p', "purge", &purge_name_list_str, "file list", 380 OPT_STRING('p', "purge", &purge_name_list_str, "file list",
329 "file(s) to remove (remove old caches too)"), 381 "file(s) to remove (remove old caches too)"),
382 OPT_BOOLEAN('P', "purge-all", &purge_all, "purge all cached files"),
383 OPT_BOOLEAN('l', "list", &list_files, "list all cached files"),
330 OPT_STRING('M', "missing", &missing_filename, "file", 384 OPT_STRING('M', "missing", &missing_filename, "file",
331 "to find missing build ids in the cache"), 385 "to find missing build ids in the cache"),
332 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), 386 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
@@ -344,11 +398,20 @@ int cmd_buildid_cache(int argc, const char **argv)
344 argc = parse_options(argc, argv, buildid_cache_options, 398 argc = parse_options(argc, argv, buildid_cache_options,
345 buildid_cache_usage, 0); 399 buildid_cache_usage, 0);
346 400
347 if (argc || (!add_name_list_str && !kcore_filename && 401 opts_flag = add_name_list_str || kcore_filename ||
348 !remove_name_list_str && !purge_name_list_str && 402 remove_name_list_str || purge_name_list_str ||
349 !missing_filename && !update_name_list_str)) 403 missing_filename || update_name_list_str ||
404 purge_all;
405
406 if (argc || !(list_files || opts_flag))
350 usage_with_options(buildid_cache_usage, buildid_cache_options); 407 usage_with_options(buildid_cache_usage, buildid_cache_options);
351 408
409 /* -l is exclusive. It can not be used with other options. */
410 if (list_files && opts_flag) {
411 usage_with_options_msg(buildid_cache_usage,
412 buildid_cache_options, "-l is exclusive.\n");
413 }
414
352 if (ns_id > 0) 415 if (ns_id > 0)
353 nsi = nsinfo__new(ns_id); 416 nsi = nsinfo__new(ns_id);
354 417
@@ -366,6 +429,11 @@ int cmd_buildid_cache(int argc, const char **argv)
366 429
367 setup_pager(); 430 setup_pager();
368 431
432 if (list_files) {
433 ret = build_id_cache__show_all();
434 goto out;
435 }
436
369 if (add_name_list_str) { 437 if (add_name_list_str) {
370 list = strlist__new(add_name_list_str, NULL); 438 list = strlist__new(add_name_list_str, NULL);
371 if (list) { 439 if (list) {
@@ -420,6 +488,13 @@ int cmd_buildid_cache(int argc, const char **argv)
420 } 488 }
421 } 489 }
422 490
491 if (purge_all) {
492 if (build_id_cache__purge_all()) {
493 pr_warning("Couldn't remove some caches. Error: %s.\n",
494 str_error_r(errno, sbuf, sizeof(sbuf)));
495 }
496 }
497
423 if (missing_filename) 498 if (missing_filename)
424 ret = build_id_cache__fprintf_missing(session, stdout); 499 ret = build_id_cache__fprintf_missing(session, stdout);
425 500
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 2126bfbcb385..307b3594525f 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -1976,7 +1976,7 @@ static int filter_cb(struct hist_entry *he)
1976 c2c_he = container_of(he, struct c2c_hist_entry, he); 1976 c2c_he = container_of(he, struct c2c_hist_entry, he);
1977 1977
1978 if (c2c.show_src && !he->srcline) 1978 if (c2c.show_src && !he->srcline)
1979 he->srcline = hist_entry__get_srcline(he); 1979 he->srcline = hist_entry__srcline(he);
1980 1980
1981 calc_width(c2c_he); 1981 calc_width(c2c_he);
1982 1982
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 40fe919bbcf3..a3b346359ba0 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -440,9 +440,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
440 goto repipe; 440 goto repipe;
441 } 441 }
442 442
443 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, &al); 443 if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
444
445 if (al.map != NULL) {
446 if (!al.map->dso->hit) { 444 if (!al.map->dso->hit) {
447 al.map->dso->hit = 1; 445 al.map->dso->hit = 1;
448 if (map__load(al.map) >= 0) { 446 if (map__load(al.map) >= 0) {
diff --git a/tools/perf/builtin-kallsyms.c b/tools/perf/builtin-kallsyms.c
index bcfb363112d3..90d1a2305b72 100644
--- a/tools/perf/builtin-kallsyms.c
+++ b/tools/perf/builtin-kallsyms.c
@@ -27,7 +27,7 @@ static int __cmd_kallsyms(int argc, const char **argv)
27 27
28 for (i = 0; i < argc; ++i) { 28 for (i = 0; i < argc; ++i) {
29 struct map *map; 29 struct map *map;
30 struct symbol *symbol = machine__find_kernel_function_by_name(machine, argv[i], &map); 30 struct symbol *symbol = machine__find_kernel_symbol_by_name(machine, argv[i], &map);
31 31
32 if (symbol == NULL) { 32 if (symbol == NULL) {
33 printf("%s: not found\n", argv[i]); 33 printf("%s: not found\n", argv[i]);
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index ae11e4c3516a..54d3f21b0e62 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -1004,7 +1004,7 @@ static void __print_slab_result(struct rb_root *root,
1004 if (is_caller) { 1004 if (is_caller) {
1005 addr = data->call_site; 1005 addr = data->call_site;
1006 if (!raw_ip) 1006 if (!raw_ip)
1007 sym = machine__find_kernel_function(machine, addr, &map); 1007 sym = machine__find_kernel_symbol(machine, addr, &map);
1008 } else 1008 } else
1009 addr = data->ptr; 1009 addr = data->ptr;
1010 1010
@@ -1068,7 +1068,7 @@ static void __print_page_alloc_result(struct perf_session *session, int n_lines)
1068 char *caller = buf; 1068 char *caller = buf;
1069 1069
1070 data = rb_entry(next, struct page_stat, node); 1070 data = rb_entry(next, struct page_stat, node);
1071 sym = machine__find_kernel_function(machine, data->callsite, &map); 1071 sym = machine__find_kernel_symbol(machine, data->callsite, &map);
1072 if (sym) 1072 if (sym)
1073 caller = sym->name; 1073 caller = sym->name;
1074 else 1074 else
@@ -1110,7 +1110,7 @@ static void __print_page_caller_result(struct perf_session *session, int n_lines
1110 char *caller = buf; 1110 char *caller = buf;
1111 1111
1112 data = rb_entry(next, struct page_stat, node); 1112 data = rb_entry(next, struct page_stat, node);
1113 sym = machine__find_kernel_function(machine, data->callsite, &map); 1113 sym = machine__find_kernel_symbol(machine, data->callsite, &map);
1114 if (sym) 1114 if (sym)
1115 caller = sym->name; 1115 caller = sym->name;
1116 else 1116 else
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 72e2ca096bf5..2b1ef704169f 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1438,8 +1438,6 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
1438 goto out; 1438 goto out;
1439 } 1439 }
1440 1440
1441 symbol_conf.nr_events = kvm->evlist->nr_entries;
1442
1443 if (perf_evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0) 1441 if (perf_evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
1444 usage_with_options(live_usage, live_options); 1442 usage_with_options(live_usage, live_options);
1445 1443
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index c0065923a525..99de91698de1 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -81,8 +81,7 @@ static int parse_probe_event(const char *str)
81 params.target_used = true; 81 params.target_used = true;
82 } 82 }
83 83
84 if (params.nsi) 84 pev->nsi = nsinfo__get(params.nsi);
85 pev->nsi = nsinfo__get(params.nsi);
86 85
87 /* Parse a perf-probe command into event */ 86 /* Parse a perf-probe command into event */
88 ret = parse_perf_probe_command(str, pev); 87 ret = parse_perf_probe_command(str, pev);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 0f198f6d9b77..cdb5b6949832 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -71,6 +71,7 @@ struct report {
71 bool group_set; 71 bool group_set;
72 int max_stack; 72 int max_stack;
73 struct perf_read_values show_threads_values; 73 struct perf_read_values show_threads_values;
74 struct annotation_options annotation_opts;
74 const char *pretty_printing_style; 75 const char *pretty_printing_style;
75 const char *cpu_list; 76 const char *cpu_list;
76 const char *symbol_filter_str; 77 const char *symbol_filter_str;
@@ -136,26 +137,25 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
136 137
137 if (sort__mode == SORT_MODE__BRANCH) { 138 if (sort__mode == SORT_MODE__BRANCH) {
138 bi = he->branch_info; 139 bi = he->branch_info;
139 err = addr_map_symbol__inc_samples(&bi->from, sample, evsel->idx); 140 err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
140 if (err) 141 if (err)
141 goto out; 142 goto out;
142 143
143 err = addr_map_symbol__inc_samples(&bi->to, sample, evsel->idx); 144 err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
144 145
145 } else if (rep->mem_mode) { 146 } else if (rep->mem_mode) {
146 mi = he->mem_info; 147 mi = he->mem_info;
147 err = addr_map_symbol__inc_samples(&mi->daddr, sample, evsel->idx); 148 err = addr_map_symbol__inc_samples(&mi->daddr, sample, evsel);
148 if (err) 149 if (err)
149 goto out; 150 goto out;
150 151
151 err = hist_entry__inc_addr_samples(he, sample, evsel->idx, al->addr); 152 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
152 153
153 } else if (symbol_conf.cumulate_callchain) { 154 } else if (symbol_conf.cumulate_callchain) {
154 if (single) 155 if (single)
155 err = hist_entry__inc_addr_samples(he, sample, evsel->idx, 156 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
156 al->addr);
157 } else { 157 } else {
158 err = hist_entry__inc_addr_samples(he, sample, evsel->idx, al->addr); 158 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
159 } 159 }
160 160
161out: 161out:
@@ -181,11 +181,11 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
181 rep->nonany_branch_mode); 181 rep->nonany_branch_mode);
182 182
183 bi = he->branch_info; 183 bi = he->branch_info;
184 err = addr_map_symbol__inc_samples(&bi->from, sample, evsel->idx); 184 err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
185 if (err) 185 if (err)
186 goto out; 186 goto out;
187 187
188 err = addr_map_symbol__inc_samples(&bi->to, sample, evsel->idx); 188 err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
189 189
190 branch_type_count(&rep->brtype_stat, &bi->flags, 190 branch_type_count(&rep->brtype_stat, &bi->flags,
191 bi->from.addr, bi->to.addr); 191 bi->from.addr, bi->to.addr);
@@ -194,20 +194,11 @@ out:
194 return err; 194 return err;
195} 195}
196 196
197/*
198 * Events in data file are not collect in groups, but we still want
199 * the group display. Set the artificial group and set the leader's
200 * forced_leader flag to notify the display code.
201 */
202static void setup_forced_leader(struct report *report, 197static void setup_forced_leader(struct report *report,
203 struct perf_evlist *evlist) 198 struct perf_evlist *evlist)
204{ 199{
205 if (report->group_set && !evlist->nr_groups) { 200 if (report->group_set)
206 struct perf_evsel *leader = perf_evlist__first(evlist); 201 perf_evlist__force_leader(evlist);
207
208 perf_evlist__set_leader(evlist);
209 leader->forced_leader = true;
210 }
211} 202}
212 203
213static int process_feature_event(struct perf_tool *tool, 204static int process_feature_event(struct perf_tool *tool,
@@ -523,12 +514,9 @@ static void report__warn_kptr_restrict(const struct report *rep)
523 "As no suitable kallsyms nor vmlinux was found, kernel samples\n" 514 "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
524 "can't be resolved."; 515 "can't be resolved.";
525 516
526 if (kernel_map) { 517 if (kernel_map && map__has_symbols(kernel_map)) {
527 const struct dso *kdso = kernel_map->dso; 518 desc = "If some relocation was applied (e.g. "
528 if (!RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION])) { 519 "kexec) symbols may be misresolved.";
529 desc = "If some relocation was applied (e.g. "
530 "kexec) symbols may be misresolved.";
531 }
532 } 520 }
533 521
534 ui__warning( 522 ui__warning(
@@ -573,7 +561,7 @@ static int report__browse_hists(struct report *rep)
573 ret = perf_evlist__tui_browse_hists(evlist, help, NULL, 561 ret = perf_evlist__tui_browse_hists(evlist, help, NULL,
574 rep->min_percent, 562 rep->min_percent,
575 &session->header.env, 563 &session->header.env,
576 true); 564 true, &rep->annotation_opts);
577 /* 565 /*
578 * Usually "ret" is the last pressed key, and we only 566 * Usually "ret" is the last pressed key, and we only
579 * care if the key notifies us to switch data file. 567 * care if the key notifies us to switch data file.
@@ -718,10 +706,7 @@ static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
718 706
719static int map_groups__fprintf_task(struct map_groups *mg, int indent, FILE *fp) 707static int map_groups__fprintf_task(struct map_groups *mg, int indent, FILE *fp)
720{ 708{
721 int printed = 0, i; 709 return maps__fprintf_task(&mg->maps, indent, fp);
722 for (i = 0; i < MAP__NR_TYPES; ++i)
723 printed += maps__fprintf_task(&mg->maps[i], indent, fp);
724 return printed;
725} 710}
726 711
727static void task__print_level(struct task *task, FILE *fp, int level) 712static void task__print_level(struct task *task, FILE *fp, int level)
@@ -961,12 +946,6 @@ parse_percent_limit(const struct option *opt, const char *str,
961 return 0; 946 return 0;
962} 947}
963 948
964#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
965
966const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
967 CALLCHAIN_REPORT_HELP
968 "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
969
970int cmd_report(int argc, const char **argv) 949int cmd_report(int argc, const char **argv)
971{ 950{
972 struct perf_session *session; 951 struct perf_session *session;
@@ -975,6 +954,10 @@ int cmd_report(int argc, const char **argv)
975 bool has_br_stack = false; 954 bool has_br_stack = false;
976 int branch_mode = -1; 955 int branch_mode = -1;
977 bool branch_call_mode = false; 956 bool branch_call_mode = false;
957#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
958 const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
959 CALLCHAIN_REPORT_HELP
960 "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
978 char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT; 961 char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
979 const char * const report_usage[] = { 962 const char * const report_usage[] = {
980 "perf report [<options>]", 963 "perf report [<options>]",
@@ -1004,6 +987,7 @@ int cmd_report(int argc, const char **argv)
1004 .max_stack = PERF_MAX_STACK_DEPTH, 987 .max_stack = PERF_MAX_STACK_DEPTH,
1005 .pretty_printing_style = "normal", 988 .pretty_printing_style = "normal",
1006 .socket_filter = -1, 989 .socket_filter = -1,
990 .annotation_opts = annotation__default_options,
1007 }; 991 };
1008 const struct option options[] = { 992 const struct option options[] = {
1009 OPT_STRING('i', "input", &input_name, "file", 993 OPT_STRING('i', "input", &input_name, "file",
@@ -1093,11 +1077,11 @@ int cmd_report(int argc, const char **argv)
1093 "list of cpus to profile"), 1077 "list of cpus to profile"),
1094 OPT_BOOLEAN('I', "show-info", &report.show_full_info, 1078 OPT_BOOLEAN('I', "show-info", &report.show_full_info,
1095 "Display extended information about perf.data file"), 1079 "Display extended information about perf.data file"),
1096 OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src, 1080 OPT_BOOLEAN(0, "source", &report.annotation_opts.annotate_src,
1097 "Interleave source code with assembly code (default)"), 1081 "Interleave source code with assembly code (default)"),
1098 OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw, 1082 OPT_BOOLEAN(0, "asm-raw", &report.annotation_opts.show_asm_raw,
1099 "Display raw encoding of assembly instructions (default)"), 1083 "Display raw encoding of assembly instructions (default)"),
1100 OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", 1084 OPT_STRING('M', "disassembler-style", &report.annotation_opts.disassembler_style, "disassembler style",
1101 "Specify disassembler style (e.g. -M intel for intel syntax)"), 1085 "Specify disassembler style (e.g. -M intel for intel syntax)"),
1102 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, 1086 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
1103 "Show a column with the sum of periods"), 1087 "Show a column with the sum of periods"),
@@ -1108,7 +1092,7 @@ int cmd_report(int argc, const char **argv)
1108 parse_branch_mode), 1092 parse_branch_mode),
1109 OPT_BOOLEAN(0, "branch-history", &branch_call_mode, 1093 OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
1110 "add last branch records to call history"), 1094 "add last branch records to call history"),
1111 OPT_STRING(0, "objdump", &objdump_path, "path", 1095 OPT_STRING(0, "objdump", &report.annotation_opts.objdump_path, "path",
1112 "objdump binary to use for disassembly and annotations"), 1096 "objdump binary to use for disassembly and annotations"),
1113 OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle, 1097 OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
1114 "Disable symbol demangling"), 1098 "Disable symbol demangling"),
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 4dfdee668b0c..cbf39dab19c1 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -2143,7 +2143,7 @@ static void save_task_callchain(struct perf_sched *sched,
2143 return; 2143 return;
2144 } 2144 }
2145 2145
2146 if (!symbol_conf.use_callchain || sample->callchain == NULL) 2146 if (!sched->show_callchain || sample->callchain == NULL)
2147 return; 2147 return;
2148 2148
2149 if (thread__resolve_callchain(thread, cursor, evsel, sample, 2149 if (thread__resolve_callchain(thread, cursor, evsel, sample,
@@ -2271,10 +2271,11 @@ static struct thread *get_idle_thread(int cpu)
2271 return idle_threads[cpu]; 2271 return idle_threads[cpu];
2272} 2272}
2273 2273
2274static void save_idle_callchain(struct idle_thread_runtime *itr, 2274static void save_idle_callchain(struct perf_sched *sched,
2275 struct idle_thread_runtime *itr,
2275 struct perf_sample *sample) 2276 struct perf_sample *sample)
2276{ 2277{
2277 if (!symbol_conf.use_callchain || sample->callchain == NULL) 2278 if (!sched->show_callchain || sample->callchain == NULL)
2278 return; 2279 return;
2279 2280
2280 callchain_cursor__copy(&itr->cursor, &callchain_cursor); 2281 callchain_cursor__copy(&itr->cursor, &callchain_cursor);
@@ -2320,7 +2321,7 @@ static struct thread *timehist_get_thread(struct perf_sched *sched,
2320 2321
2321 /* copy task callchain when entering to idle */ 2322 /* copy task callchain when entering to idle */
2322 if (perf_evsel__intval(evsel, sample, "next_pid") == 0) 2323 if (perf_evsel__intval(evsel, sample, "next_pid") == 0)
2323 save_idle_callchain(itr, sample); 2324 save_idle_callchain(sched, itr, sample);
2324 } 2325 }
2325 } 2326 }
2326 2327
@@ -2849,7 +2850,7 @@ static void timehist_print_summary(struct perf_sched *sched,
2849 printf(" CPU %2d idle entire time window\n", i); 2850 printf(" CPU %2d idle entire time window\n", i);
2850 } 2851 }
2851 2852
2852 if (sched->idle_hist && symbol_conf.use_callchain) { 2853 if (sched->idle_hist && sched->show_callchain) {
2853 callchain_param.mode = CHAIN_FOLDED; 2854 callchain_param.mode = CHAIN_FOLDED;
2854 callchain_param.value = CCVAL_PERIOD; 2855 callchain_param.value = CCVAL_PERIOD;
2855 2856
@@ -2933,8 +2934,7 @@ static int timehist_check_attr(struct perf_sched *sched,
2933 return -1; 2934 return -1;
2934 } 2935 }
2935 2936
2936 if (sched->show_callchain && 2937 if (sched->show_callchain && !evsel__has_callchain(evsel)) {
2937 !(evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN)) {
2938 pr_info("Samples do not have callchains.\n"); 2938 pr_info("Samples do not have callchains.\n");
2939 sched->show_callchain = 0; 2939 sched->show_callchain = 0;
2940 symbol_conf.use_callchain = 0; 2940 symbol_conf.use_callchain = 0;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index e0a9845b6cbc..b3bf35512d21 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -153,8 +153,8 @@ static struct {
153 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | 153 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
154 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | 154 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
155 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | 155 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
156 PERF_OUTPUT_SYM | PERF_OUTPUT_DSO | 156 PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
157 PERF_OUTPUT_PERIOD, 157 PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
158 158
159 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT, 159 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
160 }, 160 },
@@ -165,8 +165,9 @@ static struct {
165 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | 165 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
166 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | 166 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
167 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | 167 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
168 PERF_OUTPUT_SYM | PERF_OUTPUT_DSO | 168 PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
169 PERF_OUTPUT_PERIOD | PERF_OUTPUT_BPF_OUTPUT, 169 PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD |
170 PERF_OUTPUT_BPF_OUTPUT,
170 171
171 .invalid_fields = PERF_OUTPUT_TRACE, 172 .invalid_fields = PERF_OUTPUT_TRACE,
172 }, 173 },
@@ -185,10 +186,10 @@ static struct {
185 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | 186 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
186 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | 187 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
187 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | 188 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
188 PERF_OUTPUT_SYM | PERF_OUTPUT_DSO | 189 PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
189 PERF_OUTPUT_PERIOD | PERF_OUTPUT_ADDR | 190 PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD |
190 PERF_OUTPUT_DATA_SRC | PERF_OUTPUT_WEIGHT | 191 PERF_OUTPUT_ADDR | PERF_OUTPUT_DATA_SRC |
191 PERF_OUTPUT_PHYS_ADDR, 192 PERF_OUTPUT_WEIGHT | PERF_OUTPUT_PHYS_ADDR,
192 193
193 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT, 194 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
194 }, 195 },
@@ -199,8 +200,8 @@ static struct {
199 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | 200 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
200 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | 201 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
201 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | 202 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
202 PERF_OUTPUT_SYM | PERF_OUTPUT_DSO | 203 PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
203 PERF_OUTPUT_PERIOD, 204 PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
204 205
205 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT, 206 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
206 }, 207 },
@@ -211,8 +212,8 @@ static struct {
211 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | 212 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
212 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | 213 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
213 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | 214 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
214 PERF_OUTPUT_SYM | PERF_OUTPUT_DSO | 215 PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
215 PERF_OUTPUT_SYNTH, 216 PERF_OUTPUT_DSO | PERF_OUTPUT_SYNTH,
216 217
217 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT, 218 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
218 }, 219 },
@@ -516,7 +517,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
516 517
517 evlist__for_each_entry(session->evlist, evsel) { 518 evlist__for_each_entry(session->evlist, evsel) {
518 not_pipe = true; 519 not_pipe = true;
519 if (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { 520 if (evsel__has_callchain(evsel)) {
520 use_callchain = true; 521 use_callchain = true;
521 break; 522 break;
522 } 523 }
@@ -531,21 +532,18 @@ static int perf_session__check_output_opt(struct perf_session *session)
531 */ 532 */
532 if (symbol_conf.use_callchain && 533 if (symbol_conf.use_callchain &&
533 !output[PERF_TYPE_TRACEPOINT].user_set) { 534 !output[PERF_TYPE_TRACEPOINT].user_set) {
534 struct perf_event_attr *attr;
535
536 j = PERF_TYPE_TRACEPOINT; 535 j = PERF_TYPE_TRACEPOINT;
537 536
538 evlist__for_each_entry(session->evlist, evsel) { 537 evlist__for_each_entry(session->evlist, evsel) {
539 if (evsel->attr.type != j) 538 if (evsel->attr.type != j)
540 continue; 539 continue;
541 540
542 attr = &evsel->attr; 541 if (evsel__has_callchain(evsel)) {
543
544 if (attr->sample_type & PERF_SAMPLE_CALLCHAIN) {
545 output[j].fields |= PERF_OUTPUT_IP; 542 output[j].fields |= PERF_OUTPUT_IP;
546 output[j].fields |= PERF_OUTPUT_SYM; 543 output[j].fields |= PERF_OUTPUT_SYM;
544 output[j].fields |= PERF_OUTPUT_SYMOFFSET;
547 output[j].fields |= PERF_OUTPUT_DSO; 545 output[j].fields |= PERF_OUTPUT_DSO;
548 set_print_ip_opts(attr); 546 set_print_ip_opts(&evsel->attr);
549 goto out; 547 goto out;
550 } 548 }
551 } 549 }
@@ -608,7 +606,7 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
608 if (PRINT_FIELD(COMM)) { 606 if (PRINT_FIELD(COMM)) {
609 if (latency_format) 607 if (latency_format)
610 printed += fprintf(fp, "%8.8s ", thread__comm_str(thread)); 608 printed += fprintf(fp, "%8.8s ", thread__comm_str(thread));
611 else if (PRINT_FIELD(IP) && symbol_conf.use_callchain) 609 else if (PRINT_FIELD(IP) && evsel__has_callchain(evsel) && symbol_conf.use_callchain)
612 printed += fprintf(fp, "%s ", thread__comm_str(thread)); 610 printed += fprintf(fp, "%s ", thread__comm_str(thread));
613 else 611 else
614 printed += fprintf(fp, "%16s ", thread__comm_str(thread)); 612 printed += fprintf(fp, "%16s ", thread__comm_str(thread));
@@ -717,8 +715,8 @@ static int perf_sample__fprintf_brstack(struct perf_sample *sample,
717 if (PRINT_FIELD(DSO)) { 715 if (PRINT_FIELD(DSO)) {
718 memset(&alf, 0, sizeof(alf)); 716 memset(&alf, 0, sizeof(alf));
719 memset(&alt, 0, sizeof(alt)); 717 memset(&alt, 0, sizeof(alt));
720 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, from, &alf); 718 thread__find_map(thread, sample->cpumode, from, &alf);
721 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt); 719 thread__find_map(thread, sample->cpumode, to, &alt);
722 } 720 }
723 721
724 printed += fprintf(fp, " 0x%"PRIx64, from); 722 printed += fprintf(fp, " 0x%"PRIx64, from);
@@ -764,13 +762,8 @@ static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
764 from = br->entries[i].from; 762 from = br->entries[i].from;
765 to = br->entries[i].to; 763 to = br->entries[i].to;
766 764
767 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, from, &alf); 765 thread__find_symbol(thread, sample->cpumode, from, &alf);
768 if (alf.map) 766 thread__find_symbol(thread, sample->cpumode, to, &alt);
769 alf.sym = map__find_symbol(alf.map, alf.addr);
770
771 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
772 if (alt.map)
773 alt.sym = map__find_symbol(alt.map, alt.addr);
774 767
775 printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp); 768 printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
776 if (PRINT_FIELD(DSO)) { 769 if (PRINT_FIELD(DSO)) {
@@ -814,12 +807,12 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
814 from = br->entries[i].from; 807 from = br->entries[i].from;
815 to = br->entries[i].to; 808 to = br->entries[i].to;
816 809
817 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, from, &alf); 810 if (thread__find_map(thread, sample->cpumode, from, &alf) &&
818 if (alf.map && !alf.map->dso->adjust_symbols) 811 !alf.map->dso->adjust_symbols)
819 from = map__map_ip(alf.map, from); 812 from = map__map_ip(alf.map, from);
820 813
821 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt); 814 if (thread__find_map(thread, sample->cpumode, to, &alt) &&
822 if (alt.map && !alt.map->dso->adjust_symbols) 815 !alt.map->dso->adjust_symbols)
823 to = map__map_ip(alt.map, to); 816 to = map__map_ip(alt.map, to);
824 817
825 printed += fprintf(fp, " 0x%"PRIx64, from); 818 printed += fprintf(fp, " 0x%"PRIx64, from);
@@ -882,8 +875,7 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
882 return 0; 875 return 0;
883 } 876 }
884 877
885 thread__find_addr_map(thread, *cpumode, MAP__FUNCTION, start, &al); 878 if (!thread__find_map(thread, *cpumode, start, &al) || !al.map->dso) {
886 if (!al.map || !al.map->dso) {
887 pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end); 879 pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
888 return 0; 880 return 0;
889 } 881 }
@@ -933,10 +925,8 @@ static int ip__fprintf_sym(uint64_t addr, struct thread *thread,
933 925
934 memset(&al, 0, sizeof(al)); 926 memset(&al, 0, sizeof(al));
935 927
936 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, addr, &al); 928 thread__find_map(thread, cpumode, addr, &al);
937 if (!al.map) 929
938 thread__find_addr_map(thread, cpumode, MAP__VARIABLE,
939 addr, &al);
940 if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end) 930 if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end)
941 return 0; 931 return 0;
942 932
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index f17dc601b0f3..096ccb25c11f 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -80,6 +80,9 @@
80#include <sys/stat.h> 80#include <sys/stat.h>
81#include <sys/wait.h> 81#include <sys/wait.h>
82#include <unistd.h> 82#include <unistd.h>
83#include <sys/time.h>
84#include <sys/resource.h>
85#include <sys/wait.h>
83 86
84#include "sane_ctype.h" 87#include "sane_ctype.h"
85 88
@@ -164,6 +167,7 @@ static bool forever = false;
164static bool metric_only = false; 167static bool metric_only = false;
165static bool force_metric_only = false; 168static bool force_metric_only = false;
166static bool no_merge = false; 169static bool no_merge = false;
170static bool walltime_run_table = false;
167static struct timespec ref_time; 171static struct timespec ref_time;
168static struct cpu_map *aggr_map; 172static struct cpu_map *aggr_map;
169static aggr_get_id_t aggr_get_id; 173static aggr_get_id_t aggr_get_id;
@@ -173,6 +177,9 @@ static const char *output_name;
173static int output_fd; 177static int output_fd;
174static int print_free_counters_hint; 178static int print_free_counters_hint;
175static int print_mixed_hw_group_error; 179static int print_mixed_hw_group_error;
180static u64 *walltime_run;
181static bool ru_display = false;
182static struct rusage ru_data;
176 183
177struct perf_stat { 184struct perf_stat {
178 bool record; 185 bool record;
@@ -569,7 +576,7 @@ static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
569 return leader; 576 return leader;
570} 577}
571 578
572static int __run_perf_stat(int argc, const char **argv) 579static int __run_perf_stat(int argc, const char **argv, int run_idx)
573{ 580{
574 int interval = stat_config.interval; 581 int interval = stat_config.interval;
575 int times = stat_config.times; 582 int times = stat_config.times;
@@ -724,7 +731,7 @@ try_again:
724 break; 731 break;
725 } 732 }
726 } 733 }
727 waitpid(child_pid, &status, 0); 734 wait4(child_pid, &status, 0, &ru_data);
728 735
729 if (workload_exec_errno) { 736 if (workload_exec_errno) {
730 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 737 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
@@ -752,6 +759,9 @@ try_again:
752 759
753 t1 = rdclock(); 760 t1 = rdclock();
754 761
762 if (walltime_run_table)
763 walltime_run[run_idx] = t1 - t0;
764
755 update_stats(&walltime_nsecs_stats, t1 - t0); 765 update_stats(&walltime_nsecs_stats, t1 - t0);
756 766
757 /* 767 /*
@@ -766,7 +776,7 @@ try_again:
766 return WEXITSTATUS(status); 776 return WEXITSTATUS(status);
767} 777}
768 778
769static int run_perf_stat(int argc, const char **argv) 779static int run_perf_stat(int argc, const char **argv, int run_idx)
770{ 780{
771 int ret; 781 int ret;
772 782
@@ -779,7 +789,7 @@ static int run_perf_stat(int argc, const char **argv)
779 if (sync_run) 789 if (sync_run)
780 sync(); 790 sync();
781 791
782 ret = __run_perf_stat(argc, argv); 792 ret = __run_perf_stat(argc, argv, run_idx);
783 if (ret) 793 if (ret)
784 return ret; 794 return ret;
785 795
@@ -1764,19 +1774,81 @@ static void print_header(int argc, const char **argv)
1764 } 1774 }
1765} 1775}
1766 1776
1777static int get_precision(double num)
1778{
1779 if (num > 1)
1780 return 0;
1781
1782 return lround(ceil(-log10(num)));
1783}
1784
1785static void print_table(FILE *output, int precision, double avg)
1786{
1787 char tmp[64];
1788 int idx, indent = 0;
1789
1790 scnprintf(tmp, 64, " %17.*f", precision, avg);
1791 while (tmp[indent] == ' ')
1792 indent++;
1793
1794 fprintf(output, "%*s# Table of individual measurements:\n", indent, "");
1795
1796 for (idx = 0; idx < run_count; idx++) {
1797 double run = (double) walltime_run[idx] / NSEC_PER_SEC;
1798 int h, n = 1 + abs((int) (100.0 * (run - avg)/run) / 5);
1799
1800 fprintf(output, " %17.*f (%+.*f) ",
1801 precision, run, precision, run - avg);
1802
1803 for (h = 0; h < n; h++)
1804 fprintf(output, "#");
1805
1806 fprintf(output, "\n");
1807 }
1808
1809 fprintf(output, "\n%*s# Final result:\n", indent, "");
1810}
1811
1812static double timeval2double(struct timeval *t)
1813{
1814 return t->tv_sec + (double) t->tv_usec/USEC_PER_SEC;
1815}
1816
1767static void print_footer(void) 1817static void print_footer(void)
1768{ 1818{
1819 double avg = avg_stats(&walltime_nsecs_stats) / NSEC_PER_SEC;
1769 FILE *output = stat_config.output; 1820 FILE *output = stat_config.output;
1770 int n; 1821 int n;
1771 1822
1772 if (!null_run) 1823 if (!null_run)
1773 fprintf(output, "\n"); 1824 fprintf(output, "\n");
1774 fprintf(output, " %17.9f seconds time elapsed", 1825
1775 avg_stats(&walltime_nsecs_stats) / NSEC_PER_SEC); 1826 if (run_count == 1) {
1776 if (run_count > 1) { 1827 fprintf(output, " %17.9f seconds time elapsed", avg);
1777 fprintf(output, " "); 1828
1778 print_noise_pct(stddev_stats(&walltime_nsecs_stats), 1829 if (ru_display) {
1779 avg_stats(&walltime_nsecs_stats)); 1830 double ru_utime = timeval2double(&ru_data.ru_utime);
1831 double ru_stime = timeval2double(&ru_data.ru_stime);
1832
1833 fprintf(output, "\n\n");
1834 fprintf(output, " %17.9f seconds user\n", ru_utime);
1835 fprintf(output, " %17.9f seconds sys\n", ru_stime);
1836 }
1837 } else {
1838 double sd = stddev_stats(&walltime_nsecs_stats) / NSEC_PER_SEC;
1839 /*
1840 * Display at most 2 more significant
1841 * digits than the stddev inaccuracy.
1842 */
1843 int precision = get_precision(sd) + 2;
1844
1845 if (walltime_run_table)
1846 print_table(output, precision, avg);
1847
1848 fprintf(output, " %17.*f +- %.*f seconds time elapsed",
1849 precision, avg, precision, sd);
1850
1851 print_noise_pct(sd, avg);
1780 } 1852 }
1781 fprintf(output, "\n\n"); 1853 fprintf(output, "\n\n");
1782 1854
@@ -1952,6 +2024,8 @@ static const struct option stat_options[] = {
1952 "be more verbose (show counter open errors, etc)"), 2024 "be more verbose (show counter open errors, etc)"),
1953 OPT_INTEGER('r', "repeat", &run_count, 2025 OPT_INTEGER('r', "repeat", &run_count,
1954 "repeat command and print average + stddev (max: 100, forever: 0)"), 2026 "repeat command and print average + stddev (max: 100, forever: 0)"),
2027 OPT_BOOLEAN(0, "table", &walltime_run_table,
2028 "display details about each run (only with -r option)"),
1955 OPT_BOOLEAN('n', "null", &null_run, 2029 OPT_BOOLEAN('n', "null", &null_run,
1956 "null run - dont start any counters"), 2030 "null run - dont start any counters"),
1957 OPT_INCR('d', "detailed", &detailed_run, 2031 OPT_INCR('d', "detailed", &detailed_run,
@@ -2843,6 +2917,13 @@ int cmd_stat(int argc, const char **argv)
2843 goto out; 2917 goto out;
2844 } 2918 }
2845 2919
2920 if (walltime_run_table && run_count <= 1) {
2921 fprintf(stderr, "--table is only supported with -r\n");
2922 parse_options_usage(stat_usage, stat_options, "r", 1);
2923 parse_options_usage(NULL, stat_options, "table", 0);
2924 goto out;
2925 }
2926
2846 if (output_fd < 0) { 2927 if (output_fd < 0) {
2847 fprintf(stderr, "argument to --log-fd must be a > 0\n"); 2928 fprintf(stderr, "argument to --log-fd must be a > 0\n");
2848 parse_options_usage(stat_usage, stat_options, "log-fd", 0); 2929 parse_options_usage(stat_usage, stat_options, "log-fd", 0);
@@ -2888,6 +2969,13 @@ int cmd_stat(int argc, const char **argv)
2888 2969
2889 setup_system_wide(argc); 2970 setup_system_wide(argc);
2890 2971
2972 /*
2973 * Display user/system times only for single
2974 * run and when there's specified tracee.
2975 */
2976 if ((run_count == 1) && target__none(&target))
2977 ru_display = true;
2978
2891 if (run_count < 0) { 2979 if (run_count < 0) {
2892 pr_err("Run count must be a positive number\n"); 2980 pr_err("Run count must be a positive number\n");
2893 parse_options_usage(stat_usage, stat_options, "r", 1); 2981 parse_options_usage(stat_usage, stat_options, "r", 1);
@@ -2897,6 +2985,14 @@ int cmd_stat(int argc, const char **argv)
2897 run_count = 1; 2985 run_count = 1;
2898 } 2986 }
2899 2987
2988 if (walltime_run_table) {
2989 walltime_run = zalloc(run_count * sizeof(walltime_run[0]));
2990 if (!walltime_run) {
2991 pr_err("failed to setup -r option");
2992 goto out;
2993 }
2994 }
2995
2900 if ((stat_config.aggr_mode == AGGR_THREAD) && 2996 if ((stat_config.aggr_mode == AGGR_THREAD) &&
2901 !target__has_task(&target)) { 2997 !target__has_task(&target)) {
2902 if (!target.system_wide || target.cpu_list) { 2998 if (!target.system_wide || target.cpu_list) {
@@ -3012,7 +3108,7 @@ int cmd_stat(int argc, const char **argv)
3012 fprintf(output, "[ perf stat: executing run #%d ... ]\n", 3108 fprintf(output, "[ perf stat: executing run #%d ... ]\n",
3013 run_idx + 1); 3109 run_idx + 1);
3014 3110
3015 status = run_perf_stat(argc, argv); 3111 status = run_perf_stat(argc, argv, run_idx);
3016 if (forever && status != -1) { 3112 if (forever && status != -1) {
3017 print_counters(NULL, argc, argv); 3113 print_counters(NULL, argc, argv);
3018 perf_stat__reset_stats(); 3114 perf_stat__reset_stats();
@@ -3060,6 +3156,8 @@ int cmd_stat(int argc, const char **argv)
3060 perf_stat__exit_aggr_mode(); 3156 perf_stat__exit_aggr_mode();
3061 perf_evlist__free_stats(evsel_list); 3157 perf_evlist__free_stats(evsel_list);
3062out: 3158out:
3159 free(walltime_run);
3160
3063 if (smi_cost && smi_reset) 3161 if (smi_cost && smi_reset)
3064 sysfs__write_int(FREEZE_ON_SMI_PATH, 0); 3162 sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
3065 3163
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 813698a9b8c7..a827919c6263 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -533,12 +533,8 @@ static const char *cat_backtrace(union perf_event *event,
533 } 533 }
534 534
535 tal.filtered = 0; 535 tal.filtered = 0;
536 thread__find_addr_location(al.thread, cpumode, 536 if (thread__find_symbol(al.thread, cpumode, ip, &tal))
537 MAP__FUNCTION, ip, &tal); 537 fprintf(f, "..... %016" PRIx64 " %s\n", ip, tal.sym->name);
538
539 if (tal.sym)
540 fprintf(f, "..... %016" PRIx64 " %s\n", ip,
541 tal.sym->name);
542 else 538 else
543 fprintf(f, "..... %016" PRIx64 "\n", ip); 539 fprintf(f, "..... %016" PRIx64 "\n", ip);
544 } 540 }
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index f39bd60d2708..ffdc2769ff9f 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -123,14 +123,9 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
123 } 123 }
124 124
125 notes = symbol__annotation(sym); 125 notes = symbol__annotation(sym);
126 if (notes->src != NULL) {
127 pthread_mutex_lock(&notes->lock);
128 goto out_assign;
129 }
130
131 pthread_mutex_lock(&notes->lock); 126 pthread_mutex_lock(&notes->lock);
132 127
133 if (symbol__alloc_hist(sym) < 0) { 128 if (!symbol__hists(sym, top->evlist->nr_entries)) {
134 pthread_mutex_unlock(&notes->lock); 129 pthread_mutex_unlock(&notes->lock);
135 pr_err("Not enough memory for annotating '%s' symbol!\n", 130 pr_err("Not enough memory for annotating '%s' symbol!\n",
136 sym->name); 131 sym->name);
@@ -138,9 +133,8 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
138 return err; 133 return err;
139 } 134 }
140 135
141 err = symbol__annotate(sym, map, evsel, 0, NULL); 136 err = symbol__annotate(sym, map, evsel, 0, &top->annotation_opts, NULL);
142 if (err == 0) { 137 if (err == 0) {
143out_assign:
144 top->sym_filter_entry = he; 138 top->sym_filter_entry = he;
145 } else { 139 } else {
146 char msg[BUFSIZ]; 140 char msg[BUFSIZ];
@@ -188,7 +182,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
188static void perf_top__record_precise_ip(struct perf_top *top, 182static void perf_top__record_precise_ip(struct perf_top *top,
189 struct hist_entry *he, 183 struct hist_entry *he,
190 struct perf_sample *sample, 184 struct perf_sample *sample,
191 int counter, u64 ip) 185 struct perf_evsel *evsel, u64 ip)
192{ 186{
193 struct annotation *notes; 187 struct annotation *notes;
194 struct symbol *sym = he->ms.sym; 188 struct symbol *sym = he->ms.sym;
@@ -204,7 +198,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
204 if (pthread_mutex_trylock(&notes->lock)) 198 if (pthread_mutex_trylock(&notes->lock))
205 return; 199 return;
206 200
207 err = hist_entry__inc_addr_samples(he, sample, counter, ip); 201 err = hist_entry__inc_addr_samples(he, sample, evsel, ip);
208 202
209 pthread_mutex_unlock(&notes->lock); 203 pthread_mutex_unlock(&notes->lock);
210 204
@@ -249,10 +243,9 @@ static void perf_top__show_details(struct perf_top *top)
249 goto out_unlock; 243 goto out_unlock;
250 244
251 printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name); 245 printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
252 printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter); 246 printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
253 247
254 more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel, 248 more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel, &top->annotation_opts);
255 0, top->sym_pcnt_filter, top->print_entries, 4);
256 249
257 if (top->evlist->enabled) { 250 if (top->evlist->enabled) {
258 if (top->zero) 251 if (top->zero)
@@ -412,7 +405,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
412 405
413 fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter); 406 fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
414 407
415 fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter); 408 fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
416 fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL"); 409 fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
417 fprintf(stdout, "\t[S] stop annotation.\n"); 410 fprintf(stdout, "\t[S] stop annotation.\n");
418 411
@@ -515,7 +508,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
515 prompt_integer(&top->count_filter, "Enter display event count filter"); 508 prompt_integer(&top->count_filter, "Enter display event count filter");
516 break; 509 break;
517 case 'F': 510 case 'F':
518 prompt_percent(&top->sym_pcnt_filter, 511 prompt_percent(&top->annotation_opts.min_pcnt,
519 "Enter details display event filter (percent)"); 512 "Enter details display event filter (percent)");
520 break; 513 break;
521 case 'K': 514 case 'K':
@@ -613,7 +606,8 @@ static void *display_thread_tui(void *arg)
613 perf_evlist__tui_browse_hists(top->evlist, help, &hbt, 606 perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
614 top->min_percent, 607 top->min_percent,
615 &top->session->header.env, 608 &top->session->header.env,
616 !top->record_opts.overwrite); 609 !top->record_opts.overwrite,
610 &top->annotation_opts);
617 611
618 done = 1; 612 done = 1;
619 return NULL; 613 return NULL;
@@ -691,7 +685,7 @@ static int hist_iter__top_callback(struct hist_entry_iter *iter,
691 struct perf_evsel *evsel = iter->evsel; 685 struct perf_evsel *evsel = iter->evsel;
692 686
693 if (perf_hpp_list.sym && single) 687 if (perf_hpp_list.sym && single)
694 perf_top__record_precise_ip(top, he, iter->sample, evsel->idx, al->addr); 688 perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
695 689
696 hist__account_cycles(iter->sample->branch_stack, al, iter->sample, 690 hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
697 !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY)); 691 !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
@@ -742,7 +736,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
742"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" 736"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
743"Check /proc/sys/kernel/kptr_restrict.\n\n" 737"Check /proc/sys/kernel/kptr_restrict.\n\n"
744"Kernel%s samples will not be resolved.\n", 738"Kernel%s samples will not be resolved.\n",
745 al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ? 739 al.map && map__has_symbols(al.map) ?
746 " modules" : ""); 740 " modules" : "");
747 if (use_browser <= 0) 741 if (use_browser <= 0)
748 sleep(5); 742 sleep(5);
@@ -750,7 +744,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
750 machine->kptr_restrict_warned = true; 744 machine->kptr_restrict_warned = true;
751 } 745 }
752 746
753 if (al.sym == NULL) { 747 if (al.sym == NULL && al.map != NULL) {
754 const char *msg = "Kernel samples will not be resolved.\n"; 748 const char *msg = "Kernel samples will not be resolved.\n";
755 /* 749 /*
756 * As we do lazy loading of symtabs we only will know if the 750 * As we do lazy loading of symtabs we only will know if the
@@ -764,8 +758,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
764 * invalid --vmlinux ;-) 758 * invalid --vmlinux ;-)
765 */ 759 */
766 if (!machine->kptr_restrict_warned && !top->vmlinux_warned && 760 if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
767 al.map == machine->vmlinux_maps[MAP__FUNCTION] && 761 __map__is_kernel(al.map) && map__has_symbols(al.map)) {
768 RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
769 if (symbol_conf.vmlinux_name) { 762 if (symbol_conf.vmlinux_name) {
770 char serr[256]; 763 char serr[256];
771 dso__strerror_load(al.map->dso, serr, sizeof(serr)); 764 dso__strerror_load(al.map->dso, serr, sizeof(serr));
@@ -1084,8 +1077,9 @@ static int __cmd_top(struct perf_top *top)
1084 if (top->session == NULL) 1077 if (top->session == NULL)
1085 return -1; 1078 return -1;
1086 1079
1087 if (!objdump_path) { 1080 if (!top->annotation_opts.objdump_path) {
1088 ret = perf_env__lookup_objdump(&top->session->header.env); 1081 ret = perf_env__lookup_objdump(&top->session->header.env,
1082 &top->annotation_opts.objdump_path);
1089 if (ret) 1083 if (ret)
1090 goto out_delete; 1084 goto out_delete;
1091 } 1085 }
@@ -1265,8 +1259,8 @@ int cmd_top(int argc, const char **argv)
1265 .proc_map_timeout = 500, 1259 .proc_map_timeout = 500,
1266 .overwrite = 1, 1260 .overwrite = 1,
1267 }, 1261 },
1268 .max_stack = sysctl_perf_event_max_stack, 1262 .max_stack = sysctl__max_stack(),
1269 .sym_pcnt_filter = 5, 1263 .annotation_opts = annotation__default_options,
1270 .nr_threads_synthesize = UINT_MAX, 1264 .nr_threads_synthesize = UINT_MAX,
1271 }; 1265 };
1272 struct record_opts *opts = &top.record_opts; 1266 struct record_opts *opts = &top.record_opts;
@@ -1348,15 +1342,15 @@ int cmd_top(int argc, const char **argv)
1348 "only consider symbols in these comms"), 1342 "only consider symbols in these comms"),
1349 OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", 1343 OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
1350 "only consider these symbols"), 1344 "only consider these symbols"),
1351 OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src, 1345 OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
1352 "Interleave source code with assembly code (default)"), 1346 "Interleave source code with assembly code (default)"),
1353 OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw, 1347 OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
1354 "Display raw encoding of assembly instructions (default)"), 1348 "Display raw encoding of assembly instructions (default)"),
1355 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel, 1349 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
1356 "Enable kernel symbol demangling"), 1350 "Enable kernel symbol demangling"),
1357 OPT_STRING(0, "objdump", &objdump_path, "path", 1351 OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
1358 "objdump binary to use for disassembly and annotations"), 1352 "objdump binary to use for disassembly and annotations"),
1359 OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", 1353 OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
1360 "Specify disassembler style (e.g. -M intel for intel syntax)"), 1354 "Specify disassembler style (e.g. -M intel for intel syntax)"),
1361 OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"), 1355 OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
1362 OPT_CALLBACK(0, "percent-limit", &top, "percent", 1356 OPT_CALLBACK(0, "percent-limit", &top, "percent",
@@ -1392,6 +1386,9 @@ int cmd_top(int argc, const char **argv)
1392 if (status < 0) 1386 if (status < 0)
1393 return status; 1387 return status;
1394 1388
1389 top.annotation_opts.min_pcnt = 5;
1390 top.annotation_opts.context = 4;
1391
1395 top.evlist = perf_evlist__new(); 1392 top.evlist = perf_evlist__new();
1396 if (top.evlist == NULL) 1393 if (top.evlist == NULL)
1397 return -ENOMEM; 1394 return -ENOMEM;
@@ -1469,8 +1466,6 @@ int cmd_top(int argc, const char **argv)
1469 goto out_delete_evlist; 1466 goto out_delete_evlist;
1470 } 1467 }
1471 1468
1472 symbol_conf.nr_events = top.evlist->nr_entries;
1473
1474 if (top.delay_secs < 1) 1469 if (top.delay_secs < 1)
1475 top.delay_secs = 1; 1470 top.delay_secs = 1;
1476 1471
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 3ad17ee89403..6a748eca2edb 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -2024,8 +2024,7 @@ static int trace__pgfault(struct trace *trace,
2024 if (trace->summary_only) 2024 if (trace->summary_only)
2025 goto out; 2025 goto out;
2026 2026
2027 thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION, 2027 thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
2028 sample->ip, &al);
2029 2028
2030 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output); 2029 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2031 2030
@@ -2037,12 +2036,10 @@ static int trace__pgfault(struct trace *trace,
2037 2036
2038 fprintf(trace->output, "] => "); 2037 fprintf(trace->output, "] => ");
2039 2038
2040 thread__find_addr_location(thread, sample->cpumode, MAP__VARIABLE, 2039 thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2041 sample->addr, &al);
2042 2040
2043 if (!al.map) { 2041 if (!al.map) {
2044 thread__find_addr_location(thread, sample->cpumode, 2042 thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2045 MAP__FUNCTION, sample->addr, &al);
2046 2043
2047 if (al.map) 2044 if (al.map)
2048 map_type = 'x'; 2045 map_type = 'x';
@@ -2494,7 +2491,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
2494 * to override an explicitely set --max-stack global setting. 2491 * to override an explicitely set --max-stack global setting.
2495 */ 2492 */
2496 evlist__for_each_entry(evlist, evsel) { 2493 evlist__for_each_entry(evlist, evsel) {
2497 if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) && 2494 if (evsel__has_callchain(evsel) &&
2498 evsel->attr.sample_max_stack == 0) 2495 evsel->attr.sample_max_stack == 0)
2499 evsel->attr.sample_max_stack = trace->max_stack; 2496 evsel->attr.sample_max_stack = trace->max_stack;
2500 } 2497 }
@@ -3165,7 +3162,7 @@ int cmd_trace(int argc, const char **argv)
3165 mmap_pages_user_set = false; 3162 mmap_pages_user_set = false;
3166 3163
3167 if (trace.max_stack == UINT_MAX) { 3164 if (trace.max_stack == UINT_MAX) {
3168 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl_perf_event_max_stack; 3165 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
3169 max_stack_user_set = false; 3166 max_stack_user_set = false;
3170 } 3167 }
3171 3168
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 9aff89bc7535..10f333e2e825 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -55,22 +55,26 @@ include/uapi/asm-generic/ioctls.h
55include/uapi/asm-generic/mman-common.h 55include/uapi/asm-generic/mman-common.h
56' 56'
57 57
58check () { 58check_2 () {
59 file=$1 59 file1=$1
60 file2=$2
60 61
61 shift 62 shift
62 opts= 63 shift
63 while [ -n "$*" ]; do
64 opts="$opts \"$1\""
65 shift
66 done
67 64
68 cmd="diff $opts ../$file ../../$file > /dev/null" 65 cmd="diff $* $file1 $file2 > /dev/null"
69 66
70 test -f ../../$file && 67 test -f $file2 &&
71 eval $cmd || echo "Warning: Kernel ABI header at 'tools/$file' differs from latest version at '$file'" >&2 68 eval $cmd || echo "Warning: Kernel ABI header at 'tools/$file' differs from latest version at '$file'" >&2
72} 69}
73 70
71check () {
72 file=$1
73
74 shift
75
76 check_2 ../$file ../../$file $*
77}
74 78
75# Check if we have the kernel headers (tools/perf/../../include), else 79# Check if we have the kernel headers (tools/perf/../../include), else
76# we're probably on a detached tarball, so no point in trying to check 80# we're probably on a detached tarball, so no point in trying to check
@@ -83,7 +87,7 @@ for i in $HEADERS; do
83done 87done
84 88
85# diff with extra ignore lines 89# diff with extra ignore lines
86check arch/x86/lib/memcpy_64.S -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" 90check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
87check arch/x86/lib/memset_64.S -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" 91check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
88check include/uapi/asm-generic/mman.h -I "^#include <\(uapi/\)*asm-generic/mman-common.h>" 92check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common.h>"'
89check include/uapi/linux/mman.h -I "^#include <\(uapi/\)*asm/mman.h>" 93check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'
diff --git a/tools/perf/examples/bpf/5sec.c b/tools/perf/examples/bpf/5sec.c
new file mode 100644
index 000000000000..b9c203219691
--- /dev/null
+++ b/tools/perf/examples/bpf/5sec.c
@@ -0,0 +1,49 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 Description:
4
5 . Disable strace like syscall tracing (--no-syscalls), or try tracing
6 just some (-e *sleep).
7
8 . Attach a filter function to a kernel function, returning when it should
9 be considered, i.e. appear on the output.
10
11 . Run it system wide, so that any sleep of >= 5 seconds and < than 6
12 seconds gets caught.
13
14 . Ask for callgraphs using DWARF info, so that userspace can be unwound
15
16 . While this is running, run something like "sleep 5s".
17
18 . If we decide to add tv_nsec as well, then it becomes:
19
20 int probe(hrtimer_nanosleep, rqtp->tv_sec rqtp->tv_nsec)(void *ctx, int err, long sec, long nsec)
21
22 I.e. add where it comes from (rqtp->tv_nsec) and where it will be
23 accessible in the function body (nsec)
24
25 # perf trace --no-syscalls -e tools/perf/examples/bpf/5sec.c/call-graph=dwarf/
26 0.000 perf_bpf_probe:func:(ffffffff9811b5f0) tv_sec=5
27 hrtimer_nanosleep ([kernel.kallsyms])
28 __x64_sys_nanosleep ([kernel.kallsyms])
29 do_syscall_64 ([kernel.kallsyms])
30 entry_SYSCALL_64 ([kernel.kallsyms])
31 __GI___nanosleep (/usr/lib64/libc-2.26.so)
32 rpl_nanosleep (/usr/bin/sleep)
33 xnanosleep (/usr/bin/sleep)
34 main (/usr/bin/sleep)
35 __libc_start_main (/usr/lib64/libc-2.26.so)
36 _start (/usr/bin/sleep)
37 ^C#
38
39 Copyright (C) 2018 Red Hat, Inc., Arnaldo Carvalho de Melo <acme@redhat.com>
40*/
41
42#include <bpf.h>
43
44int probe(hrtimer_nanosleep, rqtp->tv_sec)(void *ctx, int err, long sec)
45{
46 return sec == 5;
47}
48
49license(GPL);
diff --git a/tools/perf/examples/bpf/empty.c b/tools/perf/examples/bpf/empty.c
new file mode 100644
index 000000000000..3776d26db9e7
--- /dev/null
+++ b/tools/perf/examples/bpf/empty.c
@@ -0,0 +1,3 @@
1#include <bpf.h>
2
3license(GPL);
diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h
new file mode 100644
index 000000000000..dd764ad5efdf
--- /dev/null
+++ b/tools/perf/include/bpf/bpf.h
@@ -0,0 +1,13 @@
1// SPDX-License-Identifier: GPL-2.0
2#ifndef _PERF_BPF_H
3#define _PERF_BPF_H
4#define SEC(NAME) __attribute__((section(NAME), used))
5
6#define probe(function, vars) \
7 SEC(#function "=" #function " " #vars) function
8
9#define license(name) \
10char _license[] SEC("license") = #name; \
11int _version SEC("version") = LINUX_VERSION_CODE;
12
13#endif /* _PERF_BPF_H */
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 20a08cb32332..a11cb006f968 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -12,7 +12,6 @@
12#include "util/env.h" 12#include "util/env.h"
13#include <subcmd/exec-cmd.h> 13#include <subcmd/exec-cmd.h>
14#include "util/config.h" 14#include "util/config.h"
15#include "util/quote.h"
16#include <subcmd/run-command.h> 15#include <subcmd/run-command.h>
17#include "util/parse-events.h" 16#include "util/parse-events.h"
18#include <subcmd/parse-options.h> 17#include <subcmd/parse-options.h>
@@ -238,7 +237,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
238 (*argc)--; 237 (*argc)--;
239 } else if (strstarts(cmd, CMD_DEBUGFS_DIR)) { 238 } else if (strstarts(cmd, CMD_DEBUGFS_DIR)) {
240 tracing_path_set(cmd + strlen(CMD_DEBUGFS_DIR)); 239 tracing_path_set(cmd + strlen(CMD_DEBUGFS_DIR));
241 fprintf(stderr, "dir: %s\n", tracing_path); 240 fprintf(stderr, "dir: %s\n", tracing_path_mount());
242 if (envchanged) 241 if (envchanged)
243 *envchanged = 1; 242 *envchanged = 1;
244 } else if (!strcmp(cmd, "--list-cmds")) { 243 } else if (!strcmp(cmd, "--list-cmds")) {
@@ -421,22 +420,11 @@ void pthread__unblock_sigwinch(void)
421 pthread_sigmask(SIG_UNBLOCK, &set, NULL); 420 pthread_sigmask(SIG_UNBLOCK, &set, NULL);
422} 421}
423 422
424#ifdef _SC_LEVEL1_DCACHE_LINESIZE
425#define cache_line_size(cacheline_sizep) *cacheline_sizep = sysconf(_SC_LEVEL1_DCACHE_LINESIZE)
426#else
427static void cache_line_size(int *cacheline_sizep)
428{
429 if (sysfs__read_int("devices/system/cpu/cpu0/cache/index0/coherency_line_size", cacheline_sizep))
430 pr_debug("cannot determine cache line size");
431}
432#endif
433
434int main(int argc, const char **argv) 423int main(int argc, const char **argv)
435{ 424{
436 int err; 425 int err;
437 const char *cmd; 426 const char *cmd;
438 char sbuf[STRERR_BUFSIZE]; 427 char sbuf[STRERR_BUFSIZE];
439 int value;
440 428
441 /* libsubcmd init */ 429 /* libsubcmd init */
442 exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT); 430 exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
@@ -444,13 +432,6 @@ int main(int argc, const char **argv)
444 432
445 /* The page_size is placed in util object. */ 433 /* The page_size is placed in util object. */
446 page_size = sysconf(_SC_PAGE_SIZE); 434 page_size = sysconf(_SC_PAGE_SIZE);
447 cache_line_size(&cacheline_size);
448
449 if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
450 sysctl_perf_event_max_stack = value;
451
452 if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0)
453 sysctl_perf_event_max_contexts_per_stack = value;
454 435
455 cmd = extract_argv0_path(argv[0]); 436 cmd = extract_argv0_path(argv[0]);
456 if (!cmd) 437 if (!cmd)
@@ -458,15 +439,11 @@ int main(int argc, const char **argv)
458 439
459 srandom(time(NULL)); 440 srandom(time(NULL));
460 441
461 perf_config__init();
462 err = perf_config(perf_default_config, NULL); 442 err = perf_config(perf_default_config, NULL);
463 if (err) 443 if (err)
464 return err; 444 return err;
465 set_buildid_dir(NULL); 445 set_buildid_dir(NULL);
466 446
467 /* get debugfs/tracefs mount point from /proc/mounts */
468 tracing_path_mount();
469
470 /* 447 /*
471 * "perf-xxxx" is the same as "perf xxxx", but we obviously: 448 * "perf-xxxx" is the same as "perf xxxx", but we obviously:
472 * 449 *
diff --git a/tools/perf/scripts/python/bin/powerpc-hcalls-record b/tools/perf/scripts/python/bin/powerpc-hcalls-record
new file mode 100644
index 000000000000..b7402aa9147d
--- /dev/null
+++ b/tools/perf/scripts/python/bin/powerpc-hcalls-record
@@ -0,0 +1,2 @@
1#!/bin/bash
2perf record -e "{powerpc:hcall_entry,powerpc:hcall_exit}" $@
diff --git a/tools/perf/scripts/python/bin/powerpc-hcalls-report b/tools/perf/scripts/python/bin/powerpc-hcalls-report
new file mode 100644
index 000000000000..dd32ad7465f6
--- /dev/null
+++ b/tools/perf/scripts/python/bin/powerpc-hcalls-report
@@ -0,0 +1,2 @@
1#!/bin/bash
2perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/powerpc-hcalls.py
diff --git a/tools/perf/scripts/python/powerpc-hcalls.py b/tools/perf/scripts/python/powerpc-hcalls.py
new file mode 100644
index 000000000000..00e0e7476e55
--- /dev/null
+++ b/tools/perf/scripts/python/powerpc-hcalls.py
@@ -0,0 +1,200 @@
1# SPDX-License-Identifier: GPL-2.0+
2#
3# Copyright (C) 2018 Ravi Bangoria, IBM Corporation
4#
5# Hypervisor call statisics
6
7import os
8import sys
9
10sys.path.append(os.environ['PERF_EXEC_PATH'] + \
11 '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
12
13from perf_trace_context import *
14from Core import *
15from Util import *
16
17# output: {
18# opcode: {
19# 'min': minimum time nsec
20# 'max': maximum time nsec
21# 'time': average time nsec
22# 'cnt': counter
23# } ...
24# }
25output = {}
26
27# d_enter: {
28# cpu: {
29# opcode: nsec
30# } ...
31# }
32d_enter = {}
33
34hcall_table = {
35 4: 'H_REMOVE',
36 8: 'H_ENTER',
37 12: 'H_READ',
38 16: 'H_CLEAR_MOD',
39 20: 'H_CLEAR_REF',
40 24: 'H_PROTECT',
41 28: 'H_GET_TCE',
42 32: 'H_PUT_TCE',
43 36: 'H_SET_SPRG0',
44 40: 'H_SET_DABR',
45 44: 'H_PAGE_INIT',
46 48: 'H_SET_ASR',
47 52: 'H_ASR_ON',
48 56: 'H_ASR_OFF',
49 60: 'H_LOGICAL_CI_LOAD',
50 64: 'H_LOGICAL_CI_STORE',
51 68: 'H_LOGICAL_CACHE_LOAD',
52 72: 'H_LOGICAL_CACHE_STORE',
53 76: 'H_LOGICAL_ICBI',
54 80: 'H_LOGICAL_DCBF',
55 84: 'H_GET_TERM_CHAR',
56 88: 'H_PUT_TERM_CHAR',
57 92: 'H_REAL_TO_LOGICAL',
58 96: 'H_HYPERVISOR_DATA',
59 100: 'H_EOI',
60 104: 'H_CPPR',
61 108: 'H_IPI',
62 112: 'H_IPOLL',
63 116: 'H_XIRR',
64 120: 'H_MIGRATE_DMA',
65 124: 'H_PERFMON',
66 220: 'H_REGISTER_VPA',
67 224: 'H_CEDE',
68 228: 'H_CONFER',
69 232: 'H_PROD',
70 236: 'H_GET_PPP',
71 240: 'H_SET_PPP',
72 244: 'H_PURR',
73 248: 'H_PIC',
74 252: 'H_REG_CRQ',
75 256: 'H_FREE_CRQ',
76 260: 'H_VIO_SIGNAL',
77 264: 'H_SEND_CRQ',
78 272: 'H_COPY_RDMA',
79 276: 'H_REGISTER_LOGICAL_LAN',
80 280: 'H_FREE_LOGICAL_LAN',
81 284: 'H_ADD_LOGICAL_LAN_BUFFER',
82 288: 'H_SEND_LOGICAL_LAN',
83 292: 'H_BULK_REMOVE',
84 304: 'H_MULTICAST_CTRL',
85 308: 'H_SET_XDABR',
86 312: 'H_STUFF_TCE',
87 316: 'H_PUT_TCE_INDIRECT',
88 332: 'H_CHANGE_LOGICAL_LAN_MAC',
89 336: 'H_VTERM_PARTNER_INFO',
90 340: 'H_REGISTER_VTERM',
91 344: 'H_FREE_VTERM',
92 348: 'H_RESET_EVENTS',
93 352: 'H_ALLOC_RESOURCE',
94 356: 'H_FREE_RESOURCE',
95 360: 'H_MODIFY_QP',
96 364: 'H_QUERY_QP',
97 368: 'H_REREGISTER_PMR',
98 372: 'H_REGISTER_SMR',
99 376: 'H_QUERY_MR',
100 380: 'H_QUERY_MW',
101 384: 'H_QUERY_HCA',
102 388: 'H_QUERY_PORT',
103 392: 'H_MODIFY_PORT',
104 396: 'H_DEFINE_AQP1',
105 400: 'H_GET_TRACE_BUFFER',
106 404: 'H_DEFINE_AQP0',
107 408: 'H_RESIZE_MR',
108 412: 'H_ATTACH_MCQP',
109 416: 'H_DETACH_MCQP',
110 420: 'H_CREATE_RPT',
111 424: 'H_REMOVE_RPT',
112 428: 'H_REGISTER_RPAGES',
113 432: 'H_DISABLE_AND_GETC',
114 436: 'H_ERROR_DATA',
115 440: 'H_GET_HCA_INFO',
116 444: 'H_GET_PERF_COUNT',
117 448: 'H_MANAGE_TRACE',
118 468: 'H_FREE_LOGICAL_LAN_BUFFER',
119 472: 'H_POLL_PENDING',
120 484: 'H_QUERY_INT_STATE',
121 580: 'H_ILLAN_ATTRIBUTES',
122 592: 'H_MODIFY_HEA_QP',
123 596: 'H_QUERY_HEA_QP',
124 600: 'H_QUERY_HEA',
125 604: 'H_QUERY_HEA_PORT',
126 608: 'H_MODIFY_HEA_PORT',
127 612: 'H_REG_BCMC',
128 616: 'H_DEREG_BCMC',
129 620: 'H_REGISTER_HEA_RPAGES',
130 624: 'H_DISABLE_AND_GET_HEA',
131 628: 'H_GET_HEA_INFO',
132 632: 'H_ALLOC_HEA_RESOURCE',
133 644: 'H_ADD_CONN',
134 648: 'H_DEL_CONN',
135 664: 'H_JOIN',
136 676: 'H_VASI_STATE',
137 688: 'H_ENABLE_CRQ',
138 696: 'H_GET_EM_PARMS',
139 720: 'H_SET_MPP',
140 724: 'H_GET_MPP',
141 748: 'H_HOME_NODE_ASSOCIATIVITY',
142 756: 'H_BEST_ENERGY',
143 764: 'H_XIRR_X',
144 768: 'H_RANDOM',
145 772: 'H_COP',
146 788: 'H_GET_MPP_X',
147 796: 'H_SET_MODE',
148 61440: 'H_RTAS',
149}
150
151def hcall_table_lookup(opcode):
152 if (hcall_table.has_key(opcode)):
153 return hcall_table[opcode]
154 else:
155 return opcode
156
157print_ptrn = '%-28s%10s%10s%10s%10s'
158
159def trace_end():
160 print print_ptrn % ('hcall', 'count', 'min(ns)', 'max(ns)', 'avg(ns)')
161 print '-' * 68
162 for opcode in output:
163 h_name = hcall_table_lookup(opcode)
164 time = output[opcode]['time']
165 cnt = output[opcode]['cnt']
166 min_t = output[opcode]['min']
167 max_t = output[opcode]['max']
168
169 print print_ptrn % (h_name, cnt, min_t, max_t, time/cnt)
170
171def powerpc__hcall_exit(name, context, cpu, sec, nsec, pid, comm, callchain,
172 opcode, retval):
173 if (d_enter.has_key(cpu) and d_enter[cpu].has_key(opcode)):
174 diff = nsecs(sec, nsec) - d_enter[cpu][opcode]
175
176 if (output.has_key(opcode)):
177 output[opcode]['time'] += diff
178 output[opcode]['cnt'] += 1
179 if (output[opcode]['min'] > diff):
180 output[opcode]['min'] = diff
181 if (output[opcode]['max'] < diff):
182 output[opcode]['max'] = diff
183 else:
184 output[opcode] = {
185 'time': diff,
186 'cnt': 1,
187 'min': diff,
188 'max': diff,
189 }
190
191 del d_enter[cpu][opcode]
192# else:
193# print "Can't find matching hcall_enter event. Ignoring sample"
194
195def powerpc__hcall_entry(event_name, context, cpu, sec, nsec, pid, comm,
196 callchain, opcode):
197 if (d_enter.has_key(cpu)):
198 d_enter[cpu][opcode] = nsecs(sec, nsec)
199 else:
200 d_enter[cpu] = {opcode: nsecs(sec, nsec)}
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index cac8f8889bc3..2bde505e2e7e 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -654,6 +654,15 @@ static int perf_test__list(int argc, const char **argv)
654 continue; 654 continue;
655 655
656 pr_info("%2d: %s\n", i, t->desc); 656 pr_info("%2d: %s\n", i, t->desc);
657
658 if (t->subtest.get_nr) {
659 int subn = t->subtest.get_nr();
660 int subi;
661
662 for (subi = 0; subi < subn; subi++)
663 pr_info("%2d:%1d: %s\n", i, subi + 1,
664 t->subtest.get_desc(subi));
665 }
657 } 666 }
658 667
659 perf_test__list_shell(argc, argv, i); 668 perf_test__list_shell(argc, argv, i);
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 99936352df4f..4892bd2dc33e 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -236,14 +236,13 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
236 236
237 pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); 237 pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
238 238
239 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, addr, &al); 239 if (!thread__find_map(thread, cpumode, addr, &al) || !al.map->dso) {
240 if (!al.map || !al.map->dso) {
241 if (cpumode == PERF_RECORD_MISC_HYPERVISOR) { 240 if (cpumode == PERF_RECORD_MISC_HYPERVISOR) {
242 pr_debug("Hypervisor address can not be resolved - skipping\n"); 241 pr_debug("Hypervisor address can not be resolved - skipping\n");
243 return 0; 242 return 0;
244 } 243 }
245 244
246 pr_debug("thread__find_addr_map failed\n"); 245 pr_debug("thread__find_map failed\n");
247 return -1; 246 return -1;
248 } 247 }
249 248
@@ -561,6 +560,7 @@ static int do_test_code_reading(bool try_kcore)
561 pid = getpid(); 560 pid = getpid();
562 561
563 machine = machine__new_host(); 562 machine = machine__new_host();
563 machine->env = &perf_env;
564 564
565 ret = machine__create_kernel_maps(machine); 565 ret = machine__create_kernel_maps(machine);
566 if (ret < 0) { 566 if (ret < 0) {
diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c
index f7c5b613d667..b889a28fd80b 100644
--- a/tools/perf/tests/hists_common.c
+++ b/tools/perf/tests/hists_common.c
@@ -131,20 +131,20 @@ struct machine *setup_fake_machine(struct machines *machines)
131 goto out; 131 goto out;
132 132
133 /* emulate dso__load() */ 133 /* emulate dso__load() */
134 dso__set_loaded(dso, MAP__FUNCTION); 134 dso__set_loaded(dso);
135 135
136 for (k = 0; k < fake_symbols[i].nr_syms; k++) { 136 for (k = 0; k < fake_symbols[i].nr_syms; k++) {
137 struct symbol *sym; 137 struct symbol *sym;
138 struct fake_sym *fsym = &fake_symbols[i].syms[k]; 138 struct fake_sym *fsym = &fake_symbols[i].syms[k];
139 139
140 sym = symbol__new(fsym->start, fsym->length, 140 sym = symbol__new(fsym->start, fsym->length,
141 STB_GLOBAL, fsym->name); 141 STB_GLOBAL, STT_FUNC, fsym->name);
142 if (sym == NULL) { 142 if (sym == NULL) {
143 dso__put(dso); 143 dso__put(dso);
144 goto out; 144 goto out;
145 } 145 }
146 146
147 symbols__insert(&dso->symbols[MAP__FUNCTION], sym); 147 symbols__insert(&dso->symbols, sym);
148 } 148 }
149 149
150 dso__put(dso); 150 dso__put(dso);
diff --git a/tools/perf/tests/kmod-path.c b/tools/perf/tests/kmod-path.c
index 8e57d46109de..148dd31cc201 100644
--- a/tools/perf/tests/kmod-path.c
+++ b/tools/perf/tests/kmod-path.c
@@ -127,6 +127,22 @@ int test__kmod_path__parse(struct test *t __maybe_unused, int subtest __maybe_un
127 M("[vdso]", PERF_RECORD_MISC_KERNEL, false); 127 M("[vdso]", PERF_RECORD_MISC_KERNEL, false);
128 M("[vdso]", PERF_RECORD_MISC_USER, false); 128 M("[vdso]", PERF_RECORD_MISC_USER, false);
129 129
130 T("[vdso32]", true , true , false, false, "[vdso32]", NULL);
131 T("[vdso32]", false , true , false, false, NULL , NULL);
132 T("[vdso32]", true , false , false, false, "[vdso32]", NULL);
133 T("[vdso32]", false , false , false, false, NULL , NULL);
134 M("[vdso32]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
135 M("[vdso32]", PERF_RECORD_MISC_KERNEL, false);
136 M("[vdso32]", PERF_RECORD_MISC_USER, false);
137
138 T("[vdsox32]", true , true , false, false, "[vdsox32]", NULL);
139 T("[vdsox32]", false , true , false, false, NULL , NULL);
140 T("[vdsox32]", true , false , false, false, "[vdsox32]", NULL);
141 T("[vdsox32]", false , false , false, false, NULL , NULL);
142 M("[vdsox32]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
143 M("[vdsox32]", PERF_RECORD_MISC_KERNEL, false);
144 M("[vdsox32]", PERF_RECORD_MISC_USER, false);
145
130 /* path alloc_name alloc_ext kmod comp name ext */ 146 /* path alloc_name alloc_ext kmod comp name ext */
131 T("[vsyscall]", true , true , false, false, "[vsyscall]", NULL); 147 T("[vsyscall]", true , true , false, false, "[vsyscall]", NULL);
132 T("[vsyscall]", false , true , false, false, NULL , NULL); 148 T("[vsyscall]", false , true , false, false, NULL , NULL);
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 868d82b501f4..b1af2499a3c9 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -188,9 +188,8 @@ static int mmap_events(synth_cb synth)
188 188
189 pr_debug("looking for map %p\n", td->map); 189 pr_debug("looking for map %p\n", td->map);
190 190
191 thread__find_addr_map(thread, 191 thread__find_map(thread, PERF_RECORD_MISC_USER,
192 PERF_RECORD_MISC_USER, MAP__FUNCTION, 192 (unsigned long) (td->map + 1), &al);
193 (unsigned long) (td->map + 1), &al);
194 193
195 thread__put(thread); 194 thread__put(thread);
196 195
@@ -218,7 +217,7 @@ static int mmap_events(synth_cb synth)
218 * perf_event__synthesize_threads (global) 217 * perf_event__synthesize_threads (global)
219 * 218 *
220 * We test we can find all memory maps via: 219 * We test we can find all memory maps via:
221 * thread__find_addr_map 220 * thread__find_map
222 * 221 *
223 * by using all thread objects. 222 * by using all thread objects.
224 */ 223 */
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 18b06444f230..7d4077068454 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -499,7 +499,7 @@ static int test__checkevent_pmu_partial_time_callgraph(struct perf_evlist *evlis
499 * while this test executes only parse events method. 499 * while this test executes only parse events method.
500 */ 500 */
501 TEST_ASSERT_VAL("wrong period", 0 == evsel->attr.sample_period); 501 TEST_ASSERT_VAL("wrong period", 0 == evsel->attr.sample_period);
502 TEST_ASSERT_VAL("wrong callgraph", !(PERF_SAMPLE_CALLCHAIN & evsel->attr.sample_type)); 502 TEST_ASSERT_VAL("wrong callgraph", !evsel__has_callchain(evsel));
503 TEST_ASSERT_VAL("wrong time", !(PERF_SAMPLE_TIME & evsel->attr.sample_type)); 503 TEST_ASSERT_VAL("wrong time", !(PERF_SAMPLE_TIME & evsel->attr.sample_type));
504 504
505 /* cpu/config=2,call-graph=no,time=0,period=2000/ */ 505 /* cpu/config=2,call-graph=no,time=0,period=2000/ */
@@ -512,7 +512,7 @@ static int test__checkevent_pmu_partial_time_callgraph(struct perf_evlist *evlis
512 * while this test executes only parse events method. 512 * while this test executes only parse events method.
513 */ 513 */
514 TEST_ASSERT_VAL("wrong period", 0 == evsel->attr.sample_period); 514 TEST_ASSERT_VAL("wrong period", 0 == evsel->attr.sample_period);
515 TEST_ASSERT_VAL("wrong callgraph", !(PERF_SAMPLE_CALLCHAIN & evsel->attr.sample_type)); 515 TEST_ASSERT_VAL("wrong callgraph", !evsel__has_callchain(evsel));
516 TEST_ASSERT_VAL("wrong time", !(PERF_SAMPLE_TIME & evsel->attr.sample_type)); 516 TEST_ASSERT_VAL("wrong time", !(PERF_SAMPLE_TIME & evsel->attr.sample_type));
517 517
518 return 0; 518 return 0;
@@ -1309,18 +1309,26 @@ static int test__checkevent_config_cache(struct perf_evlist *evlist)
1309 return 0; 1309 return 0;
1310} 1310}
1311 1311
1312static int test__intel_pt(struct perf_evlist *evlist)
1313{
1314 struct perf_evsel *evsel = perf_evlist__first(evlist);
1315
1316 TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "intel_pt//u") == 0);
1317 return 0;
1318}
1319
1312static int count_tracepoints(void) 1320static int count_tracepoints(void)
1313{ 1321{
1314 struct dirent *events_ent; 1322 struct dirent *events_ent;
1315 DIR *events_dir; 1323 DIR *events_dir;
1316 int cnt = 0; 1324 int cnt = 0;
1317 1325
1318 events_dir = opendir(tracing_events_path); 1326 events_dir = tracing_events__opendir();
1319 1327
1320 TEST_ASSERT_VAL("Can't open events dir", events_dir); 1328 TEST_ASSERT_VAL("Can't open events dir", events_dir);
1321 1329
1322 while ((events_ent = readdir(events_dir))) { 1330 while ((events_ent = readdir(events_dir))) {
1323 char sys_path[PATH_MAX]; 1331 char *sys_path;
1324 struct dirent *sys_ent; 1332 struct dirent *sys_ent;
1325 DIR *sys_dir; 1333 DIR *sys_dir;
1326 1334
@@ -1331,8 +1339,8 @@ static int count_tracepoints(void)
1331 || !strcmp(events_ent->d_name, "header_page")) 1339 || !strcmp(events_ent->d_name, "header_page"))
1332 continue; 1340 continue;
1333 1341
1334 scnprintf(sys_path, PATH_MAX, "%s/%s", 1342 sys_path = get_events_file(events_ent->d_name);
1335 tracing_events_path, events_ent->d_name); 1343 TEST_ASSERT_VAL("Can't get sys path", sys_path);
1336 1344
1337 sys_dir = opendir(sys_path); 1345 sys_dir = opendir(sys_path);
1338 TEST_ASSERT_VAL("Can't open sys dir", sys_dir); 1346 TEST_ASSERT_VAL("Can't open sys dir", sys_dir);
@@ -1348,6 +1356,7 @@ static int count_tracepoints(void)
1348 } 1356 }
1349 1357
1350 closedir(sys_dir); 1358 closedir(sys_dir);
1359 put_events_file(sys_path);
1351 } 1360 }
1352 1361
1353 closedir(events_dir); 1362 closedir(events_dir);
@@ -1637,6 +1646,11 @@ static struct evlist_test test__events[] = {
1637 .check = test__checkevent_config_cache, 1646 .check = test__checkevent_config_cache,
1638 .id = 51, 1647 .id = 51,
1639 }, 1648 },
1649 {
1650 .name = "intel_pt//u",
1651 .check = test__intel_pt,
1652 .id = 52,
1653 },
1640}; 1654};
1641 1655
1642static struct evlist_test test__events_pmu[] = { 1656static struct evlist_test test__events_pmu[] = {
diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c
index 5d2df65ada6a..40ab72149ce1 100644
--- a/tools/perf/tests/python-use.c
+++ b/tools/perf/tests/python-use.c
@@ -7,8 +7,7 @@
7#include <stdlib.h> 7#include <stdlib.h>
8#include <linux/compiler.h> 8#include <linux/compiler.h>
9#include "tests.h" 9#include "tests.h"
10 10#include "util/debug.h"
11extern int verbose;
12 11
13int test__python_use(struct test *test __maybe_unused, int subtest __maybe_unused) 12int test__python_use(struct test *test __maybe_unused, int subtest __maybe_unused)
14{ 13{
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index ee86473643be..263057039693 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -11,23 +11,23 @@
11. $(dirname $0)/lib/probe.sh 11. $(dirname $0)/lib/probe.sh
12 12
13libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g') 13libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
14nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254 14nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254
15 15
16trace_libc_inet_pton_backtrace() { 16trace_libc_inet_pton_backtrace() {
17 idx=0 17 idx=0
18 expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" 18 expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)"
19 expected[1]=".*inet_pton[[:space:]]\($libc|inlined\)$" 19 expected[1]=".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
20 case "$(uname -m)" in 20 case "$(uname -m)" in
21 s390x) 21 s390x)
22 eventattr='call-graph=dwarf,max-stack=4' 22 eventattr='call-graph=dwarf,max-stack=4'
23 expected[2]="gaih_inet.*[[:space:]]\($libc|inlined\)$" 23 expected[2]="gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
24 expected[3]="(__GI_)?getaddrinfo[[:space:]]\($libc|inlined\)$" 24 expected[3]="(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
25 expected[4]="main[[:space:]]\(.*/bin/ping.*\)$" 25 expected[4]="main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$"
26 ;; 26 ;;
27 *) 27 *)
28 eventattr='max-stack=3' 28 eventattr='max-stack=3'
29 expected[2]="getaddrinfo[[:space:]]\($libc\)$" 29 expected[2]="getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$"
30 expected[3]=".*\(.*/bin/ping.*\)$" 30 expected[3]=".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$"
31 ;; 31 ;;
32 esac 32 esac
33 33
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 1e5adb65632a..7691980b7df1 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -19,8 +19,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
19 struct symbol *sym; 19 struct symbol *sym;
20 struct map *kallsyms_map, *vmlinux_map, *map; 20 struct map *kallsyms_map, *vmlinux_map, *map;
21 struct machine kallsyms, vmlinux; 21 struct machine kallsyms, vmlinux;
22 enum map_type type = MAP__FUNCTION; 22 struct maps *maps = machine__kernel_maps(&vmlinux);
23 struct maps *maps = &vmlinux.kmaps.maps[type];
24 u64 mem_start, mem_end; 23 u64 mem_start, mem_end;
25 bool header_printed; 24 bool header_printed;
26 25
@@ -56,7 +55,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
56 * be compacted against the list of modules found in the "vmlinux" 55 * be compacted against the list of modules found in the "vmlinux"
57 * code and with the one got from /proc/modules from the "kallsyms" code. 56 * code and with the one got from /proc/modules from the "kallsyms" code.
58 */ 57 */
59 if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type) <= 0) { 58 if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms") <= 0) {
60 pr_debug("dso__load_kallsyms "); 59 pr_debug("dso__load_kallsyms ");
61 goto out; 60 goto out;
62 } 61 }
@@ -94,7 +93,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
94 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines 93 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
95 * to fixup the symbols. 94 * to fixup the symbols.
96 */ 95 */
97 if (machine__load_vmlinux_path(&vmlinux, type) <= 0) { 96 if (machine__load_vmlinux_path(&vmlinux) <= 0) {
98 pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n"); 97 pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");
99 err = TEST_SKIP; 98 err = TEST_SKIP;
100 goto out; 99 goto out;
@@ -108,7 +107,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
108 * in the kallsyms dso. For the ones that are in both, check its names and 107 * in the kallsyms dso. For the ones that are in both, check its names and
109 * end addresses too. 108 * end addresses too.
110 */ 109 */
111 for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) { 110 map__for_each_symbol(vmlinux_map, sym, nd) {
112 struct symbol *pair, *first_pair; 111 struct symbol *pair, *first_pair;
113 112
114 sym = rb_entry(nd, struct symbol, rb_node); 113 sym = rb_entry(nd, struct symbol, rb_node);
@@ -119,8 +118,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
119 mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start); 118 mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start);
120 mem_end = vmlinux_map->unmap_ip(vmlinux_map, sym->end); 119 mem_end = vmlinux_map->unmap_ip(vmlinux_map, sym->end);
121 120
122 first_pair = machine__find_kernel_symbol(&kallsyms, type, 121 first_pair = machine__find_kernel_symbol(&kallsyms, mem_start, NULL);
123 mem_start, NULL);
124 pair = first_pair; 122 pair = first_pair;
125 123
126 if (pair && UM(pair->start) == mem_start) { 124 if (pair && UM(pair->start) == mem_start) {
@@ -149,7 +147,7 @@ next_pair:
149 */ 147 */
150 continue; 148 continue;
151 } else { 149 } else {
152 pair = machine__find_kernel_symbol_by_name(&kallsyms, type, sym->name, NULL); 150 pair = machine__find_kernel_symbol_by_name(&kallsyms, sym->name, NULL);
153 if (pair) { 151 if (pair) {
154 if (UM(pair->start) == mem_start) 152 if (UM(pair->start) == mem_start)
155 goto next_pair; 153 goto next_pair;
@@ -183,7 +181,7 @@ next_pair:
183 * so use the short name, less descriptive but the same ("[kernel]" in 181 * so use the short name, less descriptive but the same ("[kernel]" in
184 * both cases. 182 * both cases.
185 */ 183 */
186 pair = map_groups__find_by_name(&kallsyms.kmaps, type, 184 pair = map_groups__find_by_name(&kallsyms.kmaps,
187 (map->dso->kernel ? 185 (map->dso->kernel ?
188 map->dso->short_name : 186 map->dso->short_name :
189 map->dso->name)); 187 map->dso->name));
@@ -206,7 +204,7 @@ next_pair:
206 mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start); 204 mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start);
207 mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end); 205 mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end);
208 206
209 pair = map_groups__find(&kallsyms.kmaps, type, mem_start); 207 pair = map_groups__find(&kallsyms.kmaps, mem_start);
210 if (pair == NULL || pair->priv) 208 if (pair == NULL || pair->priv)
211 continue; 209 continue;
212 210
@@ -228,7 +226,7 @@ next_pair:
228 226
229 header_printed = false; 227 header_printed = false;
230 228
231 maps = &kallsyms.kmaps.maps[type]; 229 maps = machine__kernel_maps(&kallsyms);
232 230
233 for (map = maps__first(maps); map; map = map__next(map)) { 231 for (map = maps__first(maps); map; map = map__next(map)) {
234 if (!map->priv) { 232 if (!map->priv) {
diff --git a/tools/perf/trace/beauty/prctl_option.sh b/tools/perf/trace/beauty/prctl_option.sh
index 0be4138fbe71..f24722146ebe 100755
--- a/tools/perf/trace/beauty/prctl_option.sh
+++ b/tools/perf/trace/beauty/prctl_option.sh
@@ -1,6 +1,6 @@
1#!/bin/sh 1#!/bin/sh
2 2
3header_dir=$1 3[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
4 4
5printf "static const char *prctl_options[] = {\n" 5printf "static const char *prctl_options[] = {\n"
6regex='^#define[[:space:]]+PR_([GS]ET\w+)[[:space:]]*([[:xdigit:]]+).*' 6regex='^#define[[:space:]]+PR_([GS]ET\w+)[[:space:]]*([[:xdigit:]]+).*'
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 3781d74088a7..3b4f1c10ff57 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -29,6 +29,7 @@ struct annotate_browser {
29 struct rb_node *curr_hot; 29 struct rb_node *curr_hot;
30 struct annotation_line *selection; 30 struct annotation_line *selection;
31 struct arch *arch; 31 struct arch *arch;
32 struct annotation_options *opts;
32 bool searching_backwards; 33 bool searching_backwards;
33 char search_bf[128]; 34 char search_bf[128];
34}; 35};
@@ -410,7 +411,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
410 notes = symbol__annotation(dl->ops.target.sym); 411 notes = symbol__annotation(dl->ops.target.sym);
411 pthread_mutex_lock(&notes->lock); 412 pthread_mutex_lock(&notes->lock);
412 413
413 if (notes->src == NULL && symbol__alloc_hist(dl->ops.target.sym) < 0) { 414 if (!symbol__hists(dl->ops.target.sym, evsel->evlist->nr_entries)) {
414 pthread_mutex_unlock(&notes->lock); 415 pthread_mutex_unlock(&notes->lock);
415 ui__warning("Not enough memory for annotating '%s' symbol!\n", 416 ui__warning("Not enough memory for annotating '%s' symbol!\n",
416 dl->ops.target.sym->name); 417 dl->ops.target.sym->name);
@@ -418,7 +419,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
418 } 419 }
419 420
420 pthread_mutex_unlock(&notes->lock); 421 pthread_mutex_unlock(&notes->lock);
421 symbol__tui_annotate(dl->ops.target.sym, ms->map, evsel, hbt); 422 symbol__tui_annotate(dl->ops.target.sym, ms->map, evsel, hbt, browser->opts);
422 sym_title(ms->sym, ms->map, title, sizeof(title)); 423 sym_title(ms->sym, ms->map, title, sizeof(title));
423 ui_browser__show_title(&browser->b, title); 424 ui_browser__show_title(&browser->b, title);
424 return true; 425 return true;
@@ -695,6 +696,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
695 "O Bump offset level (jump targets -> +call -> all -> cycle thru)\n" 696 "O Bump offset level (jump targets -> +call -> all -> cycle thru)\n"
696 "s Toggle source code view\n" 697 "s Toggle source code view\n"
697 "t Circulate percent, total period, samples view\n" 698 "t Circulate percent, total period, samples view\n"
699 "c Show min/max cycle\n"
698 "/ Search string\n" 700 "/ Search string\n"
699 "k Toggle line numbers\n" 701 "k Toggle line numbers\n"
700 "P Print to [symbol_name].annotation file.\n" 702 "P Print to [symbol_name].annotation file.\n"
@@ -791,6 +793,13 @@ show_sup_ins:
791 notes->options->show_total_period = true; 793 notes->options->show_total_period = true;
792 annotation__update_column_widths(notes); 794 annotation__update_column_widths(notes);
793 continue; 795 continue;
796 case 'c':
797 if (notes->options->show_minmax_cycle)
798 notes->options->show_minmax_cycle = false;
799 else
800 notes->options->show_minmax_cycle = true;
801 annotation__update_column_widths(notes);
802 continue;
794 case K_LEFT: 803 case K_LEFT:
795 case K_ESC: 804 case K_ESC:
796 case 'q': 805 case 'q':
@@ -809,24 +818,27 @@ out:
809} 818}
810 819
811int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel, 820int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel,
812 struct hist_browser_timer *hbt) 821 struct hist_browser_timer *hbt,
822 struct annotation_options *opts)
813{ 823{
814 return symbol__tui_annotate(ms->sym, ms->map, evsel, hbt); 824 return symbol__tui_annotate(ms->sym, ms->map, evsel, hbt, opts);
815} 825}
816 826
817int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel, 827int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
818 struct hist_browser_timer *hbt) 828 struct hist_browser_timer *hbt,
829 struct annotation_options *opts)
819{ 830{
820 /* reset abort key so that it can get Ctrl-C as a key */ 831 /* reset abort key so that it can get Ctrl-C as a key */
821 SLang_reset_tty(); 832 SLang_reset_tty();
822 SLang_init_tty(0, 0, 0); 833 SLang_init_tty(0, 0, 0);
823 834
824 return map_symbol__tui_annotate(&he->ms, evsel, hbt); 835 return map_symbol__tui_annotate(&he->ms, evsel, hbt, opts);
825} 836}
826 837
827int symbol__tui_annotate(struct symbol *sym, struct map *map, 838int symbol__tui_annotate(struct symbol *sym, struct map *map,
828 struct perf_evsel *evsel, 839 struct perf_evsel *evsel,
829 struct hist_browser_timer *hbt) 840 struct hist_browser_timer *hbt,
841 struct annotation_options *opts)
830{ 842{
831 struct annotation *notes = symbol__annotation(sym); 843 struct annotation *notes = symbol__annotation(sym);
832 struct map_symbol ms = { 844 struct map_symbol ms = {
@@ -843,6 +855,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
843 .priv = &ms, 855 .priv = &ms,
844 .use_navkeypressed = true, 856 .use_navkeypressed = true,
845 }, 857 },
858 .opts = opts,
846 }; 859 };
847 int ret = -1, err; 860 int ret = -1, err;
848 861
@@ -852,7 +865,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
852 if (map->dso->annotate_warned) 865 if (map->dso->annotate_warned)
853 return -1; 866 return -1;
854 867
855 err = symbol__annotate2(sym, map, evsel, &annotation__default_options, &browser.arch); 868 err = symbol__annotate2(sym, map, evsel, opts, &browser.arch);
856 if (err) { 869 if (err) {
857 char msg[BUFSIZ]; 870 char msg[BUFSIZ];
858 symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg)); 871 symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index e5f247247daa..a96f62ca984a 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1231,6 +1231,7 @@ static int hist_browser__show_entry(struct hist_browser *browser,
1231 int width = browser->b.width; 1231 int width = browser->b.width;
1232 char folded_sign = ' '; 1232 char folded_sign = ' ';
1233 bool current_entry = ui_browser__is_current_entry(&browser->b, row); 1233 bool current_entry = ui_browser__is_current_entry(&browser->b, row);
1234 bool use_callchain = hist_entry__has_callchains(entry) && symbol_conf.use_callchain;
1234 off_t row_offset = entry->row_offset; 1235 off_t row_offset = entry->row_offset;
1235 bool first = true; 1236 bool first = true;
1236 struct perf_hpp_fmt *fmt; 1237 struct perf_hpp_fmt *fmt;
@@ -1240,7 +1241,7 @@ static int hist_browser__show_entry(struct hist_browser *browser,
1240 browser->selection = &entry->ms; 1241 browser->selection = &entry->ms;
1241 } 1242 }
1242 1243
1243 if (symbol_conf.use_callchain) { 1244 if (use_callchain) {
1244 hist_entry__init_have_children(entry); 1245 hist_entry__init_have_children(entry);
1245 folded_sign = hist_entry__folded(entry); 1246 folded_sign = hist_entry__folded(entry);
1246 } 1247 }
@@ -1276,7 +1277,7 @@ static int hist_browser__show_entry(struct hist_browser *browser,
1276 } 1277 }
1277 1278
1278 if (first) { 1279 if (first) {
1279 if (symbol_conf.use_callchain) { 1280 if (use_callchain) {
1280 ui_browser__printf(&browser->b, "%c ", folded_sign); 1281 ui_browser__printf(&browser->b, "%c ", folded_sign);
1281 width -= 2; 1282 width -= 2;
1282 } 1283 }
@@ -1583,7 +1584,7 @@ hists_browser__scnprintf_headers(struct hist_browser *browser, char *buf,
1583 int column = 0; 1584 int column = 0;
1584 int span = 0; 1585 int span = 0;
1585 1586
1586 if (symbol_conf.use_callchain) { 1587 if (hists__has_callchains(hists) && symbol_conf.use_callchain) {
1587 ret = scnprintf(buf, size, " "); 1588 ret = scnprintf(buf, size, " ");
1588 if (advance_hpp_check(&dummy_hpp, ret)) 1589 if (advance_hpp_check(&dummy_hpp, ret))
1589 return ret; 1590 return ret;
@@ -1987,7 +1988,7 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser,
1987 bool first = true; 1988 bool first = true;
1988 int ret; 1989 int ret;
1989 1990
1990 if (symbol_conf.use_callchain) { 1991 if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1991 folded_sign = hist_entry__folded(he); 1992 folded_sign = hist_entry__folded(he);
1992 printed += fprintf(fp, "%c ", folded_sign); 1993 printed += fprintf(fp, "%c ", folded_sign);
1993 } 1994 }
@@ -2175,7 +2176,8 @@ struct hist_browser *hist_browser__new(struct hists *hists)
2175static struct hist_browser * 2176static struct hist_browser *
2176perf_evsel_browser__new(struct perf_evsel *evsel, 2177perf_evsel_browser__new(struct perf_evsel *evsel,
2177 struct hist_browser_timer *hbt, 2178 struct hist_browser_timer *hbt,
2178 struct perf_env *env) 2179 struct perf_env *env,
2180 struct annotation_options *annotation_opts)
2179{ 2181{
2180 struct hist_browser *browser = hist_browser__new(evsel__hists(evsel)); 2182 struct hist_browser *browser = hist_browser__new(evsel__hists(evsel));
2181 2183
@@ -2183,6 +2185,7 @@ perf_evsel_browser__new(struct perf_evsel *evsel,
2183 browser->hbt = hbt; 2185 browser->hbt = hbt;
2184 browser->env = env; 2186 browser->env = env;
2185 browser->title = hists_browser__scnprintf_title; 2187 browser->title = hists_browser__scnprintf_title;
2188 browser->annotation_opts = annotation_opts;
2186 } 2189 }
2187 return browser; 2190 return browser;
2188} 2191}
@@ -2336,7 +2339,8 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
2336 struct hist_entry *he; 2339 struct hist_entry *he;
2337 int err; 2340 int err;
2338 2341
2339 if (!objdump_path && perf_env__lookup_objdump(browser->env)) 2342 if (!browser->annotation_opts->objdump_path &&
2343 perf_env__lookup_objdump(browser->env, &browser->annotation_opts->objdump_path))
2340 return 0; 2344 return 0;
2341 2345
2342 notes = symbol__annotation(act->ms.sym); 2346 notes = symbol__annotation(act->ms.sym);
@@ -2344,7 +2348,8 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
2344 return 0; 2348 return 0;
2345 2349
2346 evsel = hists_to_evsel(browser->hists); 2350 evsel = hists_to_evsel(browser->hists);
2347 err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt); 2351 err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt,
2352 browser->annotation_opts);
2348 he = hist_browser__selected_entry(browser); 2353 he = hist_browser__selected_entry(browser);
2349 /* 2354 /*
2350 * offer option to annotate the other branch source or target 2355 * offer option to annotate the other branch source or target
@@ -2667,7 +2672,7 @@ static void hist_browser__update_percent_limit(struct hist_browser *hb,
2667 he->nr_rows = 0; 2672 he->nr_rows = 0;
2668 } 2673 }
2669 2674
2670 if (!he->leaf || !symbol_conf.use_callchain) 2675 if (!he->leaf || !hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
2671 goto next; 2676 goto next;
2672 2677
2673 if (callchain_param.mode == CHAIN_GRAPH_REL) { 2678 if (callchain_param.mode == CHAIN_GRAPH_REL) {
@@ -2697,10 +2702,11 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
2697 struct hist_browser_timer *hbt, 2702 struct hist_browser_timer *hbt,
2698 float min_pcnt, 2703 float min_pcnt,
2699 struct perf_env *env, 2704 struct perf_env *env,
2700 bool warn_lost_event) 2705 bool warn_lost_event,
2706 struct annotation_options *annotation_opts)
2701{ 2707{
2702 struct hists *hists = evsel__hists(evsel); 2708 struct hists *hists = evsel__hists(evsel);
2703 struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env); 2709 struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env, annotation_opts);
2704 struct branch_info *bi; 2710 struct branch_info *bi;
2705#define MAX_OPTIONS 16 2711#define MAX_OPTIONS 16
2706 char *options[MAX_OPTIONS]; 2712 char *options[MAX_OPTIONS];
@@ -3062,6 +3068,7 @@ out:
3062struct perf_evsel_menu { 3068struct perf_evsel_menu {
3063 struct ui_browser b; 3069 struct ui_browser b;
3064 struct perf_evsel *selection; 3070 struct perf_evsel *selection;
3071 struct annotation_options *annotation_opts;
3065 bool lost_events, lost_events_warned; 3072 bool lost_events, lost_events_warned;
3066 float min_pcnt; 3073 float min_pcnt;
3067 struct perf_env *env; 3074 struct perf_env *env;
@@ -3163,7 +3170,8 @@ browse_hists:
3163 true, hbt, 3170 true, hbt,
3164 menu->min_pcnt, 3171 menu->min_pcnt,
3165 menu->env, 3172 menu->env,
3166 warn_lost_event); 3173 warn_lost_event,
3174 menu->annotation_opts);
3167 ui_browser__show_title(&menu->b, title); 3175 ui_browser__show_title(&menu->b, title);
3168 switch (key) { 3176 switch (key) {
3169 case K_TAB: 3177 case K_TAB:
@@ -3222,7 +3230,8 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
3222 struct hist_browser_timer *hbt, 3230 struct hist_browser_timer *hbt,
3223 float min_pcnt, 3231 float min_pcnt,
3224 struct perf_env *env, 3232 struct perf_env *env,
3225 bool warn_lost_event) 3233 bool warn_lost_event,
3234 struct annotation_options *annotation_opts)
3226{ 3235{
3227 struct perf_evsel *pos; 3236 struct perf_evsel *pos;
3228 struct perf_evsel_menu menu = { 3237 struct perf_evsel_menu menu = {
@@ -3237,6 +3246,7 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
3237 }, 3246 },
3238 .min_pcnt = min_pcnt, 3247 .min_pcnt = min_pcnt,
3239 .env = env, 3248 .env = env,
3249 .annotation_opts = annotation_opts,
3240 }; 3250 };
3241 3251
3242 ui_helpline__push("Press ESC to exit"); 3252 ui_helpline__push("Press ESC to exit");
@@ -3257,7 +3267,8 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
3257 struct hist_browser_timer *hbt, 3267 struct hist_browser_timer *hbt,
3258 float min_pcnt, 3268 float min_pcnt,
3259 struct perf_env *env, 3269 struct perf_env *env,
3260 bool warn_lost_event) 3270 bool warn_lost_event,
3271 struct annotation_options *annotation_opts)
3261{ 3272{
3262 int nr_entries = evlist->nr_entries; 3273 int nr_entries = evlist->nr_entries;
3263 3274
@@ -3267,7 +3278,8 @@ single_entry:
3267 3278
3268 return perf_evsel__hists_browse(first, nr_entries, help, 3279 return perf_evsel__hists_browse(first, nr_entries, help,
3269 false, hbt, min_pcnt, 3280 false, hbt, min_pcnt,
3270 env, warn_lost_event); 3281 env, warn_lost_event,
3282 annotation_opts);
3271 } 3283 }
3272 3284
3273 if (symbol_conf.event_group) { 3285 if (symbol_conf.event_group) {
@@ -3285,5 +3297,6 @@ single_entry:
3285 3297
3286 return __perf_evlist__tui_browse_hists(evlist, nr_entries, help, 3298 return __perf_evlist__tui_browse_hists(evlist, nr_entries, help,
3287 hbt, min_pcnt, env, 3299 hbt, min_pcnt, env,
3288 warn_lost_event); 3300 warn_lost_event,
3301 annotation_opts);
3289} 3302}
diff --git a/tools/perf/ui/browsers/hists.h b/tools/perf/ui/browsers/hists.h
index 9428bee076f2..91d3e18b50aa 100644
--- a/tools/perf/ui/browsers/hists.h
+++ b/tools/perf/ui/browsers/hists.h
@@ -4,6 +4,8 @@
4 4
5#include "ui/browser.h" 5#include "ui/browser.h"
6 6
7struct annotation_options;
8
7struct hist_browser { 9struct hist_browser {
8 struct ui_browser b; 10 struct ui_browser b;
9 struct hists *hists; 11 struct hists *hists;
@@ -12,6 +14,7 @@ struct hist_browser {
12 struct hist_browser_timer *hbt; 14 struct hist_browser_timer *hbt;
13 struct pstack *pstack; 15 struct pstack *pstack;
14 struct perf_env *env; 16 struct perf_env *env;
17 struct annotation_options *annotation_opts;
15 int print_seq; 18 int print_seq;
16 bool show_dso; 19 bool show_dso;
17 bool show_headers; 20 bool show_headers;
diff --git a/tools/perf/ui/browsers/map.c b/tools/perf/ui/browsers/map.c
index e03fa75f108a..5b8b8c637686 100644
--- a/tools/perf/ui/browsers/map.c
+++ b/tools/perf/ui/browsers/map.c
@@ -104,7 +104,7 @@ int map__browse(struct map *map)
104{ 104{
105 struct map_browser mb = { 105 struct map_browser mb = {
106 .b = { 106 .b = {
107 .entries = &map->dso->symbols[map->type], 107 .entries = &map->dso->symbols,
108 .refresh = ui_browser__rb_tree_refresh, 108 .refresh = ui_browser__rb_tree_refresh,
109 .seek = ui_browser__rb_tree_seek, 109 .seek = ui_browser__rb_tree_seek,
110 .write = map_browser__write, 110 .write = map_browser__write,
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index aeeaf15029f0..48428c9acd89 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -169,7 +169,7 @@ static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
169 if (map->dso->annotate_warned) 169 if (map->dso->annotate_warned)
170 return -1; 170 return -1;
171 171
172 err = symbol__annotate(sym, map, evsel, 0, NULL); 172 err = symbol__annotate(sym, map, evsel, 0, &annotation__default_options, NULL);
173 if (err) { 173 if (err) {
174 char msg[BUFSIZ]; 174 char msg[BUFSIZ];
175 symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg)); 175 symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 24e1ec201ffd..b085f1b3e34d 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -382,7 +382,8 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
382 gtk_tree_store_set(store, &iter, col_idx++, s, -1); 382 gtk_tree_store_set(store, &iter, col_idx++, s, -1);
383 } 383 }
384 384
385 if (symbol_conf.use_callchain && hists__has(hists, sym)) { 385 if (hists__has_callchains(hists) &&
386 symbol_conf.use_callchain && hists__has(hists, sym)) {
386 if (callchain_param.mode == CHAIN_GRAPH_REL) 387 if (callchain_param.mode == CHAIN_GRAPH_REL)
387 total = symbol_conf.cumulate_callchain ? 388 total = symbol_conf.cumulate_callchain ?
388 h->stat_acc->period : h->stat.period; 389 h->stat_acc->period : h->stat.period;
@@ -479,7 +480,7 @@ static void perf_gtk__add_hierarchy_entries(struct hists *hists,
479 } 480 }
480 } 481 }
481 482
482 if (symbol_conf.use_callchain && he->leaf) { 483 if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
483 if (callchain_param.mode == CHAIN_GRAPH_REL) 484 if (callchain_param.mode == CHAIN_GRAPH_REL)
484 total = symbol_conf.cumulate_callchain ? 485 total = symbol_conf.cumulate_callchain ?
485 he->stat_acc->period : he->stat.period; 486 he->stat_acc->period : he->stat.period;
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 706f6f1e9c7d..fe3dfaa64a91 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -207,7 +207,7 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
207 if (ret) 207 if (ret)
208 return ret; 208 return ret;
209 209
210 if (a->thread != b->thread || !symbol_conf.use_callchain) 210 if (a->thread != b->thread || !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
211 return 0; 211 return 0;
212 212
213 ret = b->callchain->max_depth - a->callchain->max_depth; 213 ret = b->callchain->max_depth - a->callchain->max_depth;
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 6832fcb2e6ff..69b7a28f7a1c 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -516,7 +516,7 @@ static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
516 } 516 }
517 printed += putc('\n', fp); 517 printed += putc('\n', fp);
518 518
519 if (symbol_conf.use_callchain && he->leaf) { 519 if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
520 u64 total = hists__total_period(hists); 520 u64 total = hists__total_period(hists);
521 521
522 printed += hist_entry_callchain__fprintf(he, total, 0, fp); 522 printed += hist_entry_callchain__fprintf(he, total, 0, fp);
@@ -550,7 +550,7 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
550 550
551 ret = fprintf(fp, "%s\n", bf); 551 ret = fprintf(fp, "%s\n", bf);
552 552
553 if (use_callchain) 553 if (hist_entry__has_callchains(he) && use_callchain)
554 callchain_ret = hist_entry_callchain__fprintf(he, total_period, 554 callchain_ret = hist_entry_callchain__fprintf(he, total_period,
555 0, fp); 555 0, fp);
556 556
@@ -819,8 +819,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
819 } 819 }
820 820
821 if (h->ms.map == NULL && verbose > 1) { 821 if (h->ms.map == NULL && verbose > 1) {
822 __map_groups__fprintf_maps(h->thread->mg, 822 map_groups__fprintf(h->thread->mg, fp);
823 MAP__FUNCTION, fp);
824 fprintf(fp, "%.10s end\n", graph_dotted_line); 823 fprintf(fp, "%.10s end\n", graph_dotted_line);
825 } 824 }
826 } 825 }
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 8052373bcd6a..b604ef334dc9 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -24,7 +24,6 @@ libperf-y += libstring.o
24libperf-y += bitmap.o 24libperf-y += bitmap.o
25libperf-y += hweight.o 25libperf-y += hweight.o
26libperf-y += smt.o 26libperf-y += smt.o
27libperf-y += quote.o
28libperf-y += strbuf.o 27libperf-y += strbuf.o
29libperf-y += string.o 28libperf-y += string.o
30libperf-y += strlist.o 29libperf-y += strlist.o
@@ -152,6 +151,8 @@ libperf-y += perf-hooks.o
152libperf-$(CONFIG_CXX) += c++/ 151libperf-$(CONFIG_CXX) += c++/
153 152
154CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" 153CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
154CFLAGS_llvm-utils.o += -DPERF_INCLUDE_DIR="BUILD_STR($(perf_include_dir_SQ))"
155
155# avoid compiler warnings in 32-bit mode 156# avoid compiler warnings in 32-bit mode
156CFLAGS_genelf_debug.o += -Wno-packed 157CFLAGS_genelf_debug.o += -Wno-packed
157 158
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 5d74a30fe00f..f91775b4bc3c 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -21,6 +21,7 @@
21#include "debug.h" 21#include "debug.h"
22#include "annotate.h" 22#include "annotate.h"
23#include "evsel.h" 23#include "evsel.h"
24#include "evlist.h"
24#include "block-range.h" 25#include "block-range.h"
25#include "string2.h" 26#include "string2.h"
26#include "arch/common.h" 27#include "arch/common.h"
@@ -46,11 +47,10 @@
46struct annotation_options annotation__default_options = { 47struct annotation_options annotation__default_options = {
47 .use_offset = true, 48 .use_offset = true,
48 .jump_arrows = true, 49 .jump_arrows = true,
50 .annotate_src = true,
49 .offset_level = ANNOTATION__OFFSET_JUMP_TARGETS, 51 .offset_level = ANNOTATION__OFFSET_JUMP_TARGETS,
50}; 52};
51 53
52const char *disassembler_style;
53const char *objdump_path;
54static regex_t file_lineno; 54static regex_t file_lineno;
55 55
56static struct ins_ops *ins__find(struct arch *arch, const char *name); 56static struct ins_ops *ins__find(struct arch *arch, const char *name);
@@ -678,10 +678,28 @@ static struct arch *arch__find(const char *name)
678 return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp); 678 return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
679} 679}
680 680
681int symbol__alloc_hist(struct symbol *sym) 681static struct annotated_source *annotated_source__new(void)
682{
683 struct annotated_source *src = zalloc(sizeof(*src));
684
685 if (src != NULL)
686 INIT_LIST_HEAD(&src->source);
687
688 return src;
689}
690
691static __maybe_unused void annotated_source__delete(struct annotated_source *src)
692{
693 if (src == NULL)
694 return;
695 zfree(&src->histograms);
696 zfree(&src->cycles_hist);
697 free(src);
698}
699
700static int annotated_source__alloc_histograms(struct annotated_source *src,
701 size_t size, int nr_hists)
682{ 702{
683 struct annotation *notes = symbol__annotation(sym);
684 size_t size = symbol__size(sym);
685 size_t sizeof_sym_hist; 703 size_t sizeof_sym_hist;
686 704
687 /* 705 /*
@@ -701,17 +719,13 @@ int symbol__alloc_hist(struct symbol *sym)
701 sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(struct sym_hist_entry)); 719 sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(struct sym_hist_entry));
702 720
703 /* Check for overflow in zalloc argument */ 721 /* Check for overflow in zalloc argument */
704 if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src)) 722 if (sizeof_sym_hist > SIZE_MAX / nr_hists)
705 / symbol_conf.nr_events)
706 return -1; 723 return -1;
707 724
708 notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist); 725 src->sizeof_sym_hist = sizeof_sym_hist;
709 if (notes->src == NULL) 726 src->nr_histograms = nr_hists;
710 return -1; 727 src->histograms = calloc(nr_hists, sizeof_sym_hist) ;
711 notes->src->sizeof_sym_hist = sizeof_sym_hist; 728 return src->histograms ? 0 : -1;
712 notes->src->nr_histograms = symbol_conf.nr_events;
713 INIT_LIST_HEAD(&notes->src->source);
714 return 0;
715} 729}
716 730
717/* The cycles histogram is lazily allocated. */ 731/* The cycles histogram is lazily allocated. */
@@ -741,14 +755,11 @@ void symbol__annotate_zero_histograms(struct symbol *sym)
741 pthread_mutex_unlock(&notes->lock); 755 pthread_mutex_unlock(&notes->lock);
742} 756}
743 757
744static int __symbol__account_cycles(struct annotation *notes, 758static int __symbol__account_cycles(struct cyc_hist *ch,
745 u64 start, 759 u64 start,
746 unsigned offset, unsigned cycles, 760 unsigned offset, unsigned cycles,
747 unsigned have_start) 761 unsigned have_start)
748{ 762{
749 struct cyc_hist *ch;
750
751 ch = notes->src->cycles_hist;
752 /* 763 /*
753 * For now we can only account one basic block per 764 * For now we can only account one basic block per
754 * final jump. But multiple could be overlapping. 765 * final jump. But multiple could be overlapping.
@@ -760,6 +771,15 @@ static int __symbol__account_cycles(struct annotation *notes,
760 ch[offset].num_aggr++; 771 ch[offset].num_aggr++;
761 ch[offset].cycles_aggr += cycles; 772 ch[offset].cycles_aggr += cycles;
762 773
774 if (cycles > ch[offset].cycles_max)
775 ch[offset].cycles_max = cycles;
776
777 if (ch[offset].cycles_min) {
778 if (cycles && cycles < ch[offset].cycles_min)
779 ch[offset].cycles_min = cycles;
780 } else
781 ch[offset].cycles_min = cycles;
782
763 if (!have_start && ch[offset].have_start) 783 if (!have_start && ch[offset].have_start)
764 return 0; 784 return 0;
765 if (ch[offset].num) { 785 if (ch[offset].num) {
@@ -782,7 +802,7 @@ static int __symbol__account_cycles(struct annotation *notes,
782} 802}
783 803
784static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map, 804static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
785 struct annotation *notes, int evidx, u64 addr, 805 struct annotated_source *src, int evidx, u64 addr,
786 struct perf_sample *sample) 806 struct perf_sample *sample)
787{ 807{
788 unsigned offset; 808 unsigned offset;
@@ -798,7 +818,12 @@ static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
798 } 818 }
799 819
800 offset = addr - sym->start; 820 offset = addr - sym->start;
801 h = annotation__histogram(notes, evidx); 821 h = annotated_source__histogram(src, evidx);
822 if (h == NULL) {
823 pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
824 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
825 return -ENOMEM;
826 }
802 h->nr_samples++; 827 h->nr_samples++;
803 h->addr[offset].nr_samples++; 828 h->addr[offset].nr_samples++;
804 h->period += sample->period; 829 h->period += sample->period;
@@ -811,45 +836,69 @@ static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
811 return 0; 836 return 0;
812} 837}
813 838
814static struct annotation *symbol__get_annotation(struct symbol *sym, bool cycles) 839static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
815{ 840{
816 struct annotation *notes = symbol__annotation(sym); 841 struct annotation *notes = symbol__annotation(sym);
817 842
818 if (notes->src == NULL) { 843 if (notes->src == NULL) {
819 if (symbol__alloc_hist(sym) < 0) 844 notes->src = annotated_source__new();
845 if (notes->src == NULL)
820 return NULL; 846 return NULL;
847 goto alloc_cycles_hist;
821 } 848 }
822 if (!notes->src->cycles_hist && cycles) { 849
823 if (symbol__alloc_hist_cycles(sym) < 0) 850 if (!notes->src->cycles_hist) {
851alloc_cycles_hist:
852 symbol__alloc_hist_cycles(sym);
853 }
854
855 return notes->src->cycles_hist;
856}
857
858struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
859{
860 struct annotation *notes = symbol__annotation(sym);
861
862 if (notes->src == NULL) {
863 notes->src = annotated_source__new();
864 if (notes->src == NULL)
824 return NULL; 865 return NULL;
866 goto alloc_histograms;
867 }
868
869 if (notes->src->histograms == NULL) {
870alloc_histograms:
871 annotated_source__alloc_histograms(notes->src, symbol__size(sym),
872 nr_hists);
825 } 873 }
826 return notes; 874
875 return notes->src;
827} 876}
828 877
829static int symbol__inc_addr_samples(struct symbol *sym, struct map *map, 878static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
830 int evidx, u64 addr, 879 struct perf_evsel *evsel, u64 addr,
831 struct perf_sample *sample) 880 struct perf_sample *sample)
832{ 881{
833 struct annotation *notes; 882 struct annotated_source *src;
834 883
835 if (sym == NULL) 884 if (sym == NULL)
836 return 0; 885 return 0;
837 notes = symbol__get_annotation(sym, false); 886 src = symbol__hists(sym, evsel->evlist->nr_entries);
838 if (notes == NULL) 887 if (src == NULL)
839 return -ENOMEM; 888 return -ENOMEM;
840 return __symbol__inc_addr_samples(sym, map, notes, evidx, addr, sample); 889 return __symbol__inc_addr_samples(sym, map, src, evsel->idx, addr, sample);
841} 890}
842 891
843static int symbol__account_cycles(u64 addr, u64 start, 892static int symbol__account_cycles(u64 addr, u64 start,
844 struct symbol *sym, unsigned cycles) 893 struct symbol *sym, unsigned cycles)
845{ 894{
846 struct annotation *notes; 895 struct cyc_hist *cycles_hist;
847 unsigned offset; 896 unsigned offset;
848 897
849 if (sym == NULL) 898 if (sym == NULL)
850 return 0; 899 return 0;
851 notes = symbol__get_annotation(sym, true); 900 cycles_hist = symbol__cycles_hist(sym);
852 if (notes == NULL) 901 if (cycles_hist == NULL)
853 return -ENOMEM; 902 return -ENOMEM;
854 if (addr < sym->start || addr >= sym->end) 903 if (addr < sym->start || addr >= sym->end)
855 return -ERANGE; 904 return -ERANGE;
@@ -861,7 +910,7 @@ static int symbol__account_cycles(u64 addr, u64 start,
861 start = 0; 910 start = 0;
862 } 911 }
863 offset = addr - sym->start; 912 offset = addr - sym->start;
864 return __symbol__account_cycles(notes, 913 return __symbol__account_cycles(cycles_hist,
865 start ? start - sym->start : 0, 914 start ? start - sym->start : 0,
866 offset, cycles, 915 offset, cycles,
867 !!start); 916 !!start);
@@ -953,8 +1002,11 @@ void annotation__compute_ipc(struct annotation *notes, size_t size)
953 if (ch->have_start) 1002 if (ch->have_start)
954 annotation__count_and_fill(notes, ch->start, offset, ch); 1003 annotation__count_and_fill(notes, ch->start, offset, ch);
955 al = notes->offsets[offset]; 1004 al = notes->offsets[offset];
956 if (al && ch->num_aggr) 1005 if (al && ch->num_aggr) {
957 al->cycles = ch->cycles_aggr / ch->num_aggr; 1006 al->cycles = ch->cycles_aggr / ch->num_aggr;
1007 al->cycles_max = ch->cycles_max;
1008 al->cycles_min = ch->cycles_min;
1009 }
958 notes->have_cycles = true; 1010 notes->have_cycles = true;
959 } 1011 }
960 } 1012 }
@@ -962,15 +1014,15 @@ void annotation__compute_ipc(struct annotation *notes, size_t size)
962} 1014}
963 1015
964int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample, 1016int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
965 int evidx) 1017 struct perf_evsel *evsel)
966{ 1018{
967 return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr, sample); 1019 return symbol__inc_addr_samples(ams->sym, ams->map, evsel, ams->al_addr, sample);
968} 1020}
969 1021
970int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample, 1022int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
971 int evidx, u64 ip) 1023 struct perf_evsel *evsel, u64 ip)
972{ 1024{
973 return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip, sample); 1025 return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evsel, ip, sample);
974} 1026}
975 1027
976static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms) 1028static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms)
@@ -1019,6 +1071,7 @@ struct annotate_args {
1019 struct arch *arch; 1071 struct arch *arch;
1020 struct map_symbol ms; 1072 struct map_symbol ms;
1021 struct perf_evsel *evsel; 1073 struct perf_evsel *evsel;
1074 struct annotation_options *options;
1022 s64 offset; 1075 s64 offset;
1023 char *line; 1076 char *line;
1024 int line_nr; 1077 int line_nr;
@@ -1560,6 +1613,7 @@ fallback:
1560 1613
1561static int symbol__disassemble(struct symbol *sym, struct annotate_args *args) 1614static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
1562{ 1615{
1616 struct annotation_options *opts = args->options;
1563 struct map *map = args->ms.map; 1617 struct map *map = args->ms.map;
1564 struct dso *dso = map->dso; 1618 struct dso *dso = map->dso;
1565 char *command; 1619 char *command;
@@ -1607,13 +1661,13 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
1607 "%s %s%s --start-address=0x%016" PRIx64 1661 "%s %s%s --start-address=0x%016" PRIx64
1608 " --stop-address=0x%016" PRIx64 1662 " --stop-address=0x%016" PRIx64
1609 " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand", 1663 " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
1610 objdump_path ? objdump_path : "objdump", 1664 opts->objdump_path ?: "objdump",
1611 disassembler_style ? "-M " : "", 1665 opts->disassembler_style ? "-M " : "",
1612 disassembler_style ? disassembler_style : "", 1666 opts->disassembler_style ?: "",
1613 map__rip_2objdump(map, sym->start), 1667 map__rip_2objdump(map, sym->start),
1614 map__rip_2objdump(map, sym->end), 1668 map__rip_2objdump(map, sym->end),
1615 symbol_conf.annotate_asm_raw ? "" : "--no-show-raw", 1669 opts->show_asm_raw ? "" : "--no-show-raw",
1616 symbol_conf.annotate_src ? "-S" : "", 1670 opts->annotate_src ? "-S" : "",
1617 symfs_filename, symfs_filename); 1671 symfs_filename, symfs_filename);
1618 1672
1619 if (err < 0) { 1673 if (err < 0) {
@@ -1755,11 +1809,13 @@ void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel)
1755 1809
1756int symbol__annotate(struct symbol *sym, struct map *map, 1810int symbol__annotate(struct symbol *sym, struct map *map,
1757 struct perf_evsel *evsel, size_t privsize, 1811 struct perf_evsel *evsel, size_t privsize,
1812 struct annotation_options *options,
1758 struct arch **parch) 1813 struct arch **parch)
1759{ 1814{
1760 struct annotate_args args = { 1815 struct annotate_args args = {
1761 .privsize = privsize, 1816 .privsize = privsize,
1762 .evsel = evsel, 1817 .evsel = evsel,
1818 .options = options,
1763 }; 1819 };
1764 struct perf_env *env = perf_evsel__env(evsel); 1820 struct perf_env *env = perf_evsel__env(evsel);
1765 const char *arch_name = perf_env__arch(env); 1821 const char *arch_name = perf_env__arch(env);
@@ -1937,8 +1993,8 @@ static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
1937} 1993}
1938 1994
1939int symbol__annotate_printf(struct symbol *sym, struct map *map, 1995int symbol__annotate_printf(struct symbol *sym, struct map *map,
1940 struct perf_evsel *evsel, bool full_paths, 1996 struct perf_evsel *evsel,
1941 int min_pcnt, int max_lines, int context) 1997 struct annotation_options *opts)
1942{ 1998{
1943 struct dso *dso = map->dso; 1999 struct dso *dso = map->dso;
1944 char *filename; 2000 char *filename;
@@ -1950,23 +2006,28 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
1950 u64 start = map__rip_2objdump(map, sym->start); 2006 u64 start = map__rip_2objdump(map, sym->start);
1951 int printed = 2, queue_len = 0, addr_fmt_width; 2007 int printed = 2, queue_len = 0, addr_fmt_width;
1952 int more = 0; 2008 int more = 0;
2009 bool context = opts->context;
1953 u64 len; 2010 u64 len;
1954 int width = symbol_conf.show_total_period ? 12 : 8; 2011 int width = symbol_conf.show_total_period ? 12 : 8;
1955 int graph_dotted_len; 2012 int graph_dotted_len;
2013 char buf[512];
1956 2014
1957 filename = strdup(dso->long_name); 2015 filename = strdup(dso->long_name);
1958 if (!filename) 2016 if (!filename)
1959 return -ENOMEM; 2017 return -ENOMEM;
1960 2018
1961 if (full_paths) 2019 if (opts->full_path)
1962 d_filename = filename; 2020 d_filename = filename;
1963 else 2021 else
1964 d_filename = basename(filename); 2022 d_filename = basename(filename);
1965 2023
1966 len = symbol__size(sym); 2024 len = symbol__size(sym);
1967 2025
1968 if (perf_evsel__is_group_event(evsel)) 2026 if (perf_evsel__is_group_event(evsel)) {
1969 width *= evsel->nr_members; 2027 width *= evsel->nr_members;
2028 perf_evsel__group_desc(evsel, buf, sizeof(buf));
2029 evsel_name = buf;
2030 }
1970 2031
1971 graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n", 2032 graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
1972 width, width, symbol_conf.show_total_period ? "Period" : 2033 width, width, symbol_conf.show_total_period ? "Period" :
@@ -1990,7 +2051,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
1990 } 2051 }
1991 2052
1992 err = annotation_line__print(pos, sym, start, evsel, len, 2053 err = annotation_line__print(pos, sym, start, evsel, len,
1993 min_pcnt, printed, max_lines, 2054 opts->min_pcnt, printed, opts->max_lines,
1994 queue, addr_fmt_width); 2055 queue, addr_fmt_width);
1995 2056
1996 switch (err) { 2057 switch (err) {
@@ -2323,20 +2384,19 @@ static void symbol__calc_lines(struct symbol *sym, struct map *map,
2323} 2384}
2324 2385
2325int symbol__tty_annotate2(struct symbol *sym, struct map *map, 2386int symbol__tty_annotate2(struct symbol *sym, struct map *map,
2326 struct perf_evsel *evsel, bool print_lines, 2387 struct perf_evsel *evsel,
2327 bool full_paths) 2388 struct annotation_options *opts)
2328{ 2389{
2329 struct dso *dso = map->dso; 2390 struct dso *dso = map->dso;
2330 struct rb_root source_line = RB_ROOT; 2391 struct rb_root source_line = RB_ROOT;
2331 struct annotation_options opts = annotation__default_options;
2332 struct annotation *notes = symbol__annotation(sym); 2392 struct annotation *notes = symbol__annotation(sym);
2333 char buf[1024]; 2393 char buf[1024];
2334 2394
2335 if (symbol__annotate2(sym, map, evsel, &opts, NULL) < 0) 2395 if (symbol__annotate2(sym, map, evsel, opts, NULL) < 0)
2336 return -1; 2396 return -1;
2337 2397
2338 if (print_lines) { 2398 if (opts->print_lines) {
2339 srcline_full_filename = full_paths; 2399 srcline_full_filename = opts->full_path;
2340 symbol__calc_lines(sym, map, &source_line); 2400 symbol__calc_lines(sym, map, &source_line);
2341 print_summary(&source_line, dso->long_name); 2401 print_summary(&source_line, dso->long_name);
2342 } 2402 }
@@ -2351,25 +2411,24 @@ int symbol__tty_annotate2(struct symbol *sym, struct map *map,
2351} 2411}
2352 2412
2353int symbol__tty_annotate(struct symbol *sym, struct map *map, 2413int symbol__tty_annotate(struct symbol *sym, struct map *map,
2354 struct perf_evsel *evsel, bool print_lines, 2414 struct perf_evsel *evsel,
2355 bool full_paths, int min_pcnt, int max_lines) 2415 struct annotation_options *opts)
2356{ 2416{
2357 struct dso *dso = map->dso; 2417 struct dso *dso = map->dso;
2358 struct rb_root source_line = RB_ROOT; 2418 struct rb_root source_line = RB_ROOT;
2359 2419
2360 if (symbol__annotate(sym, map, evsel, 0, NULL) < 0) 2420 if (symbol__annotate(sym, map, evsel, 0, opts, NULL) < 0)
2361 return -1; 2421 return -1;
2362 2422
2363 symbol__calc_percent(sym, evsel); 2423 symbol__calc_percent(sym, evsel);
2364 2424
2365 if (print_lines) { 2425 if (opts->print_lines) {
2366 srcline_full_filename = full_paths; 2426 srcline_full_filename = opts->full_path;
2367 symbol__calc_lines(sym, map, &source_line); 2427 symbol__calc_lines(sym, map, &source_line);
2368 print_summary(&source_line, dso->long_name); 2428 print_summary(&source_line, dso->long_name);
2369 } 2429 }
2370 2430
2371 symbol__annotate_printf(sym, map, evsel, full_paths, 2431 symbol__annotate_printf(sym, map, evsel, opts);
2372 min_pcnt, max_lines, 0);
2373 2432
2374 annotated_source__purge(symbol__annotation(sym)->src); 2433 annotated_source__purge(symbol__annotation(sym)->src);
2375 2434
@@ -2486,13 +2545,38 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
2486 else 2545 else
2487 obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC"); 2546 obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
2488 2547
2489 if (al->cycles) 2548 if (!notes->options->show_minmax_cycle) {
2490 obj__printf(obj, "%*" PRIu64 " ", 2549 if (al->cycles)
2550 obj__printf(obj, "%*" PRIu64 " ",
2491 ANNOTATION__CYCLES_WIDTH - 1, al->cycles); 2551 ANNOTATION__CYCLES_WIDTH - 1, al->cycles);
2492 else if (!show_title) 2552 else if (!show_title)
2493 obj__printf(obj, "%*s", ANNOTATION__CYCLES_WIDTH, " "); 2553 obj__printf(obj, "%*s",
2494 else 2554 ANNOTATION__CYCLES_WIDTH, " ");
2495 obj__printf(obj, "%*s ", ANNOTATION__CYCLES_WIDTH - 1, "Cycle"); 2555 else
2556 obj__printf(obj, "%*s ",
2557 ANNOTATION__CYCLES_WIDTH - 1,
2558 "Cycle");
2559 } else {
2560 if (al->cycles) {
2561 char str[32];
2562
2563 scnprintf(str, sizeof(str),
2564 "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
2565 al->cycles, al->cycles_min,
2566 al->cycles_max);
2567
2568 obj__printf(obj, "%*s ",
2569 ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
2570 str);
2571 } else if (!show_title)
2572 obj__printf(obj, "%*s",
2573 ANNOTATION__MINMAX_CYCLES_WIDTH,
2574 " ");
2575 else
2576 obj__printf(obj, "%*s ",
2577 ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
2578 "Cycle(min/max)");
2579 }
2496 } 2580 }
2497 2581
2498 obj__printf(obj, " "); 2582 obj__printf(obj, " ");
@@ -2579,7 +2663,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev
2579 if (perf_evsel__is_group_event(evsel)) 2663 if (perf_evsel__is_group_event(evsel))
2580 nr_pcnt = evsel->nr_members; 2664 nr_pcnt = evsel->nr_members;
2581 2665
2582 err = symbol__annotate(sym, map, evsel, 0, parch); 2666 err = symbol__annotate(sym, map, evsel, 0, options, parch);
2583 if (err) 2667 if (err)
2584 goto out_free_offsets; 2668 goto out_free_offsets;
2585 2669
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index f28a9e43421d..a4c0d91907e6 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -61,16 +61,27 @@ bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
61 61
62#define ANNOTATION__IPC_WIDTH 6 62#define ANNOTATION__IPC_WIDTH 6
63#define ANNOTATION__CYCLES_WIDTH 6 63#define ANNOTATION__CYCLES_WIDTH 6
64#define ANNOTATION__MINMAX_CYCLES_WIDTH 19
64 65
65struct annotation_options { 66struct annotation_options {
66 bool hide_src_code, 67 bool hide_src_code,
67 use_offset, 68 use_offset,
68 jump_arrows, 69 jump_arrows,
70 print_lines,
71 full_path,
69 show_linenr, 72 show_linenr,
70 show_nr_jumps, 73 show_nr_jumps,
71 show_nr_samples, 74 show_nr_samples,
72 show_total_period; 75 show_total_period,
76 show_minmax_cycle,
77 show_asm_raw,
78 annotate_src;
73 u8 offset_level; 79 u8 offset_level;
80 int min_pcnt;
81 int max_lines;
82 int context;
83 const char *objdump_path;
84 const char *disassembler_style;
74}; 85};
75 86
76enum { 87enum {
@@ -105,6 +116,8 @@ struct annotation_line {
105 int jump_sources; 116 int jump_sources;
106 float ipc; 117 float ipc;
107 u64 cycles; 118 u64 cycles;
119 u64 cycles_max;
120 u64 cycles_min;
108 size_t privsize; 121 size_t privsize;
109 char *path; 122 char *path;
110 u32 idx; 123 u32 idx;
@@ -186,6 +199,8 @@ struct cyc_hist {
186 u64 start; 199 u64 start;
187 u64 cycles; 200 u64 cycles;
188 u64 cycles_aggr; 201 u64 cycles_aggr;
202 u64 cycles_max;
203 u64 cycles_min;
189 u32 num; 204 u32 num;
190 u32 num_aggr; 205 u32 num_aggr;
191 u8 have_start; 206 u8 have_start;
@@ -195,7 +210,11 @@ struct cyc_hist {
195 210
196/** struct annotated_source - symbols with hits have this attached as in sannotation 211/** struct annotated_source - symbols with hits have this attached as in sannotation
197 * 212 *
198 * @histogram: Array of addr hit histograms per event being monitored 213 * @histograms: Array of addr hit histograms per event being monitored
214 * nr_histograms: This may not be the same as evsel->evlist->nr_entries if
215 * we have more than a group in a evlist, where we will want
216 * to see each group separately, that is why symbol__annotate2()
217 * sets src->nr_histograms to evsel->nr_members.
199 * @lines: If 'print_lines' is specified, per source code line percentages 218 * @lines: If 'print_lines' is specified, per source code line percentages
200 * @source: source parsed from a disassembler like objdump -dS 219 * @source: source parsed from a disassembler like objdump -dS
201 * @cyc_hist: Average cycles per basic block 220 * @cyc_hist: Average cycles per basic block
@@ -211,7 +230,7 @@ struct annotated_source {
211 int nr_histograms; 230 int nr_histograms;
212 size_t sizeof_sym_hist; 231 size_t sizeof_sym_hist;
213 struct cyc_hist *cycles_hist; 232 struct cyc_hist *cycles_hist;
214 struct sym_hist histograms[0]; 233 struct sym_hist *histograms;
215}; 234};
216 235
217struct annotation { 236struct annotation {
@@ -239,6 +258,9 @@ struct annotation {
239 258
240static inline int annotation__cycles_width(struct annotation *notes) 259static inline int annotation__cycles_width(struct annotation *notes)
241{ 260{
261 if (notes->have_cycles && notes->options->show_minmax_cycle)
262 return ANNOTATION__IPC_WIDTH + ANNOTATION__MINMAX_CYCLES_WIDTH;
263
242 return notes->have_cycles ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0; 264 return notes->have_cycles ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0;
243} 265}
244 266
@@ -258,10 +280,14 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
258void annotation__update_column_widths(struct annotation *notes); 280void annotation__update_column_widths(struct annotation *notes);
259void annotation__init_column_widths(struct annotation *notes, struct symbol *sym); 281void annotation__init_column_widths(struct annotation *notes, struct symbol *sym);
260 282
283static inline struct sym_hist *annotated_source__histogram(struct annotated_source *src, int idx)
284{
285 return ((void *)src->histograms) + (src->sizeof_sym_hist * idx);
286}
287
261static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx) 288static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
262{ 289{
263 return (((void *)&notes->src->histograms) + 290 return annotated_source__histogram(notes->src, idx);
264 (notes->src->sizeof_sym_hist * idx));
265} 291}
266 292
267static inline struct annotation *symbol__annotation(struct symbol *sym) 293static inline struct annotation *symbol__annotation(struct symbol *sym)
@@ -270,20 +296,21 @@ static inline struct annotation *symbol__annotation(struct symbol *sym)
270} 296}
271 297
272int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample, 298int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
273 int evidx); 299 struct perf_evsel *evsel);
274 300
275int addr_map_symbol__account_cycles(struct addr_map_symbol *ams, 301int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
276 struct addr_map_symbol *start, 302 struct addr_map_symbol *start,
277 unsigned cycles); 303 unsigned cycles);
278 304
279int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample, 305int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
280 int evidx, u64 addr); 306 struct perf_evsel *evsel, u64 addr);
281 307
282int symbol__alloc_hist(struct symbol *sym); 308struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists);
283void symbol__annotate_zero_histograms(struct symbol *sym); 309void symbol__annotate_zero_histograms(struct symbol *sym);
284 310
285int symbol__annotate(struct symbol *sym, struct map *map, 311int symbol__annotate(struct symbol *sym, struct map *map,
286 struct perf_evsel *evsel, size_t privsize, 312 struct perf_evsel *evsel, size_t privsize,
313 struct annotation_options *options,
287 struct arch **parch); 314 struct arch **parch);
288int symbol__annotate2(struct symbol *sym, struct map *map, 315int symbol__annotate2(struct symbol *sym, struct map *map,
289 struct perf_evsel *evsel, 316 struct perf_evsel *evsel,
@@ -311,8 +338,8 @@ int symbol__strerror_disassemble(struct symbol *sym, struct map *map,
311 int errnum, char *buf, size_t buflen); 338 int errnum, char *buf, size_t buflen);
312 339
313int symbol__annotate_printf(struct symbol *sym, struct map *map, 340int symbol__annotate_printf(struct symbol *sym, struct map *map,
314 struct perf_evsel *evsel, bool full_paths, 341 struct perf_evsel *evsel,
315 int min_pcnt, int max_lines, int context); 342 struct annotation_options *options);
316int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp); 343int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp);
317void symbol__annotate_zero_histogram(struct symbol *sym, int evidx); 344void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
318void symbol__annotate_decay_histogram(struct symbol *sym, int evidx); 345void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
@@ -323,30 +350,27 @@ int map_symbol__annotation_dump(struct map_symbol *ms, struct perf_evsel *evsel)
323bool ui__has_annotation(void); 350bool ui__has_annotation(void);
324 351
325int symbol__tty_annotate(struct symbol *sym, struct map *map, 352int symbol__tty_annotate(struct symbol *sym, struct map *map,
326 struct perf_evsel *evsel, bool print_lines, 353 struct perf_evsel *evsel, struct annotation_options *opts);
327 bool full_paths, int min_pcnt, int max_lines);
328 354
329int symbol__tty_annotate2(struct symbol *sym, struct map *map, 355int symbol__tty_annotate2(struct symbol *sym, struct map *map,
330 struct perf_evsel *evsel, bool print_lines, 356 struct perf_evsel *evsel, struct annotation_options *opts);
331 bool full_paths);
332 357
333#ifdef HAVE_SLANG_SUPPORT 358#ifdef HAVE_SLANG_SUPPORT
334int symbol__tui_annotate(struct symbol *sym, struct map *map, 359int symbol__tui_annotate(struct symbol *sym, struct map *map,
335 struct perf_evsel *evsel, 360 struct perf_evsel *evsel,
336 struct hist_browser_timer *hbt); 361 struct hist_browser_timer *hbt,
362 struct annotation_options *opts);
337#else 363#else
338static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused, 364static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused,
339 struct map *map __maybe_unused, 365 struct map *map __maybe_unused,
340 struct perf_evsel *evsel __maybe_unused, 366 struct perf_evsel *evsel __maybe_unused,
341 struct hist_browser_timer *hbt 367 struct hist_browser_timer *hbt __maybe_unused,
342 __maybe_unused) 368 struct annotation_options *opts __maybe_unused)
343{ 369{
344 return 0; 370 return 0;
345} 371}
346#endif 372#endif
347 373
348extern const char *disassembler_style;
349
350void annotation_config__init(void); 374void annotation_config__init(void);
351 375
352#endif /* __PERF_ANNOTATE_H */ 376#endif /* __PERF_ANNOTATE_H */
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 857de69a5361..d056447520a2 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1679,7 +1679,7 @@ struct sym_args {
1679static bool kern_sym_match(struct sym_args *args, const char *name, char type) 1679static bool kern_sym_match(struct sym_args *args, const char *name, char type)
1680{ 1680{
1681 /* A function with the same name, and global or the n'th found or any */ 1681 /* A function with the same name, and global or the n'th found or any */
1682 return symbol_type__is_a(type, MAP__FUNCTION) && 1682 return kallsyms__is_function(type) &&
1683 !strcmp(name, args->name) && 1683 !strcmp(name, args->name) &&
1684 ((args->global && isupper(type)) || 1684 ((args->global && isupper(type)) ||
1685 (args->selected && ++(args->cnt) == args->idx) || 1685 (args->selected && ++(args->cnt) == args->idx) ||
@@ -1784,7 +1784,7 @@ static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
1784{ 1784{
1785 struct sym_args *args = arg; 1785 struct sym_args *args = arg;
1786 1786
1787 if (!symbol_type__is_a(type, MAP__FUNCTION)) 1787 if (!kallsyms__is_function(type))
1788 return 0; 1788 return 0;
1789 1789
1790 if (!args->started) { 1790 if (!args->started) {
@@ -1915,7 +1915,7 @@ static void print_duplicate_syms(struct dso *dso, const char *sym_name)
1915 1915
1916 pr_err("Multiple symbols with name '%s'\n", sym_name); 1916 pr_err("Multiple symbols with name '%s'\n", sym_name);
1917 1917
1918 sym = dso__first_symbol(dso, MAP__FUNCTION); 1918 sym = dso__first_symbol(dso);
1919 while (sym) { 1919 while (sym) {
1920 if (dso_sym_match(sym, sym_name, &cnt, -1)) { 1920 if (dso_sym_match(sym, sym_name, &cnt, -1)) {
1921 pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n", 1921 pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
@@ -1945,7 +1945,7 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
1945 *start = 0; 1945 *start = 0;
1946 *size = 0; 1946 *size = 0;
1947 1947
1948 sym = dso__first_symbol(dso, MAP__FUNCTION); 1948 sym = dso__first_symbol(dso);
1949 while (sym) { 1949 while (sym) {
1950 if (*start) { 1950 if (*start) {
1951 if (!*size) 1951 if (!*size)
@@ -1972,8 +1972,8 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
1972 1972
1973static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso) 1973static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
1974{ 1974{
1975 struct symbol *first_sym = dso__first_symbol(dso, MAP__FUNCTION); 1975 struct symbol *first_sym = dso__first_symbol(dso);
1976 struct symbol *last_sym = dso__last_symbol(dso, MAP__FUNCTION); 1976 struct symbol *last_sym = dso__last_symbol(dso);
1977 1977
1978 if (!first_sym || !last_sym) { 1978 if (!first_sym || !last_sym) {
1979 pr_err("Failed to determine filter for %s\nNo symbols found.\n", 1979 pr_err("Failed to determine filter for %s\nNo symbols found.\n",
diff --git a/tools/perf/util/bpf-prologue.c b/tools/perf/util/bpf-prologue.c
index 29347756b0af..77e4891e17b0 100644
--- a/tools/perf/util/bpf-prologue.c
+++ b/tools/perf/util/bpf-prologue.c
@@ -61,7 +61,7 @@ check_pos(struct bpf_insn_pos *pos)
61 61
62/* 62/*
63 * Convert type string (u8/u16/u32/u64/s8/s16/s32/s64 ..., see 63 * Convert type string (u8/u16/u32/u64/s8/s16/s32/s64 ..., see
64 * Documentation/trace/kprobetrace.txt) to size field of BPF_LDX_MEM 64 * Documentation/trace/kprobetrace.rst) to size field of BPF_LDX_MEM
65 * instruction (BPF_{B,H,W,DW}). 65 * instruction (BPF_{B,H,W,DW}).
66 */ 66 */
67static int 67static int
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 537eadd81914..04b1d53e4bf9 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -47,9 +47,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
47 return -1; 47 return -1;
48 } 48 }
49 49
50 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, &al); 50 if (thread__find_map(thread, sample->cpumode, sample->ip, &al))
51
52 if (al.map != NULL)
53 al.map->dso->hit = 1; 51 al.map->dso->hit = 1;
54 52
55 thread__put(thread); 53 thread__put(thread);
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index decb91f9da82..ccd02634a616 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -93,20 +93,17 @@ static int open_cgroup(const char *name)
93static struct cgroup *evlist__find_cgroup(struct perf_evlist *evlist, const char *str) 93static struct cgroup *evlist__find_cgroup(struct perf_evlist *evlist, const char *str)
94{ 94{
95 struct perf_evsel *counter; 95 struct perf_evsel *counter;
96 struct cgroup *cgrp = NULL;
97 /* 96 /*
98 * check if cgrp is already defined, if so we reuse it 97 * check if cgrp is already defined, if so we reuse it
99 */ 98 */
100 evlist__for_each_entry(evlist, counter) { 99 evlist__for_each_entry(evlist, counter) {
101 if (!counter->cgrp) 100 if (!counter->cgrp)
102 continue; 101 continue;
103 if (!strcmp(counter->cgrp->name, str)) { 102 if (!strcmp(counter->cgrp->name, str))
104 cgrp = cgroup__get(counter->cgrp); 103 return cgroup__get(counter->cgrp);
105 break;
106 }
107 } 104 }
108 105
109 return cgrp; 106 return NULL;
110} 107}
111 108
112static struct cgroup *cgroup__new(const char *name) 109static struct cgroup *cgroup__new(const char *name)
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 84eb9393c7db..5ac157056cdf 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -707,6 +707,14 @@ struct perf_config_set *perf_config_set__new(void)
707 return set; 707 return set;
708} 708}
709 709
710static int perf_config__init(void)
711{
712 if (config_set == NULL)
713 config_set = perf_config_set__new();
714
715 return config_set == NULL;
716}
717
710int perf_config(config_fn_t fn, void *data) 718int perf_config(config_fn_t fn, void *data)
711{ 719{
712 int ret = 0; 720 int ret = 0;
@@ -714,7 +722,7 @@ int perf_config(config_fn_t fn, void *data)
714 struct perf_config_section *section; 722 struct perf_config_section *section;
715 struct perf_config_item *item; 723 struct perf_config_item *item;
716 724
717 if (config_set == NULL) 725 if (config_set == NULL && perf_config__init())
718 return -1; 726 return -1;
719 727
720 perf_config_set__for_each_entry(config_set, section, item) { 728 perf_config_set__for_each_entry(config_set, section, item) {
@@ -735,12 +743,6 @@ int perf_config(config_fn_t fn, void *data)
735 return ret; 743 return ret;
736} 744}
737 745
738void perf_config__init(void)
739{
740 if (config_set == NULL)
741 config_set = perf_config_set__new();
742}
743
744void perf_config__exit(void) 746void perf_config__exit(void)
745{ 747{
746 perf_config_set__delete(config_set); 748 perf_config_set__delete(config_set);
diff --git a/tools/perf/util/config.h b/tools/perf/util/config.h
index baf82bf227ac..bd0a5897c76a 100644
--- a/tools/perf/util/config.h
+++ b/tools/perf/util/config.h
@@ -38,7 +38,6 @@ struct perf_config_set *perf_config_set__new(void);
38void perf_config_set__delete(struct perf_config_set *set); 38void perf_config_set__delete(struct perf_config_set *set);
39int perf_config_set__collect(struct perf_config_set *set, const char *file_name, 39int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
40 const char *var, const char *value); 40 const char *var, const char *value);
41void perf_config__init(void);
42void perf_config__exit(void); 41void perf_config__exit(void);
43void perf_config__refresh(void); 42void perf_config__refresh(void);
44 43
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index bf16dc9ee507..822ba915d144 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -270,9 +270,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
270 thread = etmq->etm->unknown_thread; 270 thread = etmq->etm->unknown_thread;
271 } 271 }
272 272
273 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, address, &al); 273 if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
274
275 if (!al.map || !al.map->dso)
276 return 0; 274 return 0;
277 275
278 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR && 276 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index b0c2b5c5d337..7123746edcf4 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -247,9 +247,9 @@ static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
247 *dso_db_id = dso->db_id; 247 *dso_db_id = dso->db_id;
248 248
249 if (!al->sym) { 249 if (!al->sym) {
250 al->sym = symbol__new(al->addr, 0, 0, "unknown"); 250 al->sym = symbol__new(al->addr, 0, 0, 0, "unknown");
251 if (al->sym) 251 if (al->sym)
252 dso__insert_symbol(dso, al->map->type, al->sym); 252 dso__insert_symbol(dso, al->sym);
253 } 253 }
254 254
255 if (al->sym) { 255 if (al->sym) {
@@ -315,8 +315,7 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
315 al.addr = node->ip; 315 al.addr = node->ip;
316 316
317 if (al.map && !al.sym) 317 if (al.map && !al.sym)
318 al.sym = dso__find_symbol(al.map->dso, MAP__FUNCTION, 318 al.sym = dso__find_symbol(al.map->dso, al.addr);
319 al.addr);
320 319
321 db_ids_from_al(dbe, &al, &dso_db_id, &sym_db_id, &offset); 320 db_ids_from_al(dbe, &al, &dso_db_id, &sym_db_id, &offset);
322 321
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 36ef45b2e89d..51cf82cf1882 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -354,6 +354,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
354 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) || 354 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
355 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) || 355 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
356 (strncmp(name, "[vdso]", 6) == 0) || 356 (strncmp(name, "[vdso]", 6) == 0) ||
357 (strncmp(name, "[vdso32]", 8) == 0) ||
358 (strncmp(name, "[vdsox32]", 9) == 0) ||
357 (strncmp(name, "[vsyscall]", 10) == 0)) { 359 (strncmp(name, "[vsyscall]", 10) == 0)) {
358 m->kmod = false; 360 m->kmod = false;
359 361
@@ -1014,7 +1016,7 @@ struct map *dso__new_map(const char *name)
1014 struct dso *dso = dso__new(name); 1016 struct dso *dso = dso__new(name);
1015 1017
1016 if (dso) 1018 if (dso)
1017 map = map__new2(0, dso, MAP__FUNCTION); 1019 map = map__new2(0, dso);
1018 1020
1019 return map; 1021 return map;
1020} 1022}
@@ -1176,19 +1178,19 @@ int dso__name_len(const struct dso *dso)
1176 return dso->short_name_len; 1178 return dso->short_name_len;
1177} 1179}
1178 1180
1179bool dso__loaded(const struct dso *dso, enum map_type type) 1181bool dso__loaded(const struct dso *dso)
1180{ 1182{
1181 return dso->loaded & (1 << type); 1183 return dso->loaded;
1182} 1184}
1183 1185
1184bool dso__sorted_by_name(const struct dso *dso, enum map_type type) 1186bool dso__sorted_by_name(const struct dso *dso)
1185{ 1187{
1186 return dso->sorted_by_name & (1 << type); 1188 return dso->sorted_by_name;
1187} 1189}
1188 1190
1189void dso__set_sorted_by_name(struct dso *dso, enum map_type type) 1191void dso__set_sorted_by_name(struct dso *dso)
1190{ 1192{
1191 dso->sorted_by_name |= (1 << type); 1193 dso->sorted_by_name = true;
1192} 1194}
1193 1195
1194struct dso *dso__new(const char *name) 1196struct dso *dso__new(const char *name)
@@ -1196,12 +1198,10 @@ struct dso *dso__new(const char *name)
1196 struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); 1198 struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
1197 1199
1198 if (dso != NULL) { 1200 if (dso != NULL) {
1199 int i;
1200 strcpy(dso->name, name); 1201 strcpy(dso->name, name);
1201 dso__set_long_name(dso, dso->name, false); 1202 dso__set_long_name(dso, dso->name, false);
1202 dso__set_short_name(dso, dso->name, false); 1203 dso__set_short_name(dso, dso->name, false);
1203 for (i = 0; i < MAP__NR_TYPES; ++i) 1204 dso->symbols = dso->symbol_names = RB_ROOT;
1204 dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
1205 dso->data.cache = RB_ROOT; 1205 dso->data.cache = RB_ROOT;
1206 dso->inlined_nodes = RB_ROOT; 1206 dso->inlined_nodes = RB_ROOT;
1207 dso->srclines = RB_ROOT; 1207 dso->srclines = RB_ROOT;
@@ -1231,8 +1231,6 @@ struct dso *dso__new(const char *name)
1231 1231
1232void dso__delete(struct dso *dso) 1232void dso__delete(struct dso *dso)
1233{ 1233{
1234 int i;
1235
1236 if (!RB_EMPTY_NODE(&dso->rb_node)) 1234 if (!RB_EMPTY_NODE(&dso->rb_node))
1237 pr_err("DSO %s is still in rbtree when being deleted!\n", 1235 pr_err("DSO %s is still in rbtree when being deleted!\n",
1238 dso->long_name); 1236 dso->long_name);
@@ -1240,8 +1238,7 @@ void dso__delete(struct dso *dso)
1240 /* free inlines first, as they reference symbols */ 1238 /* free inlines first, as they reference symbols */
1241 inlines__tree_delete(&dso->inlined_nodes); 1239 inlines__tree_delete(&dso->inlined_nodes);
1242 srcline__tree_delete(&dso->srclines); 1240 srcline__tree_delete(&dso->srclines);
1243 for (i = 0; i < MAP__NR_TYPES; ++i) 1241 symbols__delete(&dso->symbols);
1244 symbols__delete(&dso->symbols[i]);
1245 1242
1246 if (dso->short_name_allocated) { 1243 if (dso->short_name_allocated) {
1247 zfree((char **)&dso->short_name); 1244 zfree((char **)&dso->short_name);
@@ -1451,9 +1448,7 @@ size_t __dsos__fprintf(struct list_head *head, FILE *fp)
1451 size_t ret = 0; 1448 size_t ret = 0;
1452 1449
1453 list_for_each_entry(pos, head, node) { 1450 list_for_each_entry(pos, head, node) {
1454 int i; 1451 ret += dso__fprintf(pos, fp);
1455 for (i = 0; i < MAP__NR_TYPES; ++i)
1456 ret += dso__fprintf(pos, i, fp);
1457 } 1452 }
1458 1453
1459 return ret; 1454 return ret;
@@ -1467,18 +1462,17 @@ size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1467 return fprintf(fp, "%s", sbuild_id); 1462 return fprintf(fp, "%s", sbuild_id);
1468} 1463}
1469 1464
1470size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) 1465size_t dso__fprintf(struct dso *dso, FILE *fp)
1471{ 1466{
1472 struct rb_node *nd; 1467 struct rb_node *nd;
1473 size_t ret = fprintf(fp, "dso: %s (", dso->short_name); 1468 size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1474 1469
1475 if (dso->short_name != dso->long_name) 1470 if (dso->short_name != dso->long_name)
1476 ret += fprintf(fp, "%s, ", dso->long_name); 1471 ret += fprintf(fp, "%s, ", dso->long_name);
1477 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], 1472 ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
1478 dso__loaded(dso, type) ? "" : "NOT ");
1479 ret += dso__fprintf_buildid(dso, fp); 1473 ret += dso__fprintf_buildid(dso, fp);
1480 ret += fprintf(fp, ")\n"); 1474 ret += fprintf(fp, ")\n");
1481 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { 1475 for (nd = rb_first(&dso->symbols); nd; nd = rb_next(nd)) {
1482 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 1476 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1483 ret += symbol__fprintf(pos, fp); 1477 ret += symbol__fprintf(pos, fp);
1484 } 1478 }
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index c229dbe0277a..ef69de2e69ea 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -140,14 +140,14 @@ struct dso {
140 struct list_head node; 140 struct list_head node;
141 struct rb_node rb_node; /* rbtree node sorted by long name */ 141 struct rb_node rb_node; /* rbtree node sorted by long name */
142 struct rb_root *root; /* root of rbtree that rb_node is in */ 142 struct rb_root *root; /* root of rbtree that rb_node is in */
143 struct rb_root symbols[MAP__NR_TYPES]; 143 struct rb_root symbols;
144 struct rb_root symbol_names[MAP__NR_TYPES]; 144 struct rb_root symbol_names;
145 struct rb_root inlined_nodes; 145 struct rb_root inlined_nodes;
146 struct rb_root srclines; 146 struct rb_root srclines;
147 struct { 147 struct {
148 u64 addr; 148 u64 addr;
149 struct symbol *symbol; 149 struct symbol *symbol;
150 } last_find_result[MAP__NR_TYPES]; 150 } last_find_result;
151 void *a2l; 151 void *a2l;
152 char *symsrc_filename; 152 char *symsrc_filename;
153 unsigned int a2l_fails; 153 unsigned int a2l_fails;
@@ -164,8 +164,8 @@ struct dso {
164 u8 short_name_allocated:1; 164 u8 short_name_allocated:1;
165 u8 long_name_allocated:1; 165 u8 long_name_allocated:1;
166 u8 is_64_bit:1; 166 u8 is_64_bit:1;
167 u8 sorted_by_name; 167 bool sorted_by_name;
168 u8 loaded; 168 bool loaded;
169 u8 rel; 169 u8 rel;
170 u8 build_id[BUILD_ID_SIZE]; 170 u8 build_id[BUILD_ID_SIZE];
171 u64 text_offset; 171 u64 text_offset;
@@ -202,14 +202,13 @@ struct dso {
202 * @dso: the 'struct dso *' in which symbols itereated 202 * @dso: the 'struct dso *' in which symbols itereated
203 * @pos: the 'struct symbol *' to use as a loop cursor 203 * @pos: the 'struct symbol *' to use as a loop cursor
204 * @n: the 'struct rb_node *' to use as a temporary storage 204 * @n: the 'struct rb_node *' to use as a temporary storage
205 * @type: the 'enum map_type' type of symbols
206 */ 205 */
207#define dso__for_each_symbol(dso, pos, n, type) \ 206#define dso__for_each_symbol(dso, pos, n) \
208 symbols__for_each_entry(&(dso)->symbols[(type)], pos, n) 207 symbols__for_each_entry(&(dso)->symbols, pos, n)
209 208
210static inline void dso__set_loaded(struct dso *dso, enum map_type type) 209static inline void dso__set_loaded(struct dso *dso)
211{ 210{
212 dso->loaded |= (1 << type); 211 dso->loaded = true;
213} 212}
214 213
215struct dso *dso__new(const char *name); 214struct dso *dso__new(const char *name);
@@ -231,11 +230,16 @@ static inline void __dso__zput(struct dso **dso)
231 230
232#define dso__zput(dso) __dso__zput(&dso) 231#define dso__zput(dso) __dso__zput(&dso)
233 232
234bool dso__loaded(const struct dso *dso, enum map_type type); 233bool dso__loaded(const struct dso *dso);
235 234
236bool dso__sorted_by_name(const struct dso *dso, enum map_type type); 235static inline bool dso__has_symbols(const struct dso *dso)
237void dso__set_sorted_by_name(struct dso *dso, enum map_type type); 236{
238void dso__sort_by_name(struct dso *dso, enum map_type type); 237 return !RB_EMPTY_ROOT(&dso->symbols);
238}
239
240bool dso__sorted_by_name(const struct dso *dso);
241void dso__set_sorted_by_name(struct dso *dso);
242void dso__sort_by_name(struct dso *dso);
239 243
240void dso__set_build_id(struct dso *dso, void *build_id); 244void dso__set_build_id(struct dso *dso, void *build_id);
241bool dso__build_id_equal(const struct dso *dso, u8 *build_id); 245bool dso__build_id_equal(const struct dso *dso, u8 *build_id);
@@ -349,9 +353,8 @@ size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
349size_t __dsos__fprintf(struct list_head *head, FILE *fp); 353size_t __dsos__fprintf(struct list_head *head, FILE *fp);
350 354
351size_t dso__fprintf_buildid(struct dso *dso, FILE *fp); 355size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
352size_t dso__fprintf_symbols_by_name(struct dso *dso, 356size_t dso__fprintf_symbols_by_name(struct dso *dso, FILE *fp);
353 enum map_type type, FILE *fp); 357size_t dso__fprintf(struct dso *dso, FILE *fp);
354size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp);
355 358
356static inline bool dso__is_vmlinux(struct dso *dso) 359static inline bool dso__is_vmlinux(struct dso *dso)
357{ 360{
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 4c842762e3f2..59f38c7693f8 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -93,6 +93,37 @@ int perf_env__read_cpu_topology_map(struct perf_env *env)
93 return 0; 93 return 0;
94} 94}
95 95
96static int perf_env__read_arch(struct perf_env *env)
97{
98 struct utsname uts;
99
100 if (env->arch)
101 return 0;
102
103 if (!uname(&uts))
104 env->arch = strdup(uts.machine);
105
106 return env->arch ? 0 : -ENOMEM;
107}
108
109static int perf_env__read_nr_cpus_avail(struct perf_env *env)
110{
111 if (env->nr_cpus_avail == 0)
112 env->nr_cpus_avail = cpu__max_present_cpu();
113
114 return env->nr_cpus_avail ? 0 : -ENOENT;
115}
116
117const char *perf_env__raw_arch(struct perf_env *env)
118{
119 return env && !perf_env__read_arch(env) ? env->arch : "unknown";
120}
121
122int perf_env__nr_cpus_avail(struct perf_env *env)
123{
124 return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
125}
126
96void cpu_cache_level__free(struct cpu_cache_level *cache) 127void cpu_cache_level__free(struct cpu_cache_level *cache)
97{ 128{
98 free(cache->type); 129 free(cache->type);
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index c4ef2e523367..1f3ccc368530 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -76,4 +76,7 @@ int perf_env__read_cpu_topology_map(struct perf_env *env);
76void cpu_cache_level__free(struct cpu_cache_level *cache); 76void cpu_cache_level__free(struct cpu_cache_level *cache);
77 77
78const char *perf_env__arch(struct perf_env *env); 78const char *perf_env__arch(struct perf_env *env);
79const char *perf_env__raw_arch(struct perf_env *env);
80int perf_env__nr_cpus_avail(struct perf_env *env);
81
79#endif /* __PERF_ENV_H */ 82#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 98ff3a6a3d50..0c8ecf0c78a4 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -88,10 +88,10 @@ static const char *perf_ns__name(unsigned int id)
88 return perf_ns__names[id]; 88 return perf_ns__names[id];
89} 89}
90 90
91static int perf_tool__process_synth_event(struct perf_tool *tool, 91int perf_tool__process_synth_event(struct perf_tool *tool,
92 union perf_event *event, 92 union perf_event *event,
93 struct machine *machine, 93 struct machine *machine,
94 perf_event__handler_t process) 94 perf_event__handler_t process)
95{ 95{
96 struct perf_sample synth_sample = { 96 struct perf_sample synth_sample = {
97 .pid = -1, 97 .pid = -1,
@@ -464,8 +464,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
464{ 464{
465 int rc = 0; 465 int rc = 0;
466 struct map *pos; 466 struct map *pos;
467 struct map_groups *kmaps = &machine->kmaps; 467 struct maps *maps = machine__kernel_maps(machine);
468 struct maps *maps = &kmaps->maps[MAP__FUNCTION];
469 union perf_event *event = zalloc((sizeof(event->mmap) + 468 union perf_event *event = zalloc((sizeof(event->mmap) +
470 machine->id_hdr_size)); 469 machine->id_hdr_size));
471 if (event == NULL) { 470 if (event == NULL) {
@@ -488,7 +487,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
488 for (pos = maps__first(maps); pos; pos = map__next(pos)) { 487 for (pos = maps__first(maps); pos; pos = map__next(pos)) {
489 size_t size; 488 size_t size;
490 489
491 if (__map__is_kernel(pos)) 490 if (!__map__is_kmodule(pos))
492 continue; 491 continue;
493 492
494 size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); 493 size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
@@ -869,7 +868,7 @@ static int find_symbol_cb(void *arg, const char *name, char type,
869 * Must be a function or at least an alias, as in PARISC64, where "_text" is 868 * Must be a function or at least an alias, as in PARISC64, where "_text" is
870 * an 'A' to the same address as "_stext". 869 * an 'A' to the same address as "_stext".
871 */ 870 */
872 if (!(symbol_type__is_a(type, MAP__FUNCTION) || 871 if (!(kallsyms__is_function(type) ||
873 type == 'A') || strcmp(name, args->name)) 872 type == 'A') || strcmp(name, args->name))
874 return 0; 873 return 0;
875 874
@@ -889,9 +888,16 @@ int kallsyms__get_function_start(const char *kallsyms_filename,
889 return 0; 888 return 0;
890} 889}
891 890
892int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, 891int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
893 perf_event__handler_t process, 892 perf_event__handler_t process __maybe_unused,
894 struct machine *machine) 893 struct machine *machine __maybe_unused)
894{
895 return 0;
896}
897
898static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
899 perf_event__handler_t process,
900 struct machine *machine)
895{ 901{
896 size_t size; 902 size_t size;
897 struct map *map = machine__kernel_map(machine); 903 struct map *map = machine__kernel_map(machine);
@@ -944,6 +950,19 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
944 return err; 950 return err;
945} 951}
946 952
953int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
954 perf_event__handler_t process,
955 struct machine *machine)
956{
957 int err;
958
959 err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
960 if (err < 0)
961 return err;
962
963 return perf_event__synthesize_extra_kmaps(tool, process, machine);
964}
965
947int perf_event__synthesize_thread_map2(struct perf_tool *tool, 966int perf_event__synthesize_thread_map2(struct perf_tool *tool,
948 struct thread_map *threads, 967 struct thread_map *threads,
949 perf_event__handler_t process, 968 perf_event__handler_t process,
@@ -1489,9 +1508,8 @@ int perf_event__process(struct perf_tool *tool __maybe_unused,
1489 return machine__process_event(machine, event, sample); 1508 return machine__process_event(machine, event, sample);
1490} 1509}
1491 1510
1492void thread__find_addr_map(struct thread *thread, u8 cpumode, 1511struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
1493 enum map_type type, u64 addr, 1512 struct addr_location *al)
1494 struct addr_location *al)
1495{ 1513{
1496 struct map_groups *mg = thread->mg; 1514 struct map_groups *mg = thread->mg;
1497 struct machine *machine = mg->machine; 1515 struct machine *machine = mg->machine;
@@ -1505,7 +1523,7 @@ void thread__find_addr_map(struct thread *thread, u8 cpumode,
1505 1523
1506 if (machine == NULL) { 1524 if (machine == NULL) {
1507 al->map = NULL; 1525 al->map = NULL;
1508 return; 1526 return NULL;
1509 } 1527 }
1510 1528
1511 if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) { 1529 if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
@@ -1533,10 +1551,10 @@ void thread__find_addr_map(struct thread *thread, u8 cpumode,
1533 !perf_host) 1551 !perf_host)
1534 al->filtered |= (1 << HIST_FILTER__HOST); 1552 al->filtered |= (1 << HIST_FILTER__HOST);
1535 1553
1536 return; 1554 return NULL;
1537 } 1555 }
1538try_again: 1556try_again:
1539 al->map = map_groups__find(mg, type, al->addr); 1557 al->map = map_groups__find(mg, al->addr);
1540 if (al->map == NULL) { 1558 if (al->map == NULL) {
1541 /* 1559 /*
1542 * If this is outside of all known maps, and is a negative 1560 * If this is outside of all known maps, and is a negative
@@ -1563,17 +1581,17 @@ try_again:
1563 map__load(al->map); 1581 map__load(al->map);
1564 al->addr = al->map->map_ip(al->map, al->addr); 1582 al->addr = al->map->map_ip(al->map, al->addr);
1565 } 1583 }
1584
1585 return al->map;
1566} 1586}
1567 1587
1568void thread__find_addr_location(struct thread *thread, 1588struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
1569 u8 cpumode, enum map_type type, u64 addr, 1589 u64 addr, struct addr_location *al)
1570 struct addr_location *al)
1571{ 1590{
1572 thread__find_addr_map(thread, cpumode, type, addr, al); 1591 al->sym = NULL;
1573 if (al->map != NULL) 1592 if (thread__find_map(thread, cpumode, addr, al))
1574 al->sym = map__find_symbol(al->map, al->addr); 1593 al->sym = map__find_symbol(al->map, al->addr);
1575 else 1594 return al->sym;
1576 al->sym = NULL;
1577} 1595}
1578 1596
1579/* 1597/*
@@ -1590,7 +1608,7 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
1590 return -1; 1608 return -1;
1591 1609
1592 dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid); 1610 dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
1593 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al); 1611 thread__find_map(thread, sample->cpumode, sample->ip, al);
1594 dump_printf(" ...... dso: %s\n", 1612 dump_printf(" ...... dso: %s\n",
1595 al->map ? al->map->dso->long_name : 1613 al->map ? al->map->dso->long_name :
1596 al->level == 'H' ? "[hypervisor]" : "<not found>"); 1614 al->level == 'H' ? "[hypervisor]" : "<not found>");
@@ -1669,10 +1687,7 @@ bool sample_addr_correlates_sym(struct perf_event_attr *attr)
1669void thread__resolve(struct thread *thread, struct addr_location *al, 1687void thread__resolve(struct thread *thread, struct addr_location *al,
1670 struct perf_sample *sample) 1688 struct perf_sample *sample)
1671{ 1689{
1672 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al); 1690 thread__find_map(thread, sample->cpumode, sample->addr, al);
1673 if (!al->map)
1674 thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
1675 sample->addr, al);
1676 1691
1677 al->cpu = sample->cpu; 1692 al->cpu = sample->cpu;
1678 al->sym = NULL; 1693 al->sym = NULL;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 0f794744919c..bfa60bcafbde 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -750,6 +750,10 @@ int perf_event__process_exit(struct perf_tool *tool,
750 union perf_event *event, 750 union perf_event *event,
751 struct perf_sample *sample, 751 struct perf_sample *sample,
752 struct machine *machine); 752 struct machine *machine);
753int perf_tool__process_synth_event(struct perf_tool *tool,
754 union perf_event *event,
755 struct machine *machine,
756 perf_event__handler_t process);
753int perf_event__process(struct perf_tool *tool, 757int perf_event__process(struct perf_tool *tool,
754 union perf_event *event, 758 union perf_event *event,
755 struct perf_sample *sample, 759 struct perf_sample *sample,
@@ -796,6 +800,10 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
796 bool mmap_data, 800 bool mmap_data,
797 unsigned int proc_map_timeout); 801 unsigned int proc_map_timeout);
798 802
803int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
804 perf_event__handler_t process,
805 struct machine *machine);
806
799size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp); 807size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
800size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp); 808size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
801size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp); 809size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index a59281d64368..e7a4b31a84fb 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1795,3 +1795,18 @@ bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
1795 1795
1796 return true; 1796 return true;
1797} 1797}
1798
1799/*
1800 * Events in data file are not collect in groups, but we still want
1801 * the group display. Set the artificial group and set the leader's
1802 * forced_leader flag to notify the display code.
1803 */
1804void perf_evlist__force_leader(struct perf_evlist *evlist)
1805{
1806 if (!evlist->nr_groups) {
1807 struct perf_evsel *leader = perf_evlist__first(evlist);
1808
1809 perf_evlist__set_leader(evlist);
1810 leader->forced_leader = true;
1811 }
1812}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 6c41b2f78713..dc66436add98 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -309,4 +309,7 @@ struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
309 union perf_event *event); 309 union perf_event *event);
310 310
311bool perf_evlist__exclude_kernel(struct perf_evlist *evlist); 311bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
312
313void perf_evlist__force_leader(struct perf_evlist *evlist);
314
312#endif /* __PERF_EVLIST_H */ 315#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 4cd2cf93f726..94fce4f537e9 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -2197,7 +2197,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
2197 } 2197 }
2198 } 2198 }
2199 2199
2200 if (type & PERF_SAMPLE_CALLCHAIN) { 2200 if (evsel__has_callchain(evsel)) {
2201 const u64 max_callchain_nr = UINT64_MAX / sizeof(u64); 2201 const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
2202 2202
2203 OVERFLOW_CHECK_u64(array); 2203 OVERFLOW_CHECK_u64(array);
@@ -2857,12 +2857,12 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
2857 "Hint: Try again after reducing the number of events.\n" 2857 "Hint: Try again after reducing the number of events.\n"
2858 "Hint: Try increasing the limit with 'ulimit -n <limit>'"); 2858 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
2859 case ENOMEM: 2859 case ENOMEM:
2860 if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0 && 2860 if (evsel__has_callchain(evsel) &&
2861 access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0) 2861 access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
2862 return scnprintf(msg, size, 2862 return scnprintf(msg, size,
2863 "Not enough memory to setup event with callchain.\n" 2863 "Not enough memory to setup event with callchain.\n"
2864 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n" 2864 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
2865 "Hint: Current value: %d", sysctl_perf_event_max_stack); 2865 "Hint: Current value: %d", sysctl__max_stack());
2866 break; 2866 break;
2867 case ENODEV: 2867 case ENODEV:
2868 if (target->cpu_list) 2868 if (target->cpu_list)
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index b13f5f234c8f..d277930b19a1 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -459,6 +459,11 @@ static inline bool perf_evsel__has_branch_callstack(const struct perf_evsel *evs
459 return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK; 459 return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
460} 460}
461 461
462static inline bool evsel__has_callchain(const struct perf_evsel *evsel)
463{
464 return (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0;
465}
466
462typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *); 467typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
463 468
464int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr, 469int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index c540d47583e7..aafbe54fd3fa 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -114,7 +114,7 @@ gen_build_id(struct buildid_note *note,
114 114
115 fd = open("/dev/urandom", O_RDONLY); 115 fd = open("/dev/urandom", O_RDONLY);
116 if (fd == -1) 116 if (fd == -1)
117 err(1, "cannot access /dev/urandom for builid"); 117 err(1, "cannot access /dev/urandom for buildid");
118 118
119 sret = read(fd, note->build_id, sz); 119 sret = read(fd, note->build_id, sz);
120 120
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index a8bff2178fbc..540cd2dcd3e7 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1459,8 +1459,24 @@ static void print_cmdline(struct feat_fd *ff, FILE *fp)
1459 1459
1460 fprintf(fp, "# cmdline : "); 1460 fprintf(fp, "# cmdline : ");
1461 1461
1462 for (i = 0; i < nr; i++) 1462 for (i = 0; i < nr; i++) {
1463 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]); 1463 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
1464 if (!argv_i) {
1465 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1466 } else {
1467 char *mem = argv_i;
1468 do {
1469 char *quote = strchr(argv_i, '\'');
1470 if (!quote)
1471 break;
1472 *quote++ = '\0';
1473 fprintf(fp, "%s\\\'", argv_i);
1474 argv_i = quote;
1475 } while (1);
1476 fprintf(fp, "%s ", argv_i);
1477 free(mem);
1478 }
1479 }
1464 fputc('\n', fp); 1480 fputc('\n', fp);
1465} 1481}
1466 1482
@@ -3312,8 +3328,6 @@ int perf_session__read_header(struct perf_session *session)
3312 lseek(fd, tmp, SEEK_SET); 3328 lseek(fd, tmp, SEEK_SET);
3313 } 3329 }
3314 3330
3315 symbol_conf.nr_events = nr_attrs;
3316
3317 perf_header__process_sections(header, fd, &session->tevent, 3331 perf_header__process_sections(header, fd, &session->tevent,
3318 perf_file_section__process); 3332 perf_file_section__process);
3319 3333
@@ -3739,8 +3753,6 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3739 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); 3753 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3740 } 3754 }
3741 3755
3742 symbol_conf.nr_events = evlist->nr_entries;
3743
3744 return 0; 3756 return 0;
3745} 3757}
3746 3758
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 4d602fba40b2..52e8fda93a47 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -410,7 +410,7 @@ static int hist_entry__init(struct hist_entry *he,
410 map__get(he->mem_info->daddr.map); 410 map__get(he->mem_info->daddr.map);
411 } 411 }
412 412
413 if (symbol_conf.use_callchain) 413 if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
414 callchain_init(he->callchain); 414 callchain_init(he->callchain);
415 415
416 if (he->raw_data) { 416 if (he->raw_data) {
@@ -492,7 +492,7 @@ static u8 symbol__parent_filter(const struct symbol *parent)
492 492
493static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period) 493static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
494{ 494{
495 if (!symbol_conf.use_callchain) 495 if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
496 return; 496 return;
497 497
498 he->hists->callchain_period += period; 498 he->hists->callchain_period += period;
@@ -986,7 +986,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
986 iter->he = he; 986 iter->he = he;
987 he_cache[iter->curr++] = he; 987 he_cache[iter->curr++] = he;
988 988
989 if (symbol_conf.use_callchain) 989 if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
990 callchain_append(he->callchain, &cursor, sample->period); 990 callchain_append(he->callchain, &cursor, sample->period);
991 return 0; 991 return 0;
992} 992}
@@ -1039,7 +1039,7 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1039 int err, err2; 1039 int err, err2;
1040 struct map *alm = NULL; 1040 struct map *alm = NULL;
1041 1041
1042 if (al && al->map) 1042 if (al)
1043 alm = map__get(al->map); 1043 alm = map__get(al->map);
1044 1044
1045 err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent, 1045 err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
@@ -1373,7 +1373,8 @@ static int hists__hierarchy_insert_entry(struct hists *hists,
1373 if (new_he) { 1373 if (new_he) {
1374 new_he->leaf = true; 1374 new_he->leaf = true;
1375 1375
1376 if (symbol_conf.use_callchain) { 1376 if (hist_entry__has_callchains(new_he) &&
1377 symbol_conf.use_callchain) {
1377 callchain_cursor_reset(&callchain_cursor); 1378 callchain_cursor_reset(&callchain_cursor);
1378 if (callchain_merge(&callchain_cursor, 1379 if (callchain_merge(&callchain_cursor,
1379 new_he->callchain, 1380 new_he->callchain,
@@ -1414,7 +1415,7 @@ static int hists__collapse_insert_entry(struct hists *hists,
1414 if (symbol_conf.cumulate_callchain) 1415 if (symbol_conf.cumulate_callchain)
1415 he_stat__add_stat(iter->stat_acc, he->stat_acc); 1416 he_stat__add_stat(iter->stat_acc, he->stat_acc);
1416 1417
1417 if (symbol_conf.use_callchain) { 1418 if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1418 callchain_cursor_reset(&callchain_cursor); 1419 callchain_cursor_reset(&callchain_cursor);
1419 if (callchain_merge(&callchain_cursor, 1420 if (callchain_merge(&callchain_cursor,
1420 iter->callchain, 1421 iter->callchain,
@@ -1757,7 +1758,7 @@ void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *pro
1757 bool use_callchain; 1758 bool use_callchain;
1758 1759
1759 if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph) 1760 if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1760 use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN; 1761 use_callchain = evsel__has_callchain(evsel);
1761 else 1762 else
1762 use_callchain = symbol_conf.use_callchain; 1763 use_callchain = symbol_conf.use_callchain;
1763 1764
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index fbabfd8a215d..06607c434949 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -220,6 +220,12 @@ static inline struct hists *evsel__hists(struct perf_evsel *evsel)
220 return &hevsel->hists; 220 return &hevsel->hists;
221} 221}
222 222
223static __pure inline bool hists__has_callchains(struct hists *hists)
224{
225 const struct perf_evsel *evsel = hists_to_evsel(hists);
226 return evsel__has_callchain(evsel);
227}
228
223int hists__init(void); 229int hists__init(void);
224int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list); 230int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list);
225 231
@@ -419,19 +425,24 @@ struct hist_browser_timer {
419 int refresh; 425 int refresh;
420}; 426};
421 427
428struct annotation_options;
429
422#ifdef HAVE_SLANG_SUPPORT 430#ifdef HAVE_SLANG_SUPPORT
423#include "../ui/keysyms.h" 431#include "../ui/keysyms.h"
424int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel, 432int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel,
425 struct hist_browser_timer *hbt); 433 struct hist_browser_timer *hbt,
434 struct annotation_options *annotation_opts);
426 435
427int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel, 436int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
428 struct hist_browser_timer *hbt); 437 struct hist_browser_timer *hbt,
438 struct annotation_options *annotation_opts);
429 439
430int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, 440int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
431 struct hist_browser_timer *hbt, 441 struct hist_browser_timer *hbt,
432 float min_pcnt, 442 float min_pcnt,
433 struct perf_env *env, 443 struct perf_env *env,
434 bool warn_lost_event); 444 bool warn_lost_event,
445 struct annotation_options *annotation_options);
435int script_browse(const char *script_opt); 446int script_browse(const char *script_opt);
436#else 447#else
437static inline 448static inline
@@ -440,20 +451,23 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
440 struct hist_browser_timer *hbt __maybe_unused, 451 struct hist_browser_timer *hbt __maybe_unused,
441 float min_pcnt __maybe_unused, 452 float min_pcnt __maybe_unused,
442 struct perf_env *env __maybe_unused, 453 struct perf_env *env __maybe_unused,
443 bool warn_lost_event __maybe_unused) 454 bool warn_lost_event __maybe_unused,
455 struct annotation_options *annotation_options __maybe_unused)
444{ 456{
445 return 0; 457 return 0;
446} 458}
447static inline int map_symbol__tui_annotate(struct map_symbol *ms __maybe_unused, 459static inline int map_symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
448 struct perf_evsel *evsel __maybe_unused, 460 struct perf_evsel *evsel __maybe_unused,
449 struct hist_browser_timer *hbt __maybe_unused) 461 struct hist_browser_timer *hbt __maybe_unused,
462 struct annotation_options *annotation_options __maybe_unused)
450{ 463{
451 return 0; 464 return 0;
452} 465}
453 466
454static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused, 467static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
455 struct perf_evsel *evsel __maybe_unused, 468 struct perf_evsel *evsel __maybe_unused,
456 struct hist_browser_timer *hbt __maybe_unused) 469 struct hist_browser_timer *hbt __maybe_unused,
470 struct annotation_options *annotation_opts __maybe_unused)
457{ 471{
458 return 0; 472 return 0;
459} 473}
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index 72db2744876d..7f0c83b6332b 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -335,8 +335,7 @@ static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
335 if (!thread) 335 if (!thread)
336 return -1; 336 return -1;
337 337
338 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al); 338 if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
339 if (!al.map || !al.map->dso)
340 goto out_put; 339 goto out_put;
341 340
342 len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf, 341 len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf,
diff --git a/tools/perf/util/intel-pt-decoder/insn.h b/tools/perf/util/intel-pt-decoder/insn.h
index e23578c7b1be..2669c9f748e4 100644
--- a/tools/perf/util/intel-pt-decoder/insn.h
+++ b/tools/perf/util/intel-pt-decoder/insn.h
@@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn)
208 return insn_offset_displacement(insn) + insn->displacement.nbytes; 208 return insn_offset_displacement(insn) + insn->displacement.nbytes;
209} 209}
210 210
211#define POP_SS_OPCODE 0x1f
212#define MOV_SREG_OPCODE 0x8e
213
214/*
215 * Intel SDM Vol.3A 6.8.3 states;
216 * "Any single-step trap that would be delivered following the MOV to SS
217 * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
218 * suppressed."
219 * This function returns true if @insn is MOV SS or POP SS. On these
220 * instructions, single stepping is suppressed.
221 */
222static inline int insn_masking_exception(struct insn *insn)
223{
224 return insn->opcode.bytes[0] == POP_SS_OPCODE ||
225 (insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
226 X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
227}
228
211#endif /* _ASM_X86_INSN_H */ 229#endif /* _ASM_X86_INSN_H */
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index f9157aed1289..d404bed7003a 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -113,6 +113,7 @@ struct intel_pt_decoder {
113 bool have_cyc; 113 bool have_cyc;
114 bool fixup_last_mtc; 114 bool fixup_last_mtc;
115 bool have_last_ip; 115 bool have_last_ip;
116 enum intel_pt_param_flags flags;
116 uint64_t pos; 117 uint64_t pos;
117 uint64_t last_ip; 118 uint64_t last_ip;
118 uint64_t ip; 119 uint64_t ip;
@@ -226,6 +227,8 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
226 decoder->return_compression = params->return_compression; 227 decoder->return_compression = params->return_compression;
227 decoder->branch_enable = params->branch_enable; 228 decoder->branch_enable = params->branch_enable;
228 229
230 decoder->flags = params->flags;
231
229 decoder->period = params->period; 232 decoder->period = params->period;
230 decoder->period_type = params->period_type; 233 decoder->period_type = params->period_type;
231 234
@@ -1097,6 +1100,15 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
1097 return ret; 1100 return ret;
1098} 1101}
1099 1102
1103static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder,
1104 struct intel_pt_insn *intel_pt_insn,
1105 uint64_t ip, int err)
1106{
1107 return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err &&
1108 intel_pt_insn->branch == INTEL_PT_BR_INDIRECT &&
1109 ip == decoder->ip + intel_pt_insn->length;
1110}
1111
1100static int intel_pt_walk_fup(struct intel_pt_decoder *decoder) 1112static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
1101{ 1113{
1102 struct intel_pt_insn intel_pt_insn; 1114 struct intel_pt_insn intel_pt_insn;
@@ -1109,10 +1121,11 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
1109 err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip); 1121 err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip);
1110 if (err == INTEL_PT_RETURN) 1122 if (err == INTEL_PT_RETURN)
1111 return 0; 1123 return 0;
1112 if (err == -EAGAIN) { 1124 if (err == -EAGAIN ||
1125 intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
1113 if (intel_pt_fup_event(decoder)) 1126 if (intel_pt_fup_event(decoder))
1114 return 0; 1127 return 0;
1115 return err; 1128 return -EAGAIN;
1116 } 1129 }
1117 decoder->set_fup_tx_flags = false; 1130 decoder->set_fup_tx_flags = false;
1118 if (err) 1131 if (err)
@@ -1376,7 +1389,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
1376{ 1389{
1377 intel_pt_log("ERROR: Buffer overflow\n"); 1390 intel_pt_log("ERROR: Buffer overflow\n");
1378 intel_pt_clear_tx_flags(decoder); 1391 intel_pt_clear_tx_flags(decoder);
1379 decoder->have_tma = false;
1380 decoder->cbr = 0; 1392 decoder->cbr = 0;
1381 decoder->timestamp_insn_cnt = 0; 1393 decoder->timestamp_insn_cnt = 0;
1382 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC; 1394 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
@@ -1604,7 +1616,6 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
1604 case INTEL_PT_PSB: 1616 case INTEL_PT_PSB:
1605 case INTEL_PT_TSC: 1617 case INTEL_PT_TSC:
1606 case INTEL_PT_TMA: 1618 case INTEL_PT_TMA:
1607 case INTEL_PT_CBR:
1608 case INTEL_PT_MODE_TSX: 1619 case INTEL_PT_MODE_TSX:
1609 case INTEL_PT_BAD: 1620 case INTEL_PT_BAD:
1610 case INTEL_PT_PSBEND: 1621 case INTEL_PT_PSBEND:
@@ -1620,6 +1631,10 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
1620 decoder->pkt_step = 0; 1631 decoder->pkt_step = 0;
1621 return -ENOENT; 1632 return -ENOENT;
1622 1633
1634 case INTEL_PT_CBR:
1635 intel_pt_calc_cbr(decoder);
1636 break;
1637
1623 case INTEL_PT_OVF: 1638 case INTEL_PT_OVF:
1624 return intel_pt_overflow(decoder); 1639 return intel_pt_overflow(decoder);
1625 1640
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index fc1752d50019..51c18d67f4ca 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -60,6 +60,14 @@ enum {
60 INTEL_PT_ERR_MAX, 60 INTEL_PT_ERR_MAX,
61}; 61};
62 62
63enum intel_pt_param_flags {
64 /*
65 * FUP packet can contain next linear instruction pointer instead of
66 * current linear instruction pointer.
67 */
68 INTEL_PT_FUP_WITH_NLIP = 1 << 0,
69};
70
63struct intel_pt_state { 71struct intel_pt_state {
64 enum intel_pt_sample_type type; 72 enum intel_pt_sample_type type;
65 int err; 73 int err;
@@ -106,6 +114,7 @@ struct intel_pt_params {
106 unsigned int mtc_period; 114 unsigned int mtc_period;
107 uint32_t tsc_ctc_ratio_n; 115 uint32_t tsc_ctc_ratio_n;
108 uint32_t tsc_ctc_ratio_d; 116 uint32_t tsc_ctc_ratio_d;
117 enum intel_pt_param_flags flags;
109}; 118};
110 119
111struct intel_pt_decoder; 120struct intel_pt_decoder;
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 0effaff57020..aec68908d604 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -442,8 +442,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
442 } 442 }
443 443
444 while (1) { 444 while (1) {
445 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, *ip, &al); 445 if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
446 if (!al.map || !al.map->dso)
447 return -EINVAL; 446 return -EINVAL;
448 447
449 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR && 448 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
@@ -596,8 +595,7 @@ static int __intel_pt_pgd_ip(uint64_t ip, void *data)
596 if (!thread) 595 if (!thread)
597 return -EINVAL; 596 return -EINVAL;
598 597
599 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al); 598 if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
600 if (!al.map || !al.map->dso)
601 return -EINVAL; 599 return -EINVAL;
602 600
603 offset = al.map->map_ip(al.map, ip); 601 offset = al.map->map_ip(al.map, ip);
@@ -751,6 +749,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
751 unsigned int queue_nr) 749 unsigned int queue_nr)
752{ 750{
753 struct intel_pt_params params = { .get_trace = 0, }; 751 struct intel_pt_params params = { .get_trace = 0, };
752 struct perf_env *env = pt->machine->env;
754 struct intel_pt_queue *ptq; 753 struct intel_pt_queue *ptq;
755 754
756 ptq = zalloc(sizeof(struct intel_pt_queue)); 755 ptq = zalloc(sizeof(struct intel_pt_queue));
@@ -832,6 +831,9 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
832 } 831 }
833 } 832 }
834 833
834 if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
835 params.flags |= INTEL_PT_FUP_WITH_NLIP;
836
835 ptq->decoder = intel_pt_decoder_new(&params); 837 ptq->decoder = intel_pt_decoder_new(&params);
836 if (!ptq->decoder) 838 if (!ptq->decoder)
837 goto out_free; 839 goto out_free;
@@ -1523,6 +1525,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
1523 1525
1524 if (intel_pt_is_switch_ip(ptq, state->to_ip)) { 1526 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
1525 switch (ptq->switch_state) { 1527 switch (ptq->switch_state) {
1528 case INTEL_PT_SS_NOT_TRACING:
1526 case INTEL_PT_SS_UNKNOWN: 1529 case INTEL_PT_SS_UNKNOWN:
1527 case INTEL_PT_SS_EXPECTING_SWITCH_IP: 1530 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
1528 err = intel_pt_next_tid(pt, ptq); 1531 err = intel_pt_next_tid(pt, ptq);
@@ -1565,7 +1568,7 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
1565 if (map__load(map)) 1568 if (map__load(map))
1566 return 0; 1569 return 0;
1567 1570
1568 start = dso__first_symbol(map->dso, MAP__FUNCTION); 1571 start = dso__first_symbol(map->dso);
1569 1572
1570 for (sym = start; sym; sym = dso__next_symbol(sym)) { 1573 for (sym = start; sym; sym = dso__next_symbol(sym)) {
1571 if (sym->binding == STB_GLOBAL && 1574 if (sym->binding == STB_GLOBAL &&
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 1cca0a2fa641..976e658e38dc 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -14,11 +14,12 @@
14#include "config.h" 14#include "config.h"
15#include "util.h" 15#include "util.h"
16#include <sys/wait.h> 16#include <sys/wait.h>
17#include <subcmd/exec-cmd.h>
17 18
18#define CLANG_BPF_CMD_DEFAULT_TEMPLATE \ 19#define CLANG_BPF_CMD_DEFAULT_TEMPLATE \
19 "$CLANG_EXEC -D__KERNEL__ -D__NR_CPUS__=$NR_CPUS "\ 20 "$CLANG_EXEC -D__KERNEL__ -D__NR_CPUS__=$NR_CPUS "\
20 "-DLINUX_VERSION_CODE=$LINUX_VERSION_CODE " \ 21 "-DLINUX_VERSION_CODE=$LINUX_VERSION_CODE " \
21 "$CLANG_OPTIONS $KERNEL_INC_OPTIONS " \ 22 "$CLANG_OPTIONS $KERNEL_INC_OPTIONS $PERF_BPF_INC_OPTIONS " \
22 "-Wno-unused-value -Wno-pointer-sign " \ 23 "-Wno-unused-value -Wno-pointer-sign " \
23 "-working-directory $WORKING_DIR " \ 24 "-working-directory $WORKING_DIR " \
24 "-c \"$CLANG_SOURCE\" -target bpf -O2 -o -" 25 "-c \"$CLANG_SOURCE\" -target bpf -O2 -o -"
@@ -212,7 +213,7 @@ version_notice(void)
212" \t\thttp://llvm.org/apt\n\n" 213" \t\thttp://llvm.org/apt\n\n"
213" \tIf you are using old version of clang, change 'clang-bpf-cmd-template'\n" 214" \tIf you are using old version of clang, change 'clang-bpf-cmd-template'\n"
214" \toption in [llvm] section of ~/.perfconfig to:\n\n" 215" \toption in [llvm] section of ~/.perfconfig to:\n\n"
215" \t \"$CLANG_EXEC $CLANG_OPTIONS $KERNEL_INC_OPTIONS \\\n" 216" \t \"$CLANG_EXEC $CLANG_OPTIONS $KERNEL_INC_OPTIONS $PERF_BPF_INC_OPTIONS \\\n"
216" \t -working-directory $WORKING_DIR -c $CLANG_SOURCE \\\n" 217" \t -working-directory $WORKING_DIR -c $CLANG_SOURCE \\\n"
217" \t -emit-llvm -o - | /path/to/llc -march=bpf -filetype=obj -o -\"\n" 218" \t -emit-llvm -o - | /path/to/llc -march=bpf -filetype=obj -o -\"\n"
218" \t(Replace /path/to/llc with path to your llc)\n\n" 219" \t(Replace /path/to/llc with path to your llc)\n\n"
@@ -431,9 +432,11 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
431 const char *clang_opt = llvm_param.clang_opt; 432 const char *clang_opt = llvm_param.clang_opt;
432 char clang_path[PATH_MAX], abspath[PATH_MAX], nr_cpus_avail_str[64]; 433 char clang_path[PATH_MAX], abspath[PATH_MAX], nr_cpus_avail_str[64];
433 char serr[STRERR_BUFSIZE]; 434 char serr[STRERR_BUFSIZE];
434 char *kbuild_dir = NULL, *kbuild_include_opts = NULL; 435 char *kbuild_dir = NULL, *kbuild_include_opts = NULL,
436 *perf_bpf_include_opts = NULL;
435 const char *template = llvm_param.clang_bpf_cmd_template; 437 const char *template = llvm_param.clang_bpf_cmd_template;
436 char *command_echo, *command_out; 438 char *command_echo = NULL, *command_out;
439 char *perf_include_dir = system_path(PERF_INCLUDE_DIR);
437 440
438 if (path[0] != '-' && realpath(path, abspath) == NULL) { 441 if (path[0] != '-' && realpath(path, abspath) == NULL) {
439 err = errno; 442 err = errno;
@@ -471,12 +474,14 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
471 474
472 snprintf(linux_version_code_str, sizeof(linux_version_code_str), 475 snprintf(linux_version_code_str, sizeof(linux_version_code_str),
473 "0x%x", kernel_version); 476 "0x%x", kernel_version);
474 477 if (asprintf(&perf_bpf_include_opts, "-I%s/bpf", perf_include_dir) < 0)
478 goto errout;
475 force_set_env("NR_CPUS", nr_cpus_avail_str); 479 force_set_env("NR_CPUS", nr_cpus_avail_str);
476 force_set_env("LINUX_VERSION_CODE", linux_version_code_str); 480 force_set_env("LINUX_VERSION_CODE", linux_version_code_str);
477 force_set_env("CLANG_EXEC", clang_path); 481 force_set_env("CLANG_EXEC", clang_path);
478 force_set_env("CLANG_OPTIONS", clang_opt); 482 force_set_env("CLANG_OPTIONS", clang_opt);
479 force_set_env("KERNEL_INC_OPTIONS", kbuild_include_opts); 483 force_set_env("KERNEL_INC_OPTIONS", kbuild_include_opts);
484 force_set_env("PERF_BPF_INC_OPTIONS", perf_bpf_include_opts);
480 force_set_env("WORKING_DIR", kbuild_dir ? : "."); 485 force_set_env("WORKING_DIR", kbuild_dir ? : ".");
481 486
482 /* 487 /*
@@ -512,6 +517,8 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
512 free(command_out); 517 free(command_out);
513 free(kbuild_dir); 518 free(kbuild_dir);
514 free(kbuild_include_opts); 519 free(kbuild_include_opts);
520 free(perf_bpf_include_opts);
521 free(perf_include_dir);
515 522
516 if (!p_obj_buf) 523 if (!p_obj_buf)
517 free(obj_buf); 524 free(obj_buf);
@@ -526,6 +533,8 @@ errout:
526 free(kbuild_dir); 533 free(kbuild_dir);
527 free(kbuild_include_opts); 534 free(kbuild_include_opts);
528 free(obj_buf); 535 free(obj_buf);
536 free(perf_bpf_include_opts);
537 free(perf_include_dir);
529 if (p_obj_buf) 538 if (p_obj_buf)
530 *p_obj_buf = NULL; 539 *p_obj_buf = NULL;
531 if (p_obj_buf_sz) 540 if (p_obj_buf_sz)
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 32d50492505d..e7b4a8b513f2 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -24,6 +24,7 @@
24 24
25#include "sane_ctype.h" 25#include "sane_ctype.h"
26#include <symbol/kallsyms.h> 26#include <symbol/kallsyms.h>
27#include <linux/mman.h>
27 28
28static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock); 29static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
29 30
@@ -81,8 +82,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
81 machine->kptr_restrict_warned = false; 82 machine->kptr_restrict_warned = false;
82 machine->comm_exec = false; 83 machine->comm_exec = false;
83 machine->kernel_start = 0; 84 machine->kernel_start = 0;
84 85 machine->vmlinux_map = NULL;
85 memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));
86 86
87 machine->root_dir = strdup(root_dir); 87 machine->root_dir = strdup(root_dir);
88 if (machine->root_dir == NULL) 88 if (machine->root_dir == NULL)
@@ -137,13 +137,11 @@ struct machine *machine__new_kallsyms(void)
137 struct machine *machine = machine__new_host(); 137 struct machine *machine = machine__new_host();
138 /* 138 /*
139 * FIXME: 139 * FIXME:
140 * 1) MAP__FUNCTION will go away when we stop loading separate maps for 140 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitely
141 * functions and data objects.
142 * 2) We should switch to machine__load_kallsyms(), i.e. not explicitely
143 * ask for not using the kcore parsing code, once this one is fixed 141 * ask for not using the kcore parsing code, once this one is fixed
144 * to create a map per module. 142 * to create a map per module.
145 */ 143 */
146 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION) <= 0) { 144 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
147 machine__delete(machine); 145 machine__delete(machine);
148 machine = NULL; 146 machine = NULL;
149 } 147 }
@@ -673,8 +671,7 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
673 if (kmod_path__parse_name(&m, filename)) 671 if (kmod_path__parse_name(&m, filename))
674 return NULL; 672 return NULL;
675 673
676 map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION, 674 map = map_groups__find_by_name(&machine->kmaps, m.name);
677 m.name);
678 if (map) { 675 if (map) {
679 /* 676 /*
680 * If the map's dso is an offline module, give dso__load() 677 * If the map's dso is an offline module, give dso__load()
@@ -689,7 +686,7 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
689 if (dso == NULL) 686 if (dso == NULL)
690 goto out; 687 goto out;
691 688
692 map = map__new2(start, dso, MAP__FUNCTION); 689 map = map__new2(start, dso);
693 if (map == NULL) 690 if (map == NULL)
694 goto out; 691 goto out;
695 692
@@ -810,8 +807,8 @@ struct process_args {
810 u64 start; 807 u64 start;
811}; 808};
812 809
813static void machine__get_kallsyms_filename(struct machine *machine, char *buf, 810void machine__get_kallsyms_filename(struct machine *machine, char *buf,
814 size_t bufsz) 811 size_t bufsz)
815{ 812{
816 if (machine__is_default_guest(machine)) 813 if (machine__is_default_guest(machine))
817 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms); 814 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
@@ -854,65 +851,171 @@ static int machine__get_running_kernel_start(struct machine *machine,
854 return 0; 851 return 0;
855} 852}
856 853
854int machine__create_extra_kernel_map(struct machine *machine,
855 struct dso *kernel,
856 struct extra_kernel_map *xm)
857{
858 struct kmap *kmap;
859 struct map *map;
860
861 map = map__new2(xm->start, kernel);
862 if (!map)
863 return -1;
864
865 map->end = xm->end;
866 map->pgoff = xm->pgoff;
867
868 kmap = map__kmap(map);
869
870 kmap->kmaps = &machine->kmaps;
871 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
872
873 map_groups__insert(&machine->kmaps, map);
874
875 pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
876 kmap->name, map->start, map->end);
877
878 map__put(map);
879
880 return 0;
881}
882
883static u64 find_entry_trampoline(struct dso *dso)
884{
885 /* Duplicates are removed so lookup all aliases */
886 const char *syms[] = {
887 "_entry_trampoline",
888 "__entry_trampoline_start",
889 "entry_SYSCALL_64_trampoline",
890 };
891 struct symbol *sym = dso__first_symbol(dso);
892 unsigned int i;
893
894 for (; sym; sym = dso__next_symbol(sym)) {
895 if (sym->binding != STB_GLOBAL)
896 continue;
897 for (i = 0; i < ARRAY_SIZE(syms); i++) {
898 if (!strcmp(sym->name, syms[i]))
899 return sym->start;
900 }
901 }
902
903 return 0;
904}
905
906/*
907 * These values can be used for kernels that do not have symbols for the entry
908 * trampolines in kallsyms.
909 */
910#define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
911#define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
912#define X86_64_ENTRY_TRAMPOLINE 0x6000
913
914/* Map x86_64 PTI entry trampolines */
915int machine__map_x86_64_entry_trampolines(struct machine *machine,
916 struct dso *kernel)
917{
918 struct map_groups *kmaps = &machine->kmaps;
919 struct maps *maps = &kmaps->maps;
920 int nr_cpus_avail, cpu;
921 bool found = false;
922 struct map *map;
923 u64 pgoff;
924
925 /*
926 * In the vmlinux case, pgoff is a virtual address which must now be
927 * mapped to a vmlinux offset.
928 */
929 for (map = maps__first(maps); map; map = map__next(map)) {
930 struct kmap *kmap = __map__kmap(map);
931 struct map *dest_map;
932
933 if (!kmap || !is_entry_trampoline(kmap->name))
934 continue;
935
936 dest_map = map_groups__find(kmaps, map->pgoff);
937 if (dest_map != map)
938 map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
939 found = true;
940 }
941 if (found || machine->trampolines_mapped)
942 return 0;
943
944 pgoff = find_entry_trampoline(kernel);
945 if (!pgoff)
946 return 0;
947
948 nr_cpus_avail = machine__nr_cpus_avail(machine);
949
950 /* Add a 1 page map for each CPU's entry trampoline */
951 for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
952 u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
953 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
954 X86_64_ENTRY_TRAMPOLINE;
955 struct extra_kernel_map xm = {
956 .start = va,
957 .end = va + page_size,
958 .pgoff = pgoff,
959 };
960
961 strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
962
963 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
964 return -1;
965 }
966
967 machine->trampolines_mapped = nr_cpus_avail;
968
969 return 0;
970}
971
972int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
973 struct dso *kernel __maybe_unused)
974{
975 return 0;
976}
977
857static int 978static int
858__machine__create_kernel_maps(struct machine *machine, struct dso *kernel) 979__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
859{ 980{
860 int type; 981 struct kmap *kmap;
982 struct map *map;
861 983
862 /* In case of renewal the kernel map, destroy previous one */ 984 /* In case of renewal the kernel map, destroy previous one */
863 machine__destroy_kernel_maps(machine); 985 machine__destroy_kernel_maps(machine);
864 986
865 for (type = 0; type < MAP__NR_TYPES; ++type) { 987 machine->vmlinux_map = map__new2(0, kernel);
866 struct kmap *kmap; 988 if (machine->vmlinux_map == NULL)
867 struct map *map; 989 return -1;
868
869 machine->vmlinux_maps[type] = map__new2(0, kernel, type);
870 if (machine->vmlinux_maps[type] == NULL)
871 return -1;
872 990
873 machine->vmlinux_maps[type]->map_ip = 991 machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
874 machine->vmlinux_maps[type]->unmap_ip = 992 map = machine__kernel_map(machine);
875 identity__map_ip; 993 kmap = map__kmap(map);
876 map = __machine__kernel_map(machine, type); 994 if (!kmap)
877 kmap = map__kmap(map); 995 return -1;
878 if (!kmap)
879 return -1;
880 996
881 kmap->kmaps = &machine->kmaps; 997 kmap->kmaps = &machine->kmaps;
882 map_groups__insert(&machine->kmaps, map); 998 map_groups__insert(&machine->kmaps, map);
883 }
884 999
885 return 0; 1000 return 0;
886} 1001}
887 1002
888void machine__destroy_kernel_maps(struct machine *machine) 1003void machine__destroy_kernel_maps(struct machine *machine)
889{ 1004{
890 int type; 1005 struct kmap *kmap;
891 1006 struct map *map = machine__kernel_map(machine);
892 for (type = 0; type < MAP__NR_TYPES; ++type) {
893 struct kmap *kmap;
894 struct map *map = __machine__kernel_map(machine, type);
895
896 if (map == NULL)
897 continue;
898 1007
899 kmap = map__kmap(map); 1008 if (map == NULL)
900 map_groups__remove(&machine->kmaps, map); 1009 return;
901 if (kmap && kmap->ref_reloc_sym) {
902 /*
903 * ref_reloc_sym is shared among all maps, so free just
904 * on one of them.
905 */
906 if (type == MAP__FUNCTION) {
907 zfree((char **)&kmap->ref_reloc_sym->name);
908 zfree(&kmap->ref_reloc_sym);
909 } else
910 kmap->ref_reloc_sym = NULL;
911 }
912 1010
913 map__put(machine->vmlinux_maps[type]); 1011 kmap = map__kmap(map);
914 machine->vmlinux_maps[type] = NULL; 1012 map_groups__remove(&machine->kmaps, map);
1013 if (kmap && kmap->ref_reloc_sym) {
1014 zfree((char **)&kmap->ref_reloc_sym->name);
1015 zfree(&kmap->ref_reloc_sym);
915 } 1016 }
1017
1018 map__zput(machine->vmlinux_map);
916} 1019}
917 1020
918int machines__create_guest_kernel_maps(struct machines *machines) 1021int machines__create_guest_kernel_maps(struct machines *machines)
@@ -989,32 +1092,31 @@ int machines__create_kernel_maps(struct machines *machines, pid_t pid)
989 return machine__create_kernel_maps(machine); 1092 return machine__create_kernel_maps(machine);
990} 1093}
991 1094
992int machine__load_kallsyms(struct machine *machine, const char *filename, 1095int machine__load_kallsyms(struct machine *machine, const char *filename)
993 enum map_type type)
994{ 1096{
995 struct map *map = machine__kernel_map(machine); 1097 struct map *map = machine__kernel_map(machine);
996 int ret = __dso__load_kallsyms(map->dso, filename, map, true); 1098 int ret = __dso__load_kallsyms(map->dso, filename, map, true);
997 1099
998 if (ret > 0) { 1100 if (ret > 0) {
999 dso__set_loaded(map->dso, type); 1101 dso__set_loaded(map->dso);
1000 /* 1102 /*
1001 * Since /proc/kallsyms will have multiple sessions for the 1103 * Since /proc/kallsyms will have multiple sessions for the
1002 * kernel, with modules between them, fixup the end of all 1104 * kernel, with modules between them, fixup the end of all
1003 * sections. 1105 * sections.
1004 */ 1106 */
1005 __map_groups__fixup_end(&machine->kmaps, type); 1107 map_groups__fixup_end(&machine->kmaps);
1006 } 1108 }
1007 1109
1008 return ret; 1110 return ret;
1009} 1111}
1010 1112
1011int machine__load_vmlinux_path(struct machine *machine, enum map_type type) 1113int machine__load_vmlinux_path(struct machine *machine)
1012{ 1114{
1013 struct map *map = machine__kernel_map(machine); 1115 struct map *map = machine__kernel_map(machine);
1014 int ret = dso__load_vmlinux_path(map->dso, map); 1116 int ret = dso__load_vmlinux_path(map->dso, map);
1015 1117
1016 if (ret > 0) 1118 if (ret > 0)
1017 dso__set_loaded(map->dso, type); 1119 dso__set_loaded(map->dso);
1018 1120
1019 return ret; 1121 return ret;
1020} 1122}
@@ -1055,10 +1157,9 @@ static bool is_kmod_dso(struct dso *dso)
1055static int map_groups__set_module_path(struct map_groups *mg, const char *path, 1157static int map_groups__set_module_path(struct map_groups *mg, const char *path,
1056 struct kmod_path *m) 1158 struct kmod_path *m)
1057{ 1159{
1058 struct map *map;
1059 char *long_name; 1160 char *long_name;
1161 struct map *map = map_groups__find_by_name(mg, m->name);
1060 1162
1061 map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
1062 if (map == NULL) 1163 if (map == NULL)
1063 return 0; 1164 return 0;
1064 1165
@@ -1207,19 +1308,14 @@ static int machine__create_modules(struct machine *machine)
1207static void machine__set_kernel_mmap(struct machine *machine, 1308static void machine__set_kernel_mmap(struct machine *machine,
1208 u64 start, u64 end) 1309 u64 start, u64 end)
1209{ 1310{
1210 int i; 1311 machine->vmlinux_map->start = start;
1211 1312 machine->vmlinux_map->end = end;
1212 for (i = 0; i < MAP__NR_TYPES; i++) { 1313 /*
1213 machine->vmlinux_maps[i]->start = start; 1314 * Be a bit paranoid here, some perf.data file came with
1214 machine->vmlinux_maps[i]->end = end; 1315 * a zero sized synthesized MMAP event for the kernel.
1215 1316 */
1216 /* 1317 if (start == 0 && end == 0)
1217 * Be a bit paranoid here, some perf.data file came with 1318 machine->vmlinux_map->end = ~0ULL;
1218 * a zero sized synthesized MMAP event for the kernel.
1219 */
1220 if (start == 0 && end == 0)
1221 machine->vmlinux_maps[i]->end = ~0ULL;
1222 }
1223} 1319}
1224 1320
1225int machine__create_kernel_maps(struct machine *machine) 1321int machine__create_kernel_maps(struct machine *machine)
@@ -1234,9 +1330,8 @@ int machine__create_kernel_maps(struct machine *machine)
1234 return -1; 1330 return -1;
1235 1331
1236 ret = __machine__create_kernel_maps(machine, kernel); 1332 ret = __machine__create_kernel_maps(machine, kernel);
1237 dso__put(kernel);
1238 if (ret < 0) 1333 if (ret < 0)
1239 return -1; 1334 goto out_put;
1240 1335
1241 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { 1336 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1242 if (machine__is_host(machine)) 1337 if (machine__is_host(machine))
@@ -1249,9 +1344,10 @@ int machine__create_kernel_maps(struct machine *machine)
1249 1344
1250 if (!machine__get_running_kernel_start(machine, &name, &addr)) { 1345 if (!machine__get_running_kernel_start(machine, &name, &addr)) {
1251 if (name && 1346 if (name &&
1252 maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) { 1347 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, addr)) {
1253 machine__destroy_kernel_maps(machine); 1348 machine__destroy_kernel_maps(machine);
1254 return -1; 1349 ret = -1;
1350 goto out_put;
1255 } 1351 }
1256 1352
1257 /* we have a real start address now, so re-order the kmaps */ 1353 /* we have a real start address now, so re-order the kmaps */
@@ -1267,12 +1363,16 @@ int machine__create_kernel_maps(struct machine *machine)
1267 map__put(map); 1363 map__put(map);
1268 } 1364 }
1269 1365
1366 if (machine__create_extra_kernel_maps(machine, kernel))
1367 pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1368
1270 /* update end address of the kernel map using adjacent module address */ 1369 /* update end address of the kernel map using adjacent module address */
1271 map = map__next(machine__kernel_map(machine)); 1370 map = map__next(machine__kernel_map(machine));
1272 if (map) 1371 if (map)
1273 machine__set_kernel_mmap(machine, addr, map->start); 1372 machine__set_kernel_mmap(machine, addr, map->start);
1274 1373out_put:
1275 return 0; 1374 dso__put(kernel);
1375 return ret;
1276} 1376}
1277 1377
1278static bool machine__uses_kcore(struct machine *machine) 1378static bool machine__uses_kcore(struct machine *machine)
@@ -1287,6 +1387,32 @@ static bool machine__uses_kcore(struct machine *machine)
1287 return false; 1387 return false;
1288} 1388}
1289 1389
1390static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1391 union perf_event *event)
1392{
1393 return machine__is(machine, "x86_64") &&
1394 is_entry_trampoline(event->mmap.filename);
1395}
1396
1397static int machine__process_extra_kernel_map(struct machine *machine,
1398 union perf_event *event)
1399{
1400 struct map *kernel_map = machine__kernel_map(machine);
1401 struct dso *kernel = kernel_map ? kernel_map->dso : NULL;
1402 struct extra_kernel_map xm = {
1403 .start = event->mmap.start,
1404 .end = event->mmap.start + event->mmap.len,
1405 .pgoff = event->mmap.pgoff,
1406 };
1407
1408 if (kernel == NULL)
1409 return -1;
1410
1411 strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1412
1413 return machine__create_extra_kernel_map(machine, kernel, &xm);
1414}
1415
1290static int machine__process_kernel_mmap_event(struct machine *machine, 1416static int machine__process_kernel_mmap_event(struct machine *machine,
1291 union perf_event *event) 1417 union perf_event *event)
1292{ 1418{
@@ -1379,9 +1505,9 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
1379 * time /proc/sys/kernel/kptr_restrict was non zero. 1505 * time /proc/sys/kernel/kptr_restrict was non zero.
1380 */ 1506 */
1381 if (event->mmap.pgoff != 0) { 1507 if (event->mmap.pgoff != 0) {
1382 maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, 1508 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1383 symbol_name, 1509 symbol_name,
1384 event->mmap.pgoff); 1510 event->mmap.pgoff);
1385 } 1511 }
1386 1512
1387 if (machine__is_default_guest(machine)) { 1513 if (machine__is_default_guest(machine)) {
@@ -1390,6 +1516,8 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
1390 */ 1516 */
1391 dso__load(kernel, machine__kernel_map(machine)); 1517 dso__load(kernel, machine__kernel_map(machine));
1392 } 1518 }
1519 } else if (perf_event__is_extra_kernel_mmap(machine, event)) {
1520 return machine__process_extra_kernel_map(machine, event);
1393 } 1521 }
1394 return 0; 1522 return 0;
1395out_problem: 1523out_problem:
@@ -1402,7 +1530,6 @@ int machine__process_mmap2_event(struct machine *machine,
1402{ 1530{
1403 struct thread *thread; 1531 struct thread *thread;
1404 struct map *map; 1532 struct map *map;
1405 enum map_type type;
1406 int ret = 0; 1533 int ret = 0;
1407 1534
1408 if (dump_trace) 1535 if (dump_trace)
@@ -1421,11 +1548,6 @@ int machine__process_mmap2_event(struct machine *machine,
1421 if (thread == NULL) 1548 if (thread == NULL)
1422 goto out_problem; 1549 goto out_problem;
1423 1550
1424 if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
1425 type = MAP__VARIABLE;
1426 else
1427 type = MAP__FUNCTION;
1428
1429 map = map__new(machine, event->mmap2.start, 1551 map = map__new(machine, event->mmap2.start,
1430 event->mmap2.len, event->mmap2.pgoff, 1552 event->mmap2.len, event->mmap2.pgoff,
1431 event->mmap2.maj, 1553 event->mmap2.maj,
@@ -1433,7 +1555,7 @@ int machine__process_mmap2_event(struct machine *machine,
1433 event->mmap2.ino_generation, 1555 event->mmap2.ino_generation,
1434 event->mmap2.prot, 1556 event->mmap2.prot,
1435 event->mmap2.flags, 1557 event->mmap2.flags,
1436 event->mmap2.filename, type, thread); 1558 event->mmap2.filename, thread);
1437 1559
1438 if (map == NULL) 1560 if (map == NULL)
1439 goto out_problem_map; 1561 goto out_problem_map;
@@ -1460,7 +1582,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
1460{ 1582{
1461 struct thread *thread; 1583 struct thread *thread;
1462 struct map *map; 1584 struct map *map;
1463 enum map_type type; 1585 u32 prot = 0;
1464 int ret = 0; 1586 int ret = 0;
1465 1587
1466 if (dump_trace) 1588 if (dump_trace)
@@ -1479,16 +1601,14 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
1479 if (thread == NULL) 1601 if (thread == NULL)
1480 goto out_problem; 1602 goto out_problem;
1481 1603
1482 if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) 1604 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
1483 type = MAP__VARIABLE; 1605 prot = PROT_EXEC;
1484 else
1485 type = MAP__FUNCTION;
1486 1606
1487 map = map__new(machine, event->mmap.start, 1607 map = map__new(machine, event->mmap.start,
1488 event->mmap.len, event->mmap.pgoff, 1608 event->mmap.len, event->mmap.pgoff,
1489 0, 0, 0, 0, 0, 0, 1609 0, 0, 0, 0, prot, 0,
1490 event->mmap.filename, 1610 event->mmap.filename,
1491 type, thread); 1611 thread);
1492 1612
1493 if (map == NULL) 1613 if (map == NULL)
1494 goto out_problem_map; 1614 goto out_problem_map;
@@ -1664,7 +1784,7 @@ static void ip__resolve_ams(struct thread *thread,
1664 * Thus, we have to try consecutively until we find a match 1784 * Thus, we have to try consecutively until we find a match
1665 * or else, the symbol is unknown 1785 * or else, the symbol is unknown
1666 */ 1786 */
1667 thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al); 1787 thread__find_cpumode_addr_location(thread, ip, &al);
1668 1788
1669 ams->addr = ip; 1789 ams->addr = ip;
1670 ams->al_addr = al.addr; 1790 ams->al_addr = al.addr;
@@ -1681,15 +1801,7 @@ static void ip__resolve_data(struct thread *thread,
1681 1801
1682 memset(&al, 0, sizeof(al)); 1802 memset(&al, 0, sizeof(al));
1683 1803
1684 thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al); 1804 thread__find_symbol(thread, m, addr, &al);
1685 if (al.map == NULL) {
1686 /*
1687 * some shared data regions have execute bit set which puts
1688 * their mapping in the MAP__FUNCTION type array.
1689 * Check there as a fallback option before dropping the sample.
1690 */
1691 thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
1692 }
1693 1805
1694 ams->addr = addr; 1806 ams->addr = addr;
1695 ams->al_addr = al.addr; 1807 ams->al_addr = al.addr;
@@ -1758,8 +1870,7 @@ static int add_callchain_ip(struct thread *thread,
1758 al.filtered = 0; 1870 al.filtered = 0;
1759 al.sym = NULL; 1871 al.sym = NULL;
1760 if (!cpumode) { 1872 if (!cpumode) {
1761 thread__find_cpumode_addr_location(thread, MAP__FUNCTION, 1873 thread__find_cpumode_addr_location(thread, ip, &al);
1762 ip, &al);
1763 } else { 1874 } else {
1764 if (ip >= PERF_CONTEXT_MAX) { 1875 if (ip >= PERF_CONTEXT_MAX) {
1765 switch (ip) { 1876 switch (ip) {
@@ -1784,8 +1895,7 @@ static int add_callchain_ip(struct thread *thread,
1784 } 1895 }
1785 return 0; 1896 return 0;
1786 } 1897 }
1787 thread__find_addr_location(thread, *cpumode, MAP__FUNCTION, 1898 thread__find_symbol(thread, *cpumode, ip, &al);
1788 ip, &al);
1789 } 1899 }
1790 1900
1791 if (al.sym != NULL) { 1901 if (al.sym != NULL) {
@@ -1810,7 +1920,7 @@ static int add_callchain_ip(struct thread *thread,
1810 } 1920 }
1811 1921
1812 srcline = callchain_srcline(al.map, al.sym, al.addr); 1922 srcline = callchain_srcline(al.map, al.sym, al.addr);
1813 return callchain_cursor_append(cursor, al.addr, al.map, al.sym, 1923 return callchain_cursor_append(cursor, ip, al.map, al.sym,
1814 branch, flags, nr_loop_iter, 1924 branch, flags, nr_loop_iter,
1815 iter_cycles, branch_from, srcline); 1925 iter_cycles, branch_from, srcline);
1816} 1926}
@@ -2342,6 +2452,20 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
2342 return 0; 2452 return 0;
2343} 2453}
2344 2454
2455/*
2456 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
2457 * normalized arch is needed.
2458 */
2459bool machine__is(struct machine *machine, const char *arch)
2460{
2461 return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
2462}
2463
2464int machine__nr_cpus_avail(struct machine *machine)
2465{
2466 return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
2467}
2468
2345int machine__get_kernel_start(struct machine *machine) 2469int machine__get_kernel_start(struct machine *machine)
2346{ 2470{
2347 struct map *map = machine__kernel_map(machine); 2471 struct map *map = machine__kernel_map(machine);
@@ -2358,7 +2482,12 @@ int machine__get_kernel_start(struct machine *machine)
2358 machine->kernel_start = 1ULL << 63; 2482 machine->kernel_start = 1ULL << 63;
2359 if (map) { 2483 if (map) {
2360 err = map__load(map); 2484 err = map__load(map);
2361 if (!err) 2485 /*
2486 * On x86_64, PTI entry trampolines are less than the
2487 * start of kernel text, but still above 2^63. So leave
2488 * kernel_start = 1ULL << 63 for x86_64.
2489 */
2490 if (!err && !machine__is(machine, "x86_64"))
2362 machine->kernel_start = map->start; 2491 machine->kernel_start = map->start;
2363 } 2492 }
2364 return err; 2493 return err;
@@ -2373,7 +2502,7 @@ char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, ch
2373{ 2502{
2374 struct machine *machine = vmachine; 2503 struct machine *machine = vmachine;
2375 struct map *map; 2504 struct map *map;
2376 struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map); 2505 struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
2377 2506
2378 if (sym == NULL) 2507 if (sym == NULL)
2379 return NULL; 2508 return NULL;
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 66cc200ef86f..1de7660d93e9 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -49,13 +49,14 @@ struct machine {
49 struct perf_env *env; 49 struct perf_env *env;
50 struct dsos dsos; 50 struct dsos dsos;
51 struct map_groups kmaps; 51 struct map_groups kmaps;
52 struct map *vmlinux_maps[MAP__NR_TYPES]; 52 struct map *vmlinux_map;
53 u64 kernel_start; 53 u64 kernel_start;
54 pid_t *current_tid; 54 pid_t *current_tid;
55 union { /* Tool specific area */ 55 union { /* Tool specific area */
56 void *priv; 56 void *priv;
57 u64 db_id; 57 u64 db_id;
58 }; 58 };
59 bool trampolines_mapped;
59}; 60};
60 61
61static inline struct threads *machine__threads(struct machine *machine, pid_t tid) 62static inline struct threads *machine__threads(struct machine *machine, pid_t tid)
@@ -64,16 +65,22 @@ static inline struct threads *machine__threads(struct machine *machine, pid_t ti
64 return &machine->threads[(unsigned int)tid % THREADS__TABLE_SIZE]; 65 return &machine->threads[(unsigned int)tid % THREADS__TABLE_SIZE];
65} 66}
66 67
68/*
69 * The main kernel (vmlinux) map
70 */
67static inline 71static inline
68struct map *__machine__kernel_map(struct machine *machine, enum map_type type) 72struct map *machine__kernel_map(struct machine *machine)
69{ 73{
70 return machine->vmlinux_maps[type]; 74 return machine->vmlinux_map;
71} 75}
72 76
77/*
78 * kernel (the one returned by machine__kernel_map()) plus kernel modules maps
79 */
73static inline 80static inline
74struct map *machine__kernel_map(struct machine *machine) 81struct maps *machine__kernel_maps(struct machine *machine)
75{ 82{
76 return __machine__kernel_map(machine, MAP__FUNCTION); 83 return &machine->kmaps.maps;
77} 84}
78 85
79int machine__get_kernel_start(struct machine *machine); 86int machine__get_kernel_start(struct machine *machine);
@@ -182,6 +189,9 @@ static inline bool machine__is_host(struct machine *machine)
182 return machine ? machine->pid == HOST_KERNEL_ID : false; 189 return machine ? machine->pid == HOST_KERNEL_ID : false;
183} 190}
184 191
192bool machine__is(struct machine *machine, const char *arch);
193int machine__nr_cpus_avail(struct machine *machine);
194
185struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); 195struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
186struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); 196struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
187 197
@@ -190,44 +200,27 @@ struct dso *machine__findnew_dso(struct machine *machine, const char *filename);
190size_t machine__fprintf(struct machine *machine, FILE *fp); 200size_t machine__fprintf(struct machine *machine, FILE *fp);
191 201
192static inline 202static inline
193struct symbol *machine__find_kernel_symbol(struct machine *machine, 203struct symbol *machine__find_kernel_symbol(struct machine *machine, u64 addr,
194 enum map_type type, u64 addr,
195 struct map **mapp) 204 struct map **mapp)
196{ 205{
197 return map_groups__find_symbol(&machine->kmaps, type, addr, mapp); 206 return map_groups__find_symbol(&machine->kmaps, addr, mapp);
198} 207}
199 208
200static inline 209static inline
201struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine, 210struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
202 enum map_type type, const char *name, 211 const char *name,
203 struct map **mapp) 212 struct map **mapp)
204{ 213{
205 return map_groups__find_symbol_by_name(&machine->kmaps, type, name, mapp); 214 return map_groups__find_symbol_by_name(&machine->kmaps, name, mapp);
206}
207
208static inline
209struct symbol *machine__find_kernel_function(struct machine *machine, u64 addr,
210 struct map **mapp)
211{
212 return machine__find_kernel_symbol(machine, MAP__FUNCTION, addr,
213 mapp);
214}
215
216static inline
217struct symbol *machine__find_kernel_function_by_name(struct machine *machine,
218 const char *name,
219 struct map **mapp)
220{
221 return map_groups__find_function_by_name(&machine->kmaps, name, mapp);
222} 215}
223 216
224struct map *machine__findnew_module_map(struct machine *machine, u64 start, 217struct map *machine__findnew_module_map(struct machine *machine, u64 start,
225 const char *filename); 218 const char *filename);
226int arch__fix_module_text_start(u64 *start, const char *name); 219int arch__fix_module_text_start(u64 *start, const char *name);
227 220
228int machine__load_kallsyms(struct machine *machine, const char *filename, 221int machine__load_kallsyms(struct machine *machine, const char *filename);
229 enum map_type type); 222
230int machine__load_vmlinux_path(struct machine *machine, enum map_type type); 223int machine__load_vmlinux_path(struct machine *machine);
231 224
232size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, 225size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
233 bool (skip)(struct dso *dso, int parm), int parm); 226 bool (skip)(struct dso *dso, int parm), int parm);
@@ -276,4 +269,25 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
276 */ 269 */
277char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp); 270char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp);
278 271
272void machine__get_kallsyms_filename(struct machine *machine, char *buf,
273 size_t bufsz);
274
275int machine__create_extra_kernel_maps(struct machine *machine,
276 struct dso *kernel);
277
278/* Kernel-space maps for symbols that are outside the main kernel map and module maps */
279struct extra_kernel_map {
280 u64 start;
281 u64 end;
282 u64 pgoff;
283 char name[KMAP_NAME_LEN];
284};
285
286int machine__create_extra_kernel_map(struct machine *machine,
287 struct dso *kernel,
288 struct extra_kernel_map *xm);
289
290int machine__map_x86_64_entry_trampolines(struct machine *machine,
291 struct dso *kernel);
292
279#endif /* __PERF_MACHINE_H */ 293#endif /* __PERF_MACHINE_H */
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 8fe57031e1a8..89ac5b5dc218 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -22,11 +22,6 @@
22 22
23static void __maps__insert(struct maps *maps, struct map *map); 23static void __maps__insert(struct maps *maps, struct map *map);
24 24
25const char *map_type__name[MAP__NR_TYPES] = {
26 [MAP__FUNCTION] = "Functions",
27 [MAP__VARIABLE] = "Variables",
28};
29
30static inline int is_anon_memory(const char *filename, u32 flags) 25static inline int is_anon_memory(const char *filename, u32 flags)
31{ 26{
32 return flags & MAP_HUGETLB || 27 return flags & MAP_HUGETLB ||
@@ -129,10 +124,8 @@ static inline bool replace_android_lib(const char *filename, char *newfilename)
129 return false; 124 return false;
130} 125}
131 126
132void map__init(struct map *map, enum map_type type, 127void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
133 u64 start, u64 end, u64 pgoff, struct dso *dso)
134{ 128{
135 map->type = type;
136 map->start = start; 129 map->start = start;
137 map->end = end; 130 map->end = end;
138 map->pgoff = pgoff; 131 map->pgoff = pgoff;
@@ -149,7 +142,7 @@ void map__init(struct map *map, enum map_type type,
149struct map *map__new(struct machine *machine, u64 start, u64 len, 142struct map *map__new(struct machine *machine, u64 start, u64 len,
150 u64 pgoff, u32 d_maj, u32 d_min, u64 ino, 143 u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
151 u64 ino_gen, u32 prot, u32 flags, char *filename, 144 u64 ino_gen, u32 prot, u32 flags, char *filename,
152 enum map_type type, struct thread *thread) 145 struct thread *thread)
153{ 146{
154 struct map *map = malloc(sizeof(*map)); 147 struct map *map = malloc(sizeof(*map));
155 struct nsinfo *nsi = NULL; 148 struct nsinfo *nsi = NULL;
@@ -173,7 +166,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
173 map->flags = flags; 166 map->flags = flags;
174 nsi = nsinfo__get(thread->nsinfo); 167 nsi = nsinfo__get(thread->nsinfo);
175 168
176 if ((anon || no_dso) && nsi && type == MAP__FUNCTION) { 169 if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
177 snprintf(newfilename, sizeof(newfilename), 170 snprintf(newfilename, sizeof(newfilename),
178 "/tmp/perf-%d.map", nsi->pid); 171 "/tmp/perf-%d.map", nsi->pid);
179 filename = newfilename; 172 filename = newfilename;
@@ -203,7 +196,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
203 if (dso == NULL) 196 if (dso == NULL)
204 goto out_delete; 197 goto out_delete;
205 198
206 map__init(map, type, start, start + len, pgoff, dso); 199 map__init(map, start, start + len, pgoff, dso);
207 200
208 if (anon || no_dso) { 201 if (anon || no_dso) {
209 map->map_ip = map->unmap_ip = identity__map_ip; 202 map->map_ip = map->unmap_ip = identity__map_ip;
@@ -213,8 +206,8 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
213 * functions still return NULL, and we avoid the 206 * functions still return NULL, and we avoid the
214 * unnecessary map__load warning. 207 * unnecessary map__load warning.
215 */ 208 */
216 if (type != MAP__FUNCTION) 209 if (!(prot & PROT_EXEC))
217 dso__set_loaded(dso, map->type); 210 dso__set_loaded(dso);
218 } 211 }
219 dso->nsinfo = nsi; 212 dso->nsinfo = nsi;
220 dso__put(dso); 213 dso__put(dso);
@@ -231,7 +224,7 @@ out_delete:
231 * they are loaded) and for vmlinux, where only after we load all the 224 * they are loaded) and for vmlinux, where only after we load all the
232 * symbols we'll know where it starts and ends. 225 * symbols we'll know where it starts and ends.
233 */ 226 */
234struct map *map__new2(u64 start, struct dso *dso, enum map_type type) 227struct map *map__new2(u64 start, struct dso *dso)
235{ 228{
236 struct map *map = calloc(1, (sizeof(*map) + 229 struct map *map = calloc(1, (sizeof(*map) +
237 (dso->kernel ? sizeof(struct kmap) : 0))); 230 (dso->kernel ? sizeof(struct kmap) : 0)));
@@ -239,7 +232,7 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
239 /* 232 /*
240 * ->end will be filled after we load all the symbols 233 * ->end will be filled after we load all the symbols
241 */ 234 */
242 map__init(map, type, start, 0, 0, dso); 235 map__init(map, start, 0, 0, dso);
243 } 236 }
244 237
245 return map; 238 return map;
@@ -256,7 +249,19 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
256 */ 249 */
257bool __map__is_kernel(const struct map *map) 250bool __map__is_kernel(const struct map *map)
258{ 251{
259 return __machine__kernel_map(map->groups->machine, map->type) == map; 252 return machine__kernel_map(map->groups->machine) == map;
253}
254
255bool __map__is_extra_kernel_map(const struct map *map)
256{
257 struct kmap *kmap = __map__kmap((struct map *)map);
258
259 return kmap && kmap->name[0];
260}
261
262bool map__has_symbols(const struct map *map)
263{
264 return dso__has_symbols(map->dso);
260} 265}
261 266
262static void map__exit(struct map *map) 267static void map__exit(struct map *map)
@@ -279,7 +284,7 @@ void map__put(struct map *map)
279 284
280void map__fixup_start(struct map *map) 285void map__fixup_start(struct map *map)
281{ 286{
282 struct rb_root *symbols = &map->dso->symbols[map->type]; 287 struct rb_root *symbols = &map->dso->symbols;
283 struct rb_node *nd = rb_first(symbols); 288 struct rb_node *nd = rb_first(symbols);
284 if (nd != NULL) { 289 if (nd != NULL) {
285 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); 290 struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
@@ -289,7 +294,7 @@ void map__fixup_start(struct map *map)
289 294
290void map__fixup_end(struct map *map) 295void map__fixup_end(struct map *map)
291{ 296{
292 struct rb_root *symbols = &map->dso->symbols[map->type]; 297 struct rb_root *symbols = &map->dso->symbols;
293 struct rb_node *nd = rb_last(symbols); 298 struct rb_node *nd = rb_last(symbols);
294 if (nd != NULL) { 299 if (nd != NULL) {
295 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); 300 struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
@@ -304,7 +309,7 @@ int map__load(struct map *map)
304 const char *name = map->dso->long_name; 309 const char *name = map->dso->long_name;
305 int nr; 310 int nr;
306 311
307 if (dso__loaded(map->dso, map->type)) 312 if (dso__loaded(map->dso))
308 return 0; 313 return 0;
309 314
310 nr = dso__load(map->dso, map); 315 nr = dso__load(map->dso, map);
@@ -348,7 +353,7 @@ struct symbol *map__find_symbol(struct map *map, u64 addr)
348 if (map__load(map) < 0) 353 if (map__load(map) < 0)
349 return NULL; 354 return NULL;
350 355
351 return dso__find_symbol(map->dso, map->type, addr); 356 return dso__find_symbol(map->dso, addr);
352} 357}
353 358
354struct symbol *map__find_symbol_by_name(struct map *map, const char *name) 359struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
@@ -356,10 +361,10 @@ struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
356 if (map__load(map) < 0) 361 if (map__load(map) < 0)
357 return NULL; 362 return NULL;
358 363
359 if (!dso__sorted_by_name(map->dso, map->type)) 364 if (!dso__sorted_by_name(map->dso))
360 dso__sort_by_name(map->dso, map->type); 365 dso__sort_by_name(map->dso);
361 366
362 return dso__find_symbol_by_name(map->dso, map->type, name); 367 return dso__find_symbol_by_name(map->dso, name);
363} 368}
364 369
365struct map *map__clone(struct map *from) 370struct map *map__clone(struct map *from)
@@ -410,16 +415,20 @@ size_t map__fprintf_dsoname(struct map *map, FILE *fp)
410 return fprintf(fp, "%s", dsoname); 415 return fprintf(fp, "%s", dsoname);
411} 416}
412 417
418char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
419{
420 if (map == NULL)
421 return SRCLINE_UNKNOWN;
422 return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr);
423}
424
413int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, 425int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
414 FILE *fp) 426 FILE *fp)
415{ 427{
416 char *srcline;
417 int ret = 0; 428 int ret = 0;
418 429
419 if (map && map->dso) { 430 if (map && map->dso) {
420 srcline = get_srcline(map->dso, 431 char *srcline = map__srcline(map, addr, NULL);
421 map__rip_2objdump(map, addr), NULL,
422 true, true, addr);
423 if (srcline != SRCLINE_UNKNOWN) 432 if (srcline != SRCLINE_UNKNOWN)
424 ret = fprintf(fp, "%s%s", prefix, srcline); 433 ret = fprintf(fp, "%s%s", prefix, srcline);
425 free_srcline(srcline); 434 free_srcline(srcline);
@@ -440,6 +449,20 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
440 */ 449 */
441u64 map__rip_2objdump(struct map *map, u64 rip) 450u64 map__rip_2objdump(struct map *map, u64 rip)
442{ 451{
452 struct kmap *kmap = __map__kmap(map);
453
454 /*
455 * vmlinux does not have program headers for PTI entry trampolines and
456 * kcore may not either. However the trampoline object code is on the
457 * main kernel map, so just use that instead.
458 */
459 if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
460 struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);
461
462 if (kernel_map)
463 map = kernel_map;
464 }
465
443 if (!map->dso->adjust_symbols) 466 if (!map->dso->adjust_symbols)
444 return rip; 467 return rip;
445 468
@@ -494,10 +517,7 @@ static void maps__init(struct maps *maps)
494 517
495void map_groups__init(struct map_groups *mg, struct machine *machine) 518void map_groups__init(struct map_groups *mg, struct machine *machine)
496{ 519{
497 int i; 520 maps__init(&mg->maps);
498 for (i = 0; i < MAP__NR_TYPES; ++i) {
499 maps__init(&mg->maps[i]);
500 }
501 mg->machine = machine; 521 mg->machine = machine;
502 refcount_set(&mg->refcnt, 1); 522 refcount_set(&mg->refcnt, 1);
503} 523}
@@ -525,22 +545,12 @@ static void maps__exit(struct maps *maps)
525 545
526void map_groups__exit(struct map_groups *mg) 546void map_groups__exit(struct map_groups *mg)
527{ 547{
528 int i; 548 maps__exit(&mg->maps);
529
530 for (i = 0; i < MAP__NR_TYPES; ++i)
531 maps__exit(&mg->maps[i]);
532} 549}
533 550
534bool map_groups__empty(struct map_groups *mg) 551bool map_groups__empty(struct map_groups *mg)
535{ 552{
536 int i; 553 return !maps__first(&mg->maps);
537
538 for (i = 0; i < MAP__NR_TYPES; ++i) {
539 if (maps__first(&mg->maps[i]))
540 return false;
541 }
542
543 return true;
544} 554}
545 555
546struct map_groups *map_groups__new(struct machine *machine) 556struct map_groups *map_groups__new(struct machine *machine)
@@ -566,10 +576,9 @@ void map_groups__put(struct map_groups *mg)
566} 576}
567 577
568struct symbol *map_groups__find_symbol(struct map_groups *mg, 578struct symbol *map_groups__find_symbol(struct map_groups *mg,
569 enum map_type type, u64 addr, 579 u64 addr, struct map **mapp)
570 struct map **mapp)
571{ 580{
572 struct map *map = map_groups__find(mg, type, addr); 581 struct map *map = map_groups__find(mg, addr);
573 582
574 /* Ensure map is loaded before using map->map_ip */ 583 /* Ensure map is loaded before using map->map_ip */
575 if (map != NULL && map__load(map) >= 0) { 584 if (map != NULL && map__load(map) >= 0) {
@@ -608,13 +617,10 @@ out:
608} 617}
609 618
610struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, 619struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
611 enum map_type type,
612 const char *name, 620 const char *name,
613 struct map **mapp) 621 struct map **mapp)
614{ 622{
615 struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp); 623 return maps__find_symbol_by_name(&mg->maps, name, mapp);
616
617 return sym;
618} 624}
619 625
620int map_groups__find_ams(struct addr_map_symbol *ams) 626int map_groups__find_ams(struct addr_map_symbol *ams)
@@ -622,8 +628,7 @@ int map_groups__find_ams(struct addr_map_symbol *ams)
622 if (ams->addr < ams->map->start || ams->addr >= ams->map->end) { 628 if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
623 if (ams->map->groups == NULL) 629 if (ams->map->groups == NULL)
624 return -1; 630 return -1;
625 ams->map = map_groups__find(ams->map->groups, ams->map->type, 631 ams->map = map_groups__find(ams->map->groups, ams->addr);
626 ams->addr);
627 if (ams->map == NULL) 632 if (ams->map == NULL)
628 return -1; 633 return -1;
629 } 634 }
@@ -646,7 +651,7 @@ static size_t maps__fprintf(struct maps *maps, FILE *fp)
646 printed += fprintf(fp, "Map:"); 651 printed += fprintf(fp, "Map:");
647 printed += map__fprintf(pos, fp); 652 printed += map__fprintf(pos, fp);
648 if (verbose > 2) { 653 if (verbose > 2) {
649 printed += dso__fprintf(pos->dso, pos->type, fp); 654 printed += dso__fprintf(pos->dso, fp);
650 printed += fprintf(fp, "--\n"); 655 printed += fprintf(fp, "--\n");
651 } 656 }
652 } 657 }
@@ -656,24 +661,14 @@ static size_t maps__fprintf(struct maps *maps, FILE *fp)
656 return printed; 661 return printed;
657} 662}
658 663
659size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
660 FILE *fp)
661{
662 size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
663 return printed += maps__fprintf(&mg->maps[type], fp);
664}
665
666size_t map_groups__fprintf(struct map_groups *mg, FILE *fp) 664size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
667{ 665{
668 size_t printed = 0, i; 666 return maps__fprintf(&mg->maps, fp);
669 for (i = 0; i < MAP__NR_TYPES; ++i)
670 printed += __map_groups__fprintf_maps(mg, i, fp);
671 return printed;
672} 667}
673 668
674static void __map_groups__insert(struct map_groups *mg, struct map *map) 669static void __map_groups__insert(struct map_groups *mg, struct map *map)
675{ 670{
676 __maps__insert(&mg->maps[map->type], map); 671 __maps__insert(&mg->maps, map);
677 map->groups = mg; 672 map->groups = mg;
678} 673}
679 674
@@ -758,19 +753,18 @@ out:
758int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, 753int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
759 FILE *fp) 754 FILE *fp)
760{ 755{
761 return maps__fixup_overlappings(&mg->maps[map->type], map, fp); 756 return maps__fixup_overlappings(&mg->maps, map, fp);
762} 757}
763 758
764/* 759/*
765 * XXX This should not really _copy_ te maps, but refcount them. 760 * XXX This should not really _copy_ te maps, but refcount them.
766 */ 761 */
767int map_groups__clone(struct thread *thread, 762int map_groups__clone(struct thread *thread, struct map_groups *parent)
768 struct map_groups *parent, enum map_type type)
769{ 763{
770 struct map_groups *mg = thread->mg; 764 struct map_groups *mg = thread->mg;
771 int err = -ENOMEM; 765 int err = -ENOMEM;
772 struct map *map; 766 struct map *map;
773 struct maps *maps = &parent->maps[type]; 767 struct maps *maps = &parent->maps;
774 768
775 down_read(&maps->lock); 769 down_read(&maps->lock);
776 770
@@ -877,15 +871,22 @@ struct map *map__next(struct map *map)
877 return NULL; 871 return NULL;
878} 872}
879 873
880struct kmap *map__kmap(struct map *map) 874struct kmap *__map__kmap(struct map *map)
881{ 875{
882 if (!map->dso || !map->dso->kernel) { 876 if (!map->dso || !map->dso->kernel)
883 pr_err("Internal error: map__kmap with a non-kernel map\n");
884 return NULL; 877 return NULL;
885 }
886 return (struct kmap *)(map + 1); 878 return (struct kmap *)(map + 1);
887} 879}
888 880
881struct kmap *map__kmap(struct map *map)
882{
883 struct kmap *kmap = __map__kmap(map);
884
885 if (!kmap)
886 pr_err("Internal error: map__kmap with a non-kernel map\n");
887 return kmap;
888}
889
889struct map_groups *map__kmaps(struct map *map) 890struct map_groups *map__kmaps(struct map *map)
890{ 891{
891 struct kmap *kmap = map__kmap(map); 892 struct kmap *kmap = map__kmap(map);
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 0e9bbe01b0ab..4cb90f242bed 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -8,19 +8,11 @@
8#include <linux/rbtree.h> 8#include <linux/rbtree.h>
9#include <pthread.h> 9#include <pthread.h>
10#include <stdio.h> 10#include <stdio.h>
11#include <string.h>
11#include <stdbool.h> 12#include <stdbool.h>
12#include <linux/types.h> 13#include <linux/types.h>
13#include "rwsem.h" 14#include "rwsem.h"
14 15
15enum map_type {
16 MAP__FUNCTION = 0,
17 MAP__VARIABLE,
18};
19
20#define MAP__NR_TYPES (MAP__VARIABLE + 1)
21
22extern const char *map_type__name[MAP__NR_TYPES];
23
24struct dso; 16struct dso;
25struct ip_callchain; 17struct ip_callchain;
26struct ref_reloc_sym; 18struct ref_reloc_sym;
@@ -35,7 +27,6 @@ struct map {
35 }; 27 };
36 u64 start; 28 u64 start;
37 u64 end; 29 u64 end;
38 u8 /* enum map_type */ type;
39 bool erange_warned; 30 bool erange_warned;
40 u32 priv; 31 u32 priv;
41 u32 prot; 32 u32 prot;
@@ -56,9 +47,12 @@ struct map {
56 refcount_t refcnt; 47 refcount_t refcnt;
57}; 48};
58 49
50#define KMAP_NAME_LEN 256
51
59struct kmap { 52struct kmap {
60 struct ref_reloc_sym *ref_reloc_sym; 53 struct ref_reloc_sym *ref_reloc_sym;
61 struct map_groups *kmaps; 54 struct map_groups *kmaps;
55 char name[KMAP_NAME_LEN];
62}; 56};
63 57
64struct maps { 58struct maps {
@@ -67,7 +61,7 @@ struct maps {
67}; 61};
68 62
69struct map_groups { 63struct map_groups {
70 struct maps maps[MAP__NR_TYPES]; 64 struct maps maps;
71 struct machine *machine; 65 struct machine *machine;
72 refcount_t refcnt; 66 refcount_t refcnt;
73}; 67};
@@ -85,6 +79,7 @@ static inline struct map_groups *map_groups__get(struct map_groups *mg)
85 79
86void map_groups__put(struct map_groups *mg); 80void map_groups__put(struct map_groups *mg);
87 81
82struct kmap *__map__kmap(struct map *map);
88struct kmap *map__kmap(struct map *map); 83struct kmap *map__kmap(struct map *map);
89struct map_groups *map__kmaps(struct map *map); 84struct map_groups *map__kmaps(struct map *map);
90 85
@@ -125,7 +120,7 @@ struct thread;
125 * Note: caller must ensure map->dso is not NULL (map is loaded). 120 * Note: caller must ensure map->dso is not NULL (map is loaded).
126 */ 121 */
127#define map__for_each_symbol(map, pos, n) \ 122#define map__for_each_symbol(map, pos, n) \
128 dso__for_each_symbol(map->dso, pos, n, map->type) 123 dso__for_each_symbol(map->dso, pos, n)
129 124
130/* map__for_each_symbol_with_name - iterate over the symbols in the given map 125/* map__for_each_symbol_with_name - iterate over the symbols in the given map
131 * that have the given name 126 * that have the given name
@@ -144,13 +139,13 @@ struct thread;
144#define map__for_each_symbol_by_name(map, sym_name, pos) \ 139#define map__for_each_symbol_by_name(map, sym_name, pos) \
145 __map__for_each_symbol_by_name(map, sym_name, (pos)) 140 __map__for_each_symbol_by_name(map, sym_name, (pos))
146 141
147void map__init(struct map *map, enum map_type type, 142void map__init(struct map *map,
148 u64 start, u64 end, u64 pgoff, struct dso *dso); 143 u64 start, u64 end, u64 pgoff, struct dso *dso);
149struct map *map__new(struct machine *machine, u64 start, u64 len, 144struct map *map__new(struct machine *machine, u64 start, u64 len,
150 u64 pgoff, u32 d_maj, u32 d_min, u64 ino, 145 u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
151 u64 ino_gen, u32 prot, u32 flags, 146 u64 ino_gen, u32 prot, u32 flags,
152 char *filename, enum map_type type, struct thread *thread); 147 char *filename, struct thread *thread);
153struct map *map__new2(u64 start, struct dso *dso, enum map_type type); 148struct map *map__new2(u64 start, struct dso *dso);
154void map__delete(struct map *map); 149void map__delete(struct map *map);
155struct map *map__clone(struct map *map); 150struct map *map__clone(struct map *map);
156 151
@@ -174,6 +169,7 @@ static inline void __map__zput(struct map **map)
174int map__overlap(struct map *l, struct map *r); 169int map__overlap(struct map *l, struct map *r);
175size_t map__fprintf(struct map *map, FILE *fp); 170size_t map__fprintf(struct map *map, FILE *fp);
176size_t map__fprintf_dsoname(struct map *map, FILE *fp); 171size_t map__fprintf_dsoname(struct map *map, FILE *fp);
172char *map__srcline(struct map *map, u64 addr, struct symbol *sym);
177int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, 173int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
178 FILE *fp); 174 FILE *fp);
179 175
@@ -185,8 +181,6 @@ void map__fixup_end(struct map *map);
185 181
186void map__reloc_vmlinux(struct map *map); 182void map__reloc_vmlinux(struct map *map);
187 183
188size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
189 FILE *fp);
190void maps__insert(struct maps *maps, struct map *map); 184void maps__insert(struct maps *maps, struct map *map);
191void maps__remove(struct maps *maps, struct map *map); 185void maps__remove(struct maps *maps, struct map *map);
192struct map *maps__find(struct maps *maps, u64 addr); 186struct map *maps__find(struct maps *maps, u64 addr);
@@ -197,34 +191,29 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
197void map_groups__init(struct map_groups *mg, struct machine *machine); 191void map_groups__init(struct map_groups *mg, struct machine *machine);
198void map_groups__exit(struct map_groups *mg); 192void map_groups__exit(struct map_groups *mg);
199int map_groups__clone(struct thread *thread, 193int map_groups__clone(struct thread *thread,
200 struct map_groups *parent, enum map_type type); 194 struct map_groups *parent);
201size_t map_groups__fprintf(struct map_groups *mg, FILE *fp); 195size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
202 196
203int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name, 197int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
204 u64 addr); 198 u64 addr);
205 199
206static inline void map_groups__insert(struct map_groups *mg, struct map *map) 200static inline void map_groups__insert(struct map_groups *mg, struct map *map)
207{ 201{
208 maps__insert(&mg->maps[map->type], map); 202 maps__insert(&mg->maps, map);
209 map->groups = mg; 203 map->groups = mg;
210} 204}
211 205
212static inline void map_groups__remove(struct map_groups *mg, struct map *map) 206static inline void map_groups__remove(struct map_groups *mg, struct map *map)
213{ 207{
214 maps__remove(&mg->maps[map->type], map); 208 maps__remove(&mg->maps, map);
215} 209}
216 210
217static inline struct map *map_groups__find(struct map_groups *mg, 211static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
218 enum map_type type, u64 addr)
219{ 212{
220 return maps__find(&mg->maps[type], addr); 213 return maps__find(&mg->maps, addr);
221} 214}
222 215
223static inline struct map *map_groups__first(struct map_groups *mg, 216struct map *map_groups__first(struct map_groups *mg);
224 enum map_type type)
225{
226 return maps__first(&mg->maps[type]);
227}
228 217
229static inline struct map *map_groups__next(struct map *map) 218static inline struct map *map_groups__next(struct map *map)
230{ 219{
@@ -232,11 +221,9 @@ static inline struct map *map_groups__next(struct map *map)
232} 221}
233 222
234struct symbol *map_groups__find_symbol(struct map_groups *mg, 223struct symbol *map_groups__find_symbol(struct map_groups *mg,
235 enum map_type type, u64 addr, 224 u64 addr, struct map **mapp);
236 struct map **mapp);
237 225
238struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, 226struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
239 enum map_type type,
240 const char *name, 227 const char *name,
241 struct map **mapp); 228 struct map **mapp);
242 229
@@ -244,24 +231,26 @@ struct addr_map_symbol;
244 231
245int map_groups__find_ams(struct addr_map_symbol *ams); 232int map_groups__find_ams(struct addr_map_symbol *ams);
246 233
247static inline
248struct symbol *map_groups__find_function_by_name(struct map_groups *mg,
249 const char *name, struct map **mapp)
250{
251 return map_groups__find_symbol_by_name(mg, MAP__FUNCTION, name, mapp);
252}
253
254int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, 234int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
255 FILE *fp); 235 FILE *fp);
256 236
257struct map *map_groups__find_by_name(struct map_groups *mg, 237struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
258 enum map_type type, const char *name);
259 238
260bool __map__is_kernel(const struct map *map); 239bool __map__is_kernel(const struct map *map);
240bool __map__is_extra_kernel_map(const struct map *map);
261 241
262static inline bool __map__is_kmodule(const struct map *map) 242static inline bool __map__is_kmodule(const struct map *map)
263{ 243{
264 return !__map__is_kernel(map); 244 return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map);
245}
246
247bool map__has_symbols(const struct map *map);
248
249#define ENTRY_TRAMPOLINE_NAME "__entry_SYSCALL_64_trampoline"
250
251static inline bool is_entry_trampoline(const char *name)
252{
253 return !strcmp(name, ENTRY_TRAMPOLINE_NAME);
265} 254}
266 255
267#endif /* __PERF_MAP_H */ 256#endif /* __PERF_MAP_H */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 2fc4ee8b86c1..15eec49e71a1 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -156,13 +156,12 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
156 (strcmp(sys_dirent->d_name, ".")) && \ 156 (strcmp(sys_dirent->d_name, ".")) && \
157 (strcmp(sys_dirent->d_name, ".."))) 157 (strcmp(sys_dirent->d_name, "..")))
158 158
159static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) 159static int tp_event_has_id(const char *dir_path, struct dirent *evt_dir)
160{ 160{
161 char evt_path[MAXPATHLEN]; 161 char evt_path[MAXPATHLEN];
162 int fd; 162 int fd;
163 163
164 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path, 164 snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, evt_dir->d_name);
165 sys_dir->d_name, evt_dir->d_name);
166 fd = open(evt_path, O_RDONLY); 165 fd = open(evt_path, O_RDONLY);
167 if (fd < 0) 166 if (fd < 0)
168 return -EINVAL; 167 return -EINVAL;
@@ -171,12 +170,12 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
171 return 0; 170 return 0;
172} 171}
173 172
174#define for_each_event(sys_dirent, evt_dir, evt_dirent) \ 173#define for_each_event(dir_path, evt_dir, evt_dirent) \
175 while ((evt_dirent = readdir(evt_dir)) != NULL) \ 174 while ((evt_dirent = readdir(evt_dir)) != NULL) \
176 if (evt_dirent->d_type == DT_DIR && \ 175 if (evt_dirent->d_type == DT_DIR && \
177 (strcmp(evt_dirent->d_name, ".")) && \ 176 (strcmp(evt_dirent->d_name, ".")) && \
178 (strcmp(evt_dirent->d_name, "..")) && \ 177 (strcmp(evt_dirent->d_name, "..")) && \
179 (!tp_event_has_id(sys_dirent, evt_dirent))) 178 (!tp_event_has_id(dir_path, evt_dirent)))
180 179
181#define MAX_EVENT_LENGTH 512 180#define MAX_EVENT_LENGTH 512
182 181
@@ -190,21 +189,21 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
190 int fd; 189 int fd;
191 u64 id; 190 u64 id;
192 char evt_path[MAXPATHLEN]; 191 char evt_path[MAXPATHLEN];
193 char dir_path[MAXPATHLEN]; 192 char *dir_path;
194 193
195 sys_dir = opendir(tracing_events_path); 194 sys_dir = tracing_events__opendir();
196 if (!sys_dir) 195 if (!sys_dir)
197 return NULL; 196 return NULL;
198 197
199 for_each_subsystem(sys_dir, sys_dirent) { 198 for_each_subsystem(sys_dir, sys_dirent) {
200 199 dir_path = get_events_file(sys_dirent->d_name);
201 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, 200 if (!dir_path)
202 sys_dirent->d_name); 201 continue;
203 evt_dir = opendir(dir_path); 202 evt_dir = opendir(dir_path);
204 if (!evt_dir) 203 if (!evt_dir)
205 continue; 204 goto next;
206 205
207 for_each_event(sys_dirent, evt_dir, evt_dirent) { 206 for_each_event(dir_path, evt_dir, evt_dirent) {
208 207
209 scnprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, 208 scnprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
210 evt_dirent->d_name); 209 evt_dirent->d_name);
@@ -218,6 +217,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
218 close(fd); 217 close(fd);
219 id = atoll(id_buf); 218 id = atoll(id_buf);
220 if (id == config) { 219 if (id == config) {
220 put_events_file(dir_path);
221 closedir(evt_dir); 221 closedir(evt_dir);
222 closedir(sys_dir); 222 closedir(sys_dir);
223 path = zalloc(sizeof(*path)); 223 path = zalloc(sizeof(*path));
@@ -242,6 +242,8 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
242 } 242 }
243 } 243 }
244 closedir(evt_dir); 244 closedir(evt_dir);
245next:
246 put_events_file(dir_path);
245 } 247 }
246 248
247 closedir(sys_dir); 249 closedir(sys_dir);
@@ -512,14 +514,19 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
512 struct parse_events_error *err, 514 struct parse_events_error *err,
513 struct list_head *head_config) 515 struct list_head *head_config)
514{ 516{
515 char evt_path[MAXPATHLEN]; 517 char *evt_path;
516 struct dirent *evt_ent; 518 struct dirent *evt_ent;
517 DIR *evt_dir; 519 DIR *evt_dir;
518 int ret = 0, found = 0; 520 int ret = 0, found = 0;
519 521
520 snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name); 522 evt_path = get_events_file(sys_name);
523 if (!evt_path) {
524 tracepoint_error(err, errno, sys_name, evt_name);
525 return -1;
526 }
521 evt_dir = opendir(evt_path); 527 evt_dir = opendir(evt_path);
522 if (!evt_dir) { 528 if (!evt_dir) {
529 put_events_file(evt_path);
523 tracepoint_error(err, errno, sys_name, evt_name); 530 tracepoint_error(err, errno, sys_name, evt_name);
524 return -1; 531 return -1;
525 } 532 }
@@ -545,6 +552,7 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
545 ret = -1; 552 ret = -1;
546 } 553 }
547 554
555 put_events_file(evt_path);
548 closedir(evt_dir); 556 closedir(evt_dir);
549 return ret; 557 return ret;
550} 558}
@@ -570,7 +578,7 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
570 DIR *events_dir; 578 DIR *events_dir;
571 int ret = 0; 579 int ret = 0;
572 580
573 events_dir = opendir(tracing_events_path); 581 events_dir = tracing_events__opendir();
574 if (!events_dir) { 582 if (!events_dir) {
575 tracepoint_error(err, errno, sys_name, evt_name); 583 tracepoint_error(err, errno, sys_name, evt_name);
576 return -1; 584 return -1;
@@ -2092,13 +2100,13 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
2092 DIR *sys_dir, *evt_dir; 2100 DIR *sys_dir, *evt_dir;
2093 struct dirent *sys_dirent, *evt_dirent; 2101 struct dirent *sys_dirent, *evt_dirent;
2094 char evt_path[MAXPATHLEN]; 2102 char evt_path[MAXPATHLEN];
2095 char dir_path[MAXPATHLEN]; 2103 char *dir_path;
2096 char **evt_list = NULL; 2104 char **evt_list = NULL;
2097 unsigned int evt_i = 0, evt_num = 0; 2105 unsigned int evt_i = 0, evt_num = 0;
2098 bool evt_num_known = false; 2106 bool evt_num_known = false;
2099 2107
2100restart: 2108restart:
2101 sys_dir = opendir(tracing_events_path); 2109 sys_dir = tracing_events__opendir();
2102 if (!sys_dir) 2110 if (!sys_dir)
2103 return; 2111 return;
2104 2112
@@ -2113,13 +2121,14 @@ restart:
2113 !strglobmatch(sys_dirent->d_name, subsys_glob)) 2121 !strglobmatch(sys_dirent->d_name, subsys_glob))
2114 continue; 2122 continue;
2115 2123
2116 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, 2124 dir_path = get_events_file(sys_dirent->d_name);
2117 sys_dirent->d_name); 2125 if (!dir_path)
2126 continue;
2118 evt_dir = opendir(dir_path); 2127 evt_dir = opendir(dir_path);
2119 if (!evt_dir) 2128 if (!evt_dir)
2120 continue; 2129 goto next;
2121 2130
2122 for_each_event(sys_dirent, evt_dir, evt_dirent) { 2131 for_each_event(dir_path, evt_dir, evt_dirent) {
2123 if (event_glob != NULL && 2132 if (event_glob != NULL &&
2124 !strglobmatch(evt_dirent->d_name, event_glob)) 2133 !strglobmatch(evt_dirent->d_name, event_glob))
2125 continue; 2134 continue;
@@ -2133,11 +2142,15 @@ restart:
2133 sys_dirent->d_name, evt_dirent->d_name); 2142 sys_dirent->d_name, evt_dirent->d_name);
2134 2143
2135 evt_list[evt_i] = strdup(evt_path); 2144 evt_list[evt_i] = strdup(evt_path);
2136 if (evt_list[evt_i] == NULL) 2145 if (evt_list[evt_i] == NULL) {
2146 put_events_file(dir_path);
2137 goto out_close_evt_dir; 2147 goto out_close_evt_dir;
2148 }
2138 evt_i++; 2149 evt_i++;
2139 } 2150 }
2140 closedir(evt_dir); 2151 closedir(evt_dir);
2152next:
2153 put_events_file(dir_path);
2141 } 2154 }
2142 closedir(sys_dir); 2155 closedir(sys_dir);
2143 2156
@@ -2185,21 +2198,21 @@ int is_valid_tracepoint(const char *event_string)
2185 DIR *sys_dir, *evt_dir; 2198 DIR *sys_dir, *evt_dir;
2186 struct dirent *sys_dirent, *evt_dirent; 2199 struct dirent *sys_dirent, *evt_dirent;
2187 char evt_path[MAXPATHLEN]; 2200 char evt_path[MAXPATHLEN];
2188 char dir_path[MAXPATHLEN]; 2201 char *dir_path;
2189 2202
2190 sys_dir = opendir(tracing_events_path); 2203 sys_dir = tracing_events__opendir();
2191 if (!sys_dir) 2204 if (!sys_dir)
2192 return 0; 2205 return 0;
2193 2206
2194 for_each_subsystem(sys_dir, sys_dirent) { 2207 for_each_subsystem(sys_dir, sys_dirent) {
2195 2208 dir_path = get_events_file(sys_dirent->d_name);
2196 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, 2209 if (!dir_path)
2197 sys_dirent->d_name); 2210 continue;
2198 evt_dir = opendir(dir_path); 2211 evt_dir = opendir(dir_path);
2199 if (!evt_dir) 2212 if (!evt_dir)
2200 continue; 2213 goto next;
2201 2214
2202 for_each_event(sys_dirent, evt_dir, evt_dirent) { 2215 for_each_event(dir_path, evt_dir, evt_dirent) {
2203 snprintf(evt_path, MAXPATHLEN, "%s:%s", 2216 snprintf(evt_path, MAXPATHLEN, "%s:%s",
2204 sys_dirent->d_name, evt_dirent->d_name); 2217 sys_dirent->d_name, evt_dirent->d_name);
2205 if (!strcmp(evt_path, event_string)) { 2218 if (!strcmp(evt_path, event_string)) {
@@ -2209,6 +2222,8 @@ int is_valid_tracepoint(const char *event_string)
2209 } 2222 }
2210 } 2223 }
2211 closedir(evt_dir); 2224 closedir(evt_dir);
2225next:
2226 put_events_file(dir_path);
2212 } 2227 }
2213 closedir(sys_dir); 2228 closedir(sys_dir);
2214 return 0; 2229 return 0;
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index a1a01b1ac8b8..5f761f3ed0f3 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -53,7 +53,21 @@ static int str(yyscan_t scanner, int token)
53 YYSTYPE *yylval = parse_events_get_lval(scanner); 53 YYSTYPE *yylval = parse_events_get_lval(scanner);
54 char *text = parse_events_get_text(scanner); 54 char *text = parse_events_get_text(scanner);
55 55
56 yylval->str = strdup(text); 56 if (text[0] != '\'') {
57 yylval->str = strdup(text);
58 } else {
59 /*
60 * If a text tag specified on the command line
61 * contains opening single quite ' then it is
62 * expected that the tag ends with single quote
63 * as well, like this:
64 * name=\'CPU_CLK_UNHALTED.THREAD:cmask=1\'
65 * quotes need to be escaped to bypass shell
66 * processing.
67 */
68 yylval->str = strndup(&text[1], strlen(text) - 2);
69 }
70
57 return token; 71 return token;
58} 72}
59 73
@@ -176,6 +190,7 @@ num_dec [0-9]+
176num_hex 0x[a-fA-F0-9]+ 190num_hex 0x[a-fA-F0-9]+
177num_raw_hex [a-fA-F0-9]+ 191num_raw_hex [a-fA-F0-9]+
178name [a-zA-Z_*?\[\]][a-zA-Z0-9_*?.\[\]]* 192name [a-zA-Z_*?\[\]][a-zA-Z0-9_*?.\[\]]*
193name_tag [\'][a-zA-Z_*?\[\]][a-zA-Z0-9_*?\-,\.\[\]:=]*[\']
179name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]* 194name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]*
180drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)? 195drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
181/* If you add a modifier you need to update check_modifier() */ 196/* If you add a modifier you need to update check_modifier() */
@@ -344,6 +359,7 @@ r{num_raw_hex} { return raw(yyscanner); }
344{bpf_object} { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_OBJECT); } 359{bpf_object} { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_OBJECT); }
345{bpf_source} { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_SOURCE); } 360{bpf_source} { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_SOURCE); }
346{name} { return pmu_str_check(yyscanner); } 361{name} { return pmu_str_check(yyscanner); }
362{name_tag} { return str(yyscanner, PE_NAME); }
347"/" { BEGIN(config); return '/'; } 363"/" { BEGIN(config); return '/'; }
348- { return '-'; } 364- { return '-'; }
349, { BEGIN(event); return ','; } 365, { BEGIN(event); return ','; }
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index e37608a87dba..155d2570274f 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -73,6 +73,7 @@ static void inc_group_count(struct list_head *list,
73%type <num> value_sym 73%type <num> value_sym
74%type <head> event_config 74%type <head> event_config
75%type <head> opt_event_config 75%type <head> opt_event_config
76%type <head> opt_pmu_config
76%type <term> event_term 77%type <term> event_term
77%type <head> event_pmu 78%type <head> event_pmu
78%type <head> event_legacy_symbol 79%type <head> event_legacy_symbol
@@ -224,7 +225,7 @@ event_def: event_pmu |
224 event_bpf_file 225 event_bpf_file
225 226
226event_pmu: 227event_pmu:
227PE_NAME opt_event_config 228PE_NAME opt_pmu_config
228{ 229{
229 struct list_head *list, *orig_terms, *terms; 230 struct list_head *list, *orig_terms, *terms;
230 231
@@ -496,6 +497,17 @@ opt_event_config:
496 $$ = NULL; 497 $$ = NULL;
497} 498}
498 499
500opt_pmu_config:
501'/' event_config '/'
502{
503 $$ = $2;
504}
505|
506'/' '/'
507{
508 $$ = NULL;
509}
510
499start_terms: event_config 511start_terms: event_config
500{ 512{
501 struct parse_events_state *parse_state = _parse_state; 513 struct parse_events_state *parse_state = _parse_state;
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index e1dbc9821617..f119eb628dbb 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -111,17 +111,6 @@ void exit_probe_symbol_maps(void)
111 symbol__exit(); 111 symbol__exit();
112} 112}
113 113
114static struct symbol *__find_kernel_function_by_name(const char *name,
115 struct map **mapp)
116{
117 return machine__find_kernel_function_by_name(host_machine, name, mapp);
118}
119
120static struct symbol *__find_kernel_function(u64 addr, struct map **mapp)
121{
122 return machine__find_kernel_function(host_machine, addr, mapp);
123}
124
125static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) 114static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
126{ 115{
127 /* kmap->ref_reloc_sym should be set if host_machine is initialized */ 116 /* kmap->ref_reloc_sym should be set if host_machine is initialized */
@@ -149,7 +138,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
149 if (reloc_sym && strcmp(name, reloc_sym->name) == 0) 138 if (reloc_sym && strcmp(name, reloc_sym->name) == 0)
150 *addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr; 139 *addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr;
151 else { 140 else {
152 sym = __find_kernel_function_by_name(name, &map); 141 sym = machine__find_kernel_symbol_by_name(host_machine, name, &map);
153 if (!sym) 142 if (!sym)
154 return -ENOENT; 143 return -ENOENT;
155 *addr = map->unmap_ip(map, sym->start) - 144 *addr = map->unmap_ip(map, sym->start) -
@@ -161,8 +150,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
161 150
162static struct map *kernel_get_module_map(const char *module) 151static struct map *kernel_get_module_map(const char *module)
163{ 152{
164 struct map_groups *grp = &host_machine->kmaps; 153 struct maps *maps = machine__kernel_maps(host_machine);
165 struct maps *maps = &grp->maps[MAP__FUNCTION];
166 struct map *pos; 154 struct map *pos;
167 155
168 /* A file path -- this is an offline module */ 156 /* A file path -- this is an offline module */
@@ -177,8 +165,7 @@ static struct map *kernel_get_module_map(const char *module)
177 if (strncmp(pos->dso->short_name + 1, module, 165 if (strncmp(pos->dso->short_name + 1, module,
178 pos->dso->short_name_len - 2) == 0 && 166 pos->dso->short_name_len - 2) == 0 &&
179 module[pos->dso->short_name_len - 2] == '\0') { 167 module[pos->dso->short_name_len - 2] == '\0') {
180 map__get(pos); 168 return map__get(pos);
181 return pos;
182 } 169 }
183 } 170 }
184 return NULL; 171 return NULL;
@@ -341,7 +328,7 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso)
341 char module_name[128]; 328 char module_name[128];
342 329
343 snprintf(module_name, sizeof(module_name), "[%s]", module); 330 snprintf(module_name, sizeof(module_name), "[%s]", module);
344 map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name); 331 map = map_groups__find_by_name(&host_machine->kmaps, module_name);
345 if (map) { 332 if (map) {
346 dso = map->dso; 333 dso = map->dso;
347 goto found; 334 goto found;
@@ -2098,7 +2085,7 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
2098 } 2085 }
2099 if (addr) { 2086 if (addr) {
2100 addr += tp->offset; 2087 addr += tp->offset;
2101 sym = __find_kernel_function(addr, &map); 2088 sym = machine__find_kernel_symbol(host_machine, addr, &map);
2102 } 2089 }
2103 } 2090 }
2104 2091
@@ -3504,19 +3491,18 @@ int show_available_funcs(const char *target, struct nsinfo *nsi,
3504 (target) ? : "kernel"); 3491 (target) ? : "kernel");
3505 goto end; 3492 goto end;
3506 } 3493 }
3507 if (!dso__sorted_by_name(map->dso, map->type)) 3494 if (!dso__sorted_by_name(map->dso))
3508 dso__sort_by_name(map->dso, map->type); 3495 dso__sort_by_name(map->dso);
3509 3496
3510 /* Show all (filtered) symbols */ 3497 /* Show all (filtered) symbols */
3511 setup_pager(); 3498 setup_pager();
3512 3499
3513 for (nd = rb_first(&map->dso->symbol_names[map->type]); nd; nd = rb_next(nd)) { 3500 for (nd = rb_first(&map->dso->symbol_names); nd; nd = rb_next(nd)) {
3514 struct symbol_name_rb_node *pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); 3501 struct symbol_name_rb_node *pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
3515 3502
3516 if (strfilter__compare(_filter, pos->sym.name)) 3503 if (strfilter__compare(_filter, pos->sym.name))
3517 printf("%s\n", pos->sym.name); 3504 printf("%s\n", pos->sym.name);
3518 } 3505 }
3519
3520end: 3506end:
3521 map__put(map); 3507 map__put(map);
3522 exit_probe_symbol_maps(); 3508 exit_probe_symbol_maps();
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index 4ae1123c6794..b76088fadf3d 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -84,8 +84,7 @@ int open_trace_file(const char *trace_file, bool readwrite)
84 char buf[PATH_MAX]; 84 char buf[PATH_MAX];
85 int ret; 85 int ret;
86 86
87 ret = e_snprintf(buf, PATH_MAX, "%s/%s", 87 ret = e_snprintf(buf, PATH_MAX, "%s/%s", tracing_path_mount(), trace_file);
88 tracing_path, trace_file);
89 if (ret >= 0) { 88 if (ret >= 0) {
90 pr_debug("Opening %s write=%d\n", buf, readwrite); 89 pr_debug("Opening %s write=%d\n", buf, readwrite);
91 if (readwrite && !probe_event_dry_run) 90 if (readwrite && !probe_event_dry_run)
diff --git a/tools/perf/util/quote.c b/tools/perf/util/quote.c
deleted file mode 100644
index 22eaa201aa27..000000000000
--- a/tools/perf/util/quote.c
+++ /dev/null
@@ -1,62 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <errno.h>
3#include <stdlib.h>
4#include "strbuf.h"
5#include "quote.h"
6#include "util.h"
7
8/* Help to copy the thing properly quoted for the shell safety.
9 * any single quote is replaced with '\'', any exclamation point
10 * is replaced with '\!', and the whole thing is enclosed in a
11 *
12 * E.g.
13 * original sq_quote result
14 * name ==> name ==> 'name'
15 * a b ==> a b ==> 'a b'
16 * a'b ==> a'\''b ==> 'a'\''b'
17 * a!b ==> a'\!'b ==> 'a'\!'b'
18 */
19static inline int need_bs_quote(char c)
20{
21 return (c == '\'' || c == '!');
22}
23
24static int sq_quote_buf(struct strbuf *dst, const char *src)
25{
26 char *to_free = NULL;
27 int ret;
28
29 if (dst->buf == src)
30 to_free = strbuf_detach(dst, NULL);
31
32 ret = strbuf_addch(dst, '\'');
33 while (!ret && *src) {
34 size_t len = strcspn(src, "'!");
35 ret = strbuf_add(dst, src, len);
36 src += len;
37 while (!ret && need_bs_quote(*src))
38 ret = strbuf_addf(dst, "'\\%c\'", *src++);
39 }
40 if (!ret)
41 ret = strbuf_addch(dst, '\'');
42 free(to_free);
43
44 return ret;
45}
46
47int sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
48{
49 int i, ret;
50
51 /* Copy into destination buffer. */
52 ret = strbuf_grow(dst, 255);
53 for (i = 0; !ret && argv[i]; ++i) {
54 ret = strbuf_addch(dst, ' ');
55 if (ret)
56 break;
57 ret = sq_quote_buf(dst, argv[i]);
58 if (maxlen && dst->len > maxlen)
59 return -ENOSPC;
60 }
61 return ret;
62}
diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h
deleted file mode 100644
index 274bf26d3511..000000000000
--- a/tools/perf/util/quote.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __PERF_QUOTE_H
3#define __PERF_QUOTE_H
4
5#include <stddef.h>
6
7/* Help to copy the thing properly quoted for the shell safety.
8 * any single quote is replaced with '\'', any exclamation point
9 * is replaced with '\!', and the whole thing is enclosed in a
10 * single quote pair.
11 *
12 * For example, if you are passing the result to system() as an
13 * argument:
14 *
15 * sprintf(cmd, "foobar %s %s", sq_quote(arg0), sq_quote(arg1))
16 *
17 * would be appropriate. If the system() is going to call ssh to
18 * run the command on the other side:
19 *
20 * sprintf(cmd, "git-diff-tree %s %s", sq_quote(arg0), sq_quote(arg1));
21 * sprintf(rcmd, "ssh %s %s", sq_util/quote.host), sq_quote(cmd));
22 *
23 * Note that the above examples leak memory! Remember to free result from
24 * sq_quote() in a real application.
25 */
26
27struct strbuf;
28
29int sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
30
31#endif /* __PERF_QUOTE_H */
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 7f8afacd08ee..46e9e19ab1ac 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -48,6 +48,7 @@
48#include "cpumap.h" 48#include "cpumap.h"
49#include "print_binary.h" 49#include "print_binary.h"
50#include "stat.h" 50#include "stat.h"
51#include "mem-events.h"
51 52
52#if PY_MAJOR_VERSION < 3 53#if PY_MAJOR_VERSION < 3
53#define _PyUnicode_FromString(arg) \ 54#define _PyUnicode_FromString(arg) \
@@ -372,6 +373,19 @@ static PyObject *get_field_numeric_entry(struct event_format *event,
372 return obj; 373 return obj;
373} 374}
374 375
376static const char *get_dsoname(struct map *map)
377{
378 const char *dsoname = "[unknown]";
379
380 if (map && map->dso) {
381 if (symbol_conf.show_kernel_path && map->dso->long_name)
382 dsoname = map->dso->long_name;
383 else
384 dsoname = map->dso->name;
385 }
386
387 return dsoname;
388}
375 389
376static PyObject *python_process_callchain(struct perf_sample *sample, 390static PyObject *python_process_callchain(struct perf_sample *sample,
377 struct perf_evsel *evsel, 391 struct perf_evsel *evsel,
@@ -427,14 +441,8 @@ static PyObject *python_process_callchain(struct perf_sample *sample,
427 } 441 }
428 442
429 if (node->map) { 443 if (node->map) {
430 struct map *map = node->map; 444 const char *dsoname = get_dsoname(node->map);
431 const char *dsoname = "[unknown]"; 445
432 if (map && map->dso) {
433 if (symbol_conf.show_kernel_path && map->dso->long_name)
434 dsoname = map->dso->long_name;
435 else
436 dsoname = map->dso->name;
437 }
438 pydict_set_item_string_decref(pyelem, "dso", 446 pydict_set_item_string_decref(pyelem, "dso",
439 _PyUnicode_FromString(dsoname)); 447 _PyUnicode_FromString(dsoname));
440 } 448 }
@@ -448,6 +456,166 @@ exit:
448 return pylist; 456 return pylist;
449} 457}
450 458
459static PyObject *python_process_brstack(struct perf_sample *sample,
460 struct thread *thread)
461{
462 struct branch_stack *br = sample->branch_stack;
463 PyObject *pylist;
464 u64 i;
465
466 pylist = PyList_New(0);
467 if (!pylist)
468 Py_FatalError("couldn't create Python list");
469
470 if (!(br && br->nr))
471 goto exit;
472
473 for (i = 0; i < br->nr; i++) {
474 PyObject *pyelem;
475 struct addr_location al;
476 const char *dsoname;
477
478 pyelem = PyDict_New();
479 if (!pyelem)
480 Py_FatalError("couldn't create Python dictionary");
481
482 pydict_set_item_string_decref(pyelem, "from",
483 PyLong_FromUnsignedLongLong(br->entries[i].from));
484 pydict_set_item_string_decref(pyelem, "to",
485 PyLong_FromUnsignedLongLong(br->entries[i].to));
486 pydict_set_item_string_decref(pyelem, "mispred",
487 PyBool_FromLong(br->entries[i].flags.mispred));
488 pydict_set_item_string_decref(pyelem, "predicted",
489 PyBool_FromLong(br->entries[i].flags.predicted));
490 pydict_set_item_string_decref(pyelem, "in_tx",
491 PyBool_FromLong(br->entries[i].flags.in_tx));
492 pydict_set_item_string_decref(pyelem, "abort",
493 PyBool_FromLong(br->entries[i].flags.abort));
494 pydict_set_item_string_decref(pyelem, "cycles",
495 PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles));
496
497 thread__find_map(thread, sample->cpumode,
498 br->entries[i].from, &al);
499 dsoname = get_dsoname(al.map);
500 pydict_set_item_string_decref(pyelem, "from_dsoname",
501 _PyUnicode_FromString(dsoname));
502
503 thread__find_map(thread, sample->cpumode,
504 br->entries[i].to, &al);
505 dsoname = get_dsoname(al.map);
506 pydict_set_item_string_decref(pyelem, "to_dsoname",
507 _PyUnicode_FromString(dsoname));
508
509 PyList_Append(pylist, pyelem);
510 Py_DECREF(pyelem);
511 }
512
513exit:
514 return pylist;
515}
516
517static unsigned long get_offset(struct symbol *sym, struct addr_location *al)
518{
519 unsigned long offset;
520
521 if (al->addr < sym->end)
522 offset = al->addr - sym->start;
523 else
524 offset = al->addr - al->map->start - sym->start;
525
526 return offset;
527}
528
529static int get_symoff(struct symbol *sym, struct addr_location *al,
530 bool print_off, char *bf, int size)
531{
532 unsigned long offset;
533
534 if (!sym || !sym->name[0])
535 return scnprintf(bf, size, "%s", "[unknown]");
536
537 if (!print_off)
538 return scnprintf(bf, size, "%s", sym->name);
539
540 offset = get_offset(sym, al);
541
542 return scnprintf(bf, size, "%s+0x%x", sym->name, offset);
543}
544
545static int get_br_mspred(struct branch_flags *flags, char *bf, int size)
546{
547 if (!flags->mispred && !flags->predicted)
548 return scnprintf(bf, size, "%s", "-");
549
550 if (flags->mispred)
551 return scnprintf(bf, size, "%s", "M");
552
553 return scnprintf(bf, size, "%s", "P");
554}
555
556static PyObject *python_process_brstacksym(struct perf_sample *sample,
557 struct thread *thread)
558{
559 struct branch_stack *br = sample->branch_stack;
560 PyObject *pylist;
561 u64 i;
562 char bf[512];
563 struct addr_location al;
564
565 pylist = PyList_New(0);
566 if (!pylist)
567 Py_FatalError("couldn't create Python list");
568
569 if (!(br && br->nr))
570 goto exit;
571
572 for (i = 0; i < br->nr; i++) {
573 PyObject *pyelem;
574
575 pyelem = PyDict_New();
576 if (!pyelem)
577 Py_FatalError("couldn't create Python dictionary");
578
579 thread__find_symbol(thread, sample->cpumode,
580 br->entries[i].from, &al);
581 get_symoff(al.sym, &al, true, bf, sizeof(bf));
582 pydict_set_item_string_decref(pyelem, "from",
583 _PyUnicode_FromString(bf));
584
585 thread__find_symbol(thread, sample->cpumode,
586 br->entries[i].to, &al);
587 get_symoff(al.sym, &al, true, bf, sizeof(bf));
588 pydict_set_item_string_decref(pyelem, "to",
589 _PyUnicode_FromString(bf));
590
591 get_br_mspred(&br->entries[i].flags, bf, sizeof(bf));
592 pydict_set_item_string_decref(pyelem, "pred",
593 _PyUnicode_FromString(bf));
594
595 if (br->entries[i].flags.in_tx) {
596 pydict_set_item_string_decref(pyelem, "in_tx",
597 _PyUnicode_FromString("X"));
598 } else {
599 pydict_set_item_string_decref(pyelem, "in_tx",
600 _PyUnicode_FromString("-"));
601 }
602
603 if (br->entries[i].flags.abort) {
604 pydict_set_item_string_decref(pyelem, "abort",
605 _PyUnicode_FromString("A"));
606 } else {
607 pydict_set_item_string_decref(pyelem, "abort",
608 _PyUnicode_FromString("-"));
609 }
610
611 PyList_Append(pylist, pyelem);
612 Py_DECREF(pyelem);
613 }
614
615exit:
616 return pylist;
617}
618
451static PyObject *get_sample_value_as_tuple(struct sample_read_value *value) 619static PyObject *get_sample_value_as_tuple(struct sample_read_value *value)
452{ 620{
453 PyObject *t; 621 PyObject *t;
@@ -498,12 +666,63 @@ static void set_sample_read_in_dict(PyObject *dict_sample,
498 pydict_set_item_string_decref(dict_sample, "values", values); 666 pydict_set_item_string_decref(dict_sample, "values", values);
499} 667}
500 668
669static void set_sample_datasrc_in_dict(PyObject *dict,
670 struct perf_sample *sample)
671{
672 struct mem_info mi = { .data_src.val = sample->data_src };
673 char decode[100];
674
675 pydict_set_item_string_decref(dict, "datasrc",
676 PyLong_FromUnsignedLongLong(sample->data_src));
677
678 perf_script__meminfo_scnprintf(decode, 100, &mi);
679
680 pydict_set_item_string_decref(dict, "datasrc_decode",
681 _PyUnicode_FromString(decode));
682}
683
684static int regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size)
685{
686 unsigned int i = 0, r;
687 int printed = 0;
688
689 bf[0] = 0;
690
691 for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
692 u64 val = regs->regs[i++];
693
694 printed += scnprintf(bf + printed, size - printed,
695 "%5s:0x%" PRIx64 " ",
696 perf_reg_name(r), val);
697 }
698
699 return printed;
700}
701
702static void set_regs_in_dict(PyObject *dict,
703 struct perf_sample *sample,
704 struct perf_evsel *evsel)
705{
706 struct perf_event_attr *attr = &evsel->attr;
707 char bf[512];
708
709 regs_map(&sample->intr_regs, attr->sample_regs_intr, bf, sizeof(bf));
710
711 pydict_set_item_string_decref(dict, "iregs",
712 _PyUnicode_FromString(bf));
713
714 regs_map(&sample->user_regs, attr->sample_regs_user, bf, sizeof(bf));
715
716 pydict_set_item_string_decref(dict, "uregs",
717 _PyUnicode_FromString(bf));
718}
719
501static PyObject *get_perf_sample_dict(struct perf_sample *sample, 720static PyObject *get_perf_sample_dict(struct perf_sample *sample,
502 struct perf_evsel *evsel, 721 struct perf_evsel *evsel,
503 struct addr_location *al, 722 struct addr_location *al,
504 PyObject *callchain) 723 PyObject *callchain)
505{ 724{
506 PyObject *dict, *dict_sample; 725 PyObject *dict, *dict_sample, *brstack, *brstacksym;
507 726
508 dict = PyDict_New(); 727 dict = PyDict_New();
509 if (!dict) 728 if (!dict)
@@ -534,6 +753,11 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
534 pydict_set_item_string_decref(dict_sample, "addr", 753 pydict_set_item_string_decref(dict_sample, "addr",
535 PyLong_FromUnsignedLongLong(sample->addr)); 754 PyLong_FromUnsignedLongLong(sample->addr));
536 set_sample_read_in_dict(dict_sample, sample, evsel); 755 set_sample_read_in_dict(dict_sample, sample, evsel);
756 pydict_set_item_string_decref(dict_sample, "weight",
757 PyLong_FromUnsignedLongLong(sample->weight));
758 pydict_set_item_string_decref(dict_sample, "transaction",
759 PyLong_FromUnsignedLongLong(sample->transaction));
760 set_sample_datasrc_in_dict(dict_sample, sample);
537 pydict_set_item_string_decref(dict, "sample", dict_sample); 761 pydict_set_item_string_decref(dict, "sample", dict_sample);
538 762
539 pydict_set_item_string_decref(dict, "raw_buf", _PyBytes_FromStringAndSize( 763 pydict_set_item_string_decref(dict, "raw_buf", _PyBytes_FromStringAndSize(
@@ -551,6 +775,14 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
551 775
552 pydict_set_item_string_decref(dict, "callchain", callchain); 776 pydict_set_item_string_decref(dict, "callchain", callchain);
553 777
778 brstack = python_process_brstack(sample, al->thread);
779 pydict_set_item_string_decref(dict, "brstack", brstack);
780
781 brstacksym = python_process_brstacksym(sample, al->thread);
782 pydict_set_item_string_decref(dict, "brstacksym", brstacksym);
783
784 set_regs_in_dict(dict, sample, evsel);
785
554 return dict; 786 return dict;
555} 787}
556 788
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index f4a7a437ee87..8b9369303561 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1094,7 +1094,7 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
1094 1094
1095 sample_type = evsel->attr.sample_type; 1095 sample_type = evsel->attr.sample_type;
1096 1096
1097 if (sample_type & PERF_SAMPLE_CALLCHAIN) 1097 if (evsel__has_callchain(evsel))
1098 callchain__printf(evsel, sample); 1098 callchain__printf(evsel, sample);
1099 1099
1100 if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel)) 1100 if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
@@ -1973,12 +1973,11 @@ bool perf_session__has_traces(struct perf_session *session, const char *msg)
1973 return false; 1973 return false;
1974} 1974}
1975 1975
1976int maps__set_kallsyms_ref_reloc_sym(struct map **maps, 1976int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
1977 const char *symbol_name, u64 addr)
1978{ 1977{
1979 char *bracket; 1978 char *bracket;
1980 int i;
1981 struct ref_reloc_sym *ref; 1979 struct ref_reloc_sym *ref;
1980 struct kmap *kmap;
1982 1981
1983 ref = zalloc(sizeof(struct ref_reloc_sym)); 1982 ref = zalloc(sizeof(struct ref_reloc_sym));
1984 if (ref == NULL) 1983 if (ref == NULL)
@@ -1996,13 +1995,9 @@ int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1996 1995
1997 ref->addr = addr; 1996 ref->addr = addr;
1998 1997
1999 for (i = 0; i < MAP__NR_TYPES; ++i) { 1998 kmap = map__kmap(map);
2000 struct kmap *kmap = map__kmap(maps[i]); 1999 if (kmap)
2001
2002 if (!kmap)
2003 continue;
2004 kmap->ref_reloc_sym = ref; 2000 kmap->ref_reloc_sym = ref;
2005 }
2006 2001
2007 return 0; 2002 return 0;
2008} 2003}
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 26a68dfd8a4f..fed2952ab45a 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -2,7 +2,7 @@
2#include <errno.h> 2#include <errno.h>
3#include <inttypes.h> 3#include <inttypes.h>
4#include <regex.h> 4#include <regex.h>
5#include <sys/mman.h> 5#include <linux/mman.h>
6#include "sort.h" 6#include "sort.h"
7#include "hist.h" 7#include "hist.h"
8#include "comm.h" 8#include "comm.h"
@@ -282,7 +282,7 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
282 282
283 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 283 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
284 if (sym && map) { 284 if (sym && map) {
285 if (map->type == MAP__VARIABLE) { 285 if (sym->type == STT_OBJECT) {
286 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 286 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
287 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 287 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
288 ip - map->unmap_ip(map, sym->start)); 288 ip - map->unmap_ip(map, sym->start));
@@ -331,24 +331,18 @@ struct sort_entry sort_sym = {
331 331
332/* --sort srcline */ 332/* --sort srcline */
333 333
334char *hist_entry__get_srcline(struct hist_entry *he) 334char *hist_entry__srcline(struct hist_entry *he)
335{ 335{
336 struct map *map = he->ms.map; 336 return map__srcline(he->ms.map, he->ip, he->ms.sym);
337
338 if (!map)
339 return SRCLINE_UNKNOWN;
340
341 return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
342 he->ms.sym, true, true, he->ip);
343} 337}
344 338
345static int64_t 339static int64_t
346sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) 340sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
347{ 341{
348 if (!left->srcline) 342 if (!left->srcline)
349 left->srcline = hist_entry__get_srcline(left); 343 left->srcline = hist_entry__srcline(left);
350 if (!right->srcline) 344 if (!right->srcline)
351 right->srcline = hist_entry__get_srcline(right); 345 right->srcline = hist_entry__srcline(right);
352 346
353 return strcmp(right->srcline, left->srcline); 347 return strcmp(right->srcline, left->srcline);
354} 348}
@@ -357,7 +351,7 @@ static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
357 size_t size, unsigned int width) 351 size_t size, unsigned int width)
358{ 352{
359 if (!he->srcline) 353 if (!he->srcline)
360 he->srcline = hist_entry__get_srcline(he); 354 he->srcline = hist_entry__srcline(he);
361 355
362 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline); 356 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
363} 357}
@@ -371,33 +365,20 @@ struct sort_entry sort_srcline = {
371 365
372/* --sort srcline_from */ 366/* --sort srcline_from */
373 367
368static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
369{
370 return map__srcline(ams->map, ams->al_addr, ams->sym);
371}
372
374static int64_t 373static int64_t
375sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 374sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
376{ 375{
377 if (!left->branch_info->srcline_from) { 376 if (!left->branch_info->srcline_from)
378 struct map *map = left->branch_info->from.map; 377 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
379 if (!map) 378
380 left->branch_info->srcline_from = SRCLINE_UNKNOWN; 379 if (!right->branch_info->srcline_from)
381 else 380 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
382 left->branch_info->srcline_from = get_srcline(map->dso, 381
383 map__rip_2objdump(map,
384 left->branch_info->from.al_addr),
385 left->branch_info->from.sym,
386 true, true,
387 left->branch_info->from.al_addr);
388 }
389 if (!right->branch_info->srcline_from) {
390 struct map *map = right->branch_info->from.map;
391 if (!map)
392 right->branch_info->srcline_from = SRCLINE_UNKNOWN;
393 else
394 right->branch_info->srcline_from = get_srcline(map->dso,
395 map__rip_2objdump(map,
396 right->branch_info->from.al_addr),
397 right->branch_info->from.sym,
398 true, true,
399 right->branch_info->from.al_addr);
400 }
401 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 382 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
402} 383}
403 384
@@ -419,30 +400,12 @@ struct sort_entry sort_srcline_from = {
419static int64_t 400static int64_t
420sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 401sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
421{ 402{
422 if (!left->branch_info->srcline_to) { 403 if (!left->branch_info->srcline_to)
423 struct map *map = left->branch_info->to.map; 404 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
424 if (!map) 405
425 left->branch_info->srcline_to = SRCLINE_UNKNOWN; 406 if (!right->branch_info->srcline_to)
426 else 407 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
427 left->branch_info->srcline_to = get_srcline(map->dso, 408
428 map__rip_2objdump(map,
429 left->branch_info->to.al_addr),
430 left->branch_info->from.sym,
431 true, true,
432 left->branch_info->to.al_addr);
433 }
434 if (!right->branch_info->srcline_to) {
435 struct map *map = right->branch_info->to.map;
436 if (!map)
437 right->branch_info->srcline_to = SRCLINE_UNKNOWN;
438 else
439 right->branch_info->srcline_to = get_srcline(map->dso,
440 map__rip_2objdump(map,
441 right->branch_info->to.al_addr),
442 right->branch_info->to.sym,
443 true, true,
444 right->branch_info->to.al_addr);
445 }
446 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 409 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
447} 410}
448 411
@@ -1211,7 +1174,7 @@ static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1211 1174
1212 /* print [s] for shared data mmaps */ 1175 /* print [s] for shared data mmaps */
1213 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && 1176 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1214 map && (map->type == MAP__VARIABLE) && 1177 map && !(map->prot & PROT_EXEC) &&
1215 (map->flags & MAP_SHARED) && 1178 (map->flags & MAP_SHARED) &&
1216 (map->maj || map->min || map->ino || 1179 (map->maj || map->min || map->ino ||
1217 map->ino_generation)) 1180 map->ino_generation))
@@ -2582,7 +2545,7 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2582 if (sort__mode != SORT_MODE__MEMORY) 2545 if (sort__mode != SORT_MODE__MEMORY)
2583 return -EINVAL; 2546 return -EINVAL;
2584 2547
2585 if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0) 2548 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
2586 return -EINVAL; 2549 return -EINVAL;
2587 2550
2588 if (sd->entry == &sort_mem_daddr_sym) 2551 if (sd->entry == &sort_mem_daddr_sym)
@@ -2628,7 +2591,7 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str,
2628 if (*tok) { 2591 if (*tok) {
2629 ret = sort_dimension__add(list, tok, evlist, level); 2592 ret = sort_dimension__add(list, tok, evlist, level);
2630 if (ret == -EINVAL) { 2593 if (ret == -EINVAL) {
2631 if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok))) 2594 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
2632 pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); 2595 pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
2633 else 2596 else
2634 pr_err("Invalid --sort key: `%s'", tok); 2597 pr_err("Invalid --sort key: `%s'", tok);
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 035b62e2c60b..7cf2d5cc038e 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -151,6 +151,11 @@ struct hist_entry {
151 struct callchain_root callchain[0]; /* must be last member */ 151 struct callchain_root callchain[0]; /* must be last member */
152}; 152};
153 153
154static __pure inline bool hist_entry__has_callchains(struct hist_entry *he)
155{
156 return hists__has_callchains(he->hists);
157}
158
154static inline bool hist_entry__has_pairs(struct hist_entry *he) 159static inline bool hist_entry__has_pairs(struct hist_entry *he)
155{ 160{
156 return !list_empty(&he->pairs.node); 161 return !list_empty(&he->pairs.node);
@@ -186,13 +191,13 @@ static inline float hist_entry__get_percent_limit(struct hist_entry *he)
186static inline u64 cl_address(u64 address) 191static inline u64 cl_address(u64 address)
187{ 192{
188 /* return the cacheline of the address */ 193 /* return the cacheline of the address */
189 return (address & ~(cacheline_size - 1)); 194 return (address & ~(cacheline_size() - 1));
190} 195}
191 196
192static inline u64 cl_offset(u64 address) 197static inline u64 cl_offset(u64 address)
193{ 198{
194 /* return the cacheline of the address */ 199 /* return the cacheline of the address */
195 return (address & (cacheline_size - 1)); 200 return (address & (cacheline_size() - 1));
196} 201}
197 202
198enum sort_mode { 203enum sort_mode {
@@ -292,5 +297,5 @@ int64_t
292sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right); 297sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right);
293int64_t 298int64_t
294sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right); 299sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right);
295char *hist_entry__get_srcline(struct hist_entry *he); 300char *hist_entry__srcline(struct hist_entry *he);
296#endif /* __PERF_SORT_H */ 301#endif /* __PERF_SORT_H */
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index 3c21fd059b64..09d6746e6ec8 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -103,6 +103,7 @@ static struct symbol *new_inline_sym(struct dso *dso,
103 inline_sym = symbol__new(base_sym ? base_sym->start : 0, 103 inline_sym = symbol__new(base_sym ? base_sym->start : 0,
104 base_sym ? base_sym->end : 0, 104 base_sym ? base_sym->end : 0,
105 base_sym ? base_sym->binding : 0, 105 base_sym ? base_sym->binding : 0,
106 base_sym ? base_sym->type : 0,
106 funcname); 107 funcname);
107 if (inline_sym) 108 if (inline_sym)
108 inline_sym->inlined = 1; 109 inline_sym->inlined = 1;
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 8f56ba4fd258..36efb986f7fc 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -7,8 +7,7 @@
7#include "xyarray.h" 7#include "xyarray.h"
8#include "rblist.h" 8#include "rblist.h"
9 9
10struct stats 10struct stats {
11{
12 double n, mean, M2; 11 double n, mean, M2;
13 u64 max, min; 12 u64 max, min;
14}; 13};
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 2de770511e70..29770ea61768 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -114,16 +114,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym)
114 sym->st_shndx != SHN_ABS; 114 sym->st_shndx != SHN_ABS;
115} 115}
116 116
117static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type) 117static bool elf_sym__filter(GElf_Sym *sym)
118{ 118{
119 switch (type) { 119 return elf_sym__is_function(sym) || elf_sym__is_object(sym);
120 case MAP__FUNCTION:
121 return elf_sym__is_function(sym);
122 case MAP__VARIABLE:
123 return elf_sym__is_object(sym);
124 default:
125 return false;
126 }
127} 120}
128 121
129static inline const char *elf_sym__name(const GElf_Sym *sym, 122static inline const char *elf_sym__name(const GElf_Sym *sym,
@@ -150,17 +143,10 @@ static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
150 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL; 143 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
151} 144}
152 145
153static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs, 146static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs)
154 enum map_type type)
155{ 147{
156 switch (type) { 148 return elf_sec__is_text(shdr, secstrs) ||
157 case MAP__FUNCTION: 149 elf_sec__is_data(shdr, secstrs);
158 return elf_sec__is_text(shdr, secstrs);
159 case MAP__VARIABLE:
160 return elf_sec__is_data(shdr, secstrs);
161 default:
162 return false;
163 }
164} 150}
165 151
166static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) 152static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
@@ -256,7 +242,7 @@ static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
256 * And always look at the original dso, not at debuginfo packages, that 242 * And always look at the original dso, not at debuginfo packages, that
257 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). 243 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
258 */ 244 */
259int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map) 245int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
260{ 246{
261 uint32_t nr_rel_entries, idx; 247 uint32_t nr_rel_entries, idx;
262 GElf_Sym sym; 248 GElf_Sym sym;
@@ -364,12 +350,12 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *
364 free(demangled); 350 free(demangled);
365 351
366 f = symbol__new(plt_offset, plt_entry_size, 352 f = symbol__new(plt_offset, plt_entry_size,
367 STB_GLOBAL, sympltname); 353 STB_GLOBAL, STT_FUNC, sympltname);
368 if (!f) 354 if (!f)
369 goto out_elf_end; 355 goto out_elf_end;
370 356
371 plt_offset += plt_entry_size; 357 plt_offset += plt_entry_size;
372 symbols__insert(&dso->symbols[map->type], f); 358 symbols__insert(&dso->symbols, f);
373 ++nr; 359 ++nr;
374 } 360 }
375 } else if (shdr_rel_plt.sh_type == SHT_REL) { 361 } else if (shdr_rel_plt.sh_type == SHT_REL) {
@@ -390,12 +376,12 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *
390 free(demangled); 376 free(demangled);
391 377
392 f = symbol__new(plt_offset, plt_entry_size, 378 f = symbol__new(plt_offset, plt_entry_size,
393 STB_GLOBAL, sympltname); 379 STB_GLOBAL, STT_FUNC, sympltname);
394 if (!f) 380 if (!f)
395 goto out_elf_end; 381 goto out_elf_end;
396 382
397 plt_offset += plt_entry_size; 383 plt_offset += plt_entry_size;
398 symbols__insert(&dso->symbols[map->type], f); 384 symbols__insert(&dso->symbols, f);
399 ++nr; 385 ++nr;
400 } 386 }
401 } 387 }
@@ -811,6 +797,110 @@ static u64 ref_reloc(struct kmap *kmap)
811void __weak arch__sym_update(struct symbol *s __maybe_unused, 797void __weak arch__sym_update(struct symbol *s __maybe_unused,
812 GElf_Sym *sym __maybe_unused) { } 798 GElf_Sym *sym __maybe_unused) { }
813 799
/*
 * Handle one ELF symbol from a kernel or module image: optionally adjust
 * its value from a virtual address to a file offset, and make sure
 * *curr_mapp / *curr_dsop point at the map and dso this symbol belongs
 * to, creating a per-section map+dso (named "<short_name><section>")
 * when the symbol lives outside .text.
 *
 * Returns 0 on success (the caller then inserts the symbol into the
 * chosen dso), -1 on allocation failure.
 */
800static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
801 GElf_Sym *sym, GElf_Shdr *shdr,
802 struct map_groups *kmaps, struct kmap *kmap,
803 struct dso **curr_dsop, struct map **curr_mapp,
804 const char *section_name,
805 bool adjust_kernel_syms, bool kmodule, bool *remap_kernel)
806{
807 struct dso *curr_dso = *curr_dsop;
808 struct map *curr_map;
809 char dso_name[PATH_MAX];
810
811 /* Adjust symbol to map to file offset */
812 if (adjust_kernel_syms)
813 sym->st_value -= shdr->sh_addr - shdr->sh_offset;
814
	/* Already on the matching per-section dso: nothing more to do. */
815 if (strcmp(section_name, (curr_dso->short_name + dso->short_name_len)) == 0)
816 return 0;
817
818 if (strcmp(section_name, ".text") == 0) {
819 /*
820 * The initial kernel mapping is based on
821 * kallsyms and identity maps. Overwrite it to
822 * map to the kernel dso.
823 */
824 if (*remap_kernel && dso->kernel) {
825 *remap_kernel = false;
826 map->start = shdr->sh_addr + ref_reloc(kmap);
827 map->end = map->start + shdr->sh_size;
828 map->pgoff = shdr->sh_offset;
829 map->map_ip = map__map_ip;
830 map->unmap_ip = map__unmap_ip;
831 /* Ensure maps are correctly ordered */
832 if (kmaps) {
833 map__get(map);
834 map_groups__remove(kmaps, map);
835 map_groups__insert(kmaps, map);
836 map__put(map);
837 }
838 }
839
840 /*
841 * The initial module mapping is based on
842 * /proc/modules mapped to offset zero.
843 * Overwrite it to map to the module dso.
844 */
845 if (*remap_kernel && kmodule) {
846 *remap_kernel = false;
847 map->pgoff = shdr->sh_offset;
848 }
849
850 *curr_mapp = map;
851 *curr_dsop = dso;
852 return 0;
853 }
854
	/* Non-kernel map (no kmap): keep the symbol on the current dso. */
855 if (!kmap)
856 return 0;
857
	/* Per-section dso name, e.g. "[kernel].init.text". */
858 snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name);
859
860 curr_map = map_groups__find_by_name(kmaps, dso_name);
861 if (curr_map == NULL) {
862 u64 start = sym->st_value;
863
864 if (kmodule)
865 start += map->start + shdr->sh_offset;
866
867 curr_dso = dso__new(dso_name);
868 if (curr_dso == NULL)
869 return -1;
870 curr_dso->kernel = dso->kernel;
871 curr_dso->long_name = dso->long_name;
872 curr_dso->long_name_len = dso->long_name_len;
873 curr_map = map__new2(start, curr_dso);
	/* map__new2() took its own reference to curr_dso. */
874 dso__put(curr_dso);
875 if (curr_map == NULL)
876 return -1;
877
878 if (adjust_kernel_syms) {
879 curr_map->start = shdr->sh_addr + ref_reloc(kmap);
880 curr_map->end = curr_map->start + shdr->sh_size;
881 curr_map->pgoff = shdr->sh_offset;
882 } else {
883 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
884 }
885 curr_dso->symtab_type = dso->symtab_type;
886 map_groups__insert(kmaps, curr_map);
887 /*
888 * Add it before we drop the reference to curr_map, i.e. while
889 * we still are sure to have a reference to this DSO via
890 * *curr_map->dso.
891 */
892 dsos__add(&map->groups->machine->dsos, curr_dso);
893 /* kmaps already got it */
894 map__put(curr_map);
895 dso__set_loaded(curr_dso);
896 *curr_mapp = curr_map;
897 *curr_dsop = curr_dso;
898 } else
899 *curr_dsop = curr_map->dso;
900
901 return 0;
902}
903
814int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, 904int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
815 struct symsrc *runtime_ss, int kmodule) 905 struct symsrc *runtime_ss, int kmodule)
816{ 906{
@@ -844,7 +934,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
844 * have the wrong values for the dso maps, so remove them. 934 * have the wrong values for the dso maps, so remove them.
845 */ 935 */
846 if (kmodule && syms_ss->symtab) 936 if (kmodule && syms_ss->symtab)
847 symbols__delete(&dso->symbols[map->type]); 937 symbols__delete(&dso->symbols);
848 938
849 if (!syms_ss->symtab) { 939 if (!syms_ss->symtab) {
850 /* 940 /*
@@ -921,10 +1011,10 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
921 1011
922 dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap); 1012 dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
923 /* 1013 /*
924 * Initial kernel and module mappings do not map to the dso. For 1014 * Initial kernel and module mappings do not map to the dso.
925 * function mappings, flag the fixups. 1015 * Flag the fixups.
926 */ 1016 */
927 if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) { 1017 if (dso->kernel || kmodule) {
928 remap_kernel = true; 1018 remap_kernel = true;
929 adjust_kernel_syms = dso->adjust_symbols; 1019 adjust_kernel_syms = dso->adjust_symbols;
930 } 1020 }
@@ -936,7 +1026,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
936 const char *section_name; 1026 const char *section_name;
937 bool used_opd = false; 1027 bool used_opd = false;
938 1028
939 if (!is_label && !elf_sym__is_a(&sym, map->type)) 1029 if (!is_label && !elf_sym__filter(&sym))
940 continue; 1030 continue;
941 1031
942 /* Reject ARM ELF "mapping symbols": these aren't unique and 1032 /* Reject ARM ELF "mapping symbols": these aren't unique and
@@ -974,7 +1064,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
974 1064
975 gelf_getshdr(sec, &shdr); 1065 gelf_getshdr(sec, &shdr);
976 1066
977 if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type)) 1067 if (is_label && !elf_sec__filter(&shdr, secstrs))
978 continue; 1068 continue;
979 1069
980 section_name = elf_sec__name(&shdr, secstrs); 1070 section_name = elf_sec__name(&shdr, secstrs);
@@ -982,134 +1072,37 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
982 /* On ARM, symbols for thumb functions have 1 added to 1072 /* On ARM, symbols for thumb functions have 1 added to
983 * the symbol address as a flag - remove it */ 1073 * the symbol address as a flag - remove it */
984 if ((ehdr.e_machine == EM_ARM) && 1074 if ((ehdr.e_machine == EM_ARM) &&
985 (map->type == MAP__FUNCTION) && 1075 (GELF_ST_TYPE(sym.st_info) == STT_FUNC) &&
986 (sym.st_value & 1)) 1076 (sym.st_value & 1))
987 --sym.st_value; 1077 --sym.st_value;
988 1078
989 if (dso->kernel || kmodule) { 1079 if (dso->kernel || kmodule) {
990 char dso_name[PATH_MAX]; 1080 if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map,
991 1081 section_name, adjust_kernel_syms, kmodule, &remap_kernel))
992 /* Adjust symbol to map to file offset */ 1082 goto out_elf_end;
993 if (adjust_kernel_syms) 1083 } else if ((used_opd && runtime_ss->adjust_symbols) ||
994 sym.st_value -= shdr.sh_addr - shdr.sh_offset; 1084 (!used_opd && syms_ss->adjust_symbols)) {
995
996 if (strcmp(section_name,
997 (curr_dso->short_name +
998 dso->short_name_len)) == 0)
999 goto new_symbol;
1000
1001 if (strcmp(section_name, ".text") == 0) {
1002 /*
1003 * The initial kernel mapping is based on
1004 * kallsyms and identity maps. Overwrite it to
1005 * map to the kernel dso.
1006 */
1007 if (remap_kernel && dso->kernel) {
1008 remap_kernel = false;
1009 map->start = shdr.sh_addr +
1010 ref_reloc(kmap);
1011 map->end = map->start + shdr.sh_size;
1012 map->pgoff = shdr.sh_offset;
1013 map->map_ip = map__map_ip;
1014 map->unmap_ip = map__unmap_ip;
1015 /* Ensure maps are correctly ordered */
1016 if (kmaps) {
1017 map__get(map);
1018 map_groups__remove(kmaps, map);
1019 map_groups__insert(kmaps, map);
1020 map__put(map);
1021 }
1022 }
1023
1024 /*
1025 * The initial module mapping is based on
1026 * /proc/modules mapped to offset zero.
1027 * Overwrite it to map to the module dso.
1028 */
1029 if (remap_kernel && kmodule) {
1030 remap_kernel = false;
1031 map->pgoff = shdr.sh_offset;
1032 }
1033
1034 curr_map = map;
1035 curr_dso = dso;
1036 goto new_symbol;
1037 }
1038
1039 if (!kmap)
1040 goto new_symbol;
1041
1042 snprintf(dso_name, sizeof(dso_name),
1043 "%s%s", dso->short_name, section_name);
1044
1045 curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
1046 if (curr_map == NULL) {
1047 u64 start = sym.st_value;
1048
1049 if (kmodule)
1050 start += map->start + shdr.sh_offset;
1051
1052 curr_dso = dso__new(dso_name);
1053 if (curr_dso == NULL)
1054 goto out_elf_end;
1055 curr_dso->kernel = dso->kernel;
1056 curr_dso->long_name = dso->long_name;
1057 curr_dso->long_name_len = dso->long_name_len;
1058 curr_map = map__new2(start, curr_dso,
1059 map->type);
1060 dso__put(curr_dso);
1061 if (curr_map == NULL) {
1062 goto out_elf_end;
1063 }
1064 if (adjust_kernel_syms) {
1065 curr_map->start = shdr.sh_addr +
1066 ref_reloc(kmap);
1067 curr_map->end = curr_map->start +
1068 shdr.sh_size;
1069 curr_map->pgoff = shdr.sh_offset;
1070 } else {
1071 curr_map->map_ip = identity__map_ip;
1072 curr_map->unmap_ip = identity__map_ip;
1073 }
1074 curr_dso->symtab_type = dso->symtab_type;
1075 map_groups__insert(kmaps, curr_map);
1076 /*
1077 * Add it before we drop the referece to curr_map,
1078 * i.e. while we still are sure to have a reference
1079 * to this DSO via curr_map->dso.
1080 */
1081 dsos__add(&map->groups->machine->dsos, curr_dso);
1082 /* kmaps already got it */
1083 map__put(curr_map);
1084 dso__set_loaded(curr_dso, map->type);
1085 } else
1086 curr_dso = curr_map->dso;
1087
1088 goto new_symbol;
1089 }
1090
1091 if ((used_opd && runtime_ss->adjust_symbols)
1092 || (!used_opd && syms_ss->adjust_symbols)) {
1093 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " 1085 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1094 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__, 1086 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
1095 (u64)sym.st_value, (u64)shdr.sh_addr, 1087 (u64)sym.st_value, (u64)shdr.sh_addr,
1096 (u64)shdr.sh_offset); 1088 (u64)shdr.sh_offset);
1097 sym.st_value -= shdr.sh_addr - shdr.sh_offset; 1089 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1098 } 1090 }
1099new_symbol: 1091
1100 demangled = demangle_sym(dso, kmodule, elf_name); 1092 demangled = demangle_sym(dso, kmodule, elf_name);
1101 if (demangled != NULL) 1093 if (demangled != NULL)
1102 elf_name = demangled; 1094 elf_name = demangled;
1103 1095
1104 f = symbol__new(sym.st_value, sym.st_size, 1096 f = symbol__new(sym.st_value, sym.st_size,
1105 GELF_ST_BIND(sym.st_info), elf_name); 1097 GELF_ST_BIND(sym.st_info),
1098 GELF_ST_TYPE(sym.st_info), elf_name);
1106 free(demangled); 1099 free(demangled);
1107 if (!f) 1100 if (!f)
1108 goto out_elf_end; 1101 goto out_elf_end;
1109 1102
1110 arch__sym_update(f, &sym); 1103 arch__sym_update(f, &sym);
1111 1104
1112 __symbols__insert(&curr_dso->symbols[curr_map->type], f, dso->kernel); 1105 __symbols__insert(&curr_dso->symbols, f, dso->kernel);
1113 nr++; 1106 nr++;
1114 } 1107 }
1115 1108
@@ -1117,14 +1110,14 @@ new_symbol:
1117 * For misannotated, zeroed, ASM function sizes. 1110 * For misannotated, zeroed, ASM function sizes.
1118 */ 1111 */
1119 if (nr > 0) { 1112 if (nr > 0) {
1120 symbols__fixup_end(&dso->symbols[map->type]); 1113 symbols__fixup_end(&dso->symbols);
1121 symbols__fixup_duplicate(&dso->symbols[map->type]); 1114 symbols__fixup_duplicate(&dso->symbols);
1122 if (kmap) { 1115 if (kmap) {
1123 /* 1116 /*
1124 * We need to fixup this here too because we create new 1117 * We need to fixup this here too because we create new
1125 * maps here, for things like vsyscall sections. 1118 * maps here, for things like vsyscall sections.
1126 */ 1119 */
1127 __map_groups__fixup_end(kmaps, map->type); 1120 map_groups__fixup_end(kmaps);
1128 } 1121 }
1129 } 1122 }
1130 err = nr; 1123 err = nr;
@@ -1393,8 +1386,16 @@ static off_t kcore__write(struct kcore *kcore)
1393 1386
1394struct phdr_data { 1387struct phdr_data {
1395 off_t offset; 1388 off_t offset;
1389 off_t rel;
1396 u64 addr; 1390 u64 addr;
1397 u64 len; 1391 u64 len;
1392 struct list_head node;
1393 struct phdr_data *remaps;
1394};
1395
1396struct sym_data {
1397 u64 addr;
1398 struct list_head node;
1398}; 1399};
1399 1400
1400struct kcore_copy_info { 1401struct kcore_copy_info {
@@ -1404,16 +1405,78 @@ struct kcore_copy_info {
1404 u64 last_symbol; 1405 u64 last_symbol;
1405 u64 first_module; 1406 u64 first_module;
1406 u64 last_module_symbol; 1407 u64 last_module_symbol;
1407 struct phdr_data kernel_map; 1408 size_t phnum;
1408 struct phdr_data modules_map; 1409 struct list_head phdrs;
1410 struct list_head syms;
1409}; 1411};
1410 1412
1413#define kcore_copy__for_each_phdr(k, p) \
1414 list_for_each_entry((p), &(k)->phdrs, node)
1415
1416static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset)
1417{
1418 struct phdr_data *p = zalloc(sizeof(*p));
1419
1420 if (p) {
1421 p->addr = addr;
1422 p->len = len;
1423 p->offset = offset;
1424 }
1425
1426 return p;
1427}
1428
1429static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci,
1430 u64 addr, u64 len,
1431 off_t offset)
1432{
1433 struct phdr_data *p = phdr_data__new(addr, len, offset);
1434
1435 if (p)
1436 list_add_tail(&p->node, &kci->phdrs);
1437
1438 return p;
1439}
1440
1441static void kcore_copy__free_phdrs(struct kcore_copy_info *kci)
1442{
1443 struct phdr_data *p, *tmp;
1444
1445 list_for_each_entry_safe(p, tmp, &kci->phdrs, node) {
1446 list_del(&p->node);
1447 free(p);
1448 }
1449}
1450
1451static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci,
1452 u64 addr)
1453{
1454 struct sym_data *s = zalloc(sizeof(*s));
1455
1456 if (s) {
1457 s->addr = addr;
1458 list_add_tail(&s->node, &kci->syms);
1459 }
1460
1461 return s;
1462}
1463
1464static void kcore_copy__free_syms(struct kcore_copy_info *kci)
1465{
1466 struct sym_data *s, *tmp;
1467
1468 list_for_each_entry_safe(s, tmp, &kci->syms, node) {
1469 list_del(&s->node);
1470 free(s);
1471 }
1472}
1473
1411static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, 1474static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
1412 u64 start) 1475 u64 start)
1413{ 1476{
1414 struct kcore_copy_info *kci = arg; 1477 struct kcore_copy_info *kci = arg;
1415 1478
1416 if (!symbol_type__is_a(type, MAP__FUNCTION)) 1479 if (!kallsyms__is_function(type))
1417 return 0; 1480 return 0;
1418 1481
1419 if (strchr(name, '[')) { 1482 if (strchr(name, '[')) {
@@ -1438,6 +1501,9 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
1438 return 0; 1501 return 0;
1439 } 1502 }
1440 1503
1504 if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start))
1505 return -1;
1506
1441 return 0; 1507 return 0;
1442} 1508}
1443 1509
@@ -1487,27 +1553,39 @@ static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
1487 return 0; 1553 return 0;
1488} 1554}
1489 1555
1490static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff, 1556static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end,
1491 u64 s, u64 e) 1557 u64 pgoff, u64 s, u64 e)
1492{ 1558{
1493 if (p->addr || s < start || s >= end) 1559 u64 len, offset;
1494 return; 1560
1561 if (s < start || s >= end)
1562 return 0;
1495 1563
1496 p->addr = s; 1564 offset = (s - start) + pgoff;
1497 p->offset = (s - start) + pgoff; 1565 len = e < end ? e - s : end - s;
1498 p->len = e < end ? e - s : end - s; 1566
1567 return kcore_copy_info__addnew(kci, s, len, offset) ? 0 : -1;
1499} 1568}
1500 1569
1501static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data) 1570static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
1502{ 1571{
1503 struct kcore_copy_info *kci = data; 1572 struct kcore_copy_info *kci = data;
1504 u64 end = start + len; 1573 u64 end = start + len;
1574 struct sym_data *sdat;
1505 1575
1506 kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext, 1576 if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext))
1507 kci->etext); 1577 return -1;
1508 1578
1509 kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module, 1579 if (kcore_copy__map(kci, start, end, pgoff, kci->first_module,
1510 kci->last_module_symbol); 1580 kci->last_module_symbol))
1581 return -1;
1582
1583 list_for_each_entry(sdat, &kci->syms, node) {
1584 u64 s = round_down(sdat->addr, page_size);
1585
1586 if (kcore_copy__map(kci, start, end, pgoff, s, s + len))
1587 return -1;
1588 }
1511 1589
1512 return 0; 1590 return 0;
1513} 1591}
@@ -1520,6 +1598,64 @@ static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
1520 return 0; 1598 return 0;
1521} 1599}
1522 1600
1601static void kcore_copy__find_remaps(struct kcore_copy_info *kci)
1602{
1603 struct phdr_data *p, *k = NULL;
1604 u64 kend;
1605
1606 if (!kci->stext)
1607 return;
1608
1609 /* Find phdr that corresponds to the kernel map (contains stext) */
1610 kcore_copy__for_each_phdr(kci, p) {
1611 u64 pend = p->addr + p->len - 1;
1612
1613 if (p->addr <= kci->stext && pend >= kci->stext) {
1614 k = p;
1615 break;
1616 }
1617 }
1618
1619 if (!k)
1620 return;
1621
1622 kend = k->offset + k->len;
1623
1624 /* Find phdrs that remap the kernel */
1625 kcore_copy__for_each_phdr(kci, p) {
1626 u64 pend = p->offset + p->len;
1627
1628 if (p == k)
1629 continue;
1630
1631 if (p->offset >= k->offset && pend <= kend)
1632 p->remaps = k;
1633 }
1634}
1635
1636static void kcore_copy__layout(struct kcore_copy_info *kci)
1637{
1638 struct phdr_data *p;
1639 off_t rel = 0;
1640
1641 kcore_copy__find_remaps(kci);
1642
1643 kcore_copy__for_each_phdr(kci, p) {
1644 if (!p->remaps) {
1645 p->rel = rel;
1646 rel += p->len;
1647 }
1648 kci->phnum += 1;
1649 }
1650
1651 kcore_copy__for_each_phdr(kci, p) {
1652 struct phdr_data *k = p->remaps;
1653
1654 if (k)
1655 p->rel = p->offset - k->offset + k->rel;
1656 }
1657}
1658
1523static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, 1659static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
1524 Elf *elf) 1660 Elf *elf)
1525{ 1661{
@@ -1555,7 +1691,12 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
1555 if (kci->first_module && !kci->last_module_symbol) 1691 if (kci->first_module && !kci->last_module_symbol)
1556 return -1; 1692 return -1;
1557 1693
1558 return kcore_copy__read_maps(kci, elf); 1694 if (kcore_copy__read_maps(kci, elf))
1695 return -1;
1696
1697 kcore_copy__layout(kci);
1698
1699 return 0;
1559} 1700}
1560 1701
1561static int kcore_copy__copy_file(const char *from_dir, const char *to_dir, 1702static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
@@ -1678,12 +1819,15 @@ int kcore_copy(const char *from_dir, const char *to_dir)
1678{ 1819{
1679 struct kcore kcore; 1820 struct kcore kcore;
1680 struct kcore extract; 1821 struct kcore extract;
1681 size_t count = 2;
1682 int idx = 0, err = -1; 1822 int idx = 0, err = -1;
1683 off_t offset = page_size, sz, modules_offset = 0; 1823 off_t offset, sz;
1684 struct kcore_copy_info kci = { .stext = 0, }; 1824 struct kcore_copy_info kci = { .stext = 0, };
1685 char kcore_filename[PATH_MAX]; 1825 char kcore_filename[PATH_MAX];
1686 char extract_filename[PATH_MAX]; 1826 char extract_filename[PATH_MAX];
1827 struct phdr_data *p;
1828
1829 INIT_LIST_HEAD(&kci.phdrs);
1830 INIT_LIST_HEAD(&kci.syms);
1687 1831
1688 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms")) 1832 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
1689 return -1; 1833 return -1;
@@ -1703,20 +1847,17 @@ int kcore_copy(const char *from_dir, const char *to_dir)
1703 if (kcore__init(&extract, extract_filename, kcore.elfclass, false)) 1847 if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
1704 goto out_kcore_close; 1848 goto out_kcore_close;
1705 1849
1706 if (!kci.modules_map.addr) 1850 if (kcore__copy_hdr(&kcore, &extract, kci.phnum))
1707 count -= 1;
1708
1709 if (kcore__copy_hdr(&kcore, &extract, count))
1710 goto out_extract_close; 1851 goto out_extract_close;
1711 1852
1712 if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr, 1853 offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) +
1713 kci.kernel_map.len)) 1854 gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT);
1714 goto out_extract_close; 1855 offset = round_up(offset, page_size);
1856
1857 kcore_copy__for_each_phdr(&kci, p) {
1858 off_t offs = p->rel + offset;
1715 1859
1716 if (kci.modules_map.addr) { 1860 if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len))
1717 modules_offset = offset + kci.kernel_map.len;
1718 if (kcore__add_phdr(&extract, idx, modules_offset,
1719 kci.modules_map.addr, kci.modules_map.len))
1720 goto out_extract_close; 1861 goto out_extract_close;
1721 } 1862 }
1722 1863
@@ -1724,14 +1865,14 @@ int kcore_copy(const char *from_dir, const char *to_dir)
1724 if (sz < 0 || sz > offset) 1865 if (sz < 0 || sz > offset)
1725 goto out_extract_close; 1866 goto out_extract_close;
1726 1867
1727 if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset, 1868 kcore_copy__for_each_phdr(&kci, p) {
1728 kci.kernel_map.len)) 1869 off_t offs = p->rel + offset;
1729 goto out_extract_close;
1730 1870
1731 if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset, 1871 if (p->remaps)
1732 extract.fd, modules_offset, 1872 continue;
1733 kci.modules_map.len)) 1873 if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len))
1734 goto out_extract_close; 1874 goto out_extract_close;
1875 }
1735 1876
1736 if (kcore_copy__compare_file(from_dir, to_dir, "modules")) 1877 if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
1737 goto out_extract_close; 1878 goto out_extract_close;
@@ -1754,6 +1895,9 @@ out_unlink_kallsyms:
1754 if (err) 1895 if (err)
1755 kcore_copy__unlink(to_dir, "kallsyms"); 1896 kcore_copy__unlink(to_dir, "kallsyms");
1756 1897
1898 kcore_copy__free_phdrs(&kci);
1899 kcore_copy__free_syms(&kci);
1900
1757 return err; 1901 return err;
1758} 1902}
1759 1903
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index ff48d0d49584..7119df77dc0b 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -288,8 +288,7 @@ void symsrc__destroy(struct symsrc *ss)
288} 288}
289 289
290int dso__synthesize_plt_symbols(struct dso *dso __maybe_unused, 290int dso__synthesize_plt_symbols(struct dso *dso __maybe_unused,
291 struct symsrc *ss __maybe_unused, 291 struct symsrc *ss __maybe_unused)
292 struct map *map __maybe_unused)
293{ 292{
294 return 0; 293 return 0;
295} 294}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 1466814ebada..d188b7588152 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -5,6 +5,7 @@
5#include <stdio.h> 5#include <stdio.h>
6#include <string.h> 6#include <string.h>
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/mman.h>
8#include <sys/types.h> 9#include <sys/types.h>
9#include <sys/stat.h> 10#include <sys/stat.h>
10#include <sys/param.h> 11#include <sys/param.h>
@@ -39,7 +40,6 @@ char **vmlinux_path;
39struct symbol_conf symbol_conf = { 40struct symbol_conf symbol_conf = {
40 .use_modules = true, 41 .use_modules = true,
41 .try_vmlinux_path = true, 42 .try_vmlinux_path = true,
42 .annotate_src = true,
43 .demangle = true, 43 .demangle = true,
44 .demangle_kernel = false, 44 .demangle_kernel = false,
45 .cumulate_callchain = true, 45 .cumulate_callchain = true,
@@ -70,18 +70,10 @@ static enum dso_binary_type binary_type_symtab[] = {
70 70
71#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab) 71#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
72 72
73bool symbol_type__is_a(char symbol_type, enum map_type map_type) 73static bool symbol_type__filter(char symbol_type)
74{ 74{
75 symbol_type = toupper(symbol_type); 75 symbol_type = toupper(symbol_type);
76 76 return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
77 switch (map_type) {
78 case MAP__FUNCTION:
79 return symbol_type == 'T' || symbol_type == 'W';
80 case MAP__VARIABLE:
81 return symbol_type == 'D';
82 default:
83 return false;
84 }
85} 77}
86 78
87static int prefix_underscores_count(const char *str) 79static int prefix_underscores_count(const char *str)
@@ -228,9 +220,9 @@ void symbols__fixup_end(struct rb_root *symbols)
228 curr->end = roundup(curr->start, 4096) + 4096; 220 curr->end = roundup(curr->start, 4096) + 4096;
229} 221}
230 222
231void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) 223void map_groups__fixup_end(struct map_groups *mg)
232{ 224{
233 struct maps *maps = &mg->maps[type]; 225 struct maps *maps = &mg->maps;
234 struct map *next, *curr; 226 struct map *next, *curr;
235 227
236 down_write(&maps->lock); 228 down_write(&maps->lock);
@@ -256,7 +248,7 @@ out_unlock:
256 up_write(&maps->lock); 248 up_write(&maps->lock);
257} 249}
258 250
259struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) 251struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
260{ 252{
261 size_t namelen = strlen(name) + 1; 253 size_t namelen = strlen(name) + 1;
262 struct symbol *sym = calloc(1, (symbol_conf.priv_size + 254 struct symbol *sym = calloc(1, (symbol_conf.priv_size +
@@ -274,6 +266,7 @@ struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
274 266
275 sym->start = start; 267 sym->start = start;
276 sym->end = len ? start + len : start; 268 sym->end = len ? start + len : start;
269 sym->type = type;
277 sym->binding = binding; 270 sym->binding = binding;
278 sym->namelen = namelen - 1; 271 sym->namelen = namelen - 1;
279 272
@@ -484,45 +477,40 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
484 477
485void dso__reset_find_symbol_cache(struct dso *dso) 478void dso__reset_find_symbol_cache(struct dso *dso)
486{ 479{
487 enum map_type type; 480 dso->last_find_result.addr = 0;
488 481 dso->last_find_result.symbol = NULL;
489 for (type = MAP__FUNCTION; type <= MAP__VARIABLE; ++type) {
490 dso->last_find_result[type].addr = 0;
491 dso->last_find_result[type].symbol = NULL;
492 }
493} 482}
494 483
495void dso__insert_symbol(struct dso *dso, enum map_type type, struct symbol *sym) 484void dso__insert_symbol(struct dso *dso, struct symbol *sym)
496{ 485{
497 __symbols__insert(&dso->symbols[type], sym, dso->kernel); 486 __symbols__insert(&dso->symbols, sym, dso->kernel);
498 487
499 /* update the symbol cache if necessary */ 488 /* update the symbol cache if necessary */
500 if (dso->last_find_result[type].addr >= sym->start && 489 if (dso->last_find_result.addr >= sym->start &&
501 (dso->last_find_result[type].addr < sym->end || 490 (dso->last_find_result.addr < sym->end ||
502 sym->start == sym->end)) { 491 sym->start == sym->end)) {
503 dso->last_find_result[type].symbol = sym; 492 dso->last_find_result.symbol = sym;
504 } 493 }
505} 494}
506 495
507struct symbol *dso__find_symbol(struct dso *dso, 496struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
508 enum map_type type, u64 addr)
509{ 497{
510 if (dso->last_find_result[type].addr != addr || dso->last_find_result[type].symbol == NULL) { 498 if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
511 dso->last_find_result[type].addr = addr; 499 dso->last_find_result.addr = addr;
512 dso->last_find_result[type].symbol = symbols__find(&dso->symbols[type], addr); 500 dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
513 } 501 }
514 502
515 return dso->last_find_result[type].symbol; 503 return dso->last_find_result.symbol;
516} 504}
517 505
518struct symbol *dso__first_symbol(struct dso *dso, enum map_type type) 506struct symbol *dso__first_symbol(struct dso *dso)
519{ 507{
520 return symbols__first(&dso->symbols[type]); 508 return symbols__first(&dso->symbols);
521} 509}
522 510
523struct symbol *dso__last_symbol(struct dso *dso, enum map_type type) 511struct symbol *dso__last_symbol(struct dso *dso)
524{ 512{
525 return symbols__last(&dso->symbols[type]); 513 return symbols__last(&dso->symbols);
526} 514}
527 515
528struct symbol *dso__next_symbol(struct symbol *sym) 516struct symbol *dso__next_symbol(struct symbol *sym)
@@ -539,24 +527,22 @@ struct symbol *symbol__next_by_name(struct symbol *sym)
539} 527}
540 528
541 /* 529 /*
542 * Teturns first symbol that matched with @name. 530 * Returns first symbol that matched with @name.
543 */ 531 */
544struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, 532struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
545 const char *name)
546{ 533{
547 struct symbol *s = symbols__find_by_name(&dso->symbol_names[type], name, 534 struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
548 SYMBOL_TAG_INCLUDE__NONE); 535 SYMBOL_TAG_INCLUDE__NONE);
549 if (!s) 536 if (!s)
550 s = symbols__find_by_name(&dso->symbol_names[type], name, 537 s = symbols__find_by_name(&dso->symbol_names, name,
551 SYMBOL_TAG_INCLUDE__DEFAULT_ONLY); 538 SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
552 return s; 539 return s;
553} 540}
554 541
555void dso__sort_by_name(struct dso *dso, enum map_type type) 542void dso__sort_by_name(struct dso *dso)
556{ 543{
557 dso__set_sorted_by_name(dso, type); 544 dso__set_sorted_by_name(dso);
558 return symbols__sort_by_name(&dso->symbol_names[type], 545 return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
559 &dso->symbols[type]);
560} 546}
561 547
562int modules__parse(const char *filename, void *arg, 548int modules__parse(const char *filename, void *arg,
@@ -621,11 +607,6 @@ out:
621 return err; 607 return err;
622} 608}
623 609
624struct process_kallsyms_args {
625 struct map *map;
626 struct dso *dso;
627};
628
629/* 610/*
630 * These are symbols in the kernel image, so make sure that 611 * These are symbols in the kernel image, so make sure that
631 * sym is from a kernel DSO. 612 * sym is from a kernel DSO.
@@ -661,10 +642,10 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
661 char type, u64 start) 642 char type, u64 start)
662{ 643{
663 struct symbol *sym; 644 struct symbol *sym;
664 struct process_kallsyms_args *a = arg; 645 struct dso *dso = arg;
665 struct rb_root *root = &a->dso->symbols[a->map->type]; 646 struct rb_root *root = &dso->symbols;
666 647
667 if (!symbol_type__is_a(type, a->map->type)) 648 if (!symbol_type__filter(type))
668 return 0; 649 return 0;
669 650
670 /* 651 /*
@@ -672,7 +653,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
672 * symbols, setting length to 0, and rely on 653 * symbols, setting length to 0, and rely on
673 * symbols__fixup_end() to fix it up. 654 * symbols__fixup_end() to fix it up.
674 */ 655 */
675 sym = symbol__new(start, 0, kallsyms2elf_binding(type), name); 656 sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
676 if (sym == NULL) 657 if (sym == NULL)
677 return -ENOMEM; 658 return -ENOMEM;
678 /* 659 /*
@@ -689,21 +670,18 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
689 * so that we can in the next step set the symbol ->end address and then 670 * so that we can in the next step set the symbol ->end address and then
690 * call kernel_maps__split_kallsyms. 671 * call kernel_maps__split_kallsyms.
691 */ 672 */
692static int dso__load_all_kallsyms(struct dso *dso, const char *filename, 673static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
693 struct map *map)
694{ 674{
695 struct process_kallsyms_args args = { .map = map, .dso = dso, }; 675 return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
696 return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
697} 676}
698 677
699static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map) 678static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso)
700{ 679{
701 struct map_groups *kmaps = map__kmaps(map);
702 struct map *curr_map; 680 struct map *curr_map;
703 struct symbol *pos; 681 struct symbol *pos;
704 int count = 0; 682 int count = 0;
705 struct rb_root old_root = dso->symbols[map->type]; 683 struct rb_root old_root = dso->symbols;
706 struct rb_root *root = &dso->symbols[map->type]; 684 struct rb_root *root = &dso->symbols;
707 struct rb_node *next = rb_first(root); 685 struct rb_node *next = rb_first(root);
708 686
709 if (!kmaps) 687 if (!kmaps)
@@ -723,7 +701,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map)
723 if (module) 701 if (module)
724 *module = '\0'; 702 *module = '\0';
725 703
726 curr_map = map_groups__find(kmaps, map->type, pos->start); 704 curr_map = map_groups__find(kmaps, pos->start);
727 705
728 if (!curr_map) { 706 if (!curr_map) {
729 symbol__delete(pos); 707 symbol__delete(pos);
@@ -733,7 +711,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map)
733 pos->start -= curr_map->start - curr_map->pgoff; 711 pos->start -= curr_map->start - curr_map->pgoff;
734 if (pos->end) 712 if (pos->end)
735 pos->end -= curr_map->start - curr_map->pgoff; 713 pos->end -= curr_map->start - curr_map->pgoff;
736 symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); 714 symbols__insert(&curr_map->dso->symbols, pos);
737 ++count; 715 ++count;
738 } 716 }
739 717
@@ -748,22 +726,25 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map)
748 * kernel range is broken in several maps, named [kernel].N, as we don't have 726 * kernel range is broken in several maps, named [kernel].N, as we don't have
749 * the original ELF section names vmlinux have. 727 * the original ELF section names vmlinux have.
750 */ 728 */
751static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta) 729static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta,
730 struct map *initial_map)
752{ 731{
753 struct map_groups *kmaps = map__kmaps(map);
754 struct machine *machine; 732 struct machine *machine;
755 struct map *curr_map = map; 733 struct map *curr_map = initial_map;
756 struct symbol *pos; 734 struct symbol *pos;
757 int count = 0, moved = 0; 735 int count = 0, moved = 0;
758 struct rb_root *root = &dso->symbols[map->type]; 736 struct rb_root *root = &dso->symbols;
759 struct rb_node *next = rb_first(root); 737 struct rb_node *next = rb_first(root);
760 int kernel_range = 0; 738 int kernel_range = 0;
739 bool x86_64;
761 740
762 if (!kmaps) 741 if (!kmaps)
763 return -1; 742 return -1;
764 743
765 machine = kmaps->machine; 744 machine = kmaps->machine;
766 745
746 x86_64 = machine__is(machine, "x86_64");
747
767 while (next) { 748 while (next) {
768 char *module; 749 char *module;
769 750
@@ -778,7 +759,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
778 *module++ = '\0'; 759 *module++ = '\0';
779 760
780 if (strcmp(curr_map->dso->short_name, module)) { 761 if (strcmp(curr_map->dso->short_name, module)) {
781 if (curr_map != map && 762 if (curr_map != initial_map &&
782 dso->kernel == DSO_TYPE_GUEST_KERNEL && 763 dso->kernel == DSO_TYPE_GUEST_KERNEL &&
783 machine__is_default_guest(machine)) { 764 machine__is_default_guest(machine)) {
784 /* 765 /*
@@ -788,18 +769,16 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
788 * symbols are in its kmap. Mark it as 769 * symbols are in its kmap. Mark it as
789 * loaded. 770 * loaded.
790 */ 771 */
791 dso__set_loaded(curr_map->dso, 772 dso__set_loaded(curr_map->dso);
792 curr_map->type);
793 } 773 }
794 774
795 curr_map = map_groups__find_by_name(kmaps, 775 curr_map = map_groups__find_by_name(kmaps, module);
796 map->type, module);
797 if (curr_map == NULL) { 776 if (curr_map == NULL) {
798 pr_debug("%s/proc/{kallsyms,modules} " 777 pr_debug("%s/proc/{kallsyms,modules} "
799 "inconsistency while looking " 778 "inconsistency while looking "
800 "for \"%s\" module!\n", 779 "for \"%s\" module!\n",
801 machine->root_dir, module); 780 machine->root_dir, module);
802 curr_map = map; 781 curr_map = initial_map;
803 goto discard_symbol; 782 goto discard_symbol;
804 } 783 }
805 784
@@ -809,11 +788,21 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
809 } 788 }
810 /* 789 /*
811 * So that we look just like we get from .ko files, 790 * So that we look just like we get from .ko files,
812 * i.e. not prelinked, relative to map->start. 791 * i.e. not prelinked, relative to initial_map->start.
813 */ 792 */
814 pos->start = curr_map->map_ip(curr_map, pos->start); 793 pos->start = curr_map->map_ip(curr_map, pos->start);
815 pos->end = curr_map->map_ip(curr_map, pos->end); 794 pos->end = curr_map->map_ip(curr_map, pos->end);
816 } else if (curr_map != map) { 795 } else if (x86_64 && is_entry_trampoline(pos->name)) {
796 /*
797 * These symbols are not needed anymore since the
798 * trampoline maps refer to the text section and it's
799 * symbols instead. Avoid having to deal with
800 * relocations, and the assumption that the first symbol
801 * is the start of kernel text, by simply removing the
802 * symbols at this point.
803 */
804 goto discard_symbol;
805 } else if (curr_map != initial_map) {
817 char dso_name[PATH_MAX]; 806 char dso_name[PATH_MAX];
818 struct dso *ndso; 807 struct dso *ndso;
819 808
@@ -824,7 +813,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
824 } 813 }
825 814
826 if (count == 0) { 815 if (count == 0) {
827 curr_map = map; 816 curr_map = initial_map;
828 goto add_symbol; 817 goto add_symbol;
829 } 818 }
830 819
@@ -843,7 +832,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
843 832
844 ndso->kernel = dso->kernel; 833 ndso->kernel = dso->kernel;
845 834
846 curr_map = map__new2(pos->start, ndso, map->type); 835 curr_map = map__new2(pos->start, ndso);
847 if (curr_map == NULL) { 836 if (curr_map == NULL) {
848 dso__put(ndso); 837 dso__put(ndso);
849 return -1; 838 return -1;
@@ -858,9 +847,9 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
858 pos->end -= delta; 847 pos->end -= delta;
859 } 848 }
860add_symbol: 849add_symbol:
861 if (curr_map != map) { 850 if (curr_map != initial_map) {
862 rb_erase(&pos->rb_node, root); 851 rb_erase(&pos->rb_node, root);
863 symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); 852 symbols__insert(&curr_map->dso->symbols, pos);
864 ++moved; 853 ++moved;
865 } else 854 } else
866 ++count; 855 ++count;
@@ -871,10 +860,10 @@ discard_symbol:
871 symbol__delete(pos); 860 symbol__delete(pos);
872 } 861 }
873 862
874 if (curr_map != map && 863 if (curr_map != initial_map &&
875 dso->kernel == DSO_TYPE_GUEST_KERNEL && 864 dso->kernel == DSO_TYPE_GUEST_KERNEL &&
876 machine__is_default_guest(kmaps->machine)) { 865 machine__is_default_guest(kmaps->machine)) {
877 dso__set_loaded(curr_map->dso, curr_map->type); 866 dso__set_loaded(curr_map->dso);
878 } 867 }
879 868
880 return count + moved; 869 return count + moved;
@@ -1035,7 +1024,12 @@ out_delete_from:
1035 return ret; 1024 return ret;
1036} 1025}
1037 1026
1038static int do_validate_kcore_modules(const char *filename, struct map *map, 1027struct map *map_groups__first(struct map_groups *mg)
1028{
1029 return maps__first(&mg->maps);
1030}
1031
1032static int do_validate_kcore_modules(const char *filename,
1039 struct map_groups *kmaps) 1033 struct map_groups *kmaps)
1040{ 1034{
1041 struct rb_root modules = RB_ROOT; 1035 struct rb_root modules = RB_ROOT;
@@ -1046,13 +1040,12 @@ static int do_validate_kcore_modules(const char *filename, struct map *map,
1046 if (err) 1040 if (err)
1047 return err; 1041 return err;
1048 1042
1049 old_map = map_groups__first(kmaps, map->type); 1043 old_map = map_groups__first(kmaps);
1050 while (old_map) { 1044 while (old_map) {
1051 struct map *next = map_groups__next(old_map); 1045 struct map *next = map_groups__next(old_map);
1052 struct module_info *mi; 1046 struct module_info *mi;
1053 1047
1054 if (old_map == map || old_map->start == map->start) { 1048 if (!__map__is_kmodule(old_map)) {
1055 /* The kernel map */
1056 old_map = next; 1049 old_map = next;
1057 continue; 1050 continue;
1058 } 1051 }
@@ -1109,7 +1102,7 @@ static int validate_kcore_modules(const char *kallsyms_filename,
1109 kallsyms_filename)) 1102 kallsyms_filename))
1110 return -EINVAL; 1103 return -EINVAL;
1111 1104
1112 if (do_validate_kcore_modules(modules_filename, map, kmaps)) 1105 if (do_validate_kcore_modules(modules_filename, kmaps))
1113 return -EINVAL; 1106 return -EINVAL;
1114 1107
1115 return 0; 1108 return 0;
@@ -1138,7 +1131,6 @@ static int validate_kcore_addresses(const char *kallsyms_filename,
1138 1131
1139struct kcore_mapfn_data { 1132struct kcore_mapfn_data {
1140 struct dso *dso; 1133 struct dso *dso;
1141 enum map_type type;
1142 struct list_head maps; 1134 struct list_head maps;
1143}; 1135};
1144 1136
@@ -1147,7 +1139,7 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1147 struct kcore_mapfn_data *md = data; 1139 struct kcore_mapfn_data *md = data;
1148 struct map *map; 1140 struct map *map;
1149 1141
1150 map = map__new2(start, md->dso, md->type); 1142 map = map__new2(start, md->dso);
1151 if (map == NULL) 1143 if (map == NULL)
1152 return -ENOMEM; 1144 return -ENOMEM;
1153 1145
@@ -1163,13 +1155,13 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
1163 const char *kallsyms_filename) 1155 const char *kallsyms_filename)
1164{ 1156{
1165 struct map_groups *kmaps = map__kmaps(map); 1157 struct map_groups *kmaps = map__kmaps(map);
1166 struct machine *machine;
1167 struct kcore_mapfn_data md; 1158 struct kcore_mapfn_data md;
1168 struct map *old_map, *new_map, *replacement_map = NULL; 1159 struct map *old_map, *new_map, *replacement_map = NULL;
1160 struct machine *machine;
1169 bool is_64_bit; 1161 bool is_64_bit;
1170 int err, fd; 1162 int err, fd;
1171 char kcore_filename[PATH_MAX]; 1163 char kcore_filename[PATH_MAX];
1172 struct symbol *sym; 1164 u64 stext;
1173 1165
1174 if (!kmaps) 1166 if (!kmaps)
1175 return -EINVAL; 1167 return -EINVAL;
@@ -1177,7 +1169,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
1177 machine = kmaps->machine; 1169 machine = kmaps->machine;
1178 1170
1179 /* This function requires that the map is the kernel map */ 1171 /* This function requires that the map is the kernel map */
1180 if (map != machine->vmlinux_maps[map->type]) 1172 if (!__map__is_kernel(map))
1181 return -EINVAL; 1173 return -EINVAL;
1182 1174
1183 if (!filename_from_kallsyms_filename(kcore_filename, "kcore", 1175 if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
@@ -1189,7 +1181,6 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
1189 return -EINVAL; 1181 return -EINVAL;
1190 1182
1191 md.dso = dso; 1183 md.dso = dso;
1192 md.type = map->type;
1193 INIT_LIST_HEAD(&md.maps); 1184 INIT_LIST_HEAD(&md.maps);
1194 1185
1195 fd = open(kcore_filename, O_RDONLY); 1186 fd = open(kcore_filename, O_RDONLY);
@@ -1200,7 +1191,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
1200 } 1191 }
1201 1192
1202 /* Read new maps into temporary lists */ 1193 /* Read new maps into temporary lists */
1203 err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md, 1194 err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
1204 &is_64_bit); 1195 &is_64_bit);
1205 if (err) 1196 if (err)
1206 goto out_err; 1197 goto out_err;
@@ -1212,7 +1203,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
1212 } 1203 }
1213 1204
1214 /* Remove old maps */ 1205 /* Remove old maps */
1215 old_map = map_groups__first(kmaps, map->type); 1206 old_map = map_groups__first(kmaps);
1216 while (old_map) { 1207 while (old_map) {
1217 struct map *next = map_groups__next(old_map); 1208 struct map *next = map_groups__next(old_map);
1218 1209
@@ -1220,14 +1211,15 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
1220 map_groups__remove(kmaps, old_map); 1211 map_groups__remove(kmaps, old_map);
1221 old_map = next; 1212 old_map = next;
1222 } 1213 }
1214 machine->trampolines_mapped = false;
1223 1215
1224 /* Find the kernel map using the first symbol */ 1216 /* Find the kernel map using the '_stext' symbol */
1225 sym = dso__first_symbol(dso, map->type); 1217 if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
1226 list_for_each_entry(new_map, &md.maps, node) { 1218 list_for_each_entry(new_map, &md.maps, node) {
1227 if (sym && sym->start >= new_map->start && 1219 if (stext >= new_map->start && stext < new_map->end) {
1228 sym->start < new_map->end) { 1220 replacement_map = new_map;
1229 replacement_map = new_map; 1221 break;
1230 break; 1222 }
1231 } 1223 }
1232 } 1224 }
1233 1225
@@ -1256,6 +1248,19 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
1256 map__put(new_map); 1248 map__put(new_map);
1257 } 1249 }
1258 1250
1251 if (machine__is(machine, "x86_64")) {
1252 u64 addr;
1253
1254 /*
1255 * If one of the corresponding symbols is there, assume the
1256 * entry trampoline maps are too.
1257 */
1258 if (!kallsyms__get_function_start(kallsyms_filename,
1259 ENTRY_TRAMPOLINE_NAME,
1260 &addr))
1261 machine->trampolines_mapped = true;
1262 }
1263
1259 /* 1264 /*
1260 * Set the data type and long name so that kcore can be read via 1265 * Set the data type and long name so that kcore can be read via
1261 * dso__data_read_addr(). 1266 * dso__data_read_addr().
@@ -1268,7 +1273,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
1268 1273
1269 close(fd); 1274 close(fd);
1270 1275
1271 if (map->type == MAP__FUNCTION) 1276 if (map->prot & PROT_EXEC)
1272 pr_debug("Using %s for kernel object code\n", kcore_filename); 1277 pr_debug("Using %s for kernel object code\n", kcore_filename);
1273 else 1278 else
1274 pr_debug("Using %s for kernel data\n", kcore_filename); 1279 pr_debug("Using %s for kernel data\n", kcore_filename);
@@ -1289,14 +1294,10 @@ out_err:
1289 * If the kernel is relocated at boot time, kallsyms won't match. Compute the 1294 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
1290 * delta based on the relocation reference symbol. 1295 * delta based on the relocation reference symbol.
1291 */ 1296 */
1292static int kallsyms__delta(struct map *map, const char *filename, u64 *delta) 1297static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
1293{ 1298{
1294 struct kmap *kmap = map__kmap(map);
1295 u64 addr; 1299 u64 addr;
1296 1300
1297 if (!kmap)
1298 return -1;
1299
1300 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name) 1301 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1301 return 0; 1302 return 0;
1302 1303
@@ -1310,19 +1311,23 @@ static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
1310int __dso__load_kallsyms(struct dso *dso, const char *filename, 1311int __dso__load_kallsyms(struct dso *dso, const char *filename,
1311 struct map *map, bool no_kcore) 1312 struct map *map, bool no_kcore)
1312{ 1313{
1314 struct kmap *kmap = map__kmap(map);
1313 u64 delta = 0; 1315 u64 delta = 0;
1314 1316
1315 if (symbol__restricted_filename(filename, "/proc/kallsyms")) 1317 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1316 return -1; 1318 return -1;
1317 1319
1318 if (dso__load_all_kallsyms(dso, filename, map) < 0) 1320 if (!kmap || !kmap->kmaps)
1319 return -1; 1321 return -1;
1320 1322
1321 if (kallsyms__delta(map, filename, &delta)) 1323 if (dso__load_all_kallsyms(dso, filename) < 0)
1322 return -1; 1324 return -1;
1323 1325
1324 symbols__fixup_end(&dso->symbols[map->type]); 1326 if (kallsyms__delta(kmap, filename, &delta))
1325 symbols__fixup_duplicate(&dso->symbols[map->type]); 1327 return -1;
1328
1329 symbols__fixup_end(&dso->symbols);
1330 symbols__fixup_duplicate(&dso->symbols);
1326 1331
1327 if (dso->kernel == DSO_TYPE_GUEST_KERNEL) 1332 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1328 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; 1333 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
@@ -1330,9 +1335,9 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename,
1330 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS; 1335 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1331 1336
1332 if (!no_kcore && !dso__load_kcore(dso, map, filename)) 1337 if (!no_kcore && !dso__load_kcore(dso, map, filename))
1333 return dso__split_kallsyms_for_kcore(dso, map); 1338 return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso);
1334 else 1339 else
1335 return dso__split_kallsyms(dso, map, delta); 1340 return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map);
1336} 1341}
1337 1342
1338int dso__load_kallsyms(struct dso *dso, const char *filename, 1343int dso__load_kallsyms(struct dso *dso, const char *filename,
@@ -1341,8 +1346,7 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
1341 return __dso__load_kallsyms(dso, filename, map, false); 1346 return __dso__load_kallsyms(dso, filename, map, false);
1342} 1347}
1343 1348
1344static int dso__load_perf_map(const char *map_path, struct dso *dso, 1349static int dso__load_perf_map(const char *map_path, struct dso *dso)
1345 struct map *map)
1346{ 1350{
1347 char *line = NULL; 1351 char *line = NULL;
1348 size_t n; 1352 size_t n;
@@ -1379,12 +1383,12 @@ static int dso__load_perf_map(const char *map_path, struct dso *dso,
1379 if (len + 2 >= line_len) 1383 if (len + 2 >= line_len)
1380 continue; 1384 continue;
1381 1385
1382 sym = symbol__new(start, size, STB_GLOBAL, line + len); 1386 sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);
1383 1387
1384 if (sym == NULL) 1388 if (sym == NULL)
1385 goto out_delete_line; 1389 goto out_delete_line;
1386 1390
1387 symbols__insert(&dso->symbols[map->type], sym); 1391 symbols__insert(&dso->symbols, sym);
1388 nr_syms++; 1392 nr_syms++;
1389 } 1393 }
1390 1394
@@ -1509,25 +1513,27 @@ int dso__load(struct dso *dso, struct map *map)
1509 pthread_mutex_lock(&dso->lock); 1513 pthread_mutex_lock(&dso->lock);
1510 1514
1511 /* check again under the dso->lock */ 1515 /* check again under the dso->lock */
1512 if (dso__loaded(dso, map->type)) { 1516 if (dso__loaded(dso)) {
1513 ret = 1; 1517 ret = 1;
1514 goto out; 1518 goto out;
1515 } 1519 }
1516 1520
1521 if (map->groups && map->groups->machine)
1522 machine = map->groups->machine;
1523 else
1524 machine = NULL;
1525
1517 if (dso->kernel) { 1526 if (dso->kernel) {
1518 if (dso->kernel == DSO_TYPE_KERNEL) 1527 if (dso->kernel == DSO_TYPE_KERNEL)
1519 ret = dso__load_kernel_sym(dso, map); 1528 ret = dso__load_kernel_sym(dso, map);
1520 else if (dso->kernel == DSO_TYPE_GUEST_KERNEL) 1529 else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1521 ret = dso__load_guest_kernel_sym(dso, map); 1530 ret = dso__load_guest_kernel_sym(dso, map);
1522 1531
1532 if (machine__is(machine, "x86_64"))
1533 machine__map_x86_64_entry_trampolines(machine, dso);
1523 goto out; 1534 goto out;
1524 } 1535 }
1525 1536
1526 if (map->groups && map->groups->machine)
1527 machine = map->groups->machine;
1528 else
1529 machine = NULL;
1530
1531 dso->adjust_symbols = 0; 1537 dso->adjust_symbols = 0;
1532 1538
1533 if (perfmap) { 1539 if (perfmap) {
@@ -1542,7 +1548,7 @@ int dso__load(struct dso *dso, struct map *map)
1542 goto out; 1548 goto out;
1543 } 1549 }
1544 1550
1545 ret = dso__load_perf_map(map_path, dso, map); 1551 ret = dso__load_perf_map(map_path, dso);
1546 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT : 1552 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1547 DSO_BINARY_TYPE__NOT_FOUND; 1553 DSO_BINARY_TYPE__NOT_FOUND;
1548 goto out; 1554 goto out;
@@ -1651,7 +1657,7 @@ int dso__load(struct dso *dso, struct map *map)
1651 if (ret > 0) { 1657 if (ret > 0) {
1652 int nr_plt; 1658 int nr_plt;
1653 1659
1654 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map); 1660 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
1655 if (nr_plt > 0) 1661 if (nr_plt > 0)
1656 ret += nr_plt; 1662 ret += nr_plt;
1657 } 1663 }
@@ -1663,17 +1669,16 @@ out_free:
1663 if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) 1669 if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1664 ret = 0; 1670 ret = 0;
1665out: 1671out:
1666 dso__set_loaded(dso, map->type); 1672 dso__set_loaded(dso);
1667 pthread_mutex_unlock(&dso->lock); 1673 pthread_mutex_unlock(&dso->lock);
1668 nsinfo__mountns_exit(&nsc); 1674 nsinfo__mountns_exit(&nsc);
1669 1675
1670 return ret; 1676 return ret;
1671} 1677}
1672 1678
1673struct map *map_groups__find_by_name(struct map_groups *mg, 1679struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
1674 enum map_type type, const char *name)
1675{ 1680{
1676 struct maps *maps = &mg->maps[type]; 1681 struct maps *maps = &mg->maps;
1677 struct map *map; 1682 struct map *map;
1678 1683
1679 down_read(&maps->lock); 1684 down_read(&maps->lock);
@@ -1720,7 +1725,7 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
1720 else 1725 else
1721 dso->binary_type = DSO_BINARY_TYPE__VMLINUX; 1726 dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
1722 dso__set_long_name(dso, vmlinux, vmlinux_allocated); 1727 dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1723 dso__set_loaded(dso, map->type); 1728 dso__set_loaded(dso);
1724 pr_debug("Using %s for symbols\n", symfs_vmlinux); 1729 pr_debug("Using %s for symbols\n", symfs_vmlinux);
1725 } 1730 }
1726 1731
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 70c16741f50a..f25fae4b5743 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -57,7 +57,8 @@ struct symbol {
57 u64 start; 57 u64 start;
58 u64 end; 58 u64 end;
59 u16 namelen; 59 u16 namelen;
60 u8 binding; 60 u8 type:4;
61 u8 binding:4;
61 u8 idle:1; 62 u8 idle:1;
62 u8 ignore:1; 63 u8 ignore:1;
63 u8 inlined:1; 64 u8 inlined:1;
@@ -89,7 +90,6 @@ struct intlist;
89 90
90struct symbol_conf { 91struct symbol_conf {
91 unsigned short priv_size; 92 unsigned short priv_size;
92 unsigned short nr_events;
93 bool try_vmlinux_path, 93 bool try_vmlinux_path,
94 init_annotation, 94 init_annotation,
95 force, 95 force,
@@ -108,8 +108,6 @@ struct symbol_conf {
108 show_cpu_utilization, 108 show_cpu_utilization,
109 initialized, 109 initialized,
110 kptr_restrict, 110 kptr_restrict,
111 annotate_asm_raw,
112 annotate_src,
113 event_group, 111 event_group,
114 demangle, 112 demangle,
115 demangle_kernel, 113 demangle_kernel,
@@ -259,17 +257,16 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
259 bool no_kcore); 257 bool no_kcore);
260int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map); 258int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map);
261 259
262void dso__insert_symbol(struct dso *dso, enum map_type type, 260void dso__insert_symbol(struct dso *dso,
263 struct symbol *sym); 261 struct symbol *sym);
264 262
265struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, 263struct symbol *dso__find_symbol(struct dso *dso, u64 addr);
266 u64 addr); 264struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name);
267struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, 265
268 const char *name);
269struct symbol *symbol__next_by_name(struct symbol *sym); 266struct symbol *symbol__next_by_name(struct symbol *sym);
270 267
271struct symbol *dso__first_symbol(struct dso *dso, enum map_type type); 268struct symbol *dso__first_symbol(struct dso *dso);
272struct symbol *dso__last_symbol(struct dso *dso, enum map_type type); 269struct symbol *dso__last_symbol(struct dso *dso);
273struct symbol *dso__next_symbol(struct symbol *sym); 270struct symbol *dso__next_symbol(struct symbol *sym);
274 271
275enum dso_type dso__type_fd(int fd); 272enum dso_type dso__type_fd(int fd);
@@ -288,7 +285,7 @@ void symbol__exit(void);
288void symbol__elf_init(void); 285void symbol__elf_init(void);
289int symbol__annotation_init(void); 286int symbol__annotation_init(void);
290 287
291struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name); 288struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name);
292size_t __symbol__fprintf_symname_offs(const struct symbol *sym, 289size_t __symbol__fprintf_symname_offs(const struct symbol *sym,
293 const struct addr_location *al, 290 const struct addr_location *al,
294 bool unknown_as_addr, 291 bool unknown_as_addr,
@@ -300,7 +297,6 @@ size_t __symbol__fprintf_symname(const struct symbol *sym,
300 bool unknown_as_addr, FILE *fp); 297 bool unknown_as_addr, FILE *fp);
301size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); 298size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp);
302size_t symbol__fprintf(struct symbol *sym, FILE *fp); 299size_t symbol__fprintf(struct symbol *sym, FILE *fp);
303bool symbol_type__is_a(char symbol_type, enum map_type map_type);
304bool symbol__restricted_filename(const char *filename, 300bool symbol__restricted_filename(const char *filename,
305 const char *restricted_filename); 301 const char *restricted_filename);
306int symbol__config_symfs(const struct option *opt __maybe_unused, 302int symbol__config_symfs(const struct option *opt __maybe_unused,
@@ -308,8 +304,7 @@ int symbol__config_symfs(const struct option *opt __maybe_unused,
308 304
309int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, 305int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
310 struct symsrc *runtime_ss, int kmodule); 306 struct symsrc *runtime_ss, int kmodule);
311int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, 307int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss);
312 struct map *map);
313 308
314char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name); 309char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name);
315 310
@@ -317,7 +312,7 @@ void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
317void symbols__insert(struct rb_root *symbols, struct symbol *sym); 312void symbols__insert(struct rb_root *symbols, struct symbol *sym);
318void symbols__fixup_duplicate(struct rb_root *symbols); 313void symbols__fixup_duplicate(struct rb_root *symbols);
319void symbols__fixup_end(struct rb_root *symbols); 314void symbols__fixup_end(struct rb_root *symbols);
320void __map_groups__fixup_end(struct map_groups *mg, enum map_type type); 315void map_groups__fixup_end(struct map_groups *mg);
321 316
322typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data); 317typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
323int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data, 318int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
index 6dd2cb88ccbe..ed0205cc7942 100644
--- a/tools/perf/util/symbol_fprintf.c
+++ b/tools/perf/util/symbol_fprintf.c
@@ -58,13 +58,13 @@ size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
58} 58}
59 59
60size_t dso__fprintf_symbols_by_name(struct dso *dso, 60size_t dso__fprintf_symbols_by_name(struct dso *dso,
61 enum map_type type, FILE *fp) 61 FILE *fp)
62{ 62{
63 size_t ret = 0; 63 size_t ret = 0;
64 struct rb_node *nd; 64 struct rb_node *nd;
65 struct symbol_name_rb_node *pos; 65 struct symbol_name_rb_node *pos;
66 66
67 for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) { 67 for (nd = rb_first(&dso->symbol_names); nd; nd = rb_next(nd)) {
68 pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); 68 pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
69 fprintf(fp, "%s\n", pos->sym.name); 69 fprintf(fp, "%s\n", pos->sym.name);
70 } 70 }
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 68b65b10579b..2048d393ece6 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -302,23 +302,20 @@ int thread__insert_map(struct thread *thread, struct map *map)
302static int __thread__prepare_access(struct thread *thread) 302static int __thread__prepare_access(struct thread *thread)
303{ 303{
304 bool initialized = false; 304 bool initialized = false;
305 int i, err = 0; 305 int err = 0;
306 306 struct maps *maps = &thread->mg->maps;
307 for (i = 0; i < MAP__NR_TYPES; ++i) { 307 struct map *map;
308 struct maps *maps = &thread->mg->maps[i];
309 struct map *map;
310 308
311 down_read(&maps->lock); 309 down_read(&maps->lock);
312 310
313 for (map = maps__first(maps); map; map = map__next(map)) { 311 for (map = maps__first(maps); map; map = map__next(map)) {
314 err = unwind__prepare_access(thread, map, &initialized); 312 err = unwind__prepare_access(thread, map, &initialized);
315 if (err || initialized) 313 if (err || initialized)
316 break; 314 break;
317 }
318
319 up_read(&maps->lock);
320 } 315 }
321 316
317 up_read(&maps->lock);
318
322 return err; 319 return err;
323} 320}
324 321
@@ -335,8 +332,6 @@ static int thread__prepare_access(struct thread *thread)
335static int thread__clone_map_groups(struct thread *thread, 332static int thread__clone_map_groups(struct thread *thread,
336 struct thread *parent) 333 struct thread *parent)
337{ 334{
338 int i;
339
340 /* This is new thread, we share map groups for process. */ 335 /* This is new thread, we share map groups for process. */
341 if (thread->pid_ == parent->pid_) 336 if (thread->pid_ == parent->pid_)
342 return thread__prepare_access(thread); 337 return thread__prepare_access(thread);
@@ -348,9 +343,8 @@ static int thread__clone_map_groups(struct thread *thread,
348 } 343 }
349 344
350 /* But this one is new process, copy maps. */ 345 /* But this one is new process, copy maps. */
351 for (i = 0; i < MAP__NR_TYPES; ++i) 346 if (map_groups__clone(thread, parent->mg) < 0)
352 if (map_groups__clone(thread, parent->mg, i) < 0) 347 return -ENOMEM;
353 return -ENOMEM;
354 348
355 return 0; 349 return 0;
356} 350}
@@ -371,8 +365,7 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
371 return thread__clone_map_groups(thread, parent); 365 return thread__clone_map_groups(thread, parent);
372} 366}
373 367
374void thread__find_cpumode_addr_location(struct thread *thread, 368void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
375 enum map_type type, u64 addr,
376 struct addr_location *al) 369 struct addr_location *al)
377{ 370{
378 size_t i; 371 size_t i;
@@ -384,7 +377,7 @@ void thread__find_cpumode_addr_location(struct thread *thread,
384 }; 377 };
385 378
386 for (i = 0; i < ARRAY_SIZE(cpumodes); i++) { 379 for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
387 thread__find_addr_location(thread, cpumodes[i], type, addr, al); 380 thread__find_symbol(thread, cpumodes[i], addr, al);
388 if (al->map) 381 if (al->map)
389 break; 382 break;
390 } 383 }
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 14d44c3235b8..07606aa6998d 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -92,16 +92,13 @@ size_t thread__fprintf(struct thread *thread, FILE *fp);
92 92
93struct thread *thread__main_thread(struct machine *machine, struct thread *thread); 93struct thread *thread__main_thread(struct machine *machine, struct thread *thread);
94 94
95void thread__find_addr_map(struct thread *thread, 95struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
96 u8 cpumode, enum map_type type, u64 addr, 96 struct addr_location *al);
97 struct addr_location *al);
98 97
99void thread__find_addr_location(struct thread *thread, 98struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
100 u8 cpumode, enum map_type type, u64 addr, 99 u64 addr, struct addr_location *al);
101 struct addr_location *al);
102 100
103void thread__find_cpumode_addr_location(struct thread *thread, 101void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
104 enum map_type type, u64 addr,
105 struct addr_location *al); 102 struct addr_location *al);
106 103
107static inline void *thread__priv(struct thread *thread) 104static inline void *thread__priv(struct thread *thread)
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 9892323cdd7c..9add1f72ce95 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -3,6 +3,7 @@
3#define __PERF_TOP_H 1 3#define __PERF_TOP_H 1
4 4
5#include "tool.h" 5#include "tool.h"
6#include "annotate.h"
6#include <linux/types.h> 7#include <linux/types.h>
7#include <stddef.h> 8#include <stddef.h>
8#include <stdbool.h> 9#include <stdbool.h>
@@ -16,6 +17,7 @@ struct perf_top {
16 struct perf_tool tool; 17 struct perf_tool tool;
17 struct perf_evlist *evlist; 18 struct perf_evlist *evlist;
18 struct record_opts record_opts; 19 struct record_opts record_opts;
20 struct annotation_options annotation_opts;
19 /* 21 /*
20 * Symbols will be added here in perf_event__process_sample and will 22 * Symbols will be added here in perf_event__process_sample and will
21 * get out after decayed. 23 * get out after decayed.
@@ -35,7 +37,6 @@ struct perf_top {
35 struct perf_session *session; 37 struct perf_session *session;
36 struct winsize winsize; 38 struct winsize winsize;
37 int realtime_prio; 39 int realtime_prio;
38 int sym_pcnt_filter;
39 const char *sym_filter; 40 const char *sym_filter;
40 float min_percent; 41 float min_percent;
41 unsigned int nr_threads_synthesize; 42 unsigned int nr_threads_synthesize;
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index d7f2113462fb..c85d0d1a65ed 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -103,11 +103,10 @@ out:
103 103
104static int record_header_files(void) 104static int record_header_files(void)
105{ 105{
106 char *path; 106 char *path = get_events_file("header_page");
107 struct stat st; 107 struct stat st;
108 int err = -EIO; 108 int err = -EIO;
109 109
110 path = get_tracing_file("events/header_page");
111 if (!path) { 110 if (!path) {
112 pr_debug("can't get tracing/events/header_page"); 111 pr_debug("can't get tracing/events/header_page");
113 return -ENOMEM; 112 return -ENOMEM;
@@ -128,9 +127,9 @@ static int record_header_files(void)
128 goto out; 127 goto out;
129 } 128 }
130 129
131 put_tracing_file(path); 130 put_events_file(path);
132 131
133 path = get_tracing_file("events/header_event"); 132 path = get_events_file("header_event");
134 if (!path) { 133 if (!path) {
135 pr_debug("can't get tracing/events/header_event"); 134 pr_debug("can't get tracing/events/header_event");
136 err = -ENOMEM; 135 err = -ENOMEM;
@@ -154,7 +153,7 @@ static int record_header_files(void)
154 153
155 err = 0; 154 err = 0;
156out: 155out:
157 put_tracing_file(path); 156 put_events_file(path);
158 return err; 157 return err;
159} 158}
160 159
@@ -243,7 +242,7 @@ static int record_ftrace_files(struct tracepoint_path *tps)
243 char *path; 242 char *path;
244 int ret; 243 int ret;
245 244
246 path = get_tracing_file("events/ftrace"); 245 path = get_events_file("ftrace");
247 if (!path) { 246 if (!path) {
248 pr_debug("can't get tracing/events/ftrace"); 247 pr_debug("can't get tracing/events/ftrace");
249 return -ENOMEM; 248 return -ENOMEM;
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
index 16a776371d03..1aa368603268 100644
--- a/tools/perf/util/trace-event.c
+++ b/tools/perf/util/trace-event.c
@@ -75,6 +75,7 @@ void trace_event__cleanup(struct trace_event *t)
75static struct event_format* 75static struct event_format*
76tp_format(const char *sys, const char *name) 76tp_format(const char *sys, const char *name)
77{ 77{
78 char *tp_dir = get_events_file(sys);
78 struct pevent *pevent = tevent.pevent; 79 struct pevent *pevent = tevent.pevent;
79 struct event_format *event = NULL; 80 struct event_format *event = NULL;
80 char path[PATH_MAX]; 81 char path[PATH_MAX];
@@ -82,8 +83,11 @@ tp_format(const char *sys, const char *name)
82 char *data; 83 char *data;
83 int err; 84 int err;
84 85
85 scnprintf(path, PATH_MAX, "%s/%s/%s/format", 86 if (!tp_dir)
86 tracing_events_path, sys, name); 87 return ERR_PTR(-errno);
88
89 scnprintf(path, PATH_MAX, "%s/%s/format", tp_dir, name);
90 put_events_file(tp_dir);
87 91
88 err = filename__read_str(path, &data, &size); 92 err = filename__read_str(path, &data, &size);
89 if (err) 93 if (err)
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 7bdd239c795c..538db4e5d1e6 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -28,10 +28,11 @@ static int __report_module(struct addr_location *al, u64 ip,
28{ 28{
29 Dwfl_Module *mod; 29 Dwfl_Module *mod;
30 struct dso *dso = NULL; 30 struct dso *dso = NULL;
31 31 /*
32 thread__find_addr_location(ui->thread, 32 * Some callers will use al->sym, so we can't just use the
33 PERF_RECORD_MISC_USER, 33 * cheaper thread__find_map() here.
34 MAP__FUNCTION, ip, al); 34 */
35 thread__find_symbol(ui->thread, PERF_RECORD_MISC_USER, ip, al);
35 36
36 if (al->map) 37 if (al->map)
37 dso = al->map->dso; 38 dso = al->map->dso;
@@ -103,19 +104,7 @@ static int access_dso_mem(struct unwind_info *ui, Dwarf_Addr addr,
103 struct addr_location al; 104 struct addr_location al;
104 ssize_t size; 105 ssize_t size;
105 106
106 thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER, 107 if (!thread__find_map(ui->thread, PERF_RECORD_MISC_USER, addr, &al)) {
107 MAP__FUNCTION, addr, &al);
108 if (!al.map) {
109 /*
110 * We've seen cases (softice) where DWARF unwinder went
111 * through non executable mmaps, which we need to lookup
112 * in MAP__VARIABLE tree.
113 */
114 thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
115 MAP__VARIABLE, addr, &al);
116 }
117
118 if (!al.map) {
119 pr_debug("unwind: no map for %lx\n", (unsigned long)addr); 108 pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
120 return -1; 109 return -1;
121 } 110 }
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index af873044d33a..6a11bc7e6b27 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -366,19 +366,7 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
366static struct map *find_map(unw_word_t ip, struct unwind_info *ui) 366static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
367{ 367{
368 struct addr_location al; 368 struct addr_location al;
369 369 return thread__find_map(ui->thread, PERF_RECORD_MISC_USER, ip, &al);
370 thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
371 MAP__FUNCTION, ip, &al);
372 if (!al.map) {
373 /*
374 * We've seen cases (softice) where DWARF unwinder went
375 * through non executable mmaps, which we need to lookup
376 * in MAP__VARIABLE tree.
377 */
378 thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
379 MAP__VARIABLE, ip, &al);
380 }
381 return al.map;
382} 370}
383 371
384static int 372static int
@@ -586,12 +574,9 @@ static int entry(u64 ip, struct thread *thread,
586 struct unwind_entry e; 574 struct unwind_entry e;
587 struct addr_location al; 575 struct addr_location al;
588 576
589 thread__find_addr_location(thread, PERF_RECORD_MISC_USER, 577 e.sym = thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
590 MAP__FUNCTION, ip, &al);
591
592 e.ip = al.addr; 578 e.ip = al.addr;
593 e.map = al.map; 579 e.map = al.map;
594 e.sym = al.sym;
595 580
596 pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n", 581 pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
597 al.sym ? al.sym->name : "''", 582 al.sym ? al.sym->name : "''",
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 1019bbc5dbd8..eac5b858a371 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -38,11 +38,43 @@ void perf_set_multithreaded(void)
38} 38}
39 39
40unsigned int page_size; 40unsigned int page_size;
41int cacheline_size; 41
42#ifdef _SC_LEVEL1_DCACHE_LINESIZE
43#define cache_line_size(cacheline_sizep) *cacheline_sizep = sysconf(_SC_LEVEL1_DCACHE_LINESIZE)
44#else
45static void cache_line_size(int *cacheline_sizep)
46{
47 if (sysfs__read_int("devices/system/cpu/cpu0/cache/index0/coherency_line_size", cacheline_sizep))
48 pr_debug("cannot determine cache line size");
49}
50#endif
51
52int cacheline_size(void)
53{
54 static int size;
55
56 if (!size)
57 cache_line_size(&size);
58
59 return size;
60}
42 61
43int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH; 62int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
44int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK; 63int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK;
45 64
65int sysctl__max_stack(void)
66{
67 int value;
68
69 if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
70 sysctl_perf_event_max_stack = value;
71
72 if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0)
73 sysctl_perf_event_max_contexts_per_stack = value;
74
75 return sysctl_perf_event_max_stack;
76}
77
46bool test_attr__enabled; 78bool test_attr__enabled;
47 79
48bool perf_host = true; 80bool perf_host = true;
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index c9626c206208..dc58254a2b69 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -43,7 +43,9 @@ size_t hex_width(u64 v);
43int hex2u64(const char *ptr, u64 *val); 43int hex2u64(const char *ptr, u64 *val);
44 44
45extern unsigned int page_size; 45extern unsigned int page_size;
46extern int cacheline_size; 46int __pure cacheline_size(void);
47
48int sysctl__max_stack(void);
47 49
48int fetch_kernel_version(unsigned int *puint, 50int fetch_kernel_version(unsigned int *puint,
49 char *str, size_t str_sz); 51 char *str, size_t str_sz);
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 0acb1ec0e2f0..741af209b19d 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -139,12 +139,10 @@ static enum dso_type machine__thread_dso_type(struct machine *machine,
139 struct thread *thread) 139 struct thread *thread)
140{ 140{
141 enum dso_type dso_type = DSO__TYPE_UNKNOWN; 141 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
142 struct map *map; 142 struct map *map = map_groups__first(thread->mg);
143 struct dso *dso;
144 143
145 map = map_groups__first(thread->mg, MAP__FUNCTION);
146 for (; map ; map = map_groups__next(map)) { 144 for (; map ; map = map_groups__next(map)) {
147 dso = map->dso; 145 struct dso *dso = map->dso;
148 if (!dso || dso->long_name[0] != '/') 146 if (!dso || dso->long_name[0] != '/')
149 continue; 147 continue;
150 dso_type = dso__type(dso, machine); 148 dso_type = dso__type(dso, machine);