author    Ingo Molnar <mingo@kernel.org>    2018-12-18 08:39:00 -0500
committer Ingo Molnar <mingo@kernel.org>    2018-12-18 08:39:00 -0500
commit    ca46afdb2754dbb4a5d5772332fa16957d9bc618
tree      7c57056770c8a1621555b58d2e52625955376cfa
parent    8162b3d1a728cf63abf54be4167dd9beec5d9d37
parent    028713aa8389d960cb1935a9954327bdaa163cf8
Merge tag 'perf-core-for-mingo-4.21-20181217' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

- Introduce 'perf record --aio' to use asynchronous IO trace writing, disabled by default (Alexey Budankov)

- Add fallback routines to be used in places where we don't have the CPU mode (kernel/userspace/hypervisor) and thus must fall back to lookups in all map trees when trying to resolve symbols (Adrian Hunter)

- Fix error with the config term "pt=0": we should just force "pt=1" and warn the user that the former is nonsensical (Adrian Hunter)

- Fix 'perf test' entry where we expect 'sleep' to come in a PERF_RECORD_COMM but instead get 'coreutils' when sleep is provided by some versions of the 'coreutils' package (Adrian Hunter)

- Introduce 'perf top --kallsyms file' to match 'perf report --kallsyms', useful when dealing with BPF, where symbol resolution happens via kallsyms, not via the default vmlinux ELF symtabs (Arnaldo Carvalho de Melo)

- Support the 'srccode' output field in 'perf script' (Andi Kleen)

- Introduce basic 'perf annotate' support for the ARC architecture (Eugeniy Paltsev)

- Compute and display average IPC and IPC coverage per symbol in 'perf annotate' and 'perf report' (Jin Yao)

- Make 'perf top' use ordered_events and process histograms in a separate thread (Jiri Olsa)

- Make 'perf trace' use ordered_events (Jiri Olsa)

- Add support for ETMv3 and PTMv1.1 decoding in cs-etm (Mathieu Poirier)

- Support the ARM A32/T32 instruction sets in CoreSight trace (cs-etm) (Robert Walker)

- Fix 'perf stat' shadow stats for clock events (Ravi Bangoria)

- Remove needless rb_tree extra indirection from map__find() (Eric Saint-Etienne)

- Fix CSV mode column output for non-cgroup events in 'perf stat' (Stephane Eranian)

- Add a sanity check to libtraceevent's is_timestamp_in_us() (Tzvetomir Stoyanov)

- Use ERR_CAST instead of ERR_PTR(PTR_ERR()) (Wen Yang)

- Fix Load_Miss_Real_Latency on SKL/SKX in the Intel vendor event files (Andi Kleen)

- strncpy() fixes triggered by new warnings on gcc 8.2.0 (Arnaldo Carvalho de Melo)

- Handle the older 'nr' tracefs syscall tracepoint field in 'perf trace' (it was later renamed to '__syscall_nr'), so that 'perf trace' keeps working on older kernels (Arnaldo Carvalho de Melo)

- Give a better hint about the devel package for libssl (Arnaldo Carvalho de Melo)

- Fix the 'perf trace' build on architectures lacking an explicit mmap.h file (Arnaldo Carvalho de Melo)

- Disable breakpoint tests for 32-bit ARM (Florian Fainelli)

- Fix typos all over the place, mostly in comments, but also in some debug messages and JSON files (Ingo Molnar)

- Allow specifying proc-map-timeout in the config file (Mark Drayton)

- Fix the mmap_flags table generation script (Sihyeon Jang)

- Fix the 'size' parameter to snprintf in the 'perf config' code (Sihyeon Jang)

- More libtraceevent renames to make it a proper library (Tzvetomir Stoyanov)

- Implement the new tep_get_ref() API in libtraceevent (Tzvetomir Stoyanov)

- Add support for pkg-config in libtraceevent (Tzvetomir Stoyanov)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--tools/build/Makefile.feature6
-rw-r--r--tools/build/feature/Makefile6
-rw-r--r--tools/build/feature/test-all.c5
-rw-r--r--tools/build/feature/test-libaio.c16
-rw-r--r--tools/build/feature/test-libopencsd.c8
-rw-r--r--tools/include/linux/err.h13
-rw-r--r--tools/lib/subcmd/parse-options.h4
-rw-r--r--tools/lib/traceevent/Makefile27
-rw-r--r--tools/lib/traceevent/event-parse-api.c8
-rw-r--r--tools/lib/traceevent/event-parse-local.h13
-rw-r--r--tools/lib/traceevent/event-parse.c234
-rw-r--r--tools/lib/traceevent/event-parse.h77
-rw-r--r--tools/lib/traceevent/libtraceevent.pc.template10
-rw-r--r--tools/lib/traceevent/parse-filter.c42
-rw-r--r--tools/lib/traceevent/plugin_function.c2
-rw-r--r--tools/lib/traceevent/plugin_hrtimer.c4
-rw-r--r--tools/lib/traceevent/plugin_kmem.c2
-rw-r--r--tools/lib/traceevent/plugin_kvm.c16
-rw-r--r--tools/lib/traceevent/plugin_mac80211.c4
-rw-r--r--tools/lib/traceevent/plugin_sched_switch.c4
-rw-r--r--tools/perf/Documentation/perf-config.txt6
-rw-r--r--tools/perf/Documentation/perf-list.txt2
-rw-r--r--tools/perf/Documentation/perf-record.txt5
-rw-r--r--tools/perf/Documentation/perf-report.txt10
-rw-r--r--tools/perf/Documentation/perf-script.txt2
-rw-r--r--tools/perf/Documentation/perf-stat.txt4
-rw-r--r--tools/perf/Documentation/perf-top.txt3
-rw-r--r--tools/perf/Makefile.config8
-rw-r--r--tools/perf/Makefile.perf9
-rw-r--r--tools/perf/arch/arc/annotate/instructions.c9
-rw-r--r--tools/perf/arch/common.c21
-rw-r--r--tools/perf/arch/common.h1
-rw-r--r--tools/perf/arch/x86/tests/insn-x86.c2
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c11
-rw-r--r--tools/perf/builtin-help.c2
-rw-r--r--tools/perf/builtin-kvm.c6
-rw-r--r--tools/perf/builtin-record.c263
-rw-r--r--tools/perf/builtin-report.c26
-rw-r--r--tools/perf/builtin-script.c59
-rw-r--r--tools/perf/builtin-top.c289
-rw-r--r--tools/perf/builtin-trace.c87
-rw-r--r--tools/perf/perf.h2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/cache.json30
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json12
-rw-r--r--tools/perf/tests/attr.c2
-rw-r--r--tools/perf/tests/attr.py2
-rw-r--r--tools/perf/tests/bp_signal.c20
-rw-r--r--tools/perf/tests/code-reading.c2
-rw-r--r--tools/perf/tests/dwarf-unwind.c2
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c4
-rw-r--r--tools/perf/tests/perf-record.c7
-rwxr-xr-xtools/perf/trace/beauty/mmap_flags.sh4
-rw-r--r--tools/perf/ui/browsers/hists.c11
-rw-r--r--tools/perf/ui/tui/helpline.c2
-rw-r--r--tools/perf/util/Build1
-rw-r--r--tools/perf/util/annotate.c49
-rw-r--r--tools/perf/util/annotate.h5
-rw-r--r--tools/perf/util/bpf-loader.c4
-rw-r--r--tools/perf/util/config.c8
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c60
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.h19
-rw-r--r--tools/perf/util/cs-etm.c143
-rw-r--r--tools/perf/util/dso.c2
-rw-r--r--tools/perf/util/env.c2
-rw-r--r--tools/perf/util/event.c61
-rw-r--r--tools/perf/util/event.h8
-rw-r--r--tools/perf/util/evlist.c6
-rw-r--r--tools/perf/util/evlist.h2
-rw-r--r--tools/perf/util/evsel.h4
-rw-r--r--tools/perf/util/evsel_fprintf.c1
-rw-r--r--tools/perf/util/header.c8
-rw-r--r--tools/perf/util/hist.c2
-rw-r--r--tools/perf/util/hist.h1
-rw-r--r--tools/perf/util/jitdump.c2
-rw-r--r--tools/perf/util/machine.c33
-rw-r--r--tools/perf/util/machine.h6
-rw-r--r--tools/perf/util/map.c62
-rw-r--r--tools/perf/util/map.h16
-rw-r--r--tools/perf/util/mmap.c152
-rw-r--r--tools/perf/util/mmap.h26
-rw-r--r--tools/perf/util/ordered-events.c44
-rw-r--r--tools/perf/util/ordered-events.h8
-rw-r--r--tools/perf/util/parse-events.c2
-rw-r--r--tools/perf/util/probe-event.c4
-rw-r--r--tools/perf/util/probe-file.c2
-rw-r--r--tools/perf/util/python.c4
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c6
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c24
-rw-r--r--tools/perf/util/session.c7
-rw-r--r--tools/perf/util/sort.c63
-rw-r--r--tools/perf/util/sort.h2
-rw-r--r--tools/perf/util/srccode.c186
-rw-r--r--tools/perf/util/srccode.h7
-rw-r--r--tools/perf/util/srcline.c28
-rw-r--r--tools/perf/util/srcline.h1
-rw-r--r--tools/perf/util/stat-display.c16
-rw-r--r--tools/perf/util/stat-shadow.c3
-rw-r--r--tools/perf/util/svghelper.c2
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/thread.c2
-rw-r--r--tools/perf/util/thread.h6
-rw-r--r--tools/perf/util/top.c8
-rw-r--r--tools/perf/util/top.h10
-rw-r--r--tools/perf/util/trace-event-parse.c16
-rw-r--r--tools/perf/util/trace-event-read.c4
-rw-r--r--tools/perf/util/trace-event.c8
-rw-r--r--tools/perf/util/trace-event.h16
119 files changed, 2051 insertions, 572 deletions
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 8a123834a2a3..d47b8f73e2e7 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -70,7 +70,8 @@ FEATURE_TESTS_BASIC := \
70 sched_getcpu \ 70 sched_getcpu \
71 sdt \ 71 sdt \
72 setns \ 72 setns \
73 libopencsd 73 libopencsd \
74 libaio
74 75
75# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list 76# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
76# of all feature tests 77# of all feature tests
@@ -116,7 +117,8 @@ FEATURE_DISPLAY ?= \
116 zlib \ 117 zlib \
117 lzma \ 118 lzma \
118 get_cpuid \ 119 get_cpuid \
119 bpf 120 bpf \
121 libaio
120 122
121# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features. 123# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
122# If in the future we need per-feature checks/flags for features not 124# If in the future we need per-feature checks/flags for features not
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 38c22e122cb0..2dbcc0d00f52 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -61,7 +61,8 @@ FILES= \
61 test-libopencsd.bin \ 61 test-libopencsd.bin \
62 test-clang.bin \ 62 test-clang.bin \
63 test-llvm.bin \ 63 test-llvm.bin \
64 test-llvm-version.bin 64 test-llvm-version.bin \
65 test-libaio.bin
65 66
66FILES := $(addprefix $(OUTPUT),$(FILES)) 67FILES := $(addprefix $(OUTPUT),$(FILES))
67 68
@@ -297,6 +298,9 @@ $(OUTPUT)test-clang.bin:
297 298
298-include $(OUTPUT)*.d 299-include $(OUTPUT)*.d
299 300
301$(OUTPUT)test-libaio.bin:
302 $(BUILD) -lrt
303
300############################### 304###############################
301 305
302clean: 306clean:
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 58f01b950195..20cdaa4fc112 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -174,6 +174,10 @@
174# include "test-libopencsd.c" 174# include "test-libopencsd.c"
175#undef main 175#undef main
176 176
177#define main main_test_libaio
178# include "test-libaio.c"
179#undef main
180
177int main(int argc, char *argv[]) 181int main(int argc, char *argv[])
178{ 182{
179 main_test_libpython(); 183 main_test_libpython();
@@ -214,6 +218,7 @@ int main(int argc, char *argv[])
214 main_test_sdt(); 218 main_test_sdt();
215 main_test_setns(); 219 main_test_setns();
216 main_test_libopencsd(); 220 main_test_libopencsd();
221 main_test_libaio();
217 222
218 return 0; 223 return 0;
219} 224}
diff --git a/tools/build/feature/test-libaio.c b/tools/build/feature/test-libaio.c
new file mode 100644
index 000000000000..932133c9a265
--- /dev/null
+++ b/tools/build/feature/test-libaio.c
@@ -0,0 +1,16 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <aio.h>
3
4int main(void)
5{
6 struct aiocb aiocb;
7
8 aiocb.aio_fildes = 0;
9 aiocb.aio_offset = 0;
10 aiocb.aio_buf = 0;
11 aiocb.aio_nbytes = 0;
12 aiocb.aio_reqprio = 0;
13 aiocb.aio_sigevent.sigev_notify = 1 /*SIGEV_NONE*/;
14
15 return (int)aio_return(&aiocb);
16}
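The feature test above only verifies that <aio.h> and struct aiocb are usable (the feature Makefile links it with -lrt). For orientation, here is a minimal, self-contained sketch of the POSIX AIO calls this enables — aio_write()/aio_error()/aio_return() — queueing one write and polling for completion. The file name and buffer are illustrative; this is not code from the perf sources.

/* Illustrative POSIX AIO usage, not taken from perf; build with -lrt. */
#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static char buf[] = "trace data\n";
	struct aiocb cb;
	int fd = open("aio-demo.out", O_CREAT | O_WRONLY | O_TRUNC, 0644);

	if (fd < 0)
		return 1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_offset = 0;
	cb.aio_buf = buf;
	cb.aio_nbytes = sizeof(buf) - 1;
	cb.aio_sigevent.sigev_notify = SIGEV_NONE;

	if (aio_write(&cb))			/* queue the write, returns immediately */
		return 1;

	while (aio_error(&cb) == EINPROGRESS)	/* real code would do useful work here */
		usleep(1000);

	printf("wrote %zd bytes\n", aio_return(&cb));
	close(fd);
	return 0;
}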
diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
index 5ff1246e6194..d68eb4fb40cc 100644
--- a/tools/build/feature/test-libopencsd.c
+++ b/tools/build/feature/test-libopencsd.c
@@ -1,6 +1,14 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <opencsd/c_api/opencsd_c_api.h> 2#include <opencsd/c_api/opencsd_c_api.h>
3 3
4/*
5 * Check OpenCSD library version is sufficient to provide required features
6 */
7#define OCSD_MIN_VER ((0 << 16) | (10 << 8) | (0))
8#if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
9#error "OpenCSD >= 0.10.0 is required"
10#endif
11
4int main(void) 12int main(void)
5{ 13{
6 (void)ocsd_get_version(); 14 (void)ocsd_get_version();
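The OCSD_MIN_VER macro above packs major/minor/patch into a single integer (major in bits 16 and up, minor in bits 8-15, patch in bits 0-7), so 0.10.0 encodes as 0x000a00 and versions compare correctly as plain integers. A stand-alone illustration of that encoding follows; MAKE_VER and the sample version numbers are made up here and are not part of the OpenCSD API.

/* Illustration of the (major << 16) | (minor << 8) | patch packing used by
 * OCSD_MIN_VER; MAKE_VER and the sample versions are hypothetical. */
#include <stdio.h>

#define MAKE_VER(maj, min, pat)	(((maj) << 16) | ((min) << 8) | (pat))

int main(void)
{
	unsigned int required = MAKE_VER(0, 10, 0);	/* 0x000a00 */
	unsigned int found    = MAKE_VER(0, 12, 1);	/* e.g. an installed 0.12.1 */

	printf("required 0x%06x, found 0x%06x: %s\n", required, found,
	       found >= required ? "ok" : "too old");
	return 0;
}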
diff --git a/tools/include/linux/err.h b/tools/include/linux/err.h
index 094649667bae..2f5a12b88a86 100644
--- a/tools/include/linux/err.h
+++ b/tools/include/linux/err.h
@@ -59,4 +59,17 @@ static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
59 else 59 else
60 return 0; 60 return 0;
61} 61}
62
63/**
64 * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
65 * @ptr: The pointer to cast.
66 *
67 * Explicitly cast an error-valued pointer to another pointer type in such a
68 * way as to make it clear that's what's going on.
69 */
70static inline void * __must_check ERR_CAST(__force const void *ptr)
71{
72 /* cast away the const */
73 return (void *) ptr;
74}
62#endif /* _LINUX_ERR_H */ 75#endif /* _LINUX_ERR_H */
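One of the changes in this pull converts ERR_PTR(PTR_ERR(x)) chains to the ERR_CAST() helper added here. A hedged sketch of the pattern, where struct foo, struct bar and the two helpers are hypothetical names used only for illustration:

/* Hypothetical example: foo, bar, allocate_bar() and foo_from_bar() are
 * made-up names; only IS_ERR()/ERR_CAST() come from linux/err.h. */
#include <linux/err.h>

struct foo;
struct bar;

struct bar *allocate_bar(void);
struct foo *foo_from_bar(struct bar *b);

struct foo *allocate_foo(void)
{
	struct bar *b = allocate_bar();

	if (IS_ERR(b))
		return ERR_CAST(b);	/* instead of ERR_PTR(PTR_ERR(b)) */

	return foo_from_bar(b);
}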
diff --git a/tools/lib/subcmd/parse-options.h b/tools/lib/subcmd/parse-options.h
index 6ca2a8bfe716..af9def589863 100644
--- a/tools/lib/subcmd/parse-options.h
+++ b/tools/lib/subcmd/parse-options.h
@@ -71,7 +71,7 @@ typedef int parse_opt_cb(const struct option *, const char *arg, int unset);
71 * 71 *
72 * `argh`:: 72 * `argh`::
73 * token to explain the kind of argument this option wants. Keep it 73 * token to explain the kind of argument this option wants. Keep it
74 * homogenous across the repository. 74 * homogeneous across the repository.
75 * 75 *
76 * `help`:: 76 * `help`::
77 * the short help associated to what the option does. 77 * the short help associated to what the option does.
@@ -80,7 +80,7 @@ typedef int parse_opt_cb(const struct option *, const char *arg, int unset);
80 * 80 *
81 * `flags`:: 81 * `flags`::
82 * mask of parse_opt_option_flags. 82 * mask of parse_opt_option_flags.
83 * PARSE_OPT_OPTARG: says that the argument is optionnal (not for BOOLEANs) 83 * PARSE_OPT_OPTARG: says that the argument is optional (not for BOOLEANs)
84 * PARSE_OPT_NOARG: says that this option takes no argument, for CALLBACKs 84 * PARSE_OPT_NOARG: says that this option takes no argument, for CALLBACKs
85 * PARSE_OPT_NONEG: says that this option cannot be negated 85 * PARSE_OPT_NONEG: says that this option cannot be negated
86 * PARSE_OPT_HIDDEN this option is skipped in the default usage, showed in 86 * PARSE_OPT_HIDDEN this option is skipped in the default usage, showed in
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 0b4e833088a4..67fe5d7ef190 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -25,6 +25,7 @@ endef
25$(call allow-override,CC,$(CROSS_COMPILE)gcc) 25$(call allow-override,CC,$(CROSS_COMPILE)gcc)
26$(call allow-override,AR,$(CROSS_COMPILE)ar) 26$(call allow-override,AR,$(CROSS_COMPILE)ar)
27$(call allow-override,NM,$(CROSS_COMPILE)nm) 27$(call allow-override,NM,$(CROSS_COMPILE)nm)
28$(call allow-override,PKG_CONFIG,pkg-config)
28 29
29EXT = -std=gnu99 30EXT = -std=gnu99
30INSTALL = install 31INSTALL = install
@@ -47,6 +48,8 @@ prefix ?= /usr/local
47libdir = $(prefix)/$(libdir_relative) 48libdir = $(prefix)/$(libdir_relative)
48man_dir = $(prefix)/share/man 49man_dir = $(prefix)/share/man
49man_dir_SQ = '$(subst ','\'',$(man_dir))' 50man_dir_SQ = '$(subst ','\'',$(man_dir))'
51pkgconfig_dir ?= $(word 1,$(shell $(PKG_CONFIG) \
52 --variable pc_path pkg-config | tr ":" " "))
50 53
51export man_dir man_dir_SQ INSTALL 54export man_dir man_dir_SQ INSTALL
52export DESTDIR DESTDIR_SQ 55export DESTDIR DESTDIR_SQ
@@ -270,7 +273,19 @@ define do_generate_dynamic_list_file
270 fi 273 fi
271endef 274endef
272 275
273install_lib: all_cmd install_plugins 276PKG_CONFIG_FILE = libtraceevent.pc
277define do_install_pkgconfig_file
278 if [ -n "${pkgconfig_dir}" ]; then \
279 cp -f ${PKG_CONFIG_FILE}.template ${PKG_CONFIG_FILE}; \
280 sed -i "s|INSTALL_PREFIX|${1}|g" ${PKG_CONFIG_FILE}; \
281 sed -i "s|LIB_VERSION|${EVENT_PARSE_VERSION}|g" ${PKG_CONFIG_FILE}; \
282 $(call do_install,$(PKG_CONFIG_FILE),$(pkgconfig_dir),644); \
283 else \
284 (echo Failed to locate pkg-config directory) 1>&2; \
285 fi
286endef
287
288install_lib: all_cmd install_plugins install_headers install_pkgconfig
274 $(call QUIET_INSTALL, $(LIB_TARGET)) \ 289 $(call QUIET_INSTALL, $(LIB_TARGET)) \
275 $(call do_install_mkdir,$(libdir_SQ)); \ 290 $(call do_install_mkdir,$(libdir_SQ)); \
276 cp -fpR $(LIB_INSTALL) $(DESTDIR)$(libdir_SQ) 291 cp -fpR $(LIB_INSTALL) $(DESTDIR)$(libdir_SQ)
@@ -279,18 +294,24 @@ install_plugins: $(PLUGINS)
279 $(call QUIET_INSTALL, trace_plugins) \ 294 $(call QUIET_INSTALL, trace_plugins) \
280 $(call do_install_plugins, $(PLUGINS)) 295 $(call do_install_plugins, $(PLUGINS))
281 296
297install_pkgconfig:
298 $(call QUIET_INSTALL, $(PKG_CONFIG_FILE)) \
299 $(call do_install_pkgconfig_file,$(prefix))
300
282install_headers: 301install_headers:
283 $(call QUIET_INSTALL, headers) \ 302 $(call QUIET_INSTALL, headers) \
284 $(call do_install,event-parse.h,$(prefix)/include/traceevent,644); \ 303 $(call do_install,event-parse.h,$(prefix)/include/traceevent,644); \
285 $(call do_install,event-utils.h,$(prefix)/include/traceevent,644); \ 304 $(call do_install,event-utils.h,$(prefix)/include/traceevent,644); \
305 $(call do_install,trace-seq.h,$(prefix)/include/traceevent,644); \
286 $(call do_install,kbuffer.h,$(prefix)/include/traceevent,644) 306 $(call do_install,kbuffer.h,$(prefix)/include/traceevent,644)
287 307
288install: install_lib 308install: install_lib
289 309
290clean: 310clean:
291 $(call QUIET_CLEAN, libtraceevent) \ 311 $(call QUIET_CLEAN, libtraceevent) \
292 $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd \ 312 $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd; \
293 $(RM) TRACEEVENT-CFLAGS tags TAGS 313 $(RM) TRACEEVENT-CFLAGS tags TAGS; \
314 $(RM) $(PKG_CONFIG_FILE)
294 315
295PHONY += force plugins 316PHONY += force plugins
296force: 317force:
diff --git a/tools/lib/traceevent/event-parse-api.c b/tools/lib/traceevent/event-parse-api.c
index 61f7149085ee..8b31c0e00ba3 100644
--- a/tools/lib/traceevent/event-parse-api.c
+++ b/tools/lib/traceevent/event-parse-api.c
@@ -15,7 +15,7 @@
15 * This returns pointer to the first element of the events array 15 * This returns pointer to the first element of the events array
16 * If @tep is NULL, NULL is returned. 16 * If @tep is NULL, NULL is returned.
17 */ 17 */
18struct tep_event_format *tep_get_first_event(struct tep_handle *tep) 18struct tep_event *tep_get_first_event(struct tep_handle *tep)
19{ 19{
20 if (tep && tep->events) 20 if (tep && tep->events)
21 return tep->events[0]; 21 return tep->events[0];
@@ -51,7 +51,7 @@ void tep_set_flag(struct tep_handle *tep, int flag)
51 tep->flags |= flag; 51 tep->flags |= flag;
52} 52}
53 53
54unsigned short __tep_data2host2(struct tep_handle *pevent, unsigned short data) 54unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data)
55{ 55{
56 unsigned short swap; 56 unsigned short swap;
57 57
@@ -64,7 +64,7 @@ unsigned short __tep_data2host2(struct tep_handle *pevent, unsigned short data)
64 return swap; 64 return swap;
65} 65}
66 66
67unsigned int __tep_data2host4(struct tep_handle *pevent, unsigned int data) 67unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data)
68{ 68{
69 unsigned int swap; 69 unsigned int swap;
70 70
@@ -80,7 +80,7 @@ unsigned int __tep_data2host4(struct tep_handle *pevent, unsigned int data)
80} 80}
81 81
82unsigned long long 82unsigned long long
83__tep_data2host8(struct tep_handle *pevent, unsigned long long data) 83tep_data2host8(struct tep_handle *pevent, unsigned long long data)
84{ 84{
85 unsigned long long swap; 85 unsigned long long swap;
86 86
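The renamed tep_data2host{2,4,8}() helpers convert values read from the trace file to host byte order, swapping only when the file and host endianness differ (the later tep_read_number() hunk shows that they now take the value itself rather than a pointer). A small stand-alone sketch of the idea for the 16-bit case — the flag names mirror state kept in struct tep_handle, but the function here is illustrative, not the library's implementation:

/* Illustrative only: shows the conditional byte swap behind "data2host";
 * file_bigendian/host_bigendian stand in for fields kept in tep_handle. */
#include <stdio.h>

static unsigned short data2host2(int file_bigendian, int host_bigendian,
				 unsigned short data)
{
	if (file_bigendian == host_bigendian)
		return data;			/* no conversion needed */

	return (unsigned short)((data << 8) | (data >> 8));
}

int main(void)
{
	printf("0x%04x -> 0x%04x\n", 0x1234, data2host2(1, 0, 0x1234));
	return 0;
}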
diff --git a/tools/lib/traceevent/event-parse-local.h b/tools/lib/traceevent/event-parse-local.h
index b9bddde577f8..9a092dd4a86d 100644
--- a/tools/lib/traceevent/event-parse-local.h
+++ b/tools/lib/traceevent/event-parse-local.h
@@ -50,9 +50,9 @@ struct tep_handle {
50 unsigned int printk_count; 50 unsigned int printk_count;
51 51
52 52
53 struct tep_event_format **events; 53 struct tep_event **events;
54 int nr_events; 54 int nr_events;
55 struct tep_event_format **sort_events; 55 struct tep_event **sort_events;
56 enum tep_event_sort_type last_type; 56 enum tep_event_sort_type last_type;
57 57
58 int type_offset; 58 int type_offset;
@@ -84,9 +84,16 @@ struct tep_handle {
84 struct tep_function_handler *func_handlers; 84 struct tep_function_handler *func_handlers;
85 85
86 /* cache */ 86 /* cache */
87 struct tep_event_format *last_event; 87 struct tep_event *last_event;
88 88
89 char *trace_clock; 89 char *trace_clock;
90}; 90};
91 91
92void tep_free_event(struct tep_event *event);
93void tep_free_format_field(struct tep_format_field *field);
94
95unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data);
96unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data);
97unsigned long long tep_data2host8(struct tep_handle *pevent, unsigned long long data);
98
92#endif /* _PARSE_EVENTS_INT_H */ 99#endif /* _PARSE_EVENTS_INT_H */
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 3692f29fee46..a5ed291b8a9f 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -96,7 +96,7 @@ struct tep_function_handler {
96 96
97static unsigned long long 97static unsigned long long
98process_defined_func(struct trace_seq *s, void *data, int size, 98process_defined_func(struct trace_seq *s, void *data, int size,
99 struct tep_event_format *event, struct tep_print_arg *arg); 99 struct tep_event *event, struct tep_print_arg *arg);
100 100
101static void free_func_handle(struct tep_function_handler *func); 101static void free_func_handle(struct tep_function_handler *func);
102 102
@@ -739,16 +739,16 @@ void tep_print_printk(struct tep_handle *pevent)
739 } 739 }
740} 740}
741 741
742static struct tep_event_format *alloc_event(void) 742static struct tep_event *alloc_event(void)
743{ 743{
744 return calloc(1, sizeof(struct tep_event_format)); 744 return calloc(1, sizeof(struct tep_event));
745} 745}
746 746
747static int add_event(struct tep_handle *pevent, struct tep_event_format *event) 747static int add_event(struct tep_handle *pevent, struct tep_event *event)
748{ 748{
749 int i; 749 int i;
750 struct tep_event_format **events = realloc(pevent->events, sizeof(event) * 750 struct tep_event **events = realloc(pevent->events, sizeof(event) *
751 (pevent->nr_events + 1)); 751 (pevent->nr_events + 1));
752 if (!events) 752 if (!events)
753 return -1; 753 return -1;
754 754
@@ -1145,7 +1145,7 @@ static enum tep_event_type read_token(char **tok)
1145} 1145}
1146 1146
1147/** 1147/**
1148 * tep_read_token - access to utilites to use the pevent parser 1148 * tep_read_token - access to utilities to use the pevent parser
1149 * @tok: The token to return 1149 * @tok: The token to return
1150 * 1150 *
1151 * This will parse tokens from the string given by 1151 * This will parse tokens from the string given by
@@ -1355,7 +1355,7 @@ static unsigned int type_size(const char *name)
1355 return 0; 1355 return 0;
1356} 1356}
1357 1357
1358static int event_read_fields(struct tep_event_format *event, struct tep_format_field **fields) 1358static int event_read_fields(struct tep_event *event, struct tep_format_field **fields)
1359{ 1359{
1360 struct tep_format_field *field = NULL; 1360 struct tep_format_field *field = NULL;
1361 enum tep_event_type type; 1361 enum tep_event_type type;
@@ -1642,7 +1642,7 @@ fail_expect:
1642 return -1; 1642 return -1;
1643} 1643}
1644 1644
1645static int event_read_format(struct tep_event_format *event) 1645static int event_read_format(struct tep_event *event)
1646{ 1646{
1647 char *token; 1647 char *token;
1648 int ret; 1648 int ret;
@@ -1675,11 +1675,11 @@ static int event_read_format(struct tep_event_format *event)
1675} 1675}
1676 1676
1677static enum tep_event_type 1677static enum tep_event_type
1678process_arg_token(struct tep_event_format *event, struct tep_print_arg *arg, 1678process_arg_token(struct tep_event *event, struct tep_print_arg *arg,
1679 char **tok, enum tep_event_type type); 1679 char **tok, enum tep_event_type type);
1680 1680
1681static enum tep_event_type 1681static enum tep_event_type
1682process_arg(struct tep_event_format *event, struct tep_print_arg *arg, char **tok) 1682process_arg(struct tep_event *event, struct tep_print_arg *arg, char **tok)
1683{ 1683{
1684 enum tep_event_type type; 1684 enum tep_event_type type;
1685 char *token; 1685 char *token;
@@ -1691,14 +1691,14 @@ process_arg(struct tep_event_format *event, struct tep_print_arg *arg, char **to
1691} 1691}
1692 1692
1693static enum tep_event_type 1693static enum tep_event_type
1694process_op(struct tep_event_format *event, struct tep_print_arg *arg, char **tok); 1694process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok);
1695 1695
1696/* 1696/*
1697 * For __print_symbolic() and __print_flags, we need to completely 1697 * For __print_symbolic() and __print_flags, we need to completely
1698 * evaluate the first argument, which defines what to print next. 1698 * evaluate the first argument, which defines what to print next.
1699 */ 1699 */
1700static enum tep_event_type 1700static enum tep_event_type
1701process_field_arg(struct tep_event_format *event, struct tep_print_arg *arg, char **tok) 1701process_field_arg(struct tep_event *event, struct tep_print_arg *arg, char **tok)
1702{ 1702{
1703 enum tep_event_type type; 1703 enum tep_event_type type;
1704 1704
@@ -1712,7 +1712,7 @@ process_field_arg(struct tep_event_format *event, struct tep_print_arg *arg, cha
1712} 1712}
1713 1713
1714static enum tep_event_type 1714static enum tep_event_type
1715process_cond(struct tep_event_format *event, struct tep_print_arg *top, char **tok) 1715process_cond(struct tep_event *event, struct tep_print_arg *top, char **tok)
1716{ 1716{
1717 struct tep_print_arg *arg, *left, *right; 1717 struct tep_print_arg *arg, *left, *right;
1718 enum tep_event_type type; 1718 enum tep_event_type type;
@@ -1768,7 +1768,7 @@ out_free:
1768} 1768}
1769 1769
1770static enum tep_event_type 1770static enum tep_event_type
1771process_array(struct tep_event_format *event, struct tep_print_arg *top, char **tok) 1771process_array(struct tep_event *event, struct tep_print_arg *top, char **tok)
1772{ 1772{
1773 struct tep_print_arg *arg; 1773 struct tep_print_arg *arg;
1774 enum tep_event_type type; 1774 enum tep_event_type type;
@@ -1870,7 +1870,7 @@ static int set_op_prio(struct tep_print_arg *arg)
1870 1870
1871/* Note, *tok does not get freed, but will most likely be saved */ 1871/* Note, *tok does not get freed, but will most likely be saved */
1872static enum tep_event_type 1872static enum tep_event_type
1873process_op(struct tep_event_format *event, struct tep_print_arg *arg, char **tok) 1873process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok)
1874{ 1874{
1875 struct tep_print_arg *left, *right = NULL; 1875 struct tep_print_arg *left, *right = NULL;
1876 enum tep_event_type type; 1876 enum tep_event_type type;
@@ -2071,7 +2071,7 @@ out_free:
2071} 2071}
2072 2072
2073static enum tep_event_type 2073static enum tep_event_type
2074process_entry(struct tep_event_format *event __maybe_unused, struct tep_print_arg *arg, 2074process_entry(struct tep_event *event __maybe_unused, struct tep_print_arg *arg,
2075 char **tok) 2075 char **tok)
2076{ 2076{
2077 enum tep_event_type type; 2077 enum tep_event_type type;
@@ -2110,7 +2110,7 @@ process_entry(struct tep_event_format *event __maybe_unused, struct tep_print_ar
2110 return TEP_EVENT_ERROR; 2110 return TEP_EVENT_ERROR;
2111} 2111}
2112 2112
2113static int alloc_and_process_delim(struct tep_event_format *event, char *next_token, 2113static int alloc_and_process_delim(struct tep_event *event, char *next_token,
2114 struct tep_print_arg **print_arg) 2114 struct tep_print_arg **print_arg)
2115{ 2115{
2116 struct tep_print_arg *field; 2116 struct tep_print_arg *field;
@@ -2445,7 +2445,7 @@ static char *arg_eval (struct tep_print_arg *arg)
2445} 2445}
2446 2446
2447static enum tep_event_type 2447static enum tep_event_type
2448process_fields(struct tep_event_format *event, struct tep_print_flag_sym **list, char **tok) 2448process_fields(struct tep_event *event, struct tep_print_flag_sym **list, char **tok)
2449{ 2449{
2450 enum tep_event_type type; 2450 enum tep_event_type type;
2451 struct tep_print_arg *arg = NULL; 2451 struct tep_print_arg *arg = NULL;
@@ -2526,7 +2526,7 @@ out_free:
2526} 2526}
2527 2527
2528static enum tep_event_type 2528static enum tep_event_type
2529process_flags(struct tep_event_format *event, struct tep_print_arg *arg, char **tok) 2529process_flags(struct tep_event *event, struct tep_print_arg *arg, char **tok)
2530{ 2530{
2531 struct tep_print_arg *field; 2531 struct tep_print_arg *field;
2532 enum tep_event_type type; 2532 enum tep_event_type type;
@@ -2579,7 +2579,7 @@ out_free:
2579} 2579}
2580 2580
2581static enum tep_event_type 2581static enum tep_event_type
2582process_symbols(struct tep_event_format *event, struct tep_print_arg *arg, char **tok) 2582process_symbols(struct tep_event *event, struct tep_print_arg *arg, char **tok)
2583{ 2583{
2584 struct tep_print_arg *field; 2584 struct tep_print_arg *field;
2585 enum tep_event_type type; 2585 enum tep_event_type type;
@@ -2618,7 +2618,7 @@ out_free:
2618} 2618}
2619 2619
2620static enum tep_event_type 2620static enum tep_event_type
2621process_hex_common(struct tep_event_format *event, struct tep_print_arg *arg, 2621process_hex_common(struct tep_event *event, struct tep_print_arg *arg,
2622 char **tok, enum tep_print_arg_type type) 2622 char **tok, enum tep_print_arg_type type)
2623{ 2623{
2624 memset(arg, 0, sizeof(*arg)); 2624 memset(arg, 0, sizeof(*arg));
@@ -2641,20 +2641,20 @@ out:
2641} 2641}
2642 2642
2643static enum tep_event_type 2643static enum tep_event_type
2644process_hex(struct tep_event_format *event, struct tep_print_arg *arg, char **tok) 2644process_hex(struct tep_event *event, struct tep_print_arg *arg, char **tok)
2645{ 2645{
2646 return process_hex_common(event, arg, tok, TEP_PRINT_HEX); 2646 return process_hex_common(event, arg, tok, TEP_PRINT_HEX);
2647} 2647}
2648 2648
2649static enum tep_event_type 2649static enum tep_event_type
2650process_hex_str(struct tep_event_format *event, struct tep_print_arg *arg, 2650process_hex_str(struct tep_event *event, struct tep_print_arg *arg,
2651 char **tok) 2651 char **tok)
2652{ 2652{
2653 return process_hex_common(event, arg, tok, TEP_PRINT_HEX_STR); 2653 return process_hex_common(event, arg, tok, TEP_PRINT_HEX_STR);
2654} 2654}
2655 2655
2656static enum tep_event_type 2656static enum tep_event_type
2657process_int_array(struct tep_event_format *event, struct tep_print_arg *arg, char **tok) 2657process_int_array(struct tep_event *event, struct tep_print_arg *arg, char **tok)
2658{ 2658{
2659 memset(arg, 0, sizeof(*arg)); 2659 memset(arg, 0, sizeof(*arg));
2660 arg->type = TEP_PRINT_INT_ARRAY; 2660 arg->type = TEP_PRINT_INT_ARRAY;
@@ -2682,7 +2682,7 @@ out:
2682} 2682}
2683 2683
2684static enum tep_event_type 2684static enum tep_event_type
2685process_dynamic_array(struct tep_event_format *event, struct tep_print_arg *arg, char **tok) 2685process_dynamic_array(struct tep_event *event, struct tep_print_arg *arg, char **tok)
2686{ 2686{
2687 struct tep_format_field *field; 2687 struct tep_format_field *field;
2688 enum tep_event_type type; 2688 enum tep_event_type type;
@@ -2746,7 +2746,7 @@ process_dynamic_array(struct tep_event_format *event, struct tep_print_arg *arg,
2746} 2746}
2747 2747
2748static enum tep_event_type 2748static enum tep_event_type
2749process_dynamic_array_len(struct tep_event_format *event, struct tep_print_arg *arg, 2749process_dynamic_array_len(struct tep_event *event, struct tep_print_arg *arg,
2750 char **tok) 2750 char **tok)
2751{ 2751{
2752 struct tep_format_field *field; 2752 struct tep_format_field *field;
@@ -2782,7 +2782,7 @@ process_dynamic_array_len(struct tep_event_format *event, struct tep_print_arg *
2782} 2782}
2783 2783
2784static enum tep_event_type 2784static enum tep_event_type
2785process_paren(struct tep_event_format *event, struct tep_print_arg *arg, char **tok) 2785process_paren(struct tep_event *event, struct tep_print_arg *arg, char **tok)
2786{ 2786{
2787 struct tep_print_arg *item_arg; 2787 struct tep_print_arg *item_arg;
2788 enum tep_event_type type; 2788 enum tep_event_type type;
@@ -2845,7 +2845,7 @@ process_paren(struct tep_event_format *event, struct tep_print_arg *arg, char **
2845 2845
2846 2846
2847static enum tep_event_type 2847static enum tep_event_type
2848process_str(struct tep_event_format *event __maybe_unused, struct tep_print_arg *arg, 2848process_str(struct tep_event *event __maybe_unused, struct tep_print_arg *arg,
2849 char **tok) 2849 char **tok)
2850{ 2850{
2851 enum tep_event_type type; 2851 enum tep_event_type type;
@@ -2874,7 +2874,7 @@ process_str(struct tep_event_format *event __maybe_unused, struct tep_print_arg
2874} 2874}
2875 2875
2876static enum tep_event_type 2876static enum tep_event_type
2877process_bitmask(struct tep_event_format *event __maybe_unused, struct tep_print_arg *arg, 2877process_bitmask(struct tep_event *event __maybe_unused, struct tep_print_arg *arg,
2878 char **tok) 2878 char **tok)
2879{ 2879{
2880 enum tep_event_type type; 2880 enum tep_event_type type;
@@ -2935,7 +2935,7 @@ static void remove_func_handler(struct tep_handle *pevent, char *func_name)
2935} 2935}
2936 2936
2937static enum tep_event_type 2937static enum tep_event_type
2938process_func_handler(struct tep_event_format *event, struct tep_function_handler *func, 2938process_func_handler(struct tep_event *event, struct tep_function_handler *func,
2939 struct tep_print_arg *arg, char **tok) 2939 struct tep_print_arg *arg, char **tok)
2940{ 2940{
2941 struct tep_print_arg **next_arg; 2941 struct tep_print_arg **next_arg;
@@ -2993,7 +2993,7 @@ err:
2993} 2993}
2994 2994
2995static enum tep_event_type 2995static enum tep_event_type
2996process_function(struct tep_event_format *event, struct tep_print_arg *arg, 2996process_function(struct tep_event *event, struct tep_print_arg *arg,
2997 char *token, char **tok) 2997 char *token, char **tok)
2998{ 2998{
2999 struct tep_function_handler *func; 2999 struct tep_function_handler *func;
@@ -3049,7 +3049,7 @@ process_function(struct tep_event_format *event, struct tep_print_arg *arg,
3049} 3049}
3050 3050
3051static enum tep_event_type 3051static enum tep_event_type
3052process_arg_token(struct tep_event_format *event, struct tep_print_arg *arg, 3052process_arg_token(struct tep_event *event, struct tep_print_arg *arg,
3053 char **tok, enum tep_event_type type) 3053 char **tok, enum tep_event_type type)
3054{ 3054{
3055 char *token; 3055 char *token;
@@ -3137,7 +3137,7 @@ process_arg_token(struct tep_event_format *event, struct tep_print_arg *arg,
3137 return type; 3137 return type;
3138} 3138}
3139 3139
3140static int event_read_print_args(struct tep_event_format *event, struct tep_print_arg **list) 3140static int event_read_print_args(struct tep_event *event, struct tep_print_arg **list)
3141{ 3141{
3142 enum tep_event_type type = TEP_EVENT_ERROR; 3142 enum tep_event_type type = TEP_EVENT_ERROR;
3143 struct tep_print_arg *arg; 3143 struct tep_print_arg *arg;
@@ -3195,7 +3195,7 @@ static int event_read_print_args(struct tep_event_format *event, struct tep_prin
3195 return args; 3195 return args;
3196} 3196}
3197 3197
3198static int event_read_print(struct tep_event_format *event) 3198static int event_read_print(struct tep_event *event)
3199{ 3199{
3200 enum tep_event_type type; 3200 enum tep_event_type type;
3201 char *token; 3201 char *token;
@@ -3258,10 +3258,10 @@ static int event_read_print(struct tep_event_format *event)
3258 * @name: the name of the common field to return 3258 * @name: the name of the common field to return
3259 * 3259 *
3260 * Returns a common field from the event by the given @name. 3260 * Returns a common field from the event by the given @name.
3261 * This only searchs the common fields and not all field. 3261 * This only searches the common fields and not all field.
3262 */ 3262 */
3263struct tep_format_field * 3263struct tep_format_field *
3264tep_find_common_field(struct tep_event_format *event, const char *name) 3264tep_find_common_field(struct tep_event *event, const char *name)
3265{ 3265{
3266 struct tep_format_field *format; 3266 struct tep_format_field *format;
3267 3267
@@ -3283,7 +3283,7 @@ tep_find_common_field(struct tep_event_format *event, const char *name)
3283 * This does not search common fields. 3283 * This does not search common fields.
3284 */ 3284 */
3285struct tep_format_field * 3285struct tep_format_field *
3286tep_find_field(struct tep_event_format *event, const char *name) 3286tep_find_field(struct tep_event *event, const char *name)
3287{ 3287{
3288 struct tep_format_field *format; 3288 struct tep_format_field *format;
3289 3289
@@ -3302,11 +3302,11 @@ tep_find_field(struct tep_event_format *event, const char *name)
3302 * @name: the name of the field 3302 * @name: the name of the field
3303 * 3303 *
3304 * Returns a field by the given @name. 3304 * Returns a field by the given @name.
3305 * This searchs the common field names first, then 3305 * This searches the common field names first, then
3306 * the non-common ones if a common one was not found. 3306 * the non-common ones if a common one was not found.
3307 */ 3307 */
3308struct tep_format_field * 3308struct tep_format_field *
3309tep_find_any_field(struct tep_event_format *event, const char *name) 3309tep_find_any_field(struct tep_event *event, const char *name)
3310{ 3310{
3311 struct tep_format_field *format; 3311 struct tep_format_field *format;
3312 3312
@@ -3328,15 +3328,18 @@ tep_find_any_field(struct tep_event_format *event, const char *name)
3328unsigned long long tep_read_number(struct tep_handle *pevent, 3328unsigned long long tep_read_number(struct tep_handle *pevent,
3329 const void *ptr, int size) 3329 const void *ptr, int size)
3330{ 3330{
3331 unsigned long long val;
3332
3331 switch (size) { 3333 switch (size) {
3332 case 1: 3334 case 1:
3333 return *(unsigned char *)ptr; 3335 return *(unsigned char *)ptr;
3334 case 2: 3336 case 2:
3335 return tep_data2host2(pevent, ptr); 3337 return tep_data2host2(pevent, *(unsigned short *)ptr);
3336 case 4: 3338 case 4:
3337 return tep_data2host4(pevent, ptr); 3339 return tep_data2host4(pevent, *(unsigned int *)ptr);
3338 case 8: 3340 case 8:
3339 return tep_data2host8(pevent, ptr); 3341 memcpy(&val, (ptr), sizeof(unsigned long long));
3342 return tep_data2host8(pevent, val);
3340 default: 3343 default:
3341 /* BUG! */ 3344 /* BUG! */
3342 return 0; 3345 return 0;
@@ -3375,7 +3378,7 @@ int tep_read_number_field(struct tep_format_field *field, const void *data,
3375static int get_common_info(struct tep_handle *pevent, 3378static int get_common_info(struct tep_handle *pevent,
3376 const char *type, int *offset, int *size) 3379 const char *type, int *offset, int *size)
3377{ 3380{
3378 struct tep_event_format *event; 3381 struct tep_event *event;
3379 struct tep_format_field *field; 3382 struct tep_format_field *field;
3380 3383
3381 /* 3384 /*
@@ -3462,11 +3465,11 @@ static int events_id_cmp(const void *a, const void *b);
3462 * 3465 *
3463 * Returns an event that has a given @id. 3466 * Returns an event that has a given @id.
3464 */ 3467 */
3465struct tep_event_format *tep_find_event(struct tep_handle *pevent, int id) 3468struct tep_event *tep_find_event(struct tep_handle *pevent, int id)
3466{ 3469{
3467 struct tep_event_format **eventptr; 3470 struct tep_event **eventptr;
3468 struct tep_event_format key; 3471 struct tep_event key;
3469 struct tep_event_format *pkey = &key; 3472 struct tep_event *pkey = &key;
3470 3473
3471 /* Check cache first */ 3474 /* Check cache first */
3472 if (pevent->last_event && pevent->last_event->id == id) 3475 if (pevent->last_event && pevent->last_event->id == id)
@@ -3494,11 +3497,11 @@ struct tep_event_format *tep_find_event(struct tep_handle *pevent, int id)
3494 * This returns an event with a given @name and under the system 3497 * This returns an event with a given @name and under the system
3495 * @sys. If @sys is NULL the first event with @name is returned. 3498 * @sys. If @sys is NULL the first event with @name is returned.
3496 */ 3499 */
3497struct tep_event_format * 3500struct tep_event *
3498tep_find_event_by_name(struct tep_handle *pevent, 3501tep_find_event_by_name(struct tep_handle *pevent,
3499 const char *sys, const char *name) 3502 const char *sys, const char *name)
3500{ 3503{
3501 struct tep_event_format *event; 3504 struct tep_event *event = NULL;
3502 int i; 3505 int i;
3503 3506
3504 if (pevent->last_event && 3507 if (pevent->last_event &&
@@ -3523,7 +3526,7 @@ tep_find_event_by_name(struct tep_handle *pevent,
3523} 3526}
3524 3527
3525static unsigned long long 3528static unsigned long long
3526eval_num_arg(void *data, int size, struct tep_event_format *event, struct tep_print_arg *arg) 3529eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg *arg)
3527{ 3530{
3528 struct tep_handle *pevent = event->pevent; 3531 struct tep_handle *pevent = event->pevent;
3529 unsigned long long val = 0; 3532 unsigned long long val = 0;
@@ -3838,7 +3841,7 @@ static void print_bitmask_to_seq(struct tep_handle *pevent,
3838 /* 3841 /*
3839 * data points to a bit mask of size bytes. 3842 * data points to a bit mask of size bytes.
3840 * In the kernel, this is an array of long words, thus 3843 * In the kernel, this is an array of long words, thus
3841 * endianess is very important. 3844 * endianness is very important.
3842 */ 3845 */
3843 if (pevent->file_bigendian) 3846 if (pevent->file_bigendian)
3844 index = size - (len + 1); 3847 index = size - (len + 1);
@@ -3863,7 +3866,7 @@ static void print_bitmask_to_seq(struct tep_handle *pevent,
3863} 3866}
3864 3867
3865static void print_str_arg(struct trace_seq *s, void *data, int size, 3868static void print_str_arg(struct trace_seq *s, void *data, int size,
3866 struct tep_event_format *event, const char *format, 3869 struct tep_event *event, const char *format,
3867 int len_arg, struct tep_print_arg *arg) 3870 int len_arg, struct tep_print_arg *arg)
3868{ 3871{
3869 struct tep_handle *pevent = event->pevent; 3872 struct tep_handle *pevent = event->pevent;
@@ -4062,7 +4065,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
4062 f = tep_find_any_field(event, arg->string.string); 4065 f = tep_find_any_field(event, arg->string.string);
4063 arg->string.offset = f->offset; 4066 arg->string.offset = f->offset;
4064 } 4067 }
4065 str_offset = tep_data2host4(pevent, data + arg->string.offset); 4068 str_offset = tep_data2host4(pevent, *(unsigned int *)(data + arg->string.offset));
4066 str_offset &= 0xffff; 4069 str_offset &= 0xffff;
4067 print_str_to_seq(s, format, len_arg, ((char *)data) + str_offset); 4070 print_str_to_seq(s, format, len_arg, ((char *)data) + str_offset);
4068 break; 4071 break;
@@ -4080,7 +4083,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
4080 f = tep_find_any_field(event, arg->bitmask.bitmask); 4083 f = tep_find_any_field(event, arg->bitmask.bitmask);
4081 arg->bitmask.offset = f->offset; 4084 arg->bitmask.offset = f->offset;
4082 } 4085 }
4083 bitmask_offset = tep_data2host4(pevent, data + arg->bitmask.offset); 4086 bitmask_offset = tep_data2host4(pevent, *(unsigned int *)(data + arg->bitmask.offset));
4084 bitmask_size = bitmask_offset >> 16; 4087 bitmask_size = bitmask_offset >> 16;
4085 bitmask_offset &= 0xffff; 4088 bitmask_offset &= 0xffff;
4086 print_bitmask_to_seq(pevent, s, format, len_arg, 4089 print_bitmask_to_seq(pevent, s, format, len_arg,
@@ -4118,7 +4121,7 @@ out_warning_field:
4118 4121
4119static unsigned long long 4122static unsigned long long
4120process_defined_func(struct trace_seq *s, void *data, int size, 4123process_defined_func(struct trace_seq *s, void *data, int size,
4121 struct tep_event_format *event, struct tep_print_arg *arg) 4124 struct tep_event *event, struct tep_print_arg *arg)
4122{ 4125{
4123 struct tep_function_handler *func_handle = arg->func.func; 4126 struct tep_function_handler *func_handle = arg->func.func;
4124 struct func_params *param; 4127 struct func_params *param;
@@ -4213,7 +4216,7 @@ static void free_args(struct tep_print_arg *args)
4213 } 4216 }
4214} 4217}
4215 4218
4216static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, struct tep_event_format *event) 4219static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, struct tep_event *event)
4217{ 4220{
4218 struct tep_handle *pevent = event->pevent; 4221 struct tep_handle *pevent = event->pevent;
4219 struct tep_format_field *field, *ip_field; 4222 struct tep_format_field *field, *ip_field;
@@ -4221,7 +4224,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
4221 unsigned long long ip, val; 4224 unsigned long long ip, val;
4222 char *ptr; 4225 char *ptr;
4223 void *bptr; 4226 void *bptr;
4224 int vsize; 4227 int vsize = 0;
4225 4228
4226 field = pevent->bprint_buf_field; 4229 field = pevent->bprint_buf_field;
4227 ip_field = pevent->bprint_ip_field; 4230 ip_field = pevent->bprint_ip_field;
@@ -4390,7 +4393,7 @@ out_free:
4390 4393
4391static char * 4394static char *
4392get_bprint_format(void *data, int size __maybe_unused, 4395get_bprint_format(void *data, int size __maybe_unused,
4393 struct tep_event_format *event) 4396 struct tep_event *event)
4394{ 4397{
4395 struct tep_handle *pevent = event->pevent; 4398 struct tep_handle *pevent = event->pevent;
4396 unsigned long long addr; 4399 unsigned long long addr;
@@ -4425,7 +4428,7 @@ get_bprint_format(void *data, int size __maybe_unused,
4425} 4428}
4426 4429
4427static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size, 4430static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
4428 struct tep_event_format *event, struct tep_print_arg *arg) 4431 struct tep_event *event, struct tep_print_arg *arg)
4429{ 4432{
4430 unsigned char *buf; 4433 unsigned char *buf;
4431 const char *fmt = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x"; 4434 const char *fmt = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x";
@@ -4578,7 +4581,7 @@ static void print_ip6_addr(struct trace_seq *s, char i, unsigned char *buf)
4578 * %pISpc print an IP address based on sockaddr; p adds port. 4581 * %pISpc print an IP address based on sockaddr; p adds port.
4579 */ 4582 */
4580static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i, 4583static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
4581 void *data, int size, struct tep_event_format *event, 4584 void *data, int size, struct tep_event *event,
4582 struct tep_print_arg *arg) 4585 struct tep_print_arg *arg)
4583{ 4586{
4584 unsigned char *buf; 4587 unsigned char *buf;
@@ -4615,7 +4618,7 @@ static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
4615} 4618}
4616 4619
4617static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i, 4620static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
4618 void *data, int size, struct tep_event_format *event, 4621 void *data, int size, struct tep_event *event,
4619 struct tep_print_arg *arg) 4622 struct tep_print_arg *arg)
4620{ 4623{
4621 char have_c = 0; 4624 char have_c = 0;
@@ -4665,7 +4668,7 @@ static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
4665} 4668}
4666 4669
4667static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i, 4670static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
4668 void *data, int size, struct tep_event_format *event, 4671 void *data, int size, struct tep_event *event,
4669 struct tep_print_arg *arg) 4672 struct tep_print_arg *arg)
4670{ 4673{
4671 char have_c = 0, have_p = 0; 4674 char have_c = 0, have_p = 0;
@@ -4747,7 +4750,7 @@ static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
4747} 4750}
4748 4751
4749static int print_ip_arg(struct trace_seq *s, const char *ptr, 4752static int print_ip_arg(struct trace_seq *s, const char *ptr,
4750 void *data, int size, struct tep_event_format *event, 4753 void *data, int size, struct tep_event *event,
4751 struct tep_print_arg *arg) 4754 struct tep_print_arg *arg)
4752{ 4755{
4753 char i = *ptr; /* 'i' or 'I' */ 4756 char i = *ptr; /* 'i' or 'I' */
@@ -4854,7 +4857,7 @@ void tep_print_field(struct trace_seq *s, void *data,
4854} 4857}
4855 4858
4856void tep_print_fields(struct trace_seq *s, void *data, 4859void tep_print_fields(struct trace_seq *s, void *data,
4857 int size __maybe_unused, struct tep_event_format *event) 4860 int size __maybe_unused, struct tep_event *event)
4858{ 4861{
4859 struct tep_format_field *field; 4862 struct tep_format_field *field;
4860 4863
@@ -4866,7 +4869,7 @@ void tep_print_fields(struct trace_seq *s, void *data,
4866 } 4869 }
4867} 4870}
4868 4871
4869static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_event_format *event) 4872static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_event *event)
4870{ 4873{
4871 struct tep_handle *pevent = event->pevent; 4874 struct tep_handle *pevent = event->pevent;
4872 struct tep_print_fmt *print_fmt = &event->print_fmt; 4875 struct tep_print_fmt *print_fmt = &event->print_fmt;
@@ -4881,7 +4884,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
4881 char format[32]; 4884 char format[32];
4882 int show_func; 4885 int show_func;
4883 int len_as_arg; 4886 int len_as_arg;
4884 int len_arg; 4887 int len_arg = 0;
4885 int len; 4888 int len;
4886 int ls; 4889 int ls;
4887 4890
@@ -5146,8 +5149,8 @@ void tep_data_lat_fmt(struct tep_handle *pevent,
5146 static int migrate_disable_exists; 5149 static int migrate_disable_exists;
5147 unsigned int lat_flags; 5150 unsigned int lat_flags;
5148 unsigned int pc; 5151 unsigned int pc;
5149 int lock_depth; 5152 int lock_depth = 0;
5150 int migrate_disable; 5153 int migrate_disable = 0;
5151 int hardirq; 5154 int hardirq;
5152 int softirq; 5155 int softirq;
5153 void *data = record->data; 5156 void *data = record->data;
@@ -5229,7 +5232,7 @@ int tep_data_type(struct tep_handle *pevent, struct tep_record *rec)
5229 * 5232 *
5230 * This returns the event form a given @type; 5233 * This returns the event form a given @type;
5231 */ 5234 */
5232struct tep_event_format *tep_data_event_from_type(struct tep_handle *pevent, int type) 5235struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type)
5233{ 5236{
5234 return tep_find_event(pevent, type); 5237 return tep_find_event(pevent, type);
5235} 5238}
@@ -5313,9 +5316,9 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *ne
5313 * This returns the cmdline structure that holds a pid for a given 5316 * This returns the cmdline structure that holds a pid for a given
5314 * comm, or NULL if none found. As there may be more than one pid for 5317 * comm, or NULL if none found. As there may be more than one pid for
5315 * a given comm, the result of this call can be passed back into 5318 * a given comm, the result of this call can be passed back into
5316 * a recurring call in the @next paramater, and then it will find the 5319 * a recurring call in the @next parameter, and then it will find the
5317 * next pid. 5320 * next pid.
5318 * Also, it does a linear seach, so it may be slow. 5321 * Also, it does a linear search, so it may be slow.
5319 */ 5322 */
5320struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, 5323struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
5321 struct cmdline *next) 5324 struct cmdline *next)
@@ -5387,7 +5390,7 @@ int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline)
5387 * This parses the raw @data using the given @event information and 5390 * This parses the raw @data using the given @event information and
5388 * writes the print format into the trace_seq. 5391 * writes the print format into the trace_seq.
5389 */ 5392 */
5390void tep_event_info(struct trace_seq *s, struct tep_event_format *event, 5393void tep_event_info(struct trace_seq *s, struct tep_event *event,
5391 struct tep_record *record) 5394 struct tep_record *record)
5392{ 5395{
5393 int print_pretty = 1; 5396 int print_pretty = 1;
@@ -5409,7 +5412,7 @@ void tep_event_info(struct trace_seq *s, struct tep_event_format *event,
5409 5412
5410static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock) 5413static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
5411{ 5414{
5412 if (!use_trace_clock) 5415 if (!trace_clock || !use_trace_clock)
5413 return true; 5416 return true;
5414 5417
5415 if (!strcmp(trace_clock, "local") || !strcmp(trace_clock, "global") 5418 if (!strcmp(trace_clock, "local") || !strcmp(trace_clock, "global")
@@ -5428,7 +5431,7 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
5428 * Returns the associated event for a given record, or NULL if non is 5431 * Returns the associated event for a given record, or NULL if non is
5429 * is found. 5432 * is found.
5430 */ 5433 */
5431struct tep_event_format * 5434struct tep_event *
5432tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record) 5435tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record)
5433{ 5436{
5434 int type; 5437 int type;
@@ -5453,7 +5456,7 @@ tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record)
5453 * Writes the tasks comm, pid and CPU to @s. 5456 * Writes the tasks comm, pid and CPU to @s.
5454 */ 5457 */
5455void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s, 5458void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
5456 struct tep_event_format *event, 5459 struct tep_event *event,
5457 struct tep_record *record) 5460 struct tep_record *record)
5458{ 5461{
5459 void *data = record->data; 5462 void *data = record->data;
@@ -5481,7 +5484,7 @@ void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
5481 * Writes the timestamp of the record into @s. 5484 * Writes the timestamp of the record into @s.
5482 */ 5485 */
5483void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s, 5486void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
5484 struct tep_event_format *event, 5487 struct tep_event *event,
5485 struct tep_record *record, 5488 struct tep_record *record,
5486 bool use_trace_clock) 5489 bool use_trace_clock)
5487{ 5490{
@@ -5531,7 +5534,7 @@ void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
5531 * Writes the parsing of the record's data to @s. 5534 * Writes the parsing of the record's data to @s.
5532 */ 5535 */
5533void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s, 5536void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
5534 struct tep_event_format *event, 5537 struct tep_event *event,
5535 struct tep_record *record) 5538 struct tep_record *record)
5536{ 5539{
5537 static const char *spaces = " "; /* 20 spaces */ 5540 static const char *spaces = " "; /* 20 spaces */
@@ -5550,7 +5553,7 @@ void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
5550void tep_print_event(struct tep_handle *pevent, struct trace_seq *s, 5553void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
5551 struct tep_record *record, bool use_trace_clock) 5554 struct tep_record *record, bool use_trace_clock)
5552{ 5555{
5553 struct tep_event_format *event; 5556 struct tep_event *event;
5554 5557
5555 event = tep_find_event_by_record(pevent, record); 5558 event = tep_find_event_by_record(pevent, record);
5556 if (!event) { 5559 if (!event) {
@@ -5572,8 +5575,8 @@ void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
5572 5575
5573static int events_id_cmp(const void *a, const void *b) 5576static int events_id_cmp(const void *a, const void *b)
5574{ 5577{
5575 struct tep_event_format * const * ea = a; 5578 struct tep_event * const * ea = a;
5576 struct tep_event_format * const * eb = b; 5579 struct tep_event * const * eb = b;
5577 5580
5578 if ((*ea)->id < (*eb)->id) 5581 if ((*ea)->id < (*eb)->id)
5579 return -1; 5582 return -1;
@@ -5586,8 +5589,8 @@ static int events_id_cmp(const void *a, const void *b)
5586 5589
5587static int events_name_cmp(const void *a, const void *b) 5590static int events_name_cmp(const void *a, const void *b)
5588{ 5591{
5589 struct tep_event_format * const * ea = a; 5592 struct tep_event * const * ea = a;
5590 struct tep_event_format * const * eb = b; 5593 struct tep_event * const * eb = b;
5591 int res; 5594 int res;
5592 5595
5593 res = strcmp((*ea)->name, (*eb)->name); 5596 res = strcmp((*ea)->name, (*eb)->name);
@@ -5603,8 +5606,8 @@ static int events_name_cmp(const void *a, const void *b)
5603 5606
5604static int events_system_cmp(const void *a, const void *b) 5607static int events_system_cmp(const void *a, const void *b)
5605{ 5608{
5606 struct tep_event_format * const * ea = a; 5609 struct tep_event * const * ea = a;
5607 struct tep_event_format * const * eb = b; 5610 struct tep_event * const * eb = b;
5608 int res; 5611 int res;
5609 5612
5610 res = strcmp((*ea)->system, (*eb)->system); 5613 res = strcmp((*ea)->system, (*eb)->system);
@@ -5618,9 +5621,9 @@ static int events_system_cmp(const void *a, const void *b)
5618 return events_id_cmp(a, b); 5621 return events_id_cmp(a, b);
5619} 5622}
5620 5623
5621struct tep_event_format **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type sort_type) 5624struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type sort_type)
5622{ 5625{
5623 struct tep_event_format **events; 5626 struct tep_event **events;
5624 int (*sort)(const void *a, const void *b); 5627 int (*sort)(const void *a, const void *b);
5625 5628
5626 events = pevent->sort_events; 5629 events = pevent->sort_events;
@@ -5703,7 +5706,7 @@ get_event_fields(const char *type, const char *name,
5703 * Returns an allocated array of fields. The last item in the array is NULL. 5706 * Returns an allocated array of fields. The last item in the array is NULL.
5704 * The array must be freed with free(). 5707 * The array must be freed with free().
5705 */ 5708 */
5706struct tep_format_field **tep_event_common_fields(struct tep_event_format *event) 5709struct tep_format_field **tep_event_common_fields(struct tep_event *event)
5707{ 5710{
5708 return get_event_fields("common", event->name, 5711 return get_event_fields("common", event->name,
5709 event->format.nr_common, 5712 event->format.nr_common,
@@ -5717,7 +5720,7 @@ struct tep_format_field **tep_event_common_fields(struct tep_event_format *event
5717 * Returns an allocated array of fields. The last item in the array is NULL. 5720 * Returns an allocated array of fields. The last item in the array is NULL.
5718 * The array must be freed with free(). 5721 * The array must be freed with free().
5719 */ 5722 */
5720struct tep_format_field **tep_event_fields(struct tep_event_format *event) 5723struct tep_format_field **tep_event_fields(struct tep_event *event)
5721{ 5724{
5722 return get_event_fields("event", event->name, 5725 return get_event_fields("event", event->name,
5723 event->format.nr_fields, 5726 event->format.nr_fields,
@@ -5959,7 +5962,7 @@ int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long si
5959 return 0; 5962 return 0;
5960} 5963}
5961 5964
5962static int event_matches(struct tep_event_format *event, 5965static int event_matches(struct tep_event *event,
5963 int id, const char *sys_name, 5966 int id, const char *sys_name,
5964 const char *event_name) 5967 const char *event_name)
5965{ 5968{
@@ -5982,7 +5985,7 @@ static void free_handler(struct event_handler *handle)
5982 free(handle); 5985 free(handle);
5983} 5986}
5984 5987
5985static int find_event_handle(struct tep_handle *pevent, struct tep_event_format *event) 5988static int find_event_handle(struct tep_handle *pevent, struct tep_event *event)
5986{ 5989{
5987 struct event_handler *handle, **next; 5990 struct event_handler *handle, **next;
5988 5991
@@ -6023,11 +6026,11 @@ static int find_event_handle(struct tep_handle *pevent, struct tep_event_format
6023 * 6026 *
6024 * /sys/kernel/debug/tracing/events/.../.../format 6027 * /sys/kernel/debug/tracing/events/.../.../format
6025 */ 6028 */
6026enum tep_errno __tep_parse_format(struct tep_event_format **eventp, 6029enum tep_errno __tep_parse_format(struct tep_event **eventp,
6027 struct tep_handle *pevent, const char *buf, 6030 struct tep_handle *pevent, const char *buf,
6028 unsigned long size, const char *sys) 6031 unsigned long size, const char *sys)
6029{ 6032{
6030 struct tep_event_format *event; 6033 struct tep_event *event;
6031 int ret; 6034 int ret;
6032 6035
6033 init_input_buf(buf, size); 6036 init_input_buf(buf, size);
@@ -6132,12 +6135,12 @@ enum tep_errno __tep_parse_format(struct tep_event_format **eventp,
6132 6135
6133static enum tep_errno 6136static enum tep_errno
6134__parse_event(struct tep_handle *pevent, 6137__parse_event(struct tep_handle *pevent,
6135 struct tep_event_format **eventp, 6138 struct tep_event **eventp,
6136 const char *buf, unsigned long size, 6139 const char *buf, unsigned long size,
6137 const char *sys) 6140 const char *sys)
6138{ 6141{
6139 int ret = __tep_parse_format(eventp, pevent, buf, size, sys); 6142 int ret = __tep_parse_format(eventp, pevent, buf, size, sys);
6140 struct tep_event_format *event = *eventp; 6143 struct tep_event *event = *eventp;
6141 6144
6142 if (event == NULL) 6145 if (event == NULL)
6143 return ret; 6146 return ret;
@@ -6154,7 +6157,7 @@ __parse_event(struct tep_handle *pevent,
6154 return 0; 6157 return 0;
6155 6158
6156event_add_failed: 6159event_add_failed:
6157 tep_free_format(event); 6160 tep_free_event(event);
6158 return ret; 6161 return ret;
6159} 6162}
6160 6163
@@ -6174,7 +6177,7 @@ event_add_failed:
6174 * /sys/kernel/debug/tracing/events/.../.../format 6177 * /sys/kernel/debug/tracing/events/.../.../format
6175 */ 6178 */
6176enum tep_errno tep_parse_format(struct tep_handle *pevent, 6179enum tep_errno tep_parse_format(struct tep_handle *pevent,
6177 struct tep_event_format **eventp, 6180 struct tep_event **eventp,
6178 const char *buf, 6181 const char *buf,
6179 unsigned long size, const char *sys) 6182 unsigned long size, const char *sys)
6180{ 6183{
@@ -6198,7 +6201,7 @@ enum tep_errno tep_parse_format(struct tep_handle *pevent,
6198enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf, 6201enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
6199 unsigned long size, const char *sys) 6202 unsigned long size, const char *sys)
6200{ 6203{
6201 struct tep_event_format *event = NULL; 6204 struct tep_event *event = NULL;
6202 return __parse_event(pevent, &event, buf, size, sys); 6205 return __parse_event(pevent, &event, buf, size, sys);
6203} 6206}
6204 6207
@@ -6235,7 +6238,7 @@ int get_field_val(struct trace_seq *s, struct tep_format_field *field,
6235 * 6238 *
6236 * On failure, it returns NULL. 6239 * On failure, it returns NULL.
6237 */ 6240 */
6238void *tep_get_field_raw(struct trace_seq *s, struct tep_event_format *event, 6241void *tep_get_field_raw(struct trace_seq *s, struct tep_event *event,
6239 const char *name, struct tep_record *record, 6242 const char *name, struct tep_record *record,
6240 int *len, int err) 6243 int *len, int err)
6241{ 6244{
@@ -6282,7 +6285,7 @@ void *tep_get_field_raw(struct trace_seq *s, struct tep_event_format *event,
6282 * 6285 *
6283 * Returns 0 on success -1 on field not found. 6286 * Returns 0 on success -1 on field not found.
6284 */ 6287 */
6285int tep_get_field_val(struct trace_seq *s, struct tep_event_format *event, 6288int tep_get_field_val(struct trace_seq *s, struct tep_event *event,
6286 const char *name, struct tep_record *record, 6289 const char *name, struct tep_record *record,
6287 unsigned long long *val, int err) 6290 unsigned long long *val, int err)
6288{ 6291{
@@ -6307,7 +6310,7 @@ int tep_get_field_val(struct trace_seq *s, struct tep_event_format *event,
6307 * 6310 *
6308 * Returns 0 on success -1 on field not found. 6311 * Returns 0 on success -1 on field not found.
6309 */ 6312 */
6310int tep_get_common_field_val(struct trace_seq *s, struct tep_event_format *event, 6313int tep_get_common_field_val(struct trace_seq *s, struct tep_event *event,
6311 const char *name, struct tep_record *record, 6314 const char *name, struct tep_record *record,
6312 unsigned long long *val, int err) 6315 unsigned long long *val, int err)
6313{ 6316{
@@ -6332,7 +6335,7 @@ int tep_get_common_field_val(struct trace_seq *s, struct tep_event_format *event
6332 * 6335 *
6333 * Returns 0 on success -1 on field not found. 6336 * Returns 0 on success -1 on field not found.
6334 */ 6337 */
6335int tep_get_any_field_val(struct trace_seq *s, struct tep_event_format *event, 6338int tep_get_any_field_val(struct trace_seq *s, struct tep_event *event,
6336 const char *name, struct tep_record *record, 6339 const char *name, struct tep_record *record,
6337 unsigned long long *val, int err) 6340 unsigned long long *val, int err)
6338{ 6341{
@@ -6358,7 +6361,7 @@ int tep_get_any_field_val(struct trace_seq *s, struct tep_event_format *event,
6358 * Returns: 0 on success, -1 field not found, or 1 if buffer is full. 6361 * Returns: 0 on success, -1 field not found, or 1 if buffer is full.
6359 */ 6362 */
6360int tep_print_num_field(struct trace_seq *s, const char *fmt, 6363int tep_print_num_field(struct trace_seq *s, const char *fmt,
6361 struct tep_event_format *event, const char *name, 6364 struct tep_event *event, const char *name,
6362 struct tep_record *record, int err) 6365 struct tep_record *record, int err)
6363{ 6366{
6364 struct tep_format_field *field = tep_find_field(event, name); 6367 struct tep_format_field *field = tep_find_field(event, name);
@@ -6390,7 +6393,7 @@ int tep_print_num_field(struct trace_seq *s, const char *fmt,
6390 * Returns: 0 on success, -1 field not found, or 1 if buffer is full. 6393 * Returns: 0 on success, -1 field not found, or 1 if buffer is full.
6391 */ 6394 */
6392int tep_print_func_field(struct trace_seq *s, const char *fmt, 6395int tep_print_func_field(struct trace_seq *s, const char *fmt,
6393 struct tep_event_format *event, const char *name, 6396 struct tep_event *event, const char *name,
6394 struct tep_record *record, int err) 6397 struct tep_record *record, int err)
6395{ 6398{
6396 struct tep_format_field *field = tep_find_field(event, name); 6399 struct tep_format_field *field = tep_find_field(event, name);
@@ -6550,11 +6553,11 @@ int tep_unregister_print_function(struct tep_handle *pevent,
6550 return -1; 6553 return -1;
6551} 6554}
6552 6555
6553static struct tep_event_format *search_event(struct tep_handle *pevent, int id, 6556static struct tep_event *search_event(struct tep_handle *pevent, int id,
6554 const char *sys_name, 6557 const char *sys_name,
6555 const char *event_name) 6558 const char *event_name)
6556{ 6559{
6557 struct tep_event_format *event; 6560 struct tep_event *event;
6558 6561
6559 if (id >= 0) { 6562 if (id >= 0) {
6560 /* search by id */ 6563 /* search by id */
@@ -6594,7 +6597,7 @@ int tep_register_event_handler(struct tep_handle *pevent, int id,
6594 const char *sys_name, const char *event_name, 6597 const char *sys_name, const char *event_name,
6595 tep_event_handler_func func, void *context) 6598 tep_event_handler_func func, void *context)
6596{ 6599{
6597 struct tep_event_format *event; 6600 struct tep_event *event;
6598 struct event_handler *handle; 6601 struct event_handler *handle;
6599 6602
6600 event = search_event(pevent, id, sys_name, event_name); 6603 event = search_event(pevent, id, sys_name, event_name);
@@ -6678,7 +6681,7 @@ int tep_unregister_event_handler(struct tep_handle *pevent, int id,
6678 const char *sys_name, const char *event_name, 6681 const char *sys_name, const char *event_name,
6679 tep_event_handler_func func, void *context) 6682 tep_event_handler_func func, void *context)
6680{ 6683{
6681 struct tep_event_format *event; 6684 struct tep_event *event;
6682 struct event_handler *handle; 6685 struct event_handler *handle;
6683 struct event_handler **next; 6686 struct event_handler **next;
6684 6687
@@ -6730,6 +6733,13 @@ void tep_ref(struct tep_handle *pevent)
6730 pevent->ref_count++; 6733 pevent->ref_count++;
6731} 6734}
6732 6735
6736int tep_get_ref(struct tep_handle *tep)
6737{
6738 if (tep)
6739 return tep->ref_count;
6740 return 0;
6741}
6742
6733void tep_free_format_field(struct tep_format_field *field) 6743void tep_free_format_field(struct tep_format_field *field)
6734{ 6744{
6735 free(field->type); 6745 free(field->type);
@@ -6756,7 +6766,7 @@ static void free_formats(struct tep_format *format)
6756 free_format_fields(format->fields); 6766 free_format_fields(format->fields);
6757} 6767}
6758 6768
6759void tep_free_format(struct tep_event_format *event) 6769void tep_free_event(struct tep_event *event)
6760{ 6770{
6761 free(event->name); 6771 free(event->name);
6762 free(event->system); 6772 free(event->system);
@@ -6842,7 +6852,7 @@ void tep_free(struct tep_handle *pevent)
6842 } 6852 }
6843 6853
6844 for (i = 0; i < pevent->nr_events; i++) 6854 for (i = 0; i < pevent->nr_events; i++)
6845 tep_free_format(pevent->events[i]); 6855 tep_free_event(pevent->events[i]);
6846 6856
6847 while (pevent->handlers) { 6857 while (pevent->handlers) {
6848 handle = pevent->handlers; 6858 handle = pevent->handlers;
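
A minimal consumer sketch of the renamed API and the new tep_get_ref() accessor, assuming only the tep_alloc()/tep_ref()/tep_get_ref()/tep_unref()/tep_free() calls visible in this series, and that tep_alloc() starts the handle at one reference:

    #include <stdio.h>
    #include "event-parse.h"

    static void ref_count_sketch(void)
    {
            struct tep_handle *tep = tep_alloc();

            if (!tep)
                    return;

            tep_ref(tep);                           /* take an extra reference */
            printf("refs: %d\n", tep_get_ref(tep)); /* 2, assuming tep_alloc() starts at 1 */
            tep_unref(tep);                         /* drop the extra reference */
            tep_free(tep);                          /* release the last reference */
    }
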
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 16bf4c890b6f..35d37087d3c5 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -57,11 +57,11 @@ struct tep_record {
57/* ----------------------- tep ----------------------- */ 57/* ----------------------- tep ----------------------- */
58 58
59struct tep_handle; 59struct tep_handle;
60struct tep_event_format; 60struct tep_event;
61 61
62typedef int (*tep_event_handler_func)(struct trace_seq *s, 62typedef int (*tep_event_handler_func)(struct trace_seq *s,
63 struct tep_record *record, 63 struct tep_record *record,
64 struct tep_event_format *event, 64 struct tep_event *event,
65 void *context); 65 void *context);
66 66
67typedef int (*tep_plugin_load_func)(struct tep_handle *pevent); 67typedef int (*tep_plugin_load_func)(struct tep_handle *pevent);
@@ -143,7 +143,7 @@ enum tep_format_flags {
143 143
144struct tep_format_field { 144struct tep_format_field {
145 struct tep_format_field *next; 145 struct tep_format_field *next;
146 struct tep_event_format *event; 146 struct tep_event *event;
147 char *type; 147 char *type;
148 char *name; 148 char *name;
149 char *alias; 149 char *alias;
@@ -277,7 +277,7 @@ struct tep_print_fmt {
277 struct tep_print_arg *args; 277 struct tep_print_arg *args;
278}; 278};
279 279
280struct tep_event_format { 280struct tep_event {
281 struct tep_handle *pevent; 281 struct tep_handle *pevent;
282 char *name; 282 char *name;
283 int id; 283 int id;
@@ -409,20 +409,6 @@ void tep_print_plugins(struct trace_seq *s,
409typedef char *(tep_func_resolver_t)(void *priv, 409typedef char *(tep_func_resolver_t)(void *priv,
410 unsigned long long *addrp, char **modp); 410 unsigned long long *addrp, char **modp);
411void tep_set_flag(struct tep_handle *tep, int flag); 411void tep_set_flag(struct tep_handle *tep, int flag);
412unsigned short __tep_data2host2(struct tep_handle *pevent, unsigned short data);
413unsigned int __tep_data2host4(struct tep_handle *pevent, unsigned int data);
414unsigned long long
415__tep_data2host8(struct tep_handle *pevent, unsigned long long data);
416
417#define tep_data2host2(pevent, ptr) __tep_data2host2(pevent, *(unsigned short *)(ptr))
418#define tep_data2host4(pevent, ptr) __tep_data2host4(pevent, *(unsigned int *)(ptr))
419#define tep_data2host8(pevent, ptr) \
420({ \
421 unsigned long long __val; \
422 \
423 memcpy(&__val, (ptr), sizeof(unsigned long long)); \
424 __tep_data2host8(pevent, __val); \
425})
426 412
427static inline int tep_host_bigendian(void) 413static inline int tep_host_bigendian(void)
428{ 414{
@@ -454,14 +440,14 @@ int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
454int tep_pid_is_registered(struct tep_handle *pevent, int pid); 440int tep_pid_is_registered(struct tep_handle *pevent, int pid);
455 441
456void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s, 442void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
457 struct tep_event_format *event, 443 struct tep_event *event,
458 struct tep_record *record); 444 struct tep_record *record);
459void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s, 445void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
460 struct tep_event_format *event, 446 struct tep_event *event,
461 struct tep_record *record, 447 struct tep_record *record,
462 bool use_trace_clock); 448 bool use_trace_clock);
463void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s, 449void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
464 struct tep_event_format *event, 450 struct tep_event *event,
465 struct tep_record *record); 451 struct tep_record *record);
466void tep_print_event(struct tep_handle *pevent, struct trace_seq *s, 452void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
467 struct tep_record *record, bool use_trace_clock); 453 struct tep_record *record, bool use_trace_clock);
@@ -472,32 +458,30 @@ int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long si
472enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf, 458enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
473 unsigned long size, const char *sys); 459 unsigned long size, const char *sys);
474enum tep_errno tep_parse_format(struct tep_handle *pevent, 460enum tep_errno tep_parse_format(struct tep_handle *pevent,
475 struct tep_event_format **eventp, 461 struct tep_event **eventp,
476 const char *buf, 462 const char *buf,
477 unsigned long size, const char *sys); 463 unsigned long size, const char *sys);
478void tep_free_format(struct tep_event_format *event);
479void tep_free_format_field(struct tep_format_field *field);
480 464
481void *tep_get_field_raw(struct trace_seq *s, struct tep_event_format *event, 465void *tep_get_field_raw(struct trace_seq *s, struct tep_event *event,
482 const char *name, struct tep_record *record, 466 const char *name, struct tep_record *record,
483 int *len, int err); 467 int *len, int err);
484 468
485int tep_get_field_val(struct trace_seq *s, struct tep_event_format *event, 469int tep_get_field_val(struct trace_seq *s, struct tep_event *event,
486 const char *name, struct tep_record *record, 470 const char *name, struct tep_record *record,
487 unsigned long long *val, int err); 471 unsigned long long *val, int err);
488int tep_get_common_field_val(struct trace_seq *s, struct tep_event_format *event, 472int tep_get_common_field_val(struct trace_seq *s, struct tep_event *event,
489 const char *name, struct tep_record *record, 473 const char *name, struct tep_record *record,
490 unsigned long long *val, int err); 474 unsigned long long *val, int err);
491int tep_get_any_field_val(struct trace_seq *s, struct tep_event_format *event, 475int tep_get_any_field_val(struct trace_seq *s, struct tep_event *event,
492 const char *name, struct tep_record *record, 476 const char *name, struct tep_record *record,
493 unsigned long long *val, int err); 477 unsigned long long *val, int err);
494 478
495int tep_print_num_field(struct trace_seq *s, const char *fmt, 479int tep_print_num_field(struct trace_seq *s, const char *fmt,
496 struct tep_event_format *event, const char *name, 480 struct tep_event *event, const char *name,
497 struct tep_record *record, int err); 481 struct tep_record *record, int err);
498 482
499int tep_print_func_field(struct trace_seq *s, const char *fmt, 483int tep_print_func_field(struct trace_seq *s, const char *fmt,
500 struct tep_event_format *event, const char *name, 484 struct tep_event *event, const char *name,
501 struct tep_record *record, int err); 485 struct tep_record *record, int err);
502 486
503int tep_register_event_handler(struct tep_handle *pevent, int id, 487int tep_register_event_handler(struct tep_handle *pevent, int id,
@@ -513,9 +497,9 @@ int tep_register_print_function(struct tep_handle *pevent,
513int tep_unregister_print_function(struct tep_handle *pevent, 497int tep_unregister_print_function(struct tep_handle *pevent,
514 tep_func_handler func, char *name); 498 tep_func_handler func, char *name);
515 499
516struct tep_format_field *tep_find_common_field(struct tep_event_format *event, const char *name); 500struct tep_format_field *tep_find_common_field(struct tep_event *event, const char *name);
517struct tep_format_field *tep_find_field(struct tep_event_format *event, const char *name); 501struct tep_format_field *tep_find_field(struct tep_event *event, const char *name);
518struct tep_format_field *tep_find_any_field(struct tep_event_format *event, const char *name); 502struct tep_format_field *tep_find_any_field(struct tep_event *event, const char *name);
519 503
520const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr); 504const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr);
521unsigned long long 505unsigned long long
@@ -524,19 +508,19 @@ unsigned long long tep_read_number(struct tep_handle *pevent, const void *ptr, i
524int tep_read_number_field(struct tep_format_field *field, const void *data, 508int tep_read_number_field(struct tep_format_field *field, const void *data,
525 unsigned long long *value); 509 unsigned long long *value);
526 510
527struct tep_event_format *tep_get_first_event(struct tep_handle *tep); 511struct tep_event *tep_get_first_event(struct tep_handle *tep);
528int tep_get_events_count(struct tep_handle *tep); 512int tep_get_events_count(struct tep_handle *tep);
529struct tep_event_format *tep_find_event(struct tep_handle *pevent, int id); 513struct tep_event *tep_find_event(struct tep_handle *pevent, int id);
530 514
531struct tep_event_format * 515struct tep_event *
532tep_find_event_by_name(struct tep_handle *pevent, const char *sys, const char *name); 516tep_find_event_by_name(struct tep_handle *pevent, const char *sys, const char *name);
533struct tep_event_format * 517struct tep_event *
534tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record); 518tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record);
535 519
536void tep_data_lat_fmt(struct tep_handle *pevent, 520void tep_data_lat_fmt(struct tep_handle *pevent,
537 struct trace_seq *s, struct tep_record *record); 521 struct trace_seq *s, struct tep_record *record);
538int tep_data_type(struct tep_handle *pevent, struct tep_record *rec); 522int tep_data_type(struct tep_handle *pevent, struct tep_record *rec);
539struct tep_event_format *tep_data_event_from_type(struct tep_handle *pevent, int type); 523struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type);
540int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec); 524int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec);
541int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec); 525int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec);
542int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec); 526int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec);
@@ -549,15 +533,15 @@ int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline);
549void tep_print_field(struct trace_seq *s, void *data, 533void tep_print_field(struct trace_seq *s, void *data,
550 struct tep_format_field *field); 534 struct tep_format_field *field);
551void tep_print_fields(struct trace_seq *s, void *data, 535void tep_print_fields(struct trace_seq *s, void *data,
552 int size __maybe_unused, struct tep_event_format *event); 536 int size __maybe_unused, struct tep_event *event);
553void tep_event_info(struct trace_seq *s, struct tep_event_format *event, 537void tep_event_info(struct trace_seq *s, struct tep_event *event,
554 struct tep_record *record); 538 struct tep_record *record);
555int tep_strerror(struct tep_handle *pevent, enum tep_errno errnum, 539int tep_strerror(struct tep_handle *pevent, enum tep_errno errnum,
556 char *buf, size_t buflen); 540 char *buf, size_t buflen);
557 541
558struct tep_event_format **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type); 542struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type);
559struct tep_format_field **tep_event_common_fields(struct tep_event_format *event); 543struct tep_format_field **tep_event_common_fields(struct tep_event *event);
560struct tep_format_field **tep_event_fields(struct tep_event_format *event); 544struct tep_format_field **tep_event_fields(struct tep_event *event);
561 545
562enum tep_endian { 546enum tep_endian {
563 TEP_LITTLE_ENDIAN = 0, 547 TEP_LITTLE_ENDIAN = 0,
@@ -581,6 +565,7 @@ struct tep_handle *tep_alloc(void);
581void tep_free(struct tep_handle *pevent); 565void tep_free(struct tep_handle *pevent);
582void tep_ref(struct tep_handle *pevent); 566void tep_ref(struct tep_handle *pevent);
583void tep_unref(struct tep_handle *pevent); 567void tep_unref(struct tep_handle *pevent);
568int tep_get_ref(struct tep_handle *tep);
584 569
585/* access to the internal parser */ 570/* access to the internal parser */
586void tep_buffer_init(const char *buf, unsigned long long size); 571void tep_buffer_init(const char *buf, unsigned long long size);
@@ -712,7 +697,7 @@ struct tep_filter_arg {
712 697
713struct tep_filter_type { 698struct tep_filter_type {
714 int event_id; 699 int event_id;
715 struct tep_event_format *event; 700 struct tep_event *event;
716 struct tep_filter_arg *filter; 701 struct tep_filter_arg *filter;
717}; 702};
718 703
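
After the rename, callers enumerate events through struct tep_event; a sketch relying only on tep_list_events() and tep_get_events_count() from the header above (TEP_EVENT_SORT_NAME is assumed to be one of the tep_event_sort_type enumerators):

    #include <stdio.h>
    #include "event-parse.h"

    /* Print "system:name" for every parsed event, sorted by name. */
    static void list_events_sketch(struct tep_handle *tep)
    {
            struct tep_event **events;
            int i;

            events = tep_list_events(tep, TEP_EVENT_SORT_NAME);
            if (!events)
                    return;

            /* the array is owned by the tep handle, so it is not freed here */
            for (i = 0; i < tep_get_events_count(tep); i++)
                    printf("%s:%s\n", events[i]->system, events[i]->name);
    }
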
diff --git a/tools/lib/traceevent/libtraceevent.pc.template b/tools/lib/traceevent/libtraceevent.pc.template
new file mode 100644
index 000000000000..42e4d6cb6b9e
--- /dev/null
+++ b/tools/lib/traceevent/libtraceevent.pc.template
@@ -0,0 +1,10 @@
1prefix=INSTALL_PREFIX
2libdir=${prefix}/lib64
3includedir=${prefix}/include/traceevent
4
5Name: libtraceevent
6URL: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
7Description: Linux kernel trace event library
8Version: LIB_VERSION
9Cflags: -I${includedir}
10Libs: -L${libdir} -ltraceevent
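
Once the template is installed as libtraceevent.pc, external users can pick up the flags the usual pkg-config way; for example (the source file name is illustrative):

    cc $(pkg-config --cflags libtraceevent) my_tool.c $(pkg-config --libs libtraceevent)
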
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index ed87cb56713d..cb5ce66dab6e 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -27,7 +27,7 @@ static struct tep_format_field cpu = {
27 27
28struct event_list { 28struct event_list {
29 struct event_list *next; 29 struct event_list *next;
30 struct tep_event_format *event; 30 struct tep_event *event;
31}; 31};
32 32
33static void show_error(char *error_buf, const char *fmt, ...) 33static void show_error(char *error_buf, const char *fmt, ...)
@@ -229,7 +229,7 @@ static void free_arg(struct tep_filter_arg *arg)
229} 229}
230 230
231static int add_event(struct event_list **events, 231static int add_event(struct event_list **events,
232 struct tep_event_format *event) 232 struct tep_event *event)
233{ 233{
234 struct event_list *list; 234 struct event_list *list;
235 235
@@ -243,7 +243,7 @@ static int add_event(struct event_list **events,
243 return 0; 243 return 0;
244} 244}
245 245
246static int event_match(struct tep_event_format *event, 246static int event_match(struct tep_event *event,
247 regex_t *sreg, regex_t *ereg) 247 regex_t *sreg, regex_t *ereg)
248{ 248{
249 if (sreg) { 249 if (sreg) {
@@ -259,7 +259,7 @@ static enum tep_errno
259find_event(struct tep_handle *pevent, struct event_list **events, 259find_event(struct tep_handle *pevent, struct event_list **events,
260 char *sys_name, char *event_name) 260 char *sys_name, char *event_name)
261{ 261{
262 struct tep_event_format *event; 262 struct tep_event *event;
263 regex_t ereg; 263 regex_t ereg;
264 regex_t sreg; 264 regex_t sreg;
265 int match = 0; 265 int match = 0;
@@ -334,7 +334,7 @@ static void free_events(struct event_list *events)
334} 334}
335 335
336static enum tep_errno 336static enum tep_errno
337create_arg_item(struct tep_event_format *event, const char *token, 337create_arg_item(struct tep_event *event, const char *token,
338 enum tep_event_type type, struct tep_filter_arg **parg, char *error_str) 338 enum tep_event_type type, struct tep_filter_arg **parg, char *error_str)
339{ 339{
340 struct tep_format_field *field; 340 struct tep_format_field *field;
@@ -940,7 +940,7 @@ static int collapse_tree(struct tep_filter_arg *arg,
940} 940}
941 941
942static enum tep_errno 942static enum tep_errno
943process_filter(struct tep_event_format *event, struct tep_filter_arg **parg, 943process_filter(struct tep_event *event, struct tep_filter_arg **parg,
944 char *error_str, int not) 944 char *error_str, int not)
945{ 945{
946 enum tep_event_type type; 946 enum tep_event_type type;
@@ -1180,7 +1180,7 @@ process_filter(struct tep_event_format *event, struct tep_filter_arg **parg,
1180} 1180}
1181 1181
1182static enum tep_errno 1182static enum tep_errno
1183process_event(struct tep_event_format *event, const char *filter_str, 1183process_event(struct tep_event *event, const char *filter_str,
1184 struct tep_filter_arg **parg, char *error_str) 1184 struct tep_filter_arg **parg, char *error_str)
1185{ 1185{
1186 int ret; 1186 int ret;
@@ -1205,7 +1205,7 @@ process_event(struct tep_event_format *event, const char *filter_str,
1205} 1205}
1206 1206
1207static enum tep_errno 1207static enum tep_errno
1208filter_event(struct tep_event_filter *filter, struct tep_event_format *event, 1208filter_event(struct tep_event_filter *filter, struct tep_event *event,
1209 const char *filter_str, char *error_str) 1209 const char *filter_str, char *error_str)
1210{ 1210{
1211 struct tep_filter_type *filter_type; 1211 struct tep_filter_type *filter_type;
@@ -1457,7 +1457,7 @@ static int copy_filter_type(struct tep_event_filter *filter,
1457 struct tep_filter_type *filter_type) 1457 struct tep_filter_type *filter_type)
1458{ 1458{
1459 struct tep_filter_arg *arg; 1459 struct tep_filter_arg *arg;
1460 struct tep_event_format *event; 1460 struct tep_event *event;
1461 const char *sys; 1461 const char *sys;
1462 const char *name; 1462 const char *name;
1463 char *str; 1463 char *str;
@@ -1539,7 +1539,7 @@ int tep_update_trivial(struct tep_event_filter *dest, struct tep_event_filter *s
1539{ 1539{
1540 struct tep_handle *src_pevent; 1540 struct tep_handle *src_pevent;
1541 struct tep_handle *dest_pevent; 1541 struct tep_handle *dest_pevent;
1542 struct tep_event_format *event; 1542 struct tep_event *event;
1543 struct tep_filter_type *filter_type; 1543 struct tep_filter_type *filter_type;
1544 struct tep_filter_arg *arg; 1544 struct tep_filter_arg *arg;
1545 char *str; 1545 char *str;
@@ -1683,11 +1683,11 @@ int tep_filter_event_has_trivial(struct tep_event_filter *filter,
1683 } 1683 }
1684} 1684}
1685 1685
1686static int test_filter(struct tep_event_format *event, struct tep_filter_arg *arg, 1686static int test_filter(struct tep_event *event, struct tep_filter_arg *arg,
1687 struct tep_record *record, enum tep_errno *err); 1687 struct tep_record *record, enum tep_errno *err);
1688 1688
1689static const char * 1689static const char *
1690get_comm(struct tep_event_format *event, struct tep_record *record) 1690get_comm(struct tep_event *event, struct tep_record *record)
1691{ 1691{
1692 const char *comm; 1692 const char *comm;
1693 int pid; 1693 int pid;
@@ -1698,7 +1698,7 @@ get_comm(struct tep_event_format *event, struct tep_record *record)
1698} 1698}
1699 1699
1700static unsigned long long 1700static unsigned long long
1701get_value(struct tep_event_format *event, 1701get_value(struct tep_event *event,
1702 struct tep_format_field *field, struct tep_record *record) 1702 struct tep_format_field *field, struct tep_record *record)
1703{ 1703{
1704 unsigned long long val; 1704 unsigned long long val;
@@ -1734,11 +1734,11 @@ get_value(struct tep_event_format *event,
1734} 1734}
1735 1735
1736static unsigned long long 1736static unsigned long long
1737get_arg_value(struct tep_event_format *event, struct tep_filter_arg *arg, 1737get_arg_value(struct tep_event *event, struct tep_filter_arg *arg,
1738 struct tep_record *record, enum tep_errno *err); 1738 struct tep_record *record, enum tep_errno *err);
1739 1739
1740static unsigned long long 1740static unsigned long long
1741get_exp_value(struct tep_event_format *event, struct tep_filter_arg *arg, 1741get_exp_value(struct tep_event *event, struct tep_filter_arg *arg,
1742 struct tep_record *record, enum tep_errno *err) 1742 struct tep_record *record, enum tep_errno *err)
1743{ 1743{
1744 unsigned long long lval, rval; 1744 unsigned long long lval, rval;
@@ -1793,7 +1793,7 @@ get_exp_value(struct tep_event_format *event, struct tep_filter_arg *arg,
1793} 1793}
1794 1794
1795static unsigned long long 1795static unsigned long long
1796get_arg_value(struct tep_event_format *event, struct tep_filter_arg *arg, 1796get_arg_value(struct tep_event *event, struct tep_filter_arg *arg,
1797 struct tep_record *record, enum tep_errno *err) 1797 struct tep_record *record, enum tep_errno *err)
1798{ 1798{
1799 switch (arg->type) { 1799 switch (arg->type) {
@@ -1817,7 +1817,7 @@ get_arg_value(struct tep_event_format *event, struct tep_filter_arg *arg,
1817 return 0; 1817 return 0;
1818} 1818}
1819 1819
1820static int test_num(struct tep_event_format *event, struct tep_filter_arg *arg, 1820static int test_num(struct tep_event *event, struct tep_filter_arg *arg,
1821 struct tep_record *record, enum tep_errno *err) 1821 struct tep_record *record, enum tep_errno *err)
1822{ 1822{
1823 unsigned long long lval, rval; 1823 unsigned long long lval, rval;
@@ -1860,7 +1860,7 @@ static int test_num(struct tep_event_format *event, struct tep_filter_arg *arg,
1860 1860
1861static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *record) 1861static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *record)
1862{ 1862{
1863 struct tep_event_format *event; 1863 struct tep_event *event;
1864 struct tep_handle *pevent; 1864 struct tep_handle *pevent;
1865 unsigned long long addr; 1865 unsigned long long addr;
1866 const char *val = NULL; 1866 const char *val = NULL;
@@ -1908,7 +1908,7 @@ static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *
1908 return val; 1908 return val;
1909} 1909}
1910 1910
1911static int test_str(struct tep_event_format *event, struct tep_filter_arg *arg, 1911static int test_str(struct tep_event *event, struct tep_filter_arg *arg,
1912 struct tep_record *record, enum tep_errno *err) 1912 struct tep_record *record, enum tep_errno *err)
1913{ 1913{
1914 const char *val; 1914 const char *val;
@@ -1939,7 +1939,7 @@ static int test_str(struct tep_event_format *event, struct tep_filter_arg *arg,
1939 } 1939 }
1940} 1940}
1941 1941
1942static int test_op(struct tep_event_format *event, struct tep_filter_arg *arg, 1942static int test_op(struct tep_event *event, struct tep_filter_arg *arg,
1943 struct tep_record *record, enum tep_errno *err) 1943 struct tep_record *record, enum tep_errno *err)
1944{ 1944{
1945 switch (arg->op.type) { 1945 switch (arg->op.type) {
@@ -1961,7 +1961,7 @@ static int test_op(struct tep_event_format *event, struct tep_filter_arg *arg,
1961 } 1961 }
1962} 1962}
1963 1963
1964static int test_filter(struct tep_event_format *event, struct tep_filter_arg *arg, 1964static int test_filter(struct tep_event *event, struct tep_filter_arg *arg,
1965 struct tep_record *record, enum tep_errno *err) 1965 struct tep_record *record, enum tep_errno *err)
1966{ 1966{
1967 if (*err) { 1967 if (*err) {
diff --git a/tools/lib/traceevent/plugin_function.c b/tools/lib/traceevent/plugin_function.c
index 528acc75d81a..a73eca34a8f9 100644
--- a/tools/lib/traceevent/plugin_function.c
+++ b/tools/lib/traceevent/plugin_function.c
@@ -124,7 +124,7 @@ static int add_and_get_index(const char *parent, const char *child, int cpu)
124} 124}
125 125
126static int function_handler(struct trace_seq *s, struct tep_record *record, 126static int function_handler(struct trace_seq *s, struct tep_record *record,
127 struct tep_event_format *event, void *context) 127 struct tep_event *event, void *context)
128{ 128{
129 struct tep_handle *pevent = event->pevent; 129 struct tep_handle *pevent = event->pevent;
130 unsigned long long function; 130 unsigned long long function;
diff --git a/tools/lib/traceevent/plugin_hrtimer.c b/tools/lib/traceevent/plugin_hrtimer.c
index 9aa05b4ca811..5db5e401275f 100644
--- a/tools/lib/traceevent/plugin_hrtimer.c
+++ b/tools/lib/traceevent/plugin_hrtimer.c
@@ -27,7 +27,7 @@
27 27
28static int timer_expire_handler(struct trace_seq *s, 28static int timer_expire_handler(struct trace_seq *s,
29 struct tep_record *record, 29 struct tep_record *record,
30 struct tep_event_format *event, void *context) 30 struct tep_event *event, void *context)
31{ 31{
32 trace_seq_printf(s, "hrtimer="); 32 trace_seq_printf(s, "hrtimer=");
33 33
@@ -47,7 +47,7 @@ static int timer_expire_handler(struct trace_seq *s,
47 47
48static int timer_start_handler(struct trace_seq *s, 48static int timer_start_handler(struct trace_seq *s,
49 struct tep_record *record, 49 struct tep_record *record,
50 struct tep_event_format *event, void *context) 50 struct tep_event *event, void *context)
51{ 51{
52 trace_seq_printf(s, "hrtimer="); 52 trace_seq_printf(s, "hrtimer=");
53 53
diff --git a/tools/lib/traceevent/plugin_kmem.c b/tools/lib/traceevent/plugin_kmem.c
index 1beb4eaddfdf..0e3c601f9ed1 100644
--- a/tools/lib/traceevent/plugin_kmem.c
+++ b/tools/lib/traceevent/plugin_kmem.c
@@ -25,7 +25,7 @@
25#include "trace-seq.h" 25#include "trace-seq.h"
26 26
27static int call_site_handler(struct trace_seq *s, struct tep_record *record, 27static int call_site_handler(struct trace_seq *s, struct tep_record *record,
28 struct tep_event_format *event, void *context) 28 struct tep_event *event, void *context)
29{ 29{
30 struct tep_format_field *field; 30 struct tep_format_field *field;
31 unsigned long long val, addr; 31 unsigned long long val, addr;
diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c
index d13c22846fa9..754050eea467 100644
--- a/tools/lib/traceevent/plugin_kvm.c
+++ b/tools/lib/traceevent/plugin_kvm.c
@@ -249,7 +249,7 @@ static const char *find_exit_reason(unsigned isa, int val)
249} 249}
250 250
251static int print_exit_reason(struct trace_seq *s, struct tep_record *record, 251static int print_exit_reason(struct trace_seq *s, struct tep_record *record,
252 struct tep_event_format *event, const char *field) 252 struct tep_event *event, const char *field)
253{ 253{
254 unsigned long long isa; 254 unsigned long long isa;
255 unsigned long long val; 255 unsigned long long val;
@@ -270,7 +270,7 @@ static int print_exit_reason(struct trace_seq *s, struct tep_record *record,
270} 270}
271 271
272static int kvm_exit_handler(struct trace_seq *s, struct tep_record *record, 272static int kvm_exit_handler(struct trace_seq *s, struct tep_record *record,
273 struct tep_event_format *event, void *context) 273 struct tep_event *event, void *context)
274{ 274{
275 unsigned long long info1 = 0, info2 = 0; 275 unsigned long long info1 = 0, info2 = 0;
276 276
@@ -293,7 +293,7 @@ static int kvm_exit_handler(struct trace_seq *s, struct tep_record *record,
293 293
294static int kvm_emulate_insn_handler(struct trace_seq *s, 294static int kvm_emulate_insn_handler(struct trace_seq *s,
295 struct tep_record *record, 295 struct tep_record *record,
296 struct tep_event_format *event, void *context) 296 struct tep_event *event, void *context)
297{ 297{
298 unsigned long long rip, csbase, len, flags, failed; 298 unsigned long long rip, csbase, len, flags, failed;
299 int llen; 299 int llen;
@@ -332,7 +332,7 @@ static int kvm_emulate_insn_handler(struct trace_seq *s,
332 332
333 333
334static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct tep_record *record, 334static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct tep_record *record,
335 struct tep_event_format *event, void *context) 335 struct tep_event *event, void *context)
336{ 336{
337 if (print_exit_reason(s, record, event, "exit_code") < 0) 337 if (print_exit_reason(s, record, event, "exit_code") < 0)
338 return -1; 338 return -1;
@@ -346,7 +346,7 @@ static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct tep_reco
346} 346}
347 347
348static int kvm_nested_vmexit_handler(struct trace_seq *s, struct tep_record *record, 348static int kvm_nested_vmexit_handler(struct trace_seq *s, struct tep_record *record,
349 struct tep_event_format *event, void *context) 349 struct tep_event *event, void *context)
350{ 350{
351 tep_print_num_field(s, "rip %llx ", event, "rip", record, 1); 351 tep_print_num_field(s, "rip %llx ", event, "rip", record, 1);
352 352
@@ -372,7 +372,7 @@ union kvm_mmu_page_role {
372}; 372};
373 373
374static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record, 374static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
375 struct tep_event_format *event, void *context) 375 struct tep_event *event, void *context)
376{ 376{
377 unsigned long long val; 377 unsigned long long val;
378 static const char *access_str[] = { 378 static const char *access_str[] = {
@@ -387,7 +387,7 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
387 387
388 /* 388 /*
389 * We can only use the structure if file is of the same 389 * We can only use the structure if file is of the same
390 * endianess. 390 * endianness.
391 */ 391 */
392 if (tep_is_file_bigendian(event->pevent) == 392 if (tep_is_file_bigendian(event->pevent) ==
393 tep_is_host_bigendian(event->pevent)) { 393 tep_is_host_bigendian(event->pevent)) {
@@ -419,7 +419,7 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
419 419
420static int kvm_mmu_get_page_handler(struct trace_seq *s, 420static int kvm_mmu_get_page_handler(struct trace_seq *s,
421 struct tep_record *record, 421 struct tep_record *record,
422 struct tep_event_format *event, void *context) 422 struct tep_event *event, void *context)
423{ 423{
424 unsigned long long val; 424 unsigned long long val;
425 425
diff --git a/tools/lib/traceevent/plugin_mac80211.c b/tools/lib/traceevent/plugin_mac80211.c
index da3855e7b86f..e38b9477aad2 100644
--- a/tools/lib/traceevent/plugin_mac80211.c
+++ b/tools/lib/traceevent/plugin_mac80211.c
@@ -26,7 +26,7 @@
26 26
27#define INDENT 65 27#define INDENT 65
28 28
29static void print_string(struct trace_seq *s, struct tep_event_format *event, 29static void print_string(struct trace_seq *s, struct tep_event *event,
30 const char *name, const void *data) 30 const char *name, const void *data)
31{ 31{
32 struct tep_format_field *f = tep_find_field(event, name); 32 struct tep_format_field *f = tep_find_field(event, name);
@@ -60,7 +60,7 @@ static void print_string(struct trace_seq *s, struct tep_event_format *event,
60 60
61static int drv_bss_info_changed(struct trace_seq *s, 61static int drv_bss_info_changed(struct trace_seq *s,
62 struct tep_record *record, 62 struct tep_record *record,
63 struct tep_event_format *event, void *context) 63 struct tep_event *event, void *context)
64{ 64{
65 void *data = record->data; 65 void *data = record->data;
66 66
diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c
index 77882272672f..834c9e378ff8 100644
--- a/tools/lib/traceevent/plugin_sched_switch.c
+++ b/tools/lib/traceevent/plugin_sched_switch.c
@@ -67,7 +67,7 @@ static void write_and_save_comm(struct tep_format_field *field,
67 67
68static int sched_wakeup_handler(struct trace_seq *s, 68static int sched_wakeup_handler(struct trace_seq *s,
69 struct tep_record *record, 69 struct tep_record *record,
70 struct tep_event_format *event, void *context) 70 struct tep_event *event, void *context)
71{ 71{
72 struct tep_format_field *field; 72 struct tep_format_field *field;
73 unsigned long long val; 73 unsigned long long val;
@@ -96,7 +96,7 @@ static int sched_wakeup_handler(struct trace_seq *s,
96 96
97static int sched_switch_handler(struct trace_seq *s, 97static int sched_switch_handler(struct trace_seq *s,
98 struct tep_record *record, 98 struct tep_record *record,
99 struct tep_event_format *event, void *context) 99 struct tep_event *event, void *context)
100{ 100{
101 struct tep_format_field *field; 101 struct tep_format_field *field;
102 unsigned long long val; 102 unsigned long long val;
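
The plugin changes above are purely the struct tep_event rename in the handler signatures; a hedged sketch of what an out-of-tree handler looks like after the rename, relying only on tep_register_event_handler(), tep_get_field_val() and trace_seq_printf() as used in this series (the "next_pid" field name and the -1 "match by system/name" id are assumptions):

    #include "event-parse.h"
    #include "trace-seq.h"

    static int my_switch_handler(struct trace_seq *s, struct tep_record *record,
                                 struct tep_event *event, void *context)
    {
            unsigned long long next_pid;

            /* "next_pid" is an assumed sched_switch field name */
            if (tep_get_field_val(s, event, "next_pid", record, &next_pid, 1))
                    return -1;

            trace_seq_printf(s, "next_pid=%llu", next_pid);
            return 0;
    }

    static void register_sketch(struct tep_handle *tep)
    {
            /* id of -1 is assumed to mean "look the event up by system/name" */
            tep_register_event_handler(tep, -1, "sched", "sched_switch",
                                       my_switch_handler, NULL);
    }
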
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 32f4a898e3f2..661b1fb3f8ba 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -199,6 +199,12 @@ colors.*::
199 Colors for headers in the output of a sub-commands (top, report). 199 Colors for headers in the output of a sub-commands (top, report).
200 Default values are 'white', 'blue'. 200 Default values are 'white', 'blue'.
201 201
202core.*::
203 core.proc-map-timeout::
204 Sets a timeout (in milliseconds) for parsing /proc/<pid>/maps files.
205 Can be overridden by the --proc-map-timeout option on supported
206 subcommands. The default timeout is 500ms.
207
202tui.*, gtk.*:: 208tui.*, gtk.*::
203 Subcommands that can be configured here are 'top', 'report' and 'annotate'. 209 Subcommands that can be configured here are 'top', 'report' and 'annotate'.
204 These values are booleans, for example: 210 These values are booleans, for example:
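
A ~/.perfconfig fragment setting the new key might look like this (the 1000 ms value is only an example; the default stays 500 ms):

    [core]
            proc-map-timeout = 1000
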
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 667c14e56031..138fb6e94b3c 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -172,7 +172,7 @@ like cycles and instructions and some software events.
172Other PMUs and global measurements are normally root only. 172Other PMUs and global measurements are normally root only.
173Some event qualifiers, such as "any", are also root only. 173Some event qualifiers, such as "any", are also root only.
174 174
175This can be overriden by setting the kernel.perf_event_paranoid 175This can be overridden by setting the kernel.perf_event_paranoid
176sysctl to -1, which allows non root to use these events. 176sysctl to -1, which allows non root to use these events.
177 177
178For accessing trace point events perf needs to have read access to 178For accessing trace point events perf needs to have read access to
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 246dee081efd..d232b13ea713 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -435,6 +435,11 @@ Specify vmlinux path which has debuginfo.
435--buildid-all:: 435--buildid-all::
436Record build-id of all DSOs regardless whether it's actually hit or not. 436Record build-id of all DSOs regardless whether it's actually hit or not.
437 437
438--aio[=n]::
439Use <n> control blocks in asynchronous (POSIX AIO) trace writing mode (default: 1, max: 4).
440Asynchronous mode is supported only when the perf tool is linked against a libc that
441provides a POSIX AIO implementation.
442
438--all-kernel:: 443--all-kernel::
439Configure all used events to run in kernel space. 444Configure all used events to run in kernel space.
440 445
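
An illustrative invocation of the new mode, assuming a libc with POSIX AIO support (the event and workload are chosen arbitrarily):

    perf record --aio=4 -e cycles -- sleep 5
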
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 474a4941f65d..1a27bfe05039 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -126,6 +126,14 @@ OPTIONS
126 And default sort keys are changed to comm, dso_from, symbol_from, dso_to 126 And default sort keys are changed to comm, dso_from, symbol_from, dso_to
127 and symbol_to, see '--branch-stack'. 127 and symbol_to, see '--branch-stack'.
128 128
129 When the sort key symbol is specified, columns "IPC" and "IPC Coverage"
130 are enabled automatically. Column "IPC" reports the average IPC per function
131 and column "IPC coverage" reports the percentage of instructions with
132 sampled IPC in this function. IPC means Instruction Per Cycle. If it's low,
133 it indicates there may be a performance bottleneck when the function is
134 executed, such as a memory access bottleneck. If a function has high overhead
135 and low IPC, it's worth further analyzing it to optimize its performance.
136
129 If the --mem-mode option is used, the following sort keys are also available 137 If the --mem-mode option is used, the following sort keys are also available
130 (incompatible with --branch-stack): 138 (incompatible with --branch-stack):
131 symbol_daddr, dso_daddr, locked, tlb, mem, snoop, dcacheline. 139 symbol_daddr, dso_daddr, locked, tlb, mem, snoop, dcacheline.
@@ -244,7 +252,7 @@ OPTIONS
244 Usually more convenient to use --branch-history for this. 252 Usually more convenient to use --branch-history for this.
245 253
246 value can be: 254 value can be:
247 - percent: diplay overhead percent (default) 255 - percent: display overhead percent (default)
248 - period: display event period 256 - period: display event period
249 - count: display event count 257 - count: display event count
250 258
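
For example, explicitly sorting by symbol is enough to get the new columns:

    perf report -s symbol
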
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index a2b37ce48094..9e4def08d569 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -117,7 +117,7 @@ OPTIONS
117 Comma separated list of fields to print. Options are: 117 Comma separated list of fields to print. Options are:
118 comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff, 118 comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
119 srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output, brstackinsn, 119 srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output, brstackinsn,
120 brstackoff, callindent, insn, insnlen, synth, phys_addr, metric, misc. 120 brstackoff, callindent, insn, insnlen, synth, phys_addr, metric, misc, srccode.
121 Field list can be prepended with the type, trace, sw or hw, 121 Field list can be prepended with the type, trace, sw or hw,
122 to indicate to which event type the field list applies. 122 to indicate to which event type the field list applies.
123 e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace 123 e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
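
For example, appending the new field to a field list (the rest of the list is illustrative):

    perf script -F comm,tid,time,ip,sym,srccode
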
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index b10a90b6a718..4bc2085e5197 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -50,7 +50,7 @@ report::
50 /sys/bus/event_source/devices/<pmu>/format/* 50 /sys/bus/event_source/devices/<pmu>/format/*
51 51
52 Note that the last two syntaxes support prefix and glob matching in 52 Note that the last two syntaxes support prefix and glob matching in
53 the PMU name to simplify creation of events accross multiple instances 53 the PMU name to simplify creation of events across multiple instances
54 of the same type of PMU in large systems (e.g. memory controller PMUs). 54 of the same type of PMU in large systems (e.g. memory controller PMUs).
55 Multiple PMU instances are typical for uncore PMUs, so the prefix 55 Multiple PMU instances are typical for uncore PMUs, so the prefix
56 'uncore_' is also ignored when performing this match. 56 'uncore_' is also ignored when performing this match.
@@ -277,7 +277,7 @@ echo 0 > /proc/sys/kernel/nmi_watchdog
277for best results. Otherwise the bottlenecks may be inconsistent 277for best results. Otherwise the bottlenecks may be inconsistent
278on workload with changing phases. 278on workload with changing phases.
279 279
280This enables --metric-only, unless overriden with --no-metric-only. 280This enables --metric-only, unless overridden with --no-metric-only.
281 281
282To interpret the results it is usually needed to know on which 282To interpret the results it is usually needed to know on which
283CPUs the workload runs on. If needed the CPUs can be forced using 283CPUs the workload runs on. If needed the CPUs can be forced using
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 808b664343c9..44d89fb9c788 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -70,6 +70,9 @@ Default is to monitor all CPUS.
70--ignore-vmlinux:: 70--ignore-vmlinux::
71 Ignore vmlinux files. 71 Ignore vmlinux files.
72 72
73--kallsyms=<file>::
74 kallsyms pathname
75
73-m <pages>:: 76-m <pages>::
74--mmap-pages=<pages>:: 77--mmap-pages=<pages>::
75 Number of mmap data pages (must be a power of two) or size 78 Number of mmap data pages (must be a power of two) or size
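
For example, resolving kernel symbols from a kallsyms copy rather than vmlinux (the path is illustrative):

    perf top --kallsyms /proc/kallsyms
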
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index e110010e7faa..b66f97a04b12 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -365,6 +365,12 @@ ifeq ($(feature-glibc), 1)
365 CFLAGS += -DHAVE_GLIBC_SUPPORT 365 CFLAGS += -DHAVE_GLIBC_SUPPORT
366endif 366endif
367 367
368ifeq ($(feature-libaio), 1)
369 ifndef NO_AIO
370 CFLAGS += -DHAVE_AIO_SUPPORT
371 endif
372endif
373
368ifdef NO_DWARF 374ifdef NO_DWARF
369 NO_LIBDW_DWARF_UNWIND := 1 375 NO_LIBDW_DWARF_UNWIND := 1
370endif 376endif
@@ -588,7 +594,7 @@ endif
588 594
589ifndef NO_LIBCRYPTO 595ifndef NO_LIBCRYPTO
590 ifneq ($(feature-libcrypto), 1) 596 ifneq ($(feature-libcrypto), 1)
591 msg := $(warning No libcrypto.h found, disables jitted code injection, please install libssl-devel or libssl-dev); 597 msg := $(warning No libcrypto.h found, disables jitted code injection, please install openssl-devel or libssl-dev);
592 NO_LIBCRYPTO := 1 598 NO_LIBCRYPTO := 1
593 else 599 else
594 CFLAGS += -DHAVE_LIBCRYPTO_SUPPORT 600 CFLAGS += -DHAVE_LIBCRYPTO_SUPPORT
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 239e7b3270f4..bfdaefd500ab 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -101,8 +101,13 @@ include ../scripts/utilities.mak
101# Define LIBCLANGLLVM if you DO want builtin clang and llvm support. 101# Define LIBCLANGLLVM if you DO want builtin clang and llvm support.
102# When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if 102# When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if
103# llvm-config is not in $PATH. 103# llvm-config is not in $PATH.
104 104#
105# Define NO_CORESIGHT if you do not want support for CoreSight trace decoding. 105# Define NO_CORESIGHT if you do not want support for CoreSight trace decoding.
106#
107# Define NO_AIO if you do not want support of Posix AIO based trace
108# streaming for record mode. Currently Posix AIO trace streaming is
109# supported only when linking with glibc.
110#
106 111
107# As per kernel Makefile, avoid funny character set dependencies 112# As per kernel Makefile, avoid funny character set dependencies
108unexport LC_ALL 113unexport LC_ALL
@@ -469,7 +474,7 @@ $(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_t
469mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c 474mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c
470mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh 475mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh
471 476
472$(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(arch_asm_uapi_dir)/mman.h $(mmap_flags_tbl) 477$(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
473 $(Q)$(SHELL) '$(mmap_flags_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@ 478 $(Q)$(SHELL) '$(mmap_flags_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
474 479
475mount_flags_array := $(beauty_outdir)/mount_flags_array.c 480mount_flags_array := $(beauty_outdir)/mount_flags_array.c
diff --git a/tools/perf/arch/arc/annotate/instructions.c b/tools/perf/arch/arc/annotate/instructions.c
new file mode 100644
index 000000000000..2f00e995c7e3
--- /dev/null
+++ b/tools/perf/arch/arc/annotate/instructions.c
@@ -0,0 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/compiler.h>
3
4static int arc__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
5{
6 arch->initialized = true;
7 arch->objdump.comment_char = ';';
8 return 0;
9}
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 82657c01a3b8..f3824ca7c20b 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -5,6 +5,13 @@
5#include "../util/util.h" 5#include "../util/util.h"
6#include "../util/debug.h" 6#include "../util/debug.h"
7 7
8const char *const arc_triplets[] = {
9 "arc-linux-",
10 "arc-snps-linux-uclibc-",
11 "arc-snps-linux-gnu-",
12 NULL
13};
14
8const char *const arm_triplets[] = { 15const char *const arm_triplets[] = {
9 "arm-eabi-", 16 "arm-eabi-",
10 "arm-linux-androideabi-", 17 "arm-linux-androideabi-",
@@ -147,7 +154,9 @@ static int perf_env__lookup_binutils_path(struct perf_env *env,
147 zfree(&buf); 154 zfree(&buf);
148 } 155 }
149 156
150 if (!strcmp(arch, "arm")) 157 if (!strcmp(arch, "arc"))
158 path_list = arc_triplets;
159 else if (!strcmp(arch, "arm"))
151 path_list = arm_triplets; 160 path_list = arm_triplets;
152 else if (!strcmp(arch, "arm64")) 161 else if (!strcmp(arch, "arm64"))
153 path_list = arm64_triplets; 162 path_list = arm64_triplets;
@@ -200,3 +209,13 @@ int perf_env__lookup_objdump(struct perf_env *env, const char **path)
200 209
201 return perf_env__lookup_binutils_path(env, "objdump", path); 210 return perf_env__lookup_binutils_path(env, "objdump", path);
202} 211}
212
213/*
214 * Some architectures have a single address space for kernel and user addresses,
215 * which makes it possible to determine if an address is in kernel space or user
216 * space.
217 */
218bool perf_env__single_address_space(struct perf_env *env)
219{
220 return strcmp(perf_env__arch(env), "sparc");
221}
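
The perf_env__single_address_space() helper added above simply special-cases sparc, where kernel and user code live in separate address spaces; everywhere else the address value alone can tell kernel space from user space. A minimal standalone illustration of the same check (illustrative only, not the perf code itself):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Every architecture except sparc is treated as having one combined
 * kernel/user address space.
 */
static bool single_address_space(const char *arch)
{
	return strcmp(arch, "sparc") != 0;
}

int main(void)
{
	printf("x86_64: %d\n", single_address_space("x86_64"));	/* 1 */
	printf("sparc:  %d\n", single_address_space("sparc"));		/* 0 */
	return 0;
}
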
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
index 2167001b18c5..c298a446d1f6 100644
--- a/tools/perf/arch/common.h
+++ b/tools/perf/arch/common.h
@@ -5,5 +5,6 @@
5#include "../util/env.h" 5#include "../util/env.h"
6 6
7int perf_env__lookup_objdump(struct perf_env *env, const char **path); 7int perf_env__lookup_objdump(struct perf_env *env, const char **path);
8bool perf_env__single_address_space(struct perf_env *env);
8 9
9#endif /* ARCH_PERF_COMMON_H */ 10#endif /* ARCH_PERF_COMMON_H */
diff --git a/tools/perf/arch/x86/tests/insn-x86.c b/tools/perf/arch/x86/tests/insn-x86.c
index a5d24ae5810d..c3e5f4ab0d3e 100644
--- a/tools/perf/arch/x86/tests/insn-x86.c
+++ b/tools/perf/arch/x86/tests/insn-x86.c
@@ -170,7 +170,7 @@ static int test_data_set(struct test_data *dat_set, int x86_64)
170 * 170 *
171 * If the test passes %0 is returned, otherwise %-1 is returned. Use the 171 * If the test passes %0 is returned, otherwise %-1 is returned. Use the
172 * verbose (-v) option to see all the instructions and whether or not they 172 * verbose (-v) option to see all the instructions and whether or not they
173 * decoded successfuly. 173 * decoded successfully.
174 */ 174 */
175int test__insn_x86(struct test *test __maybe_unused, int subtest __maybe_unused) 175int test__insn_x86(struct test *test __maybe_unused, int subtest __maybe_unused)
176{ 176{
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index db0ba8caf5a2..ba8ecaf52200 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -524,10 +524,21 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
524 struct perf_evsel *evsel) 524 struct perf_evsel *evsel)
525{ 525{
526 int err; 526 int err;
527 char c;
527 528
528 if (!evsel) 529 if (!evsel)
529 return 0; 530 return 0;
530 531
532 /*
533 * If supported, force pass-through config term (pt=1) even if user
534 * sets pt=0, which avoids senseless kernel errors.
535 */
536 if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
537 !(evsel->attr.config & 1)) {
538 pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
539 evsel->attr.config |= 1;
540 }
541
531 err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds", 542 err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
532 "cyc_thresh", "caps/psb_cyc", 543 "cyc_thresh", "caps/psb_cyc",
533 evsel->attr.config); 544 evsel->attr.config);
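
The fixup above only touches the event config when the PMU advertises a "pt" format term. A tiny standalone sketch of the same logic, assuming (as the hunk does) that bit 0 of attr.config is the pass-through flag:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t config = 0;	/* what "pt=0" would leave in attr.config */

	if (!(config & 1)) {
		fprintf(stderr, "pt=0 doesn't make sense, forcing pt=1\n");
		config |= 1;	/* force pass-through back on */
	}
	printf("config = 0x%llx\n", (unsigned long long)config);
	return 0;
}
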
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 1c41b4eaf73c..3d29d0524a89 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -189,7 +189,7 @@ static void add_man_viewer(const char *name)
189 while (*p) 189 while (*p)
190 p = &((*p)->next); 190 p = &((*p)->next);
191 *p = zalloc(sizeof(**p) + len + 1); 191 *p = zalloc(sizeof(**p) + len + 1);
192 strncpy((*p)->name, name, len); 192 strcpy((*p)->name, name);
193} 193}
194 194
195static int supported_man_viewer(const char *name, size_t len) 195static int supported_man_viewer(const char *name, size_t len)
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 2b1ef704169f..3d4cbc4e87c7 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1364,7 +1364,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
1364 "show events other than" 1364 "show events other than"
1365 " HLT (x86 only) or Wait state (s390 only)" 1365 " HLT (x86 only) or Wait state (s390 only)"
1366 " that take longer than duration usecs"), 1366 " that take longer than duration usecs"),
1367 OPT_UINTEGER(0, "proc-map-timeout", &kvm->opts.proc_map_timeout, 1367 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
1368 "per thread proc mmap processing timeout in ms"), 1368 "per thread proc mmap processing timeout in ms"),
1369 OPT_END() 1369 OPT_END()
1370 }; 1370 };
@@ -1394,7 +1394,6 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
1394 kvm->opts.target.uses_mmap = false; 1394 kvm->opts.target.uses_mmap = false;
1395 kvm->opts.target.uid_str = NULL; 1395 kvm->opts.target.uid_str = NULL;
1396 kvm->opts.target.uid = UINT_MAX; 1396 kvm->opts.target.uid = UINT_MAX;
1397 kvm->opts.proc_map_timeout = 500;
1398 1397
1399 symbol__init(NULL); 1398 symbol__init(NULL);
1400 disable_buildid_cache(); 1399 disable_buildid_cache();
@@ -1453,8 +1452,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
1453 perf_session__set_id_hdr_size(kvm->session); 1452 perf_session__set_id_hdr_size(kvm->session);
1454 ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true); 1453 ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
1455 machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target, 1454 machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
1456 kvm->evlist->threads, false, 1455 kvm->evlist->threads, false, 1);
1457 kvm->opts.proc_map_timeout, 1);
1458 err = kvm_live_open_events(kvm); 1456 err = kvm_live_open_events(kvm);
1459 if (err) 1457 if (err)
1460 goto out; 1458 goto out;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 488779bc4c8d..882285fb9f64 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -124,6 +124,210 @@ static int record__write(struct record *rec, struct perf_mmap *map __maybe_unuse
124 return 0; 124 return 0;
125} 125}
126 126
127#ifdef HAVE_AIO_SUPPORT
128static int record__aio_write(struct aiocb *cblock, int trace_fd,
129 void *buf, size_t size, off_t off)
130{
131 int rc;
132
133 cblock->aio_fildes = trace_fd;
134 cblock->aio_buf = buf;
135 cblock->aio_nbytes = size;
136 cblock->aio_offset = off;
137 cblock->aio_sigevent.sigev_notify = SIGEV_NONE;
138
139 do {
140 rc = aio_write(cblock);
141 if (rc == 0) {
142 break;
143 } else if (errno != EAGAIN) {
144 cblock->aio_fildes = -1;
145 pr_err("failed to queue perf data, error: %m\n");
146 break;
147 }
148 } while (1);
149
150 return rc;
151}
152
153static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
154{
155 void *rem_buf;
156 off_t rem_off;
157 size_t rem_size;
158 int rc, aio_errno;
159 ssize_t aio_ret, written;
160
161 aio_errno = aio_error(cblock);
162 if (aio_errno == EINPROGRESS)
163 return 0;
164
165 written = aio_ret = aio_return(cblock);
166 if (aio_ret < 0) {
167 if (aio_errno != EINTR)
168 pr_err("failed to write perf data, error: %m\n");
169 written = 0;
170 }
171
172 rem_size = cblock->aio_nbytes - written;
173
174 if (rem_size == 0) {
175 cblock->aio_fildes = -1;
176 /*
177 * md->refcount is incremented in perf_mmap__push() for
178 * every enqueued aio write request so decrement it because
179 * the request is now complete.
180 */
181 perf_mmap__put(md);
182 rc = 1;
183 } else {
184 /*
185 * An aio write request may require a restart with the
186 * remainder if the kernel didn't write the whole
187 * chunk at once.
188 */
189 rem_off = cblock->aio_offset + written;
190 rem_buf = (void *)(cblock->aio_buf + written);
191 record__aio_write(cblock, cblock->aio_fildes,
192 rem_buf, rem_size, rem_off);
193 rc = 0;
194 }
195
196 return rc;
197}
198
199static int record__aio_sync(struct perf_mmap *md, bool sync_all)
200{
201 struct aiocb **aiocb = md->aio.aiocb;
202 struct aiocb *cblocks = md->aio.cblocks;
203 struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
204 int i, do_suspend;
205
206 do {
207 do_suspend = 0;
208 for (i = 0; i < md->aio.nr_cblocks; ++i) {
209 if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
210 if (sync_all)
211 aiocb[i] = NULL;
212 else
213 return i;
214 } else {
215 /*
216 * Started aio write is not complete yet
217 * so it has to be waited before the
218 * next allocation.
219 */
220 aiocb[i] = &cblocks[i];
221 do_suspend = 1;
222 }
223 }
224 if (!do_suspend)
225 return -1;
226
227 while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
228 if (!(errno == EAGAIN || errno == EINTR))
229 pr_err("failed to sync perf data, error: %m\n");
230 }
231 } while (1);
232}
233
234static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
235{
236 struct record *rec = to;
237 int ret, trace_fd = rec->session->data->file.fd;
238
239 rec->samples++;
240
241 ret = record__aio_write(cblock, trace_fd, bf, size, off);
242 if (!ret) {
243 rec->bytes_written += size;
244 if (switch_output_size(rec))
245 trigger_hit(&switch_output_trigger);
246 }
247
248 return ret;
249}
250
251static off_t record__aio_get_pos(int trace_fd)
252{
253 return lseek(trace_fd, 0, SEEK_CUR);
254}
255
256static void record__aio_set_pos(int trace_fd, off_t pos)
257{
258 lseek(trace_fd, pos, SEEK_SET);
259}
260
261static void record__aio_mmap_read_sync(struct record *rec)
262{
263 int i;
264 struct perf_evlist *evlist = rec->evlist;
265 struct perf_mmap *maps = evlist->mmap;
266
267 if (!rec->opts.nr_cblocks)
268 return;
269
270 for (i = 0; i < evlist->nr_mmaps; i++) {
271 struct perf_mmap *map = &maps[i];
272
273 if (map->base)
274 record__aio_sync(map, true);
275 }
276}
277
278static int nr_cblocks_default = 1;
279static int nr_cblocks_max = 4;
280
281static int record__aio_parse(const struct option *opt,
282 const char *str,
283 int unset)
284{
285 struct record_opts *opts = (struct record_opts *)opt->value;
286
287 if (unset) {
288 opts->nr_cblocks = 0;
289 } else {
290 if (str)
291 opts->nr_cblocks = strtol(str, NULL, 0);
292 if (!opts->nr_cblocks)
293 opts->nr_cblocks = nr_cblocks_default;
294 }
295
296 return 0;
297}
298#else /* HAVE_AIO_SUPPORT */
299static int nr_cblocks_max = 0;
300
301static int record__aio_sync(struct perf_mmap *md __maybe_unused, bool sync_all __maybe_unused)
302{
303 return -1;
304}
305
306static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
307 void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
308{
309 return -1;
310}
311
312static off_t record__aio_get_pos(int trace_fd __maybe_unused)
313{
314 return -1;
315}
316
317static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
318{
319}
320
321static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
322{
323}
324#endif
325
326static int record__aio_enabled(struct record *rec)
327{
328 return rec->opts.nr_cblocks > 0;
329}
330
127static int process_synthesized_event(struct perf_tool *tool, 331static int process_synthesized_event(struct perf_tool *tool,
128 union perf_event *event, 332 union perf_event *event,
129 struct perf_sample *sample __maybe_unused, 333 struct perf_sample *sample __maybe_unused,
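
The record__aio_*() helpers above follow the standard POSIX AIO pattern: fill in a struct aiocb, queue it with aio_write(), check completion with aio_error()/aio_return(), and block in aio_suspend() while requests are still in flight. A self-contained sketch of that pattern (illustrative only, not perf code; on older glibc build with 'cc demo.c -lrt'):

#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static const char buf[] = "perf data chunk\n";
	struct aiocb cb;
	const struct aiocb * const list[] = { &cb };
	int fd, err;

	fd = open("aio-demo.out", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf = (void *)buf;
	cb.aio_nbytes = sizeof(buf) - 1;
	cb.aio_offset = 0;
	cb.aio_sigevent.sigev_notify = SIGEV_NONE;	/* poll, no signal/thread */

	if (aio_write(&cb)) {				/* queue the asynchronous write */
		perror("aio_write");
		return 1;
	}

	/* wait for completion, as record__aio_sync() does with aio_suspend() */
	do {
		err = aio_error(&cb);
		if (err == EINPROGRESS)
			aio_suspend(list, 1, NULL);
	} while (err == EINPROGRESS);

	if (err == 0)
		printf("wrote %zd bytes asynchronously\n", aio_return(&cb));
	else
		fprintf(stderr, "aio_write failed: %s\n", strerror(err));

	close(fd);
	return 0;
}

As the option and config hunks further down show, the mode is off by default and is enabled with 'perf record --aio[=n]' (n control blocks, capped at 4) or the record.aio config variable, and can be compiled out with NO_AIO=1.
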
@@ -329,7 +533,7 @@ static int record__mmap_evlist(struct record *rec,
329 533
330 if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, 534 if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
331 opts->auxtrace_mmap_pages, 535 opts->auxtrace_mmap_pages,
332 opts->auxtrace_snapshot_mode) < 0) { 536 opts->auxtrace_snapshot_mode, opts->nr_cblocks) < 0) {
333 if (errno == EPERM) { 537 if (errno == EPERM) {
334 pr_err("Permission error mapping pages.\n" 538 pr_err("Permission error mapping pages.\n"
335 "Consider increasing " 539 "Consider increasing "
@@ -525,6 +729,8 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
525 int i; 729 int i;
526 int rc = 0; 730 int rc = 0;
527 struct perf_mmap *maps; 731 struct perf_mmap *maps;
732 int trace_fd = rec->data.file.fd;
733 off_t off;
528 734
529 if (!evlist) 735 if (!evlist)
530 return 0; 736 return 0;
@@ -536,13 +742,30 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
536 if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING) 742 if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
537 return 0; 743 return 0;
538 744
745 if (record__aio_enabled(rec))
746 off = record__aio_get_pos(trace_fd);
747
539 for (i = 0; i < evlist->nr_mmaps; i++) { 748 for (i = 0; i < evlist->nr_mmaps; i++) {
540 struct perf_mmap *map = &maps[i]; 749 struct perf_mmap *map = &maps[i];
541 750
542 if (map->base) { 751 if (map->base) {
543 if (perf_mmap__push(map, rec, record__pushfn) != 0) { 752 if (!record__aio_enabled(rec)) {
544 rc = -1; 753 if (perf_mmap__push(map, rec, record__pushfn) != 0) {
545 goto out; 754 rc = -1;
755 goto out;
756 }
757 } else {
758 int idx;
759 /*
760 * Call record__aio_sync() to wait till map->data buffer
761 * becomes available after previous aio write request.
762 */
763 idx = record__aio_sync(map, false);
764 if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) {
765 record__aio_set_pos(trace_fd, off);
766 rc = -1;
767 goto out;
768 }
546 } 769 }
547 } 770 }
548 771
@@ -553,6 +776,9 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
553 } 776 }
554 } 777 }
555 778
779 if (record__aio_enabled(rec))
780 record__aio_set_pos(trace_fd, off);
781
556 /* 782 /*
557 * Mark the round finished in case we wrote 783 * Mark the round finished in case we wrote
558 * at least one event. 784 * at least one event.
@@ -641,8 +867,7 @@ static int record__synthesize_workload(struct record *rec, bool tail)
641 err = perf_event__synthesize_thread_map(&rec->tool, thread_map, 867 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
642 process_synthesized_event, 868 process_synthesized_event,
643 &rec->session->machines.host, 869 &rec->session->machines.host,
644 rec->opts.sample_address, 870 rec->opts.sample_address);
645 rec->opts.proc_map_timeout);
646 thread_map__put(thread_map); 871 thread_map__put(thread_map);
647 return err; 872 return err;
648} 873}
@@ -658,6 +883,8 @@ record__switch_output(struct record *rec, bool at_exit)
658 /* Same Size: "2015122520103046"*/ 883 /* Same Size: "2015122520103046"*/
659 char timestamp[] = "InvalidTimestamp"; 884 char timestamp[] = "InvalidTimestamp";
660 885
886 record__aio_mmap_read_sync(rec);
887
661 record__synthesize(rec, true); 888 record__synthesize(rec, true);
662 if (target__none(&rec->opts.target)) 889 if (target__none(&rec->opts.target))
663 record__synthesize_workload(rec, true); 890 record__synthesize_workload(rec, true);
@@ -857,7 +1084,7 @@ static int record__synthesize(struct record *rec, bool tail)
857 1084
858 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads, 1085 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
859 process_synthesized_event, opts->sample_address, 1086 process_synthesized_event, opts->sample_address,
860 opts->proc_map_timeout, 1); 1087 1);
861out: 1088out:
862 return err; 1089 return err;
863} 1090}
@@ -1168,6 +1395,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
1168 record__synthesize_workload(rec, true); 1395 record__synthesize_workload(rec, true);
1169 1396
1170out_child: 1397out_child:
1398 record__aio_mmap_read_sync(rec);
1399
1171 if (forks) { 1400 if (forks) {
1172 int exit_status; 1401 int exit_status;
1173 1402
@@ -1301,6 +1530,13 @@ static int perf_record_config(const char *var, const char *value, void *cb)
1301 var = "call-graph.record-mode"; 1530 var = "call-graph.record-mode";
1302 return perf_default_config(var, value, cb); 1531 return perf_default_config(var, value, cb);
1303 } 1532 }
1533#ifdef HAVE_AIO_SUPPORT
1534 if (!strcmp(var, "record.aio")) {
1535 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1536 if (!rec->opts.nr_cblocks)
1537 rec->opts.nr_cblocks = nr_cblocks_default;
1538 }
1539#endif
1304 1540
1305 return 0; 1541 return 0;
1306} 1542}
@@ -1546,7 +1782,6 @@ static struct record record = {
1546 .uses_mmap = true, 1782 .uses_mmap = true,
1547 .default_per_cpu = true, 1783 .default_per_cpu = true,
1548 }, 1784 },
1549 .proc_map_timeout = 500,
1550 }, 1785 },
1551 .tool = { 1786 .tool = {
1552 .sample = process_sample_event, 1787 .sample = process_sample_event,
@@ -1676,7 +1911,7 @@ static struct option __record_options[] = {
1676 parse_clockid), 1911 parse_clockid),
1677 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts, 1912 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1678 "opts", "AUX area tracing Snapshot Mode", ""), 1913 "opts", "AUX area tracing Snapshot Mode", ""),
1679 OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout, 1914 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
1680 "per thread proc mmap processing timeout in ms"), 1915 "per thread proc mmap processing timeout in ms"),
1681 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces, 1916 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
1682 "Record namespaces events"), 1917 "Record namespaces events"),
@@ -1706,6 +1941,11 @@ static struct option __record_options[] = {
1706 "signal"), 1941 "signal"),
1707 OPT_BOOLEAN(0, "dry-run", &dry_run, 1942 OPT_BOOLEAN(0, "dry-run", &dry_run,
1708 "Parse options then exit"), 1943 "Parse options then exit"),
1944#ifdef HAVE_AIO_SUPPORT
1945 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
1946 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
1947 record__aio_parse),
1948#endif
1709 OPT_END() 1949 OPT_END()
1710}; 1950};
1711 1951
@@ -1898,6 +2138,11 @@ int cmd_record(int argc, const char **argv)
1898 goto out; 2138 goto out;
1899 } 2139 }
1900 2140
2141 if (rec->opts.nr_cblocks > nr_cblocks_max)
2142 rec->opts.nr_cblocks = nr_cblocks_max;
2143 if (verbose > 0)
2144 pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
2145
1901 err = __cmd_record(&record, argc, argv); 2146 err = __cmd_record(&record, argc, argv);
1902out: 2147out:
1903 perf_evlist__delete(rec->evlist); 2148 perf_evlist__delete(rec->evlist);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 257c9c18cb7e..4958095be4fc 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -85,6 +85,7 @@ struct report {
85 int socket_filter; 85 int socket_filter;
86 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); 86 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
87 struct branch_type_stat brtype_stat; 87 struct branch_type_stat brtype_stat;
88 bool symbol_ipc;
88}; 89};
89 90
90static int report__config(const char *var, const char *value, void *cb) 91static int report__config(const char *var, const char *value, void *cb)
@@ -129,7 +130,7 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
129 struct mem_info *mi; 130 struct mem_info *mi;
130 struct branch_info *bi; 131 struct branch_info *bi;
131 132
132 if (!ui__has_annotation()) 133 if (!ui__has_annotation() && !rep->symbol_ipc)
133 return 0; 134 return 0;
134 135
135 hist__account_cycles(sample->branch_stack, al, sample, 136 hist__account_cycles(sample->branch_stack, al, sample,
@@ -174,7 +175,7 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
174 struct perf_evsel *evsel = iter->evsel; 175 struct perf_evsel *evsel = iter->evsel;
175 int err; 176 int err;
176 177
177 if (!ui__has_annotation()) 178 if (!ui__has_annotation() && !rep->symbol_ipc)
178 return 0; 179 return 0;
179 180
180 hist__account_cycles(sample->branch_stack, al, sample, 181 hist__account_cycles(sample->branch_stack, al, sample,
@@ -1133,6 +1134,7 @@ int cmd_report(int argc, const char **argv)
1133 .mode = PERF_DATA_MODE_READ, 1134 .mode = PERF_DATA_MODE_READ,
1134 }; 1135 };
1135 int ret = hists__init(); 1136 int ret = hists__init();
1137 char sort_tmp[128];
1136 1138
1137 if (ret < 0) 1139 if (ret < 0)
1138 return ret; 1140 return ret;
@@ -1284,6 +1286,24 @@ repeat:
1284 else 1286 else
1285 use_browser = 0; 1287 use_browser = 0;
1286 1288
1289 if (sort_order && strstr(sort_order, "ipc")) {
1290 parse_options_usage(report_usage, options, "s", 1);
1291 goto error;
1292 }
1293
1294 if (sort_order && strstr(sort_order, "symbol")) {
1295 if (sort__mode == SORT_MODE__BRANCH) {
1296 snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
1297 sort_order, "ipc_lbr");
1298 report.symbol_ipc = true;
1299 } else {
1300 snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
1301 sort_order, "ipc_null");
1302 }
1303
1304 sort_order = sort_tmp;
1305 }
1306
1287 if (setup_sorting(session->evlist) < 0) { 1307 if (setup_sorting(session->evlist) < 0) {
1288 if (sort_order) 1308 if (sort_order)
1289 parse_options_usage(report_usage, options, "s", 1); 1309 parse_options_usage(report_usage, options, "s", 1);
@@ -1311,7 +1331,7 @@ repeat:
1311 * so don't allocate extra space that won't be used in the stdio 1331 * so don't allocate extra space that won't be used in the stdio
1312 * implementation. 1332 * implementation.
1313 */ 1333 */
1314 if (ui__has_annotation()) { 1334 if (ui__has_annotation() || report.symbol_ipc) {
1315 ret = symbol__annotation_init(); 1335 ret = symbol__annotation_init();
1316 if (ret < 0) 1336 if (ret < 0)
1317 goto error; 1337 goto error;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 04913136bac9..3728b50e52e2 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -96,6 +96,7 @@ enum perf_output_field {
96 PERF_OUTPUT_UREGS = 1U << 27, 96 PERF_OUTPUT_UREGS = 1U << 27,
97 PERF_OUTPUT_METRIC = 1U << 28, 97 PERF_OUTPUT_METRIC = 1U << 28,
98 PERF_OUTPUT_MISC = 1U << 29, 98 PERF_OUTPUT_MISC = 1U << 29,
99 PERF_OUTPUT_SRCCODE = 1U << 30,
99}; 100};
100 101
101struct output_option { 102struct output_option {
@@ -132,6 +133,7 @@ struct output_option {
132 {.str = "phys_addr", .field = PERF_OUTPUT_PHYS_ADDR}, 133 {.str = "phys_addr", .field = PERF_OUTPUT_PHYS_ADDR},
133 {.str = "metric", .field = PERF_OUTPUT_METRIC}, 134 {.str = "metric", .field = PERF_OUTPUT_METRIC},
134 {.str = "misc", .field = PERF_OUTPUT_MISC}, 135 {.str = "misc", .field = PERF_OUTPUT_MISC},
136 {.str = "srccode", .field = PERF_OUTPUT_SRCCODE},
135}; 137};
136 138
137enum { 139enum {
@@ -424,7 +426,7 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
424 pr_err("Display of DSO requested but no address to convert.\n"); 426 pr_err("Display of DSO requested but no address to convert.\n");
425 return -EINVAL; 427 return -EINVAL;
426 } 428 }
427 if (PRINT_FIELD(SRCLINE) && !PRINT_FIELD(IP)) { 429 if ((PRINT_FIELD(SRCLINE) || PRINT_FIELD(SRCCODE)) && !PRINT_FIELD(IP)) {
428 pr_err("Display of source line number requested but sample IP is not\n" 430 pr_err("Display of source line number requested but sample IP is not\n"
429 "selected. Hence, no address to lookup the source line number.\n"); 431 "selected. Hence, no address to lookup the source line number.\n");
430 return -EINVAL; 432 return -EINVAL;
@@ -724,8 +726,8 @@ static int perf_sample__fprintf_brstack(struct perf_sample *sample,
724 if (PRINT_FIELD(DSO)) { 726 if (PRINT_FIELD(DSO)) {
725 memset(&alf, 0, sizeof(alf)); 727 memset(&alf, 0, sizeof(alf));
726 memset(&alt, 0, sizeof(alt)); 728 memset(&alt, 0, sizeof(alt));
727 thread__find_map(thread, sample->cpumode, from, &alf); 729 thread__find_map_fb(thread, sample->cpumode, from, &alf);
728 thread__find_map(thread, sample->cpumode, to, &alt); 730 thread__find_map_fb(thread, sample->cpumode, to, &alt);
729 } 731 }
730 732
731 printed += fprintf(fp, " 0x%"PRIx64, from); 733 printed += fprintf(fp, " 0x%"PRIx64, from);
@@ -771,8 +773,8 @@ static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
771 from = br->entries[i].from; 773 from = br->entries[i].from;
772 to = br->entries[i].to; 774 to = br->entries[i].to;
773 775
774 thread__find_symbol(thread, sample->cpumode, from, &alf); 776 thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
775 thread__find_symbol(thread, sample->cpumode, to, &alt); 777 thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
776 778
777 printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp); 779 printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
778 if (PRINT_FIELD(DSO)) { 780 if (PRINT_FIELD(DSO)) {
@@ -816,11 +818,11 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
816 from = br->entries[i].from; 818 from = br->entries[i].from;
817 to = br->entries[i].to; 819 to = br->entries[i].to;
818 820
819 if (thread__find_map(thread, sample->cpumode, from, &alf) && 821 if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
820 !alf.map->dso->adjust_symbols) 822 !alf.map->dso->adjust_symbols)
821 from = map__map_ip(alf.map, from); 823 from = map__map_ip(alf.map, from);
822 824
823 if (thread__find_map(thread, sample->cpumode, to, &alt) && 825 if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
824 !alt.map->dso->adjust_symbols) 826 !alt.map->dso->adjust_symbols)
825 to = map__map_ip(alt.map, to); 827 to = map__map_ip(alt.map, to);
826 828
@@ -907,6 +909,22 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
907 return len; 909 return len;
908} 910}
909 911
912static int print_srccode(struct thread *thread, u8 cpumode, uint64_t addr)
913{
914 struct addr_location al;
915 int ret = 0;
916
917 memset(&al, 0, sizeof(al));
918 thread__find_map(thread, cpumode, addr, &al);
919 if (!al.map)
920 return 0;
921 ret = map__fprintf_srccode(al.map, al.addr, stdout,
922 &thread->srccode_state);
923 if (ret)
924 ret += printf("\n");
925 return ret;
926}
927
910static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en, 928static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
911 struct perf_insn *x, u8 *inbuf, int len, 929 struct perf_insn *x, u8 *inbuf, int len,
912 int insn, FILE *fp, int *total_cycles) 930 int insn, FILE *fp, int *total_cycles)
@@ -998,6 +1016,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
998 x.cpumode, x.cpu, &lastsym, attr, fp); 1016 x.cpumode, x.cpu, &lastsym, attr, fp);
999 printed += ip__fprintf_jump(br->entries[nr - 1].from, &br->entries[nr - 1], 1017 printed += ip__fprintf_jump(br->entries[nr - 1].from, &br->entries[nr - 1],
1000 &x, buffer, len, 0, fp, &total_cycles); 1018 &x, buffer, len, 0, fp, &total_cycles);
1019 if (PRINT_FIELD(SRCCODE))
1020 printed += print_srccode(thread, x.cpumode, br->entries[nr - 1].from);
1001 } 1021 }
1002 1022
1003 /* Print all blocks */ 1023 /* Print all blocks */
@@ -1027,12 +1047,16 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
1027 if (ip == end) { 1047 if (ip == end) {
1028 printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp, 1048 printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp,
1029 &total_cycles); 1049 &total_cycles);
1050 if (PRINT_FIELD(SRCCODE))
1051 printed += print_srccode(thread, x.cpumode, ip);
1030 break; 1052 break;
1031 } else { 1053 } else {
1032 printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip, 1054 printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip,
1033 dump_insn(&x, ip, buffer + off, len - off, &ilen)); 1055 dump_insn(&x, ip, buffer + off, len - off, &ilen));
1034 if (ilen == 0) 1056 if (ilen == 0)
1035 break; 1057 break;
1058 if (PRINT_FIELD(SRCCODE))
1059 print_srccode(thread, x.cpumode, ip);
1036 insn++; 1060 insn++;
1037 } 1061 }
1038 } 1062 }
@@ -1063,6 +1087,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
1063 1087
1064 printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", sample->ip, 1088 printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", sample->ip,
1065 dump_insn(&x, sample->ip, buffer, len, NULL)); 1089 dump_insn(&x, sample->ip, buffer, len, NULL));
1090 if (PRINT_FIELD(SRCCODE))
1091 print_srccode(thread, x.cpumode, sample->ip);
1066 goto out; 1092 goto out;
1067 } 1093 }
1068 for (off = 0; off <= end - start; off += ilen) { 1094 for (off = 0; off <= end - start; off += ilen) {
@@ -1070,6 +1096,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
1070 dump_insn(&x, start + off, buffer + off, len - off, &ilen)); 1096 dump_insn(&x, start + off, buffer + off, len - off, &ilen));
1071 if (ilen == 0) 1097 if (ilen == 0)
1072 break; 1098 break;
1099 if (PRINT_FIELD(SRCCODE))
1100 print_srccode(thread, x.cpumode, start + off);
1073 } 1101 }
1074out: 1102out:
1075 return printed; 1103 return printed;
@@ -1252,7 +1280,16 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
1252 printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp); 1280 printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
1253 1281
1254 printed += perf_sample__fprintf_insn(sample, attr, thread, machine, fp); 1282 printed += perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
1255 return printed + fprintf(fp, "\n"); 1283 printed += fprintf(fp, "\n");
1284 if (PRINT_FIELD(SRCCODE)) {
1285 int ret = map__fprintf_srccode(al->map, al->addr, stdout,
1286 &thread->srccode_state);
1287 if (ret) {
1288 printed += ret;
1289 printed += printf("\n");
1290 }
1291 }
1292 return printed;
1256} 1293}
1257 1294
1258static struct { 1295static struct {
@@ -1792,6 +1829,12 @@ static void process_event(struct perf_script *script,
1792 fprintf(fp, "%16" PRIx64, sample->phys_addr); 1829 fprintf(fp, "%16" PRIx64, sample->phys_addr);
1793 fprintf(fp, "\n"); 1830 fprintf(fp, "\n");
1794 1831
1832 if (PRINT_FIELD(SRCCODE)) {
1833 if (map__fprintf_srccode(al->map, al->addr, stdout,
1834 &thread->srccode_state))
1835 printf("\n");
1836 }
1837
1795 if (PRINT_FIELD(METRIC)) 1838 if (PRINT_FIELD(METRIC))
1796 perf_sample__fprint_metric(script, thread, evsel, sample, fp); 1839 perf_sample__fprint_metric(script, thread, evsel, sample, fp);
1797 1840
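
perf script keeps its output fields as single bits in a mask, which is why adding 'srccode' above amounts to a new 1U << 30 flag, a string-to-flag table entry, and PRINT_FIELD() checks at the print sites. A compact standalone illustration of that mechanism (bit positions here are made up, not perf's):

#include <stdio.h>

enum output_field {
	OUTPUT_IP	= 1U << 0,
	OUTPUT_SRCLINE	= 1U << 1,
	OUTPUT_SRCCODE	= 1U << 2,
};

#define PRINT_FIELD(mask, x)	((mask) & OUTPUT_##x)

int main(void)
{
	unsigned int fields = OUTPUT_SRCCODE;	/* srccode requested without ip */

	/* mirrors the check added to perf_evsel__check_attr() above */
	if ((PRINT_FIELD(fields, SRCLINE) || PRINT_FIELD(fields, SRCCODE)) &&
	    !PRINT_FIELD(fields, IP))
		fprintf(stderr, "source output needs the ip field selected\n");
	else
		printf("fields ok: 0x%x\n", fields);
	return 0;
}
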
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index aa0c73e57924..fe3ecfb2e64b 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -46,6 +46,7 @@
46#include "arch/common.h" 46#include "arch/common.h"
47 47
48#include "util/debug.h" 48#include "util/debug.h"
49#include "util/ordered-events.h"
49 50
50#include <assert.h> 51#include <assert.h>
51#include <elf.h> 52#include <elf.h>
@@ -272,8 +273,6 @@ static void perf_top__print_sym_table(struct perf_top *top)
272 perf_top__header_snprintf(top, bf, sizeof(bf)); 273 perf_top__header_snprintf(top, bf, sizeof(bf));
273 printf("%s\n", bf); 274 printf("%s\n", bf);
274 275
275 perf_top__reset_sample_counters(top);
276
277 printf("%-*.*s\n", win_width, win_width, graph_dotted_line); 276 printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
278 277
279 if (!top->record_opts.overwrite && 278 if (!top->record_opts.overwrite &&
@@ -553,8 +552,6 @@ static void perf_top__sort_new_samples(void *arg)
553 struct perf_evsel *evsel = t->sym_evsel; 552 struct perf_evsel *evsel = t->sym_evsel;
554 struct hists *hists; 553 struct hists *hists;
555 554
556 perf_top__reset_sample_counters(t);
557
558 if (t->evlist->selected != NULL) 555 if (t->evlist->selected != NULL)
559 t->sym_evsel = t->evlist->selected; 556 t->sym_evsel = t->evlist->selected;
560 557
@@ -571,6 +568,15 @@ static void perf_top__sort_new_samples(void *arg)
571 568
572 hists__collapse_resort(hists, NULL); 569 hists__collapse_resort(hists, NULL);
573 perf_evsel__output_resort(evsel, NULL); 570 perf_evsel__output_resort(evsel, NULL);
571
572 if (t->lost || t->drop)
573 		pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C))\n");
574}
575
576static void stop_top(void)
577{
578 session_done = 1;
579 done = 1;
574} 580}
575 581
576static void *display_thread_tui(void *arg) 582static void *display_thread_tui(void *arg)
@@ -595,7 +601,7 @@ static void *display_thread_tui(void *arg)
595 601
596 /* 602 /*
597 * Initialize the uid_filter_str, in the future the TUI will allow 603 * Initialize the uid_filter_str, in the future the TUI will allow
598 * Zooming in/out UIDs. For now juse use whatever the user passed 604 * Zooming in/out UIDs. For now just use whatever the user passed
599 * via --uid. 605 * via --uid.
600 */ 606 */
601 evlist__for_each_entry(top->evlist, pos) { 607 evlist__for_each_entry(top->evlist, pos) {
@@ -609,13 +615,13 @@ static void *display_thread_tui(void *arg)
609 !top->record_opts.overwrite, 615 !top->record_opts.overwrite,
610 &top->annotation_opts); 616 &top->annotation_opts);
611 617
612 done = 1; 618 stop_top();
613 return NULL; 619 return NULL;
614} 620}
615 621
616static void display_sig(int sig __maybe_unused) 622static void display_sig(int sig __maybe_unused)
617{ 623{
618 done = 1; 624 stop_top();
619} 625}
620 626
621static void display_setup_sig(void) 627static void display_setup_sig(void)
@@ -668,7 +674,7 @@ repeat:
668 674
669 if (perf_top__handle_keypress(top, c)) 675 if (perf_top__handle_keypress(top, c))
670 goto repeat; 676 goto repeat;
671 done = 1; 677 stop_top();
672 } 678 }
673 } 679 }
674 680
@@ -800,78 +806,61 @@ static void perf_event__process_sample(struct perf_tool *tool,
800 addr_location__put(&al); 806 addr_location__put(&al);
801} 807}
802 808
809static void
810perf_top__process_lost(struct perf_top *top, union perf_event *event,
811 struct perf_evsel *evsel)
812{
813 struct hists *hists = evsel__hists(evsel);
814
815 top->lost += event->lost.lost;
816 top->lost_total += event->lost.lost;
817 hists->stats.total_lost += event->lost.lost;
818}
819
820static void
821perf_top__process_lost_samples(struct perf_top *top,
822 union perf_event *event,
823 struct perf_evsel *evsel)
824{
825 struct hists *hists = evsel__hists(evsel);
826
827 top->lost += event->lost_samples.lost;
828 top->lost_total += event->lost_samples.lost;
829 hists->stats.total_lost_samples += event->lost_samples.lost;
830}
831
832static u64 last_timestamp;
833
803static void perf_top__mmap_read_idx(struct perf_top *top, int idx) 834static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
804{ 835{
805 struct record_opts *opts = &top->record_opts; 836 struct record_opts *opts = &top->record_opts;
806 struct perf_evlist *evlist = top->evlist; 837 struct perf_evlist *evlist = top->evlist;
807 struct perf_sample sample;
808 struct perf_evsel *evsel;
809 struct perf_mmap *md; 838 struct perf_mmap *md;
810 struct perf_session *session = top->session;
811 union perf_event *event; 839 union perf_event *event;
812 struct machine *machine;
813 int ret;
814 840
815 md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx]; 841 md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
816 if (perf_mmap__read_init(md) < 0) 842 if (perf_mmap__read_init(md) < 0)
817 return; 843 return;
818 844
819 while ((event = perf_mmap__read_event(md)) != NULL) { 845 while ((event = perf_mmap__read_event(md)) != NULL) {
820 ret = perf_evlist__parse_sample(evlist, event, &sample); 846 int ret;
821 if (ret) {
822 pr_err("Can't parse sample, err = %d\n", ret);
823 goto next_event;
824 }
825 847
826 evsel = perf_evlist__id2evsel(session->evlist, sample.id); 848 ret = perf_evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
827 assert(evsel != NULL); 849 if (ret && ret != -1)
828
829 if (event->header.type == PERF_RECORD_SAMPLE)
830 ++top->samples;
831
832 switch (sample.cpumode) {
833 case PERF_RECORD_MISC_USER:
834 ++top->us_samples;
835 if (top->hide_user_symbols)
836 goto next_event;
837 machine = &session->machines.host;
838 break;
839 case PERF_RECORD_MISC_KERNEL:
840 ++top->kernel_samples;
841 if (top->hide_kernel_symbols)
842 goto next_event;
843 machine = &session->machines.host;
844 break;
845 case PERF_RECORD_MISC_GUEST_KERNEL:
846 ++top->guest_kernel_samples;
847 machine = perf_session__find_machine(session,
848 sample.pid);
849 break;
850 case PERF_RECORD_MISC_GUEST_USER:
851 ++top->guest_us_samples;
852 /*
853 * TODO: we don't process guest user from host side
854 * except simple counting.
855 */
856 goto next_event;
857 default:
858 if (event->header.type == PERF_RECORD_SAMPLE)
859 goto next_event;
860 machine = &session->machines.host;
861 break; 850 break;
862 }
863 851
852 ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0);
853 if (ret)
854 break;
864 855
865 if (event->header.type == PERF_RECORD_SAMPLE) {
866 perf_event__process_sample(&top->tool, event, evsel,
867 &sample, machine);
868 } else if (event->header.type < PERF_RECORD_MAX) {
869 hists__inc_nr_events(evsel__hists(evsel), event->header.type);
870 machine__process_event(machine, event, &sample);
871 } else
872 ++session->evlist->stats.nr_unknown_events;
873next_event:
874 perf_mmap__consume(md); 856 perf_mmap__consume(md);
857
858 if (top->qe.rotate) {
859 pthread_mutex_lock(&top->qe.mutex);
860 top->qe.rotate = false;
861 pthread_cond_signal(&top->qe.cond);
862 pthread_mutex_unlock(&top->qe.mutex);
863 }
875 } 864 }
876 865
877 perf_mmap__read_done(md); 866 perf_mmap__read_done(md);
@@ -881,10 +870,8 @@ static void perf_top__mmap_read(struct perf_top *top)
881{ 870{
882 bool overwrite = top->record_opts.overwrite; 871 bool overwrite = top->record_opts.overwrite;
883 struct perf_evlist *evlist = top->evlist; 872 struct perf_evlist *evlist = top->evlist;
884 unsigned long long start, end;
885 int i; 873 int i;
886 874
887 start = rdclock();
888 if (overwrite) 875 if (overwrite)
889 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING); 876 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
890 877
@@ -895,13 +882,6 @@ static void perf_top__mmap_read(struct perf_top *top)
895 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY); 882 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
896 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING); 883 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
897 } 884 }
898 end = rdclock();
899
900 if ((end - start) > (unsigned long long)top->delay_secs * NSEC_PER_SEC)
901 ui__warning("Too slow to read ring buffer.\n"
902 "Please try increasing the period (-c) or\n"
903 "decreasing the freq (-F) or\n"
904 "limiting the number of CPUs (-C)\n");
905} 885}
906 886
907/* 887/*
@@ -1063,6 +1043,150 @@ static int callchain_param__setup_sample_type(struct callchain_param *callchain)
1063 return 0; 1043 return 0;
1064} 1044}
1065 1045
1046static struct ordered_events *rotate_queues(struct perf_top *top)
1047{
1048 struct ordered_events *in = top->qe.in;
1049
1050 if (top->qe.in == &top->qe.data[1])
1051 top->qe.in = &top->qe.data[0];
1052 else
1053 top->qe.in = &top->qe.data[1];
1054
1055 return in;
1056}
1057
1058static void *process_thread(void *arg)
1059{
1060 struct perf_top *top = arg;
1061
1062 while (!done) {
1063 struct ordered_events *out, *in = top->qe.in;
1064
1065 if (!in->nr_events) {
1066 usleep(100);
1067 continue;
1068 }
1069
1070 out = rotate_queues(top);
1071
1072 pthread_mutex_lock(&top->qe.mutex);
1073 top->qe.rotate = true;
1074 pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
1075 pthread_mutex_unlock(&top->qe.mutex);
1076
1077 if (ordered_events__flush(out, OE_FLUSH__TOP))
1078 pr_err("failed to process events\n");
1079 }
1080
1081 return NULL;
1082}
1083
1084/*
1085 * Allow only 'top->delay_secs' seconds behind samples.
1086 */
1087static int should_drop(struct ordered_event *qevent, struct perf_top *top)
1088{
1089 union perf_event *event = qevent->event;
1090 u64 delay_timestamp;
1091
1092 if (event->header.type != PERF_RECORD_SAMPLE)
1093 return false;
1094
1095 delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
1096 return delay_timestamp < last_timestamp;
1097}
1098
1099static int deliver_event(struct ordered_events *qe,
1100 struct ordered_event *qevent)
1101{
1102 struct perf_top *top = qe->data;
1103 struct perf_evlist *evlist = top->evlist;
1104 struct perf_session *session = top->session;
1105 union perf_event *event = qevent->event;
1106 struct perf_sample sample;
1107 struct perf_evsel *evsel;
1108 struct machine *machine;
1109 int ret = -1;
1110
1111 if (should_drop(qevent, top)) {
1112 top->drop++;
1113 top->drop_total++;
1114 return 0;
1115 }
1116
1117 ret = perf_evlist__parse_sample(evlist, event, &sample);
1118 if (ret) {
1119 pr_err("Can't parse sample, err = %d\n", ret);
1120 goto next_event;
1121 }
1122
1123 evsel = perf_evlist__id2evsel(session->evlist, sample.id);
1124 assert(evsel != NULL);
1125
1126 if (event->header.type == PERF_RECORD_SAMPLE)
1127 ++top->samples;
1128
1129 switch (sample.cpumode) {
1130 case PERF_RECORD_MISC_USER:
1131 ++top->us_samples;
1132 if (top->hide_user_symbols)
1133 goto next_event;
1134 machine = &session->machines.host;
1135 break;
1136 case PERF_RECORD_MISC_KERNEL:
1137 ++top->kernel_samples;
1138 if (top->hide_kernel_symbols)
1139 goto next_event;
1140 machine = &session->machines.host;
1141 break;
1142 case PERF_RECORD_MISC_GUEST_KERNEL:
1143 ++top->guest_kernel_samples;
1144 machine = perf_session__find_machine(session,
1145 sample.pid);
1146 break;
1147 case PERF_RECORD_MISC_GUEST_USER:
1148 ++top->guest_us_samples;
1149 /*
1150 * TODO: we don't process guest user from host side
1151 * except simple counting.
1152 */
1153 goto next_event;
1154 default:
1155 if (event->header.type == PERF_RECORD_SAMPLE)
1156 goto next_event;
1157 machine = &session->machines.host;
1158 break;
1159 }
1160
1161 if (event->header.type == PERF_RECORD_SAMPLE) {
1162 perf_event__process_sample(&top->tool, event, evsel,
1163 &sample, machine);
1164 } else if (event->header.type == PERF_RECORD_LOST) {
1165 perf_top__process_lost(top, event, evsel);
1166 } else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
1167 perf_top__process_lost_samples(top, event, evsel);
1168 } else if (event->header.type < PERF_RECORD_MAX) {
1169 hists__inc_nr_events(evsel__hists(evsel), event->header.type);
1170 machine__process_event(machine, event, &sample);
1171 } else
1172 ++session->evlist->stats.nr_unknown_events;
1173
1174 ret = 0;
1175next_event:
1176 return ret;
1177}
1178
1179static void init_process_thread(struct perf_top *top)
1180{
1181 ordered_events__init(&top->qe.data[0], deliver_event, top);
1182 ordered_events__init(&top->qe.data[1], deliver_event, top);
1183 ordered_events__set_copy_on_queue(&top->qe.data[0], true);
1184 ordered_events__set_copy_on_queue(&top->qe.data[1], true);
1185 top->qe.in = &top->qe.data[0];
1186 pthread_mutex_init(&top->qe.mutex, NULL);
1187 pthread_cond_init(&top->qe.cond, NULL);
1188}
1189
1066static int __cmd_top(struct perf_top *top) 1190static int __cmd_top(struct perf_top *top)
1067{ 1191{
1068 char msg[512]; 1192 char msg[512];
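
The queueing above decouples the mmap readers from histogram processing: the readers append events to top->qe.in while process_thread() swaps the two ordered_events buffers and flushes the one it just took, using the rotate flag and condition variable as the hand-off. A simplified standalone sketch of such a double-buffer hand-off (it takes the lock on both sides instead of the rotate/condvar acknowledgement, so it sketches the idea rather than the exact protocol; build with 'cc -pthread demo.c'):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define QSIZE	64

struct queue { int ev[QSIZE]; int nr; };

static struct queue data[2];
static struct queue *in = &data[0];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int done;	/* stop flag; a real program would make this atomic */

static void *process_thread(void *arg)
{
	(void)arg;

	while (!done) {
		struct queue *out;

		pthread_mutex_lock(&lock);
		if (!in->nr) {				/* nothing queued yet */
			pthread_mutex_unlock(&lock);
			usleep(100);
			continue;
		}
		out = in;				/* rotate the queues */
		in = (in == &data[0]) ? &data[1] : &data[0];
		pthread_mutex_unlock(&lock);

		for (int i = 0; i < out->nr; i++)	/* "flush" the full one */
			printf("processed event %d\n", out->ev[i]);
		out->nr = 0;
	}
	return NULL;
}

int main(void)
{
	pthread_t th;

	pthread_create(&th, NULL, process_thread, NULL);

	for (int i = 0; i < 100; i++) {			/* the reader side */
		pthread_mutex_lock(&lock);
		if (in->nr < QSIZE)
			in->ev[in->nr++] = i;
		pthread_mutex_unlock(&lock);
		usleep(10);
	}

	done = 1;
	pthread_join(th, NULL);
	return 0;
}
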
@@ -1070,7 +1194,7 @@ static int __cmd_top(struct perf_top *top)
1070 struct perf_evsel_config_term *err_term; 1194 struct perf_evsel_config_term *err_term;
1071 struct perf_evlist *evlist = top->evlist; 1195 struct perf_evlist *evlist = top->evlist;
1072 struct record_opts *opts = &top->record_opts; 1196 struct record_opts *opts = &top->record_opts;
1073 pthread_t thread; 1197 pthread_t thread, thread_process;
1074 int ret; 1198 int ret;
1075 1199
1076 top->session = perf_session__new(NULL, false, NULL); 1200 top->session = perf_session__new(NULL, false, NULL);
@@ -1094,9 +1218,10 @@ static int __cmd_top(struct perf_top *top)
1094 if (top->nr_threads_synthesize > 1) 1218 if (top->nr_threads_synthesize > 1)
1095 perf_set_multithreaded(); 1219 perf_set_multithreaded();
1096 1220
1221 init_process_thread(top);
1222
1097 machine__synthesize_threads(&top->session->machines.host, &opts->target, 1223 machine__synthesize_threads(&top->session->machines.host, &opts->target,
1098 top->evlist->threads, false, 1224 top->evlist->threads, false,
1099 opts->proc_map_timeout,
1100 top->nr_threads_synthesize); 1225 top->nr_threads_synthesize);
1101 1226
1102 if (top->nr_threads_synthesize > 1) 1227 if (top->nr_threads_synthesize > 1)
@@ -1135,10 +1260,15 @@ static int __cmd_top(struct perf_top *top)
1135 perf_evlist__enable(top->evlist); 1260 perf_evlist__enable(top->evlist);
1136 1261
1137 ret = -1; 1262 ret = -1;
1263 if (pthread_create(&thread_process, NULL, process_thread, top)) {
1264 ui__error("Could not create process thread.\n");
1265 goto out_delete;
1266 }
1267
1138 if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui : 1268 if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
1139 display_thread), top)) { 1269 display_thread), top)) {
1140 ui__error("Could not create display thread.\n"); 1270 ui__error("Could not create display thread.\n");
1141 goto out_delete; 1271 goto out_join_thread;
1142 } 1272 }
1143 1273
1144 if (top->realtime_prio) { 1274 if (top->realtime_prio) {
@@ -1173,6 +1303,9 @@ static int __cmd_top(struct perf_top *top)
1173 ret = 0; 1303 ret = 0;
1174out_join: 1304out_join:
1175 pthread_join(thread, NULL); 1305 pthread_join(thread, NULL);
1306out_join_thread:
1307 pthread_cond_signal(&top->qe.cond);
1308 pthread_join(thread_process, NULL);
1176out_delete: 1309out_delete:
1177 perf_session__delete(top->session); 1310 perf_session__delete(top->session);
1178 top->session = NULL; 1311 top->session = NULL;
@@ -1256,7 +1389,6 @@ int cmd_top(int argc, const char **argv)
1256 .target = { 1389 .target = {
1257 .uses_mmap = true, 1390 .uses_mmap = true,
1258 }, 1391 },
1259 .proc_map_timeout = 500,
1260 /* 1392 /*
1261 * FIXME: This will lose PERF_RECORD_MMAP and other metadata 1393 * FIXME: This will lose PERF_RECORD_MMAP and other metadata
1262 * when we pause, fix that and reenable. Probably using a 1394 * when we pause, fix that and reenable. Probably using a
@@ -1265,6 +1397,7 @@ int cmd_top(int argc, const char **argv)
1265 * stays in overwrite mode. -acme 1397 * stays in overwrite mode. -acme
1266 * */ 1398 * */
1267 .overwrite = 0, 1399 .overwrite = 0,
1400 .sample_time = true,
1268 }, 1401 },
1269 .max_stack = sysctl__max_stack(), 1402 .max_stack = sysctl__max_stack(),
1270 .annotation_opts = annotation__default_options, 1403 .annotation_opts = annotation__default_options,
@@ -1289,6 +1422,8 @@ int cmd_top(int argc, const char **argv)
1289 "file", "vmlinux pathname"), 1422 "file", "vmlinux pathname"),
1290 OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux, 1423 OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
1291 "don't load vmlinux even if found"), 1424 "don't load vmlinux even if found"),
1425 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
1426 "file", "kallsyms pathname"),
1292 OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols, 1427 OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
1293 "hide kernel symbols"), 1428 "hide kernel symbols"),
1294 OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages", 1429 OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
@@ -1367,7 +1502,7 @@ int cmd_top(int argc, const char **argv)
1367 OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str, 1502 OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
1368 "width[,width...]", 1503 "width[,width...]",
1369 "don't try to adjust column width, use these fixed values"), 1504 "don't try to adjust column width, use these fixed values"),
1370 OPT_UINTEGER(0, "proc-map-timeout", &opts->proc_map_timeout, 1505 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
1371 "per thread proc mmap processing timeout in ms"), 1506 "per thread proc mmap processing timeout in ms"),
1372 OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack, 1507 OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
1373 "branch any", "sample any taken branches", 1508 "branch any", "sample any taken branches",
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 8e3c3f74a3a4..366ec3c8f580 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -127,6 +127,10 @@ struct trace {
127 bool force; 127 bool force;
128 bool vfs_getname; 128 bool vfs_getname;
129 int trace_pgfaults; 129 int trace_pgfaults;
130 struct {
131 struct ordered_events data;
132 u64 last;
133 } oe;
130}; 134};
131 135
132struct tp_field { 136struct tp_field {
@@ -258,7 +262,8 @@ static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel)
258 struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp)); 262 struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
259 263
260 if (evsel->priv != NULL) { 264 if (evsel->priv != NULL) {
261 if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr")) 265 if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
266 perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
262 goto out_delete; 267 goto out_delete;
263 return 0; 268 return 0;
264 } 269 }
@@ -885,7 +890,7 @@ static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
885 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc. 890 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
886 */ 891 */
887struct syscall { 892struct syscall {
888 struct tep_event_format *tp_format; 893 struct tep_event *tp_format;
889 int nr_args; 894 int nr_args;
890 int args_size; 895 int args_size;
891 bool is_exit; 896 bool is_exit;
@@ -1264,7 +1269,7 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1264 1269
1265 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, 1270 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1266 evlist->threads, trace__tool_process, false, 1271 evlist->threads, trace__tool_process, false,
1267 trace->opts.proc_map_timeout, 1); 1272 1);
1268out: 1273out:
1269 if (err) 1274 if (err)
1270 symbol__exit(); 1275 symbol__exit();
@@ -2636,6 +2641,57 @@ static int trace__set_filter_pids(struct trace *trace)
2636 return err; 2641 return err;
2637} 2642}
2638 2643
2644static int trace__deliver_event(struct trace *trace, union perf_event *event)
2645{
2646 struct perf_evlist *evlist = trace->evlist;
2647 struct perf_sample sample;
2648 int err;
2649
2650 err = perf_evlist__parse_sample(evlist, event, &sample);
2651 if (err)
2652 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
2653 else
2654 trace__handle_event(trace, event, &sample);
2655
2656 return 0;
2657}
2658
2659static int trace__flush_ordered_events(struct trace *trace)
2660{
2661 u64 first = ordered_events__first_time(&trace->oe.data);
2662 u64 flush = trace->oe.last - NSEC_PER_SEC;
2663
2664	/* Is there something to flush? */
2665 if (first && first < flush)
2666 return ordered_events__flush_time(&trace->oe.data, flush);
2667
2668 return 0;
2669}
2670
2671static int trace__deliver_ordered_event(struct trace *trace, union perf_event *event)
2672{
2673 struct perf_evlist *evlist = trace->evlist;
2674 int err;
2675
2676 err = perf_evlist__parse_sample_timestamp(evlist, event, &trace->oe.last);
2677 if (err && err != -1)
2678 return err;
2679
2680 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
2681 if (err)
2682 return err;
2683
2684 return trace__flush_ordered_events(trace);
2685}
2686
2687static int ordered_events__deliver_event(struct ordered_events *oe,
2688 struct ordered_event *event)
2689{
2690 struct trace *trace = container_of(oe, struct trace, oe.data);
2691
2692 return trace__deliver_event(trace, event->event);
2693}
2694
2639static int trace__run(struct trace *trace, int argc, const char **argv) 2695static int trace__run(struct trace *trace, int argc, const char **argv)
2640{ 2696{
2641 struct perf_evlist *evlist = trace->evlist; 2697 struct perf_evlist *evlist = trace->evlist;
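
trace__deliver_ordered_event() above queues every event with its timestamp and trace__flush_ordered_events() delivers only what is at least one second older than the newest timestamp seen, so samples read slightly out of order from different ring buffers are still printed in time order. A self-contained sketch of that time-windowed flush (the timestamps and buffer size are illustrative; the one-second window mirrors NSEC_PER_SEC in the hunk):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NSEC_PER_SEC	1000000000ULL
#define MAX_QUEUED	128

static uint64_t queued[MAX_QUEUED];
static int nr_queued;

static void deliver(uint64_t ts)
{
	printf("deliver event @ %llu ns\n", (unsigned long long)ts);
}

/* insert sorted by time, like ordered_events__queue() keeps its list */
static void queue_event(uint64_t ts)
{
	int i;

	if (nr_queued == MAX_QUEUED)
		return;
	i = nr_queued++;
	while (i > 0 && queued[i - 1] > ts) {
		queued[i] = queued[i - 1];
		i--;
	}
	queued[i] = ts;
}

/* deliver everything older than 'limit', like ordered_events__flush_time() */
static void flush_older_than(uint64_t limit)
{
	int i;

	for (i = 0; i < nr_queued && queued[i] < limit; i++)
		deliver(queued[i]);
	memmove(queued, queued + i, (nr_queued - i) * sizeof(queued[0]));
	nr_queued -= i;
}

int main(void)
{
	/* slightly out-of-order timestamps, in nanoseconds */
	uint64_t samples[] = { 100, 300, 200, 2500000000ULL, 400, 2600000000ULL };
	uint64_t last = 0;

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		if (samples[i] > last)
			last = samples[i];
		queue_event(samples[i]);
		if (last > NSEC_PER_SEC)
			flush_older_than(last - NSEC_PER_SEC);
	}
	flush_older_than(UINT64_MAX);	/* final flush, like OE_FLUSH__FINAL */
	return 0;
}
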
@@ -2782,7 +2838,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
2782 * Now that we already used evsel->attr to ask the kernel to setup the 2838 * Now that we already used evsel->attr to ask the kernel to setup the
2783 * events, lets reuse evsel->attr.sample_max_stack as the limit in 2839 * events, lets reuse evsel->attr.sample_max_stack as the limit in
2784 * trace__resolve_callchain(), allowing per-event max-stack settings 2840 * trace__resolve_callchain(), allowing per-event max-stack settings
2785 * to override an explicitely set --max-stack global setting. 2841 * to override an explicitly set --max-stack global setting.
2786 */ 2842 */
2787 evlist__for_each_entry(evlist, evsel) { 2843 evlist__for_each_entry(evlist, evsel) {
2788 if (evsel__has_callchain(evsel) && 2844 if (evsel__has_callchain(evsel) &&
@@ -2801,18 +2857,12 @@ again:
2801 continue; 2857 continue;
2802 2858
2803 while ((event = perf_mmap__read_event(md)) != NULL) { 2859 while ((event = perf_mmap__read_event(md)) != NULL) {
2804 struct perf_sample sample;
2805
2806 ++trace->nr_events; 2860 ++trace->nr_events;
2807 2861
2808 err = perf_evlist__parse_sample(evlist, event, &sample); 2862 err = trace__deliver_ordered_event(trace, event);
2809 if (err) { 2863 if (err)
2810 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err); 2864 goto out_disable;
2811 goto next_event;
2812 }
2813 2865
2814 trace__handle_event(trace, event, &sample);
2815next_event:
2816 perf_mmap__consume(md); 2866 perf_mmap__consume(md);
2817 2867
2818 if (interrupted) 2868 if (interrupted)
@@ -2834,6 +2884,9 @@ next_event:
2834 draining = true; 2884 draining = true;
2835 2885
2836 goto again; 2886 goto again;
2887 } else {
2888 if (trace__flush_ordered_events(trace))
2889 goto out_disable;
2837 } 2890 }
2838 } else { 2891 } else {
2839 goto again; 2892 goto again;
@@ -2844,6 +2897,8 @@ out_disable:
2844 2897
2845 perf_evlist__disable(evlist); 2898 perf_evlist__disable(evlist);
2846 2899
2900 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
2901
2847 if (!err) { 2902 if (!err) {
2848 if (trace->summary) 2903 if (trace->summary)
2849 trace__fprintf_thread_summary(trace, trace->output); 2904 trace__fprintf_thread_summary(trace, trace->output);
@@ -3393,7 +3448,6 @@ int cmd_trace(int argc, const char **argv)
3393 .user_interval = ULLONG_MAX, 3448 .user_interval = ULLONG_MAX,
3394 .no_buffering = true, 3449 .no_buffering = true,
3395 .mmap_pages = UINT_MAX, 3450 .mmap_pages = UINT_MAX,
3396 .proc_map_timeout = 500,
3397 }, 3451 },
3398 .output = stderr, 3452 .output = stderr,
3399 .show_comm = true, 3453 .show_comm = true,
@@ -3464,7 +3518,7 @@ int cmd_trace(int argc, const char **argv)
3464 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)), 3518 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
3465 OPT_BOOLEAN(0, "print-sample", &trace.print_sample, 3519 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
3466 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"), 3520 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
3467 OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout, 3521 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
3468 "per thread proc mmap processing timeout in ms"), 3522 "per thread proc mmap processing timeout in ms"),
3469 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only", 3523 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
3470 trace__parse_cgroups), 3524 trace__parse_cgroups),
@@ -3555,6 +3609,9 @@ int cmd_trace(int argc, const char **argv)
3555 } 3609 }
3556 } 3610 }
3557 3611
3612 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
3613 ordered_events__set_copy_on_queue(&trace.oe.data, true);
3614
3558 /* 3615 /*
3559 * If we are augmenting syscalls, then combine what we put in the 3616 * If we are augmenting syscalls, then combine what we put in the
3560 * __augmented_syscalls__ BPF map with what is in the 3617 * __augmented_syscalls__ BPF map with what is in the
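
The builtin-trace.c hunks above route every mmap'ed event through an ordered_events queue instead of parsing and handling it inline, so samples from different ring buffers get delivered in timestamp order. The fragment below is a minimal sketch of that init/queue/flush shape using the same calls visible in the hunks; the deliver stub, the queue call and the timestamp argument are illustrative placeholders, not the tool's actual code.

/* Minimal sketch of the ordered_events flow adopted above (illustrative,
 * not the tree's code); assumes tools/perf's util/ordered-events.h API. */
#include <linux/types.h>
#include "util/ordered-events.h"

static int deliver_one(struct ordered_events *oe, struct ordered_event *event)
{
	/* The real callback hands event->event to the tool's normal
	 * sample handling; this stub only shows the signature. */
	(void)oe;
	(void)event;
	return 0;
}

static void ordered_flow_sketch(struct ordered_events *oe,
				union perf_event *event, u64 timestamp)
{
	ordered_events__init(oe, deliver_one, /* opaque tool state */ NULL);
	ordered_events__set_copy_on_queue(oe, true); /* mmap space is recycled */

	/* Queue events as they are read from the ring buffers ... */
	ordered_events__queue(oe, event, timestamp, /* file_offset */ 0);

	/* ... flush a round while tracing, and flush everything on exit. */
	ordered_events__flush(oe, OE_FLUSH__ROUND);
	ordered_events__flush(oe, OE_FLUSH__FINAL);
}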
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 0ed4a34c74c4..388c6dd128b8 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -82,7 +82,7 @@ struct record_opts {
82 bool use_clockid; 82 bool use_clockid;
83 clockid_t clockid; 83 clockid_t clockid;
84 u64 clockid_res_ns; 84 u64 clockid_res_ns;
85 unsigned int proc_map_timeout; 85 int nr_cblocks;
86}; 86};
87 87
88struct option; 88struct option;
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/cache.json b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
index bba3152ec54a..0b080b0352d8 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
@@ -433,7 +433,7 @@
433 }, 433 },
434 { 434 {
435 "PEBS": "1", 435 "PEBS": "1",
436 "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", 436 "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
437 "EventCode": "0xD0", 437 "EventCode": "0xD0",
438 "Counter": "0,1,2,3", 438 "Counter": "0,1,2,3",
439 "UMask": "0x41", 439 "UMask": "0x41",
@@ -445,7 +445,7 @@
445 }, 445 },
446 { 446 {
447 "PEBS": "1", 447 "PEBS": "1",
448 "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", 448 "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
449 "EventCode": "0xD0", 449 "EventCode": "0xD0",
450 "Counter": "0,1,2,3", 450 "Counter": "0,1,2,3",
451 "UMask": "0x42", 451 "UMask": "0x42",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
index 97c5d0784c6c..999cf3066363 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
@@ -317,7 +317,7 @@
317 "CounterHTOff": "0,1,2,3,4,5,6,7" 317 "CounterHTOff": "0,1,2,3,4,5,6,7"
318 }, 318 },
319 { 319 {
320 "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.", 320 "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
321 "EventCode": "0x87", 321 "EventCode": "0x87",
322 "Counter": "0,1,2,3", 322 "Counter": "0,1,2,3",
323 "UMask": "0x1", 323 "UMask": "0x1",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
index bf243fe2a0ec..4ad425312bdc 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
@@ -439,7 +439,7 @@
439 "PEBS": "1", 439 "PEBS": "1",
440 "Counter": "0,1,2,3", 440 "Counter": "0,1,2,3",
441 "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS", 441 "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
442 "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", 442 "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
443 "SampleAfterValue": "100003", 443 "SampleAfterValue": "100003",
444 "CounterHTOff": "0,1,2,3" 444 "CounterHTOff": "0,1,2,3"
445 }, 445 },
@@ -451,7 +451,7 @@
451 "PEBS": "1", 451 "PEBS": "1",
452 "Counter": "0,1,2,3", 452 "Counter": "0,1,2,3",
453 "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES", 453 "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
454 "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", 454 "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
455 "SampleAfterValue": "100003", 455 "SampleAfterValue": "100003",
456 "L1_Hit_Indication": "1", 456 "L1_Hit_Indication": "1",
457 "CounterHTOff": "0,1,2,3" 457 "CounterHTOff": "0,1,2,3"
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
index 920c89da9111..0d04bf9db000 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
@@ -322,7 +322,7 @@
322 "BriefDescription": "Stalls caused by changing prefix length of the instruction.", 322 "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
323 "Counter": "0,1,2,3", 323 "Counter": "0,1,2,3",
324 "EventName": "ILD_STALL.LCP", 324 "EventName": "ILD_STALL.LCP",
325 "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.", 325 "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
326 "SampleAfterValue": "2000003", 326 "SampleAfterValue": "2000003",
327 "CounterHTOff": "0,1,2,3,4,5,6,7" 327 "CounterHTOff": "0,1,2,3,4,5,6,7"
328 }, 328 },
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
index bf0c51272068..141b1080429d 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
@@ -439,7 +439,7 @@
439 "PEBS": "1", 439 "PEBS": "1",
440 "Counter": "0,1,2,3", 440 "Counter": "0,1,2,3",
441 "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS", 441 "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
442 "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", 442 "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
443 "SampleAfterValue": "100003", 443 "SampleAfterValue": "100003",
444 "CounterHTOff": "0,1,2,3" 444 "CounterHTOff": "0,1,2,3"
445 }, 445 },
@@ -451,7 +451,7 @@
451 "PEBS": "1", 451 "PEBS": "1",
452 "Counter": "0,1,2,3", 452 "Counter": "0,1,2,3",
453 "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES", 453 "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
454 "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", 454 "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
455 "SampleAfterValue": "100003", 455 "SampleAfterValue": "100003",
456 "L1_Hit_Indication": "1", 456 "L1_Hit_Indication": "1",
457 "CounterHTOff": "0,1,2,3" 457 "CounterHTOff": "0,1,2,3"
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
index 920c89da9111..0d04bf9db000 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
@@ -322,7 +322,7 @@
322 "BriefDescription": "Stalls caused by changing prefix length of the instruction.", 322 "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
323 "Counter": "0,1,2,3", 323 "Counter": "0,1,2,3",
324 "EventName": "ILD_STALL.LCP", 324 "EventName": "ILD_STALL.LCP",
325 "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.", 325 "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
326 "SampleAfterValue": "2000003", 326 "SampleAfterValue": "2000003",
327 "CounterHTOff": "0,1,2,3,4,5,6,7" 327 "CounterHTOff": "0,1,2,3,4,5,6,7"
328 }, 328 },
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/cache.json b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
index f723e8f7bb09..ee22e4a5e30d 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
@@ -31,7 +31,7 @@
31 }, 31 },
32 { 32 {
33 "PEBS": "1", 33 "PEBS": "1",
34 "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", 34 "PublicDescription": "This event counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
35 "EventCode": "0xD0", 35 "EventCode": "0xD0",
36 "Counter": "0,1,2,3", 36 "Counter": "0,1,2,3",
37 "UMask": "0x41", 37 "UMask": "0x41",
@@ -42,7 +42,7 @@
42 }, 42 },
43 { 43 {
44 "PEBS": "1", 44 "PEBS": "1",
45 "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", 45 "PublicDescription": "This event counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
46 "EventCode": "0xD0", 46 "EventCode": "0xD0",
47 "Counter": "0,1,2,3", 47 "Counter": "0,1,2,3",
48 "UMask": "0x42", 48 "UMask": "0x42",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
index 8a597e45ed84..34a519d9bfa0 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
@@ -778,7 +778,7 @@
778 "CounterHTOff": "0,1,2,3,4,5,6,7" 778 "CounterHTOff": "0,1,2,3,4,5,6,7"
779 }, 779 },
780 { 780 {
781 "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.", 781 "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
782 "EventCode": "0x03", 782 "EventCode": "0x03",
783 "Counter": "0,1,2,3", 783 "Counter": "0,1,2,3",
784 "UMask": "0x2", 784 "UMask": "0x2",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
index 88ba5994b994..e434ec723001 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
@@ -121,7 +121,7 @@
121 "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.OUTSTANDING", 121 "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.OUTSTANDING",
122 "MSRIndex": "0x1a6", 122 "MSRIndex": "0x1a6",
123 "SampleAfterValue": "100007", 123 "SampleAfterValue": "100007",
124 "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 124 "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
125 "Offcore": "1" 125 "Offcore": "1"
126 }, 126 },
127 { 127 {
@@ -187,7 +187,7 @@
187 "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING", 187 "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
188 "MSRIndex": "0x1a6", 188 "MSRIndex": "0x1a6",
189 "SampleAfterValue": "100007", 189 "SampleAfterValue": "100007",
190 "BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 190 "BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
191 "Offcore": "1" 191 "Offcore": "1"
192 }, 192 },
193 { 193 {
@@ -253,7 +253,7 @@
253 "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.OUTSTANDING", 253 "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.OUTSTANDING",
254 "MSRIndex": "0x1a6", 254 "MSRIndex": "0x1a6",
255 "SampleAfterValue": "100007", 255 "SampleAfterValue": "100007",
256 "BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 256 "BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
257 "Offcore": "1" 257 "Offcore": "1"
258 }, 258 },
259 { 259 {
@@ -319,7 +319,7 @@
319 "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING", 319 "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
320 "MSRIndex": "0x1a6", 320 "MSRIndex": "0x1a6",
321 "SampleAfterValue": "100007", 321 "SampleAfterValue": "100007",
322 "BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 322 "BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
323 "Offcore": "1" 323 "Offcore": "1"
324 }, 324 },
325 { 325 {
@@ -385,7 +385,7 @@
385 "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING", 385 "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
386 "MSRIndex": "0x1a6", 386 "MSRIndex": "0x1a6",
387 "SampleAfterValue": "100007", 387 "SampleAfterValue": "100007",
388 "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 388 "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
389 "Offcore": "1" 389 "Offcore": "1"
390 }, 390 },
391 { 391 {
@@ -451,7 +451,7 @@
451 "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING", 451 "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
452 "MSRIndex": "0x1a6", 452 "MSRIndex": "0x1a6",
453 "SampleAfterValue": "100007", 453 "SampleAfterValue": "100007",
454 "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 454 "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
455 "Offcore": "1" 455 "Offcore": "1"
456 }, 456 },
457 { 457 {
@@ -539,7 +539,7 @@
539 "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING", 539 "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
540 "MSRIndex": "0x1a6", 540 "MSRIndex": "0x1a6",
541 "SampleAfterValue": "100007", 541 "SampleAfterValue": "100007",
542 "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 542 "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
543 "Offcore": "1" 543 "Offcore": "1"
544 }, 544 },
545 { 545 {
@@ -605,7 +605,7 @@
605 "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.OUTSTANDING", 605 "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.OUTSTANDING",
606 "MSRIndex": "0x1a6", 606 "MSRIndex": "0x1a6",
607 "SampleAfterValue": "100007", 607 "SampleAfterValue": "100007",
608 "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 608 "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
609 "Offcore": "1" 609 "Offcore": "1"
610 }, 610 },
611 { 611 {
@@ -682,7 +682,7 @@
682 "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING", 682 "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
683 "MSRIndex": "0x1a6", 683 "MSRIndex": "0x1a6",
684 "SampleAfterValue": "100007", 684 "SampleAfterValue": "100007",
685 "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 685 "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
686 "Offcore": "1" 686 "Offcore": "1"
687 }, 687 },
688 { 688 {
@@ -748,7 +748,7 @@
748 "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.OUTSTANDING", 748 "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.OUTSTANDING",
749 "MSRIndex": "0x1a6", 749 "MSRIndex": "0x1a6",
750 "SampleAfterValue": "100007", 750 "SampleAfterValue": "100007",
751 "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 751 "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
752 "Offcore": "1" 752 "Offcore": "1"
753 }, 753 },
754 { 754 {
@@ -869,7 +869,7 @@
869 "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING", 869 "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
870 "MSRIndex": "0x1a6", 870 "MSRIndex": "0x1a6",
871 "SampleAfterValue": "100007", 871 "SampleAfterValue": "100007",
872 "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 872 "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
873 "Offcore": "1" 873 "Offcore": "1"
874 }, 874 },
875 { 875 {
@@ -935,7 +935,7 @@
935 "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.OUTSTANDING", 935 "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.OUTSTANDING",
936 "MSRIndex": "0x1a6", 936 "MSRIndex": "0x1a6",
937 "SampleAfterValue": "100007", 937 "SampleAfterValue": "100007",
938 "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 938 "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
939 "Offcore": "1" 939 "Offcore": "1"
940 }, 940 },
941 { 941 {
@@ -1067,7 +1067,7 @@
1067 "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING", 1067 "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
1068 "MSRIndex": "0x1a6", 1068 "MSRIndex": "0x1a6",
1069 "SampleAfterValue": "100007", 1069 "SampleAfterValue": "100007",
1070 "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 1070 "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
1071 "Offcore": "1" 1071 "Offcore": "1"
1072 }, 1072 },
1073 { 1073 {
@@ -1133,7 +1133,7 @@
1133 "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING", 1133 "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
1134 "MSRIndex": "0x1a6", 1134 "MSRIndex": "0x1a6",
1135 "SampleAfterValue": "100007", 1135 "SampleAfterValue": "100007",
1136 "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 1136 "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
1137 "Offcore": "1" 1137 "Offcore": "1"
1138 }, 1138 },
1139 { 1139 {
@@ -1199,7 +1199,7 @@
1199 "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING", 1199 "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
1200 "MSRIndex": "0x1a6", 1200 "MSRIndex": "0x1a6",
1201 "SampleAfterValue": "100007", 1201 "SampleAfterValue": "100007",
1202 "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ", 1202 "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
1203 "Offcore": "1" 1203 "Offcore": "1"
1204 }, 1204 },
1205 { 1205 {
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
index bef73c499f83..16b04a20bc12 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
@@ -31,7 +31,7 @@
31 }, 31 },
32 { 32 {
33 "PEBS": "1", 33 "PEBS": "1",
34 "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", 34 "PublicDescription": "This event counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
35 "EventCode": "0xD0", 35 "EventCode": "0xD0",
36 "Counter": "0,1,2,3", 36 "Counter": "0,1,2,3",
37 "UMask": "0x41", 37 "UMask": "0x41",
@@ -42,7 +42,7 @@
42 }, 42 },
43 { 43 {
44 "PEBS": "1", 44 "PEBS": "1",
45 "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", 45 "PublicDescription": "This event counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
46 "EventCode": "0xD0", 46 "EventCode": "0xD0",
47 "Counter": "0,1,2,3", 47 "Counter": "0,1,2,3",
48 "UMask": "0x42", 48 "UMask": "0x42",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
index 8a597e45ed84..34a519d9bfa0 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
@@ -778,7 +778,7 @@
778 "CounterHTOff": "0,1,2,3,4,5,6,7" 778 "CounterHTOff": "0,1,2,3,4,5,6,7"
779 }, 779 },
780 { 780 {
781 "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.", 781 "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
782 "EventCode": "0x03", 782 "EventCode": "0x03",
783 "Counter": "0,1,2,3", 783 "Counter": "0,1,2,3",
784 "UMask": "0x2", 784 "UMask": "0x2",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index 36c903faed0b..71e9737f4614 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -73,7 +73,7 @@
73 }, 73 },
74 { 74 {
75 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", 75 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
76 "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )", 76 "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
77 "MetricGroup": "Memory_Bound;Memory_Lat", 77 "MetricGroup": "Memory_Bound;Memory_Lat",
78 "MetricName": "Load_Miss_Real_Latency" 78 "MetricName": "Load_Miss_Real_Latency"
79 }, 79 },
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index 36c903faed0b..71e9737f4614 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -73,7 +73,7 @@
73 }, 73 },
74 { 74 {
75 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", 75 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
76 "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )", 76 "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
77 "MetricGroup": "Memory_Bound;Memory_Lat", 77 "MetricGroup": "Memory_Bound;Memory_Lat",
78 "MetricName": "Load_Miss_Real_Latency" 78 "MetricName": "Load_Miss_Real_Latency"
79 }, 79 },
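
Both the Skylake and Skylake-X metric files above drop the _PS (precise) event variants from Load_Miss_Real_Latency, so the expression divides outstanding L1D miss cycles by the number of retired loads that missed L1 or hit a fill-buffer entry. A standalone illustration of that arithmetic, with invented counter values:

/* Illustration of the corrected MetricExpr; the counts are made up. */
#include <stdio.h>

int main(void)
{
	double l1d_pend_miss_pending = 1.2e9; /* L1D_PEND_MISS.PENDING    */
	double l1_miss               = 3.0e7; /* MEM_LOAD_RETIRED.L1_MISS */
	double fb_hit                = 1.0e7; /* MEM_LOAD_RETIRED.FB_HIT  */

	double load_miss_real_latency =
		l1d_pend_miss_pending / (l1_miss + fb_hit);

	printf("Load_Miss_Real_Latency = %.1f cycles\n", load_miss_real_latency);
	return 0;
}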
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
index de6e70e552e2..adb42c72f5c8 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
@@ -428,7 +428,7 @@
428 "EventCode": "0x5C", 428 "EventCode": "0x5C",
429 "EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB", 429 "EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
430 "PerPkg": "1", 430 "PerPkg": "1",
431 "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to it's home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This reponse will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.", 431 "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to it's home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This response will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
432 "UMask": "0x10", 432 "UMask": "0x10",
433 "Unit": "CHA" 433 "Unit": "CHA"
434 }, 434 },
@@ -967,7 +967,7 @@
967 "EventCode": "0x57", 967 "EventCode": "0x57",
968 "EventName": "UNC_M2M_PREFCAM_INSERTS", 968 "EventName": "UNC_M2M_PREFCAM_INSERTS",
969 "PerPkg": "1", 969 "PerPkg": "1",
970 "PublicDescription": "Counts when the M2M (Mesh to Memory) recieves a prefetch request and inserts it into its outstanding prefetch queue. Explanatory Side Note: the prefect queue is made from CAM: Content Addressable Memory", 970 "PublicDescription": "Counts when the M2M (Mesh to Memory) receives a prefetch request and inserts it into its outstanding prefetch queue. Explanatory Side Note: the prefect queue is made from CAM: Content Addressable Memory",
971 "Unit": "M2M" 971 "Unit": "M2M"
972 }, 972 },
973 { 973 {
@@ -1041,7 +1041,7 @@
1041 "EventCode": "0x31", 1041 "EventCode": "0x31",
1042 "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0", 1042 "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
1043 "PerPkg": "1", 1043 "PerPkg": "1",
1044 "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot0 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", 1044 "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot0 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
1045 "UMask": "0x1", 1045 "UMask": "0x1",
1046 "Unit": "UPI LL" 1046 "Unit": "UPI LL"
1047 }, 1047 },
@@ -1051,17 +1051,17 @@
1051 "EventCode": "0x31", 1051 "EventCode": "0x31",
1052 "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1", 1052 "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
1053 "PerPkg": "1", 1053 "PerPkg": "1",
1054 "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot1 RxQ buffer (Receive Queue) and passed directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", 1054 "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot1 RxQ buffer (Receive Queue) and passed directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
1055 "UMask": "0x2", 1055 "UMask": "0x2",
1056 "Unit": "UPI LL" 1056 "Unit": "UPI LL"
1057 }, 1057 },
1058 { 1058 {
1059 "BriefDescription": "FLITs received which bypassed the Slot0 Recieve Buffer", 1059 "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
1060 "Counter": "0,1,2,3", 1060 "Counter": "0,1,2,3",
1061 "EventCode": "0x31", 1061 "EventCode": "0x31",
1062 "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2", 1062 "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
1063 "PerPkg": "1", 1063 "PerPkg": "1",
1064 "PublicDescription": "Counts incoming FLITs (FLow control unITs) whcih bypassed the slot2 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", 1064 "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot2 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
1065 "UMask": "0x4", 1065 "UMask": "0x4",
1066 "Unit": "UPI LL" 1066 "Unit": "UPI LL"
1067 }, 1067 },
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 05dfe11c2f9e..d8426547219b 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -182,7 +182,7 @@ int test__attr(struct test *test __maybe_unused, int subtest __maybe_unused)
182 char path_perf[PATH_MAX]; 182 char path_perf[PATH_MAX];
183 char path_dir[PATH_MAX]; 183 char path_dir[PATH_MAX];
184 184
185 /* First try developement tree tests. */ 185 /* First try development tree tests. */
186 if (!lstat("./tests", &st)) 186 if (!lstat("./tests", &st))
187 return run_dir("./tests", "./perf"); 187 return run_dir("./tests", "./perf");
188 188
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index ff9b60b99f52..44090a9a19f3 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -116,7 +116,7 @@ class Event(dict):
116 if not self.has_key(t) or not other.has_key(t): 116 if not self.has_key(t) or not other.has_key(t):
117 continue 117 continue
118 if not data_equal(self[t], other[t]): 118 if not data_equal(self[t], other[t]):
119 log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) 119 log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
120 120
121# Test file description needs to have following sections: 121# Test file description needs to have following sections:
122# [config] 122# [config]
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index a467615c5a0e..910e25e64188 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -291,12 +291,20 @@ int test__bp_signal(struct test *test __maybe_unused, int subtest __maybe_unused
291 291
292bool test__bp_signal_is_supported(void) 292bool test__bp_signal_is_supported(void)
293{ 293{
294/* 294 /*
295 * The powerpc so far does not have support to even create 295 * PowerPC and S390 do not support creation of instruction
296 * instruction breakpoint using the perf event interface. 296 * breakpoints using the perf_event interface.
297 * Once it's there we can release this. 297 *
298 */ 298 * ARM requires explicit rounding down of the instruction
299#if defined(__powerpc__) || defined(__s390x__) 299 * pointer in Thumb mode, and then requires the single-step
300 * to be handled explicitly in the overflow handler to avoid
301 * stepping into the SIGIO handler and getting stuck on the
302 * breakpointed instruction.
303 *
304 * Just disable the test for these architectures until these
305 * issues are resolved.
306 */
307#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__)
300 return false; 308 return false;
301#else 309#else
302 return true; 310 return true;
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 6b049f3f5cf4..dbf2c69944d2 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -599,7 +599,7 @@ static int do_test_code_reading(bool try_kcore)
599 } 599 }
600 600
601 ret = perf_event__synthesize_thread_map(NULL, threads, 601 ret = perf_event__synthesize_thread_map(NULL, threads,
602 perf_event__process, machine, false, 500); 602 perf_event__process, machine, false);
603 if (ret < 0) { 603 if (ret < 0) {
604 pr_debug("perf_event__synthesize_thread_map failed\n"); 604 pr_debug("perf_event__synthesize_thread_map failed\n");
605 goto out_err; 605 goto out_err;
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 2f008067d989..7c8d2e422401 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -34,7 +34,7 @@ static int init_live_machine(struct machine *machine)
34 pid_t pid = getpid(); 34 pid_t pid = getpid();
35 35
36 return perf_event__synthesize_mmap_events(NULL, &event, pid, pid, 36 return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
37 mmap_handler, machine, true, 500); 37 mmap_handler, machine, true);
38} 38}
39 39
40/* 40/*
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index b1af2499a3c9..5ede9b561d32 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -132,7 +132,7 @@ static int synth_all(struct machine *machine)
132{ 132{
133 return perf_event__synthesize_threads(NULL, 133 return perf_event__synthesize_threads(NULL,
134 perf_event__process, 134 perf_event__process,
135 machine, 0, 500, 1); 135 machine, 0, 1);
136} 136}
137 137
138static int synth_process(struct machine *machine) 138static int synth_process(struct machine *machine)
@@ -144,7 +144,7 @@ static int synth_process(struct machine *machine)
144 144
145 err = perf_event__synthesize_thread_map(NULL, map, 145 err = perf_event__synthesize_thread_map(NULL, map,
146 perf_event__process, 146 perf_event__process,
147 machine, 0, 500); 147 machine, 0);
148 148
149 thread_map__put(map); 149 thread_map__put(map);
150 return err; 150 return err;
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 34394cc05077..07f6bd8ed719 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -58,6 +58,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
58 char *bname, *mmap_filename; 58 char *bname, *mmap_filename;
59 u64 prev_time = 0; 59 u64 prev_time = 0;
60 bool found_cmd_mmap = false, 60 bool found_cmd_mmap = false,
61 found_coreutils_mmap = false,
61 found_libc_mmap = false, 62 found_libc_mmap = false,
62 found_vdso_mmap = false, 63 found_vdso_mmap = false,
63 found_ld_mmap = false; 64 found_ld_mmap = false;
@@ -254,6 +255,8 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
254 if (bname != NULL) { 255 if (bname != NULL) {
255 if (!found_cmd_mmap) 256 if (!found_cmd_mmap)
256 found_cmd_mmap = !strcmp(bname + 1, cmd); 257 found_cmd_mmap = !strcmp(bname + 1, cmd);
258 if (!found_coreutils_mmap)
259 found_coreutils_mmap = !strcmp(bname + 1, "coreutils");
257 if (!found_libc_mmap) 260 if (!found_libc_mmap)
258 found_libc_mmap = !strncmp(bname + 1, "libc", 4); 261 found_libc_mmap = !strncmp(bname + 1, "libc", 4);
259 if (!found_ld_mmap) 262 if (!found_ld_mmap)
@@ -292,7 +295,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
292 } 295 }
293 296
294found_exit: 297found_exit:
295 if (nr_events[PERF_RECORD_COMM] > 1) { 298 if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
296 pr_debug("Excessive number of PERF_RECORD_COMM events!\n"); 299 pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
297 ++errs; 300 ++errs;
298 } 301 }
@@ -302,7 +305,7 @@ found_exit:
302 ++errs; 305 ++errs;
303 } 306 }
304 307
305 if (!found_cmd_mmap) { 308 if (!found_cmd_mmap && !found_coreutils_mmap) {
306 pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd); 309 pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
307 ++errs; 310 ++errs;
308 } 311 }
diff --git a/tools/perf/trace/beauty/mmap_flags.sh b/tools/perf/trace/beauty/mmap_flags.sh
index 22c3fdca8975..32bac9c0d694 100755
--- a/tools/perf/trace/beauty/mmap_flags.sh
+++ b/tools/perf/trace/beauty/mmap_flags.sh
@@ -20,12 +20,12 @@ egrep -q $regex ${arch_mman} && \
20(egrep $regex ${arch_mman} | \ 20(egrep $regex ${arch_mman} | \
21 sed -r "s/$regex/\2 \1/g" | \ 21 sed -r "s/$regex/\2 \1/g" | \
22 xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n") 22 xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
23egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman} && 23([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman}) &&
24(egrep $regex ${header_dir}/mman-common.h | \ 24(egrep $regex ${header_dir}/mman-common.h | \
25 egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \ 25 egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
26 sed -r "s/$regex/\2 \1/g" | \ 26 sed -r "s/$regex/\2 \1/g" | \
27 xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n") 27 xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
28egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.h>.*' ${arch_mman} && 28([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.h>.*' ${arch_mman}) &&
29(egrep $regex ${header_dir}/mman.h | \ 29(egrep $regex ${header_dir}/mman.h | \
30 sed -r "s/$regex/\2 \1/g" | \ 30 sed -r "s/$regex/\2 \1/g" | \
31 xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n") 31 xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index a96f62ca984a..ffac1d54a3d4 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -2219,10 +2219,21 @@ static int hists_browser__scnprintf_title(struct hist_browser *browser, char *bf
2219 if (!is_report_browser(hbt)) { 2219 if (!is_report_browser(hbt)) {
2220 struct perf_top *top = hbt->arg; 2220 struct perf_top *top = hbt->arg;
2221 2221
2222 printed += scnprintf(bf + printed, size - printed,
2223 " lost: %" PRIu64 "/%" PRIu64,
2224 top->lost, top->lost_total);
2225
2226 printed += scnprintf(bf + printed, size - printed,
2227 " drop: %" PRIu64 "/%" PRIu64,
2228 top->drop, top->drop_total);
2229
2222 if (top->zero) 2230 if (top->zero)
2223 printed += scnprintf(bf + printed, size - printed, " [z]"); 2231 printed += scnprintf(bf + printed, size - printed, " [z]");
2232
2233 perf_top__reset_sample_counters(top);
2224 } 2234 }
2225 2235
2236
2226 return printed; 2237 return printed;
2227} 2238}
2228 2239
diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c
index 4ca799aadb4e..93d6b7240285 100644
--- a/tools/perf/ui/tui/helpline.c
+++ b/tools/perf/ui/tui/helpline.c
@@ -24,7 +24,7 @@ static void tui_helpline__push(const char *msg)
24 SLsmg_set_color(0); 24 SLsmg_set_color(0);
25 SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols); 25 SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols);
26 SLsmg_refresh(); 26 SLsmg_refresh();
27 strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0'; 27 strlcpy(ui_helpline__current, msg, sz);
28} 28}
29 29
30static int tui_helpline__show(const char *format, va_list ap) 30static int tui_helpline__show(const char *format, va_list ap)
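
The helpline change above replaces the strncpy()-plus-manual-termination idiom with strlcpy(), which always NUL-terminates the destination. A small sketch of the difference, assuming an strlcpy() implementation is available at link time (glibc 2.38+, libbsd, or the copy shipped under tools/lib):

/* Sketch only: contrasts the two idioms touched above. */
#include <stdio.h>
#include <string.h>

/* Assumption: provided by libbsd, a recent glibc, or tools/lib/string.c. */
extern size_t strlcpy(char *dst, const char *src, size_t size);

int main(void)
{
	char a[8], b[8];
	const char *msg = "a help line longer than eight bytes";

	strncpy(a, msg, sizeof(a)); /* does not NUL-terminate when src is longer */
	a[sizeof(a) - 1] = '\0';    /* the old code patched that up by hand      */

	strlcpy(b, msg, sizeof(b)); /* always terminates, truncating if needed   */

	printf("%s | %s\n", a, b);
	return 0;
}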
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index b7bf201fe8a8..af72be7f5b3b 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -77,6 +77,7 @@ libperf-y += stat-shadow.o
77libperf-y += stat-display.o 77libperf-y += stat-display.o
78libperf-y += record.o 78libperf-y += record.o
79libperf-y += srcline.o 79libperf-y += srcline.o
80libperf-y += srccode.o
80libperf-y += data.o 81libperf-y += data.o
81libperf-y += tsc.o 82libperf-y += tsc.o
82libperf-y += cloexec.o 83libperf-y += cloexec.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 6936daf89ddd..ac9805e0bc76 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -134,6 +134,7 @@ static int arch__associate_ins_ops(struct arch* arch, const char *name, struct i
134 return 0; 134 return 0;
135} 135}
136 136
137#include "arch/arc/annotate/instructions.c"
137#include "arch/arm/annotate/instructions.c" 138#include "arch/arm/annotate/instructions.c"
138#include "arch/arm64/annotate/instructions.c" 139#include "arch/arm64/annotate/instructions.c"
139#include "arch/x86/annotate/instructions.c" 140#include "arch/x86/annotate/instructions.c"
@@ -143,6 +144,10 @@ static int arch__associate_ins_ops(struct arch* arch, const char *name, struct i
143 144
144static struct arch architectures[] = { 145static struct arch architectures[] = {
145 { 146 {
147 .name = "arc",
148 .init = arc__annotate_init,
149 },
150 {
146 .name = "arm", 151 .name = "arm",
147 .init = arm__annotate_init, 152 .init = arm__annotate_init,
148 }, 153 },
@@ -1000,6 +1005,7 @@ static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64
1000static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch) 1005static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
1001{ 1006{
1002 unsigned n_insn; 1007 unsigned n_insn;
1008 unsigned int cover_insn = 0;
1003 u64 offset; 1009 u64 offset;
1004 1010
1005 n_insn = annotation__count_insn(notes, start, end); 1011 n_insn = annotation__count_insn(notes, start, end);
@@ -1013,21 +1019,34 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
1013 for (offset = start; offset <= end; offset++) { 1019 for (offset = start; offset <= end; offset++) {
1014 struct annotation_line *al = notes->offsets[offset]; 1020 struct annotation_line *al = notes->offsets[offset];
1015 1021
1016 if (al) 1022 if (al && al->ipc == 0.0) {
1017 al->ipc = ipc; 1023 al->ipc = ipc;
1024 cover_insn++;
1025 }
1026 }
1027
1028 if (cover_insn) {
1029 notes->hit_cycles += ch->cycles;
1030 notes->hit_insn += n_insn * ch->num;
1031 notes->cover_insn += cover_insn;
1018 } 1032 }
1019 } 1033 }
1020} 1034}
1021 1035
1022void annotation__compute_ipc(struct annotation *notes, size_t size) 1036void annotation__compute_ipc(struct annotation *notes, size_t size)
1023{ 1037{
1024 u64 offset; 1038 s64 offset;
1025 1039
1026 if (!notes->src || !notes->src->cycles_hist) 1040 if (!notes->src || !notes->src->cycles_hist)
1027 return; 1041 return;
1028 1042
1043 notes->total_insn = annotation__count_insn(notes, 0, size - 1);
1044 notes->hit_cycles = 0;
1045 notes->hit_insn = 0;
1046 notes->cover_insn = 0;
1047
1029 pthread_mutex_lock(&notes->lock); 1048 pthread_mutex_lock(&notes->lock);
1030 for (offset = 0; offset < size; ++offset) { 1049 for (offset = size - 1; offset >= 0; --offset) {
1031 struct cyc_hist *ch; 1050 struct cyc_hist *ch;
1032 1051
1033 ch = &notes->src->cycles_hist[offset]; 1052 ch = &notes->src->cycles_hist[offset];
@@ -1758,7 +1777,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
1758 while (!feof(file)) { 1777 while (!feof(file)) {
1759 /* 1778 /*
1760 * The source code line number (lineno) needs to be kept in 1779 * The source code line number (lineno) needs to be kept in
1761 * accross calls to symbol__parse_objdump_line(), so that it 1780 * across calls to symbol__parse_objdump_line(), so that it
1762 * can associate it with the instructions till the next one. 1781 * can associate it with the instructions till the next one.
1763 * See disasm_line__new() and struct disasm_line::line_nr. 1782 * See disasm_line__new() and struct disasm_line::line_nr.
1764 */ 1783 */
@@ -2563,6 +2582,22 @@ call_like:
2563 disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset); 2582 disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset);
2564} 2583}
2565 2584
2585static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
2586{
2587 double ipc = 0.0, coverage = 0.0;
2588
2589 if (notes->hit_cycles)
2590 ipc = notes->hit_insn / ((double)notes->hit_cycles);
2591
2592 if (notes->total_insn) {
2593 coverage = notes->cover_insn * 100.0 /
2594 ((double)notes->total_insn);
2595 }
2596
2597 scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
2598 ipc, coverage);
2599}
2600
2566static void __annotation_line__write(struct annotation_line *al, struct annotation *notes, 2601static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
2567 bool first_line, bool current_entry, bool change_color, int width, 2602 bool first_line, bool current_entry, bool change_color, int width,
2568 void *obj, unsigned int percent_type, 2603 void *obj, unsigned int percent_type,
@@ -2658,6 +2693,11 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
2658 ANNOTATION__MINMAX_CYCLES_WIDTH - 1, 2693 ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
2659 "Cycle(min/max)"); 2694 "Cycle(min/max)");
2660 } 2695 }
2696
2697 if (show_title && !*al->line) {
2698 ipc_coverage_string(bf, sizeof(bf), notes);
2699 obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
2700 }
2661 } 2701 }
2662 2702
2663 obj__printf(obj, " "); 2703 obj__printf(obj, " ");
@@ -2763,6 +2803,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev
2763 notes->nr_events = nr_pcnt; 2803 notes->nr_events = nr_pcnt;
2764 2804
2765 annotation__update_column_widths(notes); 2805 annotation__update_column_widths(notes);
2806 sym->annotate2 = true;
2766 2807
2767 return 0; 2808 return 0;
2768 2809
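
The new ipc_coverage_string() above reports two derived numbers per symbol: average IPC as instructions hit over cycles hit, and coverage as the percentage of the symbol's instructions that received any IPC value. A standalone sketch of the same arithmetic, with invented counts:

/* Reproduces the arithmetic of ipc_coverage_string() with made-up values. */
#include <stdio.h>

int main(void)
{
	unsigned long long hit_insn = 4000, hit_cycles = 2500;
	unsigned int cover_insn = 120, total_insn = 200;

	double ipc      = hit_cycles ? (double)hit_insn / hit_cycles : 0.0;
	double coverage = total_insn ? cover_insn * 100.0 / total_insn : 0.0;

	printf("(Average IPC: %.2f, IPC Coverage: %.1f%%)\n", ipc, coverage);
	return 0;
}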
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 5399ba2321bb..fb6463730ba4 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -64,6 +64,7 @@ bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
64#define ANNOTATION__IPC_WIDTH 6 64#define ANNOTATION__IPC_WIDTH 6
65#define ANNOTATION__CYCLES_WIDTH 6 65#define ANNOTATION__CYCLES_WIDTH 6
66#define ANNOTATION__MINMAX_CYCLES_WIDTH 19 66#define ANNOTATION__MINMAX_CYCLES_WIDTH 19
67#define ANNOTATION__AVG_IPC_WIDTH 36
67 68
68struct annotation_options { 69struct annotation_options {
69 bool hide_src_code, 70 bool hide_src_code,
@@ -262,6 +263,10 @@ struct annotation {
262 pthread_mutex_t lock; 263 pthread_mutex_t lock;
263 u64 max_coverage; 264 u64 max_coverage;
264 u64 start; 265 u64 start;
266 u64 hit_cycles;
267 u64 hit_insn;
268 unsigned int total_insn;
269 unsigned int cover_insn;
265 struct annotation_options *options; 270 struct annotation_options *options;
266 struct annotation_line **offsets; 271 struct annotation_line **offsets;
267 int nr_events; 272 int nr_events;
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index f9ae1a993806..2f3eb6d293ee 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -99,7 +99,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
99 if (err) 99 if (err)
100 return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE); 100 return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
101 } else 101 } else
102 pr_debug("bpf: successfull builtin compilation\n"); 102 pr_debug("bpf: successful builtin compilation\n");
103 obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename); 103 obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
104 104
105 if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj) 105 if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
@@ -1603,7 +1603,7 @@ struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const cha
1603 1603
1604 op = bpf_map__add_newop(map, NULL); 1604 op = bpf_map__add_newop(map, NULL);
1605 if (IS_ERR(op)) 1605 if (IS_ERR(op))
1606 return ERR_PTR(PTR_ERR(op)); 1606 return ERR_CAST(op);
1607 op->op_type = BPF_MAP_OP_SET_EVSEL; 1607 op->op_type = BPF_MAP_OP_SET_EVSEL;
1608 op->v.evsel = evsel; 1608 op->v.evsel = evsel;
1609 } 1609 }
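
The bpf-loader.c hunk swaps ERR_PTR(PTR_ERR(op)) for ERR_CAST(op): both forward an encoded error pointer to a caller expecting a different pointer type, but ERR_CAST states that directly instead of decoding and re-encoding the errno value. A minimal sketch of the pattern, assuming the linux/err.h helpers; the function and struct names here are illustrative, not from the tree:

/* Sketch of propagating an error pointer across pointer types. */
#include <linux/err.h>
#include <stddef.h>

struct widget;
struct gadget;

static struct gadget *gadget_from_widget(struct widget *w)
{
	if (IS_ERR(w))
		return ERR_CAST(w); /* same as ERR_PTR(PTR_ERR(w)), minus the round trip */

	return NULL; /* real code would build and return a gadget here */
}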
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 5ac157056cdf..1ea8f898f1a1 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -14,6 +14,7 @@
14#include "util.h" 14#include "util.h"
15#include "cache.h" 15#include "cache.h"
16#include <subcmd/exec-cmd.h> 16#include <subcmd/exec-cmd.h>
17#include "util/event.h" /* proc_map_timeout */
17#include "util/hist.h" /* perf_hist_config */ 18#include "util/hist.h" /* perf_hist_config */
18#include "util/llvm-utils.h" /* perf_llvm_config */ 19#include "util/llvm-utils.h" /* perf_llvm_config */
19#include "config.h" 20#include "config.h"
@@ -419,6 +420,9 @@ static int perf_buildid_config(const char *var, const char *value)
419static int perf_default_core_config(const char *var __maybe_unused, 420static int perf_default_core_config(const char *var __maybe_unused,
420 const char *value __maybe_unused) 421 const char *value __maybe_unused)
421{ 422{
423 if (!strcmp(var, "core.proc-map-timeout"))
424 proc_map_timeout = strtoul(value, NULL, 10);
425
422 /* Add other config variables here. */ 426 /* Add other config variables here. */
423 return 0; 427 return 0;
424} 428}
@@ -811,14 +815,14 @@ int config_error_nonbool(const char *var)
811void set_buildid_dir(const char *dir) 815void set_buildid_dir(const char *dir)
812{ 816{
813 if (dir) 817 if (dir)
814 scnprintf(buildid_dir, MAXPATHLEN-1, "%s", dir); 818 scnprintf(buildid_dir, MAXPATHLEN, "%s", dir);
815 819
816 /* default to $HOME/.debug */ 820 /* default to $HOME/.debug */
817 if (buildid_dir[0] == '\0') { 821 if (buildid_dir[0] == '\0') {
818 char *home = getenv("HOME"); 822 char *home = getenv("HOME");
819 823
820 if (home) { 824 if (home) {
821 snprintf(buildid_dir, MAXPATHLEN-1, "%s/%s", 825 snprintf(buildid_dir, MAXPATHLEN, "%s/%s",
822 home, DEBUG_CACHE_DIR); 826 home, DEBUG_CACHE_DIR);
823 } else { 827 } else {
824 strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1); 828 strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1);
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 938def6d0bb9..0b4c8629f578 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -116,6 +116,19 @@ int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
116 return 1; 116 return 1;
117} 117}
118 118
119static int cs_etm_decoder__gen_etmv3_config(struct cs_etm_trace_params *params,
120 ocsd_etmv3_cfg *config)
121{
122 config->reg_idr = params->etmv3.reg_idr;
123 config->reg_ctrl = params->etmv3.reg_ctrl;
124 config->reg_ccer = params->etmv3.reg_ccer;
125 config->reg_trc_id = params->etmv3.reg_trc_id;
126 config->arch_ver = ARCH_V7;
127 config->core_prof = profile_CortexA;
128
129 return 0;
130}
131
119static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params, 132static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
120 ocsd_etmv4_cfg *config) 133 ocsd_etmv4_cfg *config)
121{ 134{
@@ -237,10 +250,19 @@ cs_etm_decoder__create_etm_packet_printer(struct cs_etm_trace_params *t_params,
237 struct cs_etm_decoder *decoder) 250 struct cs_etm_decoder *decoder)
238{ 251{
239 const char *decoder_name; 252 const char *decoder_name;
253 ocsd_etmv3_cfg config_etmv3;
240 ocsd_etmv4_cfg trace_config_etmv4; 254 ocsd_etmv4_cfg trace_config_etmv4;
241 void *trace_config; 255 void *trace_config;
242 256
243 switch (t_params->protocol) { 257 switch (t_params->protocol) {
258 case CS_ETM_PROTO_ETMV3:
259 case CS_ETM_PROTO_PTM:
260 cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
261 decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
262 OCSD_BUILTIN_DCD_ETMV3 :
263 OCSD_BUILTIN_DCD_PTM;
264 trace_config = &config_etmv3;
265 break;
244 case CS_ETM_PROTO_ETMV4i: 266 case CS_ETM_PROTO_ETMV4i:
245 cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4); 267 cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
246 decoder_name = OCSD_BUILTIN_DCD_ETMV4I; 268 decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
@@ -263,9 +285,12 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
263 decoder->tail = 0; 285 decoder->tail = 0;
264 decoder->packet_count = 0; 286 decoder->packet_count = 0;
265 for (i = 0; i < MAX_BUFFER; i++) { 287 for (i = 0; i < MAX_BUFFER; i++) {
288 decoder->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
266 decoder->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR; 289 decoder->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
267 decoder->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR; 290 decoder->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
291 decoder->packet_buffer[i].instr_count = 0;
268 decoder->packet_buffer[i].last_instr_taken_branch = false; 292 decoder->packet_buffer[i].last_instr_taken_branch = false;
293 decoder->packet_buffer[i].last_instr_size = 0;
269 decoder->packet_buffer[i].exc = false; 294 decoder->packet_buffer[i].exc = false;
270 decoder->packet_buffer[i].exc_ret = false; 295 decoder->packet_buffer[i].exc_ret = false;
271 decoder->packet_buffer[i].cpu = INT_MIN; 296 decoder->packet_buffer[i].cpu = INT_MIN;
@@ -294,11 +319,15 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
294 decoder->packet_count++; 319 decoder->packet_count++;
295 320
296 decoder->packet_buffer[et].sample_type = sample_type; 321 decoder->packet_buffer[et].sample_type = sample_type;
322 decoder->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
297 decoder->packet_buffer[et].exc = false; 323 decoder->packet_buffer[et].exc = false;
298 decoder->packet_buffer[et].exc_ret = false; 324 decoder->packet_buffer[et].exc_ret = false;
299 decoder->packet_buffer[et].cpu = *((int *)inode->priv); 325 decoder->packet_buffer[et].cpu = *((int *)inode->priv);
300 decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR; 326 decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
301 decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR; 327 decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
328 decoder->packet_buffer[et].instr_count = 0;
329 decoder->packet_buffer[et].last_instr_taken_branch = false;
330 decoder->packet_buffer[et].last_instr_size = 0;
302 331
303 if (decoder->packet_count == MAX_BUFFER - 1) 332 if (decoder->packet_count == MAX_BUFFER - 1)
304 return OCSD_RESP_WAIT; 333 return OCSD_RESP_WAIT;
@@ -321,8 +350,28 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
321 350
322 packet = &decoder->packet_buffer[decoder->tail]; 351 packet = &decoder->packet_buffer[decoder->tail];
323 352
353 switch (elem->isa) {
354 case ocsd_isa_aarch64:
355 packet->isa = CS_ETM_ISA_A64;
356 break;
357 case ocsd_isa_arm:
358 packet->isa = CS_ETM_ISA_A32;
359 break;
360 case ocsd_isa_thumb2:
361 packet->isa = CS_ETM_ISA_T32;
362 break;
363 case ocsd_isa_tee:
364 case ocsd_isa_jazelle:
365 case ocsd_isa_custom:
366 case ocsd_isa_unknown:
367 default:
368 packet->isa = CS_ETM_ISA_UNKNOWN;
369 }
370
324 packet->start_addr = elem->st_addr; 371 packet->start_addr = elem->st_addr;
325 packet->end_addr = elem->en_addr; 372 packet->end_addr = elem->en_addr;
373 packet->instr_count = elem->num_instr_range;
374
326 switch (elem->last_i_type) { 375 switch (elem->last_i_type) {
327 case OCSD_INSTR_BR: 376 case OCSD_INSTR_BR:
328 case OCSD_INSTR_BR_INDIRECT: 377 case OCSD_INSTR_BR_INDIRECT:
@@ -336,6 +385,8 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
336 break; 385 break;
337 } 386 }
338 387
388 packet->last_instr_size = elem->last_instr_sz;
389
339 return ret; 390 return ret;
340} 391}
341 392
@@ -398,11 +449,20 @@ static int cs_etm_decoder__create_etm_packet_decoder(
398 struct cs_etm_decoder *decoder) 449 struct cs_etm_decoder *decoder)
399{ 450{
400 const char *decoder_name; 451 const char *decoder_name;
452 ocsd_etmv3_cfg config_etmv3;
401 ocsd_etmv4_cfg trace_config_etmv4; 453 ocsd_etmv4_cfg trace_config_etmv4;
402 void *trace_config; 454 void *trace_config;
403 u8 csid; 455 u8 csid;
404 456
405 switch (t_params->protocol) { 457 switch (t_params->protocol) {
458 case CS_ETM_PROTO_ETMV3:
459 case CS_ETM_PROTO_PTM:
460 cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
461 decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
462 OCSD_BUILTIN_DCD_ETMV3 :
463 OCSD_BUILTIN_DCD_PTM;
464 trace_config = &config_etmv3;
465 break;
406 case CS_ETM_PROTO_ETMV4i: 466 case CS_ETM_PROTO_ETMV4i:
407 cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4); 467 cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
408 decoder_name = OCSD_BUILTIN_DCD_ETMV4I; 468 decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
index 612b5755f742..b295dd2b8292 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
@@ -28,11 +28,21 @@ enum cs_etm_sample_type {
28 CS_ETM_TRACE_ON = 1 << 1, 28 CS_ETM_TRACE_ON = 1 << 1,
29}; 29};
30 30
31enum cs_etm_isa {
32 CS_ETM_ISA_UNKNOWN,
33 CS_ETM_ISA_A64,
34 CS_ETM_ISA_A32,
35 CS_ETM_ISA_T32,
36};
37
31struct cs_etm_packet { 38struct cs_etm_packet {
32 enum cs_etm_sample_type sample_type; 39 enum cs_etm_sample_type sample_type;
40 enum cs_etm_isa isa;
33 u64 start_addr; 41 u64 start_addr;
34 u64 end_addr; 42 u64 end_addr;
43 u32 instr_count;
35 u8 last_instr_taken_branch; 44 u8 last_instr_taken_branch;
45 u8 last_instr_size;
36 u8 exc; 46 u8 exc;
37 u8 exc_ret; 47 u8 exc_ret;
38 int cpu; 48 int cpu;
@@ -43,6 +53,13 @@ struct cs_etm_queue;
43typedef u32 (*cs_etm_mem_cb_type)(struct cs_etm_queue *, u64, 53typedef u32 (*cs_etm_mem_cb_type)(struct cs_etm_queue *, u64,
44 size_t, u8 *); 54 size_t, u8 *);
45 55
56struct cs_etmv3_trace_params {
57 u32 reg_ctrl;
58 u32 reg_trc_id;
59 u32 reg_ccer;
60 u32 reg_idr;
61};
62
46struct cs_etmv4_trace_params { 63struct cs_etmv4_trace_params {
47 u32 reg_idr0; 64 u32 reg_idr0;
48 u32 reg_idr1; 65 u32 reg_idr1;
@@ -55,6 +72,7 @@ struct cs_etmv4_trace_params {
55struct cs_etm_trace_params { 72struct cs_etm_trace_params {
56 int protocol; 73 int protocol;
57 union { 74 union {
75 struct cs_etmv3_trace_params etmv3;
58 struct cs_etmv4_trace_params etmv4; 76 struct cs_etmv4_trace_params etmv4;
59 }; 77 };
60}; 78};
@@ -78,6 +96,7 @@ enum {
78 CS_ETM_PROTO_ETMV3 = 1, 96 CS_ETM_PROTO_ETMV3 = 1,
79 CS_ETM_PROTO_ETMV4i, 97 CS_ETM_PROTO_ETMV4i,
80 CS_ETM_PROTO_ETMV4d, 98 CS_ETM_PROTO_ETMV4d,
99 CS_ETM_PROTO_PTM,
81}; 100};
82 101
83enum { 102enum {
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 73430b73570d..23159c33db2a 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -31,14 +31,6 @@
31 31
32#define MAX_TIMESTAMP (~0ULL) 32#define MAX_TIMESTAMP (~0ULL)
33 33
34/*
35 * A64 instructions are always 4 bytes
36 *
37 * Only A64 is supported, so can use this constant for converting between
38 * addresses and instruction counts, calculting offsets etc
39 */
40#define A64_INSTR_SIZE 4
41
42struct cs_etm_auxtrace { 34struct cs_etm_auxtrace {
43 struct auxtrace auxtrace; 35 struct auxtrace auxtrace;
44 struct auxtrace_queues queues; 36 struct auxtrace_queues queues;
@@ -91,6 +83,19 @@ static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
91static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm, 83static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
92 pid_t tid, u64 time_); 84 pid_t tid, u64 time_);
93 85
86/* PTMs ETMIDR [11:8] set to b0011 */
87#define ETMIDR_PTM_VERSION 0x00000300
88
89static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
90{
91 etmidr &= ETMIDR_PTM_VERSION;
92
93 if (etmidr == ETMIDR_PTM_VERSION)
94 return CS_ETM_PROTO_PTM;
95
96 return CS_ETM_PROTO_ETMV3;
97}
98
94static void cs_etm__packet_dump(const char *pkt_string) 99static void cs_etm__packet_dump(const char *pkt_string)
95{ 100{
96 const char *color = PERF_COLOR_BLUE; 101 const char *color = PERF_COLOR_BLUE;
@@ -122,15 +127,31 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
122 /* Use metadata to fill in trace parameters for trace decoder */ 127 /* Use metadata to fill in trace parameters for trace decoder */
123 t_params = zalloc(sizeof(*t_params) * etm->num_cpu); 128 t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
124 for (i = 0; i < etm->num_cpu; i++) { 129 for (i = 0; i < etm->num_cpu; i++) {
125 t_params[i].protocol = CS_ETM_PROTO_ETMV4i; 130 if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
126 t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0]; 131 u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
127 t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1]; 132
128 t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2]; 133 t_params[i].protocol =
129 t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8]; 134 cs_etm__get_v7_protocol_version(etmidr);
130 t_params[i].etmv4.reg_configr = 135 t_params[i].etmv3.reg_ctrl =
136 etm->metadata[i][CS_ETM_ETMCR];
137 t_params[i].etmv3.reg_trc_id =
138 etm->metadata[i][CS_ETM_ETMTRACEIDR];
139 } else if (etm->metadata[i][CS_ETM_MAGIC] ==
140 __perf_cs_etmv4_magic) {
141 t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
142 t_params[i].etmv4.reg_idr0 =
143 etm->metadata[i][CS_ETMV4_TRCIDR0];
144 t_params[i].etmv4.reg_idr1 =
145 etm->metadata[i][CS_ETMV4_TRCIDR1];
146 t_params[i].etmv4.reg_idr2 =
147 etm->metadata[i][CS_ETMV4_TRCIDR2];
148 t_params[i].etmv4.reg_idr8 =
149 etm->metadata[i][CS_ETMV4_TRCIDR8];
150 t_params[i].etmv4.reg_configr =
131 etm->metadata[i][CS_ETMV4_TRCCONFIGR]; 151 etm->metadata[i][CS_ETMV4_TRCCONFIGR];
132 t_params[i].etmv4.reg_traceidr = 152 t_params[i].etmv4.reg_traceidr =
133 etm->metadata[i][CS_ETMV4_TRCTRACEIDR]; 153 etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
154 }
134 } 155 }
135 156
136 /* Set decoder parameters to simply print the trace packets */ 157 /* Set decoder parameters to simply print the trace packets */
@@ -360,15 +381,31 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
360 goto out_free; 381 goto out_free;
361 382
362 for (i = 0; i < etm->num_cpu; i++) { 383 for (i = 0; i < etm->num_cpu; i++) {
363 t_params[i].protocol = CS_ETM_PROTO_ETMV4i; 384 if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
364 t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0]; 385 u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
365 t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1]; 386
366 t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2]; 387 t_params[i].protocol =
367 t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8]; 388 cs_etm__get_v7_protocol_version(etmidr);
368 t_params[i].etmv4.reg_configr = 389 t_params[i].etmv3.reg_ctrl =
390 etm->metadata[i][CS_ETM_ETMCR];
391 t_params[i].etmv3.reg_trc_id =
392 etm->metadata[i][CS_ETM_ETMTRACEIDR];
393 } else if (etm->metadata[i][CS_ETM_MAGIC] ==
394 __perf_cs_etmv4_magic) {
395 t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
396 t_params[i].etmv4.reg_idr0 =
397 etm->metadata[i][CS_ETMV4_TRCIDR0];
398 t_params[i].etmv4.reg_idr1 =
399 etm->metadata[i][CS_ETMV4_TRCIDR1];
400 t_params[i].etmv4.reg_idr2 =
401 etm->metadata[i][CS_ETMV4_TRCIDR2];
402 t_params[i].etmv4.reg_idr8 =
403 etm->metadata[i][CS_ETMV4_TRCIDR8];
404 t_params[i].etmv4.reg_configr =
369 etm->metadata[i][CS_ETMV4_TRCCONFIGR]; 405 etm->metadata[i][CS_ETMV4_TRCCONFIGR];
370 t_params[i].etmv4.reg_traceidr = 406 t_params[i].etmv4.reg_traceidr =
371 etm->metadata[i][CS_ETMV4_TRCTRACEIDR]; 407 etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
408 }
372 } 409 }
373 410
374 /* Set decoder parameters to simply print the trace packets */ 411 /* Set decoder parameters to simply print the trace packets */
@@ -510,21 +547,17 @@ static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
510 etmq->last_branch_rb->nr = 0; 547 etmq->last_branch_rb->nr = 0;
511} 548}
512 549
513static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet) 550static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
514{ 551 u64 addr) {
515 /* Returns 0 for the CS_ETM_TRACE_ON packet */ 552 u8 instrBytes[2];
516 if (packet->sample_type == CS_ETM_TRACE_ON)
517 return 0;
518 553
554 cs_etm__mem_access(etmq, addr, ARRAY_SIZE(instrBytes), instrBytes);
519 /* 555 /*
520 * The packet records the execution range with an exclusive end address 556 * T32 instruction size is indicated by bits[15:11] of the first
521 * 557 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
522 * A64 instructions are constant size, so the last executed 558 * denote a 32-bit instruction.
523 * instruction is A64_INSTR_SIZE before the end address
524 * Will need to do instruction level decode for T32 instructions as
525 * they can be variable size (not yet supported).
526 */ 559 */
527 return packet->end_addr - A64_INSTR_SIZE; 560 return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
528} 561}
529 562
530static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet) 563static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
@@ -536,27 +569,32 @@ static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
536 return packet->start_addr; 569 return packet->start_addr;
537} 570}
538 571
539static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet) 572static inline
573u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
540{ 574{
541 /* 575 /* Returns 0 for the CS_ETM_TRACE_ON packet */
542 * Only A64 instructions are currently supported, so can get 576 if (packet->sample_type == CS_ETM_TRACE_ON)
543 * instruction count by dividing. 577 return 0;
544 * Will need to do instruction level decode for T32 instructions as 578
545 * they can be variable size (not yet supported). 579 return packet->end_addr - packet->last_instr_size;
546 */
547 return (packet->end_addr - packet->start_addr) / A64_INSTR_SIZE;
548} 580}
549 581
550static inline u64 cs_etm__instr_addr(const struct cs_etm_packet *packet, 582static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
583 const struct cs_etm_packet *packet,
551 u64 offset) 584 u64 offset)
552{ 585{
553 /* 586 if (packet->isa == CS_ETM_ISA_T32) {
554 * Only A64 instructions are currently supported, so can get 587 u64 addr = packet->start_addr;
555 * instruction address by muliplying. 588
556 * Will need to do instruction level decode for T32 instructions as 589 while (offset > 0) {
557 * they can be variable size (not yet supported). 590 addr += cs_etm__t32_instr_size(etmq, addr);
558 */ 591 offset--;
559 return packet->start_addr + offset * A64_INSTR_SIZE; 592 }
593 return addr;
594 }
595
596 /* Assume a 4 byte instruction size (A32/A64) */
597 return packet->start_addr + offset * 4;
560} 598}
561 599
562static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq) 600static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq)
@@ -888,9 +926,8 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
888 struct cs_etm_auxtrace *etm = etmq->etm; 926 struct cs_etm_auxtrace *etm = etmq->etm;
889 struct cs_etm_packet *tmp; 927 struct cs_etm_packet *tmp;
890 int ret; 928 int ret;
891 u64 instrs_executed; 929 u64 instrs_executed = etmq->packet->instr_count;
892 930
893 instrs_executed = cs_etm__instr_count(etmq->packet);
894 etmq->period_instructions += instrs_executed; 931 etmq->period_instructions += instrs_executed;
895 932
896 /* 933 /*
@@ -920,7 +957,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
920 * executed, but PC has not advanced to next instruction) 957 * executed, but PC has not advanced to next instruction)
921 */ 958 */
922 u64 offset = (instrs_executed - instrs_over - 1); 959 u64 offset = (instrs_executed - instrs_over - 1);
923 u64 addr = cs_etm__instr_addr(etmq->packet, offset); 960 u64 addr = cs_etm__instr_addr(etmq, etmq->packet, offset);
924 961
925 ret = cs_etm__synth_instruction_sample( 962 ret = cs_etm__synth_instruction_sample(
926 etmq, addr, etm->instructions_sample_period); 963 etmq, addr, etm->instructions_sample_period);
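The T32 size check above inspects the first half-word of the instruction: the prefixes 0b11101, 0b11110 and 0b11111 in bits [15:11] mark the start of a 32-bit encoding, everything else is 16 bits. A self-contained sketch of the same rule (not part of the patch), with two worked half-words:

#include <stdint.h>
#include <stdio.h>

static int t32_instr_size(uint16_t first_halfword)
{
	/* bits [15:11] >= 0b11101 (0x1d) denote a 32-bit Thumb instruction */
	return (first_halfword >> 11) >= 0x1d ? 4 : 2;
}

int main(void)
{
	printf("%d\n", t32_instr_size(0xf000));	/* BL/BLX prefix -> 4 */
	printf("%d\n", t32_instr_size(0x4770));	/* BX lr         -> 2 */
	return 0;
}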
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index bbed90e5d9bb..cee717a3794f 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -295,7 +295,7 @@ static int decompress_kmodule(struct dso *dso, const char *name,
295 unlink(tmpbuf); 295 unlink(tmpbuf);
296 296
297 if (pathname && (fd >= 0)) 297 if (pathname && (fd >= 0))
298 strncpy(pathname, tmpbuf, len); 298 strlcpy(pathname, tmpbuf, len);
299 299
300 return fd; 300 return fd;
301} 301}
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 59f38c7693f8..4c23779e271a 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -166,7 +166,7 @@ const char *perf_env__arch(struct perf_env *env)
166 struct utsname uts; 166 struct utsname uts;
167 char *arch_name; 167 char *arch_name;
168 168
169 if (!env) { /* Assume local operation */ 169 if (!env || !env->arch) { /* Assume local operation */
170 if (uname(&uts) < 0) 170 if (uname(&uts) < 0)
171 return NULL; 171 return NULL;
172 arch_name = uts.machine; 172 arch_name = uts.machine;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index e9c108a6b1c3..937a5a4f71cc 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -25,6 +25,8 @@
25#include "asm/bug.h" 25#include "asm/bug.h"
26#include "stat.h" 26#include "stat.h"
27 27
28#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
29
28static const char *perf_event__names[] = { 30static const char *perf_event__names[] = {
29 [0] = "TOTAL", 31 [0] = "TOTAL",
30 [PERF_RECORD_MMAP] = "MMAP", 32 [PERF_RECORD_MMAP] = "MMAP",
@@ -72,6 +74,8 @@ static const char *perf_ns__names[] = {
72 [CGROUP_NS_INDEX] = "cgroup", 74 [CGROUP_NS_INDEX] = "cgroup",
73}; 75};
74 76
77unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
78
75const char *perf_event__name(unsigned int id) 79const char *perf_event__name(unsigned int id)
76{ 80{
77 if (id >= ARRAY_SIZE(perf_event__names)) 81 if (id >= ARRAY_SIZE(perf_event__names))
@@ -323,8 +327,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
323 pid_t pid, pid_t tgid, 327 pid_t pid, pid_t tgid,
324 perf_event__handler_t process, 328 perf_event__handler_t process,
325 struct machine *machine, 329 struct machine *machine,
326 bool mmap_data, 330 bool mmap_data)
327 unsigned int proc_map_timeout)
328{ 331{
329 char filename[PATH_MAX]; 332 char filename[PATH_MAX];
330 FILE *fp; 333 FILE *fp;
@@ -521,8 +524,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
521 perf_event__handler_t process, 524 perf_event__handler_t process,
522 struct perf_tool *tool, 525 struct perf_tool *tool,
523 struct machine *machine, 526 struct machine *machine,
524 bool mmap_data, 527 bool mmap_data)
525 unsigned int proc_map_timeout)
526{ 528{
527 char filename[PATH_MAX]; 529 char filename[PATH_MAX];
528 DIR *tasks; 530 DIR *tasks;
@@ -548,8 +550,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
548 */ 550 */
549 if (pid == tgid && 551 if (pid == tgid &&
550 perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 552 perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
551 process, machine, mmap_data, 553 process, machine, mmap_data))
552 proc_map_timeout))
553 return -1; 554 return -1;
554 555
555 return 0; 556 return 0;
@@ -598,7 +599,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
598 if (_pid == pid) { 599 if (_pid == pid) {
599 /* process the parent's maps too */ 600 /* process the parent's maps too */
600 rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 601 rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
601 process, machine, mmap_data, proc_map_timeout); 602 process, machine, mmap_data);
602 if (rc) 603 if (rc)
603 break; 604 break;
604 } 605 }
@@ -612,8 +613,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
612 struct thread_map *threads, 613 struct thread_map *threads,
613 perf_event__handler_t process, 614 perf_event__handler_t process,
614 struct machine *machine, 615 struct machine *machine,
615 bool mmap_data, 616 bool mmap_data)
616 unsigned int proc_map_timeout)
617{ 617{
618 union perf_event *comm_event, *mmap_event, *fork_event; 618 union perf_event *comm_event, *mmap_event, *fork_event;
619 union perf_event *namespaces_event; 619 union perf_event *namespaces_event;
@@ -643,7 +643,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
643 fork_event, namespaces_event, 643 fork_event, namespaces_event,
644 thread_map__pid(threads, thread), 0, 644 thread_map__pid(threads, thread), 0,
645 process, tool, machine, 645 process, tool, machine,
646 mmap_data, proc_map_timeout)) { 646 mmap_data)) {
647 err = -1; 647 err = -1;
648 break; 648 break;
649 } 649 }
@@ -669,7 +669,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
669 fork_event, namespaces_event, 669 fork_event, namespaces_event,
670 comm_event->comm.pid, 0, 670 comm_event->comm.pid, 0,
671 process, tool, machine, 671 process, tool, machine,
672 mmap_data, proc_map_timeout)) { 672 mmap_data)) {
673 err = -1; 673 err = -1;
674 break; 674 break;
675 } 675 }
@@ -690,7 +690,6 @@ static int __perf_event__synthesize_threads(struct perf_tool *tool,
690 perf_event__handler_t process, 690 perf_event__handler_t process,
691 struct machine *machine, 691 struct machine *machine,
692 bool mmap_data, 692 bool mmap_data,
693 unsigned int proc_map_timeout,
694 struct dirent **dirent, 693 struct dirent **dirent,
695 int start, 694 int start,
696 int num) 695 int num)
@@ -734,8 +733,7 @@ static int __perf_event__synthesize_threads(struct perf_tool *tool,
734 */ 733 */
735 __event__synthesize_thread(comm_event, mmap_event, fork_event, 734 __event__synthesize_thread(comm_event, mmap_event, fork_event,
736 namespaces_event, pid, 1, process, 735 namespaces_event, pid, 1, process,
737 tool, machine, mmap_data, 736 tool, machine, mmap_data);
738 proc_map_timeout);
739 } 737 }
740 err = 0; 738 err = 0;
741 739
@@ -755,7 +753,6 @@ struct synthesize_threads_arg {
755 perf_event__handler_t process; 753 perf_event__handler_t process;
756 struct machine *machine; 754 struct machine *machine;
757 bool mmap_data; 755 bool mmap_data;
758 unsigned int proc_map_timeout;
759 struct dirent **dirent; 756 struct dirent **dirent;
760 int num; 757 int num;
761 int start; 758 int start;
@@ -767,7 +764,7 @@ static void *synthesize_threads_worker(void *arg)
767 764
768 __perf_event__synthesize_threads(args->tool, args->process, 765 __perf_event__synthesize_threads(args->tool, args->process,
769 args->machine, args->mmap_data, 766 args->machine, args->mmap_data,
770 args->proc_map_timeout, args->dirent, 767 args->dirent,
771 args->start, args->num); 768 args->start, args->num);
772 return NULL; 769 return NULL;
773} 770}
@@ -776,7 +773,6 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
776 perf_event__handler_t process, 773 perf_event__handler_t process,
777 struct machine *machine, 774 struct machine *machine,
778 bool mmap_data, 775 bool mmap_data,
779 unsigned int proc_map_timeout,
780 unsigned int nr_threads_synthesize) 776 unsigned int nr_threads_synthesize)
781{ 777{
782 struct synthesize_threads_arg *args = NULL; 778 struct synthesize_threads_arg *args = NULL;
@@ -806,7 +802,6 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
806 if (thread_nr <= 1) { 802 if (thread_nr <= 1) {
807 err = __perf_event__synthesize_threads(tool, process, 803 err = __perf_event__synthesize_threads(tool, process,
808 machine, mmap_data, 804 machine, mmap_data,
809 proc_map_timeout,
810 dirent, base, n); 805 dirent, base, n);
811 goto free_dirent; 806 goto free_dirent;
812 } 807 }
@@ -828,7 +823,6 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
828 args[i].process = process; 823 args[i].process = process;
829 args[i].machine = machine; 824 args[i].machine = machine;
830 args[i].mmap_data = mmap_data; 825 args[i].mmap_data = mmap_data;
831 args[i].proc_map_timeout = proc_map_timeout;
832 args[i].dirent = dirent; 826 args[i].dirent = dirent;
833 } 827 }
834 for (i = 0; i < m; i++) { 828 for (i = 0; i < m; i++) {
@@ -1577,6 +1571,24 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
1577 return al->map; 1571 return al->map;
1578} 1572}
1579 1573
1574/*
1575 * For branch stacks or branch samples, the sample cpumode might not be correct
1576 * because it applies only to the sample 'ip' and not necessary to 'addr' or
1577 * branch stack addresses. If possible, use a fallback to deal with those cases.
1578 */
1579struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
1580 struct addr_location *al)
1581{
1582 struct map *map = thread__find_map(thread, cpumode, addr, al);
1583 struct machine *machine = thread->mg->machine;
1584 u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
1585
1586 if (map || addr_cpumode == cpumode)
1587 return map;
1588
1589 return thread__find_map(thread, addr_cpumode, addr, al);
1590}
1591
1580struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode, 1592struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
1581 u64 addr, struct addr_location *al) 1593 u64 addr, struct addr_location *al)
1582{ 1594{
@@ -1586,6 +1598,15 @@ struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
1586 return al->sym; 1598 return al->sym;
1587} 1599}
1588 1600
1601struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
1602 u64 addr, struct addr_location *al)
1603{
1604 al->sym = NULL;
1605 if (thread__find_map_fb(thread, cpumode, addr, al))
1606 al->sym = map__find_symbol(al->map, al->addr);
1607 return al->sym;
1608}
1609
1589/* 1610/*
1590 * Callers need to drop the reference to al->thread, obtained in 1611 * Callers need to drop the reference to al->thread, obtained in
1591 * machine__findnew_thread() 1612 * machine__findnew_thread()
@@ -1679,7 +1700,7 @@ bool sample_addr_correlates_sym(struct perf_event_attr *attr)
1679void thread__resolve(struct thread *thread, struct addr_location *al, 1700void thread__resolve(struct thread *thread, struct addr_location *al,
1680 struct perf_sample *sample) 1701 struct perf_sample *sample)
1681{ 1702{
1682 thread__find_map(thread, sample->cpumode, sample->addr, al); 1703 thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
1683 1704
1684 al->cpu = sample->cpu; 1705 al->cpu = sample->cpu;
1685 al->sym = NULL; 1706 al->sym = NULL;
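A hypothetical branch-target lookup (not from this patch) showing where the fallback helps: the sample cpumode describes the sample ip, so an address on the other side of the kernel boundary may miss on the first lookup and hit once the cpumode is rederived from the address itself:

static struct symbol *resolve_branch_to(struct thread *thread, u8 sample_cpumode,
					u64 branch_to, struct addr_location *al)
{
	/* retries with machine__addr_cpumode() internally if the first lookup misses */
	return thread__find_symbol_fb(thread, sample_cpumode, branch_to, al);
}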
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index bfa60bcafbde..eb95f3384958 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -669,8 +669,7 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool,
669int perf_event__synthesize_thread_map(struct perf_tool *tool, 669int perf_event__synthesize_thread_map(struct perf_tool *tool,
670 struct thread_map *threads, 670 struct thread_map *threads,
671 perf_event__handler_t process, 671 perf_event__handler_t process,
672 struct machine *machine, bool mmap_data, 672 struct machine *machine, bool mmap_data);
673 unsigned int proc_map_timeout);
674int perf_event__synthesize_thread_map2(struct perf_tool *tool, 673int perf_event__synthesize_thread_map2(struct perf_tool *tool,
675 struct thread_map *threads, 674 struct thread_map *threads,
676 perf_event__handler_t process, 675 perf_event__handler_t process,
@@ -682,7 +681,6 @@ int perf_event__synthesize_cpu_map(struct perf_tool *tool,
682int perf_event__synthesize_threads(struct perf_tool *tool, 681int perf_event__synthesize_threads(struct perf_tool *tool,
683 perf_event__handler_t process, 682 perf_event__handler_t process,
684 struct machine *machine, bool mmap_data, 683 struct machine *machine, bool mmap_data,
685 unsigned int proc_map_timeout,
686 unsigned int nr_threads_synthesize); 684 unsigned int nr_threads_synthesize);
687int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, 685int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
688 perf_event__handler_t process, 686 perf_event__handler_t process,
@@ -797,8 +795,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
797 pid_t pid, pid_t tgid, 795 pid_t pid, pid_t tgid,
798 perf_event__handler_t process, 796 perf_event__handler_t process,
799 struct machine *machine, 797 struct machine *machine,
800 bool mmap_data, 798 bool mmap_data);
801 unsigned int proc_map_timeout);
802 799
803int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, 800int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
804 perf_event__handler_t process, 801 perf_event__handler_t process,
@@ -829,5 +826,6 @@ int perf_event_paranoid(void);
829 826
830extern int sysctl_perf_event_max_stack; 827extern int sysctl_perf_event_max_stack;
831extern int sysctl_perf_event_max_contexts_per_stack; 828extern int sysctl_perf_event_max_contexts_per_stack;
829extern unsigned int proc_map_timeout;
832 830
833#endif /* __PERF_RECORD_H */ 831#endif /* __PERF_RECORD_H */
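A hypothetical call site (not part of this patch) under the new prototypes: callers no longer thread the timeout through every synthesis routine and instead set the proc_map_timeout global declared above once, e.g. from their record options:

static int synth_all_threads(struct perf_tool *tool, struct machine *machine)
{
	proc_map_timeout = 500;	/* e.g. from --proc-map-timeout or the config file */
	return perf_event__synthesize_threads(tool, perf_event__process,
					      machine, false /* mmap_data */,
					      1 /* nr_threads_synthesize */);
}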
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 36526d229315..e90575192209 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1018,7 +1018,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
1018 */ 1018 */
1019int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, 1019int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1020 unsigned int auxtrace_pages, 1020 unsigned int auxtrace_pages,
1021 bool auxtrace_overwrite) 1021 bool auxtrace_overwrite, int nr_cblocks)
1022{ 1022{
1023 struct perf_evsel *evsel; 1023 struct perf_evsel *evsel;
1024 const struct cpu_map *cpus = evlist->cpus; 1024 const struct cpu_map *cpus = evlist->cpus;
@@ -1028,7 +1028,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1028 * Its value is decided by evsel's write_backward. 1028 * Its value is decided by evsel's write_backward.
1029 * So &mp should not be passed through const pointer. 1029 * So &mp should not be passed through const pointer.
1030 */ 1030 */
1031 struct mmap_params mp; 1031 struct mmap_params mp = { .nr_cblocks = nr_cblocks };
1032 1032
1033 if (!evlist->mmap) 1033 if (!evlist->mmap)
1034 evlist->mmap = perf_evlist__alloc_mmap(evlist, false); 1034 evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
@@ -1060,7 +1060,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1060 1060
1061int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages) 1061int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
1062{ 1062{
1063 return perf_evlist__mmap_ex(evlist, pages, 0, false); 1063 return perf_evlist__mmap_ex(evlist, pages, 0, false, 0);
1064} 1064}
1065 1065
1066int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) 1066int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index d108d167eb36..868294491194 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -162,7 +162,7 @@ unsigned long perf_event_mlock_kb_in_pages(void);
162 162
163int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, 163int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
164 unsigned int auxtrace_pages, 164 unsigned int auxtrace_pages,
165 bool auxtrace_overwrite); 165 bool auxtrace_overwrite, int nr_cblocks);
166int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages); 166int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
167void perf_evlist__munmap(struct perf_evlist *evlist); 167void perf_evlist__munmap(struct perf_evlist *evlist);
168 168
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 3147ca76c6fc..82a289ce8b0c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -106,7 +106,7 @@ struct perf_evsel {
106 char *name; 106 char *name;
107 double scale; 107 double scale;
108 const char *unit; 108 const char *unit;
109 struct tep_event_format *tp_format; 109 struct tep_event *tp_format;
110 off_t id_offset; 110 off_t id_offset;
111 struct perf_stat_evsel *stats; 111 struct perf_stat_evsel *stats;
112 void *priv; 112 void *priv;
@@ -216,7 +216,7 @@ static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *
216 216
217struct perf_evsel *perf_evsel__new_cycles(bool precise); 217struct perf_evsel *perf_evsel__new_cycles(bool precise);
218 218
219struct tep_event_format *event_format__new(const char *sys, const char *name); 219struct tep_event *event_format__new(const char *sys, const char *name);
220 220
221void perf_evsel__init(struct perf_evsel *evsel, 221void perf_evsel__init(struct perf_evsel *evsel,
222 struct perf_event_attr *attr, int idx); 222 struct perf_event_attr *attr, int idx);
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 0d0a4c6f368b..95ea147f9e18 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -173,6 +173,7 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
173 if (!print_oneline) 173 if (!print_oneline)
174 printed += fprintf(fp, "\n"); 174 printed += fprintf(fp, "\n");
175 175
176 /* Add srccode here too? */
176 if (symbol_conf.bt_stop_list && 177 if (symbol_conf.bt_stop_list &&
177 node->sym && 178 node->sym &&
178 strlist__has_entry(symbol_conf.bt_stop_list, 179 strlist__has_entry(symbol_conf.bt_stop_list,
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index e31f52845e77..1171d8400bf4 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2798,7 +2798,7 @@ static int perf_header__adds_write(struct perf_header *header,
2798 lseek(fd, sec_start, SEEK_SET); 2798 lseek(fd, sec_start, SEEK_SET);
2799 /* 2799 /*
2800 * may write more than needed due to dropped feature, but 2800 * may write more than needed due to dropped feature, but
2801 * this is okay, reader will skip the mising entries 2801 * this is okay, reader will skip the missing entries
2802 */ 2802 */
2803 err = do_write(&ff, feat_sec, sec_size); 2803 err = do_write(&ff, feat_sec, sec_size);
2804 if (err < 0) 2804 if (err < 0)
@@ -3268,7 +3268,7 @@ static int read_attr(int fd, struct perf_header *ph,
3268static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, 3268static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
3269 struct tep_handle *pevent) 3269 struct tep_handle *pevent)
3270{ 3270{
3271 struct tep_event_format *event; 3271 struct tep_event *event;
3272 char bf[128]; 3272 char bf[128];
3273 3273
3274 /* already prepared */ 3274 /* already prepared */
@@ -3583,7 +3583,7 @@ perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3583 if (ev == NULL) 3583 if (ev == NULL)
3584 return -ENOMEM; 3584 return -ENOMEM;
3585 3585
3586 strncpy(ev->data, evsel->unit, size); 3586 strlcpy(ev->data, evsel->unit, size + 1);
3587 err = process(tool, (union perf_event *)ev, NULL, NULL); 3587 err = process(tool, (union perf_event *)ev, NULL, NULL);
3588 free(ev); 3588 free(ev);
3589 return err; 3589 return err;
@@ -3622,7 +3622,7 @@ perf_event__synthesize_event_update_name(struct perf_tool *tool,
3622 if (ev == NULL) 3622 if (ev == NULL)
3623 return -ENOMEM; 3623 return -ENOMEM;
3624 3624
3625 strncpy(ev->data, evsel->name, len); 3625 strlcpy(ev->data, evsel->name, len + 1);
3626 err = process(tool, (union perf_event*) ev, NULL, NULL); 3626 err = process(tool, (union perf_event*) ev, NULL, NULL);
3627 free(ev); 3627 free(ev);
3628 return err; 3628 return err;
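The switch from strncpy() to strlcpy() above guarantees NUL termination: with a source at least as long as the bound, strncpy() writes no terminator, while strlcpy() always terminates within its bound (the bound can grow to size + 1 here, assuming the buffer was allocated with room for the terminator). A standalone illustration, not from the patch; strlcpy() comes from the tools/lib fallback or libbsd, since glibc does not provide it:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char dst1[8], dst2[8];

	strncpy(dst1, "12345678", sizeof(dst1));	/* no NUL written */
	strlcpy(dst2, "12345678", sizeof(dst2));	/* "1234567" plus NUL */

	printf("%.8s / %s\n", dst1, dst2);
	return 0;
}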
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 828cb9794c76..8aad8330e392 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1160,7 +1160,7 @@ void hist_entry__delete(struct hist_entry *he)
1160 1160
1161/* 1161/*
1162 * If this is not the last column, then we need to pad it according to the 1162 * If this is not the last column, then we need to pad it according to the
1163 * pre-calculated max lenght for this column, otherwise don't bother adding 1163 * pre-calculated max length for this column, otherwise don't bother adding
1164 * spaces because that would break viewing this with, for instance, 'less', 1164 * spaces because that would break viewing this with, for instance, 'less',
1165 * that would show tons of trailing spaces when a long C++ demangled method 1165 * that would show tons of trailing spaces when a long C++ demangled method
1166 * names is sampled. 1166 * names is sampled.
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 3badd7f1e1b8..664b5eda8d51 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -62,6 +62,7 @@ enum hist_column {
62 HISTC_TRACE, 62 HISTC_TRACE,
63 HISTC_SYM_SIZE, 63 HISTC_SYM_SIZE,
64 HISTC_DSO_SIZE, 64 HISTC_DSO_SIZE,
65 HISTC_SYMBOL_IPC,
65 HISTC_NR_COLS, /* Last entry */ 66 HISTC_NR_COLS, /* Last entry */
66}; 67};
67 68
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index a1863000e972..bf249552a9b0 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -38,7 +38,7 @@ struct jit_buf_desc {
38 uint64_t sample_type; 38 uint64_t sample_type;
39 size_t bufsize; 39 size_t bufsize;
40 FILE *in; 40 FILE *in;
41 bool needs_bswap; /* handles cross-endianess */ 41 bool needs_bswap; /* handles cross-endianness */
42 bool use_arch_timestamp; 42 bool use_arch_timestamp;
43 void *debug_data; 43 void *debug_data;
44 void *unwinding_data; 44 void *unwinding_data;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 8f36ce813bc5..6fcb3bce0442 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -137,7 +137,7 @@ struct machine *machine__new_kallsyms(void)
137 struct machine *machine = machine__new_host(); 137 struct machine *machine = machine__new_host();
138 /* 138 /*
139 * FIXME: 139 * FIXME:
140 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitely 140 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
141 * ask for not using the kcore parsing code, once this one is fixed 141 * ask for not using the kcore parsing code, once this one is fixed
142 * to create a map per module. 142 * to create a map per module.
143 */ 143 */
@@ -2493,15 +2493,13 @@ int machines__for_each_thread(struct machines *machines,
2493int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 2493int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
2494 struct target *target, struct thread_map *threads, 2494 struct target *target, struct thread_map *threads,
2495 perf_event__handler_t process, bool data_mmap, 2495 perf_event__handler_t process, bool data_mmap,
2496 unsigned int proc_map_timeout,
2497 unsigned int nr_threads_synthesize) 2496 unsigned int nr_threads_synthesize)
2498{ 2497{
2499 if (target__has_task(target)) 2498 if (target__has_task(target))
2500 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout); 2499 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
2501 else if (target__has_cpu(target)) 2500 else if (target__has_cpu(target))
2502 return perf_event__synthesize_threads(tool, process, 2501 return perf_event__synthesize_threads(tool, process,
2503 machine, data_mmap, 2502 machine, data_mmap,
2504 proc_map_timeout,
2505 nr_threads_synthesize); 2503 nr_threads_synthesize);
2506 /* command specified */ 2504 /* command specified */
2507 return 0; 2505 return 0;
@@ -2592,6 +2590,33 @@ int machine__get_kernel_start(struct machine *machine)
2592 return err; 2590 return err;
2593} 2591}
2594 2592
2593u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
2594{
2595 u8 addr_cpumode = cpumode;
2596 bool kernel_ip;
2597
2598 if (!machine->single_address_space)
2599 goto out;
2600
2601 kernel_ip = machine__kernel_ip(machine, addr);
2602 switch (cpumode) {
2603 case PERF_RECORD_MISC_KERNEL:
2604 case PERF_RECORD_MISC_USER:
2605 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
2606 PERF_RECORD_MISC_USER;
2607 break;
2608 case PERF_RECORD_MISC_GUEST_KERNEL:
2609 case PERF_RECORD_MISC_GUEST_USER:
2610 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
2611 PERF_RECORD_MISC_GUEST_USER;
2612 break;
2613 default:
2614 break;
2615 }
2616out:
2617 return addr_cpumode;
2618}
2619
2595struct dso *machine__findnew_dso(struct machine *machine, const char *filename) 2620struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
2596{ 2621{
2597 return dsos__findnew(&machine->dsos, filename); 2622 return dsos__findnew(&machine->dsos, filename);
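A short usage sketch of machine__addr_cpumode() added above (hypothetical helper, not from this patch): on a machine flagged as single_address_space the returned mode follows the address itself, keeping only the host/guest side of the original cpumode:

static bool addr_is_kernel_mode(struct machine *machine, u64 addr)
{
	/* the sample header claimed "user"; rederive the mode from the address */
	u8 mode = machine__addr_cpumode(machine, PERF_RECORD_MISC_USER, addr);

	return mode == PERF_RECORD_MISC_KERNEL;
}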
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index d856b85862e2..a5d1da60f751 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -42,6 +42,7 @@ struct machine {
42 u16 id_hdr_size; 42 u16 id_hdr_size;
43 bool comm_exec; 43 bool comm_exec;
44 bool kptr_restrict_warned; 44 bool kptr_restrict_warned;
45 bool single_address_space;
45 char *root_dir; 46 char *root_dir;
46 char *mmap_name; 47 char *mmap_name;
47 struct threads threads[THREADS__TABLE_SIZE]; 48 struct threads threads[THREADS__TABLE_SIZE];
@@ -99,6 +100,8 @@ static inline bool machine__kernel_ip(struct machine *machine, u64 ip)
99 return ip >= kernel_start; 100 return ip >= kernel_start;
100} 101}
101 102
103u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr);
104
102struct thread *machine__find_thread(struct machine *machine, pid_t pid, 105struct thread *machine__find_thread(struct machine *machine, pid_t pid,
103 pid_t tid); 106 pid_t tid);
104struct comm *machine__thread_exec_comm(struct machine *machine, 107struct comm *machine__thread_exec_comm(struct machine *machine,
@@ -247,17 +250,14 @@ int machines__for_each_thread(struct machines *machines,
247int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 250int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
248 struct target *target, struct thread_map *threads, 251 struct target *target, struct thread_map *threads,
249 perf_event__handler_t process, bool data_mmap, 252 perf_event__handler_t process, bool data_mmap,
250 unsigned int proc_map_timeout,
251 unsigned int nr_threads_synthesize); 253 unsigned int nr_threads_synthesize);
252static inline 254static inline
253int machine__synthesize_threads(struct machine *machine, struct target *target, 255int machine__synthesize_threads(struct machine *machine, struct target *target,
254 struct thread_map *threads, bool data_mmap, 256 struct thread_map *threads, bool data_mmap,
255 unsigned int proc_map_timeout,
256 unsigned int nr_threads_synthesize) 257 unsigned int nr_threads_synthesize)
257{ 258{
258 return __machine__synthesize_threads(machine, NULL, target, threads, 259 return __machine__synthesize_threads(machine, NULL, target, threads,
259 perf_event__process, data_mmap, 260 perf_event__process, data_mmap,
260 proc_map_timeout,
261 nr_threads_synthesize); 261 nr_threads_synthesize);
262} 262}
263 263
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 781eed8e3265..6751301a755c 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -19,6 +19,7 @@
19#include "srcline.h" 19#include "srcline.h"
20#include "namespaces.h" 20#include "namespaces.h"
21#include "unwind.h" 21#include "unwind.h"
22#include "srccode.h"
22 23
23static void __maps__insert(struct maps *maps, struct map *map); 24static void __maps__insert(struct maps *maps, struct map *map);
24static void __maps__insert_name(struct maps *maps, struct map *map); 25static void __maps__insert_name(struct maps *maps, struct map *map);
@@ -421,6 +422,54 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
421 return ret; 422 return ret;
422} 423}
423 424
425int map__fprintf_srccode(struct map *map, u64 addr,
426 FILE *fp,
427 struct srccode_state *state)
428{
429 char *srcfile;
430 int ret = 0;
431 unsigned line;
432 int len;
433 char *srccode;
434
435 if (!map || !map->dso)
436 return 0;
437 srcfile = get_srcline_split(map->dso,
438 map__rip_2objdump(map, addr),
439 &line);
440 if (!srcfile)
441 return 0;
442
443 /* Avoid redundant printing */
444 if (state &&
445 state->srcfile &&
446 !strcmp(state->srcfile, srcfile) &&
447 state->line == line) {
448 free(srcfile);
449 return 0;
450 }
451
452 srccode = find_sourceline(srcfile, line, &len);
453 if (!srccode)
454 goto out_free_line;
455
456 ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
457 state->srcfile = srcfile;
458 state->line = line;
459 return ret;
460
461out_free_line:
462 free(srcfile);
463 return ret;
464}
465
466
467void srccode_state_free(struct srccode_state *state)
468{
469 zfree(&state->srcfile);
470 state->line = 0;
471}
472
424/** 473/**
425 * map__rip_2objdump - convert symbol start address to objdump address. 474 * map__rip_2objdump - convert symbol start address to objdump address.
426 * @map: memory map 475 * @map: memory map
@@ -873,19 +922,18 @@ void maps__remove(struct maps *maps, struct map *map)
873 922
874struct map *maps__find(struct maps *maps, u64 ip) 923struct map *maps__find(struct maps *maps, u64 ip)
875{ 924{
876 struct rb_node **p, *parent = NULL; 925 struct rb_node *p;
877 struct map *m; 926 struct map *m;
878 927
879 down_read(&maps->lock); 928 down_read(&maps->lock);
880 929
881 p = &maps->entries.rb_node; 930 p = maps->entries.rb_node;
882 while (*p != NULL) { 931 while (p != NULL) {
883 parent = *p; 932 m = rb_entry(p, struct map, rb_node);
884 m = rb_entry(parent, struct map, rb_node);
885 if (ip < m->start) 933 if (ip < m->start)
886 p = &(*p)->rb_left; 934 p = p->rb_left;
887 else if (ip >= m->end) 935 else if (ip >= m->end)
888 p = &(*p)->rb_right; 936 p = p->rb_right;
889 else 937 else
890 goto out; 938 goto out;
891 } 939 }
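A hypothetical caller (not part of this patch) of the source-code printing added above; the caller owns a struct srccode_state so consecutive samples hitting the same source line are printed only once:

static void dump_srccode(struct map *map, u64 addr, FILE *fp)
{
	struct srccode_state state;

	srccode_state_init(&state);	/* declared in map.h below */
	map__fprintf_srccode(map, addr, fp, &state);
	srccode_state_free(&state);
}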
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 5c792c90fc4c..09282aa45c80 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -174,6 +174,22 @@ char *map__srcline(struct map *map, u64 addr, struct symbol *sym);
174int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, 174int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
175 FILE *fp); 175 FILE *fp);
176 176
177struct srccode_state {
178 char *srcfile;
179 unsigned line;
180};
181
182static inline void srccode_state_init(struct srccode_state *state)
183{
184 state->srcfile = NULL;
185 state->line = 0;
186}
187
188void srccode_state_free(struct srccode_state *state);
189
190int map__fprintf_srccode(struct map *map, u64 addr,
191 FILE *fp, struct srccode_state *state);
192
177int map__load(struct map *map); 193int map__load(struct map *map);
178struct symbol *map__find_symbol(struct map *map, u64 addr); 194struct symbol *map__find_symbol(struct map *map, u64 addr);
179struct symbol *map__find_symbol_by_name(struct map *map, const char *name); 195struct symbol *map__find_symbol_by_name(struct map *map, const char *name);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index cdb95b3a1213..8fc39311a30d 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -153,8 +153,158 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
153{ 153{
154} 154}
155 155
156#ifdef HAVE_AIO_SUPPORT
157static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
158{
159 int delta_max, i, prio;
160
161 map->aio.nr_cblocks = mp->nr_cblocks;
162 if (map->aio.nr_cblocks) {
163 map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
164 if (!map->aio.aiocb) {
165 pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
166 return -1;
167 }
168 map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
169 if (!map->aio.cblocks) {
170 pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
171 return -1;
172 }
173 map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
174 if (!map->aio.data) {
175 pr_debug2("failed to allocate data buffer, error %m\n");
176 return -1;
177 }
178 delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
179 for (i = 0; i < map->aio.nr_cblocks; ++i) {
180 map->aio.data[i] = malloc(perf_mmap__mmap_len(map));
181 if (!map->aio.data[i]) {
182 pr_debug2("failed to allocate data buffer area, error %m");
183 return -1;
184 }
185 /*
186 * Use cblock.aio_fildes value different from -1
187 * to denote started aio write operation on the
188 * cblock so it requires explicit record__aio_sync()
189 * call prior the cblock may be reused again.
190 */
191 map->aio.cblocks[i].aio_fildes = -1;
192 /*
193 * Allocate cblocks with priority delta to have
194 * faster aio write system calls because queued requests
195 * are kept in separate per-prio queues and adding
196 * a new request will iterate thru shorter per-prio
197 * list. Blocks with numbers higher than
198 * _SC_AIO_PRIO_DELTA_MAX go with priority 0.
199 */
200 prio = delta_max - i;
201 map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
202 }
203 }
204
205 return 0;
206}
207
208static void perf_mmap__aio_munmap(struct perf_mmap *map)
209{
210 int i;
211
212 for (i = 0; i < map->aio.nr_cblocks; ++i)
213 zfree(&map->aio.data[i]);
214 if (map->aio.data)
215 zfree(&map->aio.data);
216 zfree(&map->aio.cblocks);
217 zfree(&map->aio.aiocb);
218}
219
220int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
221 int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
222 off_t *off)
223{
224 u64 head = perf_mmap__read_head(md);
225 unsigned char *data = md->base + page_size;
226 unsigned long size, size0 = 0;
227 void *buf;
228 int rc = 0;
229
230 rc = perf_mmap__read_init(md);
231 if (rc < 0)
232 return (rc == -EAGAIN) ? 0 : -1;
233
234 /*
235 * md->base data is copied into md->data[idx] buffer to
236 * release space in the kernel buffer as fast as possible,
237 * thru perf_mmap__consume() below.
238 *
239 * That lets the kernel to proceed with storing more
240 * profiling data into the kernel buffer earlier than other
241 * per-cpu kernel buffers are handled.
242 *
243 * Coping can be done in two steps in case the chunk of
244 * profiling data crosses the upper bound of the kernel buffer.
245 * In this case we first move part of data from md->start
246 * till the upper bound and then the reminder from the
247 * beginning of the kernel buffer till the end of
248 * the data chunk.
249 */
250
251 size = md->end - md->start;
252
253 if ((md->start & md->mask) + size != (md->end & md->mask)) {
254 buf = &data[md->start & md->mask];
255 size = md->mask + 1 - (md->start & md->mask);
256 md->start += size;
257 memcpy(md->aio.data[idx], buf, size);
258 size0 = size;
259 }
260
261 buf = &data[md->start & md->mask];
262 size = md->end - md->start;
263 md->start += size;
264 memcpy(md->aio.data[idx] + size0, buf, size);
265
266 /*
267 * Increment md->refcount to guard md->data[idx] buffer
268 * from premature deallocation because md object can be
269 * released earlier than aio write request started
270 * on mmap->data[idx] is complete.
271 *
272 * perf_mmap__put() is done at record__aio_complete()
273 * after started request completion.
274 */
275 perf_mmap__get(md);
276
277 md->prev = head;
278 perf_mmap__consume(md);
279
280 rc = push(to, &md->aio.cblocks[idx], md->aio.data[idx], size0 + size, *off);
281 if (!rc) {
282 *off += size0 + size;
283 } else {
284 /*
285 * Decrement md->refcount back if aio write
286 * operation failed to start.
287 */
288 perf_mmap__put(md);
289 }
290
291 return rc;
292}
293#else
294static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
295 struct mmap_params *mp __maybe_unused)
296{
297 return 0;
298}
299
300static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused)
301{
302}
303#endif
304
156void perf_mmap__munmap(struct perf_mmap *map) 305void perf_mmap__munmap(struct perf_mmap *map)
157{ 306{
307 perf_mmap__aio_munmap(map);
158 if (map->base != NULL) { 308 if (map->base != NULL) {
159 munmap(map->base, perf_mmap__mmap_len(map)); 309 munmap(map->base, perf_mmap__mmap_len(map));
160 map->base = NULL; 310 map->base = NULL;
@@ -197,7 +347,7 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
197 &mp->auxtrace_mp, map->base, fd)) 347 &mp->auxtrace_mp, map->base, fd))
198 return -1; 348 return -1;
199 349
200 return 0; 350 return perf_mmap__aio_mmap(map, mp);
201} 351}
202 352
203static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end) 353static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
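The interesting part of perf_mmap__aio_push() above is the two-step copy: when the [start, end) window wraps past the end of the power-of-two ring, the tail of the ring is copied first and the remainder from the ring's beginning second. A self-contained sketch of just that arithmetic (hypothetical helper, not from the patch):

#include <string.h>

static void copy_from_ring(void *dst, const unsigned char *ring,
			   unsigned long mask,	/* ring size minus one */
			   unsigned long start, unsigned long end)
{
	unsigned long size = end - start, size0 = 0;

	if ((start & mask) + size != (end & mask)) {
		/* window wraps: first copy from 'start' up to the ring's end */
		size0 = mask + 1 - (start & mask);
		memcpy(dst, &ring[start & mask], size0);
		start += size0;
	}
	/* remainder (or the whole window when it did not wrap) */
	memcpy((unsigned char *)dst + size0, &ring[start & mask], end - start);
}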
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index cc5e2d6d17a9..aeb6942fdb00 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -6,9 +6,13 @@
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/ring_buffer.h> 7#include <linux/ring_buffer.h>
8#include <stdbool.h> 8#include <stdbool.h>
9#ifdef HAVE_AIO_SUPPORT
10#include <aio.h>
11#endif
9#include "auxtrace.h" 12#include "auxtrace.h"
10#include "event.h" 13#include "event.h"
11 14
15struct aiocb;
12/** 16/**
13 * struct perf_mmap - perf's ring buffer mmap details 17 * struct perf_mmap - perf's ring buffer mmap details
14 * 18 *
@@ -26,6 +30,14 @@ struct perf_mmap {
26 bool overwrite; 30 bool overwrite;
27 struct auxtrace_mmap auxtrace_mmap; 31 struct auxtrace_mmap auxtrace_mmap;
28 char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8); 32 char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
33#ifdef HAVE_AIO_SUPPORT
34 struct {
35 void **data;
36 struct aiocb *cblocks;
37 struct aiocb **aiocb;
38 int nr_cblocks;
39 } aio;
40#endif
29}; 41};
30 42
31/* 43/*
@@ -57,7 +69,7 @@ enum bkw_mmap_state {
57}; 69};
58 70
59struct mmap_params { 71struct mmap_params {
60 int prot, mask; 72 int prot, mask, nr_cblocks;
61 struct auxtrace_mmap_params auxtrace_mp; 73 struct auxtrace_mmap_params auxtrace_mp;
62}; 74};
63 75
@@ -85,6 +97,18 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map);
85 97
86int perf_mmap__push(struct perf_mmap *md, void *to, 98int perf_mmap__push(struct perf_mmap *md, void *to,
87 int push(struct perf_mmap *map, void *to, void *buf, size_t size)); 99 int push(struct perf_mmap *map, void *to, void *buf, size_t size));
100#ifdef HAVE_AIO_SUPPORT
101int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
102 int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
103 off_t *off);
104#else
105static inline int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused, int idx __maybe_unused,
106 int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off) __maybe_unused,
107 off_t *off __maybe_unused)
108{
109 return 0;
110}
111#endif
88 112
89size_t perf_mmap__mmap_len(struct perf_mmap *map); 113size_t perf_mmap__mmap_len(struct perf_mmap *map);
90 114
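The aio.data[]/aio.cblocks[] pair added to struct perf_mmap above lets perf_mmap__aio_push() copy a ring-buffer chunk into a per-mmap buffer and hand it, together with an aiocb, to a caller-supplied push() that starts an asynchronous write; *off only advances when the write was queued, otherwise the extra reference taken on the mmap is dropped again (the perf_mmap__put() in the hunk above). A minimal sketch of such a push() callback built on POSIX AIO follows; the name record__aio_write and the idea that `to` carries the output file descriptor are illustrative assumptions, not taken from this patch.

#include <aio.h>
#include <signal.h>
#include <string.h>

/* Hypothetical push() callback for perf_mmap__aio_push(): start one
 * asynchronous write of `size` bytes from `buf` at file offset `off`.
 * Assumes `to` points at the output file descriptor. */
static int record__aio_write(void *to, struct aiocb *cblock, void *buf,
			     size_t size, off_t off)
{
	int fd = *(int *)to;

	memset(cblock, 0, sizeof(*cblock));
	cblock->aio_fildes = fd;
	cblock->aio_buf    = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	/* aio_write() only starts the I/O; completion is collected later
	 * with aio_error()/aio_return() on the same control block. */
	return aio_write(cblock) ? -1 : 0;
}

A production version would likely retry on EAGAIN/EINTR before giving up; this sketch just reports failure so the caller can drop its reference as shown above.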
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index 1904e7f6ec84..897589507d97 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -219,13 +219,12 @@ int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
219 return 0; 219 return 0;
220} 220}
221 221
222static int __ordered_events__flush(struct ordered_events *oe) 222static int do_flush(struct ordered_events *oe, bool show_progress)
223{ 223{
224 struct list_head *head = &oe->events; 224 struct list_head *head = &oe->events;
225 struct ordered_event *tmp, *iter; 225 struct ordered_event *tmp, *iter;
226 u64 limit = oe->next_flush; 226 u64 limit = oe->next_flush;
227 u64 last_ts = oe->last ? oe->last->timestamp : 0ULL; 227 u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
228 bool show_progress = limit == ULLONG_MAX;
229 struct ui_progress prog; 228 struct ui_progress prog;
230 int ret; 229 int ret;
231 230
@@ -263,7 +262,8 @@ static int __ordered_events__flush(struct ordered_events *oe)
263 return 0; 262 return 0;
264} 263}
265 264
266int ordered_events__flush(struct ordered_events *oe, enum oe_flush how) 265static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
266 u64 timestamp)
267{ 267{
268 static const char * const str[] = { 268 static const char * const str[] = {
269 "NONE", 269 "NONE",
@@ -272,12 +272,16 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
272 "HALF ", 272 "HALF ",
273 }; 273 };
274 int err; 274 int err;
275 bool show_progress = false;
275 276
276 if (oe->nr_events == 0) 277 if (oe->nr_events == 0)
277 return 0; 278 return 0;
278 279
279 switch (how) { 280 switch (how) {
280 case OE_FLUSH__FINAL: 281 case OE_FLUSH__FINAL:
282 show_progress = true;
283 __fallthrough;
284 case OE_FLUSH__TOP:
281 oe->next_flush = ULLONG_MAX; 285 oe->next_flush = ULLONG_MAX;
282 break; 286 break;
283 287
@@ -298,6 +302,11 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
298 break; 302 break;
299 } 303 }
300 304
305 case OE_FLUSH__TIME:
306 oe->next_flush = timestamp;
307 show_progress = false;
308 break;
309
301 case OE_FLUSH__ROUND: 310 case OE_FLUSH__ROUND:
302 case OE_FLUSH__NONE: 311 case OE_FLUSH__NONE:
303 default: 312 default:
@@ -308,7 +317,7 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
308 str[how], oe->nr_events); 317 str[how], oe->nr_events);
309 pr_oe_time(oe->max_timestamp, "max_timestamp\n"); 318 pr_oe_time(oe->max_timestamp, "max_timestamp\n");
310 319
311 err = __ordered_events__flush(oe); 320 err = do_flush(oe, show_progress);
312 321
313 if (!err) { 322 if (!err) {
314 if (how == OE_FLUSH__ROUND) 323 if (how == OE_FLUSH__ROUND)
@@ -324,7 +333,29 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
324 return err; 333 return err;
325} 334}
326 335
327void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver) 336int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
337{
338 return __ordered_events__flush(oe, how, 0);
339}
340
341int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
342{
343 return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
344}
345
346u64 ordered_events__first_time(struct ordered_events *oe)
347{
348 struct ordered_event *event;
349
350 if (list_empty(&oe->events))
351 return 0;
352
353 event = list_first_entry(&oe->events, struct ordered_event, list);
354 return event->timestamp;
355}
356
357void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
358 void *data)
328{ 359{
329 INIT_LIST_HEAD(&oe->events); 360 INIT_LIST_HEAD(&oe->events);
330 INIT_LIST_HEAD(&oe->cache); 361 INIT_LIST_HEAD(&oe->cache);
@@ -332,6 +363,7 @@ void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t d
332 oe->max_alloc_size = (u64) -1; 363 oe->max_alloc_size = (u64) -1;
333 oe->cur_alloc_size = 0; 364 oe->cur_alloc_size = 0;
334 oe->deliver = deliver; 365 oe->deliver = deliver;
366 oe->data = data;
335} 367}
336 368
337static void 369static void
@@ -375,5 +407,5 @@ void ordered_events__reinit(struct ordered_events *oe)
375 407
376 ordered_events__free(oe); 408 ordered_events__free(oe);
377 memset(oe, '\0', sizeof(*oe)); 409 memset(oe, '\0', sizeof(*oe));
378 ordered_events__init(oe, old_deliver); 410 ordered_events__init(oe, old_deliver, oe->data);
379} 411}
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
index 1338d5c345dc..0920fb0ec6cc 100644
--- a/tools/perf/util/ordered-events.h
+++ b/tools/perf/util/ordered-events.h
@@ -18,6 +18,8 @@ enum oe_flush {
18 OE_FLUSH__FINAL, 18 OE_FLUSH__FINAL,
19 OE_FLUSH__ROUND, 19 OE_FLUSH__ROUND,
20 OE_FLUSH__HALF, 20 OE_FLUSH__HALF,
21 OE_FLUSH__TOP,
22 OE_FLUSH__TIME,
21}; 23};
22 24
23struct ordered_events; 25struct ordered_events;
@@ -47,15 +49,19 @@ struct ordered_events {
47 enum oe_flush last_flush_type; 49 enum oe_flush last_flush_type;
48 u32 nr_unordered_events; 50 u32 nr_unordered_events;
49 bool copy_on_queue; 51 bool copy_on_queue;
52 void *data;
50}; 53};
51 54
52int ordered_events__queue(struct ordered_events *oe, union perf_event *event, 55int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
53 u64 timestamp, u64 file_offset); 56 u64 timestamp, u64 file_offset);
54void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event); 57void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
55int ordered_events__flush(struct ordered_events *oe, enum oe_flush how); 58int ordered_events__flush(struct ordered_events *oe, enum oe_flush how);
56void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver); 59int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp);
60void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
61 void *data);
57void ordered_events__free(struct ordered_events *oe); 62void ordered_events__free(struct ordered_events *oe);
58void ordered_events__reinit(struct ordered_events *oe); 63void ordered_events__reinit(struct ordered_events *oe);
64u64 ordered_events__first_time(struct ordered_events *oe);
59 65
60static inline 66static inline
61void ordered_events__set_alloc_size(struct ordered_events *oe, u64 size) 67void ordered_events__set_alloc_size(struct ordered_events *oe, u64 size)
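OE_FLUSH__TOP and OE_FLUSH__TIME above give callers two lighter flush modes: TOP drains everything without the progress bar, while TIME drains only events with timestamps up to a caller-supplied limit, and ordered_events__first_time() exposes the oldest queued timestamp. A small sketch of how a consumer might combine the new calls; the half-window policy and the function name are illustrative only, and the perf-internal types come from the usual util headers.

#include "ordered-events.h"   /* struct ordered_events, u64, new flush calls */

/* Illustrative: flush queued events up to the midpoint between the oldest
 * queued timestamp and the newest one seen, keeping the rest buffered. */
static int flush_half_window(struct ordered_events *oe, u64 newest)
{
	u64 oldest = ordered_events__first_time(oe);

	if (!oldest)                    /* queue is empty */
		return 0;

	return ordered_events__flush_time(oe, oldest + (newest - oldest) / 2);
}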
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 59be3466d64d..920e1e6551dd 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2462,7 +2462,7 @@ restart:
2462 if (!name_only && strlen(syms->alias)) 2462 if (!name_only && strlen(syms->alias))
2463 snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); 2463 snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
2464 else 2464 else
2465 strncpy(name, syms->symbol, MAX_NAME_LEN); 2465 strlcpy(name, syms->symbol, MAX_NAME_LEN);
2466 2466
2467 evt_list[evt_i] = strdup(name); 2467 evt_list[evt_i] = strdup(name);
2468 if (evt_list[evt_i] == NULL) 2468 if (evt_list[evt_i] == NULL)
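The strncpy()-to-strlcpy() conversions in this series (here, and in probe-file.c and svghelper.c below) silence gcc 8.2's -Wstringop-truncation warnings and, more importantly, guarantee the destination is NUL-terminated even when the source fills the whole buffer. strlcpy() here is the BSD-style helper perf's tools tree carries, not a glibc function; a small illustration of the difference:

#include <string.h>           /* strncpy(); strlcpy() comes from tools' linux/string.h */

static void copy_illustration(void)
{
	char name[8];

	/* strncpy() copies at most 8 bytes and adds NO terminator when the
	 * source is 8 chars or longer, leaving `name` unterminated: */
	strncpy(name, "branch-misses", sizeof(name));

	/* strlcpy() always terminates, truncating to 7 chars + '\0', and
	 * returns the length of the source it tried to copy: */
	strlcpy(name, "branch-misses", sizeof(name));   /* name == "branch-" */
}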
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index e86f8be89157..18a59fba97ff 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -692,7 +692,7 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
692 return ret; 692 return ret;
693 693
694 for (i = 0; i < ntevs && ret >= 0; i++) { 694 for (i = 0; i < ntevs && ret >= 0; i++) {
695 /* point.address is the addres of point.symbol + point.offset */ 695 /* point.address is the address of point.symbol + point.offset */
696 tevs[i].point.address -= stext; 696 tevs[i].point.address -= stext;
697 tevs[i].point.module = strdup(exec); 697 tevs[i].point.module = strdup(exec);
698 if (!tevs[i].point.module) { 698 if (!tevs[i].point.module) {
@@ -3062,7 +3062,7 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev,
3062 /* 3062 /*
3063 * Give it a '0x' leading symbol name. 3063 * Give it a '0x' leading symbol name.
3064 * In __add_probe_trace_events, a NULL symbol is interpreted as 3064 * In __add_probe_trace_events, a NULL symbol is interpreted as
3065 * invalud. 3065 * invalid.
3066 */ 3066 */
3067 if (asprintf(&tp->symbol, "0x%lx", tp->address) < 0) 3067 if (asprintf(&tp->symbol, "0x%lx", tp->address) < 0)
3068 goto errout; 3068 goto errout;
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index aac7817d9e14..0b1195cad0e5 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -424,7 +424,7 @@ static int probe_cache__open(struct probe_cache *pcache, const char *target,
424 424
425 if (target && build_id_cache__cached(target)) { 425 if (target && build_id_cache__cached(target)) {
426 /* This is a cached buildid */ 426 /* This is a cached buildid */
427 strncpy(sbuildid, target, SBUILD_ID_SIZE); 427 strlcpy(sbuildid, target, SBUILD_ID_SIZE);
428 dir_name = build_id_cache__linkname(sbuildid, NULL, 0); 428 dir_name = build_id_cache__linkname(sbuildid, NULL, 0);
429 goto found; 429 goto found;
430 } 430 }
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 50150dfc0cdf..47628e85c5eb 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -386,7 +386,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
386 struct tep_format_field *field; 386 struct tep_format_field *field;
387 387
388 if (!evsel->tp_format) { 388 if (!evsel->tp_format) {
389 struct tep_event_format *tp_format; 389 struct tep_event *tp_format;
390 390
391 tp_format = trace_event__tp_format_id(evsel->attr.config); 391 tp_format = trace_event__tp_format_id(evsel->attr.config);
392 if (!tp_format) 392 if (!tp_format)
@@ -1240,7 +1240,7 @@ static struct {
1240static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel, 1240static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
1241 PyObject *args, PyObject *kwargs) 1241 PyObject *args, PyObject *kwargs)
1242{ 1242{
1243 struct tep_event_format *tp_format; 1243 struct tep_event *tp_format;
1244 static char *kwlist[] = { "sys", "name", NULL }; 1244 static char *kwlist[] = { "sys", "name", NULL };
1245 char *sys = NULL; 1245 char *sys = NULL;
1246 char *name = NULL; 1246 char *name = NULL;
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 89cb887648f9..b93f36b887b5 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -189,7 +189,7 @@ static void define_flag_field(const char *ev_name,
189 LEAVE; 189 LEAVE;
190} 190}
191 191
192static void define_event_symbols(struct tep_event_format *event, 192static void define_event_symbols(struct tep_event *event,
193 const char *ev_name, 193 const char *ev_name,
194 struct tep_print_arg *args) 194 struct tep_print_arg *args)
195{ 195{
@@ -338,7 +338,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
338 struct addr_location *al) 338 struct addr_location *al)
339{ 339{
340 struct thread *thread = al->thread; 340 struct thread *thread = al->thread;
341 struct tep_event_format *event = evsel->tp_format; 341 struct tep_event *event = evsel->tp_format;
342 struct tep_format_field *field; 342 struct tep_format_field *field;
343 static char handler[256]; 343 static char handler[256];
344 unsigned long long val; 344 unsigned long long val;
@@ -537,7 +537,7 @@ static int perl_stop_script(void)
537 537
538static int perl_generate_script(struct tep_handle *pevent, const char *outfile) 538static int perl_generate_script(struct tep_handle *pevent, const char *outfile)
539{ 539{
540 struct tep_event_format *event = NULL; 540 struct tep_event *event = NULL;
541 struct tep_format_field *f; 541 struct tep_format_field *f;
542 char fname[PATH_MAX]; 542 char fname[PATH_MAX];
543 int not_first, count; 543 int not_first, count;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 69aa93d4ee99..87ef16a1b17e 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -264,7 +264,7 @@ static void define_field(enum tep_print_arg_type field_type,
264 Py_DECREF(t); 264 Py_DECREF(t);
265} 265}
266 266
267static void define_event_symbols(struct tep_event_format *event, 267static void define_event_symbols(struct tep_event *event,
268 const char *ev_name, 268 const char *ev_name,
269 struct tep_print_arg *args) 269 struct tep_print_arg *args)
270{ 270{
@@ -332,7 +332,7 @@ static void define_event_symbols(struct tep_event_format *event,
332 define_event_symbols(event, ev_name, args->next); 332 define_event_symbols(event, ev_name, args->next);
333} 333}
334 334
335static PyObject *get_field_numeric_entry(struct tep_event_format *event, 335static PyObject *get_field_numeric_entry(struct tep_event *event,
336 struct tep_format_field *field, void *data) 336 struct tep_format_field *field, void *data)
337{ 337{
338 bool is_array = field->flags & TEP_FIELD_IS_ARRAY; 338 bool is_array = field->flags & TEP_FIELD_IS_ARRAY;
@@ -494,14 +494,14 @@ static PyObject *python_process_brstack(struct perf_sample *sample,
494 pydict_set_item_string_decref(pyelem, "cycles", 494 pydict_set_item_string_decref(pyelem, "cycles",
495 PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles)); 495 PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles));
496 496
497 thread__find_map(thread, sample->cpumode, 497 thread__find_map_fb(thread, sample->cpumode,
498 br->entries[i].from, &al); 498 br->entries[i].from, &al);
499 dsoname = get_dsoname(al.map); 499 dsoname = get_dsoname(al.map);
500 pydict_set_item_string_decref(pyelem, "from_dsoname", 500 pydict_set_item_string_decref(pyelem, "from_dsoname",
501 _PyUnicode_FromString(dsoname)); 501 _PyUnicode_FromString(dsoname));
502 502
503 thread__find_map(thread, sample->cpumode, 503 thread__find_map_fb(thread, sample->cpumode,
504 br->entries[i].to, &al); 504 br->entries[i].to, &al);
505 dsoname = get_dsoname(al.map); 505 dsoname = get_dsoname(al.map);
506 pydict_set_item_string_decref(pyelem, "to_dsoname", 506 pydict_set_item_string_decref(pyelem, "to_dsoname",
507 _PyUnicode_FromString(dsoname)); 507 _PyUnicode_FromString(dsoname));
@@ -576,14 +576,14 @@ static PyObject *python_process_brstacksym(struct perf_sample *sample,
576 if (!pyelem) 576 if (!pyelem)
577 Py_FatalError("couldn't create Python dictionary"); 577 Py_FatalError("couldn't create Python dictionary");
578 578
579 thread__find_symbol(thread, sample->cpumode, 579 thread__find_symbol_fb(thread, sample->cpumode,
580 br->entries[i].from, &al); 580 br->entries[i].from, &al);
581 get_symoff(al.sym, &al, true, bf, sizeof(bf)); 581 get_symoff(al.sym, &al, true, bf, sizeof(bf));
582 pydict_set_item_string_decref(pyelem, "from", 582 pydict_set_item_string_decref(pyelem, "from",
583 _PyUnicode_FromString(bf)); 583 _PyUnicode_FromString(bf));
584 584
585 thread__find_symbol(thread, sample->cpumode, 585 thread__find_symbol_fb(thread, sample->cpumode,
586 br->entries[i].to, &al); 586 br->entries[i].to, &al);
587 get_symoff(al.sym, &al, true, bf, sizeof(bf)); 587 get_symoff(al.sym, &al, true, bf, sizeof(bf));
588 pydict_set_item_string_decref(pyelem, "to", 588 pydict_set_item_string_decref(pyelem, "to",
589 _PyUnicode_FromString(bf)); 589 _PyUnicode_FromString(bf));
@@ -790,7 +790,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
790 struct perf_evsel *evsel, 790 struct perf_evsel *evsel,
791 struct addr_location *al) 791 struct addr_location *al)
792{ 792{
793 struct tep_event_format *event = evsel->tp_format; 793 struct tep_event *event = evsel->tp_format;
794 PyObject *handler, *context, *t, *obj = NULL, *callchain; 794 PyObject *handler, *context, *t, *obj = NULL, *callchain;
795 PyObject *dict = NULL, *all_entries_dict = NULL; 795 PyObject *dict = NULL, *all_entries_dict = NULL;
796 static char handler_name[256]; 796 static char handler_name[256];
@@ -1590,7 +1590,7 @@ static int python_stop_script(void)
1590 1590
1591static int python_generate_script(struct tep_handle *pevent, const char *outfile) 1591static int python_generate_script(struct tep_handle *pevent, const char *outfile)
1592{ 1592{
1593 struct tep_event_format *event = NULL; 1593 struct tep_event *event = NULL;
1594 struct tep_format_field *f; 1594 struct tep_format_field *f;
1595 char fname[PATH_MAX]; 1595 char fname[PATH_MAX];
1596 int not_first, count; 1596 int not_first, count;
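The thread__find_map_fb()/thread__find_symbol_fb() calls substituted above are the fallback lookups: when resolving a branch-stack address with the sample's own cpumode finds nothing, they retry the other map trees. The real helpers live in util/event.c; the sketch below only shows the shape of that fallback, and its ordering is an assumption.

#include "thread.h"           /* thread__find_map(), struct addr_location */

/* Sketch: resolve `addr`, retrying other privilege levels when the lookup
 * for the sample's cpumode comes back empty (shape only, not verbatim). */
static struct map *find_map_with_fallback(struct thread *thread, u8 cpumode,
					  u64 addr, struct addr_location *al)
{
	struct map *map = thread__find_map(thread, cpumode, addr, al);

	if (!map && cpumode != PERF_RECORD_MISC_KERNEL)
		map = thread__find_map(thread, PERF_RECORD_MISC_KERNEL, addr, al);
	if (!map && cpumode != PERF_RECORD_MISC_USER)
		map = thread__find_map(thread, PERF_RECORD_MISC_USER, addr, al);

	return map;
}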
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 7d2c8ce6cfad..78a067777144 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -24,6 +24,7 @@
24#include "thread.h" 24#include "thread.h"
25#include "thread-stack.h" 25#include "thread-stack.h"
26#include "stat.h" 26#include "stat.h"
27#include "arch/common.h"
27 28
28static int perf_session__deliver_event(struct perf_session *session, 29static int perf_session__deliver_event(struct perf_session *session,
29 union perf_event *event, 30 union perf_event *event,
@@ -125,7 +126,8 @@ struct perf_session *perf_session__new(struct perf_data *data,
125 session->tool = tool; 126 session->tool = tool;
126 INIT_LIST_HEAD(&session->auxtrace_index); 127 INIT_LIST_HEAD(&session->auxtrace_index);
127 machines__init(&session->machines); 128 machines__init(&session->machines);
128 ordered_events__init(&session->ordered_events, ordered_events__deliver_event); 129 ordered_events__init(&session->ordered_events,
130 ordered_events__deliver_event, NULL);
129 131
130 if (data) { 132 if (data) {
131 if (perf_data__open(data)) 133 if (perf_data__open(data))
@@ -150,6 +152,9 @@ struct perf_session *perf_session__new(struct perf_data *data,
150 session->machines.host.env = &perf_env; 152 session->machines.host.env = &perf_env;
151 } 153 }
152 154
155 session->machines.host.single_address_space =
156 perf_env__single_address_space(session->machines.host.env);
157
153 if (!data || perf_data__is_write(data)) { 158 if (!data || perf_data__is_write(data)) {
154 /* 159 /*
155 * In O_RDONLY mode this will be performed when reading the 160 * In O_RDONLY mode this will be performed when reading the
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index f96c005b3c41..6c1a83768eb0 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -13,6 +13,7 @@
13#include "strlist.h" 13#include "strlist.h"
14#include <traceevent/event-parse.h> 14#include <traceevent/event-parse.h>
15#include "mem-events.h" 15#include "mem-events.h"
16#include "annotate.h"
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17 18
18regex_t parent_regex; 19regex_t parent_regex;
@@ -36,7 +37,7 @@ enum sort_mode sort__mode = SORT_MODE__NORMAL;
36 * -t, --field-separator 37 * -t, --field-separator
37 * 38 *
38 * option, that uses a special separator character and don't pad with spaces, 39 * option, that uses a special separator character and don't pad with spaces,
39 * replacing all occurances of this separator in symbol names (and other 40 * replacing all occurrences of this separator in symbol names (and other
40 * output) with a '.' character, that thus it's the only non valid separator. 41 * output) with a '.' character, that thus it's the only non valid separator.
41*/ 42*/
42static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) 43static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
@@ -422,6 +423,64 @@ struct sort_entry sort_srcline_to = {
422 .se_width_idx = HISTC_SRCLINE_TO, 423 .se_width_idx = HISTC_SRCLINE_TO,
423}; 424};
424 425
426static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
427 size_t size, unsigned int width)
428{
429
430 struct symbol *sym = he->ms.sym;
431 struct map *map = he->ms.map;
432 struct perf_evsel *evsel = hists_to_evsel(he->hists);
433 struct annotation *notes;
434 double ipc = 0.0, coverage = 0.0;
435 char tmp[64];
436
437 if (!sym)
438 return repsep_snprintf(bf, size, "%-*s", width, "-");
439
440 if (!sym->annotate2 && symbol__annotate2(sym, map, evsel,
441 &annotation__default_options, NULL) < 0) {
442 return 0;
443 }
444
445 notes = symbol__annotation(sym);
446
447 if (notes->hit_cycles)
448 ipc = notes->hit_insn / ((double)notes->hit_cycles);
449
450 if (notes->total_insn) {
451 coverage = notes->cover_insn * 100.0 /
452 ((double)notes->total_insn);
453 }
454
455 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
456 return repsep_snprintf(bf, size, "%-*s", width, tmp);
457}
458
459struct sort_entry sort_sym_ipc = {
460 .se_header = "IPC [IPC Coverage]",
461 .se_cmp = sort__sym_cmp,
462 .se_snprintf = hist_entry__sym_ipc_snprintf,
463 .se_width_idx = HISTC_SYMBOL_IPC,
464};
465
466static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
467 __maybe_unused,
468 char *bf, size_t size,
469 unsigned int width)
470{
471 char tmp[64];
472
473 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
474 return repsep_snprintf(bf, size, "%-*s", width, tmp);
475}
476
477struct sort_entry sort_sym_ipc_null = {
478 .se_header = "IPC [IPC Coverage]",
479 .se_cmp = sort__sym_cmp,
480 .se_snprintf = hist_entry__sym_ipc_null_snprintf,
481 .se_width_idx = HISTC_SYMBOL_IPC,
482};
483
425/* --sort srcfile */ 484/* --sort srcfile */
426 485
427static char no_srcfile[1]; 486static char no_srcfile[1];
@@ -1574,6 +1633,7 @@ static struct sort_dimension common_sort_dimensions[] = {
1574 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size), 1633 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
1575 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size), 1634 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
1576 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), 1635 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
1636 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
1577}; 1637};
1578 1638
1579#undef DIM 1639#undef DIM
@@ -1591,6 +1651,7 @@ static struct sort_dimension bstack_sort_dimensions[] = {
1591 DIM(SORT_CYCLES, "cycles", sort_cycles), 1651 DIM(SORT_CYCLES, "cycles", sort_cycles),
1592 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 1652 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
1593 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 1653 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
1654 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
1594}; 1655};
1595 1656
1596#undef DIM 1657#undef DIM
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index a97cf8e6be86..130fe37fe2df 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -229,6 +229,7 @@ enum sort_type {
229 SORT_SYM_SIZE, 229 SORT_SYM_SIZE,
230 SORT_DSO_SIZE, 230 SORT_DSO_SIZE,
231 SORT_CGROUP_ID, 231 SORT_CGROUP_ID,
232 SORT_SYM_IPC_NULL,
232 233
233 /* branch stack specific sort keys */ 234 /* branch stack specific sort keys */
234 __SORT_BRANCH_STACK, 235 __SORT_BRANCH_STACK,
@@ -242,6 +243,7 @@ enum sort_type {
242 SORT_CYCLES, 243 SORT_CYCLES,
243 SORT_SRCLINE_FROM, 244 SORT_SRCLINE_FROM,
244 SORT_SRCLINE_TO, 245 SORT_SRCLINE_TO,
246 SORT_SYM_IPC,
245 247
246 /* memory mode specific sort keys */ 248 /* memory mode specific sort keys */
247 __SORT_MEMORY_MODE, 249 __SORT_MEMORY_MODE,
diff --git a/tools/perf/util/srccode.c b/tools/perf/util/srccode.c
new file mode 100644
index 000000000000..fcc8630f6dff
--- /dev/null
+++ b/tools/perf/util/srccode.c
@@ -0,0 +1,186 @@
1/*
2 * Manage printing of source lines
3 * Copyright (c) 2017, Intel Corporation.
4 * Author: Andi Kleen
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#include "linux/list.h"
16#include <stdlib.h>
17#include <sys/mman.h>
18#include <sys/stat.h>
19#include <fcntl.h>
20#include <unistd.h>
21#include <assert.h>
22#include <string.h>
23#include "srccode.h"
24#include "debug.h"
25#include "util.h"
26
27#define MAXSRCCACHE (32*1024*1024)
28#define MAXSRCFILES 64
29#define SRC_HTAB_SZ 64
30
31struct srcfile {
32 struct hlist_node hash_nd;
33 struct list_head nd;
34 char *fn;
35 char **lines;
36 char *map;
37 unsigned numlines;
38 size_t maplen;
39};
40
41static struct hlist_head srcfile_htab[SRC_HTAB_SZ];
42static LIST_HEAD(srcfile_list);
43static long map_total_sz;
44static int num_srcfiles;
45
46static unsigned shash(unsigned char *s)
47{
48 unsigned h = 0;
49 while (*s)
50 h = 65599 * h + *s++;
51 return h ^ (h >> 16);
52}
53
54static int countlines(char *map, int maplen)
55{
56 int numl;
57 char *end = map + maplen;
58 char *p = map;
59
60 if (maplen == 0)
61 return 0;
62 numl = 0;
63 while (p < end && (p = memchr(p, '\n', end - p)) != NULL) {
64 numl++;
65 p++;
66 }
67 if (p < end)
68 numl++;
69 return numl;
70}
71
72static void fill_lines(char **lines, int maxline, char *map, int maplen)
73{
74 int l;
75 char *end = map + maplen;
76 char *p = map;
77
78 if (maplen == 0 || maxline == 0)
79 return;
80 l = 0;
81 lines[l++] = map;
82 while (p < end && (p = memchr(p, '\n', end - p)) != NULL) {
83 if (l >= maxline)
84 return;
85 lines[l++] = ++p;
86 }
87 if (p < end)
88 lines[l] = p;
89}
90
91static void free_srcfile(struct srcfile *sf)
92{
93 list_del(&sf->nd);
94 hlist_del(&sf->hash_nd);
95 map_total_sz -= sf->maplen;
96 munmap(sf->map, sf->maplen);
97 free(sf->lines);
98 free(sf->fn);
99 free(sf);
100 num_srcfiles--;
101}
102
103static struct srcfile *find_srcfile(char *fn)
104{
105 struct stat st;
106 struct srcfile *h;
107 int fd;
108 unsigned long sz;
109 unsigned hval = shash((unsigned char *)fn) % SRC_HTAB_SZ;
110
111 hlist_for_each_entry (h, &srcfile_htab[hval], hash_nd) {
112 if (!strcmp(fn, h->fn)) {
113 /* Move to front */
114 list_del(&h->nd);
115 list_add(&h->nd, &srcfile_list);
116 return h;
117 }
118 }
119
120 /* Only prune if there is more than one entry */
121 while ((num_srcfiles > MAXSRCFILES || map_total_sz > MAXSRCCACHE) &&
122 srcfile_list.next != &srcfile_list) {
123 assert(!list_empty(&srcfile_list));
124 h = list_entry(srcfile_list.prev, struct srcfile, nd);
125 free_srcfile(h);
126 }
127
128 fd = open(fn, O_RDONLY);
129 if (fd < 0 || fstat(fd, &st) < 0) {
130 pr_debug("cannot open source file %s\n", fn);
131 return NULL;
132 }
133
134 h = malloc(sizeof(struct srcfile));
135 if (!h)
136 return NULL;
137
138 h->fn = strdup(fn);
139 if (!h->fn)
140 goto out_h;
141
142 h->maplen = st.st_size;
143 sz = (h->maplen + page_size - 1) & ~(page_size - 1);
144 h->map = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
145 close(fd);
146 if (h->map == (char *)-1) {
147 pr_debug("cannot mmap source file %s\n", fn);
148 goto out_fn;
149 }
150 h->numlines = countlines(h->map, h->maplen);
151 h->lines = calloc(h->numlines, sizeof(char *));
152 if (!h->lines)
153 goto out_map;
154 fill_lines(h->lines, h->numlines, h->map, h->maplen);
155 list_add(&h->nd, &srcfile_list);
156 hlist_add_head(&h->hash_nd, &srcfile_htab[hval]);
157 map_total_sz += h->maplen;
158 num_srcfiles++;
159 return h;
160
161out_map:
162 munmap(h->map, sz);
163out_fn:
164 free(h->fn);
165out_h:
166 free(h);
167 return NULL;
168}
169
170/* Result is not 0 terminated */
171char *find_sourceline(char *fn, unsigned line, int *lenp)
172{
173 char *l, *p;
174 struct srcfile *sf = find_srcfile(fn);
175 if (!sf)
176 return NULL;
177 line--;
178 if (line >= sf->numlines)
179 return NULL;
180 l = sf->lines[line];
181 if (!l)
182 return NULL;
183 p = memchr(l, '\n', sf->map + sf->maplen - l);
184 *lenp = p - l;
185 return l;
186}
diff --git a/tools/perf/util/srccode.h b/tools/perf/util/srccode.h
new file mode 100644
index 000000000000..e500a746d5f1
--- /dev/null
+++ b/tools/perf/util/srccode.h
@@ -0,0 +1,7 @@
1#ifndef SRCCODE_H
2#define SRCCODE_H 1
3
4/* Result is not 0 terminated */
5char *find_sourceline(char *fn, unsigned line, int *lenp);
6
7#endif
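find_sourceline() hands back a pointer into the mmap'ed source file (deliberately not NUL-terminated) plus the line length through *lenp, with per-file line indexes kept in a small cache bounded by MAXSRCFILES (64 files) and MAXSRCCACHE (32 MB). A minimal sketch of a caller; the function name and the formatting are illustrative.

#include <stdio.h>
#include "srccode.h"

/* Sketch: print source line `line` of file `fn`, prefixed by its number. */
static void print_one_srcline(char *fn, unsigned line)
{
	int len;
	char *src = find_sourceline(fn, line, &len);

	if (src)
		printf("%8u %.*s\n", line, len, src);   /* not NUL-terminated */
	else
		printf("%8u <source not available>\n", line);
}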
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index e767c4a9d4d2..dc86597d0cc4 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -548,6 +548,34 @@ out:
548 return srcline; 548 return srcline;
549} 549}
550 550
551/* Returns filename and fills in line number in line */
552char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line)
553{
554 char *file = NULL;
555 const char *dso_name;
556
557 if (!dso->has_srcline)
558 goto out;
559
560 dso_name = dso__name(dso);
561 if (dso_name == NULL)
562 goto out;
563
564 if (!addr2line(dso_name, addr, &file, line, dso, true, NULL, NULL))
565 goto out;
566
567 dso->a2l_fails = 0;
568 return file;
569
570out:
571 if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) {
572 dso->has_srcline = 0;
573 dso__free_a2l(dso);
574 }
575
576 return NULL;
577}
578
551void free_srcline(char *srcline) 579void free_srcline(char *srcline)
552{ 580{
553 if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0) 581 if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0)
diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h
index b2bb5502fd62..5762212dc342 100644
--- a/tools/perf/util/srcline.h
+++ b/tools/perf/util/srcline.h
@@ -16,6 +16,7 @@ char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
16 bool show_sym, bool show_addr, bool unwind_inlines, 16 bool show_sym, bool show_addr, bool unwind_inlines,
17 u64 ip); 17 u64 ip);
18void free_srcline(char *srcline); 18void free_srcline(char *srcline);
19char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line);
19 20
20/* insert the srcline into the DSO, which will take ownership */ 21/* insert the srcline into the DSO, which will take ownership */
21void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline); 22void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline);
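get_srcline_split() is the addr2line-based front end to the above: it maps a DSO address to the source file name and writes the line number into *line, which can then be fed to find_sourceline() -- the pairing behind 'perf script's new 'srccode' field. A rough sketch of the combination; error handling is trimmed, and freeing the returned file name with free() is an assumption based on it coming from addr2line's duplicated string.

#include <stdio.h>
#include <stdlib.h>
#include "srcline.h"
#include "srccode.h"

/* Sketch: resolve an address to file:line and print the source text. */
static void show_srccode(struct dso *dso, u64 addr)
{
	unsigned line = 0;
	int len;
	char *src, *file = get_srcline_split(dso, addr, &line);

	if (!file)
		return;

	src = find_sourceline(file, line, &len);
	if (src)
		printf("%s:%u\t%.*s\n", file, line, len, src);

	free(file);     /* assumption: heap-allocated, see lead-in */
}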
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index e7b4c44ebb62..665ee374fc01 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -59,6 +59,15 @@ static void print_noise(struct perf_stat_config *config,
59 print_noise_pct(config, stddev_stats(&ps->res_stats[0]), avg); 59 print_noise_pct(config, stddev_stats(&ps->res_stats[0]), avg);
60} 60}
61 61
62static void print_cgroup(struct perf_stat_config *config, struct perf_evsel *evsel)
63{
64 if (nr_cgroups) {
65 const char *cgrp_name = evsel->cgrp ? evsel->cgrp->name : "";
66 fprintf(config->output, "%s%s", config->csv_sep, cgrp_name);
67 }
68}
69
70
62static void aggr_printout(struct perf_stat_config *config, 71static void aggr_printout(struct perf_stat_config *config,
63 struct perf_evsel *evsel, int id, int nr) 72 struct perf_evsel *evsel, int id, int nr)
64{ 73{
@@ -336,8 +345,7 @@ static void abs_printout(struct perf_stat_config *config,
336 345
337 fprintf(output, "%-*s", config->csv_output ? 0 : 25, perf_evsel__name(evsel)); 346 fprintf(output, "%-*s", config->csv_output ? 0 : 25, perf_evsel__name(evsel));
338 347
339 if (evsel->cgrp) 348 print_cgroup(config, evsel);
340 fprintf(output, "%s%s", config->csv_sep, evsel->cgrp->name);
341} 349}
342 350
343static bool is_mixed_hw_group(struct perf_evsel *counter) 351static bool is_mixed_hw_group(struct perf_evsel *counter)
@@ -431,9 +439,7 @@ static void printout(struct perf_stat_config *config, int id, int nr,
431 config->csv_output ? 0 : -25, 439 config->csv_output ? 0 : -25,
432 perf_evsel__name(counter)); 440 perf_evsel__name(counter));
433 441
434 if (counter->cgrp) 442 print_cgroup(config, counter);
435 fprintf(config->output, "%s%s",
436 config->csv_sep, counter->cgrp->name);
437 443
438 if (!config->csv_output) 444 if (!config->csv_output)
439 pm(config, &os, NULL, NULL, "", 0); 445 pm(config, &os, NULL, NULL, "", 0);
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index f0a8cec55c47..3c22c58b3e90 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -209,11 +209,12 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
209 int cpu, struct runtime_stat *st) 209 int cpu, struct runtime_stat *st)
210{ 210{
211 int ctx = evsel_context(counter); 211 int ctx = evsel_context(counter);
212 u64 count_ns = count;
212 213
213 count *= counter->scale; 214 count *= counter->scale;
214 215
215 if (perf_evsel__is_clock(counter)) 216 if (perf_evsel__is_clock(counter))
216 update_runtime_stat(st, STAT_NSECS, 0, cpu, count); 217 update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns);
217 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) 218 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
218 update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count); 219 update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
219 else if (perf_stat_evsel__is(counter, CYCLES_IN_TX)) 220 else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
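The count_ns fix keeps STAT_NSECS in raw nanoseconds: clock events (task-clock, cpu-clock) count ns, but counter->scale rescales the value for display (typically 1e-6, so perf stat can print msec), and feeding the scaled value into the ns-based shadow metrics skewed them by that factor. A small illustration with an assumed 1e-6 scale:

#include <linux/types.h>   /* u64, as used throughout tools/perf */

static void clock_scale_illustration(void)
{
	u64    count  = 2000000000ULL;  /* raw ns counted by task-clock        */
	double scale  = 1e-6;           /* assumed display scale (ns -> msec)  */
	double scaled = count * scale;  /* 2000.0, what gets printed as "msec" */

	/* Shadow metrics derived from STAT_NSECS expect nanoseconds, so they
	 * must be fed `count` (2e9), not `scaled` (2e3) -- hence count_ns.   */
	(void)scaled;
}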
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 1cbada2dc6be..f735ee038713 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -334,7 +334,7 @@ static char *cpu_model(void)
334 if (file) { 334 if (file) {
335 while (fgets(buf, 255, file)) { 335 while (fgets(buf, 255, file)) {
336 if (strstr(buf, "model name")) { 336 if (strstr(buf, "model name")) {
337 strncpy(cpu_m, &buf[13], 255); 337 strlcpy(cpu_m, &buf[13], 255);
338 break; 338 break;
339 } 339 }
340 } 340 }
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index d026d215bdc6..14d9d438e7e2 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -63,6 +63,7 @@ struct symbol {
63 u8 ignore:1; 63 u8 ignore:1;
64 u8 inlined:1; 64 u8 inlined:1;
65 u8 arch_sym; 65 u8 arch_sym;
66 bool annotate2;
66 char name[0]; 67 char name[0];
67}; 68};
68 69
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 3d9ed7d0e281..c83372329f89 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -64,6 +64,7 @@ struct thread *thread__new(pid_t pid, pid_t tid)
64 RB_CLEAR_NODE(&thread->rb_node); 64 RB_CLEAR_NODE(&thread->rb_node);
65 /* Thread holds first ref to nsdata. */ 65 /* Thread holds first ref to nsdata. */
66 thread->nsinfo = nsinfo__new(pid); 66 thread->nsinfo = nsinfo__new(pid);
67 srccode_state_init(&thread->srccode_state);
67 } 68 }
68 69
69 return thread; 70 return thread;
@@ -103,6 +104,7 @@ void thread__delete(struct thread *thread)
103 104
104 unwind__finish_access(thread); 105 unwind__finish_access(thread);
105 nsinfo__zput(thread->nsinfo); 106 nsinfo__zput(thread->nsinfo);
107 srccode_state_free(&thread->srccode_state);
106 108
107 exit_rwsem(&thread->namespaces_lock); 109 exit_rwsem(&thread->namespaces_lock);
108 exit_rwsem(&thread->comm_lock); 110 exit_rwsem(&thread->comm_lock);
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 30e2b4c165fe..712dd48cc0ca 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -8,6 +8,7 @@
8#include <unistd.h> 8#include <unistd.h>
9#include <sys/types.h> 9#include <sys/types.h>
10#include "symbol.h" 10#include "symbol.h"
11#include "map.h"
11#include <strlist.h> 12#include <strlist.h>
12#include <intlist.h> 13#include <intlist.h>
13#include "rwsem.h" 14#include "rwsem.h"
@@ -38,6 +39,7 @@ struct thread {
38 void *priv; 39 void *priv;
39 struct thread_stack *ts; 40 struct thread_stack *ts;
40 struct nsinfo *nsinfo; 41 struct nsinfo *nsinfo;
42 struct srccode_state srccode_state;
41#ifdef HAVE_LIBUNWIND_SUPPORT 43#ifdef HAVE_LIBUNWIND_SUPPORT
42 void *addr_space; 44 void *addr_space;
43 struct unwind_libunwind_ops *unwind_libunwind_ops; 45 struct unwind_libunwind_ops *unwind_libunwind_ops;
@@ -96,9 +98,13 @@ struct thread *thread__main_thread(struct machine *machine, struct thread *threa
96 98
97struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr, 99struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
98 struct addr_location *al); 100 struct addr_location *al);
101struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
102 struct addr_location *al);
99 103
100struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode, 104struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
101 u64 addr, struct addr_location *al); 105 u64 addr, struct addr_location *al);
106struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
107 u64 addr, struct addr_location *al);
102 108
103void thread__find_cpumode_addr_location(struct thread *thread, u64 addr, 109void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
104 struct addr_location *al); 110 struct addr_location *al);
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index 8e517def925b..4c8da8c4435f 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -46,8 +46,9 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
46 samples_per_sec; 46 samples_per_sec;
47 ret = SNPRINTF(bf, size, 47 ret = SNPRINTF(bf, size,
48 " PerfTop:%8.0f irqs/sec kernel:%4.1f%%" 48 " PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
49 " exact: %4.1f%% [", samples_per_sec, 49 " exact: %4.1f%% lost: %" PRIu64 "/%" PRIu64 " drop: %" PRIu64 "/%" PRIu64 " [",
50 ksamples_percent, esamples_percent); 50 samples_per_sec, ksamples_percent, esamples_percent,
51 top->lost, top->lost_total, top->drop, top->drop_total);
51 } else { 52 } else {
52 float us_samples_per_sec = top->us_samples / top->delay_secs; 53 float us_samples_per_sec = top->us_samples / top->delay_secs;
53 float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs; 54 float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
@@ -106,6 +107,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
106 top->evlist->cpus->nr > 1 ? "s" : ""); 107 top->evlist->cpus->nr > 1 ? "s" : "");
107 } 108 }
108 109
110 perf_top__reset_sample_counters(top);
109 return ret; 111 return ret;
110} 112}
111 113
@@ -113,5 +115,5 @@ void perf_top__reset_sample_counters(struct perf_top *top)
113{ 115{
114 top->samples = top->us_samples = top->kernel_samples = 116 top->samples = top->us_samples = top->kernel_samples =
115 top->exact_samples = top->guest_kernel_samples = 117 top->exact_samples = top->guest_kernel_samples =
116 top->guest_us_samples = 0; 118 top->guest_us_samples = top->lost = top->drop = 0;
117} 119}
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 9add1f72ce95..19f95eaf75c8 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -22,7 +22,7 @@ struct perf_top {
22 * Symbols will be added here in perf_event__process_sample and will 22 * Symbols will be added here in perf_event__process_sample and will
23 * get out after decayed. 23 * get out after decayed.
24 */ 24 */
25 u64 samples; 25 u64 samples, lost, lost_total, drop, drop_total;
26 u64 kernel_samples, us_samples; 26 u64 kernel_samples, us_samples;
27 u64 exact_samples; 27 u64 exact_samples;
28 u64 guest_us_samples, guest_kernel_samples; 28 u64 guest_us_samples, guest_kernel_samples;
@@ -40,6 +40,14 @@ struct perf_top {
40 const char *sym_filter; 40 const char *sym_filter;
41 float min_percent; 41 float min_percent;
42 unsigned int nr_threads_synthesize; 42 unsigned int nr_threads_synthesize;
43
44 struct {
45 struct ordered_events *in;
46 struct ordered_events data[2];
47 bool rotate;
48 pthread_mutex_t mutex;
49 pthread_cond_t cond;
50 } qe;
43}; 51};
44 52
45#define CONSOLE_CLEAR "" 53#define CONSOLE_CLEAR ""
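The new qe block is what lets 'perf top' queue samples into one ordered_events buffer while a separate thread drains the other: qe.in is the buffer currently being filled, qe.data[2] are the two buffers, and rotate/mutex/cond coordinate the swap. The sketch below only shows the shape of that hand-off; the real logic lives in builtin-top.c, and the details here (who flips qe.in, when the condition is signalled) are assumptions.

#include <pthread.h>
#include "top.h"
#include "ordered-events.h"

/* Sketch of the double-buffer hand-off guarded by qe.mutex/qe.cond. */
static void drain_other_queue(struct perf_top *top)
{
	struct ordered_events *out = top->qe.in;   /* batch to drain */

	pthread_mutex_lock(&top->qe.mutex);
	top->qe.rotate = true;                     /* ask the reader to switch */
	pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
	pthread_mutex_unlock(&top->qe.mutex);

	/* The reader now appends to the other ordered_events buffer while
	 * this thread processes the finished batch without a progress bar. */
	ordered_events__flush(out, OE_FLUSH__TOP);
}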
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 32e558a65af3..ad74be1f0e42 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -33,7 +33,7 @@ static int get_common_field(struct scripting_context *context,
33 int *offset, int *size, const char *type) 33 int *offset, int *size, const char *type)
34{ 34{
35 struct tep_handle *pevent = context->pevent; 35 struct tep_handle *pevent = context->pevent;
36 struct tep_event_format *event; 36 struct tep_event *event;
37 struct tep_format_field *field; 37 struct tep_format_field *field;
38 38
39 if (!*size) { 39 if (!*size) {
@@ -95,7 +95,7 @@ int common_pc(struct scripting_context *context)
95} 95}
96 96
97unsigned long long 97unsigned long long
98raw_field_value(struct tep_event_format *event, const char *name, void *data) 98raw_field_value(struct tep_event *event, const char *name, void *data)
99{ 99{
100 struct tep_format_field *field; 100 struct tep_format_field *field;
101 unsigned long long val; 101 unsigned long long val;
@@ -109,12 +109,12 @@ raw_field_value(struct tep_event_format *event, const char *name, void *data)
109 return val; 109 return val;
110} 110}
111 111
112unsigned long long read_size(struct tep_event_format *event, void *ptr, int size) 112unsigned long long read_size(struct tep_event *event, void *ptr, int size)
113{ 113{
114 return tep_read_number(event->pevent, ptr, size); 114 return tep_read_number(event->pevent, ptr, size);
115} 115}
116 116
117void event_format__fprintf(struct tep_event_format *event, 117void event_format__fprintf(struct tep_event *event,
118 int cpu, void *data, int size, FILE *fp) 118 int cpu, void *data, int size, FILE *fp)
119{ 119{
120 struct tep_record record; 120 struct tep_record record;
@@ -131,7 +131,7 @@ void event_format__fprintf(struct tep_event_format *event,
131 trace_seq_destroy(&s); 131 trace_seq_destroy(&s);
132} 132}
133 133
134void event_format__print(struct tep_event_format *event, 134void event_format__print(struct tep_event *event,
135 int cpu, void *data, int size) 135 int cpu, void *data, int size)
136{ 136{
137 return event_format__fprintf(event, cpu, data, size, stdout); 137 return event_format__fprintf(event, cpu, data, size, stdout);
@@ -190,12 +190,12 @@ int parse_event_file(struct tep_handle *pevent,
190 return tep_parse_event(pevent, buf, size, sys); 190 return tep_parse_event(pevent, buf, size, sys);
191} 191}
192 192
193struct tep_event_format *trace_find_next_event(struct tep_handle *pevent, 193struct tep_event *trace_find_next_event(struct tep_handle *pevent,
194 struct tep_event_format *event) 194 struct tep_event *event)
195{ 195{
196 static int idx; 196 static int idx;
197 int events_count; 197 int events_count;
198 struct tep_event_format *all_events; 198 struct tep_event *all_events;
199 199
200 all_events = tep_get_first_event(pevent); 200 all_events = tep_get_first_event(pevent);
201 events_count = tep_get_events_count(pevent); 201 events_count = tep_get_events_count(pevent);
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 76f12c705ef9..efe2f58cff4e 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -102,7 +102,7 @@ static unsigned int read4(struct tep_handle *pevent)
102 102
103 if (do_read(&data, 4) < 0) 103 if (do_read(&data, 4) < 0)
104 return 0; 104 return 0;
105 return __tep_data2host4(pevent, data); 105 return tep_read_number(pevent, &data, 4);
106} 106}
107 107
108static unsigned long long read8(struct tep_handle *pevent) 108static unsigned long long read8(struct tep_handle *pevent)
@@ -111,7 +111,7 @@ static unsigned long long read8(struct tep_handle *pevent)
111 111
112 if (do_read(&data, 8) < 0) 112 if (do_read(&data, 8) < 0)
113 return 0; 113 return 0;
114 return __tep_data2host8(pevent, data); 114 return tep_read_number(pevent, &data, 8);
115} 115}
116 116
117static char *read_string(void) 117static char *read_string(void)
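__tep_data2host4/8() were libtraceevent-internal byte-swap helpers; tep_read_number() is the public call that reads `size` bytes from a pointer and converts them to host endianness according to the handle's recorded file endianness, which is why read4()/read8() above can use it directly. A hedged sketch of reading an arbitrary numeric field the same way (libtraceevent also offers a dedicated field accessor for this; the helper name below is made up):

#include <traceevent/event-parse.h>

/* Sketch: pull a numeric field out of a raw tracepoint record, byte-swapped
 * to host order when the trace file's endianness differs from the host's. */
static unsigned long long read_field_num(struct tep_handle *pevent,
					 struct tep_format_field *field,
					 void *data)
{
	return tep_read_number(pevent, (char *)data + field->offset, field->size);
}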
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
index 95664b2f771e..cbe0dd758e3a 100644
--- a/tools/perf/util/trace-event.c
+++ b/tools/perf/util/trace-event.c
@@ -72,12 +72,12 @@ void trace_event__cleanup(struct trace_event *t)
72/* 72/*
73 * Returns pointer with encoded error via <linux/err.h> interface. 73 * Returns pointer with encoded error via <linux/err.h> interface.
74 */ 74 */
75static struct tep_event_format* 75static struct tep_event*
76tp_format(const char *sys, const char *name) 76tp_format(const char *sys, const char *name)
77{ 77{
78 char *tp_dir = get_events_file(sys); 78 char *tp_dir = get_events_file(sys);
79 struct tep_handle *pevent = tevent.pevent; 79 struct tep_handle *pevent = tevent.pevent;
80 struct tep_event_format *event = NULL; 80 struct tep_event *event = NULL;
81 char path[PATH_MAX]; 81 char path[PATH_MAX];
82 size_t size; 82 size_t size;
83 char *data; 83 char *data;
@@ -102,7 +102,7 @@ tp_format(const char *sys, const char *name)
102/* 102/*
103 * Returns pointer with encoded error via <linux/err.h> interface. 103 * Returns pointer with encoded error via <linux/err.h> interface.
104 */ 104 */
105struct tep_event_format* 105struct tep_event*
106trace_event__tp_format(const char *sys, const char *name) 106trace_event__tp_format(const char *sys, const char *name)
107{ 107{
108 if (!tevent_initialized && trace_event__init2()) 108 if (!tevent_initialized && trace_event__init2())
@@ -111,7 +111,7 @@ trace_event__tp_format(const char *sys, const char *name)
111 return tp_format(sys, name); 111 return tp_format(sys, name);
112} 112}
113 113
114struct tep_event_format *trace_event__tp_format_id(int id) 114struct tep_event *trace_event__tp_format_id(int id)
115{ 115{
116 if (!tevent_initialized && trace_event__init2()) 116 if (!tevent_initialized && trace_event__init2())
117 return ERR_PTR(-ENOMEM); 117 return ERR_PTR(-ENOMEM);
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index f024d73bfc40..d9b0a942090a 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -22,17 +22,17 @@ int trace_event__init(struct trace_event *t);
22void trace_event__cleanup(struct trace_event *t); 22void trace_event__cleanup(struct trace_event *t);
23int trace_event__register_resolver(struct machine *machine, 23int trace_event__register_resolver(struct machine *machine,
24 tep_func_resolver_t *func); 24 tep_func_resolver_t *func);
25struct tep_event_format* 25struct tep_event*
26trace_event__tp_format(const char *sys, const char *name); 26trace_event__tp_format(const char *sys, const char *name);
27 27
28struct tep_event_format *trace_event__tp_format_id(int id); 28struct tep_event *trace_event__tp_format_id(int id);
29 29
30int bigendian(void); 30int bigendian(void);
31 31
32void event_format__fprintf(struct tep_event_format *event, 32void event_format__fprintf(struct tep_event *event,
33 int cpu, void *data, int size, FILE *fp); 33 int cpu, void *data, int size, FILE *fp);
34 34
35void event_format__print(struct tep_event_format *event, 35void event_format__print(struct tep_event *event,
36 int cpu, void *data, int size); 36 int cpu, void *data, int size);
37 37
38int parse_ftrace_file(struct tep_handle *pevent, char *buf, unsigned long size); 38int parse_ftrace_file(struct tep_handle *pevent, char *buf, unsigned long size);
@@ -40,7 +40,7 @@ int parse_event_file(struct tep_handle *pevent,
40 char *buf, unsigned long size, char *sys); 40 char *buf, unsigned long size, char *sys);
41 41
42unsigned long long 42unsigned long long
43raw_field_value(struct tep_event_format *event, const char *name, void *data); 43raw_field_value(struct tep_event *event, const char *name, void *data);
44 44
45void parse_proc_kallsyms(struct tep_handle *pevent, char *file, unsigned int size); 45void parse_proc_kallsyms(struct tep_handle *pevent, char *file, unsigned int size);
46void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size); 46void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size);
@@ -48,9 +48,9 @@ void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int siz
48 48
49ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe); 49ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe);
50 50
51struct tep_event_format *trace_find_next_event(struct tep_handle *pevent, 51struct tep_event *trace_find_next_event(struct tep_handle *pevent,
52 struct tep_event_format *event); 52 struct tep_event *event);
53unsigned long long read_size(struct tep_event_format *event, void *ptr, int size); 53unsigned long long read_size(struct tep_event *event, void *ptr, int size);
54unsigned long long eval_flag(const char *flag); 54unsigned long long eval_flag(const char *flag);
55 55
56int read_tracing_data(int fd, struct list_head *pattrs); 56int read_tracing_data(int fd, struct list_head *pattrs);