-rw-r--r--  arch/x86/include/asm/perf_event.h | 3
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h | 1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c | 15
-rw-r--r--  tools/perf/Makefile.perf | 49
-rw-r--r--  tools/perf/arch/powerpc/util/skip-callchain-idx.c | 38
-rw-r--r--  tools/perf/builtin-inject.c | 4
-rw-r--r--  tools/perf/builtin-kvm.c | 25
-rw-r--r--  tools/perf/builtin-probe.c | 65
-rw-r--r--  tools/perf/builtin-record.c | 18
-rw-r--r--  tools/perf/builtin-script.c | 8
-rw-r--r--  tools/perf/builtin-timechart.c | 9
-rw-r--r--  tools/perf/builtin-trace.c | 6
-rw-r--r--  tools/perf/config/Makefile | 44
-rw-r--r--  tools/perf/config/Makefile.arch | 8
-rw-r--r--  tools/perf/config/feature-checks/Makefile | 16
-rw-r--r--  tools/perf/config/feature-checks/test-all.c | 5
-rw-r--r--  tools/perf/config/feature-checks/test-compile.c | 4
-rw-r--r--  tools/perf/config/feature-checks/test-zlib.c | 9
-rw-r--r--  tools/perf/perf-read-vdso.c | 34
-rw-r--r--  tools/perf/perf.h | 3
-rw-r--r--  tools/perf/scripts/python/bin/export-to-postgresql-record | 8
-rw-r--r--  tools/perf/scripts/python/bin/export-to-postgresql-report | 29
-rw-r--r--  tools/perf/scripts/python/export-to-postgresql.py | 444
-rw-r--r--  tools/perf/tests/code-reading.c | 13
-rw-r--r--  tools/perf/tests/dwarf-unwind.c | 18
-rw-r--r--  tools/perf/tests/hists_filter.c | 2
-rw-r--r--  tools/perf/tests/mmap-thread-lookup.c | 2
-rw-r--r--  tools/perf/util/build-id.c | 337
-rw-r--r--  tools/perf/util/build-id.h | 11
-rw-r--r--  tools/perf/util/callchain.c | 4
-rw-r--r--  tools/perf/util/callchain.h | 6
-rw-r--r--  tools/perf/util/comm.h | 4
-rw-r--r--  tools/perf/util/db-export.c | 428
-rw-r--r--  tools/perf/util/db-export.h | 107
-rw-r--r--  tools/perf/util/dso.c | 71
-rw-r--r--  tools/perf/util/dso.h | 13
-rw-r--r--  tools/perf/util/event.c | 20
-rw-r--r--  tools/perf/util/event.h | 42
-rw-r--r--  tools/perf/util/evlist.c | 28
-rw-r--r--  tools/perf/util/evsel.c | 2
-rw-r--r--  tools/perf/util/evsel.h | 5
-rw-r--r--  tools/perf/util/find-vdso-map.c | 30
-rw-r--r--  tools/perf/util/header.c | 350
-rw-r--r--  tools/perf/util/header.h | 8
-rw-r--r--  tools/perf/util/include/linux/bitmap.h | 17
-rw-r--r--  tools/perf/util/include/linux/bitops.h | 2
-rw-r--r--  tools/perf/util/machine.c | 93
-rw-r--r--  tools/perf/util/machine.h | 17
-rw-r--r--  tools/perf/util/map.c | 8
-rw-r--r--  tools/perf/util/map.h | 4
-rw-r--r--  tools/perf/util/parse-options.c | 78
-rw-r--r--  tools/perf/util/parse-options.h | 4
-rw-r--r--  tools/perf/util/pmu.c | 41
-rw-r--r--  tools/perf/util/pmu.h | 1
-rw-r--r--  tools/perf/util/probe-event.c | 18
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c | 29
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 440
-rw-r--r--  tools/perf/util/session.c | 141
-rw-r--r--  tools/perf/util/session.h | 15
-rw-r--r--  tools/perf/util/symbol-elf.c | 35
-rw-r--r--  tools/perf/util/symbol.c | 19
-rw-r--r--  tools/perf/util/symbol.h | 1
-rw-r--r--  tools/perf/util/thread-stack.c | 747
-rw-r--r--  tools/perf/util/thread-stack.h | 111
-rw-r--r--  tools/perf/util/thread.c | 9
-rw-r--r--  tools/perf/util/thread.h | 9
-rw-r--r--  tools/perf/util/tool.h | 3
-rw-r--r--  tools/perf/util/unwind-libdw.c | 8
-rw-r--r--  tools/perf/util/unwind-libunwind.c | 17
-rw-r--r--  tools/perf/util/unwind.h | 2
-rw-r--r--  tools/perf/util/util.h | 5
-rw-r--r--  tools/perf/util/vdso.c | 217
-rw-r--r--  tools/perf/util/vdso.h | 4
-rw-r--r--  tools/perf/util/zlib.c | 78
74 files changed, 3841 insertions, 678 deletions
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8dfc9fd094a3..dc0f6ed35b08 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -177,6 +177,9 @@ struct x86_pmu_capability {
177#define IBS_CAPS_BRNTRGT (1U<<5) 177#define IBS_CAPS_BRNTRGT (1U<<5)
178#define IBS_CAPS_OPCNTEXT (1U<<6) 178#define IBS_CAPS_OPCNTEXT (1U<<6)
179#define IBS_CAPS_RIPINVALIDCHK (1U<<7) 179#define IBS_CAPS_RIPINVALIDCHK (1U<<7)
180#define IBS_CAPS_OPBRNFUSE (1U<<8)
181#define IBS_CAPS_FETCHCTLEXTD (1U<<9)
182#define IBS_CAPS_OPDATA4 (1U<<10)
180 183
181#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \ 184#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
182 | IBS_CAPS_FETCHSAM \ 185 | IBS_CAPS_FETCHSAM \
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index e21331ce368f..8f02f6990759 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -206,6 +206,7 @@
206#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1) 206#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
207#define MSR_AMD64_IBSCTL 0xc001103a 207#define MSR_AMD64_IBSCTL 0xc001103a
208#define MSR_AMD64_IBSBRTARGET 0xc001103b 208#define MSR_AMD64_IBSBRTARGET 0xc001103b
209#define MSR_AMD64_IBSOPDATA4 0xc001103d
209#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ 210#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
210 211
211/* Fam 16h MSRs */ 212/* Fam 16h MSRs */
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index cbb1be3ed9e4..a61f5c6911da 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -565,6 +565,21 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
565 perf_ibs->offset_max, 565 perf_ibs->offset_max,
566 offset + 1); 566 offset + 1);
567 } while (offset < offset_max); 567 } while (offset < offset_max);
568 if (event->attr.sample_type & PERF_SAMPLE_RAW) {
569 /*
570 * Read IbsBrTarget and IbsOpData4 separately
571 * depending on their availability.
572 * Can't add to offset_max as they are staggered
573 */
574 if (ibs_caps & IBS_CAPS_BRNTRGT) {
575 rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
576 size++;
577 }
578 if (ibs_caps & IBS_CAPS_OPDATA4) {
579 rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
580 size++;
581 }
582 }
568 ibs_data.size = sizeof(u64) * size; 583 ibs_data.size = sizeof(u64) * size;
569 584
570 regs = *iregs; 585 regs = *iregs;
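
Note on the hunk above: IbsBrTarget and IbsOpData4 are appended to the raw sample data only when the corresponding ibs_caps bits are set, so the raw-data layout seen by userspace is variable. For context, here is a minimal userspace sketch (illustrative only, not part of this patch) of opening an ibs_op event with PERF_SAMPLE_RAW so those extra MSR values are delivered; reading the PMU type from sysfs is the usual way to address a dynamic PMU, but the helper name and period are made up for the example:

/*
 * Illustrative sketch, not part of this patch: open an ibs_op event
 * with PERF_SAMPLE_RAW so the extra MSR values (IbsBrTarget,
 * IbsOpData4, when advertised) show up in the raw sample data.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_ibs_op_raw(pid_t pid, int cpu)
{
	struct perf_event_attr attr;
	unsigned int type;
	FILE *f;

	/* The ibs_op PMU is dynamic; its type is exported via sysfs. */
	f = fopen("/sys/bus/event_source/devices/ibs_op/type", "r");
	if (!f)
		return -1;
	if (fscanf(f, "%u", &type) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_RAW;

	return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
}
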
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 262916f4a377..aecf61dcd754 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -60,6 +60,15 @@ include config/utilities.mak
60# 60#
61# Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support 61# Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support
62# for dwarf backtrace post unwind. 62# for dwarf backtrace post unwind.
63#
64# Define NO_PERF_READ_VDSO32 if you do not want to build perf-read-vdso32
65# for reading the 32-bit compatibility VDSO in 64-bit mode
66#
67# Define NO_PERF_READ_VDSOX32 if you do not want to build perf-read-vdsox32
68# for reading the x32 mode 32-bit compatibility VDSO in 64-bit mode
69#
70# Define NO_ZLIB if you do not want to support compressed kernel modules
71
63 72
64ifeq ($(srctree),) 73ifeq ($(srctree),)
65srctree := $(patsubst %/,%,$(dir $(shell pwd))) 74srctree := $(patsubst %/,%,$(dir $(shell pwd)))
@@ -171,11 +180,16 @@ $(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS)
171 180
172SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) 181SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH))
173 182
174#
175# Single 'perf' binary right now:
176#
177PROGRAMS += $(OUTPUT)perf 183PROGRAMS += $(OUTPUT)perf
178 184
185ifndef NO_PERF_READ_VDSO32
186PROGRAMS += $(OUTPUT)perf-read-vdso32
187endif
188
189ifndef NO_PERF_READ_VDSOX32
190PROGRAMS += $(OUTPUT)perf-read-vdsox32
191endif
192
179# what 'all' will build and 'install' will install, in perfexecdir 193# what 'all' will build and 'install' will install, in perfexecdir
180ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) 194ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
181 195
@@ -247,12 +261,14 @@ LIB_H += util/annotate.h
247LIB_H += util/cache.h 261LIB_H += util/cache.h
248LIB_H += util/callchain.h 262LIB_H += util/callchain.h
249LIB_H += util/build-id.h 263LIB_H += util/build-id.h
264LIB_H += util/db-export.h
250LIB_H += util/debug.h 265LIB_H += util/debug.h
251LIB_H += util/pmu.h 266LIB_H += util/pmu.h
252LIB_H += util/event.h 267LIB_H += util/event.h
253LIB_H += util/evsel.h 268LIB_H += util/evsel.h
254LIB_H += util/evlist.h 269LIB_H += util/evlist.h
255LIB_H += util/exec_cmd.h 270LIB_H += util/exec_cmd.h
271LIB_H += util/find-vdso-map.c
256LIB_H += util/levenshtein.h 272LIB_H += util/levenshtein.h
257LIB_H += util/machine.h 273LIB_H += util/machine.h
258LIB_H += util/map.h 274LIB_H += util/map.h
@@ -304,6 +320,7 @@ LIB_H += ui/util.h
304LIB_H += ui/ui.h 320LIB_H += ui/ui.h
305LIB_H += util/data.h 321LIB_H += util/data.h
306LIB_H += util/kvm-stat.h 322LIB_H += util/kvm-stat.h
323LIB_H += util/thread-stack.h
307 324
308LIB_OBJS += $(OUTPUT)util/abspath.o 325LIB_OBJS += $(OUTPUT)util/abspath.o
309LIB_OBJS += $(OUTPUT)util/alias.o 326LIB_OBJS += $(OUTPUT)util/alias.o
@@ -311,6 +328,7 @@ LIB_OBJS += $(OUTPUT)util/annotate.o
311LIB_OBJS += $(OUTPUT)util/build-id.o 328LIB_OBJS += $(OUTPUT)util/build-id.o
312LIB_OBJS += $(OUTPUT)util/config.o 329LIB_OBJS += $(OUTPUT)util/config.o
313LIB_OBJS += $(OUTPUT)util/ctype.o 330LIB_OBJS += $(OUTPUT)util/ctype.o
331LIB_OBJS += $(OUTPUT)util/db-export.o
314LIB_OBJS += $(OUTPUT)util/pmu.o 332LIB_OBJS += $(OUTPUT)util/pmu.o
315LIB_OBJS += $(OUTPUT)util/environment.o 333LIB_OBJS += $(OUTPUT)util/environment.o
316LIB_OBJS += $(OUTPUT)util/event.o 334LIB_OBJS += $(OUTPUT)util/event.o
@@ -380,6 +398,7 @@ LIB_OBJS += $(OUTPUT)util/srcline.o
380LIB_OBJS += $(OUTPUT)util/data.o 398LIB_OBJS += $(OUTPUT)util/data.o
381LIB_OBJS += $(OUTPUT)util/tsc.o 399LIB_OBJS += $(OUTPUT)util/tsc.o
382LIB_OBJS += $(OUTPUT)util/cloexec.o 400LIB_OBJS += $(OUTPUT)util/cloexec.o
401LIB_OBJS += $(OUTPUT)util/thread-stack.o
383 402
384LIB_OBJS += $(OUTPUT)ui/setup.o 403LIB_OBJS += $(OUTPUT)ui/setup.o
385LIB_OBJS += $(OUTPUT)ui/helpline.o 404LIB_OBJS += $(OUTPUT)ui/helpline.o
@@ -568,6 +587,10 @@ ifndef NO_LIBNUMA
568 BUILTIN_OBJS += $(OUTPUT)bench/numa.o 587 BUILTIN_OBJS += $(OUTPUT)bench/numa.o
569endif 588endif
570 589
590ifndef NO_ZLIB
591 LIB_OBJS += $(OUTPUT)util/zlib.o
592endif
593
571ifdef ASCIIDOC8 594ifdef ASCIIDOC8
572 export ASCIIDOC8 595 export ASCIIDOC8
573endif 596endif
@@ -732,6 +755,16 @@ $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Uti
732$(OUTPUT)perf-%: %.o $(PERFLIBS) 755$(OUTPUT)perf-%: %.o $(PERFLIBS)
733 $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS) 756 $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS)
734 757
758ifndef NO_PERF_READ_VDSO32
759$(OUTPUT)perf-read-vdso32: perf-read-vdso.c util/find-vdso-map.c
760 $(QUIET_CC)$(CC) -m32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c
761endif
762
763ifndef NO_PERF_READ_VDSOX32
764$(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-vdso-map.c
765 $(QUIET_CC)$(CC) -mx32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c
766endif
767
735$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H) 768$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
736$(patsubst perf-%,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) 769$(patsubst perf-%,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
737 770
@@ -876,6 +909,14 @@ install-bin: all install-gtk
876 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'; \ 909 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'; \
877 $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'; \ 910 $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'; \
878 $(LN) '$(DESTDIR_SQ)$(bindir_SQ)/perf' '$(DESTDIR_SQ)$(bindir_SQ)/trace' 911 $(LN) '$(DESTDIR_SQ)$(bindir_SQ)/perf' '$(DESTDIR_SQ)$(bindir_SQ)/trace'
912ifndef NO_PERF_READ_VDSO32
913 $(call QUIET_INSTALL, perf-read-vdso32) \
914 $(INSTALL) $(OUTPUT)perf-read-vdso32 '$(DESTDIR_SQ)$(bindir_SQ)';
915endif
916ifndef NO_PERF_READ_VDSOX32
917 $(call QUIET_INSTALL, perf-read-vdsox32) \
918 $(INSTALL) $(OUTPUT)perf-read-vdsox32 '$(DESTDIR_SQ)$(bindir_SQ)';
919endif
879 $(call QUIET_INSTALL, libexec) \ 920 $(call QUIET_INSTALL, libexec) \
880 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' 921 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
881 $(call QUIET_INSTALL, perf-archive) \ 922 $(call QUIET_INSTALL, perf-archive) \
@@ -928,7 +969,7 @@ config-clean:
928 969
929clean: $(LIBTRACEEVENT)-clean $(LIBAPIKFS)-clean config-clean 970clean: $(LIBTRACEEVENT)-clean $(LIBAPIKFS)-clean config-clean
930 $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(OUTPUT)perf.o $(LANG_BINDINGS) $(GTK_OBJS) 971 $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(OUTPUT)perf.o $(LANG_BINDINGS) $(GTK_OBJS)
931 $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf 972 $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32
932 $(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)PERF-FEATURES $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* 973 $(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)PERF-FEATURES $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex*
933 $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean 974 $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
934 $(python-clean) 975 $(python-clean)
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index d73ef8bb08c7..3bb50eac5542 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -145,7 +145,7 @@ static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
145 * yet used) 145 * yet used)
146 * -1 in case of errors 146 * -1 in case of errors
147 */ 147 */
148static int check_return_addr(const char *exec_file, Dwarf_Addr pc) 148static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
149{ 149{
150 int rc = -1; 150 int rc = -1;
151 Dwfl *dwfl; 151 Dwfl *dwfl;
@@ -156,15 +156,27 @@ static int check_return_addr(const char *exec_file, Dwarf_Addr pc)
156 Dwarf_Addr end = pc; 156 Dwarf_Addr end = pc;
157 bool signalp; 157 bool signalp;
158 158
159 dwfl = dwfl_begin(&offline_callbacks); 159 dwfl = dso->dwfl;
160 if (!dwfl) {
161 pr_debug("dwfl_begin() failed: %s\n", dwarf_errmsg(-1));
162 return -1;
163 }
164 160
165 if (dwfl_report_offline(dwfl, "", exec_file, -1) == NULL) { 161 if (!dwfl) {
166 pr_debug("dwfl_report_offline() failed %s\n", dwarf_errmsg(-1)); 162 dwfl = dwfl_begin(&offline_callbacks);
167 goto out; 163 if (!dwfl) {
164 pr_debug("dwfl_begin() failed: %s\n", dwarf_errmsg(-1));
165 return -1;
166 }
167
168 if (dwfl_report_offline(dwfl, "", dso->long_name, -1) == NULL) {
169 pr_debug("dwfl_report_offline() failed %s\n",
170 dwarf_errmsg(-1));
171 /*
172 * We normally cache the DWARF debug info and never
173 * call dwfl_end(). But to prevent fd leak, free in
174 * case of error.
175 */
176 dwfl_end(dwfl);
177 goto out;
178 }
179 dso->dwfl = dwfl;
168 } 180 }
169 181
170 mod = dwfl_addrmodule(dwfl, pc); 182 mod = dwfl_addrmodule(dwfl, pc);
@@ -194,7 +206,6 @@ static int check_return_addr(const char *exec_file, Dwarf_Addr pc)
194 rc = check_return_reg(ra_regno, frame); 206 rc = check_return_reg(ra_regno, frame);
195 207
196out: 208out:
197 dwfl_end(dwfl);
198 return rc; 209 return rc;
199} 210}
200 211
@@ -221,8 +232,7 @@ out:
221 * index: of callchain entry that needs to be ignored (if any) 232 * index: of callchain entry that needs to be ignored (if any)
222 * -1 if no entry needs to be ignored or in case of errors 233 * -1 if no entry needs to be ignored or in case of errors
223 */ 234 */
224int arch_skip_callchain_idx(struct machine *machine, struct thread *thread, 235int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
225 struct ip_callchain *chain)
226{ 236{
227 struct addr_location al; 237 struct addr_location al;
228 struct dso *dso = NULL; 238 struct dso *dso = NULL;
@@ -235,7 +245,7 @@ int arch_skip_callchain_idx(struct machine *machine, struct thread *thread,
235 245
236 ip = chain->ips[2]; 246 ip = chain->ips[2];
237 247
238 thread__find_addr_location(thread, machine, PERF_RECORD_MISC_USER, 248 thread__find_addr_location(thread, PERF_RECORD_MISC_USER,
239 MAP__FUNCTION, ip, &al); 249 MAP__FUNCTION, ip, &al);
240 250
241 if (al.map) 251 if (al.map)
@@ -246,7 +256,7 @@ int arch_skip_callchain_idx(struct machine *machine, struct thread *thread,
246 return skip_slot; 256 return skip_slot;
247 } 257 }
248 258
249 rc = check_return_addr(dso->long_name, ip); 259 rc = check_return_addr(dso, ip);
250 260
251 pr_debug("DSO %s, nr %" PRIx64 ", ip 0x%" PRIx64 "rc %d\n", 261 pr_debug("DSO %s, nr %" PRIx64 ", ip 0x%" PRIx64 "rc %d\n",
252 dso->long_name, chain->nr, ip, rc); 262 dso->long_name, chain->nr, ip, rc);
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index de99ca1bb942..84df2deed988 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -217,8 +217,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
217 goto repipe; 217 goto repipe;
218 } 218 }
219 219
220 thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, 220 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, &al);
221 sample->ip, &al);
222 221
223 if (al.map != NULL) { 222 if (al.map != NULL) {
224 if (!al.map->dso->hit) { 223 if (!al.map->dso->hit) {
@@ -410,6 +409,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
410 .tracing_data = perf_event__repipe_op2_synth, 409 .tracing_data = perf_event__repipe_op2_synth,
411 .finished_round = perf_event__repipe_op2_synth, 410 .finished_round = perf_event__repipe_op2_synth,
412 .build_id = perf_event__repipe_op2_synth, 411 .build_id = perf_event__repipe_op2_synth,
412 .id_index = perf_event__repipe_op2_synth,
413 }, 413 },
414 .input_name = "-", 414 .input_name = "-",
415 .samples = LIST_HEAD_INIT(inject.samples), 415 .samples = LIST_HEAD_INIT(inject.samples),
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index b65eb0507b38..3c0f3d4fb021 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1132,6 +1132,10 @@ kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
1132 "-m", "1024", 1132 "-m", "1024",
1133 "-c", "1", 1133 "-c", "1",
1134 }; 1134 };
1135 const char * const kvm_stat_record_usage[] = {
1136 "perf kvm stat record [<options>]",
1137 NULL
1138 };
1135 const char * const *events_tp; 1139 const char * const *events_tp;
1136 events_tp_size = 0; 1140 events_tp_size = 0;
1137 1141
@@ -1159,6 +1163,27 @@ kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
1159 for (j = 1; j < (unsigned int)argc; j++, i++) 1163 for (j = 1; j < (unsigned int)argc; j++, i++)
1160 rec_argv[i] = argv[j]; 1164 rec_argv[i] = argv[j];
1161 1165
1166 set_option_flag(record_options, 'e', "event", PARSE_OPT_HIDDEN);
1167 set_option_flag(record_options, 0, "filter", PARSE_OPT_HIDDEN);
1168 set_option_flag(record_options, 'R', "raw-samples", PARSE_OPT_HIDDEN);
1169
1170 set_option_flag(record_options, 'F', "freq", PARSE_OPT_DISABLED);
1171 set_option_flag(record_options, 0, "group", PARSE_OPT_DISABLED);
1172 set_option_flag(record_options, 'g', NULL, PARSE_OPT_DISABLED);
1173 set_option_flag(record_options, 0, "call-graph", PARSE_OPT_DISABLED);
1174 set_option_flag(record_options, 'd', "data", PARSE_OPT_DISABLED);
1175 set_option_flag(record_options, 'T', "timestamp", PARSE_OPT_DISABLED);
1176 set_option_flag(record_options, 'P', "period", PARSE_OPT_DISABLED);
1177 set_option_flag(record_options, 'n', "no-samples", PARSE_OPT_DISABLED);
1178 set_option_flag(record_options, 'N', "no-buildid-cache", PARSE_OPT_DISABLED);
1179 set_option_flag(record_options, 'B', "no-buildid", PARSE_OPT_DISABLED);
1180 set_option_flag(record_options, 'G', "cgroup", PARSE_OPT_DISABLED);
1181 set_option_flag(record_options, 'b', "branch-any", PARSE_OPT_DISABLED);
1182 set_option_flag(record_options, 'j', "branch-filter", PARSE_OPT_DISABLED);
1183 set_option_flag(record_options, 'W', "weight", PARSE_OPT_DISABLED);
1184 set_option_flag(record_options, 0, "transaction", PARSE_OPT_DISABLED);
1185
1186 record_usage = kvm_stat_record_usage;
1162 return cmd_record(i, rec_argv, NULL); 1187 return cmd_record(i, rec_argv, NULL);
1163} 1188}
1164 1189
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 7af26acf06d9..921bb6942503 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -55,6 +55,7 @@ static struct {
55 bool show_funcs; 55 bool show_funcs;
56 bool mod_events; 56 bool mod_events;
57 bool uprobes; 57 bool uprobes;
58 bool quiet;
58 int nevents; 59 int nevents;
59 struct perf_probe_event events[MAX_PROBES]; 60 struct perf_probe_event events[MAX_PROBES];
60 struct strlist *dellist; 61 struct strlist *dellist;
@@ -312,9 +313,11 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
312#endif 313#endif
313 NULL 314 NULL
314}; 315};
315 const struct option options[] = { 316 struct option options[] = {
316 OPT_INCR('v', "verbose", &verbose, 317 OPT_INCR('v', "verbose", &verbose,
317 "be more verbose (show parsed arguments, etc)"), 318 "be more verbose (show parsed arguments, etc)"),
319 OPT_BOOLEAN('q', "quiet", &params.quiet,
320 "be quiet (do not show any mesages)"),
318 OPT_BOOLEAN('l', "list", &params.list_events, 321 OPT_BOOLEAN('l', "list", &params.list_events,
319 "list up current probe events"), 322 "list up current probe events"),
320 OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.", 323 OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.",
@@ -382,6 +385,14 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
382 }; 385 };
383 int ret; 386 int ret;
384 387
388 set_option_flag(options, 'a', "add", PARSE_OPT_EXCLUSIVE);
389 set_option_flag(options, 'd', "del", PARSE_OPT_EXCLUSIVE);
390 set_option_flag(options, 'l', "list", PARSE_OPT_EXCLUSIVE);
391#ifdef HAVE_DWARF_SUPPORT
392 set_option_flag(options, 'L', "line", PARSE_OPT_EXCLUSIVE);
393 set_option_flag(options, 'V', "vars", PARSE_OPT_EXCLUSIVE);
394#endif
395
385 argc = parse_options(argc, argv, options, probe_usage, 396 argc = parse_options(argc, argv, options, probe_usage,
386 PARSE_OPT_STOP_AT_NON_OPTION); 397 PARSE_OPT_STOP_AT_NON_OPTION);
387 if (argc > 0) { 398 if (argc > 0) {
@@ -396,6 +407,14 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
396 } 407 }
397 } 408 }
398 409
410 if (params.quiet) {
411 if (verbose != 0) {
412 pr_err(" Error: -v and -q are exclusive.\n");
413 return -EINVAL;
414 }
415 verbose = -1;
416 }
417
399 if (params.max_probe_points == 0) 418 if (params.max_probe_points == 0)
400 params.max_probe_points = MAX_PROBES; 419 params.max_probe_points = MAX_PROBES;
401 420
@@ -409,22 +428,6 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
409 symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); 428 symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
410 429
411 if (params.list_events) { 430 if (params.list_events) {
412 if (params.mod_events) {
413 pr_err(" Error: Don't use --list with --add/--del.\n");
414 usage_with_options(probe_usage, options);
415 }
416 if (params.show_lines) {
417 pr_err(" Error: Don't use --list with --line.\n");
418 usage_with_options(probe_usage, options);
419 }
420 if (params.show_vars) {
421 pr_err(" Error: Don't use --list with --vars.\n");
422 usage_with_options(probe_usage, options);
423 }
424 if (params.show_funcs) {
425 pr_err(" Error: Don't use --list with --funcs.\n");
426 usage_with_options(probe_usage, options);
427 }
428 if (params.uprobes) { 431 if (params.uprobes) {
429 pr_warning(" Error: Don't use --list with --exec.\n"); 432 pr_warning(" Error: Don't use --list with --exec.\n");
430 usage_with_options(probe_usage, options); 433 usage_with_options(probe_usage, options);
@@ -435,19 +438,6 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
435 return ret; 438 return ret;
436 } 439 }
437 if (params.show_funcs) { 440 if (params.show_funcs) {
438 if (params.nevents != 0 || params.dellist) {
439 pr_err(" Error: Don't use --funcs with"
440 " --add/--del.\n");
441 usage_with_options(probe_usage, options);
442 }
443 if (params.show_lines) {
444 pr_err(" Error: Don't use --funcs with --line.\n");
445 usage_with_options(probe_usage, options);
446 }
447 if (params.show_vars) {
448 pr_err(" Error: Don't use --funcs with --vars.\n");
449 usage_with_options(probe_usage, options);
450 }
451 if (!params.filter) 441 if (!params.filter)
452 params.filter = strfilter__new(DEFAULT_FUNC_FILTER, 442 params.filter = strfilter__new(DEFAULT_FUNC_FILTER,
453 NULL); 443 NULL);
@@ -462,16 +452,6 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
462 452
463#ifdef HAVE_DWARF_SUPPORT 453#ifdef HAVE_DWARF_SUPPORT
464 if (params.show_lines) { 454 if (params.show_lines) {
465 if (params.mod_events) {
466 pr_err(" Error: Don't use --line with"
467 " --add/--del.\n");
468 usage_with_options(probe_usage, options);
469 }
470 if (params.show_vars) {
471 pr_err(" Error: Don't use --line with --vars.\n");
472 usage_with_options(probe_usage, options);
473 }
474
475 ret = show_line_range(&params.line_range, params.target, 455 ret = show_line_range(&params.line_range, params.target,
476 params.uprobes); 456 params.uprobes);
477 if (ret < 0) 457 if (ret < 0)
@@ -479,11 +459,6 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
479 return ret; 459 return ret;
480 } 460 }
481 if (params.show_vars) { 461 if (params.show_vars) {
482 if (params.mod_events) {
483 pr_err(" Error: Don't use --vars with"
484 " --add/--del.\n");
485 usage_with_options(probe_usage, options);
486 }
487 if (!params.filter) 462 if (!params.filter)
488 params.filter = strfilter__new(DEFAULT_VAR_FILTER, 463 params.filter = strfilter__new(DEFAULT_VAR_FILTER,
489 NULL); 464 NULL);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 2583a9b04317..582c4da155ea 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -200,6 +200,17 @@ static int process_buildids(struct record *rec)
200 if (size == 0) 200 if (size == 0)
201 return 0; 201 return 0;
202 202
203 /*
204 * During this process, it'll load kernel map and replace the
205 * dso->long_name to a real pathname it found. In this case
206 * we prefer the vmlinux path like
207 * /lib/modules/3.16.4/build/vmlinux
208 *
209 * rather than build-id path (in debug directory).
210 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
211 */
212 symbol_conf.ignore_vmlinux_buildid = true;
213
203 return __perf_session__process_events(session, start, 214 return __perf_session__process_events(session, start,
204 size - start, 215 size - start,
205 size, &build_id__mark_dso_hit_ops); 216 size, &build_id__mark_dso_hit_ops);
@@ -680,11 +691,12 @@ static int perf_record_config(const char *var, const char *value, void *cb)
680 return perf_default_config(var, value, cb); 691 return perf_default_config(var, value, cb);
681} 692}
682 693
683static const char * const record_usage[] = { 694static const char * const __record_usage[] = {
684 "perf record [<options>] [<command>]", 695 "perf record [<options>] [<command>]",
685 "perf record [<options>] -- <command> [<options>]", 696 "perf record [<options>] -- <command> [<options>]",
686 NULL 697 NULL
687}; 698};
699const char * const *record_usage = __record_usage;
688 700
689/* 701/*
690 * XXX Ideally would be local to cmd_record() and passed to a record__new 702 * XXX Ideally would be local to cmd_record() and passed to a record__new
@@ -725,7 +737,7 @@ const char record_callchain_help[] = CALLCHAIN_HELP "fp";
725 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record', 737 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
726 * using pipes, etc. 738 * using pipes, etc.
727 */ 739 */
728const struct option record_options[] = { 740struct option __record_options[] = {
729 OPT_CALLBACK('e', "event", &record.evlist, "event", 741 OPT_CALLBACK('e', "event", &record.evlist, "event",
730 "event selector. use 'perf list' to list available events", 742 "event selector. use 'perf list' to list available events",
731 parse_events_option), 743 parse_events_option),
@@ -802,6 +814,8 @@ const struct option record_options[] = {
802 OPT_END() 814 OPT_END()
803}; 815};
804 816
817struct option *record_options = __record_options;
818
805int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused) 819int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
806{ 820{
807 int err = -ENOMEM; 821 int err = -ENOMEM;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 9708a1290571..ce304dfd962a 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -23,7 +23,6 @@ static char const *generate_script_lang;
23static bool debug_mode; 23static bool debug_mode;
24static u64 last_timestamp; 24static u64 last_timestamp;
25static u64 nr_unordered; 25static u64 nr_unordered;
26extern const struct option record_options[];
27static bool no_callchain; 26static bool no_callchain;
28static bool latency_format; 27static bool latency_format;
29static bool system_wide; 28static bool system_wide;
@@ -379,7 +378,6 @@ static void print_sample_start(struct perf_sample *sample,
379 378
380static void print_sample_addr(union perf_event *event, 379static void print_sample_addr(union perf_event *event,
381 struct perf_sample *sample, 380 struct perf_sample *sample,
382 struct machine *machine,
383 struct thread *thread, 381 struct thread *thread,
384 struct perf_event_attr *attr) 382 struct perf_event_attr *attr)
385{ 383{
@@ -390,7 +388,7 @@ static void print_sample_addr(union perf_event *event,
390 if (!sample_addr_correlates_sym(attr)) 388 if (!sample_addr_correlates_sym(attr))
391 return; 389 return;
392 390
393 perf_event__preprocess_sample_addr(event, sample, machine, thread, &al); 391 perf_event__preprocess_sample_addr(event, sample, thread, &al);
394 392
395 if (PRINT_FIELD(SYM)) { 393 if (PRINT_FIELD(SYM)) {
396 printf(" "); 394 printf(" ");
@@ -438,7 +436,7 @@ static void print_sample_bts(union perf_event *event,
438 ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) && 436 ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
439 !output[attr->type].user_set)) { 437 !output[attr->type].user_set)) {
440 printf(" => "); 438 printf(" => ");
441 print_sample_addr(event, sample, al->machine, thread, attr); 439 print_sample_addr(event, sample, thread, attr);
442 } 440 }
443 441
444 if (print_srcline_last) 442 if (print_srcline_last)
@@ -475,7 +473,7 @@ static void process_event(union perf_event *event, struct perf_sample *sample,
475 event_format__print(evsel->tp_format, sample->cpu, 473 event_format__print(evsel->tp_format, sample->cpu,
476 sample->raw_data, sample->raw_size); 474 sample->raw_data, sample->raw_size);
477 if (PRINT_FIELD(ADDR)) 475 if (PRINT_FIELD(ADDR))
478 print_sample_addr(event, sample, al->machine, thread, attr); 476 print_sample_addr(event, sample, thread, attr);
479 477
480 if (PRINT_FIELD(IP)) { 478 if (PRINT_FIELD(IP)) {
481 if (!symbol_conf.use_callchain) 479 if (!symbol_conf.use_callchain)
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 35b425b6293f..f3bb1a4bf060 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -528,7 +528,7 @@ static const char *cat_backtrace(union perf_event *event,
528 } 528 }
529 529
530 tal.filtered = 0; 530 tal.filtered = 0;
531 thread__find_addr_location(al.thread, machine, cpumode, 531 thread__find_addr_location(al.thread, cpumode,
532 MAP__FUNCTION, ip, &tal); 532 MAP__FUNCTION, ip, &tal);
533 533
534 if (tal.sym) 534 if (tal.sym)
@@ -1963,7 +1963,7 @@ int cmd_timechart(int argc, const char **argv,
1963 NULL 1963 NULL
1964 }; 1964 };
1965 1965
1966 const struct option record_options[] = { 1966 const struct option timechart_record_options[] = {
1967 OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"), 1967 OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
1968 OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only, 1968 OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
1969 "output processes data only"), 1969 "output processes data only"),
@@ -1972,7 +1972,7 @@ int cmd_timechart(int argc, const char **argv,
1972 OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"), 1972 OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
1973 OPT_END() 1973 OPT_END()
1974 }; 1974 };
1975 const char * const record_usage[] = { 1975 const char * const timechart_record_usage[] = {
1976 "perf timechart record [<options>]", 1976 "perf timechart record [<options>]",
1977 NULL 1977 NULL
1978 }; 1978 };
@@ -1985,7 +1985,8 @@ int cmd_timechart(int argc, const char **argv,
1985 } 1985 }
1986 1986
1987 if (argc && !strncmp(argv[0], "rec", 3)) { 1987 if (argc && !strncmp(argv[0], "rec", 3)) {
1988 argc = parse_options(argc, argv, record_options, record_usage, 1988 argc = parse_options(argc, argv, timechart_record_options,
1989 timechart_record_usage,
1989 PARSE_OPT_STOP_AT_NON_OPTION); 1990 PARSE_OPT_STOP_AT_NON_OPTION);
1990 1991
1991 if (tchart.power_only && tchart.tasks_only) { 1992 if (tchart.power_only && tchart.tasks_only) {
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index fb126459b134..83a4835c8118 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1846,7 +1846,7 @@ static int trace__pgfault(struct trace *trace,
1846 if (trace->summary_only) 1846 if (trace->summary_only)
1847 return 0; 1847 return 0;
1848 1848
1849 thread__find_addr_location(thread, trace->host, cpumode, MAP__FUNCTION, 1849 thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
1850 sample->ip, &al); 1850 sample->ip, &al);
1851 1851
1852 trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output); 1852 trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);
@@ -1859,11 +1859,11 @@ static int trace__pgfault(struct trace *trace,
1859 1859
1860 fprintf(trace->output, "] => "); 1860 fprintf(trace->output, "] => ");
1861 1861
1862 thread__find_addr_location(thread, trace->host, cpumode, MAP__VARIABLE, 1862 thread__find_addr_location(thread, cpumode, MAP__VARIABLE,
1863 sample->addr, &al); 1863 sample->addr, &al);
1864 1864
1865 if (!al.map) { 1865 if (!al.map) {
1866 thread__find_addr_location(thread, trace->host, cpumode, 1866 thread__find_addr_location(thread, cpumode,
1867 MAP__FUNCTION, sample->addr, &al); 1867 MAP__FUNCTION, sample->addr, &al);
1868 1868
1869 if (al.map) 1869 if (al.map)
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 58f609198c6d..79f906c7124e 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -200,7 +200,8 @@ CORE_FEATURE_TESTS = \
200 libunwind \ 200 libunwind \
201 stackprotector-all \ 201 stackprotector-all \
202 timerfd \ 202 timerfd \
203 libdw-dwarf-unwind 203 libdw-dwarf-unwind \
204 zlib
204 205
205LIB_FEATURE_TESTS = \ 206LIB_FEATURE_TESTS = \
206 dwarf \ 207 dwarf \
@@ -214,7 +215,8 @@ LIB_FEATURE_TESTS = \
214 libpython \ 215 libpython \
215 libslang \ 216 libslang \
216 libunwind \ 217 libunwind \
217 libdw-dwarf-unwind 218 libdw-dwarf-unwind \
219 zlib
218 220
219VF_FEATURE_TESTS = \ 221VF_FEATURE_TESTS = \
220 backtrace \ 222 backtrace \
@@ -230,7 +232,9 @@ VF_FEATURE_TESTS = \
230 bionic \ 232 bionic \
231 liberty \ 233 liberty \
232 liberty-z \ 234 liberty-z \
233 cplus-demangle 235 cplus-demangle \
236 compile-32 \
237 compile-x32
234 238
235# Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features. 239# Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features.
236# If in the future we need per-feature checks/flags for features not 240# If in the future we need per-feature checks/flags for features not
@@ -602,6 +606,15 @@ ifneq ($(filter -lbfd,$(EXTLIBS)),)
602 CFLAGS += -DHAVE_LIBBFD_SUPPORT 606 CFLAGS += -DHAVE_LIBBFD_SUPPORT
603endif 607endif
604 608
609ifndef NO_ZLIB
610 ifeq ($(feature-zlib), 1)
611 CFLAGS += -DHAVE_ZLIB_SUPPORT
612 EXTLIBS += -lz
613 else
614 NO_ZLIB := 1
615 endif
616endif
617
605ifndef NO_BACKTRACE 618ifndef NO_BACKTRACE
606 ifeq ($(feature-backtrace), 1) 619 ifeq ($(feature-backtrace), 1)
607 CFLAGS += -DHAVE_BACKTRACE_SUPPORT 620 CFLAGS += -DHAVE_BACKTRACE_SUPPORT
@@ -622,6 +635,31 @@ ifdef HAVE_KVM_STAT_SUPPORT
622 CFLAGS += -DHAVE_KVM_STAT_SUPPORT 635 CFLAGS += -DHAVE_KVM_STAT_SUPPORT
623endif 636endif
624 637
638ifeq (${IS_64_BIT}, 1)
639 ifndef NO_PERF_READ_VDSO32
640 $(call feature_check,compile-32)
641 ifeq ($(feature-compile-32), 1)
642 CFLAGS += -DHAVE_PERF_READ_VDSO32
643 else
644 NO_PERF_READ_VDSO32 := 1
645 endif
646 endif
647 ifneq (${IS_X86_64}, 1)
648 NO_PERF_READ_VDSOX32 := 1
649 endif
650 ifndef NO_PERF_READ_VDSOX32
651 $(call feature_check,compile-x32)
652 ifeq ($(feature-compile-x32), 1)
653 CFLAGS += -DHAVE_PERF_READ_VDSOX32
654 else
655 NO_PERF_READ_VDSOX32 := 1
656 endif
657 endif
658else
659 NO_PERF_READ_VDSO32 := 1
660 NO_PERF_READ_VDSOX32 := 1
661endif
662
625# Among the variables below, these: 663# Among the variables below, these:
626# perfexecdir 664# perfexecdir
627# template_dir 665# template_dir
diff --git a/tools/perf/config/Makefile.arch b/tools/perf/config/Makefile.arch
index 4b06719ee984..851cd0172a76 100644
--- a/tools/perf/config/Makefile.arch
+++ b/tools/perf/config/Makefile.arch
@@ -21,3 +21,11 @@ ifeq ($(ARCH),x86_64)
21 RAW_ARCH := x86_64 21 RAW_ARCH := x86_64
22 endif 22 endif
23endif 23endif
24
25ifeq (${IS_X86_64}, 1)
26 IS_64_BIT := 1
27else ifeq ($(ARCH),x86)
28 IS_64_BIT := 0
29else
30 IS_64_BIT := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
31endif
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
index 72ab2984718e..53f19b5dbc37 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/perf/config/feature-checks/Makefile
@@ -27,7 +27,10 @@ FILES= \
27 test-libunwind-debug-frame.bin \ 27 test-libunwind-debug-frame.bin \
28 test-stackprotector-all.bin \ 28 test-stackprotector-all.bin \
29 test-timerfd.bin \ 29 test-timerfd.bin \
30 test-libdw-dwarf-unwind.bin 30 test-libdw-dwarf-unwind.bin \
31 test-compile-32.bin \
32 test-compile-x32.bin \
33 test-zlib.bin
31 34
32CC := $(CROSS_COMPILE)gcc -MD 35CC := $(CROSS_COMPILE)gcc -MD
33PKG_CONFIG := $(CROSS_COMPILE)pkg-config 36PKG_CONFIG := $(CROSS_COMPILE)pkg-config
@@ -39,7 +42,7 @@ BUILD = $(CC) $(CFLAGS) -o $(OUTPUT)$@ $(patsubst %.bin,%.c,$@) $(LDFLAGS)
39############################### 42###############################
40 43
41test-all.bin: 44test-all.bin:
42 $(BUILD) -Werror -fstack-protector-all -O2 -Werror -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -laudit -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl 45 $(BUILD) -Werror -fstack-protector-all -O2 -Werror -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -laudit -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz
43 46
44test-hello.bin: 47test-hello.bin:
45 $(BUILD) 48 $(BUILD)
@@ -131,6 +134,15 @@ test-libdw-dwarf-unwind.bin:
131test-sync-compare-and-swap.bin: 134test-sync-compare-and-swap.bin:
132 $(BUILD) -Werror 135 $(BUILD) -Werror
133 136
137test-compile-32.bin:
138 $(CC) -m32 -o $(OUTPUT)$@ test-compile.c
139
140test-compile-x32.bin:
141 $(CC) -mx32 -o $(OUTPUT)$@ test-compile.c
142
143test-zlib.bin:
144 $(BUILD) -lz
145
134-include *.d 146-include *.d
135 147
136############################### 148###############################
diff --git a/tools/perf/config/feature-checks/test-all.c b/tools/perf/config/feature-checks/test-all.c
index a7d022e161c0..652e0098eba6 100644
--- a/tools/perf/config/feature-checks/test-all.c
+++ b/tools/perf/config/feature-checks/test-all.c
@@ -93,6 +93,10 @@
93# include "test-sync-compare-and-swap.c" 93# include "test-sync-compare-and-swap.c"
94#undef main 94#undef main
95 95
96#define main main_test_zlib
97# include "test-zlib.c"
98#undef main
99
96int main(int argc, char *argv[]) 100int main(int argc, char *argv[])
97{ 101{
98 main_test_libpython(); 102 main_test_libpython();
@@ -116,6 +120,7 @@ int main(int argc, char *argv[])
116 main_test_stackprotector_all(); 120 main_test_stackprotector_all();
117 main_test_libdw_dwarf_unwind(); 121 main_test_libdw_dwarf_unwind();
118 main_test_sync_compare_and_swap(argc, argv); 122 main_test_sync_compare_and_swap(argc, argv);
123 main_test_zlib();
119 124
120 return 0; 125 return 0;
121} 126}
diff --git a/tools/perf/config/feature-checks/test-compile.c b/tools/perf/config/feature-checks/test-compile.c
new file mode 100644
index 000000000000..31dbf45bf99c
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-compile.c
@@ -0,0 +1,4 @@
1int main(void)
2{
3 return 0;
4}
diff --git a/tools/perf/config/feature-checks/test-zlib.c b/tools/perf/config/feature-checks/test-zlib.c
new file mode 100644
index 000000000000..e111fff6240e
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-zlib.c
@@ -0,0 +1,9 @@
1#include <zlib.h>
2
3int main(void)
4{
5 z_stream zs;
6
7 inflateInit(&zs);
8 return 0;
9}
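
The feature test above only proves that zlib is present and links; the actual inflation of compressed kernel modules is done by the new util/zlib.c, which is not reproduced in this diff. Purely as a hypothetical sketch of that kind of helper (the function name and signature below are illustrative, not the real util/zlib.c API), decompressing a gzip file into an open file descriptor with zlib's gzopen/gzread could look like:

/* Hypothetical sketch; util/zlib.c from this series is not shown here. */
#include <unistd.h>
#include <zlib.h>

static int gzip_decompress_to_fd(const char *path, int out_fd)
{
	char buf[8192];
	gzFile gz;
	int n, ret = 0;

	gz = gzopen(path, "rb");
	if (!gz)
		return -1;

	/* gzread() transparently inflates; copy chunks to out_fd. */
	while ((n = gzread(gz, buf, sizeof(buf))) > 0) {
		if (write(out_fd, buf, n) != n) {
			ret = -1;
			break;
		}
	}
	if (n < 0)
		ret = -1;

	gzclose(gz);
	return ret;
}
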
diff --git a/tools/perf/perf-read-vdso.c b/tools/perf/perf-read-vdso.c
new file mode 100644
index 000000000000..764e2547c25a
--- /dev/null
+++ b/tools/perf/perf-read-vdso.c
@@ -0,0 +1,34 @@
1#include <stdio.h>
2#include <string.h>
3
4#define VDSO__MAP_NAME "[vdso]"
5
6/*
7 * Include definition of find_vdso_map() also used in util/vdso.c for
8 * building perf.
9 */
10#include "util/find-vdso-map.c"
11
12int main(void)
13{
14 void *start, *end;
15 size_t size, written;
16
17 if (find_vdso_map(&start, &end))
18 return 1;
19
20 size = end - start;
21
22 while (size) {
23 written = fwrite(start, 1, size, stdout);
24 if (!written)
25 return 1;
26 start += written;
27 size -= written;
28 }
29
30 if (fflush(stdout))
31 return 1;
32
33 return 0;
34}
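
perf-read-vdso.c depends on find_vdso_map(), which lives in the new util/find-vdso-map.c so it can be shared with util/vdso.c; that file is not shown in this diff. Purely to illustrate the idea (the real helper's implementation may differ), locating the [vdso] mapping by scanning /proc/self/maps could be sketched as follows:

/* Illustrative sketch only; the actual util/find-vdso-map.c is not shown here. */
#include <stdio.h>
#include <string.h>

static int find_vdso_map(void **start, void **end)
{
	char line[128];
	FILE *maps;
	int found = 0;

	maps = fopen("/proc/self/maps", "r");
	if (!maps)
		return -1;

	/* Look for the mapping labelled "[vdso]" and parse its address range. */
	while (!found && fgets(line, sizeof(line), maps)) {
		if (strstr(line, "[vdso]") &&
		    sscanf(line, "%p-%p", start, end) == 2)
			found = 1;
	}

	fclose(maps);
	return !found;
}
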
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 220d44e44c1b..511c2831aa81 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -62,4 +62,7 @@ struct record_opts {
62 unsigned initial_delay; 62 unsigned initial_delay;
63}; 63};
64 64
65struct option;
66extern const char * const *record_usage;
67extern struct option *record_options;
65#endif 68#endif
diff --git a/tools/perf/scripts/python/bin/export-to-postgresql-record b/tools/perf/scripts/python/bin/export-to-postgresql-record
new file mode 100644
index 000000000000..221d66e05713
--- /dev/null
+++ b/tools/perf/scripts/python/bin/export-to-postgresql-record
@@ -0,0 +1,8 @@
1#!/bin/bash
2
3#
4# export perf data to a postgresql database. Can cover
5# perf ip samples (excluding the tracepoints). No special
6# record requirements, just record what you want to export.
7#
8perf record $@
diff --git a/tools/perf/scripts/python/bin/export-to-postgresql-report b/tools/perf/scripts/python/bin/export-to-postgresql-report
new file mode 100644
index 000000000000..cd335b6e2a01
--- /dev/null
+++ b/tools/perf/scripts/python/bin/export-to-postgresql-report
@@ -0,0 +1,29 @@
1#!/bin/bash
2# description: export perf data to a postgresql database
3# args: [database name] [columns] [calls]
4n_args=0
5for i in "$@"
6do
7 if expr match "$i" "-" > /dev/null ; then
8 break
9 fi
10 n_args=$(( $n_args + 1 ))
11done
12if [ "$n_args" -gt 3 ] ; then
13 echo "usage: export-to-postgresql-report [database name] [columns] [calls]"
14 exit
15fi
16if [ "$n_args" -gt 2 ] ; then
17 dbname=$1
18 columns=$2
19 calls=$3
20 shift 3
21elif [ "$n_args" -gt 1 ] ; then
22 dbname=$1
23 columns=$2
24 shift 2
25elif [ "$n_args" -gt 0 ] ; then
26 dbname=$1
27 shift
28fi
29perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/export-to-postgresql.py $dbname $columns $calls
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
new file mode 100644
index 000000000000..4cdafd880074
--- /dev/null
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -0,0 +1,444 @@
1# export-to-postgresql.py: export perf data to a postgresql database
2# Copyright (c) 2014, Intel Corporation.
3#
4# This program is free software; you can redistribute it and/or modify it
5# under the terms and conditions of the GNU General Public License,
6# version 2, as published by the Free Software Foundation.
7#
8# This program is distributed in the hope it will be useful, but WITHOUT
9# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11# more details.
12
13import os
14import sys
15import struct
16import datetime
17
18from PySide.QtSql import *
19
20# Need to access PostgreSQL C library directly to use COPY FROM STDIN
21from ctypes import *
22libpq = CDLL("libpq.so.5")
23PQconnectdb = libpq.PQconnectdb
24PQconnectdb.restype = c_void_p
25PQfinish = libpq.PQfinish
26PQstatus = libpq.PQstatus
27PQexec = libpq.PQexec
28PQexec.restype = c_void_p
29PQresultStatus = libpq.PQresultStatus
30PQputCopyData = libpq.PQputCopyData
31PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
32PQputCopyEnd = libpq.PQputCopyEnd
33PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
34
35sys.path.append(os.environ['PERF_EXEC_PATH'] + \
36 '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
37
38# These perf imports are not used at present
39#from perf_trace_context import *
40#from Core import *
41
42perf_db_export_mode = True
43perf_db_export_calls = False
44
45def usage():
46 print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>]"
47 print >> sys.stderr, "where: columns 'all' or 'branches'"
48 print >> sys.stderr, " calls 'calls' => create calls table"
49 raise Exception("Too few arguments")
50
51if (len(sys.argv) < 2):
52 usage()
53
54dbname = sys.argv[1]
55
56if (len(sys.argv) >= 3):
57 columns = sys.argv[2]
58else:
59 columns = "all"
60
61if columns not in ("all", "branches"):
62 usage()
63
64branches = (columns == "branches")
65
66if (len(sys.argv) >= 4):
67 if (sys.argv[3] == "calls"):
68 perf_db_export_calls = True
69 else:
70 usage()
71
72output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
73os.mkdir(output_dir_name)
74
75def do_query(q, s):
76 if (q.exec_(s)):
77 return
78 raise Exception("Query failed: " + q.lastError().text())
79
80print datetime.datetime.today(), "Creating database..."
81
82db = QSqlDatabase.addDatabase('QPSQL')
83query = QSqlQuery(db)
84db.setDatabaseName('postgres')
85db.open()
86try:
87 do_query(query, 'CREATE DATABASE ' + dbname)
88except:
89 os.rmdir(output_dir_name)
90 raise
91query.finish()
92query.clear()
93db.close()
94
95db.setDatabaseName(dbname)
96db.open()
97
98query = QSqlQuery(db)
99do_query(query, 'SET client_min_messages TO WARNING')
100
101do_query(query, 'CREATE TABLE selected_events ('
102 'id bigint NOT NULL,'
103 'name varchar(80))')
104do_query(query, 'CREATE TABLE machines ('
105 'id bigint NOT NULL,'
106 'pid integer,'
107 'root_dir varchar(4096))')
108do_query(query, 'CREATE TABLE threads ('
109 'id bigint NOT NULL,'
110 'machine_id bigint,'
111 'process_id bigint,'
112 'pid integer,'
113 'tid integer)')
114do_query(query, 'CREATE TABLE comms ('
115 'id bigint NOT NULL,'
116 'comm varchar(16))')
117do_query(query, 'CREATE TABLE comm_threads ('
118 'id bigint NOT NULL,'
119 'comm_id bigint,'
120 'thread_id bigint)')
121do_query(query, 'CREATE TABLE dsos ('
122 'id bigint NOT NULL,'
123 'machine_id bigint,'
124 'short_name varchar(256),'
125 'long_name varchar(4096),'
126 'build_id varchar(64))')
127do_query(query, 'CREATE TABLE symbols ('
128 'id bigint NOT NULL,'
129 'dso_id bigint,'
130 'sym_start bigint,'
131 'sym_end bigint,'
132 'binding integer,'
133 'name varchar(2048))')
134do_query(query, 'CREATE TABLE branch_types ('
135 'id integer NOT NULL,'
136 'name varchar(80))')
137
138if branches:
139 do_query(query, 'CREATE TABLE samples ('
140 'id bigint NOT NULL,'
141 'evsel_id bigint,'
142 'machine_id bigint,'
143 'thread_id bigint,'
144 'comm_id bigint,'
145 'dso_id bigint,'
146 'symbol_id bigint,'
147 'sym_offset bigint,'
148 'ip bigint,'
149 'time bigint,'
150 'cpu integer,'
151 'to_dso_id bigint,'
152 'to_symbol_id bigint,'
153 'to_sym_offset bigint,'
154 'to_ip bigint,'
155 'branch_type integer,'
156 'in_tx boolean)')
157else:
158 do_query(query, 'CREATE TABLE samples ('
159 'id bigint NOT NULL,'
160 'evsel_id bigint,'
161 'machine_id bigint,'
162 'thread_id bigint,'
163 'comm_id bigint,'
164 'dso_id bigint,'
165 'symbol_id bigint,'
166 'sym_offset bigint,'
167 'ip bigint,'
168 'time bigint,'
169 'cpu integer,'
170 'to_dso_id bigint,'
171 'to_symbol_id bigint,'
172 'to_sym_offset bigint,'
173 'to_ip bigint,'
174 'period bigint,'
175 'weight bigint,'
176 'transaction bigint,'
177 'data_src bigint,'
178 'branch_type integer,'
179 'in_tx boolean)')
180
181if perf_db_export_calls:
182 do_query(query, 'CREATE TABLE call_paths ('
183 'id bigint NOT NULL,'
184 'parent_id bigint,'
185 'symbol_id bigint,'
186 'ip bigint)')
187 do_query(query, 'CREATE TABLE calls ('
188 'id bigint NOT NULL,'
189 'thread_id bigint,'
190 'comm_id bigint,'
191 'call_path_id bigint,'
192 'call_time bigint,'
193 'return_time bigint,'
194 'branch_count bigint,'
195 'call_id bigint,'
196 'return_id bigint,'
197 'parent_call_path_id bigint,'
198 'flags integer)')
199
200do_query(query, 'CREATE VIEW samples_view AS '
201 'SELECT '
202 'id,'
203 'time,'
204 'cpu,'
205 '(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
206 '(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
207 '(SELECT comm FROM comms WHERE id = comm_id) AS command,'
208 '(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
209 'to_hex(ip) AS ip_hex,'
210 '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
211 'sym_offset,'
212 '(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
213 'to_hex(to_ip) AS to_ip_hex,'
214 '(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
215 'to_sym_offset,'
216 '(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
217 '(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
218 'in_tx'
219 ' FROM samples')
220
221
222file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
223file_trailer = "\377\377"
224
225def open_output_file(file_name):
226 path_name = output_dir_name + "/" + file_name
227 file = open(path_name, "w+")
228 file.write(file_header)
229 return file
230
231def close_output_file(file):
232 file.write(file_trailer)
233 file.close()
234
235def copy_output_file_direct(file, table_name):
236 close_output_file(file)
237 sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')"
238 do_query(query, sql)
239
240# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
241def copy_output_file(file, table_name):
242 conn = PQconnectdb("dbname = " + dbname)
243 if (PQstatus(conn)):
244 raise Exception("COPY FROM STDIN PQconnectdb failed")
245 file.write(file_trailer)
246 file.seek(0)
247 sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
248 res = PQexec(conn, sql)
249 if (PQresultStatus(res) != 4):
250 raise Exception("COPY FROM STDIN PQexec failed")
251 data = file.read(65536)
252 while (len(data)):
253 ret = PQputCopyData(conn, data, len(data))
254 if (ret != 1):
255 raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret))
256 data = file.read(65536)
257 ret = PQputCopyEnd(conn, None)
258 if (ret != 1):
259 raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret))
260 PQfinish(conn)
261
262def remove_output_file(file):
263 name = file.name
264 file.close()
265 os.unlink(name)
266
267evsel_file = open_output_file("evsel_table.bin")
268machine_file = open_output_file("machine_table.bin")
269thread_file = open_output_file("thread_table.bin")
270comm_file = open_output_file("comm_table.bin")
271comm_thread_file = open_output_file("comm_thread_table.bin")
272dso_file = open_output_file("dso_table.bin")
273symbol_file = open_output_file("symbol_table.bin")
274branch_type_file = open_output_file("branch_type_table.bin")
275sample_file = open_output_file("sample_table.bin")
276if perf_db_export_calls:
277 call_path_file = open_output_file("call_path_table.bin")
278 call_file = open_output_file("call_table.bin")
279
280def trace_begin():
281 print datetime.datetime.today(), "Writing to intermediate files..."
282 # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
283 evsel_table(0, "unknown")
284 machine_table(0, 0, "unknown")
285 thread_table(0, 0, 0, -1, -1)
286 comm_table(0, "unknown")
287 dso_table(0, 0, "unknown", "unknown", "")
288 symbol_table(0, 0, 0, 0, 0, "unknown")
289 sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
290 if perf_db_export_calls:
291 call_path_table(0, 0, 0, 0)
292
293unhandled_count = 0
294
295def trace_end():
296 print datetime.datetime.today(), "Copying to database..."
297 copy_output_file(evsel_file, "selected_events")
298 copy_output_file(machine_file, "machines")
299 copy_output_file(thread_file, "threads")
300 copy_output_file(comm_file, "comms")
301 copy_output_file(comm_thread_file, "comm_threads")
302 copy_output_file(dso_file, "dsos")
303 copy_output_file(symbol_file, "symbols")
304 copy_output_file(branch_type_file, "branch_types")
305 copy_output_file(sample_file, "samples")
306 if perf_db_export_calls:
307 copy_output_file(call_path_file, "call_paths")
308 copy_output_file(call_file, "calls")
309
310 print datetime.datetime.today(), "Removing intermediate files..."
311 remove_output_file(evsel_file)
312 remove_output_file(machine_file)
313 remove_output_file(thread_file)
314 remove_output_file(comm_file)
315 remove_output_file(comm_thread_file)
316 remove_output_file(dso_file)
317 remove_output_file(symbol_file)
318 remove_output_file(branch_type_file)
319 remove_output_file(sample_file)
320 if perf_db_export_calls:
321 remove_output_file(call_path_file)
322 remove_output_file(call_file)
323 os.rmdir(output_dir_name)
324 print datetime.datetime.today(), "Adding primary keys"
325 do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
326 do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
327 do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
328 do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)')
329 do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)')
330 do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)')
331 do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
332 do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
333 do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
334 if perf_db_export_calls:
335 do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
336 do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
337
338 print datetime.datetime.today(), "Adding foreign keys"
339 do_query(query, 'ALTER TABLE threads '
340 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
341 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
342 do_query(query, 'ALTER TABLE comm_threads '
343 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
344 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)')
345 do_query(query, 'ALTER TABLE dsos '
346 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)')
347 do_query(query, 'ALTER TABLE symbols '
348 'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)')
349 do_query(query, 'ALTER TABLE samples '
350 'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),'
351 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
352 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
353 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
354 'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),'
355 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
356 'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
357 'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
358 if perf_db_export_calls:
359 do_query(query, 'ALTER TABLE call_paths '
360 'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
361 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
362 do_query(query, 'ALTER TABLE calls '
363 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
364 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
365 'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
366 'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),'
367 'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
368 'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
369 do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
370
371 if (unhandled_count):
372 print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
373 print datetime.datetime.today(), "Done"
374
375def trace_unhandled(event_name, context, event_fields_dict):
376 global unhandled_count
377 unhandled_count += 1
378
379def sched__sched_switch(*x):
380 pass
381
382def evsel_table(evsel_id, evsel_name, *x):
383 n = len(evsel_name)
384 fmt = "!hiqi" + str(n) + "s"
385 value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
386 evsel_file.write(value)
387
388def machine_table(machine_id, pid, root_dir, *x):
389 n = len(root_dir)
390 fmt = "!hiqiii" + str(n) + "s"
391 value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
392 machine_file.write(value)
393
394def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
395 value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid)
396 thread_file.write(value)
397
398def comm_table(comm_id, comm_str, *x):
399 n = len(comm_str)
400 fmt = "!hiqi" + str(n) + "s"
401 value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
402 comm_file.write(value)
403
404def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
405 fmt = "!hiqiqiq"
406 value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id)
407 comm_thread_file.write(value)
408
409def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
410 n1 = len(short_name)
411 n2 = len(long_name)
412 n3 = len(build_id)
413 fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
414 value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
415 dso_file.write(value)
416
417def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
418 n = len(symbol_name)
419 fmt = "!hiqiqiqiqiii" + str(n) + "s"
420 value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
421 symbol_file.write(value)
422
423def branch_type_table(branch_type, name, *x):
424 n = len(name)
425 fmt = "!hiii" + str(n) + "s"
426 value = struct.pack(fmt, 2, 4, branch_type, n, name)
427 branch_type_file.write(value)
428
429def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, *x):
430 if branches:
431 value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiB", 17, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx)
432 else:
433 value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiB", 21, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx)
434 sample_file.write(value)
435
436def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
437 fmt = "!hiqiqiqiq"
438 value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
439 call_path_file.write(value)
440
441def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
442 fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
443 value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
444 call_file.write(value)
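
The *_table handlers above emit rows in PostgreSQL's binary COPY per-tuple layout: everything is big-endian ("!" in the struct format), each record starts with a 16-bit field count, and each field is a 32-bit byte length followed by the value bytes. A minimal sketch of decoding one record back out of the intermediate evsel file, assuming a hypothetical export directory name, just to illustrate the layout evsel_table() packs:

# Illustrative sketch, not part of the patch: decode one tuple written by
# evsel_table() above.  Layout: !h field count, then per field !i length + bytes.
import struct

def read_evsel_record(f):
	nr_fields = struct.unpack("!h", f.read(2))[0]   # evsel_table() packs 2 fields
	fields = []
	for _ in range(nr_fields):
		length = struct.unpack("!i", f.read(4))[0]  # byte length of this field
		fields.append(f.read(length))
	evsel_id = struct.unpack("!q", fields[0])[0]    # 8-byte id
	evsel_name = fields[1]                          # raw name bytes
	return evsel_id, evsel_name

# Hypothetical usage:
# with open("perf.db_export/evsels", "rb") as f:
#	print read_evsel_record(f)
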
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 67f2d6323558..f671ec37a7c4 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -133,8 +133,7 @@ static int read_via_objdump(const char *filename, u64 addr, void *buf,
133} 133}
134 134
135static int read_object_code(u64 addr, size_t len, u8 cpumode, 135static int read_object_code(u64 addr, size_t len, u8 cpumode,
136 struct thread *thread, struct machine *machine, 136 struct thread *thread, struct state *state)
137 struct state *state)
138{ 137{
139 struct addr_location al; 138 struct addr_location al;
140 unsigned char buf1[BUFSZ]; 139 unsigned char buf1[BUFSZ];
@@ -145,8 +144,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
145 144
146 pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); 145 pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
147 146
148 thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, addr, 147 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, addr, &al);
149 &al);
150 if (!al.map || !al.map->dso) { 148 if (!al.map || !al.map->dso) {
151 pr_debug("thread__find_addr_map failed\n"); 149 pr_debug("thread__find_addr_map failed\n");
152 return -1; 150 return -1;
@@ -170,8 +168,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
170 len = al.map->end - addr; 168 len = al.map->end - addr;
171 169
172 /* Read the object code using perf */ 170 /* Read the object code using perf */
173 ret_len = dso__data_read_offset(al.map->dso, machine, al.addr, buf1, 171 ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine,
174 len); 172 al.addr, buf1, len);
175 if (ret_len != len) { 173 if (ret_len != len) {
176 pr_debug("dso__data_read_offset failed\n"); 174 pr_debug("dso__data_read_offset failed\n");
177 return -1; 175 return -1;
@@ -264,8 +262,7 @@ static int process_sample_event(struct machine *machine,
264 262
265 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 263 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
266 264
267 return read_object_code(sample.ip, READLEN, cpumode, thread, machine, 265 return read_object_code(sample.ip, READLEN, cpumode, thread, state);
268 state);
269} 266}
270 267
271static int process_event(struct machine *machine, struct perf_evlist *evlist, 268static int process_event(struct machine *machine, struct perf_evlist *evlist,
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index fc25e57f4a5d..ab28cca2cb97 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -59,7 +59,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
59} 59}
60 60
61__attribute__ ((noinline)) 61__attribute__ ((noinline))
62static int unwind_thread(struct thread *thread, struct machine *machine) 62static int unwind_thread(struct thread *thread)
63{ 63{
64 struct perf_sample sample; 64 struct perf_sample sample;
65 unsigned long cnt = 0; 65 unsigned long cnt = 0;
@@ -72,7 +72,7 @@ static int unwind_thread(struct thread *thread, struct machine *machine)
72 goto out; 72 goto out;
73 } 73 }
74 74
75 err = unwind__get_entries(unwind_entry, &cnt, machine, thread, 75 err = unwind__get_entries(unwind_entry, &cnt, thread,
76 &sample, MAX_STACK); 76 &sample, MAX_STACK);
77 if (err) 77 if (err)
78 pr_debug("unwind failed\n"); 78 pr_debug("unwind failed\n");
@@ -89,21 +89,21 @@ static int unwind_thread(struct thread *thread, struct machine *machine)
89} 89}
90 90
91__attribute__ ((noinline)) 91__attribute__ ((noinline))
92static int krava_3(struct thread *thread, struct machine *machine) 92static int krava_3(struct thread *thread)
93{ 93{
94 return unwind_thread(thread, machine); 94 return unwind_thread(thread);
95} 95}
96 96
97__attribute__ ((noinline)) 97__attribute__ ((noinline))
98static int krava_2(struct thread *thread, struct machine *machine) 98static int krava_2(struct thread *thread)
99{ 99{
100 return krava_3(thread, machine); 100 return krava_3(thread);
101} 101}
102 102
103__attribute__ ((noinline)) 103__attribute__ ((noinline))
104static int krava_1(struct thread *thread, struct machine *machine) 104static int krava_1(struct thread *thread)
105{ 105{
106 return krava_2(thread, machine); 106 return krava_2(thread);
107} 107}
108 108
109int test__dwarf_unwind(void) 109int test__dwarf_unwind(void)
@@ -137,7 +137,7 @@ int test__dwarf_unwind(void)
137 goto out; 137 goto out;
138 } 138 }
139 139
140 err = krava_1(thread, machine); 140 err = krava_1(thread);
141 141
142 out: 142 out:
143 machine__delete_threads(machine); 143 machine__delete_threads(machine);
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 5a31787cc6b9..74f257a81265 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -43,7 +43,7 @@ static struct sample fake_samples[] = {
43}; 43};
44 44
45static int add_hist_entries(struct perf_evlist *evlist, 45static int add_hist_entries(struct perf_evlist *evlist,
46 struct machine *machine __maybe_unused) 46 struct machine *machine)
47{ 47{
48 struct perf_evsel *evsel; 48 struct perf_evsel *evsel;
49 struct addr_location al; 49 struct addr_location al;
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 4a456fef66ca..2113f1c8611f 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -187,7 +187,7 @@ static int mmap_events(synth_cb synth)
187 187
188 pr_debug("looking for map %p\n", td->map); 188 pr_debug("looking for map %p\n", td->map);
189 189
190 thread__find_addr_map(thread, machine, 190 thread__find_addr_map(thread,
191 PERF_RECORD_MISC_USER, MAP__FUNCTION, 191 PERF_RECORD_MISC_USER, MAP__FUNCTION,
192 (unsigned long) (td->map + 1), &al); 192 (unsigned long) (td->map + 1), &al);
193 193
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index a904a4cfe7d3..dd2a3e52ada1 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -15,6 +15,8 @@
15#include "debug.h" 15#include "debug.h"
16#include "session.h" 16#include "session.h"
17#include "tool.h" 17#include "tool.h"
18#include "header.h"
19#include "vdso.h"
18 20
19int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused, 21int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
20 union perf_event *event, 22 union perf_event *event,
@@ -33,8 +35,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
33 return -1; 35 return -1;
34 } 36 }
35 37
36 thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, 38 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, &al);
37 sample->ip, &al);
38 39
39 if (al.map != NULL) 40 if (al.map != NULL)
40 al.map->dso->hit = 1; 41 al.map->dso->hit = 1;
@@ -106,3 +107,335 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
106 build_id_hex, build_id_hex + 2); 107 build_id_hex, build_id_hex + 2);
107 return bf; 108 return bf;
108} 109}
110
111#define dsos__for_each_with_build_id(pos, head) \
112 list_for_each_entry(pos, head, node) \
113 if (!pos->has_build_id) \
114 continue; \
115 else
116
117static int write_buildid(const char *name, size_t name_len, u8 *build_id,
118 pid_t pid, u16 misc, int fd)
119{
120 int err;
121 struct build_id_event b;
122 size_t len;
123
124 len = name_len + 1;
125 len = PERF_ALIGN(len, NAME_ALIGN);
126
127 memset(&b, 0, sizeof(b));
128 memcpy(&b.build_id, build_id, BUILD_ID_SIZE);
129 b.pid = pid;
130 b.header.misc = misc;
131 b.header.size = sizeof(b) + len;
132
133 err = writen(fd, &b, sizeof(b));
134 if (err < 0)
135 return err;
136
137 return write_padded(fd, name, name_len + 1, len);
138}
139
140static int __dsos__write_buildid_table(struct list_head *head,
141 struct machine *machine,
142 pid_t pid, u16 misc, int fd)
143{
144 char nm[PATH_MAX];
145 struct dso *pos;
146
147 dsos__for_each_with_build_id(pos, head) {
148 int err;
149 const char *name;
150 size_t name_len;
151
152 if (!pos->hit)
153 continue;
154
155 if (dso__is_vdso(pos)) {
156 name = pos->short_name;
157 name_len = pos->short_name_len + 1;
158 } else if (dso__is_kcore(pos)) {
159 machine__mmap_name(machine, nm, sizeof(nm));
160 name = nm;
161 name_len = strlen(nm) + 1;
162 } else {
163 name = pos->long_name;
164 name_len = pos->long_name_len + 1;
165 }
166
167 err = write_buildid(name, name_len, pos->build_id,
168 pid, misc, fd);
169 if (err)
170 return err;
171 }
172
173 return 0;
174}
175
176static int machine__write_buildid_table(struct machine *machine, int fd)
177{
178 int err;
179 u16 kmisc = PERF_RECORD_MISC_KERNEL,
180 umisc = PERF_RECORD_MISC_USER;
181
182 if (!machine__is_host(machine)) {
183 kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
184 umisc = PERF_RECORD_MISC_GUEST_USER;
185 }
186
187 err = __dsos__write_buildid_table(&machine->kernel_dsos.head, machine,
188 machine->pid, kmisc, fd);
189 if (err == 0)
190 err = __dsos__write_buildid_table(&machine->user_dsos.head,
191 machine, machine->pid, umisc,
192 fd);
193 return err;
194}
195
196int perf_session__write_buildid_table(struct perf_session *session, int fd)
197{
198 struct rb_node *nd;
199 int err = machine__write_buildid_table(&session->machines.host, fd);
200
201 if (err)
202 return err;
203
204 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
205 struct machine *pos = rb_entry(nd, struct machine, rb_node);
206 err = machine__write_buildid_table(pos, fd);
207 if (err)
208 break;
209 }
210 return err;
211}
212
213static int __dsos__hit_all(struct list_head *head)
214{
215 struct dso *pos;
216
217 list_for_each_entry(pos, head, node)
218 pos->hit = true;
219
220 return 0;
221}
222
223static int machine__hit_all_dsos(struct machine *machine)
224{
225 int err;
226
227 err = __dsos__hit_all(&machine->kernel_dsos.head);
228 if (err)
229 return err;
230
231 return __dsos__hit_all(&machine->user_dsos.head);
232}
233
234int dsos__hit_all(struct perf_session *session)
235{
236 struct rb_node *nd;
237 int err;
238
239 err = machine__hit_all_dsos(&session->machines.host);
240 if (err)
241 return err;
242
243 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
244 struct machine *pos = rb_entry(nd, struct machine, rb_node);
245
246 err = machine__hit_all_dsos(pos);
247 if (err)
248 return err;
249 }
250
251 return 0;
252}
253
254int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
255 const char *name, bool is_kallsyms, bool is_vdso)
256{
257 const size_t size = PATH_MAX;
258 char *realname, *filename = zalloc(size),
259 *linkname = zalloc(size), *targetname;
260 int len, err = -1;
261 bool slash = is_kallsyms || is_vdso;
262
263 if (is_kallsyms) {
264 if (symbol_conf.kptr_restrict) {
265 pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
266 err = 0;
267 goto out_free;
268 }
269 realname = (char *) name;
270 } else
271 realname = realpath(name, NULL);
272
273 if (realname == NULL || filename == NULL || linkname == NULL)
274 goto out_free;
275
276 len = scnprintf(filename, size, "%s%s%s",
277 debugdir, slash ? "/" : "",
278 is_vdso ? DSO__NAME_VDSO : realname);
279 if (mkdir_p(filename, 0755))
280 goto out_free;
281
282 snprintf(filename + len, size - len, "/%s", sbuild_id);
283
284 if (access(filename, F_OK)) {
285 if (is_kallsyms) {
286 if (copyfile("/proc/kallsyms", filename))
287 goto out_free;
288 } else if (link(realname, filename) && copyfile(name, filename))
289 goto out_free;
290 }
291
292 len = scnprintf(linkname, size, "%s/.build-id/%.2s",
293 debugdir, sbuild_id);
294
295 if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
296 goto out_free;
297
298 snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
299 targetname = filename + strlen(debugdir) - 5;
300 memcpy(targetname, "../..", 5);
301
302 if (symlink(targetname, linkname) == 0)
303 err = 0;
304out_free:
305 if (!is_kallsyms)
306 free(realname);
307 free(filename);
308 free(linkname);
309 return err;
310}
311
312static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
313 const char *name, const char *debugdir,
314 bool is_kallsyms, bool is_vdso)
315{
316 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
317
318 build_id__sprintf(build_id, build_id_size, sbuild_id);
319
320 return build_id_cache__add_s(sbuild_id, debugdir, name,
321 is_kallsyms, is_vdso);
322}
323
324int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
325{
326 const size_t size = PATH_MAX;
327 char *filename = zalloc(size),
328 *linkname = zalloc(size);
329 int err = -1;
330
331 if (filename == NULL || linkname == NULL)
332 goto out_free;
333
334 snprintf(linkname, size, "%s/.build-id/%.2s/%s",
335 debugdir, sbuild_id, sbuild_id + 2);
336
337 if (access(linkname, F_OK))
338 goto out_free;
339
340 if (readlink(linkname, filename, size - 1) < 0)
341 goto out_free;
342
343 if (unlink(linkname))
344 goto out_free;
345
346 /*
347 * Since the link is relative, we must make it absolute:
348 */
349 snprintf(linkname, size, "%s/.build-id/%.2s/%s",
350 debugdir, sbuild_id, filename);
351
352 if (unlink(linkname))
353 goto out_free;
354
355 err = 0;
356out_free:
357 free(filename);
358 free(linkname);
359 return err;
360}
361
362static int dso__cache_build_id(struct dso *dso, struct machine *machine,
363 const char *debugdir)
364{
365 bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
366 bool is_vdso = dso__is_vdso(dso);
367 const char *name = dso->long_name;
368 char nm[PATH_MAX];
369
370 if (dso__is_kcore(dso)) {
371 is_kallsyms = true;
372 machine__mmap_name(machine, nm, sizeof(nm));
373 name = nm;
374 }
375 return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
376 debugdir, is_kallsyms, is_vdso);
377}
378
379static int __dsos__cache_build_ids(struct list_head *head,
380 struct machine *machine, const char *debugdir)
381{
382 struct dso *pos;
383 int err = 0;
384
385 dsos__for_each_with_build_id(pos, head)
386 if (dso__cache_build_id(pos, machine, debugdir))
387 err = -1;
388
389 return err;
390}
391
392static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
393{
394 int ret = __dsos__cache_build_ids(&machine->kernel_dsos.head, machine,
395 debugdir);
396 ret |= __dsos__cache_build_ids(&machine->user_dsos.head, machine,
397 debugdir);
398 return ret;
399}
400
401int perf_session__cache_build_ids(struct perf_session *session)
402{
403 struct rb_node *nd;
404 int ret;
405 char debugdir[PATH_MAX];
406
407 snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
408
409 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
410 return -1;
411
412 ret = machine__cache_build_ids(&session->machines.host, debugdir);
413
414 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
415 struct machine *pos = rb_entry(nd, struct machine, rb_node);
416 ret |= machine__cache_build_ids(pos, debugdir);
417 }
418 return ret ? -1 : 0;
419}
420
421static bool machine__read_build_ids(struct machine *machine, bool with_hits)
422{
423 bool ret;
424
425 ret = __dsos__read_build_ids(&machine->kernel_dsos.head, with_hits);
426 ret |= __dsos__read_build_ids(&machine->user_dsos.head, with_hits);
427 return ret;
428}
429
430bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
431{
432 struct rb_node *nd;
433 bool ret = machine__read_build_ids(&session->machines.host, with_hits);
434
435 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
436 struct machine *pos = rb_entry(nd, struct machine, rb_node);
437 ret |= machine__read_build_ids(pos, with_hits);
438 }
439
440 return ret;
441}
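
build_id_cache__add_s(), moved here from header.c, fills the build-id cache with two entries per object: a copy (or hard link) of the file at <debugdir>/<object path>/<sbuild_id>, and a relative symlink at <debugdir>/.build-id/<first two hex chars>/<remaining hex chars> pointing back at it. A rough sketch of the resulting paths, assuming a regular DSO (kallsyms and vdso get special names in the C code) and a hypothetical build-id:

# Illustrative sketch, not part of the patch: where build_id_cache__add_s()
# places a DSO in the cache (debugdir is normally ~/.debug).
import os

def build_id_cache_paths(debugdir, dso_path, sbuild_id):
	filename = os.path.join(debugdir, dso_path.lstrip("/"), sbuild_id)
	linkname = os.path.join(debugdir, ".build-id", sbuild_id[:2], sbuild_id[2:])
	return filename, linkname

# build_id_cache_paths(os.path.expanduser("~/.debug"),
#		       "/usr/lib64/libc-2.20.so",
#		       "0123456789abcdef0123456789abcdef01234567")
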
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index ae392561470b..666a3bd4f64e 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -15,4 +15,15 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
15int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event, 15int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
16 struct perf_sample *sample, struct perf_evsel *evsel, 16 struct perf_sample *sample, struct perf_evsel *evsel,
17 struct machine *machine); 17 struct machine *machine);
18
19int dsos__hit_all(struct perf_session *session);
20
21bool perf_session__read_build_ids(struct perf_session *session, bool with_hits);
22int perf_session__write_buildid_table(struct perf_session *session, int fd);
23int perf_session__cache_build_ids(struct perf_session *session);
24
25int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
26 const char *name, bool is_kallsyms, bool is_vdso);
27int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
28
18#endif 29#endif
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index c84d3f8dcb75..00229809a904 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -754,8 +754,8 @@ int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent
754 754
755 if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain || 755 if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
756 sort__has_parent) { 756 sort__has_parent) {
757 return machine__resolve_callchain(al->machine, evsel, al->thread, 757 return thread__resolve_callchain(al->thread, evsel, sample,
758 sample, parent, al, max_stack); 758 parent, al, max_stack);
759 } 759 }
760 return 0; 760 return 0;
761} 761}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 94cfefddf4db..3caccc2c173c 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -184,11 +184,9 @@ static inline void callchain_cursor_snapshot(struct callchain_cursor *dest,
184} 184}
185 185
186#ifdef HAVE_SKIP_CALLCHAIN_IDX 186#ifdef HAVE_SKIP_CALLCHAIN_IDX
187extern int arch_skip_callchain_idx(struct machine *machine, 187extern int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain);
188 struct thread *thread, struct ip_callchain *chain);
189#else 188#else
190static inline int arch_skip_callchain_idx(struct machine *machine __maybe_unused, 189static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
191 struct thread *thread __maybe_unused,
192 struct ip_callchain *chain __maybe_unused) 190 struct ip_callchain *chain __maybe_unused)
193{ 191{
194 return -1; 192 return -1;
diff --git a/tools/perf/util/comm.h b/tools/perf/util/comm.h
index 51c10ab257f8..71c9c39340d4 100644
--- a/tools/perf/util/comm.h
+++ b/tools/perf/util/comm.h
@@ -12,6 +12,10 @@ struct comm {
12 u64 start; 12 u64 start;
13 struct list_head list; 13 struct list_head list;
14 bool exec; 14 bool exec;
15 union { /* Tool specific area */
16 void *priv;
17 u64 db_id;
18 };
15}; 19};
16 20
17void comm__free(struct comm *comm); 21void comm__free(struct comm *comm);
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
new file mode 100644
index 000000000000..c81dae399763
--- /dev/null
+++ b/tools/perf/util/db-export.c
@@ -0,0 +1,428 @@
1/*
2 * db-export.c: Support for exporting data suitable for import to a database
3 * Copyright (c) 2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#include <errno.h>
17
18#include "evsel.h"
19#include "machine.h"
20#include "thread.h"
21#include "comm.h"
22#include "symbol.h"
23#include "event.h"
24#include "util.h"
25#include "thread-stack.h"
26#include "db-export.h"
27
28struct deferred_export {
29 struct list_head node;
30 struct comm *comm;
31};
32
33static int db_export__deferred(struct db_export *dbe)
34{
35 struct deferred_export *de;
36 int err;
37
38 while (!list_empty(&dbe->deferred)) {
39 de = list_entry(dbe->deferred.next, struct deferred_export,
40 node);
41 err = dbe->export_comm(dbe, de->comm);
42 list_del(&de->node);
43 free(de);
44 if (err)
45 return err;
46 }
47
48 return 0;
49}
50
51static void db_export__free_deferred(struct db_export *dbe)
52{
53 struct deferred_export *de;
54
55 while (!list_empty(&dbe->deferred)) {
56 de = list_entry(dbe->deferred.next, struct deferred_export,
57 node);
58 list_del(&de->node);
59 free(de);
60 }
61}
62
63static int db_export__defer_comm(struct db_export *dbe, struct comm *comm)
64{
65 struct deferred_export *de;
66
67 de = zalloc(sizeof(struct deferred_export));
68 if (!de)
69 return -ENOMEM;
70
71 de->comm = comm;
72 list_add_tail(&de->node, &dbe->deferred);
73
74 return 0;
75}
76
77int db_export__init(struct db_export *dbe)
78{
79 memset(dbe, 0, sizeof(struct db_export));
80 INIT_LIST_HEAD(&dbe->deferred);
81 return 0;
82}
83
84int db_export__flush(struct db_export *dbe)
85{
86 return db_export__deferred(dbe);
87}
88
89void db_export__exit(struct db_export *dbe)
90{
91 db_export__free_deferred(dbe);
92 call_return_processor__free(dbe->crp);
93 dbe->crp = NULL;
94}
95
96int db_export__evsel(struct db_export *dbe, struct perf_evsel *evsel)
97{
98 if (evsel->db_id)
99 return 0;
100
101 evsel->db_id = ++dbe->evsel_last_db_id;
102
103 if (dbe->export_evsel)
104 return dbe->export_evsel(dbe, evsel);
105
106 return 0;
107}
108
109int db_export__machine(struct db_export *dbe, struct machine *machine)
110{
111 if (machine->db_id)
112 return 0;
113
114 machine->db_id = ++dbe->machine_last_db_id;
115
116 if (dbe->export_machine)
117 return dbe->export_machine(dbe, machine);
118
119 return 0;
120}
121
122int db_export__thread(struct db_export *dbe, struct thread *thread,
123 struct machine *machine, struct comm *comm)
124{
125 u64 main_thread_db_id = 0;
126 int err;
127
128 if (thread->db_id)
129 return 0;
130
131 thread->db_id = ++dbe->thread_last_db_id;
132
133 if (thread->pid_ != -1) {
134 struct thread *main_thread;
135
136 if (thread->pid_ == thread->tid) {
137 main_thread = thread;
138 } else {
139 main_thread = machine__findnew_thread(machine,
140 thread->pid_,
141 thread->pid_);
142 if (!main_thread)
143 return -ENOMEM;
144 err = db_export__thread(dbe, main_thread, machine,
145 comm);
146 if (err)
147 return err;
148 if (comm) {
149 err = db_export__comm_thread(dbe, comm, thread);
150 if (err)
151 return err;
152 }
153 }
154 main_thread_db_id = main_thread->db_id;
155 }
156
157 if (dbe->export_thread)
158 return dbe->export_thread(dbe, thread, main_thread_db_id,
159 machine);
160
161 return 0;
162}
163
164int db_export__comm(struct db_export *dbe, struct comm *comm,
165 struct thread *main_thread)
166{
167 int err;
168
169 if (comm->db_id)
170 return 0;
171
172 comm->db_id = ++dbe->comm_last_db_id;
173
174 if (dbe->export_comm) {
175 if (main_thread->comm_set)
176 err = dbe->export_comm(dbe, comm);
177 else
178 err = db_export__defer_comm(dbe, comm);
179 if (err)
180 return err;
181 }
182
183 return db_export__comm_thread(dbe, comm, main_thread);
184}
185
186int db_export__comm_thread(struct db_export *dbe, struct comm *comm,
187 struct thread *thread)
188{
189 u64 db_id;
190
191 db_id = ++dbe->comm_thread_last_db_id;
192
193 if (dbe->export_comm_thread)
194 return dbe->export_comm_thread(dbe, db_id, comm, thread);
195
196 return 0;
197}
198
199int db_export__dso(struct db_export *dbe, struct dso *dso,
200 struct machine *machine)
201{
202 if (dso->db_id)
203 return 0;
204
205 dso->db_id = ++dbe->dso_last_db_id;
206
207 if (dbe->export_dso)
208 return dbe->export_dso(dbe, dso, machine);
209
210 return 0;
211}
212
213int db_export__symbol(struct db_export *dbe, struct symbol *sym,
214 struct dso *dso)
215{
216 u64 *sym_db_id = symbol__priv(sym);
217
218 if (*sym_db_id)
219 return 0;
220
221 *sym_db_id = ++dbe->symbol_last_db_id;
222
223 if (dbe->export_symbol)
224 return dbe->export_symbol(dbe, sym, dso);
225
226 return 0;
227}
228
229static struct thread *get_main_thread(struct machine *machine, struct thread *thread)
230{
231 if (thread->pid_ == thread->tid)
232 return thread;
233
234 if (thread->pid_ == -1)
235 return NULL;
236
237 return machine__find_thread(machine, thread->pid_, thread->pid_);
238}
239
240static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
241 u64 *dso_db_id, u64 *sym_db_id, u64 *offset)
242{
243 int err;
244
245 if (al->map) {
246 struct dso *dso = al->map->dso;
247
248 err = db_export__dso(dbe, dso, al->machine);
249 if (err)
250 return err;
251 *dso_db_id = dso->db_id;
252
253 if (!al->sym) {
254 al->sym = symbol__new(al->addr, 0, 0, "unknown");
255 if (al->sym)
256 symbols__insert(&dso->symbols[al->map->type],
257 al->sym);
258 }
259
260 if (al->sym) {
261 u64 *db_id = symbol__priv(al->sym);
262
263 err = db_export__symbol(dbe, al->sym, dso);
264 if (err)
265 return err;
266 *sym_db_id = *db_id;
267 *offset = al->addr - al->sym->start;
268 }
269 }
270
271 return 0;
272}
273
274int db_export__branch_type(struct db_export *dbe, u32 branch_type,
275 const char *name)
276{
277 if (dbe->export_branch_type)
278 return dbe->export_branch_type(dbe, branch_type, name);
279
280 return 0;
281}
282
283int db_export__sample(struct db_export *dbe, union perf_event *event,
284 struct perf_sample *sample, struct perf_evsel *evsel,
285 struct thread *thread, struct addr_location *al)
286{
287 struct export_sample es = {
288 .event = event,
289 .sample = sample,
290 .evsel = evsel,
291 .thread = thread,
292 .al = al,
293 };
294 struct thread *main_thread;
295 struct comm *comm = NULL;
296 int err;
297
298 err = db_export__evsel(dbe, evsel);
299 if (err)
300 return err;
301
302 err = db_export__machine(dbe, al->machine);
303 if (err)
304 return err;
305
306 main_thread = get_main_thread(al->machine, thread);
307 if (main_thread)
308 comm = machine__thread_exec_comm(al->machine, main_thread);
309
310 err = db_export__thread(dbe, thread, al->machine, comm);
311 if (err)
312 return err;
313
314 if (comm) {
315 err = db_export__comm(dbe, comm, main_thread);
316 if (err)
317 return err;
318 es.comm_db_id = comm->db_id;
319 }
320
321 es.db_id = ++dbe->sample_last_db_id;
322
323 err = db_ids_from_al(dbe, al, &es.dso_db_id, &es.sym_db_id, &es.offset);
324 if (err)
325 return err;
326
327 if ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
328 sample_addr_correlates_sym(&evsel->attr)) {
329 struct addr_location addr_al;
330
331 perf_event__preprocess_sample_addr(event, sample, thread, &addr_al);
332 err = db_ids_from_al(dbe, &addr_al, &es.addr_dso_db_id,
333 &es.addr_sym_db_id, &es.addr_offset);
334 if (err)
335 return err;
336 if (dbe->crp) {
337 err = thread_stack__process(thread, comm, sample, al,
338 &addr_al, es.db_id,
339 dbe->crp);
340 if (err)
341 return err;
342 }
343 }
344
345 if (dbe->export_sample)
346 return dbe->export_sample(dbe, &es);
347
348 return 0;
349}
350
351static struct {
352 u32 branch_type;
353 const char *name;
354} branch_types[] = {
355 {0, "no branch"},
356 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL, "call"},
357 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN, "return"},
358 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL, "conditional jump"},
359 {PERF_IP_FLAG_BRANCH, "unconditional jump"},
360 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_INTERRUPT,
361 "software interrupt"},
362 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_INTERRUPT,
363 "return from interrupt"},
364 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_SYSCALLRET,
365 "system call"},
366 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_SYSCALLRET,
367 "return from system call"},
368 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_ASYNC, "asynchronous branch"},
369 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
370 PERF_IP_FLAG_INTERRUPT, "hardware interrupt"},
371 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "transaction abort"},
372 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "trace begin"},
373 {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "trace end"},
374 {0, NULL}
375};
376
377int db_export__branch_types(struct db_export *dbe)
378{
379 int i, err = 0;
380
381 for (i = 0; branch_types[i].name ; i++) {
382 err = db_export__branch_type(dbe, branch_types[i].branch_type,
383 branch_types[i].name);
384 if (err)
385 break;
386 }
387 return err;
388}
389
390int db_export__call_path(struct db_export *dbe, struct call_path *cp)
391{
392 int err;
393
394 if (cp->db_id)
395 return 0;
396
397 if (cp->parent) {
398 err = db_export__call_path(dbe, cp->parent);
399 if (err)
400 return err;
401 }
402
403 cp->db_id = ++dbe->call_path_last_db_id;
404
405 if (dbe->export_call_path)
406 return dbe->export_call_path(dbe, cp);
407
408 return 0;
409}
410
411int db_export__call_return(struct db_export *dbe, struct call_return *cr)
412{
413 int err;
414
415 if (cr->db_id)
416 return 0;
417
418 err = db_export__call_path(dbe, cr->cp);
419 if (err)
420 return err;
421
422 cr->db_id = ++dbe->call_return_last_db_id;
423
424 if (dbe->export_call_return)
425 return dbe->export_call_return(dbe, cr);
426
427 return 0;
428}
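
Most of the db_export__*() routines above follow the same pattern: objects carry a db_id in their tool-private area, the first call assigns the next id from a per-table counter and hands the object to the export callback, and later calls return immediately. That is what lets a backend such as the PostgreSQL script stream rows without any duplicate checking. A minimal sketch of the pattern, assuming nothing beyond what the C code shows:

# Illustrative sketch, not part of the patch: assign an id once, emit once.
class Exporter(object):
	def __init__(self, emit):
		self.last_db_id = 0
		self.emit = emit		# callback that writes one row

	def export(self, obj):
		if getattr(obj, "db_id", 0):
			return obj.db_id	# already exported, nothing to do
		self.last_db_id += 1
		obj.db_id = self.last_db_id
		self.emit(obj)
		return obj.db_id
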
diff --git a/tools/perf/util/db-export.h b/tools/perf/util/db-export.h
new file mode 100644
index 000000000000..adbd22d66798
--- /dev/null
+++ b/tools/perf/util/db-export.h
@@ -0,0 +1,107 @@
1/*
2 * db-export.h: Support for exporting data suitable for import to a database
3 * Copyright (c) 2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#ifndef __PERF_DB_EXPORT_H
17#define __PERF_DB_EXPORT_H
18
19#include <linux/types.h>
20#include <linux/list.h>
21
22struct perf_evsel;
23struct machine;
24struct thread;
25struct comm;
26struct dso;
27struct perf_sample;
28struct addr_location;
29struct call_return_processor;
30struct call_path;
31struct call_return;
32
33struct export_sample {
34 union perf_event *event;
35 struct perf_sample *sample;
36 struct perf_evsel *evsel;
37 struct thread *thread;
38 struct addr_location *al;
39 u64 db_id;
40 u64 comm_db_id;
41 u64 dso_db_id;
42 u64 sym_db_id;
43 u64 offset; /* ip offset from symbol start */
44 u64 addr_dso_db_id;
45 u64 addr_sym_db_id;
46 u64 addr_offset; /* addr offset from symbol start */
47};
48
49struct db_export {
50 int (*export_evsel)(struct db_export *dbe, struct perf_evsel *evsel);
51 int (*export_machine)(struct db_export *dbe, struct machine *machine);
52 int (*export_thread)(struct db_export *dbe, struct thread *thread,
53 u64 main_thread_db_id, struct machine *machine);
54 int (*export_comm)(struct db_export *dbe, struct comm *comm);
55 int (*export_comm_thread)(struct db_export *dbe, u64 db_id,
56 struct comm *comm, struct thread *thread);
57 int (*export_dso)(struct db_export *dbe, struct dso *dso,
58 struct machine *machine);
59 int (*export_symbol)(struct db_export *dbe, struct symbol *sym,
60 struct dso *dso);
61 int (*export_branch_type)(struct db_export *dbe, u32 branch_type,
62 const char *name);
63 int (*export_sample)(struct db_export *dbe, struct export_sample *es);
64 int (*export_call_path)(struct db_export *dbe, struct call_path *cp);
65 int (*export_call_return)(struct db_export *dbe,
66 struct call_return *cr);
67 struct call_return_processor *crp;
68 u64 evsel_last_db_id;
69 u64 machine_last_db_id;
70 u64 thread_last_db_id;
71 u64 comm_last_db_id;
72 u64 comm_thread_last_db_id;
73 u64 dso_last_db_id;
74 u64 symbol_last_db_id;
75 u64 sample_last_db_id;
76 u64 call_path_last_db_id;
77 u64 call_return_last_db_id;
78 struct list_head deferred;
79};
80
81int db_export__init(struct db_export *dbe);
82int db_export__flush(struct db_export *dbe);
83void db_export__exit(struct db_export *dbe);
84int db_export__evsel(struct db_export *dbe, struct perf_evsel *evsel);
85int db_export__machine(struct db_export *dbe, struct machine *machine);
86int db_export__thread(struct db_export *dbe, struct thread *thread,
87 struct machine *machine, struct comm *comm);
88int db_export__comm(struct db_export *dbe, struct comm *comm,
89 struct thread *main_thread);
90int db_export__comm_thread(struct db_export *dbe, struct comm *comm,
91 struct thread *thread);
92int db_export__dso(struct db_export *dbe, struct dso *dso,
93 struct machine *machine);
94int db_export__symbol(struct db_export *dbe, struct symbol *sym,
95 struct dso *dso);
96int db_export__branch_type(struct db_export *dbe, u32 branch_type,
97 const char *name);
98int db_export__sample(struct db_export *dbe, union perf_event *event,
99 struct perf_sample *sample, struct perf_evsel *evsel,
100 struct thread *thread, struct addr_location *al);
101
102int db_export__branch_types(struct db_export *dbe);
103
104int db_export__call_path(struct db_export *dbe, struct call_path *cp);
105int db_export__call_return(struct db_export *dbe, struct call_return *cr);
106
107#endif
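
The export callbacks declared above correspond one-to-one with the handler functions that the export-to-postgresql.py script earlier in this patch defines; the scripting engine is expected to route each callback to the matching *_table handler (the wiring itself lives in trace-event-python.c, outside this excerpt). A summary of that assumed correspondence:

# Illustrative summary, not part of the patch.
CALLBACK_TO_SCRIPT_HANDLER = {
	"export_evsel":		"evsel_table",
	"export_machine":	"machine_table",
	"export_thread":	"thread_table",
	"export_comm":		"comm_table",
	"export_comm_thread":	"comm_thread_table",
	"export_dso":		"dso_table",
	"export_symbol":	"symbol_table",
	"export_branch_type":	"branch_type_table",
	"export_sample":	"sample_table",
	"export_call_path":	"call_path_table",
	"export_call_return":	"call_return_table",
}
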
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 0247acfdfaca..45be944d450a 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -21,8 +21,10 @@ char dso__symtab_origin(const struct dso *dso)
21 [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', 21 [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
22 [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', 22 [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
23 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K', 23 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
24 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
24 [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g', 25 [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
25 [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G', 26 [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
27 [DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
26 [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V', 28 [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
27 }; 29 };
28 30
@@ -112,11 +114,13 @@ int dso__read_binary_type_filename(const struct dso *dso,
112 break; 114 break;
113 115
114 case DSO_BINARY_TYPE__GUEST_KMODULE: 116 case DSO_BINARY_TYPE__GUEST_KMODULE:
117 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
115 path__join3(filename, size, symbol_conf.symfs, 118 path__join3(filename, size, symbol_conf.symfs,
116 root_dir, dso->long_name); 119 root_dir, dso->long_name);
117 break; 120 break;
118 121
119 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: 122 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
123 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
120 __symbol__join_symfs(filename, size, dso->long_name); 124 __symbol__join_symfs(filename, size, dso->long_name);
121 break; 125 break;
122 126
@@ -137,6 +141,73 @@ int dso__read_binary_type_filename(const struct dso *dso,
137 return ret; 141 return ret;
138} 142}
139 143
144static const struct {
145 const char *fmt;
146 int (*decompress)(const char *input, int output);
147} compressions[] = {
148#ifdef HAVE_ZLIB_SUPPORT
149 { "gz", gzip_decompress_to_file },
150#endif
151 { NULL, NULL },
152};
153
154bool is_supported_compression(const char *ext)
155{
156 unsigned i;
157
158 for (i = 0; compressions[i].fmt; i++) {
159 if (!strcmp(ext, compressions[i].fmt))
160 return true;
161 }
162 return false;
163}
164
165bool is_kmodule_extension(const char *ext)
166{
167 if (strncmp(ext, "ko", 2))
168 return false;
169
170 if (ext[2] == '\0' || (ext[2] == '.' && is_supported_compression(ext+3)))
171 return true;
172
173 return false;
174}
175
176bool is_kernel_module(const char *pathname, bool *compressed)
177{
178 const char *ext = strrchr(pathname, '.');
179
180 if (ext == NULL)
181 return false;
182
183 if (is_supported_compression(ext + 1)) {
184 if (compressed)
185 *compressed = true;
186 ext -= 3;
187 } else if (compressed)
188 *compressed = false;
189
190 return is_kmodule_extension(ext + 1);
191}
192
193bool decompress_to_file(const char *ext, const char *filename, int output_fd)
194{
195 unsigned i;
196
197 for (i = 0; compressions[i].fmt; i++) {
198 if (!strcmp(ext, compressions[i].fmt))
199 return !compressions[i].decompress(filename,
200 output_fd);
201 }
202 return false;
203}
204
205bool dso__needs_decompress(struct dso *dso)
206{
207 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
208 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
209}
210
140/* 211/*
141 * Global list of open DSOs and the counter. 212 * Global list of open DSOs and the counter.
142 */ 213 */
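
The new helpers treat a path as a kernel module when its name ends in ".ko" or in ".ko.<ext>" where <ext> is a supported compression format (only "gz" for now, behind HAVE_ZLIB_SUPPORT), and dso__needs_decompress() reports whether a DSO was classified as one of the *_COMP types. Roughly the same classification in a short sketch, assuming only the gz suffix:

# Illustrative sketch, not part of the patch: approximate the
# is_kernel_module() extension check.  Returns (is_module, is_compressed).
SUPPORTED_COMPRESSIONS = ("gz",)

def is_kernel_module(pathname):
	name = pathname.rsplit("/", 1)[-1]
	if name.endswith(".ko"):
		return True, False
	for ext in SUPPORTED_COMPRESSIONS:
		if name.endswith(".ko." + ext):
			return True, True
	return False, False

# is_kernel_module("/lib/modules/3.18.1/kernel/fs/xfs/xfs.ko.gz") -> (True, True)
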
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index acb651acc7fd..3782c82c6e44 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -22,7 +22,9 @@ enum dso_binary_type {
22 DSO_BINARY_TYPE__BUILDID_DEBUGINFO, 22 DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
23 DSO_BINARY_TYPE__SYSTEM_PATH_DSO, 23 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
24 DSO_BINARY_TYPE__GUEST_KMODULE, 24 DSO_BINARY_TYPE__GUEST_KMODULE,
25 DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
25 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, 26 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
27 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
26 DSO_BINARY_TYPE__KCORE, 28 DSO_BINARY_TYPE__KCORE,
27 DSO_BINARY_TYPE__GUEST_KCORE, 29 DSO_BINARY_TYPE__GUEST_KCORE,
28 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, 30 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
@@ -127,6 +129,7 @@ struct dso {
127 const char *long_name; 129 const char *long_name;
128 u16 long_name_len; 130 u16 long_name_len;
129 u16 short_name_len; 131 u16 short_name_len;
132 void *dwfl; /* DWARF debug info */
130 133
131 /* dso data file */ 134 /* dso data file */
132 struct { 135 struct {
@@ -138,6 +141,11 @@ struct dso {
138 struct list_head open_entry; 141 struct list_head open_entry;
139 } data; 142 } data;
140 143
144 union { /* Tool specific area */
145 void *priv;
146 u64 db_id;
147 };
148
141 char name[0]; 149 char name[0];
142}; 150};
143 151
@@ -179,6 +187,11 @@ int dso__kernel_module_get_build_id(struct dso *dso, const char *root_dir);
179char dso__symtab_origin(const struct dso *dso); 187char dso__symtab_origin(const struct dso *dso);
180int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type, 188int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type,
181 char *root_dir, char *filename, size_t size); 189 char *root_dir, char *filename, size_t size);
190bool is_supported_compression(const char *ext);
191bool is_kmodule_extension(const char *ext);
192bool is_kernel_module(const char *pathname, bool *compressed);
193bool decompress_to_file(const char *ext, const char *filename, int output_fd);
194bool dso__needs_decompress(struct dso *dso);
182 195
183/* 196/*
184 * The dso__data_* external interface provides following functions: 197 * The dso__data_* external interface provides following functions:
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 4af6b279e34a..6c6d044e959a 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -28,6 +28,7 @@ static const char *perf_event__names[] = {
28 [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA", 28 [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
29 [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID", 29 [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
30 [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND", 30 [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
31 [PERF_RECORD_ID_INDEX] = "ID_INDEX",
31}; 32};
32 33
33const char *perf_event__name(unsigned int id) 34const char *perf_event__name(unsigned int id)
@@ -730,12 +731,12 @@ int perf_event__process(struct perf_tool *tool __maybe_unused,
730 return machine__process_event(machine, event, sample); 731 return machine__process_event(machine, event, sample);
731} 732}
732 733
733void thread__find_addr_map(struct thread *thread, 734void thread__find_addr_map(struct thread *thread, u8 cpumode,
734 struct machine *machine, u8 cpumode,
735 enum map_type type, u64 addr, 735 enum map_type type, u64 addr,
736 struct addr_location *al) 736 struct addr_location *al)
737{ 737{
738 struct map_groups *mg = thread->mg; 738 struct map_groups *mg = thread->mg;
739 struct machine *machine = mg->machine;
739 bool load_map = false; 740 bool load_map = false;
740 741
741 al->machine = machine; 742 al->machine = machine;
@@ -806,14 +807,14 @@ try_again:
806 } 807 }
807} 808}
808 809
809void thread__find_addr_location(struct thread *thread, struct machine *machine, 810void thread__find_addr_location(struct thread *thread,
810 u8 cpumode, enum map_type type, u64 addr, 811 u8 cpumode, enum map_type type, u64 addr,
811 struct addr_location *al) 812 struct addr_location *al)
812{ 813{
813 thread__find_addr_map(thread, machine, cpumode, type, addr, al); 814 thread__find_addr_map(thread, cpumode, type, addr, al);
814 if (al->map != NULL) 815 if (al->map != NULL)
815 al->sym = map__find_symbol(al->map, al->addr, 816 al->sym = map__find_symbol(al->map, al->addr,
816 machine->symbol_filter); 817 thread->mg->machine->symbol_filter);
817 else 818 else
818 al->sym = NULL; 819 al->sym = NULL;
819} 820}
@@ -842,8 +843,7 @@ int perf_event__preprocess_sample(const union perf_event *event,
842 machine->vmlinux_maps[MAP__FUNCTION] == NULL) 843 machine->vmlinux_maps[MAP__FUNCTION] == NULL)
843 machine__create_kernel_maps(machine); 844 machine__create_kernel_maps(machine);
844 845
845 thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, 846 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al);
846 sample->ip, al);
847 dump_printf(" ...... dso: %s\n", 847 dump_printf(" ...... dso: %s\n",
848 al->map ? al->map->dso->long_name : 848 al->map ? al->map->dso->long_name :
849 al->level == 'H' ? "[hypervisor]" : "<not found>"); 849 al->level == 'H' ? "[hypervisor]" : "<not found>");
@@ -902,16 +902,14 @@ bool sample_addr_correlates_sym(struct perf_event_attr *attr)
902 902
903void perf_event__preprocess_sample_addr(union perf_event *event, 903void perf_event__preprocess_sample_addr(union perf_event *event,
904 struct perf_sample *sample, 904 struct perf_sample *sample,
905 struct machine *machine,
906 struct thread *thread, 905 struct thread *thread,
907 struct addr_location *al) 906 struct addr_location *al)
908{ 907{
909 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 908 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
910 909
911 thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, 910 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->addr, al);
912 sample->addr, al);
913 if (!al->map) 911 if (!al->map)
914 thread__find_addr_map(thread, machine, cpumode, MAP__VARIABLE, 912 thread__find_addr_map(thread, cpumode, MAP__VARIABLE,
915 sample->addr, al); 913 sample->addr, al);
916 914
917 al->cpu = sample->cpu; 915 al->cpu = sample->cpu;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 5699e7e2a790..7be389735402 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -143,6 +143,32 @@ struct branch_stack {
143 struct branch_entry entries[0]; 143 struct branch_entry entries[0];
144}; 144};
145 145
146enum {
147 PERF_IP_FLAG_BRANCH = 1ULL << 0,
148 PERF_IP_FLAG_CALL = 1ULL << 1,
149 PERF_IP_FLAG_RETURN = 1ULL << 2,
150 PERF_IP_FLAG_CONDITIONAL = 1ULL << 3,
151 PERF_IP_FLAG_SYSCALLRET = 1ULL << 4,
152 PERF_IP_FLAG_ASYNC = 1ULL << 5,
153 PERF_IP_FLAG_INTERRUPT = 1ULL << 6,
154 PERF_IP_FLAG_TX_ABORT = 1ULL << 7,
155 PERF_IP_FLAG_TRACE_BEGIN = 1ULL << 8,
156 PERF_IP_FLAG_TRACE_END = 1ULL << 9,
157 PERF_IP_FLAG_IN_TX = 1ULL << 10,
158};
159
160#define PERF_BRANCH_MASK (\
161 PERF_IP_FLAG_BRANCH |\
162 PERF_IP_FLAG_CALL |\
163 PERF_IP_FLAG_RETURN |\
164 PERF_IP_FLAG_CONDITIONAL |\
165 PERF_IP_FLAG_SYSCALLRET |\
166 PERF_IP_FLAG_ASYNC |\
167 PERF_IP_FLAG_INTERRUPT |\
168 PERF_IP_FLAG_TX_ABORT |\
169 PERF_IP_FLAG_TRACE_BEGIN |\
170 PERF_IP_FLAG_TRACE_END)
171
146struct perf_sample { 172struct perf_sample {
147 u64 ip; 173 u64 ip;
148 u32 pid, tid; 174 u32 pid, tid;
@@ -187,6 +213,7 @@ enum perf_user_event_type { /* above any possible kernel type */
187 PERF_RECORD_HEADER_TRACING_DATA = 66, 213 PERF_RECORD_HEADER_TRACING_DATA = 66,
188 PERF_RECORD_HEADER_BUILD_ID = 67, 214 PERF_RECORD_HEADER_BUILD_ID = 67,
189 PERF_RECORD_FINISHED_ROUND = 68, 215 PERF_RECORD_FINISHED_ROUND = 68,
216 PERF_RECORD_ID_INDEX = 69,
190 PERF_RECORD_HEADER_MAX 217 PERF_RECORD_HEADER_MAX
191}; 218};
192 219
@@ -239,6 +266,19 @@ struct tracing_data_event {
239 u32 size; 266 u32 size;
240}; 267};
241 268
269struct id_index_entry {
270 u64 id;
271 u64 idx;
272 u64 cpu;
273 u64 tid;
274};
275
276struct id_index_event {
277 struct perf_event_header header;
278 u64 nr;
279 struct id_index_entry entries[0];
280};
281
242union perf_event { 282union perf_event {
243 struct perf_event_header header; 283 struct perf_event_header header;
244 struct mmap_event mmap; 284 struct mmap_event mmap;
@@ -253,6 +293,7 @@ union perf_event {
253 struct event_type_event event_type; 293 struct event_type_event event_type;
254 struct tracing_data_event tracing_data; 294 struct tracing_data_event tracing_data;
255 struct build_id_event build_id; 295 struct build_id_event build_id;
296 struct id_index_event id_index;
256}; 297};
257 298
258void perf_event__print_totals(void); 299void perf_event__print_totals(void);
@@ -322,7 +363,6 @@ bool is_bts_event(struct perf_event_attr *attr);
322bool sample_addr_correlates_sym(struct perf_event_attr *attr); 363bool sample_addr_correlates_sym(struct perf_event_attr *attr);
323void perf_event__preprocess_sample_addr(union perf_event *event, 364void perf_event__preprocess_sample_addr(union perf_event *event,
324 struct perf_sample *sample, 365 struct perf_sample *sample,
325 struct machine *machine,
326 struct thread *thread, 366 struct thread *thread,
327 struct addr_location *al); 367 struct addr_location *al);
328 368
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 3c9e77d6b4c2..7e23dae54f1d 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -413,7 +413,7 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
413 int nfds = 0; 413 int nfds = 0;
414 struct perf_evsel *evsel; 414 struct perf_evsel *evsel;
415 415
416 list_for_each_entry(evsel, &evlist->entries, node) { 416 evlist__for_each(evlist, evsel) {
417 if (evsel->system_wide) 417 if (evsel->system_wide)
418 nfds += nr_cpus; 418 nfds += nr_cpus;
419 else 419 else
@@ -527,6 +527,22 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
527 return 0; 527 return 0;
528} 528}
529 529
530static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
531 struct perf_evsel *evsel, int idx, int cpu,
532 int thread)
533{
534 struct perf_sample_id *sid = SID(evsel, cpu, thread);
535 sid->idx = idx;
536 if (evlist->cpus && cpu >= 0)
537 sid->cpu = evlist->cpus->map[cpu];
538 else
539 sid->cpu = -1;
540 if (!evsel->system_wide && evlist->threads && thread >= 0)
541 sid->tid = evlist->threads->map[thread];
542 else
543 sid->tid = -1;
544}
545
530struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id) 546struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
531{ 547{
532 struct hlist_head *head; 548 struct hlist_head *head;
@@ -805,9 +821,13 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
805 return -1; 821 return -1;
806 } 822 }
807 823
808 if ((evsel->attr.read_format & PERF_FORMAT_ID) && 824 if (evsel->attr.read_format & PERF_FORMAT_ID) {
809 perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) 825 if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
810 return -1; 826 fd) < 0)
827 return -1;
828 perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
829 thread);
830 }
811 } 831 }
812 832
813 return 0; 833 return 0;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 2f9e68025ede..12b4396c7175 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -853,8 +853,6 @@ void perf_evsel__exit(struct perf_evsel *evsel)
853 perf_evsel__free_id(evsel); 853 perf_evsel__free_id(evsel);
854 close_cgroup(evsel->cgrp); 854 close_cgroup(evsel->cgrp);
855 zfree(&evsel->group_name); 855 zfree(&evsel->group_name);
856 if (evsel->tp_format)
857 pevent_free_format(evsel->tp_format);
858 zfree(&evsel->name); 856 zfree(&evsel->name);
859 perf_evsel__object.fini(evsel); 857 perf_evsel__object.fini(evsel);
860} 858}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 163c5604e5d1..979790951bfb 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -36,6 +36,9 @@ struct perf_sample_id {
36 struct hlist_node node; 36 struct hlist_node node;
37 u64 id; 37 u64 id;
38 struct perf_evsel *evsel; 38 struct perf_evsel *evsel;
39 int idx;
40 int cpu;
41 pid_t tid;
39 42
40 /* Holds total ID period value for PERF_SAMPLE_READ processing. */ 43 /* Holds total ID period value for PERF_SAMPLE_READ processing. */
41 u64 period; 44 u64 period;
@@ -54,6 +57,7 @@ struct cgroup_sel;
54 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or 57 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
55 * PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all 58 * PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
56 * is used there is an id sample appended to non-sample events 59 * is used there is an id sample appended to non-sample events
60 * @priv: And what is in its containing unnamed union are tool specific
57 */ 61 */
58struct perf_evsel { 62struct perf_evsel {
59 struct list_head node; 63 struct list_head node;
@@ -73,6 +77,7 @@ struct perf_evsel {
73 union { 77 union {
74 void *priv; 78 void *priv;
75 off_t id_offset; 79 off_t id_offset;
80 u64 db_id;
76 }; 81 };
77 struct cgroup_sel *cgrp; 82 struct cgroup_sel *cgrp;
78 void *handler; 83 void *handler;
diff --git a/tools/perf/util/find-vdso-map.c b/tools/perf/util/find-vdso-map.c
new file mode 100644
index 000000000000..95ef1cffc056
--- /dev/null
+++ b/tools/perf/util/find-vdso-map.c
@@ -0,0 +1,30 @@
1static int find_vdso_map(void **start, void **end)
2{
3 FILE *maps;
4 char line[128];
5 int found = 0;
6
7 maps = fopen("/proc/self/maps", "r");
8 if (!maps) {
9 fprintf(stderr, "vdso: cannot open maps\n");
10 return -1;
11 }
12
13 while (!found && fgets(line, sizeof(line), maps)) {
14 int m = -1;
15
16 /* We care only about private r-x mappings. */
17 if (2 != sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n",
18 start, end, &m))
19 continue;
20 if (m < 0)
21 continue;
22
23 if (!strncmp(&line[m], VDSO__MAP_NAME,
24 sizeof(VDSO__MAP_NAME) - 1))
25 found = 1;
26 }
27
28 fclose(maps);
29 return !found;
30}
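
find_vdso_map() locates the calling process's own [vdso] mapping by scanning /proc/self/maps for a private executable mapping with that name. The same scan in a short sketch:

# Illustrative sketch, not part of the patch: find the [vdso] mapping of the
# current process, returning its (start, end) addresses or None.
def find_vdso_map():
	with open("/proc/self/maps") as maps:
		for line in maps:
			fields = line.split()
			if len(fields) >= 6 and "x" in fields[1] and fields[5] == "[vdso]":
				start, end = (int(a, 16) for a in fields[0].split("-"))
				return start, end
	return None
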
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 26f5b2fe5dc8..76442caca37e 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -79,10 +79,7 @@ static int do_write(int fd, const void *buf, size_t size)
79 return 0; 79 return 0;
80} 80}
81 81
82#define NAME_ALIGN 64 82int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
83
84static int write_padded(int fd, const void *bf, size_t count,
85 size_t count_aligned)
86{ 83{
87 static const char zero_buf[NAME_ALIGN]; 84 static const char zero_buf[NAME_ALIGN];
88 int err = do_write(fd, bf, count); 85 int err = do_write(fd, bf, count);
@@ -171,340 +168,6 @@ perf_header__set_cmdline(int argc, const char **argv)
171 return 0; 168 return 0;
172} 169}
173 170
174#define dsos__for_each_with_build_id(pos, head) \
175 list_for_each_entry(pos, head, node) \
176 if (!pos->has_build_id) \
177 continue; \
178 else
179
180static int write_buildid(const char *name, size_t name_len, u8 *build_id,
181 pid_t pid, u16 misc, int fd)
182{
183 int err;
184 struct build_id_event b;
185 size_t len;
186
187 len = name_len + 1;
188 len = PERF_ALIGN(len, NAME_ALIGN);
189
190 memset(&b, 0, sizeof(b));
191 memcpy(&b.build_id, build_id, BUILD_ID_SIZE);
192 b.pid = pid;
193 b.header.misc = misc;
194 b.header.size = sizeof(b) + len;
195
196 err = do_write(fd, &b, sizeof(b));
197 if (err < 0)
198 return err;
199
200 return write_padded(fd, name, name_len + 1, len);
201}
202
203static int __dsos__hit_all(struct list_head *head)
204{
205 struct dso *pos;
206
207 list_for_each_entry(pos, head, node)
208 pos->hit = true;
209
210 return 0;
211}
212
213static int machine__hit_all_dsos(struct machine *machine)
214{
215 int err;
216
217 err = __dsos__hit_all(&machine->kernel_dsos.head);
218 if (err)
219 return err;
220
221 return __dsos__hit_all(&machine->user_dsos.head);
222}
223
224int dsos__hit_all(struct perf_session *session)
225{
226 struct rb_node *nd;
227 int err;
228
229 err = machine__hit_all_dsos(&session->machines.host);
230 if (err)
231 return err;
232
233 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
234 struct machine *pos = rb_entry(nd, struct machine, rb_node);
235
236 err = machine__hit_all_dsos(pos);
237 if (err)
238 return err;
239 }
240
241 return 0;
242}
243
244static int __dsos__write_buildid_table(struct list_head *head,
245 struct machine *machine,
246 pid_t pid, u16 misc, int fd)
247{
248 char nm[PATH_MAX];
249 struct dso *pos;
250
251 dsos__for_each_with_build_id(pos, head) {
252 int err;
253 const char *name;
254 size_t name_len;
255
256 if (!pos->hit)
257 continue;
258
259 if (dso__is_vdso(pos)) {
260 name = pos->short_name;
261 name_len = pos->short_name_len + 1;
262 } else if (dso__is_kcore(pos)) {
263 machine__mmap_name(machine, nm, sizeof(nm));
264 name = nm;
265 name_len = strlen(nm) + 1;
266 } else {
267 name = pos->long_name;
268 name_len = pos->long_name_len + 1;
269 }
270
271 err = write_buildid(name, name_len, pos->build_id,
272 pid, misc, fd);
273 if (err)
274 return err;
275 }
276
277 return 0;
278}
279
280static int machine__write_buildid_table(struct machine *machine, int fd)
281{
282 int err;
283 u16 kmisc = PERF_RECORD_MISC_KERNEL,
284 umisc = PERF_RECORD_MISC_USER;
285
286 if (!machine__is_host(machine)) {
287 kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
288 umisc = PERF_RECORD_MISC_GUEST_USER;
289 }
290
291 err = __dsos__write_buildid_table(&machine->kernel_dsos.head, machine,
292 machine->pid, kmisc, fd);
293 if (err == 0)
294 err = __dsos__write_buildid_table(&machine->user_dsos.head,
295 machine, machine->pid, umisc,
296 fd);
297 return err;
298}
299
300static int dsos__write_buildid_table(struct perf_header *header, int fd)
301{
302 struct perf_session *session = container_of(header,
303 struct perf_session, header);
304 struct rb_node *nd;
305 int err = machine__write_buildid_table(&session->machines.host, fd);
306
307 if (err)
308 return err;
309
310 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
311 struct machine *pos = rb_entry(nd, struct machine, rb_node);
312 err = machine__write_buildid_table(pos, fd);
313 if (err)
314 break;
315 }
316 return err;
317}
318
319int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
320 const char *name, bool is_kallsyms, bool is_vdso)
321{
322 const size_t size = PATH_MAX;
323 char *realname, *filename = zalloc(size),
324 *linkname = zalloc(size), *targetname;
325 int len, err = -1;
326 bool slash = is_kallsyms || is_vdso;
327
328 if (is_kallsyms) {
329 if (symbol_conf.kptr_restrict) {
330 pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
331 err = 0;
332 goto out_free;
333 }
334 realname = (char *) name;
335 } else
336 realname = realpath(name, NULL);
337
338 if (realname == NULL || filename == NULL || linkname == NULL)
339 goto out_free;
340
341 len = scnprintf(filename, size, "%s%s%s",
342 debugdir, slash ? "/" : "",
343 is_vdso ? DSO__NAME_VDSO : realname);
344 if (mkdir_p(filename, 0755))
345 goto out_free;
346
347 snprintf(filename + len, size - len, "/%s", sbuild_id);
348
349 if (access(filename, F_OK)) {
350 if (is_kallsyms) {
351 if (copyfile("/proc/kallsyms", filename))
352 goto out_free;
353 } else if (link(realname, filename) && copyfile(name, filename))
354 goto out_free;
355 }
356
357 len = scnprintf(linkname, size, "%s/.build-id/%.2s",
358 debugdir, sbuild_id);
359
360 if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
361 goto out_free;
362
363 snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
364 targetname = filename + strlen(debugdir) - 5;
365 memcpy(targetname, "../..", 5);
366
367 if (symlink(targetname, linkname) == 0)
368 err = 0;
369out_free:
370 if (!is_kallsyms)
371 free(realname);
372 free(filename);
373 free(linkname);
374 return err;
375}
376
377static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
378 const char *name, const char *debugdir,
379 bool is_kallsyms, bool is_vdso)
380{
381 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
382
383 build_id__sprintf(build_id, build_id_size, sbuild_id);
384
385 return build_id_cache__add_s(sbuild_id, debugdir, name,
386 is_kallsyms, is_vdso);
387}
388
389int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
390{
391 const size_t size = PATH_MAX;
392 char *filename = zalloc(size),
393 *linkname = zalloc(size);
394 int err = -1;
395
396 if (filename == NULL || linkname == NULL)
397 goto out_free;
398
399 snprintf(linkname, size, "%s/.build-id/%.2s/%s",
400 debugdir, sbuild_id, sbuild_id + 2);
401
402 if (access(linkname, F_OK))
403 goto out_free;
404
405 if (readlink(linkname, filename, size - 1) < 0)
406 goto out_free;
407
408 if (unlink(linkname))
409 goto out_free;
410
411 /*
412 * Since the link is relative, we must make it absolute:
413 */
414 snprintf(linkname, size, "%s/.build-id/%.2s/%s",
415 debugdir, sbuild_id, filename);
416
417 if (unlink(linkname))
418 goto out_free;
419
420 err = 0;
421out_free:
422 free(filename);
423 free(linkname);
424 return err;
425}
426
427static int dso__cache_build_id(struct dso *dso, struct machine *machine,
428 const char *debugdir)
429{
430 bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
431 bool is_vdso = dso__is_vdso(dso);
432 const char *name = dso->long_name;
433 char nm[PATH_MAX];
434
435 if (dso__is_kcore(dso)) {
436 is_kallsyms = true;
437 machine__mmap_name(machine, nm, sizeof(nm));
438 name = nm;
439 }
440 return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
441 debugdir, is_kallsyms, is_vdso);
442}
443
444static int __dsos__cache_build_ids(struct list_head *head,
445 struct machine *machine, const char *debugdir)
446{
447 struct dso *pos;
448 int err = 0;
449
450 dsos__for_each_with_build_id(pos, head)
451 if (dso__cache_build_id(pos, machine, debugdir))
452 err = -1;
453
454 return err;
455}
456
457static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
458{
459 int ret = __dsos__cache_build_ids(&machine->kernel_dsos.head, machine,
460 debugdir);
461 ret |= __dsos__cache_build_ids(&machine->user_dsos.head, machine,
462 debugdir);
463 return ret;
464}
465
466static int perf_session__cache_build_ids(struct perf_session *session)
467{
468 struct rb_node *nd;
469 int ret;
470 char debugdir[PATH_MAX];
471
472 snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
473
474 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
475 return -1;
476
477 ret = machine__cache_build_ids(&session->machines.host, debugdir);
478
479 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
480 struct machine *pos = rb_entry(nd, struct machine, rb_node);
481 ret |= machine__cache_build_ids(pos, debugdir);
482 }
483 return ret ? -1 : 0;
484}
485
486static bool machine__read_build_ids(struct machine *machine, bool with_hits)
487{
488 bool ret;
489
490 ret = __dsos__read_build_ids(&machine->kernel_dsos.head, with_hits);
491 ret |= __dsos__read_build_ids(&machine->user_dsos.head, with_hits);
492 return ret;
493}
494
495static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
496{
497 struct rb_node *nd;
498 bool ret = machine__read_build_ids(&session->machines.host, with_hits);
499
500 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
501 struct machine *pos = rb_entry(nd, struct machine, rb_node);
502 ret |= machine__read_build_ids(pos, with_hits);
503 }
504
505 return ret;
506}
507
508static int write_tracing_data(int fd, struct perf_header *h __maybe_unused, 171static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
509 struct perf_evlist *evlist) 172 struct perf_evlist *evlist)
510{ 173{
@@ -523,7 +186,7 @@ static int write_build_id(int fd, struct perf_header *h,
523 if (!perf_session__read_build_ids(session, true)) 186 if (!perf_session__read_build_ids(session, true))
524 return -1; 187 return -1;
525 188
526 err = dsos__write_buildid_table(h, fd); 189 err = perf_session__write_buildid_table(session, fd);
527 if (err < 0) { 190 if (err < 0) {
528 pr_debug("failed to write buildid table\n"); 191 pr_debug("failed to write buildid table\n");
529 return err; 192 return err;
@@ -601,8 +264,10 @@ static int __write_cpudesc(int fd, const char *cpuinfo_proc)
601 break; 264 break;
602 } 265 }
603 266
604 if (ret) 267 if (ret) {
268 ret = -1;
605 goto done; 269 goto done;
270 }
606 271
607 s = buf; 272 s = buf;
608 273
@@ -965,7 +630,8 @@ static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
965 n = sscanf(buf, "%*s %"PRIu64, &mem); 630 n = sscanf(buf, "%*s %"PRIu64, &mem);
966 if (n == 1) 631 if (n == 1)
967 ret = do_write(fd, &mem, sizeof(mem)); 632 ret = do_write(fd, &mem, sizeof(mem));
968 } 633 } else
634 ret = -1;
969 free(buf); 635 free(buf);
970 fclose(fp); 636 fclose(fp);
971 return ret; 637 return ret;
@@ -1603,7 +1269,7 @@ static int __event_process_build_id(struct build_id_event *bev,
1603 1269
1604 dso__set_build_id(dso, &bev->build_id); 1270 dso__set_build_id(dso, &bev->build_id);
1605 1271
1606 if (filename[0] == '[') 1272 if (!is_kernel_module(filename, NULL))
1607 dso->kernel = dso_type; 1273 dso->kernel = dso_type;
1608 1274
1609 build_id__sprintf(dso->build_id, sizeof(dso->build_id), 1275 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
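The build-id cache helpers shown above produce, per cached DSO, a layout along these lines (a sketch; buildid_dir typically defaults to ~/.debug, and the concrete names are illustrative):

	/*
	 * build_id_cache__add_s() creates roughly:
	 *
	 *   <buildid_dir>/<path of DSO>/<sbuild_id>     copy or hard link
	 *   <buildid_dir>/.build-id/ab/cdef01...        relative symlink to it
	 *
	 * where "ab" is the first two hex digits of the build id and the rest
	 * forms the link name; build_id_cache__remove_s() resolves and unlinks
	 * the same two paths.
	 */
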
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 8f5cbaea64a5..3bb90ac172a1 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -122,10 +122,6 @@ int perf_header__process_sections(struct perf_header *header, int fd,
122 122
123int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full); 123int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);
124 124
125int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
126 const char *name, bool is_kallsyms, bool is_vdso);
127int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
128
129int perf_event__synthesize_attr(struct perf_tool *tool, 125int perf_event__synthesize_attr(struct perf_tool *tool,
130 struct perf_event_attr *attr, u32 ids, u64 *id, 126 struct perf_event_attr *attr, u32 ids, u64 *id,
131 perf_event__handler_t process); 127 perf_event__handler_t process);
@@ -151,7 +147,9 @@ int perf_event__process_build_id(struct perf_tool *tool,
151 struct perf_session *session); 147 struct perf_session *session);
152bool is_perf_magic(u64 magic); 148bool is_perf_magic(u64 magic);
153 149
154int dsos__hit_all(struct perf_session *session); 150#define NAME_ALIGN 64
151
152int write_padded(int fd, const void *bf, size_t count, size_t count_aligned);
155 153
156/* 154/*
157 * arch specific callback 155 * arch specific callback
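With write_padded() and NAME_ALIGN now exported here, callers outside header.c can emit NAME_ALIGN-padded records; a sketch (fd is assumed to be an open output descriptor, the path is illustrative):

	const char *name = "/usr/lib/libfoo.so";	/* illustrative */
	size_t len = strlen(name) + 1;			/* incl. NUL */
	size_t aligned = PERF_ALIGN(len, NAME_ALIGN);	/* pad to 64 bytes */
	int err = write_padded(fd, name, len, aligned);

	if (err < 0)
		return err;
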
diff --git a/tools/perf/util/include/linux/bitmap.h b/tools/perf/util/include/linux/bitmap.h
index 01ffd12dc791..40bd21488032 100644
--- a/tools/perf/util/include/linux/bitmap.h
+++ b/tools/perf/util/include/linux/bitmap.h
@@ -46,4 +46,21 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
46 __bitmap_or(dst, src1, src2, nbits); 46 __bitmap_or(dst, src1, src2, nbits);
47} 47}
48 48
49/**
50 * test_and_set_bit - Set a bit and return its old value
51 * @nr: Bit to set
52 * @addr: Address to count from
53 */
54static inline int test_and_set_bit(int nr, unsigned long *addr)
55{
56 unsigned long mask = BIT_MASK(nr);
57 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
58 unsigned long old;
59
60 old = *p;
61 *p = old | mask;
62
63 return (old & mask) != 0;
64}
65
49#endif /* _PERF_BITOPS_H */ 66#endif /* _PERF_BITOPS_H */
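A minimal sketch of how the new helper is meant to be used, mirroring the one-time event setup in the scripting engines further down; the bitmap and event_id names are illustrative, and note that, unlike its kernel namesake, this version is a plain read-modify-write and is not atomic:

	static DECLARE_BITMAP(seen, 1 << 16);	/* one bit per event id */

	if (!test_and_set_bit(event_id, seen)) {
		/* first time this id is observed: do the one-off setup */
	}
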
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index dadfa7e54287..c3294163de17 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -15,6 +15,8 @@
15#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64)) 15#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
16#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32)) 16#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
17#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE) 17#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE)
18#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
19#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
18 20
19#define for_each_set_bit(bit, addr, size) \ 21#define for_each_set_bit(bit, addr, size) \
20 for ((bit) = find_first_bit((addr), (size)); \ 22 for ((bit) = find_first_bit((addr), (size)); \
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 34fc7c8672e4..52e94902afb1 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -21,7 +21,7 @@ static void dsos__init(struct dsos *dsos)
21 21
22int machine__init(struct machine *machine, const char *root_dir, pid_t pid) 22int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
23{ 23{
24 map_groups__init(&machine->kmaps); 24 map_groups__init(&machine->kmaps, machine);
25 RB_CLEAR_NODE(&machine->rb_node); 25 RB_CLEAR_NODE(&machine->rb_node);
26 dsos__init(&machine->user_dsos); 26 dsos__init(&machine->user_dsos);
27 dsos__init(&machine->kernel_dsos); 27 dsos__init(&machine->kernel_dsos);
@@ -32,7 +32,6 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
32 32
33 machine->vdso_info = NULL; 33 machine->vdso_info = NULL;
34 34
35 machine->kmaps.machine = machine;
36 machine->pid = pid; 35 machine->pid = pid;
37 36
38 machine->symbol_filter = NULL; 37 machine->symbol_filter = NULL;
@@ -319,7 +318,7 @@ static void machine__update_thread_pid(struct machine *machine,
319 goto out_err; 318 goto out_err;
320 319
321 if (!leader->mg) 320 if (!leader->mg)
322 leader->mg = map_groups__new(); 321 leader->mg = map_groups__new(machine);
323 322
324 if (!leader->mg) 323 if (!leader->mg)
325 goto out_err; 324 goto out_err;
@@ -465,6 +464,7 @@ struct map *machine__new_module(struct machine *machine, u64 start,
465{ 464{
466 struct map *map; 465 struct map *map;
467 struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename); 466 struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
467 bool compressed;
468 468
469 if (dso == NULL) 469 if (dso == NULL)
470 return NULL; 470 return NULL;
@@ -477,6 +477,11 @@ struct map *machine__new_module(struct machine *machine, u64 start,
477 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; 477 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
478 else 478 else
479 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; 479 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
480
481 /* _KMODULE_COMP should be next to _KMODULE */
482 if (is_kernel_module(filename, &compressed) && compressed)
483 dso->symtab_type++;
484
480 map_groups__insert(&machine->kmaps, map); 485 map_groups__insert(&machine->kmaps, map);
481 return map; 486 return map;
482} 487}
@@ -862,8 +867,14 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
862 struct map *map; 867 struct map *map;
863 char *long_name; 868 char *long_name;
864 869
865 if (dot == NULL || strcmp(dot, ".ko")) 870 if (dot == NULL)
866 continue; 871 continue;
872
 873 /* On some systems, modules are compressed, e.g. .ko.gz */
874 if (is_supported_compression(dot + 1) &&
875 is_kmodule_extension(dot - 2))
876 dot -= 3;
877
867 snprintf(dso_name, sizeof(dso_name), "[%.*s]", 878 snprintf(dso_name, sizeof(dso_name), "[%.*s]",
868 (int)(dot - dent->d_name), dent->d_name); 879 (int)(dot - dent->d_name), dent->d_name);
869 880
@@ -1045,6 +1056,11 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
1045 dot = strrchr(name, '.'); 1056 dot = strrchr(name, '.');
1046 if (dot == NULL) 1057 if (dot == NULL)
1047 goto out_problem; 1058 goto out_problem;
1059 /* On some systems, modules are compressed, e.g. .ko.gz */
1060 if (is_supported_compression(dot + 1))
1061 dot -= 3;
1062 if (!is_kmodule_extension(dot + 1))
1063 goto out_problem;
1048 snprintf(short_module_name, sizeof(short_module_name), 1064 snprintf(short_module_name, sizeof(short_module_name),
1049 "[%.*s]", (int)(dot - name), name); 1065 "[%.*s]", (int)(dot - name), name);
1050 strxfrchar(short_module_name, '-', '_'); 1066 strxfrchar(short_module_name, '-', '_');
@@ -1069,8 +1085,20 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
1069 * Should be there already, from the build-id table in 1085 * Should be there already, from the build-id table in
1070 * the header. 1086 * the header.
1071 */ 1087 */
1072 struct dso *kernel = __dsos__findnew(&machine->kernel_dsos, 1088 struct dso *kernel = NULL;
1073 kmmap_prefix); 1089 struct dso *dso;
1090
1091 list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
1092 if (is_kernel_module(dso->long_name, NULL))
1093 continue;
1094
1095 kernel = dso;
1096 break;
1097 }
1098
1099 if (kernel == NULL)
1100 kernel = __dsos__findnew(&machine->kernel_dsos,
1101 kmmap_prefix);
1074 if (kernel == NULL) 1102 if (kernel == NULL)
1075 goto out_problem; 1103 goto out_problem;
1076 1104
@@ -1078,6 +1106,9 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
1078 if (__machine__create_kernel_maps(machine, kernel) < 0) 1106 if (__machine__create_kernel_maps(machine, kernel) < 0)
1079 goto out_problem; 1107 goto out_problem;
1080 1108
1109 if (strstr(dso->long_name, "vmlinux"))
1110 dso__set_short_name(dso, "[kernel.vmlinux]", false);
1111
1081 machine__set_kernel_mmap_len(machine, event); 1112 machine__set_kernel_mmap_len(machine, event);
1082 1113
1083 /* 1114 /*
@@ -1290,7 +1321,7 @@ static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
1290 return 0; 1321 return 0;
1291} 1322}
1292 1323
1293static void ip__resolve_ams(struct machine *machine, struct thread *thread, 1324static void ip__resolve_ams(struct thread *thread,
1294 struct addr_map_symbol *ams, 1325 struct addr_map_symbol *ams,
1295 u64 ip) 1326 u64 ip)
1296{ 1327{
@@ -1304,7 +1335,7 @@ static void ip__resolve_ams(struct machine *machine, struct thread *thread,
1304 * Thus, we have to try consecutively until we find a match 1335 * Thus, we have to try consecutively until we find a match
1305 * or else, the symbol is unknown 1336 * or else, the symbol is unknown
1306 */ 1337 */
1307 thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al); 1338 thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);
1308 1339
1309 ams->addr = ip; 1340 ams->addr = ip;
1310 ams->al_addr = al.addr; 1341 ams->al_addr = al.addr;
@@ -1312,23 +1343,21 @@ static void ip__resolve_ams(struct machine *machine, struct thread *thread,
1312 ams->map = al.map; 1343 ams->map = al.map;
1313} 1344}
1314 1345
1315static void ip__resolve_data(struct machine *machine, struct thread *thread, 1346static void ip__resolve_data(struct thread *thread,
1316 u8 m, struct addr_map_symbol *ams, u64 addr) 1347 u8 m, struct addr_map_symbol *ams, u64 addr)
1317{ 1348{
1318 struct addr_location al; 1349 struct addr_location al;
1319 1350
1320 memset(&al, 0, sizeof(al)); 1351 memset(&al, 0, sizeof(al));
1321 1352
1322 thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr, 1353 thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
1323 &al);
1324 if (al.map == NULL) { 1354 if (al.map == NULL) {
1325 /* 1355 /*
1326 * some shared data regions have execute bit set which puts 1356 * some shared data regions have execute bit set which puts
1327 * their mapping in the MAP__FUNCTION type array. 1357 * their mapping in the MAP__FUNCTION type array.
1328 * Check there as a fallback option before dropping the sample. 1358 * Check there as a fallback option before dropping the sample.
1329 */ 1359 */
1330 thread__find_addr_location(thread, machine, m, MAP__FUNCTION, addr, 1360 thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
1331 &al);
1332 } 1361 }
1333 1362
1334 ams->addr = addr; 1363 ams->addr = addr;
@@ -1345,9 +1374,8 @@ struct mem_info *sample__resolve_mem(struct perf_sample *sample,
1345 if (!mi) 1374 if (!mi)
1346 return NULL; 1375 return NULL;
1347 1376
1348 ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip); 1377 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
1349 ip__resolve_data(al->machine, al->thread, al->cpumode, 1378 ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
1350 &mi->daddr, sample->addr);
1351 mi->data_src.val = sample->data_src; 1379 mi->data_src.val = sample->data_src;
1352 1380
1353 return mi; 1381 return mi;
@@ -1364,15 +1392,14 @@ struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
1364 return NULL; 1392 return NULL;
1365 1393
1366 for (i = 0; i < bs->nr; i++) { 1394 for (i = 0; i < bs->nr; i++) {
1367 ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to); 1395 ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
1368 ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from); 1396 ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
1369 bi[i].flags = bs->entries[i].flags; 1397 bi[i].flags = bs->entries[i].flags;
1370 } 1398 }
1371 return bi; 1399 return bi;
1372} 1400}
1373 1401
1374static int machine__resolve_callchain_sample(struct machine *machine, 1402static int thread__resolve_callchain_sample(struct thread *thread,
1375 struct thread *thread,
1376 struct ip_callchain *chain, 1403 struct ip_callchain *chain,
1377 struct symbol **parent, 1404 struct symbol **parent,
1378 struct addr_location *root_al, 1405 struct addr_location *root_al,
@@ -1396,7 +1423,7 @@ static int machine__resolve_callchain_sample(struct machine *machine,
1396 * Based on DWARF debug information, some architectures skip 1423 * Based on DWARF debug information, some architectures skip
1397 * a callchain entry saved by the kernel. 1424 * a callchain entry saved by the kernel.
1398 */ 1425 */
1399 skip_idx = arch_skip_callchain_idx(machine, thread, chain); 1426 skip_idx = arch_skip_callchain_idx(thread, chain);
1400 1427
1401 for (i = 0; i < chain_nr; i++) { 1428 for (i = 0; i < chain_nr; i++) {
1402 u64 ip; 1429 u64 ip;
@@ -1438,7 +1465,7 @@ static int machine__resolve_callchain_sample(struct machine *machine,
1438 } 1465 }
1439 1466
1440 al.filtered = 0; 1467 al.filtered = 0;
1441 thread__find_addr_location(thread, machine, cpumode, 1468 thread__find_addr_location(thread, cpumode,
1442 MAP__FUNCTION, ip, &al); 1469 MAP__FUNCTION, ip, &al);
1443 if (al.sym != NULL) { 1470 if (al.sym != NULL) {
1444 if (sort__has_parent && !*parent && 1471 if (sort__has_parent && !*parent &&
@@ -1469,19 +1496,15 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
1469 entry->map, entry->sym); 1496 entry->map, entry->sym);
1470} 1497}
1471 1498
1472int machine__resolve_callchain(struct machine *machine, 1499int thread__resolve_callchain(struct thread *thread,
1473 struct perf_evsel *evsel, 1500 struct perf_evsel *evsel,
1474 struct thread *thread, 1501 struct perf_sample *sample,
1475 struct perf_sample *sample, 1502 struct symbol **parent,
1476 struct symbol **parent, 1503 struct addr_location *root_al,
1477 struct addr_location *root_al, 1504 int max_stack)
1478 int max_stack)
1479{ 1505{
1480 int ret; 1506 int ret = thread__resolve_callchain_sample(thread, sample->callchain,
1481 1507 parent, root_al, max_stack);
1482 ret = machine__resolve_callchain_sample(machine, thread,
1483 sample->callchain, parent,
1484 root_al, max_stack);
1485 if (ret) 1508 if (ret)
1486 return ret; 1509 return ret;
1487 1510
@@ -1495,7 +1518,7 @@ int machine__resolve_callchain(struct machine *machine,
1495 (!sample->user_stack.size)) 1518 (!sample->user_stack.size))
1496 return 0; 1519 return 0;
1497 1520
1498 return unwind__get_entries(unwind_entry, &callchain_cursor, machine, 1521 return unwind__get_entries(unwind_entry, &callchain_cursor,
1499 thread, sample, max_stack); 1522 thread, sample, max_stack);
1500 1523
1501} 1524}
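A worked example of the compressed-module handling added above (the module name is illustrative, and is_kmodule_extension() is assumed to accept the "ko" that precedes the compression suffix):

	/*
	 *   d_name                           = "nouveau.ko.gz"
	 *   dot = strrchr(d_name, '.')       -> points at ".gz"
	 *   is_supported_compression("gz")   -> true
	 *   is_kmodule_extension(dot - 2)    -> sees "ko..." -> true
	 *   dot -= 3                         -> now points at ".ko"
	 *   dso_name                         -> "[nouveau]"
	 */
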
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 2b651a7f5d0d..e8b7779a0a3f 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -40,6 +40,10 @@ struct machine {
40 u64 kernel_start; 40 u64 kernel_start;
41 symbol_filter_t symbol_filter; 41 symbol_filter_t symbol_filter;
42 pid_t *current_tid; 42 pid_t *current_tid;
43 union { /* Tool specific area */
44 void *priv;
45 u64 db_id;
46 };
43}; 47};
44 48
45static inline 49static inline
@@ -122,13 +126,12 @@ struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
122 struct addr_location *al); 126 struct addr_location *al);
123struct mem_info *sample__resolve_mem(struct perf_sample *sample, 127struct mem_info *sample__resolve_mem(struct perf_sample *sample,
124 struct addr_location *al); 128 struct addr_location *al);
125int machine__resolve_callchain(struct machine *machine, 129int thread__resolve_callchain(struct thread *thread,
126 struct perf_evsel *evsel, 130 struct perf_evsel *evsel,
127 struct thread *thread, 131 struct perf_sample *sample,
128 struct perf_sample *sample, 132 struct symbol **parent,
129 struct symbol **parent, 133 struct addr_location *root_al,
130 struct addr_location *root_al, 134 int max_stack);
131 int max_stack);
132 135
133/* 136/*
134 * Default guest kernel is defined by parameter --guestkallsyms 137 * Default guest kernel is defined by parameter --guestkallsyms
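A caller-side sketch of the renamed interface; the machine argument is gone because it is now reached through the thread's map groups (variable names are illustrative):

	struct symbol *parent = NULL;
	int err;

	err = thread__resolve_callchain(al.thread, evsel, sample,
					&parent, &al, PERF_MAX_STACK_DEPTH);
	if (err)
		return err;
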
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 2137c4596ec7..040a785c857b 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -413,14 +413,14 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
413 return ip + map->reloc; 413 return ip + map->reloc;
414} 414}
415 415
416void map_groups__init(struct map_groups *mg) 416void map_groups__init(struct map_groups *mg, struct machine *machine)
417{ 417{
418 int i; 418 int i;
419 for (i = 0; i < MAP__NR_TYPES; ++i) { 419 for (i = 0; i < MAP__NR_TYPES; ++i) {
420 mg->maps[i] = RB_ROOT; 420 mg->maps[i] = RB_ROOT;
421 INIT_LIST_HEAD(&mg->removed_maps[i]); 421 INIT_LIST_HEAD(&mg->removed_maps[i]);
422 } 422 }
423 mg->machine = NULL; 423 mg->machine = machine;
424 mg->refcnt = 1; 424 mg->refcnt = 1;
425} 425}
426 426
@@ -471,12 +471,12 @@ bool map_groups__empty(struct map_groups *mg)
471 return true; 471 return true;
472} 472}
473 473
474struct map_groups *map_groups__new(void) 474struct map_groups *map_groups__new(struct machine *machine)
475{ 475{
476 struct map_groups *mg = malloc(sizeof(*mg)); 476 struct map_groups *mg = malloc(sizeof(*mg));
477 477
478 if (mg != NULL) 478 if (mg != NULL)
479 map_groups__init(mg); 479 map_groups__init(mg, machine);
480 480
481 return mg; 481 return mg;
482} 482}
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 2f83954af050..6951a9d42339 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -64,7 +64,7 @@ struct map_groups {
64 int refcnt; 64 int refcnt;
65}; 65};
66 66
67struct map_groups *map_groups__new(void); 67struct map_groups *map_groups__new(struct machine *machine);
68void map_groups__delete(struct map_groups *mg); 68void map_groups__delete(struct map_groups *mg);
69bool map_groups__empty(struct map_groups *mg); 69bool map_groups__empty(struct map_groups *mg);
70 70
@@ -150,7 +150,7 @@ void maps__remove(struct rb_root *maps, struct map *map);
150struct map *maps__find(struct rb_root *maps, u64 addr); 150struct map *maps__find(struct rb_root *maps, u64 addr);
151struct map *maps__first(struct rb_root *maps); 151struct map *maps__first(struct rb_root *maps);
152struct map *maps__next(struct map *map); 152struct map *maps__next(struct map *map);
153void map_groups__init(struct map_groups *mg); 153void map_groups__init(struct map_groups *mg, struct machine *machine);
154void map_groups__exit(struct map_groups *mg); 154void map_groups__exit(struct map_groups *mg);
155int map_groups__clone(struct map_groups *mg, 155int map_groups__clone(struct map_groups *mg,
156 struct map_groups *parent, enum map_type type); 156 struct map_groups *parent, enum map_type type);
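Caller-side sketch of the changed constructors (illustrative); the owning machine is now passed in, so the separate back-pointer assignment that machine__init() used to do is no longer needed:

	struct map_groups *mg = map_groups__new(machine);

	if (mg == NULL)
		return -ENOMEM;
	/* mg->machine is already set by map_groups__init() */
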
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index bf48092983c6..f62dee7bd924 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -42,7 +42,26 @@ static int get_value(struct parse_opt_ctx_t *p,
42 return opterror(opt, "takes no value", flags); 42 return opterror(opt, "takes no value", flags);
43 if (unset && (opt->flags & PARSE_OPT_NONEG)) 43 if (unset && (opt->flags & PARSE_OPT_NONEG))
44 return opterror(opt, "isn't available", flags); 44 return opterror(opt, "isn't available", flags);
45 45 if (opt->flags & PARSE_OPT_DISABLED)
46 return opterror(opt, "is not usable", flags);
47
48 if (opt->flags & PARSE_OPT_EXCLUSIVE) {
49 if (p->excl_opt) {
50 char msg[128];
51
52 if (((flags & OPT_SHORT) && p->excl_opt->short_name) ||
53 p->excl_opt->long_name == NULL) {
54 scnprintf(msg, sizeof(msg), "cannot be used with switch `%c'",
55 p->excl_opt->short_name);
56 } else {
57 scnprintf(msg, sizeof(msg), "cannot be used with %s",
58 p->excl_opt->long_name);
59 }
60 opterror(opt, msg, flags);
61 return -3;
62 }
63 p->excl_opt = opt;
64 }
46 if (!(flags & OPT_SHORT) && p->opt) { 65 if (!(flags & OPT_SHORT) && p->opt) {
47 switch (opt->type) { 66 switch (opt->type) {
48 case OPTION_CALLBACK: 67 case OPTION_CALLBACK:
@@ -343,13 +362,14 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
343 const char * const usagestr[]) 362 const char * const usagestr[])
344{ 363{
345 int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP); 364 int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP);
365 int excl_short_opt = 1;
366 const char *arg;
346 367
347 /* we must reset ->opt, unknown short option leave it dangling */ 368 /* we must reset ->opt, unknown short option leave it dangling */
348 ctx->opt = NULL; 369 ctx->opt = NULL;
349 370
350 for (; ctx->argc; ctx->argc--, ctx->argv++) { 371 for (; ctx->argc; ctx->argc--, ctx->argv++) {
351 const char *arg = ctx->argv[0]; 372 arg = ctx->argv[0];
352
353 if (*arg != '-' || !arg[1]) { 373 if (*arg != '-' || !arg[1]) {
354 if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION) 374 if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION)
355 break; 375 break;
@@ -358,19 +378,21 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
358 } 378 }
359 379
360 if (arg[1] != '-') { 380 if (arg[1] != '-') {
361 ctx->opt = arg + 1; 381 ctx->opt = ++arg;
362 if (internal_help && *ctx->opt == 'h') 382 if (internal_help && *ctx->opt == 'h')
363 return usage_with_options_internal(usagestr, options, 0); 383 return usage_with_options_internal(usagestr, options, 0);
364 switch (parse_short_opt(ctx, options)) { 384 switch (parse_short_opt(ctx, options)) {
365 case -1: 385 case -1:
366 return parse_options_usage(usagestr, options, arg + 1, 1); 386 return parse_options_usage(usagestr, options, arg, 1);
367 case -2: 387 case -2:
368 goto unknown; 388 goto unknown;
389 case -3:
390 goto exclusive;
369 default: 391 default:
370 break; 392 break;
371 } 393 }
372 if (ctx->opt) 394 if (ctx->opt)
373 check_typos(arg + 1, options); 395 check_typos(arg, options);
374 while (ctx->opt) { 396 while (ctx->opt) {
375 if (internal_help && *ctx->opt == 'h') 397 if (internal_help && *ctx->opt == 'h')
376 return usage_with_options_internal(usagestr, options, 0); 398 return usage_with_options_internal(usagestr, options, 0);
@@ -387,6 +409,8 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
387 ctx->argv[0] = strdup(ctx->opt - 1); 409 ctx->argv[0] = strdup(ctx->opt - 1);
388 *(char *)ctx->argv[0] = '-'; 410 *(char *)ctx->argv[0] = '-';
389 goto unknown; 411 goto unknown;
412 case -3:
413 goto exclusive;
390 default: 414 default:
391 break; 415 break;
392 } 416 }
@@ -402,19 +426,23 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
402 break; 426 break;
403 } 427 }
404 428
405 if (internal_help && !strcmp(arg + 2, "help-all")) 429 arg += 2;
430 if (internal_help && !strcmp(arg, "help-all"))
406 return usage_with_options_internal(usagestr, options, 1); 431 return usage_with_options_internal(usagestr, options, 1);
407 if (internal_help && !strcmp(arg + 2, "help")) 432 if (internal_help && !strcmp(arg, "help"))
408 return usage_with_options_internal(usagestr, options, 0); 433 return usage_with_options_internal(usagestr, options, 0);
409 if (!strcmp(arg + 2, "list-opts")) 434 if (!strcmp(arg, "list-opts"))
410 return PARSE_OPT_LIST_OPTS; 435 return PARSE_OPT_LIST_OPTS;
411 if (!strcmp(arg + 2, "list-cmds")) 436 if (!strcmp(arg, "list-cmds"))
412 return PARSE_OPT_LIST_SUBCMDS; 437 return PARSE_OPT_LIST_SUBCMDS;
413 switch (parse_long_opt(ctx, arg + 2, options)) { 438 switch (parse_long_opt(ctx, arg, options)) {
414 case -1: 439 case -1:
415 return parse_options_usage(usagestr, options, arg + 2, 0); 440 return parse_options_usage(usagestr, options, arg, 0);
416 case -2: 441 case -2:
417 goto unknown; 442 goto unknown;
443 case -3:
444 excl_short_opt = 0;
445 goto exclusive;
418 default: 446 default:
419 break; 447 break;
420 } 448 }
@@ -426,6 +454,17 @@ unknown:
426 ctx->opt = NULL; 454 ctx->opt = NULL;
427 } 455 }
428 return PARSE_OPT_DONE; 456 return PARSE_OPT_DONE;
457
458exclusive:
459 parse_options_usage(usagestr, options, arg, excl_short_opt);
460 if ((excl_short_opt && ctx->excl_opt->short_name) ||
461 ctx->excl_opt->long_name == NULL) {
462 char opt = ctx->excl_opt->short_name;
463 parse_options_usage(NULL, options, &opt, 1);
464 } else {
465 parse_options_usage(NULL, options, ctx->excl_opt->long_name, 0);
466 }
467 return PARSE_OPT_HELP;
429} 468}
430 469
431int parse_options_end(struct parse_opt_ctx_t *ctx) 470int parse_options_end(struct parse_opt_ctx_t *ctx)
@@ -509,6 +548,8 @@ static void print_option_help(const struct option *opts, int full)
509 } 548 }
510 if (!full && (opts->flags & PARSE_OPT_HIDDEN)) 549 if (!full && (opts->flags & PARSE_OPT_HIDDEN))
511 return; 550 return;
551 if (opts->flags & PARSE_OPT_DISABLED)
552 return;
512 553
513 pos = fprintf(stderr, " "); 554 pos = fprintf(stderr, " ");
514 if (opts->short_name) 555 if (opts->short_name)
@@ -679,3 +720,16 @@ int parse_opt_verbosity_cb(const struct option *opt,
679 } 720 }
680 return 0; 721 return 0;
681} 722}
723
724void set_option_flag(struct option *opts, int shortopt, const char *longopt,
725 int flag)
726{
727 for (; opts->type != OPTION_END; opts++) {
728 if ((shortopt && opts->short_name == shortopt) ||
729 (opts->long_name && longopt &&
730 !strcmp(opts->long_name, longopt))) {
731 opts->flags |= flag;
732 break;
733 }
734 }
735}
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index b59ba858e73d..97b153fb4999 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -38,6 +38,8 @@ enum parse_opt_option_flags {
38 PARSE_OPT_NONEG = 4, 38 PARSE_OPT_NONEG = 4,
39 PARSE_OPT_HIDDEN = 8, 39 PARSE_OPT_HIDDEN = 8,
40 PARSE_OPT_LASTARG_DEFAULT = 16, 40 PARSE_OPT_LASTARG_DEFAULT = 16,
41 PARSE_OPT_DISABLED = 32,
42 PARSE_OPT_EXCLUSIVE = 64,
41}; 43};
42 44
43struct option; 45struct option;
@@ -173,6 +175,7 @@ struct parse_opt_ctx_t {
173 const char **out; 175 const char **out;
174 int argc, cpidx; 176 int argc, cpidx;
175 const char *opt; 177 const char *opt;
178 const struct option *excl_opt;
176 int flags; 179 int flags;
177}; 180};
178 181
@@ -211,4 +214,5 @@ extern int parse_opt_verbosity_cb(const struct option *, const char *, int);
211 214
212extern const char *parse_options_fix_filename(const char *prefix, const char *file); 215extern const char *parse_options_fix_filename(const char *prefix, const char *file);
213 216
217void set_option_flag(struct option *opts, int sopt, const char *lopt, int flag);
214#endif /* __PERF_PARSE_OPTIONS_H */ 218#endif /* __PERF_PARSE_OPTIONS_H */
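A sketch of how a builtin can use the new flags before calling parse_options(); the option table and option names here are hypothetical, not taken from this patch:

	/* hide one option entirely, make another mutually exclusive */
	set_option_flag(record_options, 'p', "pid", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'a', "all-cpus", PARSE_OPT_EXCLUSIVE);
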
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index e243ad962a4d..881b75490533 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -747,15 +747,18 @@ void print_pmu_events(const char *event_glob, bool name_only)
747 747
748 pmu = NULL; 748 pmu = NULL;
749 len = 0; 749 len = 0;
750 while ((pmu = perf_pmu__scan(pmu)) != NULL) 750 while ((pmu = perf_pmu__scan(pmu)) != NULL) {
751 list_for_each_entry(alias, &pmu->aliases, list) 751 list_for_each_entry(alias, &pmu->aliases, list)
752 len++; 752 len++;
753 aliases = malloc(sizeof(char *) * len); 753 if (pmu->selectable)
754 len++;
755 }
756 aliases = zalloc(sizeof(char *) * len);
754 if (!aliases) 757 if (!aliases)
755 return; 758 goto out_enomem;
756 pmu = NULL; 759 pmu = NULL;
757 j = 0; 760 j = 0;
758 while ((pmu = perf_pmu__scan(pmu)) != NULL) 761 while ((pmu = perf_pmu__scan(pmu)) != NULL) {
759 list_for_each_entry(alias, &pmu->aliases, list) { 762 list_for_each_entry(alias, &pmu->aliases, list) {
760 char *name = format_alias(buf, sizeof(buf), pmu, alias); 763 char *name = format_alias(buf, sizeof(buf), pmu, alias);
761 bool is_cpu = !strcmp(pmu->name, "cpu"); 764 bool is_cpu = !strcmp(pmu->name, "cpu");
@@ -765,13 +768,23 @@ void print_pmu_events(const char *event_glob, bool name_only)
765 (!is_cpu && strglobmatch(alias->name, 768 (!is_cpu && strglobmatch(alias->name,
766 event_glob)))) 769 event_glob))))
767 continue; 770 continue;
768 aliases[j] = name; 771
769 if (is_cpu && !name_only) 772 if (is_cpu && !name_only)
770 aliases[j] = format_alias_or(buf, sizeof(buf), 773 name = format_alias_or(buf, sizeof(buf), pmu, alias);
771 pmu, alias); 774
772 aliases[j] = strdup(aliases[j]); 775 aliases[j] = strdup(name);
776 if (aliases[j] == NULL)
777 goto out_enomem;
773 j++; 778 j++;
774 } 779 }
780 if (pmu->selectable) {
781 char *s;
782 if (asprintf(&s, "%s//", pmu->name) < 0)
783 goto out_enomem;
784 aliases[j] = s;
785 j++;
786 }
787 }
775 len = j; 788 len = j;
776 qsort(aliases, len, sizeof(char *), cmp_string); 789 qsort(aliases, len, sizeof(char *), cmp_string);
777 for (j = 0; j < len; j++) { 790 for (j = 0; j < len; j++) {
@@ -780,12 +793,20 @@ void print_pmu_events(const char *event_glob, bool name_only)
780 continue; 793 continue;
781 } 794 }
782 printf(" %-50s [Kernel PMU event]\n", aliases[j]); 795 printf(" %-50s [Kernel PMU event]\n", aliases[j]);
783 zfree(&aliases[j]);
784 printed++; 796 printed++;
785 } 797 }
786 if (printed) 798 if (printed)
787 printf("\n"); 799 printf("\n");
788 free(aliases); 800out_free:
801 for (j = 0; j < len; j++)
802 zfree(&aliases[j]);
803 zfree(&aliases);
804 return;
805
806out_enomem:
807 printf("FATAL: not enough memory to print PMU events\n");
808 if (aliases)
809 goto out_free;
789} 810}
790 811
791bool pmu_have_event(const char *pname, const char *name) 812bool pmu_have_event(const char *pname, const char *name)
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index fe9dfbee8eed..8092de78e818 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -18,6 +18,7 @@ struct perf_event_attr;
18struct perf_pmu { 18struct perf_pmu {
19 char *name; 19 char *name;
20 __u32 type; 20 __u32 type;
21 bool selectable;
21 struct perf_event_attr *default_config; 22 struct perf_event_attr *default_config;
22 struct cpu_map *cpus; 23 struct cpu_map *cpus;
23 struct list_head format; /* HEAD struct perf_pmu_format -> list */ 24 struct list_head format; /* HEAD struct perf_pmu_format -> list */
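With pmu->selectable set, print_pmu_events() in the pmu.c hunk above also lists the PMU itself; a sketch of the visible effect (the PMU name is illustrative):

	/*
	 * "perf list" gains an extra entry of the form
	 *
	 *     somepmu//                        [Kernel PMU event]
	 *
	 * which can then be used directly, e.g. "perf record -e somepmu//";
	 * the "%s//" alias string comes from the asprintf() added above.
	 */
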
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index c150ca4343eb..28eb1417cb2a 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1910,21 +1910,21 @@ static int show_perf_probe_event(struct perf_probe_event *pev,
1910 if (ret < 0) 1910 if (ret < 0)
1911 return ret; 1911 return ret;
1912 1912
1913 printf(" %-20s (on %s", buf, place); 1913 pr_info(" %-20s (on %s", buf, place);
1914 if (module) 1914 if (module)
1915 printf(" in %s", module); 1915 pr_info(" in %s", module);
1916 1916
1917 if (pev->nargs > 0) { 1917 if (pev->nargs > 0) {
1918 printf(" with"); 1918 pr_info(" with");
1919 for (i = 0; i < pev->nargs; i++) { 1919 for (i = 0; i < pev->nargs; i++) {
1920 ret = synthesize_perf_probe_arg(&pev->args[i], 1920 ret = synthesize_perf_probe_arg(&pev->args[i],
1921 buf, 128); 1921 buf, 128);
1922 if (ret < 0) 1922 if (ret < 0)
1923 break; 1923 break;
1924 printf(" %s", buf); 1924 pr_info(" %s", buf);
1925 } 1925 }
1926 } 1926 }
1927 printf(")\n"); 1927 pr_info(")\n");
1928 free(place); 1928 free(place);
1929 return ret; 1929 return ret;
1930} 1930}
@@ -2124,7 +2124,7 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
2124 } 2124 }
2125 2125
2126 ret = 0; 2126 ret = 0;
2127 printf("Added new event%s\n", (ntevs > 1) ? "s:" : ":"); 2127 pr_info("Added new event%s\n", (ntevs > 1) ? "s:" : ":");
2128 for (i = 0; i < ntevs; i++) { 2128 for (i = 0; i < ntevs; i++) {
2129 tev = &tevs[i]; 2129 tev = &tevs[i];
2130 if (pev->event) 2130 if (pev->event)
@@ -2179,8 +2179,8 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
2179 2179
2180 if (ret >= 0) { 2180 if (ret >= 0) {
2181 /* Show how to use the event. */ 2181 /* Show how to use the event. */
2182 printf("\nYou can now use it in all perf tools, such as:\n\n"); 2182 pr_info("\nYou can now use it in all perf tools, such as:\n\n");
2183 printf("\tperf record -e %s:%s -aR sleep 1\n\n", tev->group, 2183 pr_info("\tperf record -e %s:%s -aR sleep 1\n\n", tev->group,
2184 tev->event); 2184 tev->event);
2185 } 2185 }
2186 2186
@@ -2444,7 +2444,7 @@ static int __del_trace_probe_event(int fd, struct str_node *ent)
2444 goto error; 2444 goto error;
2445 } 2445 }
2446 2446
2447 printf("Removed event: %s\n", ent->s); 2447 pr_info("Removed event: %s\n", ent->s);
2448 return 0; 2448 return 0;
2449error: 2449error:
2450 pr_warning("Failed to delete event: %s\n", 2450 pr_warning("Failed to delete event: %s\n",
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 0a01bac4ce02..22ebc46226e7 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -24,6 +24,7 @@
24#include <string.h> 24#include <string.h>
25#include <ctype.h> 25#include <ctype.h>
26#include <errno.h> 26#include <errno.h>
27#include <linux/bitmap.h>
27 28
28#include "../util.h" 29#include "../util.h"
29#include <EXTERN.h> 30#include <EXTERN.h>
@@ -57,7 +58,7 @@ INTERP my_perl;
57#define FTRACE_MAX_EVENT \ 58#define FTRACE_MAX_EVENT \
58 ((1 << (sizeof(unsigned short) * 8)) - 1) 59 ((1 << (sizeof(unsigned short) * 8)) - 1)
59 60
60struct event_format *events[FTRACE_MAX_EVENT]; 61static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT);
61 62
62extern struct scripting_context *scripting_context; 63extern struct scripting_context *scripting_context;
63 64
@@ -238,35 +239,15 @@ static void define_event_symbols(struct event_format *event,
238 define_event_symbols(event, ev_name, args->next); 239 define_event_symbols(event, ev_name, args->next);
239} 240}
240 241
241static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
242{
243 static char ev_name[256];
244 struct event_format *event;
245 int type = evsel->attr.config;
246
247 if (events[type])
248 return events[type];
249
250 events[type] = event = evsel->tp_format;
251 if (!event)
252 return NULL;
253
254 sprintf(ev_name, "%s::%s", event->system, event->name);
255
256 define_event_symbols(event, ev_name, event->print_fmt.args);
257
258 return event;
259}
260
261static void perl_process_tracepoint(struct perf_sample *sample, 242static void perl_process_tracepoint(struct perf_sample *sample,
262 struct perf_evsel *evsel, 243 struct perf_evsel *evsel,
263 struct thread *thread) 244 struct thread *thread)
264{ 245{
246 struct event_format *event = evsel->tp_format;
265 struct format_field *field; 247 struct format_field *field;
266 static char handler[256]; 248 static char handler[256];
267 unsigned long long val; 249 unsigned long long val;
268 unsigned long s, ns; 250 unsigned long s, ns;
269 struct event_format *event;
270 int pid; 251 int pid;
271 int cpu = sample->cpu; 252 int cpu = sample->cpu;
272 void *data = sample->raw_data; 253 void *data = sample->raw_data;
@@ -278,7 +259,6 @@ static void perl_process_tracepoint(struct perf_sample *sample,
278 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 259 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
279 return; 260 return;
280 261
281 event = find_cache_event(evsel);
282 if (!event) 262 if (!event)
283 die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config); 263 die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config);
284 264
@@ -286,6 +266,9 @@ static void perl_process_tracepoint(struct perf_sample *sample,
286 266
287 sprintf(handler, "%s::%s", event->system, event->name); 267 sprintf(handler, "%s::%s", event->system, event->name);
288 268
269 if (!test_and_set_bit(event->id, events_defined))
270 define_event_symbols(event, handler, event->print_fmt.args);
271
289 s = nsecs / NSECS_PER_SEC; 272 s = nsecs / NSECS_PER_SEC;
290 ns = nsecs - s * NSECS_PER_SEC; 273 ns = nsecs - s * NSECS_PER_SEC;
291 274
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 496f21cadd97..d808a328f4dc 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -24,7 +24,9 @@
24#include <stdio.h> 24#include <stdio.h>
25#include <stdlib.h> 25#include <stdlib.h>
26#include <string.h> 26#include <string.h>
27#include <stdbool.h>
27#include <errno.h> 28#include <errno.h>
29#include <linux/bitmap.h>
28 30
29#include "../../perf.h" 31#include "../../perf.h"
30#include "../debug.h" 32#include "../debug.h"
@@ -33,6 +35,10 @@
33#include "../util.h" 35#include "../util.h"
34#include "../event.h" 36#include "../event.h"
35#include "../thread.h" 37#include "../thread.h"
38#include "../comm.h"
39#include "../machine.h"
40#include "../db-export.h"
41#include "../thread-stack.h"
36#include "../trace-event.h" 42#include "../trace-event.h"
37#include "../machine.h" 43#include "../machine.h"
38 44
@@ -41,7 +47,7 @@ PyMODINIT_FUNC initperf_trace_context(void);
41#define FTRACE_MAX_EVENT \ 47#define FTRACE_MAX_EVENT \
42 ((1 << (sizeof(unsigned short) * 8)) - 1) 48 ((1 << (sizeof(unsigned short) * 8)) - 1)
43 49
44struct event_format *events[FTRACE_MAX_EVENT]; 50static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT);
45 51
46#define MAX_FIELDS 64 52#define MAX_FIELDS 64
47#define N_COMMON_FIELDS 7 53#define N_COMMON_FIELDS 7
@@ -53,6 +59,24 @@ static int zero_flag_atom;
53 59
54static PyObject *main_module, *main_dict; 60static PyObject *main_module, *main_dict;
55 61
62struct tables {
63 struct db_export dbe;
64 PyObject *evsel_handler;
65 PyObject *machine_handler;
66 PyObject *thread_handler;
67 PyObject *comm_handler;
68 PyObject *comm_thread_handler;
69 PyObject *dso_handler;
70 PyObject *symbol_handler;
71 PyObject *branch_type_handler;
72 PyObject *sample_handler;
73 PyObject *call_path_handler;
74 PyObject *call_return_handler;
75 bool db_export_mode;
76};
77
78static struct tables tables_global;
79
56static void handler_call_die(const char *handler_name) NORETURN; 80static void handler_call_die(const char *handler_name) NORETURN;
57static void handler_call_die(const char *handler_name) 81static void handler_call_die(const char *handler_name)
58{ 82{
@@ -232,31 +256,6 @@ static void define_event_symbols(struct event_format *event,
232 define_event_symbols(event, ev_name, args->next); 256 define_event_symbols(event, ev_name, args->next);
233} 257}
234 258
235static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
236{
237 static char ev_name[256];
238 struct event_format *event;
239 int type = evsel->attr.config;
240
241 /*
242 * XXX: Do we really need to cache this since now we have evsel->tp_format
243 * cached already? Need to re-read this "cache" routine that as well calls
244 * define_event_symbols() :-\
245 */
246 if (events[type])
247 return events[type];
248
249 events[type] = event = evsel->tp_format;
250 if (!event)
251 return NULL;
252
253 sprintf(ev_name, "%s__%s", event->system, event->name);
254
255 define_event_symbols(event, ev_name, event->print_fmt.args);
256
257 return event;
258}
259
260static PyObject *get_field_numeric_entry(struct event_format *event, 259static PyObject *get_field_numeric_entry(struct event_format *event,
261 struct format_field *field, void *data) 260 struct format_field *field, void *data)
262{ 261{
@@ -312,9 +311,9 @@ static PyObject *python_process_callchain(struct perf_sample *sample,
312 if (!symbol_conf.use_callchain || !sample->callchain) 311 if (!symbol_conf.use_callchain || !sample->callchain)
313 goto exit; 312 goto exit;
314 313
315 if (machine__resolve_callchain(al->machine, evsel, al->thread, 314 if (thread__resolve_callchain(al->thread, evsel,
316 sample, NULL, NULL, 315 sample, NULL, NULL,
317 PERF_MAX_STACK_DEPTH) != 0) { 316 PERF_MAX_STACK_DEPTH) != 0) {
318 pr_err("Failed to resolve callchain. Skipping\n"); 317 pr_err("Failed to resolve callchain. Skipping\n");
319 goto exit; 318 goto exit;
320 } 319 }
@@ -380,12 +379,12 @@ static void python_process_tracepoint(struct perf_sample *sample,
380 struct thread *thread, 379 struct thread *thread,
381 struct addr_location *al) 380 struct addr_location *al)
382{ 381{
382 struct event_format *event = evsel->tp_format;
383 PyObject *handler, *context, *t, *obj, *callchain; 383 PyObject *handler, *context, *t, *obj, *callchain;
384 PyObject *dict = NULL; 384 PyObject *dict = NULL;
385 static char handler_name[256]; 385 static char handler_name[256];
386 struct format_field *field; 386 struct format_field *field;
387 unsigned long s, ns; 387 unsigned long s, ns;
388 struct event_format *event;
389 unsigned n = 0; 388 unsigned n = 0;
390 int pid; 389 int pid;
391 int cpu = sample->cpu; 390 int cpu = sample->cpu;
@@ -397,7 +396,6 @@ static void python_process_tracepoint(struct perf_sample *sample,
397 if (!t) 396 if (!t)
398 Py_FatalError("couldn't create Python tuple"); 397 Py_FatalError("couldn't create Python tuple");
399 398
400 event = find_cache_event(evsel);
401 if (!event) 399 if (!event)
402 die("ug! no event found for type %d", (int)evsel->attr.config); 400 die("ug! no event found for type %d", (int)evsel->attr.config);
403 401
@@ -405,6 +403,9 @@ static void python_process_tracepoint(struct perf_sample *sample,
405 403
406 sprintf(handler_name, "%s__%s", event->system, event->name); 404 sprintf(handler_name, "%s__%s", event->system, event->name);
407 405
406 if (!test_and_set_bit(event->id, events_defined))
407 define_event_symbols(event, handler_name, event->print_fmt.args);
408
408 handler = get_handler(handler_name); 409 handler = get_handler(handler_name);
409 if (!handler) { 410 if (!handler) {
410 dict = PyDict_New(); 411 dict = PyDict_New();
@@ -475,6 +476,289 @@ static void python_process_tracepoint(struct perf_sample *sample,
475 Py_DECREF(t); 476 Py_DECREF(t);
476} 477}
477 478
479static PyObject *tuple_new(unsigned int sz)
480{
481 PyObject *t;
482
483 t = PyTuple_New(sz);
484 if (!t)
485 Py_FatalError("couldn't create Python tuple");
486 return t;
487}
488
489static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
490{
491#if BITS_PER_LONG == 64
492 return PyTuple_SetItem(t, pos, PyInt_FromLong(val));
493#endif
494#if BITS_PER_LONG == 32
495 return PyTuple_SetItem(t, pos, PyLong_FromLongLong(val));
496#endif
497}
498
499static int tuple_set_s32(PyObject *t, unsigned int pos, s32 val)
500{
501 return PyTuple_SetItem(t, pos, PyInt_FromLong(val));
502}
503
504static int tuple_set_string(PyObject *t, unsigned int pos, const char *s)
505{
506 return PyTuple_SetItem(t, pos, PyString_FromString(s));
507}
508
509static int python_export_evsel(struct db_export *dbe, struct perf_evsel *evsel)
510{
511 struct tables *tables = container_of(dbe, struct tables, dbe);
512 PyObject *t;
513
514 t = tuple_new(2);
515
516 tuple_set_u64(t, 0, evsel->db_id);
517 tuple_set_string(t, 1, perf_evsel__name(evsel));
518
519 call_object(tables->evsel_handler, t, "evsel_table");
520
521 Py_DECREF(t);
522
523 return 0;
524}
525
526static int python_export_machine(struct db_export *dbe,
527 struct machine *machine)
528{
529 struct tables *tables = container_of(dbe, struct tables, dbe);
530 PyObject *t;
531
532 t = tuple_new(3);
533
534 tuple_set_u64(t, 0, machine->db_id);
535 tuple_set_s32(t, 1, machine->pid);
536 tuple_set_string(t, 2, machine->root_dir ? machine->root_dir : "");
537
538 call_object(tables->machine_handler, t, "machine_table");
539
540 Py_DECREF(t);
541
542 return 0;
543}
544
545static int python_export_thread(struct db_export *dbe, struct thread *thread,
546 u64 main_thread_db_id, struct machine *machine)
547{
548 struct tables *tables = container_of(dbe, struct tables, dbe);
549 PyObject *t;
550
551 t = tuple_new(5);
552
553 tuple_set_u64(t, 0, thread->db_id);
554 tuple_set_u64(t, 1, machine->db_id);
555 tuple_set_u64(t, 2, main_thread_db_id);
556 tuple_set_s32(t, 3, thread->pid_);
557 tuple_set_s32(t, 4, thread->tid);
558
559 call_object(tables->thread_handler, t, "thread_table");
560
561 Py_DECREF(t);
562
563 return 0;
564}
565
566static int python_export_comm(struct db_export *dbe, struct comm *comm)
567{
568 struct tables *tables = container_of(dbe, struct tables, dbe);
569 PyObject *t;
570
571 t = tuple_new(2);
572
573 tuple_set_u64(t, 0, comm->db_id);
574 tuple_set_string(t, 1, comm__str(comm));
575
576 call_object(tables->comm_handler, t, "comm_table");
577
578 Py_DECREF(t);
579
580 return 0;
581}
582
583static int python_export_comm_thread(struct db_export *dbe, u64 db_id,
584 struct comm *comm, struct thread *thread)
585{
586 struct tables *tables = container_of(dbe, struct tables, dbe);
587 PyObject *t;
588
589 t = tuple_new(3);
590
591 tuple_set_u64(t, 0, db_id);
592 tuple_set_u64(t, 1, comm->db_id);
593 tuple_set_u64(t, 2, thread->db_id);
594
595 call_object(tables->comm_thread_handler, t, "comm_thread_table");
596
597 Py_DECREF(t);
598
599 return 0;
600}
601
602static int python_export_dso(struct db_export *dbe, struct dso *dso,
603 struct machine *machine)
604{
605 struct tables *tables = container_of(dbe, struct tables, dbe);
606 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
607 PyObject *t;
608
609 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
610
611 t = tuple_new(5);
612
613 tuple_set_u64(t, 0, dso->db_id);
614 tuple_set_u64(t, 1, machine->db_id);
615 tuple_set_string(t, 2, dso->short_name);
616 tuple_set_string(t, 3, dso->long_name);
617 tuple_set_string(t, 4, sbuild_id);
618
619 call_object(tables->dso_handler, t, "dso_table");
620
621 Py_DECREF(t);
622
623 return 0;
624}
625
626static int python_export_symbol(struct db_export *dbe, struct symbol *sym,
627 struct dso *dso)
628{
629 struct tables *tables = container_of(dbe, struct tables, dbe);
630 u64 *sym_db_id = symbol__priv(sym);
631 PyObject *t;
632
633 t = tuple_new(6);
634
635 tuple_set_u64(t, 0, *sym_db_id);
636 tuple_set_u64(t, 1, dso->db_id);
637 tuple_set_u64(t, 2, sym->start);
638 tuple_set_u64(t, 3, sym->end);
639 tuple_set_s32(t, 4, sym->binding);
640 tuple_set_string(t, 5, sym->name);
641
642 call_object(tables->symbol_handler, t, "symbol_table");
643
644 Py_DECREF(t);
645
646 return 0;
647}
648
649static int python_export_branch_type(struct db_export *dbe, u32 branch_type,
650 const char *name)
651{
652 struct tables *tables = container_of(dbe, struct tables, dbe);
653 PyObject *t;
654
655 t = tuple_new(2);
656
657 tuple_set_s32(t, 0, branch_type);
658 tuple_set_string(t, 1, name);
659
660 call_object(tables->branch_type_handler, t, "branch_type_table");
661
662 Py_DECREF(t);
663
664 return 0;
665}
666
667static int python_export_sample(struct db_export *dbe,
668 struct export_sample *es)
669{
670 struct tables *tables = container_of(dbe, struct tables, dbe);
671 PyObject *t;
672
673 t = tuple_new(21);
674
675 tuple_set_u64(t, 0, es->db_id);
676 tuple_set_u64(t, 1, es->evsel->db_id);
677 tuple_set_u64(t, 2, es->al->machine->db_id);
678 tuple_set_u64(t, 3, es->thread->db_id);
679 tuple_set_u64(t, 4, es->comm_db_id);
680 tuple_set_u64(t, 5, es->dso_db_id);
681 tuple_set_u64(t, 6, es->sym_db_id);
682 tuple_set_u64(t, 7, es->offset);
683 tuple_set_u64(t, 8, es->sample->ip);
684 tuple_set_u64(t, 9, es->sample->time);
685 tuple_set_s32(t, 10, es->sample->cpu);
686 tuple_set_u64(t, 11, es->addr_dso_db_id);
687 tuple_set_u64(t, 12, es->addr_sym_db_id);
688 tuple_set_u64(t, 13, es->addr_offset);
689 tuple_set_u64(t, 14, es->sample->addr);
690 tuple_set_u64(t, 15, es->sample->period);
691 tuple_set_u64(t, 16, es->sample->weight);
692 tuple_set_u64(t, 17, es->sample->transaction);
693 tuple_set_u64(t, 18, es->sample->data_src);
694 tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK);
695 tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX));
696
697 call_object(tables->sample_handler, t, "sample_table");
698
699 Py_DECREF(t);
700
701 return 0;
702}
703
704static int python_export_call_path(struct db_export *dbe, struct call_path *cp)
705{
706 struct tables *tables = container_of(dbe, struct tables, dbe);
707 PyObject *t;
708 u64 parent_db_id, sym_db_id;
709
710 parent_db_id = cp->parent ? cp->parent->db_id : 0;
711 sym_db_id = cp->sym ? *(u64 *)symbol__priv(cp->sym) : 0;
712
713 t = tuple_new(4);
714
715 tuple_set_u64(t, 0, cp->db_id);
716 tuple_set_u64(t, 1, parent_db_id);
717 tuple_set_u64(t, 2, sym_db_id);
718 tuple_set_u64(t, 3, cp->ip);
719
720 call_object(tables->call_path_handler, t, "call_path_table");
721
722 Py_DECREF(t);
723
724 return 0;
725}
726
727static int python_export_call_return(struct db_export *dbe,
728 struct call_return *cr)
729{
730 struct tables *tables = container_of(dbe, struct tables, dbe);
731 u64 comm_db_id = cr->comm ? cr->comm->db_id : 0;
732 PyObject *t;
733
734 t = tuple_new(11);
735
736 tuple_set_u64(t, 0, cr->db_id);
737 tuple_set_u64(t, 1, cr->thread->db_id);
738 tuple_set_u64(t, 2, comm_db_id);
739 tuple_set_u64(t, 3, cr->cp->db_id);
740 tuple_set_u64(t, 4, cr->call_time);
741 tuple_set_u64(t, 5, cr->return_time);
742 tuple_set_u64(t, 6, cr->branch_count);
743 tuple_set_u64(t, 7, cr->call_ref);
744 tuple_set_u64(t, 8, cr->return_ref);
745 tuple_set_u64(t, 9, cr->cp->parent->db_id);
746 tuple_set_s32(t, 10, cr->flags);
747
748 call_object(tables->call_return_handler, t, "call_return_table");
749
750 Py_DECREF(t);
751
752 return 0;
753}
754
755static int python_process_call_return(struct call_return *cr, void *data)
756{
757 struct db_export *dbe = data;
758
759 return db_export__call_return(dbe, cr);
760}
761
478static void python_process_general_event(struct perf_sample *sample, 762static void python_process_general_event(struct perf_sample *sample,
479 struct perf_evsel *evsel, 763 struct perf_evsel *evsel,
480 struct thread *thread, 764 struct thread *thread,
@@ -551,19 +835,25 @@ exit:
551 Py_DECREF(t); 835 Py_DECREF(t);
552} 836}
553 837
554static void python_process_event(union perf_event *event __maybe_unused, 838static void python_process_event(union perf_event *event,
555 struct perf_sample *sample, 839 struct perf_sample *sample,
556 struct perf_evsel *evsel, 840 struct perf_evsel *evsel,
557 struct thread *thread, 841 struct thread *thread,
558 struct addr_location *al) 842 struct addr_location *al)
559{ 843{
844 struct tables *tables = &tables_global;
845
560 switch (evsel->attr.type) { 846 switch (evsel->attr.type) {
561 case PERF_TYPE_TRACEPOINT: 847 case PERF_TYPE_TRACEPOINT:
562 python_process_tracepoint(sample, evsel, thread, al); 848 python_process_tracepoint(sample, evsel, thread, al);
563 break; 849 break;
564 /* Reserve for future process_hw/sw/raw APIs */ 850 /* Reserve for future process_hw/sw/raw APIs */
565 default: 851 default:
566 python_process_general_event(sample, evsel, thread, al); 852 if (tables->db_export_mode)
853 db_export__sample(&tables->dbe, event, sample, evsel,
854 thread, al);
855 else
856 python_process_general_event(sample, evsel, thread, al);
567 } 857 }
568} 858}
569 859
@@ -589,11 +879,79 @@ error:
589 return -1; 879 return -1;
590} 880}
591 881
882#define SET_TABLE_HANDLER_(name, handler_name, table_name) do { \
883 tables->handler_name = get_handler(#table_name); \
884 if (tables->handler_name) \
885 tables->dbe.export_ ## name = python_export_ ## name; \
886} while (0)
887
888#define SET_TABLE_HANDLER(name) \
889 SET_TABLE_HANDLER_(name, name ## _handler, name ## _table)
890
891static void set_table_handlers(struct tables *tables)
892{
893 const char *perf_db_export_mode = "perf_db_export_mode";
894 const char *perf_db_export_calls = "perf_db_export_calls";
895 PyObject *db_export_mode, *db_export_calls;
896 bool export_calls = false;
897 int ret;
898
899 memset(tables, 0, sizeof(struct tables));
900 if (db_export__init(&tables->dbe))
901 Py_FatalError("failed to initialize export");
902
903 db_export_mode = PyDict_GetItemString(main_dict, perf_db_export_mode);
904 if (!db_export_mode)
905 return;
906
907 ret = PyObject_IsTrue(db_export_mode);
908 if (ret == -1)
909 handler_call_die(perf_db_export_mode);
910 if (!ret)
911 return;
912
913 tables->dbe.crp = NULL;
914 db_export_calls = PyDict_GetItemString(main_dict, perf_db_export_calls);
915 if (db_export_calls) {
916 ret = PyObject_IsTrue(db_export_calls);
917 if (ret == -1)
918 handler_call_die(perf_db_export_calls);
919 export_calls = !!ret;
920 }
921
922 if (export_calls) {
923 tables->dbe.crp =
924 call_return_processor__new(python_process_call_return,
925 &tables->dbe);
926 if (!tables->dbe.crp)
927 Py_FatalError("failed to create calls processor");
928 }
929
930 tables->db_export_mode = true;
931 /*
932 * Reserve per symbol space for symbol->db_id via symbol__priv()
933 */
934 symbol_conf.priv_size = sizeof(u64);
935
936 SET_TABLE_HANDLER(evsel);
937 SET_TABLE_HANDLER(machine);
938 SET_TABLE_HANDLER(thread);
939 SET_TABLE_HANDLER(comm);
940 SET_TABLE_HANDLER(comm_thread);
941 SET_TABLE_HANDLER(dso);
942 SET_TABLE_HANDLER(symbol);
943 SET_TABLE_HANDLER(branch_type);
944 SET_TABLE_HANDLER(sample);
945 SET_TABLE_HANDLER(call_path);
946 SET_TABLE_HANDLER(call_return);
947}
948
592/* 949/*
593 * Start trace script 950 * Start trace script
594 */ 951 */
595static int python_start_script(const char *script, int argc, const char **argv) 952static int python_start_script(const char *script, int argc, const char **argv)
596{ 953{
954 struct tables *tables = &tables_global;
597 const char **command_line; 955 const char **command_line;
598 char buf[PATH_MAX]; 956 char buf[PATH_MAX];
599 int i, err = 0; 957 int i, err = 0;
@@ -632,6 +990,14 @@ static int python_start_script(const char *script, int argc, const char **argv)
632 990
633 free(command_line); 991 free(command_line);
634 992
993 set_table_handlers(tables);
994
995 if (tables->db_export_mode) {
996 err = db_export__branch_types(&tables->dbe);
997 if (err)
998 goto error;
999 }
1000
635 return err; 1001 return err;
636error: 1002error:
637 Py_Finalize(); 1003 Py_Finalize();
@@ -642,7 +1008,9 @@ error:
642 1008
643static int python_flush_script(void) 1009static int python_flush_script(void)
644{ 1010{
645 return 0; 1011 struct tables *tables = &tables_global;
1012
1013 return db_export__flush(&tables->dbe);
646} 1014}
647 1015
648/* 1016/*
@@ -650,8 +1018,12 @@ static int python_flush_script(void)
650 */ 1018 */
651static int python_stop_script(void) 1019static int python_stop_script(void)
652{ 1020{
1021 struct tables *tables = &tables_global;
1022
653 try_call_object("trace_end", NULL); 1023 try_call_object("trace_end", NULL);
654 1024
1025 db_export__exit(&tables->dbe);
1026
655 Py_XDECREF(main_dict); 1027 Py_XDECREF(main_dict);
656 Py_XDECREF(main_module); 1028 Py_XDECREF(main_module);
657 Py_Finalize(); 1029 Py_Finalize();
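
A script opts into this export path by defining perf_db_export_mode (and optionally perf_db_export_calls) at module level; set_table_handlers() then probes for the per-table callbacks. As a rough illustration of the macros above, SET_TABLE_HANDLER(sample) expands to the equivalent of the following, so the export hook is only installed when the script actually provides a sample_table function:

	tables->sample_handler = get_handler("sample_table");
	if (tables->sample_handler)
		tables->dbe.export_sample = python_export_sample;
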
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 6702ac28754b..f4478ce72fdb 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -228,6 +228,15 @@ static int process_finished_round(struct perf_tool *tool,
228 union perf_event *event, 228 union perf_event *event,
229 struct perf_session *session); 229 struct perf_session *session);
230 230
231static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
232 union perf_event *event __maybe_unused,
233 struct perf_session *perf_session
234 __maybe_unused)
235{
236 dump_printf(": unhandled!\n");
237 return 0;
238}
239
231void perf_tool__fill_defaults(struct perf_tool *tool) 240void perf_tool__fill_defaults(struct perf_tool *tool)
232{ 241{
233 if (tool->sample == NULL) 242 if (tool->sample == NULL)
@@ -262,6 +271,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
262 else 271 else
263 tool->finished_round = process_finished_round_stub; 272 tool->finished_round = process_finished_round_stub;
264 } 273 }
274 if (tool->id_index == NULL)
275 tool->id_index = process_id_index_stub;
265} 276}
266 277
267static void swap_sample_id_all(union perf_event *event, void *data) 278static void swap_sample_id_all(union perf_event *event, void *data)
@@ -460,6 +471,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
460 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, 471 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
461 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, 472 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
462 [PERF_RECORD_HEADER_BUILD_ID] = NULL, 473 [PERF_RECORD_HEADER_BUILD_ID] = NULL,
474 [PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
463 [PERF_RECORD_HEADER_MAX] = NULL, 475 [PERF_RECORD_HEADER_MAX] = NULL,
464}; 476};
465 477
@@ -888,11 +900,26 @@ static s64 perf_session__process_user_event(struct perf_session *session,
888 return tool->build_id(tool, event, session); 900 return tool->build_id(tool, event, session);
889 case PERF_RECORD_FINISHED_ROUND: 901 case PERF_RECORD_FINISHED_ROUND:
890 return tool->finished_round(tool, event, session); 902 return tool->finished_round(tool, event, session);
903 case PERF_RECORD_ID_INDEX:
904 return tool->id_index(tool, event, session);
891 default: 905 default:
892 return -EINVAL; 906 return -EINVAL;
893 } 907 }
894} 908}
895 909
910int perf_session__deliver_synth_event(struct perf_session *session,
911 union perf_event *event,
912 struct perf_sample *sample,
913 struct perf_tool *tool)
914{
915 events_stats__inc(&session->stats, event->header.type);
916
917 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
918 return perf_session__process_user_event(session, event, tool, 0);
919
920 return perf_session__deliver_event(session, event, sample, tool, 0);
921}
922
896static void event_swap(union perf_event *event, bool sample_id_all) 923static void event_swap(union perf_event *event, bool sample_id_all)
897{ 924{
898 perf_event__swap_op swap; 925 perf_event__swap_op swap;
@@ -1417,9 +1444,9 @@ void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
1417 if (symbol_conf.use_callchain && sample->callchain) { 1444 if (symbol_conf.use_callchain && sample->callchain) {
1418 struct addr_location node_al; 1445 struct addr_location node_al;
1419 1446
1420 if (machine__resolve_callchain(al->machine, evsel, al->thread, 1447 if (thread__resolve_callchain(al->thread, evsel,
1421 sample, NULL, NULL, 1448 sample, NULL, NULL,
1422 PERF_MAX_STACK_DEPTH) != 0) { 1449 PERF_MAX_STACK_DEPTH) != 0) {
1423 if (verbose) 1450 if (verbose)
1424 error("Failed to resolve callchain. Skipping\n"); 1451 error("Failed to resolve callchain. Skipping\n");
1425 return; 1452 return;
@@ -1594,3 +1621,111 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
1594out: 1621out:
1595 return err; 1622 return err;
1596} 1623}
1624
1625int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
1626 union perf_event *event,
1627 struct perf_session *session)
1628{
1629 struct perf_evlist *evlist = session->evlist;
1630 struct id_index_event *ie = &event->id_index;
1631 size_t i, nr, max_nr;
1632
1633 max_nr = (ie->header.size - sizeof(struct id_index_event)) /
1634 sizeof(struct id_index_entry);
1635 nr = ie->nr;
1636 if (nr > max_nr)
1637 return -EINVAL;
1638
1639 if (dump_trace)
1640 fprintf(stdout, " nr: %zu\n", nr);
1641
1642 for (i = 0; i < nr; i++) {
1643 struct id_index_entry *e = &ie->entries[i];
1644 struct perf_sample_id *sid;
1645
1646 if (dump_trace) {
1647 fprintf(stdout, " ... id: %"PRIu64, e->id);
1648 fprintf(stdout, " idx: %"PRIu64, e->idx);
1649 fprintf(stdout, " cpu: %"PRId64, e->cpu);
1650 fprintf(stdout, " tid: %"PRId64"\n", e->tid);
1651 }
1652
1653 sid = perf_evlist__id2sid(evlist, e->id);
1654 if (!sid)
1655 return -ENOENT;
1656 sid->idx = e->idx;
1657 sid->cpu = e->cpu;
1658 sid->tid = e->tid;
1659 }
1660 return 0;
1661}
1662
1663int perf_event__synthesize_id_index(struct perf_tool *tool,
1664 perf_event__handler_t process,
1665 struct perf_evlist *evlist,
1666 struct machine *machine)
1667{
1668 union perf_event *ev;
1669 struct perf_evsel *evsel;
1670 size_t nr = 0, i = 0, sz, max_nr, n;
1671 int err;
1672
1673 pr_debug2("Synthesizing id index\n");
1674
1675 max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
1676 sizeof(struct id_index_entry);
1677
1678 evlist__for_each(evlist, evsel)
1679 nr += evsel->ids;
1680
1681 n = nr > max_nr ? max_nr : nr;
1682 sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
1683 ev = zalloc(sz);
1684 if (!ev)
1685 return -ENOMEM;
1686
1687 ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1688 ev->id_index.header.size = sz;
1689 ev->id_index.nr = n;
1690
1691 evlist__for_each(evlist, evsel) {
1692 u32 j;
1693
1694 for (j = 0; j < evsel->ids; j++) {
1695 struct id_index_entry *e;
1696 struct perf_sample_id *sid;
1697
1698 if (i >= n) {
1699 err = process(tool, ev, NULL, machine);
1700 if (err)
1701 goto out_err;
1702 nr -= n;
1703 i = 0;
1704 }
1705
1706 e = &ev->id_index.entries[i++];
1707
1708 e->id = evsel->id[j];
1709
1710 sid = perf_evlist__id2sid(evlist, e->id);
1711 if (!sid) {
1712 free(ev);
1713 return -ENOENT;
1714 }
1715
1716 e->idx = sid->idx;
1717 e->cpu = sid->cpu;
1718 e->tid = sid->tid;
1719 }
1720 }
1721
1722 sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
1723 ev->id_index.header.size = sz;
1724 ev->id_index.nr = nr;
1725
1726 err = process(tool, ev, NULL, machine);
1727out_err:
1728 free(ev);
1729
1730 return err;
1731}
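
The synthesized PERF_RECORD_ID_INDEX record is emitted in chunks so each record's size still fits the 16-bit header.size field. As a sketch of the layout implied by the code above (the authoritative id_index_event/id_index_entry definitions are added to util/perf/event.h elsewhere in this patch and are not shown in this hunk):

	struct id_index_entry {
		u64 id;		/* sample id */
		u64 idx;	/* printed as unsigned above */
		u64 cpu;	/* printed as signed, -1 if unknown */
		u64 tid;	/* printed as signed, -1 if unknown */
	};

	struct id_index_event {
		struct perf_event_header header; /* type = PERF_RECORD_ID_INDEX */
		u64 nr;				 /* entries that follow */
		struct id_index_entry entries[];
	};
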
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index a4be851f1a90..dc26ebf60fe4 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -126,4 +126,19 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
126extern volatile int session_done; 126extern volatile int session_done;
127 127
128#define session_done() ACCESS_ONCE(session_done) 128#define session_done() ACCESS_ONCE(session_done)
129
130int perf_session__deliver_synth_event(struct perf_session *session,
131 union perf_event *event,
132 struct perf_sample *sample,
133 struct perf_tool *tool);
134
135int perf_event__process_id_index(struct perf_tool *tool,
136 union perf_event *event,
137 struct perf_session *session);
138
139int perf_event__synthesize_id_index(struct perf_tool *tool,
140 perf_event__handler_t process,
141 struct perf_evlist *evlist,
142 struct machine *machine);
143
129#endif /* __PERF_SESSION_H */ 144#endif /* __PERF_SESSION_H */
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 1e23a5bfb044..efc7eb6b8f0f 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -546,6 +546,35 @@ static int dso__swap_init(struct dso *dso, unsigned char eidata)
546 return 0; 546 return 0;
547} 547}
548 548
549static int decompress_kmodule(struct dso *dso, const char *name,
550 enum dso_binary_type type)
551{
552 int fd;
553 const char *ext = strrchr(name, '.');
554 char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
555
556 if ((type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
557 type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP) ||
558 type != dso->symtab_type)
559 return -1;
560
561 if (!ext || !is_supported_compression(ext + 1))
562 return -1;
563
564 fd = mkstemp(tmpbuf);
565 if (fd < 0)
566 return -1;
567
568 if (!decompress_to_file(ext + 1, name, fd)) {
569 close(fd);
570 fd = -1;
571 }
572
573 unlink(tmpbuf);
574
575 return fd;
576}
577
549bool symsrc__possibly_runtime(struct symsrc *ss) 578bool symsrc__possibly_runtime(struct symsrc *ss)
550{ 579{
551 return ss->dynsym || ss->opdsec; 580 return ss->dynsym || ss->opdsec;
@@ -571,7 +600,11 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
571 Elf *elf; 600 Elf *elf;
572 int fd; 601 int fd;
573 602
574 fd = open(name, O_RDONLY); 603 if (dso__needs_decompress(dso))
604 fd = decompress_kmodule(dso, name, type);
605 else
606 fd = open(name, O_RDONLY);
607
575 if (fd < 0) 608 if (fd < 0)
576 return -1; 609 return -1;
577 610
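
decompress_kmodule() only applies to compressed kernel-module DSOs; the dso__needs_decompress() gate is added to util/dso.c elsewhere in this series (not shown here) and presumably reduces to a symtab-type check along these lines — a sketch, not the exact helper:

	bool dso__needs_decompress(struct dso *dso)
	{
		return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
	}

Note that the decompressed image is written to a temporary file that is unlinked immediately, so the returned fd is the only reference and closing it releases everything.
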
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 078331140d8c..c24c5b83156c 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -51,7 +51,9 @@ static enum dso_binary_type binary_type_symtab[] = {
51 DSO_BINARY_TYPE__BUILDID_DEBUGINFO, 51 DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
52 DSO_BINARY_TYPE__SYSTEM_PATH_DSO, 52 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
53 DSO_BINARY_TYPE__GUEST_KMODULE, 53 DSO_BINARY_TYPE__GUEST_KMODULE,
54 DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
54 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, 55 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
56 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
55 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, 57 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
56 DSO_BINARY_TYPE__NOT_FOUND, 58 DSO_BINARY_TYPE__NOT_FOUND,
57}; 59};
@@ -1300,7 +1302,9 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1300 return dso->kernel == DSO_TYPE_GUEST_KERNEL; 1302 return dso->kernel == DSO_TYPE_GUEST_KERNEL;
1301 1303
1302 case DSO_BINARY_TYPE__GUEST_KMODULE: 1304 case DSO_BINARY_TYPE__GUEST_KMODULE:
1305 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1303 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: 1306 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1307 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1304 /* 1308 /*
1305 * kernel modules know their symtab type - it's set when 1309 * kernel modules know their symtab type - it's set when
1306 * creating a module dso in machine__new_module(). 1310 * creating a module dso in machine__new_module().
@@ -1368,7 +1372,9 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
1368 return -1; 1372 return -1;
1369 1373
1370 kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || 1374 kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1371 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE; 1375 dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1376 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1377 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1372 1378
1373 /* 1379 /*
1374 * Iterate over candidate debug images. 1380 * Iterate over candidate debug images.
@@ -1505,12 +1511,10 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
1505 symbol_filter_t filter) 1511 symbol_filter_t filter)
1506{ 1512{
1507 int i, err = 0; 1513 int i, err = 0;
1508 char *filename; 1514 char *filename = NULL;
1509 1515
1510 pr_debug("Looking at the vmlinux_path (%d entries long)\n", 1516 if (!symbol_conf.ignore_vmlinux_buildid)
1511 vmlinux_path__nr_entries + 1); 1517 filename = dso__build_id_filename(dso, NULL, 0);
1512
1513 filename = dso__build_id_filename(dso, NULL, 0);
1514 if (filename != NULL) { 1518 if (filename != NULL) {
1515 err = dso__load_vmlinux(dso, map, filename, true, filter); 1519 err = dso__load_vmlinux(dso, map, filename, true, filter);
1516 if (err > 0) 1520 if (err > 0)
@@ -1518,6 +1522,9 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
1518 free(filename); 1522 free(filename);
1519 } 1523 }
1520 1524
1525 pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1526 vmlinux_path__nr_entries + 1);
1527
1521 for (i = 0; i < vmlinux_path__nr_entries; ++i) { 1528 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1522 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter); 1529 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
1523 if (err > 0) 1530 if (err > 0)
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index eb2c19bf8d90..ded3ca7266de 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -105,6 +105,7 @@ struct symbol_conf {
105 unsigned short nr_events; 105 unsigned short nr_events;
106 bool try_vmlinux_path, 106 bool try_vmlinux_path,
107 ignore_vmlinux, 107 ignore_vmlinux,
108 ignore_vmlinux_buildid,
108 show_kernel_path, 109 show_kernel_path,
109 use_modules, 110 use_modules,
110 sort_by_name, 111 sort_by_name,
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
new file mode 100644
index 000000000000..9ed59a452d1f
--- /dev/null
+++ b/tools/perf/util/thread-stack.c
@@ -0,0 +1,747 @@
1/*
2 * thread-stack.c: Synthesize a thread's stack using call / return events
3 * Copyright (c) 2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#include <linux/rbtree.h>
17#include <linux/list.h>
18#include "thread.h"
19#include "event.h"
20#include "machine.h"
21#include "util.h"
22#include "debug.h"
23#include "symbol.h"
24#include "comm.h"
25#include "thread-stack.h"
26
27#define CALL_PATH_BLOCK_SHIFT 8
28#define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT)
29#define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1)
30
31struct call_path_block {
32 struct call_path cp[CALL_PATH_BLOCK_SIZE];
33 struct list_head node;
34};
35
36/**
37 * struct call_path_root - root of all call paths.
38 * @call_path: root call path
39 * @blocks: list of blocks to store call paths
40 * @next: next free space
41 * @sz: number of spaces
42 */
43struct call_path_root {
44 struct call_path call_path;
45 struct list_head blocks;
46 size_t next;
47 size_t sz;
48};
49
50/**
51 * struct call_return_processor - provides a call-back to consume call-return
52 * information.
53 * @cpr: call path root
54 * @process: call-back that accepts call/return information
55 * @data: anonymous data for call-back
56 */
57struct call_return_processor {
58 struct call_path_root *cpr;
59 int (*process)(struct call_return *cr, void *data);
60 void *data;
61};
62
63#define STACK_GROWTH 2048
64
65/**
66 * struct thread_stack_entry - thread stack entry.
67 * @ret_addr: return address
68 * @timestamp: timestamp (if known)
69 * @ref: external reference (e.g. db_id of sample)
70 * @branch_count: the branch count when the entry was created
71 * @cp: call path
72 * @no_call: a 'call' was not seen
73 */
74struct thread_stack_entry {
75 u64 ret_addr;
76 u64 timestamp;
77 u64 ref;
78 u64 branch_count;
79 struct call_path *cp;
80 bool no_call;
81};
82
83/**
84 * struct thread_stack - thread stack constructed from 'call' and 'return'
85 * branch samples.
86 * @stack: array that holds the stack
87 * @cnt: number of entries in the stack
88 * @sz: current maximum stack size
89 * @trace_nr: current trace number
90 * @branch_count: running branch count
91 * @kernel_start: kernel start address
92 * @last_time: last timestamp
93 * @crp: call/return processor
94 * @comm: current comm
95 */
96struct thread_stack {
97 struct thread_stack_entry *stack;
98 size_t cnt;
99 size_t sz;
100 u64 trace_nr;
101 u64 branch_count;
102 u64 kernel_start;
103 u64 last_time;
104 struct call_return_processor *crp;
105 struct comm *comm;
106};
107
108static int thread_stack__grow(struct thread_stack *ts)
109{
110 struct thread_stack_entry *new_stack;
111 size_t sz, new_sz;
112
113 new_sz = ts->sz + STACK_GROWTH;
114 sz = new_sz * sizeof(struct thread_stack_entry);
115
116 new_stack = realloc(ts->stack, sz);
117 if (!new_stack)
118 return -ENOMEM;
119
120 ts->stack = new_stack;
121 ts->sz = new_sz;
122
123 return 0;
124}
125
126static struct thread_stack *thread_stack__new(struct thread *thread,
127 struct call_return_processor *crp)
128{
129 struct thread_stack *ts;
130
131 ts = zalloc(sizeof(struct thread_stack));
132 if (!ts)
133 return NULL;
134
135 if (thread_stack__grow(ts)) {
136 free(ts);
137 return NULL;
138 }
139
140 if (thread->mg && thread->mg->machine)
141 ts->kernel_start = machine__kernel_start(thread->mg->machine);
142 else
143 ts->kernel_start = 1ULL << 63;
144 ts->crp = crp;
145
146 return ts;
147}
148
149static int thread_stack__push(struct thread_stack *ts, u64 ret_addr)
150{
151 int err = 0;
152
153 if (ts->cnt == ts->sz) {
154 err = thread_stack__grow(ts);
155 if (err) {
156 pr_warning("Out of memory: discarding thread stack\n");
157 ts->cnt = 0;
158 }
159 }
160
161 ts->stack[ts->cnt++].ret_addr = ret_addr;
162
163 return err;
164}
165
166static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
167{
168 size_t i;
169
170 /*
171 * In some cases there may be functions which are not seen to return.
172 * For example when setjmp / longjmp has been used. Or the perf context
173 * switch in the kernel which doesn't stop and start tracing in exactly
174 * the same code path. When that happens the return address will be
175 * further down the stack. If the return address is not found at all,
176 * we assume the opposite (i.e. this is a return for a call that wasn't
177 * seen for some reason) and leave the stack alone.
178 */
179 for (i = ts->cnt; i; ) {
180 if (ts->stack[--i].ret_addr == ret_addr) {
181 ts->cnt = i;
182 return;
183 }
184 }
185}
186
187static bool thread_stack__in_kernel(struct thread_stack *ts)
188{
189 if (!ts->cnt)
190 return false;
191
192 return ts->stack[ts->cnt - 1].cp->in_kernel;
193}
194
195static int thread_stack__call_return(struct thread *thread,
196 struct thread_stack *ts, size_t idx,
197 u64 timestamp, u64 ref, bool no_return)
198{
199 struct call_return_processor *crp = ts->crp;
200 struct thread_stack_entry *tse;
201 struct call_return cr = {
202 .thread = thread,
203 .comm = ts->comm,
204 .db_id = 0,
205 };
206
207 tse = &ts->stack[idx];
208 cr.cp = tse->cp;
209 cr.call_time = tse->timestamp;
210 cr.return_time = timestamp;
211 cr.branch_count = ts->branch_count - tse->branch_count;
212 cr.call_ref = tse->ref;
213 cr.return_ref = ref;
214 if (tse->no_call)
215 cr.flags |= CALL_RETURN_NO_CALL;
216 if (no_return)
217 cr.flags |= CALL_RETURN_NO_RETURN;
218
219 return crp->process(&cr, crp->data);
220}
221
222static int thread_stack__flush(struct thread *thread, struct thread_stack *ts)
223{
224 struct call_return_processor *crp = ts->crp;
225 int err;
226
227 if (!crp) {
228 ts->cnt = 0;
229 return 0;
230 }
231
232 while (ts->cnt) {
233 err = thread_stack__call_return(thread, ts, --ts->cnt,
234 ts->last_time, 0, true);
235 if (err) {
236 pr_err("Error flushing thread stack!\n");
237 ts->cnt = 0;
238 return err;
239 }
240 }
241
242 return 0;
243}
244
245int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
246 u64 to_ip, u16 insn_len, u64 trace_nr)
247{
248 if (!thread)
249 return -EINVAL;
250
251 if (!thread->ts) {
252 thread->ts = thread_stack__new(thread, NULL);
253 if (!thread->ts) {
254 pr_warning("Out of memory: no thread stack\n");
255 return -ENOMEM;
256 }
257 thread->ts->trace_nr = trace_nr;
258 }
259
260 /*
261 * When the trace is discontinuous, the trace_nr changes. In that case
262 * the stack might be completely invalid. Better to report nothing than
263 * to report something misleading, so flush the stack.
264 */
265 if (trace_nr != thread->ts->trace_nr) {
266 if (thread->ts->trace_nr)
267 thread_stack__flush(thread, thread->ts);
268 thread->ts->trace_nr = trace_nr;
269 }
270
271 /* Stop here if thread_stack__process() is in use */
272 if (thread->ts->crp)
273 return 0;
274
275 if (flags & PERF_IP_FLAG_CALL) {
276 u64 ret_addr;
277
278 if (!to_ip)
279 return 0;
280 ret_addr = from_ip + insn_len;
281 if (ret_addr == to_ip)
282 return 0; /* Zero-length calls are excluded */
283 return thread_stack__push(thread->ts, ret_addr);
284 } else if (flags & PERF_IP_FLAG_RETURN) {
285 if (!from_ip)
286 return 0;
287 thread_stack__pop(thread->ts, to_ip);
288 }
289
290 return 0;
291}
292
293void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
294{
295 if (!thread || !thread->ts)
296 return;
297
298 if (trace_nr != thread->ts->trace_nr) {
299 if (thread->ts->trace_nr)
300 thread_stack__flush(thread, thread->ts);
301 thread->ts->trace_nr = trace_nr;
302 }
303}
304
305void thread_stack__free(struct thread *thread)
306{
307 if (thread->ts) {
308 thread_stack__flush(thread, thread->ts);
309 zfree(&thread->ts->stack);
310 zfree(&thread->ts);
311 }
312}
313
314void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
315 size_t sz, u64 ip)
316{
317 size_t i;
318
319 if (!thread || !thread->ts)
320 chain->nr = 1;
321 else
322 chain->nr = min(sz, thread->ts->cnt + 1);
323
324 chain->ips[0] = ip;
325
326 for (i = 1; i < chain->nr; i++)
327 chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr;
328}
329
330static void call_path__init(struct call_path *cp, struct call_path *parent,
331 struct symbol *sym, u64 ip, bool in_kernel)
332{
333 cp->parent = parent;
334 cp->sym = sym;
335 cp->ip = sym ? 0 : ip;
336 cp->db_id = 0;
337 cp->in_kernel = in_kernel;
338 RB_CLEAR_NODE(&cp->rb_node);
339 cp->children = RB_ROOT;
340}
341
342static struct call_path_root *call_path_root__new(void)
343{
344 struct call_path_root *cpr;
345
346 cpr = zalloc(sizeof(struct call_path_root));
347 if (!cpr)
348 return NULL;
349 call_path__init(&cpr->call_path, NULL, NULL, 0, false);
350 INIT_LIST_HEAD(&cpr->blocks);
351 return cpr;
352}
353
354static void call_path_root__free(struct call_path_root *cpr)
355{
356 struct call_path_block *pos, *n;
357
358 list_for_each_entry_safe(pos, n, &cpr->blocks, node) {
359 list_del(&pos->node);
360 free(pos);
361 }
362 free(cpr);
363}
364
365static struct call_path *call_path__new(struct call_path_root *cpr,
366 struct call_path *parent,
367 struct symbol *sym, u64 ip,
368 bool in_kernel)
369{
370 struct call_path_block *cpb;
371 struct call_path *cp;
372 size_t n;
373
374 if (cpr->next < cpr->sz) {
375 cpb = list_last_entry(&cpr->blocks, struct call_path_block,
376 node);
377 } else {
378 cpb = zalloc(sizeof(struct call_path_block));
379 if (!cpb)
380 return NULL;
381 list_add_tail(&cpb->node, &cpr->blocks);
382 cpr->sz += CALL_PATH_BLOCK_SIZE;
383 }
384
385 n = cpr->next++ & CALL_PATH_BLOCK_MASK;
386 cp = &cpb->cp[n];
387
388 call_path__init(cp, parent, sym, ip, in_kernel);
389
390 return cp;
391}
392
393static struct call_path *call_path__findnew(struct call_path_root *cpr,
394 struct call_path *parent,
395 struct symbol *sym, u64 ip, u64 ks)
396{
397 struct rb_node **p;
398 struct rb_node *node_parent = NULL;
399 struct call_path *cp;
400 bool in_kernel = ip >= ks;
401
402 if (sym)
403 ip = 0;
404
405 if (!parent)
406 return call_path__new(cpr, parent, sym, ip, in_kernel);
407
408 p = &parent->children.rb_node;
409 while (*p != NULL) {
410 node_parent = *p;
411 cp = rb_entry(node_parent, struct call_path, rb_node);
412
413 if (cp->sym == sym && cp->ip == ip)
414 return cp;
415
416 if (sym < cp->sym || (sym == cp->sym && ip < cp->ip))
417 p = &(*p)->rb_left;
418 else
419 p = &(*p)->rb_right;
420 }
421
422 cp = call_path__new(cpr, parent, sym, ip, in_kernel);
423 if (!cp)
424 return NULL;
425
426 rb_link_node(&cp->rb_node, node_parent, p);
427 rb_insert_color(&cp->rb_node, &parent->children);
428
429 return cp;
430}
431
432struct call_return_processor *
433call_return_processor__new(int (*process)(struct call_return *cr, void *data),
434 void *data)
435{
436 struct call_return_processor *crp;
437
438 crp = zalloc(sizeof(struct call_return_processor));
439 if (!crp)
440 return NULL;
441 crp->cpr = call_path_root__new();
442 if (!crp->cpr)
443 goto out_free;
444 crp->process = process;
445 crp->data = data;
446 return crp;
447
448out_free:
449 free(crp);
450 return NULL;
451}
452
453void call_return_processor__free(struct call_return_processor *crp)
454{
455 if (crp) {
456 call_path_root__free(crp->cpr);
457 free(crp);
458 }
459}
460
461static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
462 u64 timestamp, u64 ref, struct call_path *cp,
463 bool no_call)
464{
465 struct thread_stack_entry *tse;
466 int err;
467
468 if (ts->cnt == ts->sz) {
469 err = thread_stack__grow(ts);
470 if (err)
471 return err;
472 }
473
474 tse = &ts->stack[ts->cnt++];
475 tse->ret_addr = ret_addr;
476 tse->timestamp = timestamp;
477 tse->ref = ref;
478 tse->branch_count = ts->branch_count;
479 tse->cp = cp;
480 tse->no_call = no_call;
481
482 return 0;
483}
484
485static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
486 u64 ret_addr, u64 timestamp, u64 ref,
487 struct symbol *sym)
488{
489 int err;
490
491 if (!ts->cnt)
492 return 1;
493
494 if (ts->cnt == 1) {
495 struct thread_stack_entry *tse = &ts->stack[0];
496
497 if (tse->cp->sym == sym)
498 return thread_stack__call_return(thread, ts, --ts->cnt,
499 timestamp, ref, false);
500 }
501
502 if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
503 return thread_stack__call_return(thread, ts, --ts->cnt,
504 timestamp, ref, false);
505 } else {
506 size_t i = ts->cnt - 1;
507
508 while (i--) {
509 if (ts->stack[i].ret_addr != ret_addr)
510 continue;
511 i += 1;
512 while (ts->cnt > i) {
513 err = thread_stack__call_return(thread, ts,
514 --ts->cnt,
515 timestamp, ref,
516 true);
517 if (err)
518 return err;
519 }
520 return thread_stack__call_return(thread, ts, --ts->cnt,
521 timestamp, ref, false);
522 }
523 }
524
525 return 1;
526}
527
528static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts,
529 struct perf_sample *sample,
530 struct addr_location *from_al,
531 struct addr_location *to_al, u64 ref)
532{
533 struct call_path_root *cpr = ts->crp->cpr;
534 struct call_path *cp;
535 struct symbol *sym;
536 u64 ip;
537
538 if (sample->ip) {
539 ip = sample->ip;
540 sym = from_al->sym;
541 } else if (sample->addr) {
542 ip = sample->addr;
543 sym = to_al->sym;
544 } else {
545 return 0;
546 }
547
548 cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
549 ts->kernel_start);
550 if (!cp)
551 return -ENOMEM;
552
553 return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp,
554 true);
555}
556
557static int thread_stack__no_call_return(struct thread *thread,
558 struct thread_stack *ts,
559 struct perf_sample *sample,
560 struct addr_location *from_al,
561 struct addr_location *to_al, u64 ref)
562{
563 struct call_path_root *cpr = ts->crp->cpr;
564 struct call_path *cp, *parent;
565 u64 ks = ts->kernel_start;
566 int err;
567
568 if (sample->ip >= ks && sample->addr < ks) {
569 /* Return to userspace, so pop all kernel addresses */
570 while (thread_stack__in_kernel(ts)) {
571 err = thread_stack__call_return(thread, ts, --ts->cnt,
572 sample->time, ref,
573 true);
574 if (err)
575 return err;
576 }
577
578 /* If the stack is empty, push the userspace address */
579 if (!ts->cnt) {
580 cp = call_path__findnew(cpr, &cpr->call_path,
581 to_al->sym, sample->addr,
582 ts->kernel_start);
583 if (!cp)
584 return -ENOMEM;
585 return thread_stack__push_cp(ts, 0, sample->time, ref,
586 cp, true);
587 }
588 } else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
589 /* Return to userspace, so pop all kernel addresses */
590 while (thread_stack__in_kernel(ts)) {
591 err = thread_stack__call_return(thread, ts, --ts->cnt,
592 sample->time, ref,
593 true);
594 if (err)
595 return err;
596 }
597 }
598
599 if (ts->cnt)
600 parent = ts->stack[ts->cnt - 1].cp;
601 else
602 parent = &cpr->call_path;
603
604 /* This 'return' had no 'call', so push and pop top of stack */
605 cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
606 ts->kernel_start);
607 if (!cp)
608 return -ENOMEM;
609
610 err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
611 true);
612 if (err)
613 return err;
614
615 return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
616 to_al->sym);
617}
618
619static int thread_stack__trace_begin(struct thread *thread,
620 struct thread_stack *ts, u64 timestamp,
621 u64 ref)
622{
623 struct thread_stack_entry *tse;
624 int err;
625
626 if (!ts->cnt)
627 return 0;
628
629 /* Pop trace end */
630 tse = &ts->stack[ts->cnt - 1];
631 if (tse->cp->sym == NULL && tse->cp->ip == 0) {
632 err = thread_stack__call_return(thread, ts, --ts->cnt,
633 timestamp, ref, false);
634 if (err)
635 return err;
636 }
637
638 return 0;
639}
640
641static int thread_stack__trace_end(struct thread_stack *ts,
642 struct perf_sample *sample, u64 ref)
643{
644 struct call_path_root *cpr = ts->crp->cpr;
645 struct call_path *cp;
646 u64 ret_addr;
647
648 /* No point having 'trace end' on the bottom of the stack */
649 if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
650 return 0;
651
652 cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
653 ts->kernel_start);
654 if (!cp)
655 return -ENOMEM;
656
657 ret_addr = sample->ip + sample->insn_len;
658
659 return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
660 false);
661}
662
663int thread_stack__process(struct thread *thread, struct comm *comm,
664 struct perf_sample *sample,
665 struct addr_location *from_al,
666 struct addr_location *to_al, u64 ref,
667 struct call_return_processor *crp)
668{
669 struct thread_stack *ts = thread->ts;
670 int err = 0;
671
672 if (ts) {
673 if (!ts->crp) {
674 /* Supersede thread_stack__event() */
675 thread_stack__free(thread);
676 thread->ts = thread_stack__new(thread, crp);
677 if (!thread->ts)
678 return -ENOMEM;
679 ts = thread->ts;
680 ts->comm = comm;
681 }
682 } else {
683 thread->ts = thread_stack__new(thread, crp);
684 if (!thread->ts)
685 return -ENOMEM;
686 ts = thread->ts;
687 ts->comm = comm;
688 }
689
690 /* Flush stack on exec */
691 if (ts->comm != comm && thread->pid_ == thread->tid) {
692 err = thread_stack__flush(thread, ts);
693 if (err)
694 return err;
695 ts->comm = comm;
696 }
697
698 /* If the stack is empty, put the current symbol on the stack */
699 if (!ts->cnt) {
700 err = thread_stack__bottom(thread, ts, sample, from_al, to_al,
701 ref);
702 if (err)
703 return err;
704 }
705
706 ts->branch_count += 1;
707 ts->last_time = sample->time;
708
709 if (sample->flags & PERF_IP_FLAG_CALL) {
710 struct call_path_root *cpr = ts->crp->cpr;
711 struct call_path *cp;
712 u64 ret_addr;
713
714 if (!sample->ip || !sample->addr)
715 return 0;
716
717 ret_addr = sample->ip + sample->insn_len;
718 if (ret_addr == sample->addr)
719 return 0; /* Zero-length calls are excluded */
720
721 cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
722 to_al->sym, sample->addr,
723 ts->kernel_start);
724 if (!cp)
725 return -ENOMEM;
726 err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
727 cp, false);
728 } else if (sample->flags & PERF_IP_FLAG_RETURN) {
729 if (!sample->ip || !sample->addr)
730 return 0;
731
732 err = thread_stack__pop_cp(thread, ts, sample->addr,
733 sample->time, ref, from_al->sym);
734 if (err) {
735 if (err < 0)
736 return err;
737 err = thread_stack__no_call_return(thread, ts, sample,
738 from_al, to_al, ref);
739 }
740 } else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
741 err = thread_stack__trace_begin(thread, ts, sample->time, ref);
742 } else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
743 err = thread_stack__trace_end(ts, sample, ref);
744 }
745
746 return err;
747}
diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h
new file mode 100644
index 000000000000..b843bbef8ba2
--- /dev/null
+++ b/tools/perf/util/thread-stack.h
@@ -0,0 +1,111 @@
1/*
2 * thread-stack.h: Synthesize a thread's stack using call / return events
3 * Copyright (c) 2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#ifndef __PERF_THREAD_STACK_H
17#define __PERF_THREAD_STACK_H
18
19#include <sys/types.h>
20
21#include <linux/types.h>
22#include <linux/rbtree.h>
23
24struct thread;
25struct comm;
26struct ip_callchain;
27struct symbol;
28struct dso;
29struct call_return_processor;
30struct comm;
31struct perf_sample;
32struct addr_location;
33
34/*
35 * Call/Return flags.
36 *
37 * CALL_RETURN_NO_CALL: 'return' but no matching 'call'
38 * CALL_RETURN_NO_RETURN: 'call' but no matching 'return'
39 */
40enum {
41 CALL_RETURN_NO_CALL = 1 << 0,
42 CALL_RETURN_NO_RETURN = 1 << 1,
43};
44
45/**
46 * struct call_return - paired call/return information.
47 * @thread: thread in which call/return occurred
48 * @comm: comm in which call/return occurred
49 * @cp: call path
50 * @call_time: timestamp of call (if known)
51 * @return_time: timestamp of return (if known)
52 * @branch_count: number of branches seen between call and return
53 * @call_ref: external reference to 'call' sample (e.g. db_id)
54 * @return_ref: external reference to 'return' sample (e.g. db_id)
55 * @db_id: id used for db-export
56 * @flags: Call/Return flags
57 */
58struct call_return {
59 struct thread *thread;
60 struct comm *comm;
61 struct call_path *cp;
62 u64 call_time;
63 u64 return_time;
64 u64 branch_count;
65 u64 call_ref;
66 u64 return_ref;
67 u64 db_id;
68 u32 flags;
69};
70
71/**
72 * struct call_path - node in list of calls leading to a function call.
73 * @parent: call path to the parent function call
74 * @sym: symbol of function called
75 * @ip: only if sym is null, the ip of the function
76 * @db_id: id used for db-export
77 * @in_kernel: whether function is in the kernel
78 * @rb_node: node in parent's tree of called functions
79 * @children: tree of call paths of functions called
80 *
81 * In combination with the call_return structure, the call_path structure
82 * defines a context-sensitive call-graph.
83 */
84struct call_path {
85 struct call_path *parent;
86 struct symbol *sym;
87 u64 ip;
88 u64 db_id;
89 bool in_kernel;
90 struct rb_node rb_node;
91 struct rb_root children;
92};
93
94int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
95 u64 to_ip, u16 insn_len, u64 trace_nr);
96void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr);
97void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
98 size_t sz, u64 ip);
99void thread_stack__free(struct thread *thread);
100
101struct call_return_processor *
102call_return_processor__new(int (*process)(struct call_return *cr, void *data),
103 void *data);
104void call_return_processor__free(struct call_return_processor *crp);
105int thread_stack__process(struct thread *thread, struct comm *comm,
106 struct perf_sample *sample,
107 struct addr_location *from_al,
108 struct addr_location *to_al, u64 ref,
109 struct call_return_processor *crp);
110
111#endif
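
A hedged usage sketch (hypothetical caller, not part of the patch): a trace decoder feeds each call/return branch record into thread_stack__event() and can later fake a callchain for a sample with thread_stack__sample(); when a call_return_processor is attached, thread_stack__process() supersedes that path and emits paired call/return records instead:

	static void example_branch(struct thread *thread, struct perf_sample *sample,
				   u64 trace_nr, struct ip_callchain *chain, size_t sz)
	{
		/* per branch record */
		thread_stack__event(thread, sample->flags, sample->ip,
				    sample->addr, sample->insn_len, trace_nr);

		/* per sample: synthesize a callchain from the shadow stack */
		thread_stack__sample(thread, chain, sz, sample->ip);
	}
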
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index c41411726c7a..a2157f0ef1df 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -4,6 +4,7 @@
4#include <string.h> 4#include <string.h>
5#include "session.h" 5#include "session.h"
6#include "thread.h" 6#include "thread.h"
7#include "thread-stack.h"
7#include "util.h" 8#include "util.h"
8#include "debug.h" 9#include "debug.h"
9#include "comm.h" 10#include "comm.h"
@@ -15,7 +16,7 @@ int thread__init_map_groups(struct thread *thread, struct machine *machine)
15 pid_t pid = thread->pid_; 16 pid_t pid = thread->pid_;
16 17
17 if (pid == thread->tid || pid == -1) { 18 if (pid == thread->tid || pid == -1) {
18 thread->mg = map_groups__new(); 19 thread->mg = map_groups__new(machine);
19 } else { 20 } else {
20 leader = machine__findnew_thread(machine, pid, pid); 21 leader = machine__findnew_thread(machine, pid, pid);
21 if (leader) 22 if (leader)
@@ -66,6 +67,8 @@ void thread__delete(struct thread *thread)
66{ 67{
67 struct comm *comm, *tmp; 68 struct comm *comm, *tmp;
68 69
70 thread_stack__free(thread);
71
69 if (thread->mg) { 72 if (thread->mg) {
70 map_groups__put(thread->mg); 73 map_groups__put(thread->mg);
71 thread->mg = NULL; 74 thread->mg = NULL;
@@ -198,7 +201,6 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
198} 201}
199 202
200void thread__find_cpumode_addr_location(struct thread *thread, 203void thread__find_cpumode_addr_location(struct thread *thread,
201 struct machine *machine,
202 enum map_type type, u64 addr, 204 enum map_type type, u64 addr,
203 struct addr_location *al) 205 struct addr_location *al)
204{ 206{
@@ -211,8 +213,7 @@ void thread__find_cpumode_addr_location(struct thread *thread,
211 }; 213 };
212 214
213 for (i = 0; i < ARRAY_SIZE(cpumodes); i++) { 215 for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
214 thread__find_addr_location(thread, machine, cpumodes[i], type, 216 thread__find_addr_location(thread, cpumodes[i], type, addr, al);
215 addr, al);
216 if (al->map) 217 if (al->map)
217 break; 218 break;
218 } 219 }
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 8c75fa774706..160fd066a7d1 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -8,6 +8,8 @@
8#include "symbol.h" 8#include "symbol.h"
9#include <strlist.h> 9#include <strlist.h>
10 10
11struct thread_stack;
12
11struct thread { 13struct thread {
12 union { 14 union {
13 struct rb_node rb_node; 15 struct rb_node rb_node;
@@ -23,8 +25,10 @@ struct thread {
23 bool dead; /* if set thread has exited */ 25 bool dead; /* if set thread has exited */
24 struct list_head comm_list; 26 struct list_head comm_list;
25 int comm_len; 27 int comm_len;
28 u64 db_id;
26 29
27 void *priv; 30 void *priv;
31 struct thread_stack *ts;
28}; 32};
29 33
30struct machine; 34struct machine;
@@ -54,16 +58,15 @@ void thread__insert_map(struct thread *thread, struct map *map);
54int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp); 58int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp);
55size_t thread__fprintf(struct thread *thread, FILE *fp); 59size_t thread__fprintf(struct thread *thread, FILE *fp);
56 60
57void thread__find_addr_map(struct thread *thread, struct machine *machine, 61void thread__find_addr_map(struct thread *thread,
58 u8 cpumode, enum map_type type, u64 addr, 62 u8 cpumode, enum map_type type, u64 addr,
59 struct addr_location *al); 63 struct addr_location *al);
60 64
61void thread__find_addr_location(struct thread *thread, struct machine *machine, 65void thread__find_addr_location(struct thread *thread,
62 u8 cpumode, enum map_type type, u64 addr, 66 u8 cpumode, enum map_type type, u64 addr,
63 struct addr_location *al); 67 struct addr_location *al);
64 68
65void thread__find_cpumode_addr_location(struct thread *thread, 69void thread__find_cpumode_addr_location(struct thread *thread,
66 struct machine *machine,
67 enum map_type type, u64 addr, 70 enum map_type type, u64 addr,
68 struct addr_location *al); 71 struct addr_location *al);
69 72
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index f11636966a0f..bb2708bbfaca 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -39,7 +39,8 @@ struct perf_tool {
39 event_attr_op attr; 39 event_attr_op attr;
40 event_op2 tracing_data; 40 event_op2 tracing_data;
41 event_op2 finished_round, 41 event_op2 finished_round,
42 build_id; 42 build_id,
43 id_index;
43 bool ordered_events; 44 bool ordered_events;
44 bool ordering_requires_timestamps; 45 bool ordering_requires_timestamps;
45}; 46};
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 7419768c38b1..2dcfe9a7c8d0 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -26,7 +26,7 @@ static int __report_module(struct addr_location *al, u64 ip,
26 Dwfl_Module *mod; 26 Dwfl_Module *mod;
27 struct dso *dso = NULL; 27 struct dso *dso = NULL;
28 28
29 thread__find_addr_location(ui->thread, ui->machine, 29 thread__find_addr_location(ui->thread,
30 PERF_RECORD_MISC_USER, 30 PERF_RECORD_MISC_USER,
31 MAP__FUNCTION, ip, al); 31 MAP__FUNCTION, ip, al);
32 32
@@ -89,7 +89,7 @@ static int access_dso_mem(struct unwind_info *ui, Dwarf_Addr addr,
89 struct addr_location al; 89 struct addr_location al;
90 ssize_t size; 90 ssize_t size;
91 91
92 thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER, 92 thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
93 MAP__FUNCTION, addr, &al); 93 MAP__FUNCTION, addr, &al);
94 if (!al.map) { 94 if (!al.map) {
95 pr_debug("unwind: no map for %lx\n", (unsigned long)addr); 95 pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
@@ -164,14 +164,14 @@ frame_callback(Dwfl_Frame *state, void *arg)
164} 164}
165 165
166int unwind__get_entries(unwind_entry_cb_t cb, void *arg, 166int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
167 struct machine *machine, struct thread *thread, 167 struct thread *thread,
168 struct perf_sample *data, 168 struct perf_sample *data,
169 int max_stack) 169 int max_stack)
170{ 170{
171 struct unwind_info ui = { 171 struct unwind_info ui = {
172 .sample = data, 172 .sample = data,
173 .thread = thread, 173 .thread = thread,
174 .machine = machine, 174 .machine = thread->mg->machine,
175 .cb = cb, 175 .cb = cb,
176 .arg = arg, 176 .arg = arg,
177 .max_stack = max_stack, 177 .max_stack = max_stack,
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 4d45c0dfe343..371219a6daf1 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -284,7 +284,7 @@ static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
284{ 284{
285 struct addr_location al; 285 struct addr_location al;
286 286
287 thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER, 287 thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
288 MAP__FUNCTION, ip, &al); 288 MAP__FUNCTION, ip, &al);
289 return al.map; 289 return al.map;
290} 290}
@@ -374,7 +374,7 @@ static int access_dso_mem(struct unwind_info *ui, unw_word_t addr,
374 struct addr_location al; 374 struct addr_location al;
375 ssize_t size; 375 ssize_t size;
376 376
377 thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER, 377 thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
378 MAP__FUNCTION, addr, &al); 378 MAP__FUNCTION, addr, &al);
379 if (!al.map) { 379 if (!al.map) {
380 pr_debug("unwind: no map for %lx\n", (unsigned long)addr); 380 pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
@@ -476,14 +476,13 @@ static void put_unwind_info(unw_addr_space_t __maybe_unused as,
476 pr_debug("unwind: put_unwind_info called\n"); 476 pr_debug("unwind: put_unwind_info called\n");
477} 477}
478 478
479static int entry(u64 ip, struct thread *thread, struct machine *machine, 479static int entry(u64 ip, struct thread *thread,
480 unwind_entry_cb_t cb, void *arg) 480 unwind_entry_cb_t cb, void *arg)
481{ 481{
482 struct unwind_entry e; 482 struct unwind_entry e;
483 struct addr_location al; 483 struct addr_location al;
484 484
485 thread__find_addr_location(thread, machine, 485 thread__find_addr_location(thread, PERF_RECORD_MISC_USER,
486 PERF_RECORD_MISC_USER,
487 MAP__FUNCTION, ip, &al); 486 MAP__FUNCTION, ip, &al);
488 487
489 e.ip = ip; 488 e.ip = ip;
@@ -586,21 +585,21 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
586 unw_word_t ip; 585 unw_word_t ip;
587 586
588 unw_get_reg(&c, UNW_REG_IP, &ip); 587 unw_get_reg(&c, UNW_REG_IP, &ip);
589 ret = ip ? entry(ip, ui->thread, ui->machine, cb, arg) : 0; 588 ret = ip ? entry(ip, ui->thread, cb, arg) : 0;
590 } 589 }
591 590
592 return ret; 591 return ret;
593} 592}
594 593
595int unwind__get_entries(unwind_entry_cb_t cb, void *arg, 594int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
596 struct machine *machine, struct thread *thread, 595 struct thread *thread,
597 struct perf_sample *data, int max_stack) 596 struct perf_sample *data, int max_stack)
598{ 597{
599 u64 ip; 598 u64 ip;
600 struct unwind_info ui = { 599 struct unwind_info ui = {
601 .sample = data, 600 .sample = data,
602 .thread = thread, 601 .thread = thread,
603 .machine = machine, 602 .machine = thread->mg->machine,
604 }; 603 };
605 int ret; 604 int ret;
606 605
@@ -611,7 +610,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
611 if (ret) 610 if (ret)
612 return ret; 611 return ret;
613 612
614 ret = entry(ip, thread, machine, cb, arg); 613 ret = entry(ip, thread, cb, arg);
615 if (ret) 614 if (ret)
616 return -ENOMEM; 615 return -ENOMEM;
617 616
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
index f50b737235eb..12790cf94618 100644
--- a/tools/perf/util/unwind.h
+++ b/tools/perf/util/unwind.h
@@ -16,7 +16,6 @@ typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
16 16
17#ifdef HAVE_DWARF_UNWIND_SUPPORT 17#ifdef HAVE_DWARF_UNWIND_SUPPORT
18int unwind__get_entries(unwind_entry_cb_t cb, void *arg, 18int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
19 struct machine *machine,
20 struct thread *thread, 19 struct thread *thread,
21 struct perf_sample *data, int max_stack); 20 struct perf_sample *data, int max_stack);
22/* libunwind specific */ 21/* libunwind specific */
@@ -38,7 +37,6 @@ static inline void unwind__finish_access(struct thread *thread __maybe_unused) {
38static inline int 37static inline int
39unwind__get_entries(unwind_entry_cb_t cb __maybe_unused, 38unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
40 void *arg __maybe_unused, 39 void *arg __maybe_unused,
41 struct machine *machine __maybe_unused,
42 struct thread *thread __maybe_unused, 40 struct thread *thread __maybe_unused,
43 struct perf_sample *data __maybe_unused, 41 struct perf_sample *data __maybe_unused,
44 int max_stack __maybe_unused) 42 int max_stack __maybe_unused)
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 80bfdaa0e2a4..7dc44cfe25b3 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -351,4 +351,9 @@ void mem_bswap_32(void *src, int byte_size);
351 351
352const char *get_filename_for_perf_kvm(void); 352const char *get_filename_for_perf_kvm(void);
353bool find_process(const char *name); 353bool find_process(const char *name);
354
355#ifdef HAVE_ZLIB_SUPPORT
356int gzip_decompress_to_file(const char *input, int output_fd);
357#endif
358
354#endif /* GIT_COMPAT_UTIL_H */ 359#endif /* GIT_COMPAT_UTIL_H */
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index adca69384fcc..5c7dd796979d 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -12,9 +12,16 @@
12#include "util.h" 12#include "util.h"
13#include "symbol.h" 13#include "symbol.h"
14#include "machine.h" 14#include "machine.h"
15#include "thread.h"
15#include "linux/string.h" 16#include "linux/string.h"
16#include "debug.h" 17#include "debug.h"
17 18
19/*
20 * Include definition of find_vdso_map() also used in perf-read-vdso.c for
21 * building perf-read-vdso32 and perf-read-vdsox32.
22 */
23#include "find-vdso-map.c"
24
18#define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX" 25#define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX"
19 26
20struct vdso_file { 27struct vdso_file {
@@ -22,10 +29,15 @@ struct vdso_file {
22 bool error; 29 bool error;
23 char temp_file_name[sizeof(VDSO__TEMP_FILE_NAME)]; 30 char temp_file_name[sizeof(VDSO__TEMP_FILE_NAME)];
24 const char *dso_name; 31 const char *dso_name;
32 const char *read_prog;
25}; 33};
26 34
27struct vdso_info { 35struct vdso_info {
28 struct vdso_file vdso; 36 struct vdso_file vdso;
37#if BITS_PER_LONG == 64
38 struct vdso_file vdso32;
39 struct vdso_file vdsox32;
40#endif
29}; 41};
30 42
31static struct vdso_info *vdso_info__new(void) 43static struct vdso_info *vdso_info__new(void)
@@ -35,42 +47,23 @@ static struct vdso_info *vdso_info__new(void)
35 .temp_file_name = VDSO__TEMP_FILE_NAME, 47 .temp_file_name = VDSO__TEMP_FILE_NAME,
36 .dso_name = DSO__NAME_VDSO, 48 .dso_name = DSO__NAME_VDSO,
37 }, 49 },
50#if BITS_PER_LONG == 64
51 .vdso32 = {
52 .temp_file_name = VDSO__TEMP_FILE_NAME,
53 .dso_name = DSO__NAME_VDSO32,
54 .read_prog = "perf-read-vdso32",
55 },
56 .vdsox32 = {
57 .temp_file_name = VDSO__TEMP_FILE_NAME,
58 .dso_name = DSO__NAME_VDSOX32,
59 .read_prog = "perf-read-vdsox32",
60 },
61#endif
38 }; 62 };
39 63
40 return memdup(&vdso_info_init, sizeof(vdso_info_init)); 64 return memdup(&vdso_info_init, sizeof(vdso_info_init));
41} 65}
42 66
43static int find_vdso_map(void **start, void **end)
44{
45 FILE *maps;
46 char line[128];
47 int found = 0;
48
49 maps = fopen("/proc/self/maps", "r");
50 if (!maps) {
51 pr_err("vdso: cannot open maps\n");
52 return -1;
53 }
54
55 while (!found && fgets(line, sizeof(line), maps)) {
56 int m = -1;
57
58 /* We care only about private r-x mappings. */
59 if (2 != sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n",
60 start, end, &m))
61 continue;
62 if (m < 0)
63 continue;
64
65 if (!strncmp(&line[m], VDSO__MAP_NAME,
66 sizeof(VDSO__MAP_NAME) - 1))
67 found = 1;
68 }
69
70 fclose(maps);
71 return !found;
72}
73
74static char *get_file(struct vdso_file *vdso_file) 67static char *get_file(struct vdso_file *vdso_file)
75{ 68{
76 char *vdso = NULL; 69 char *vdso = NULL;
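find_vdso_map() is not deleted by this hunk, only moved: the #include of find-vdso-map.c above pulls the same /proc/self/maps scan back into vdso.c, and the standalone perf-read-vdso32/x32 readers can compile the same file. A minimal sketch of such a consumer, assuming the function keeps the contract visible in the removed lines (returns 0 and fills start/end with the r-xp [vdso] mapping of the calling process); dump_own_vdso() itself is hypothetical:

#include <stdio.h>

#include "find-vdso-map.c"	/* provides find_vdso_map() */

/* Write this process's own vdso image to stdout, roughly what a
 * perf-read-vdso32/x32 helper needs to do. */
static int dump_own_vdso(void)
{
	void *start, *end;

	if (find_vdso_map(&start, &end))
		return -1;	/* no [vdso] r-xp mapping found */

	if (fwrite(start, end - start, 1, stdout) != 1)
		return -1;

	return 0;
}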
@@ -117,6 +110,12 @@ void vdso__exit(struct machine *machine)
117 110
118 if (vdso_info->vdso.found) 111 if (vdso_info->vdso.found)
119 unlink(vdso_info->vdso.temp_file_name); 112 unlink(vdso_info->vdso.temp_file_name);
113#if BITS_PER_LONG == 64
114 if (vdso_info->vdso32.found)
115 unlink(vdso_info->vdso32.temp_file_name);
116 if (vdso_info->vdsox32.found)
117 unlink(vdso_info->vdsox32.temp_file_name);
118#endif
120 119
121 zfree(&machine->vdso_info); 120 zfree(&machine->vdso_info);
122} 121}
@@ -135,6 +134,153 @@ static struct dso *vdso__new(struct machine *machine, const char *short_name,
135 return dso; 134 return dso;
136} 135}
137 136
137#if BITS_PER_LONG == 64
138
139static enum dso_type machine__thread_dso_type(struct machine *machine,
140 struct thread *thread)
141{
142 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
143 struct map *map;
144 struct dso *dso;
145
146 map = map_groups__first(thread->mg, MAP__FUNCTION);
147 for (; map ; map = map_groups__next(map)) {
148 dso = map->dso;
149 if (!dso || dso->long_name[0] != '/')
150 continue;
151 dso_type = dso__type(dso, machine);
152 if (dso_type != DSO__TYPE_UNKNOWN)
153 break;
154 }
155
156 return dso_type;
157}
158
159static int vdso__do_copy_compat(FILE *f, int fd)
160{
161 char buf[4096];
162 size_t count;
163
164 while (1) {
165 count = fread(buf, 1, sizeof(buf), f);
166 if (ferror(f))
167 return -errno;
168 if (feof(f))
169 break;
170 if (count && writen(fd, buf, count) != (ssize_t)count)
171 return -errno;
172 }
173
174 return 0;
175}
176
177static int vdso__copy_compat(const char *prog, int fd)
178{
179 FILE *f;
180 int err;
181
182 f = popen(prog, "r");
183 if (!f)
184 return -errno;
185
186 err = vdso__do_copy_compat(f, fd);
187
188 if (pclose(f) == -1)
189 return -errno;
190
191 return err;
192}
193
194static int vdso__create_compat_file(const char *prog, char *temp_name)
195{
196 int fd, err;
197
198 fd = mkstemp(temp_name);
199 if (fd < 0)
200 return -errno;
201
202 err = vdso__copy_compat(prog, fd);
203
204 if (close(fd) == -1)
205 return -errno;
206
207 return err;
208}
209
210static const char *vdso__get_compat_file(struct vdso_file *vdso_file)
211{
212 int err;
213
214 if (vdso_file->found)
215 return vdso_file->temp_file_name;
216
217 if (vdso_file->error)
218 return NULL;
219
220 err = vdso__create_compat_file(vdso_file->read_prog,
221 vdso_file->temp_file_name);
222 if (err) {
223 pr_err("%s failed, error %d\n", vdso_file->read_prog, err);
224 vdso_file->error = true;
225 return NULL;
226 }
227
228 vdso_file->found = true;
229
230 return vdso_file->temp_file_name;
231}
232
233static struct dso *vdso__findnew_compat(struct machine *machine,
234 struct vdso_file *vdso_file)
235{
236 const char *file_name;
237 struct dso *dso;
238
239 dso = dsos__find(&machine->user_dsos, vdso_file->dso_name, true);
240 if (dso)
241 return dso;
242
243 file_name = vdso__get_compat_file(vdso_file);
244 if (!file_name)
245 return NULL;
246
247 return vdso__new(machine, vdso_file->dso_name, file_name);
248}
249
250static int vdso__dso_findnew_compat(struct machine *machine,
251 struct thread *thread,
252 struct vdso_info *vdso_info,
253 struct dso **dso)
254{
255 enum dso_type dso_type;
256
257 dso_type = machine__thread_dso_type(machine, thread);
258
259#ifndef HAVE_PERF_READ_VDSO32
260 if (dso_type == DSO__TYPE_32BIT)
261 return 0;
262#endif
263#ifndef HAVE_PERF_READ_VDSOX32
264 if (dso_type == DSO__TYPE_X32BIT)
265 return 0;
266#endif
267
268 switch (dso_type) {
269 case DSO__TYPE_32BIT:
270 *dso = vdso__findnew_compat(machine, &vdso_info->vdso32);
271 return 1;
272 case DSO__TYPE_X32BIT:
273 *dso = vdso__findnew_compat(machine, &vdso_info->vdsox32);
274 return 1;
275 case DSO__TYPE_UNKNOWN:
276 case DSO__TYPE_64BIT:
277 default:
278 return 0;
279 }
280}
281
282#endif
283
138struct dso *vdso__dso_findnew(struct machine *machine, 284struct dso *vdso__dso_findnew(struct machine *machine,
139 struct thread *thread __maybe_unused) 285 struct thread *thread __maybe_unused)
140{ 286{
@@ -148,6 +294,11 @@ struct dso *vdso__dso_findnew(struct machine *machine,
148 if (!vdso_info) 294 if (!vdso_info)
149 return NULL; 295 return NULL;
150 296
297#if BITS_PER_LONG == 64
298 if (vdso__dso_findnew_compat(machine, thread, vdso_info, &dso))
299 return dso;
300#endif
301
151 dso = dsos__find(&machine->user_dsos, DSO__NAME_VDSO, true); 302 dso = dsos__find(&machine->user_dsos, DSO__NAME_VDSO, true);
152 if (!dso) { 303 if (!dso) {
153 char *file; 304 char *file;
@@ -164,5 +315,7 @@ struct dso *vdso__dso_findnew(struct machine *machine,
164 315
165bool dso__is_vdso(struct dso *dso) 316bool dso__is_vdso(struct dso *dso)
166{ 317{
167 return !strcmp(dso->short_name, DSO__NAME_VDSO); 318 return !strcmp(dso->short_name, DSO__NAME_VDSO) ||
319 !strcmp(dso->short_name, DSO__NAME_VDSO32) ||
320 !strcmp(dso->short_name, DSO__NAME_VDSOX32);
168} 321}
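Taken together, these hunks make vdso__dso_findnew() thread-aware on 64-bit builds: machine__thread_dso_type() classifies the thread by the first file-backed executable map whose DSO type it can determine, and for 32-bit or x32 tasks (only when HAVE_PERF_READ_VDSO32 / HAVE_PERF_READ_VDSOX32 is defined) the vdso image is obtained by popen()-ing the matching perf-read-vdso32/x32 helper and copying its stdout into a temp file, which then backs the [vdso32] or [vdsox32] DSO. A hypothetical call-site sketch (resolve_vdso_sample() is not in this patch):

#include "util/debug.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/vdso.h"

/* A sample landed in the thread's [vdso] range: let vdso__dso_findnew()
 * pick [vdso], [vdso32] or [vdsox32] based on the thread's bitness. */
static struct dso *resolve_vdso_sample(struct machine *machine,
				       struct thread *thread)
{
	struct dso *dso = vdso__dso_findnew(machine, thread);

	if (dso && !dso__is_vdso(dso))
		pr_debug("vdso lookup returned a non-vdso dso\n");

	return dso;
}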
diff --git a/tools/perf/util/vdso.h b/tools/perf/util/vdso.h
index af9d6929a215..d97da1616f0c 100644
--- a/tools/perf/util/vdso.h
+++ b/tools/perf/util/vdso.h
@@ -7,7 +7,9 @@
7 7
8#define VDSO__MAP_NAME "[vdso]" 8#define VDSO__MAP_NAME "[vdso]"
9 9
10#define DSO__NAME_VDSO "[vdso]" 10#define DSO__NAME_VDSO "[vdso]"
11#define DSO__NAME_VDSO32 "[vdso32]"
12#define DSO__NAME_VDSOX32 "[vdsox32]"
11 13
12static inline bool is_vdso_map(const char *filename) 14static inline bool is_vdso_map(const char *filename)
13{ 15{
diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c
new file mode 100644
index 000000000000..495a449fc25c
--- /dev/null
+++ b/tools/perf/util/zlib.c
@@ -0,0 +1,78 @@
1#include <stdio.h>
2#include <unistd.h>
3#include <sys/stat.h>
4#include <sys/mman.h>
5#include <zlib.h>
6
7#include "util/util.h"
8#include "util/debug.h"
9
10
11#define CHUNK_SIZE 16384
12
13int gzip_decompress_to_file(const char *input, int output_fd)
14{
15 int ret = Z_STREAM_ERROR;
16 int input_fd;
17 void *ptr;
18 int len;
19 struct stat stbuf;
20 unsigned char buf[CHUNK_SIZE];
21 z_stream zs = {
22 .zalloc = Z_NULL,
23 .zfree = Z_NULL,
24 .opaque = Z_NULL,
25 .avail_in = 0,
26 .next_in = Z_NULL,
27 };
28
29 input_fd = open(input, O_RDONLY);
30 if (input_fd < 0)
31 return -1;
32
33 if (fstat(input_fd, &stbuf) < 0)
34 goto out_close;
35
36 ptr = mmap(NULL, stbuf.st_size, PROT_READ, MAP_PRIVATE, input_fd, 0);
37 if (ptr == MAP_FAILED)
38 goto out_close;
39
40 if (inflateInit2(&zs, 16 + MAX_WBITS) != Z_OK)
41 goto out_unmap;
42
43 zs.next_in = ptr;
44 zs.avail_in = stbuf.st_size;
45
46 do {
47 zs.next_out = buf;
48 zs.avail_out = CHUNK_SIZE;
49
50 ret = inflate(&zs, Z_NO_FLUSH);
51 switch (ret) {
52 case Z_NEED_DICT:
53 ret = Z_DATA_ERROR;
54 /* fall through */
55 case Z_DATA_ERROR:
56 case Z_MEM_ERROR:
57 goto out;
58 default:
59 break;
60 }
61
62 len = CHUNK_SIZE - zs.avail_out;
63 if (writen(output_fd, buf, len) != len) {
64 ret = Z_DATA_ERROR;
65 goto out;
66 }
67
68 } while (ret != Z_STREAM_END);
69
70out:
71 inflateEnd(&zs);
72out_unmap:
73 munmap(ptr, stbuf.st_size);
74out_close:
75 close(input_fd);
76
77 return ret == Z_STREAM_END ? 0 : -1;
78}
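gzip_decompress_to_file() mmaps the whole input, initializes inflate() with 16 + MAX_WBITS so gzip-wrapped streams are accepted, and writes CHUNK_SIZE-sized output blocks to output_fd via writen(); it returns 0 only if the stream ends with Z_STREAM_END. A hedged usage sketch for zlib-enabled builds (open_decompressed() is hypothetical, not part of this patch):

#include <stdlib.h>
#include <unistd.h>

#include "util/util.h"	/* gzip_decompress_to_file(), HAVE_ZLIB_SUPPORT builds only */

/* Inflate a .gz file into an unlinked temp file and return the open fd. */
static int open_decompressed(const char *gz_path)
{
	char tmp[] = "/tmp/perf-gz-XXXXXX";
	int fd = mkstemp(tmp);

	if (fd < 0)
		return -1;

	unlink(tmp);		/* keep only the descriptor */

	if (gzip_decompress_to_file(gz_path, fd)) {
		close(fd);
		return -1;
	}

	lseek(fd, 0, SEEK_SET);	/* rewind so the caller can read it back */
	return fd;
}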