author     Ingo Molnar <mingo@kernel.org>    2016-05-20 02:19:20 -0400
committer  Ingo Molnar <mingo@kernel.org>    2016-05-20 02:20:14 -0400
commit     21f77d231fabd33c5de61fbff31818d93203353e
tree       74bd85f1184b26409605884bf65ae1c1ba5d724c /arch/x86/events/core.c
parent     b0a434fb7412937d55f15b8897c5646c81497bbe
parent     a29d5c9b8167dbc21a7ca8c0302e3799f9063b4e
Merge tag 'perf-core-for-mingo-20160516' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:
User visible changes:
- Honour the kernel.perf_event_max_stack knob more precisely by not counting
PERF_CONTEXT_{KERNEL,USER} when deciding when to stop adding entries to
the perf_sample->ip_callchain[] array (Arnaldo Carvalho de Melo); a sketch
of this accounting follows after this list
- Fix indentation of 'stalled-backend-cycles' in 'perf stat' (Namhyung Kim)
- Update runtime using 'cpu-clock' event in 'perf stat' (Namhyung Kim)
- Use 'cpu-clock' for cpu targets in 'perf stat' (Namhyung Kim)
- Avoid fractional digits for integer scales in 'perf stat' (Andi Kleen)
- Store the vdso buildid unconditionally, as it appears in callchains and
we're not checking those when creating the build-id table, so we
end up not being able to resolve VDSO symbols when doing analysis
on a different machine than the one where recording was done, possibly
even on a different arch (arm -> x86_64) (He Kuang)
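A minimal, self-contained sketch of the first item's accounting idea (an illustration, not the kernel's actual implementation; the type and helper names and the SKETCH_MAX_CONTEXTS budget are made up for the example): real return addresses are charged against max_stack, while PERF_CONTEXT_* markers are stored in the same ip[] array but tracked separately, so they no longer consume the user's configured budget.

#include <stdint.h>

#define SKETCH_MAX_CONTEXTS 8           /* illustrative budget for PERF_CONTEXT_* markers */

struct callchain_entry {
        uint64_t nr;                    /* total slots filled, markers included */
        uint64_t ip[];                  /* sized by the caller for max_stack + markers */
};

struct callchain_entry_ctx {
        struct callchain_entry *entry;  /* array filled for perf_sample->ip_callchain[] */
        uint32_t max_stack;             /* value of kernel.perf_event_max_stack */
        uint32_t nr;                    /* real addresses stored so far */
        uint16_t contexts;              /* PERF_CONTEXT_* markers stored so far */
};

/* Store a real return address: counted against max_stack. */
static int callchain_store(struct callchain_entry_ctx *ctx, uint64_t ip)
{
        if (ctx->nr >= ctx->max_stack)
                return -1;              /* address budget exhausted */
        ctx->entry->ip[ctx->entry->nr++] = ip;
        ctx->nr++;
        return 0;
}

/* Store a context marker (e.g. PERF_CONTEXT_KERNEL): NOT counted against
 * max_stack, only against its own small budget. */
static int callchain_store_context(struct callchain_entry_ctx *ctx, uint64_t marker)
{
        if (ctx->contexts >= SKETCH_MAX_CONTEXTS)
                return -1;
        ctx->entry->ip[ctx->entry->nr++] = marker;
        ctx->contexts++;
        return 0;
}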
Infrastructure changes:
- Generalize the max_stack sysctl handler, to be used for configuring
multiple kernel knobs related to callchains (Arnaldo Carvalho de Melo);
a sketch follows below
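A rough sketch of what generalizing such a handler can look like (an illustration built on the standard ctl_table / proc_dointvec_minmax interface of that era, not the actual patch; the function name is hypothetical): the handler operates on whatever integer table->data points at instead of hard-coding one global, so several callchain-related knobs can share it.

#include <linux/sysctl.h>

/* Illustrative only: one handler for several integer callchain knobs,
 * because it dereferences table->data rather than a specific global
 * such as sysctl_perf_event_max_stack. */
int callchain_knob_handler(struct ctl_table *table, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int *value = table->data;
        int new_value = *value;
        struct ctl_table new_table = *table;
        int ret;

        new_table.data = &new_value;
        ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
        if (ret || !write)
                return ret;

        /* The real handler would also serialize against in-flight
         * callchain users before publishing the new value. */
        *value = new_value;
        return 0;
}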
Cleanups:
- Introduce DSO__NAME_KALLSYMS and DSO__NAME_KCORE, to stop using
open coded strings (Masami Hiramatsu)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/events/core.c')
-rw-r--r--  arch/x86/events/core.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 73a75aa5a66d..33787ee817f0 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2202,7 +2202,7 @@ static int backtrace_stack(void *data, char *name)
 
 static int backtrace_address(void *data, unsigned long addr, int reliable)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
 
         return perf_callchain_store(entry, addr);
 }
@@ -2214,7 +2214,7 @@ static const struct stacktrace_ops backtrace_ops = {
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                 /* TODO: We don't support guest os callchain now */
@@ -2268,7 +2268,7 @@ static unsigned long get_segment_base(unsigned int segment)
 #include <asm/compat.h>
 
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
         /* 32-bit process in 64-bit kernel. */
         unsigned long ss_base, cs_base;
@@ -2283,7 +2283,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 
         fp = compat_ptr(ss_base + regs->bp);
         pagefault_disable();
-        while (entry->nr < sysctl_perf_event_max_stack) {
+        while (entry->nr < entry->max_stack) {
                 unsigned long bytes;
                 frame.next_frame = 0;
                 frame.return_address = 0;
@@ -2309,14 +2309,14 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 }
 #else
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
         return 0;
 }
 #endif
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         struct stack_frame frame;
         const void __user *fp;
@@ -2343,7 +2343,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
                 return;
 
         pagefault_disable();
-        while (entry->nr < sysctl_perf_event_max_stack) {
+        while (entry->nr < entry->max_stack) {
                 unsigned long bytes;
                 frame.next_frame = NULL;
                 frame.return_address = 0;
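Tying the hunks back to the merge message: the x86 callchain walkers now take a perf_callchain_entry_ctx and bound the frame-pointer loop with entry->max_stack, so the limit is captured once by the generic perf core instead of being re-read from the sysctl_perf_event_max_stack global at each arch call site. A hedged sketch of that hand-off, reusing the illustrative ctx shape from the sketch after the 'User visible changes' list above (the helper name is hypothetical):

/* Hypothetical initialization done by the generic core before it calls
 * the arch-specific perf_callchain_kernel()/perf_callchain_user(). */
static void init_callchain_ctx(struct callchain_entry_ctx *ctx,
                               struct callchain_entry *entry,
                               uint32_t max_stack)
{
        entry->nr      = 0;
        ctx->entry     = entry;
        ctx->max_stack = max_stack;     /* e.g. kernel.perf_event_max_stack */
        ctx->nr        = 0;
        ctx->contexts  = 0;
}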