diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 19:13:28 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 19:13:28 -0400 |
commit | 1a4a2bc460721bc8f91e4c1294d39b38e5af132f (patch) | |
tree | fe646d05f6e17f05601e0a32cc796bec718ab6e7 /arch/x86/events/core.c | |
parent | 110a9e42b68719f584879c5c5c727bbae90d15f9 (diff) | |
parent | 1ef55be16ed69538f89e0a6508be5e62fdc9851c (diff) |
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull low-level x86 updates from Ingo Molnar:
"In this cycle this topic tree has become one of those 'super topics'
that accumulated a lot of changes:
- Add CONFIG_VMAP_STACK=y support to the core kernel and enable it on
x86 - preceded by an array of changes. v4.8 saw preparatory changes
in this area already - this is the rest of the work. Includes the
thread stack caching performance optimization. (Andy Lutomirski)
- switch_to() cleanups and all around enhancements. (Brian Gerst)
- A large number of dumpstack infrastructure enhancements and an
unwinder abstraction. The secret long term plan is safe(r) live
patching plus maybe another attempt at debuginfo based unwinding -
but all these current bits are standalone enhancements in a frame
pointer based debug environment as well. (Josh Poimboeuf)
- More __ro_after_init and const annotations. (Kees Cook)
- Enable KASLR for the vmemmap memory region. (Thomas Garnier)"
[ The virtually mapped stack changes are pretty fundamental, and not
x86-specific per se, even if they are only used on x86 right now. ]
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
x86/asm: Get rid of __read_cr4_safe()
thread_info: Use unsigned long for flags
x86/alternatives: Add stack frame dependency to alternative_call_2()
x86/dumpstack: Fix show_stack() task pointer regression
x86/dumpstack: Remove dump_trace() and related callbacks
x86/dumpstack: Convert show_trace_log_lvl() to use the new unwinder
oprofile/x86: Convert x86_backtrace() to use the new unwinder
x86/stacktrace: Convert save_stack_trace_*() to use the new unwinder
perf/x86: Convert perf_callchain_kernel() to use the new unwinder
x86/unwind: Add new unwind interface and implementations
x86/dumpstack: Remove NULL task pointer convention
fork: Optimize task creation by caching two thread stacks per CPU if CONFIG_VMAP_STACK=y
sched/core: Free the stack early if CONFIG_THREAD_INFO_IN_TASK
lib/syscall: Pin the task stack in collect_syscall()
x86/process: Pin the target stack in get_wchan()
x86/dumpstack: Pin the target stack when dumping it
kthread: Pin the stack via try_get_task_stack()/put_task_stack() in to_live_kthread() function
sched/core: Add try_get_task_stack() and put_task_stack()
x86/entry/64: Fix a minor comment rebase error
iommu/amd: Don't put completion-wait semaphore on stack
...
Diffstat (limited to 'arch/x86/events/core.c')
-rw-r--r-- | arch/x86/events/core.c | 36 |
1 file changed, 12 insertions, 24 deletions
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 18a1acf86c90..d31735f37ed7 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <asm/timer.h> | 37 | #include <asm/timer.h> |
38 | #include <asm/desc.h> | 38 | #include <asm/desc.h> |
39 | #include <asm/ldt.h> | 39 | #include <asm/ldt.h> |
40 | #include <asm/unwind.h> | ||
40 | 41 | ||
41 | #include "perf_event.h" | 42 | #include "perf_event.h" |
42 | 43 | ||
@@ -2267,39 +2268,26 @@ void arch_perf_update_userpage(struct perf_event *event, | |||
2267 | cyc2ns_read_end(data); | 2268 | cyc2ns_read_end(data); |
2268 | } | 2269 | } |
2269 | 2270 | ||
2270 | /* | ||
2271 | * callchain support | ||
2272 | */ | ||
2273 | |||
2274 | static int backtrace_stack(void *data, char *name) | ||
2275 | { | ||
2276 | return 0; | ||
2277 | } | ||
2278 | |||
2279 | static int backtrace_address(void *data, unsigned long addr, int reliable) | ||
2280 | { | ||
2281 | struct perf_callchain_entry_ctx *entry = data; | ||
2282 | |||
2283 | return perf_callchain_store(entry, addr); | ||
2284 | } | ||
2285 | |||
2286 | static const struct stacktrace_ops backtrace_ops = { | ||
2287 | .stack = backtrace_stack, | ||
2288 | .address = backtrace_address, | ||
2289 | .walk_stack = print_context_stack_bp, | ||
2290 | }; | ||
2291 | |||
2292 | void | 2271 | void |
2293 | perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) | 2272 | perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) |
2294 | { | 2273 | { |
2274 | struct unwind_state state; | ||
2275 | unsigned long addr; | ||
2276 | |||
2295 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | 2277 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { |
2296 | /* TODO: We don't support guest os callchain now */ | 2278 | /* TODO: We don't support guest os callchain now */ |
2297 | return; | 2279 | return; |
2298 | } | 2280 | } |
2299 | 2281 | ||
2300 | perf_callchain_store(entry, regs->ip); | 2282 | if (perf_callchain_store(entry, regs->ip)) |
2283 | return; | ||
2301 | 2284 | ||
2302 | dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); | 2285 | for (unwind_start(&state, current, regs, NULL); !unwind_done(&state); |
2286 | unwind_next_frame(&state)) { | ||
2287 | addr = unwind_get_return_address(&state); | ||
2288 | if (!addr || perf_callchain_store(entry, addr)) | ||
2289 | return; | ||
2290 | } | ||
2303 | } | 2291 | } |
2304 | 2292 | ||
2305 | static inline int | 2293 | static inline int |