author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 19:13:28 -0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 19:13:28 -0400
commit | 1a4a2bc460721bc8f91e4c1294d39b38e5af132f (patch) |
tree | fe646d05f6e17f05601e0a32cc796bec718ab6e7 /arch/x86/kernel/stacktrace.c |
parent | 110a9e42b68719f584879c5c5c727bbae90d15f9 (diff) |
parent | 1ef55be16ed69538f89e0a6508be5e62fdc9851c (diff) |
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull low-level x86 updates from Ingo Molnar:
"In this cycle this topic tree has become one of those 'super topics'
that accumulated a lot of changes:
- Add CONFIG_VMAP_STACK=y support to the core kernel and enable it on
x86 - preceded by an array of changes. v4.8 saw preparatory changes
in this area already - this is the rest of the work. Includes the
thread stack caching performance optimization. (Andy Lutomirski)
- switch_to() cleanups and all-around enhancements. (Brian Gerst)
- A large number of dumpstack infrastructure enhancements and an
unwinder abstraction. The secret long term plan is safe(r) live
patching plus maybe another attempt at debuginfo based unwinding -
but all these current bits are standalone enhancements in a frame
pointer based debug environment as well. (Josh Poimboeuf)
- More __ro_after_init and const annotations. (Kees Cook)
- Enable KASLR for the vmemmap memory region. (Thomas Garnier)"
[ The virtually mapped stack changes are pretty fundamental, and not
x86-specific per se, even if they are only used on x86 right now. ]
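The first item in the list above is the headline change: with CONFIG_VMAP_STACK=y the thread stack lives in the vmalloc area, where every allocation is followed by an unmapped guard hole, so overrunning the stack faults immediately instead of silently corrupting whatever happens to sit next to it. The following is a simplified sketch of the allocation-side difference, not the exact kernel/fork.c code (the per-CPU stack cache and accounting are omitted, and `alloc_thread_stack` is a name used here only for illustration):

```c
#ifdef CONFIG_VMAP_STACK
static void *alloc_thread_stack(int node)
{
	/*
	 * Virtually mapped stack: physically discontiguous pages mapped
	 * into the vmalloc area. vmalloc leaves an unmapped guard hole
	 * after each allocation, so running off the end of the stack
	 * traps instead of scribbling over a neighboring allocation.
	 */
	return __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
				    VMALLOC_START, VMALLOC_END,
				    THREADINFO_GFP, PAGE_KERNEL, 0, node,
				    __builtin_return_address(0));
}
#else
static void *alloc_thread_stack(int node)
{
	/* Classic behavior: physically contiguous pages, no guard page. */
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}
#endif
```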
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
x86/asm: Get rid of __read_cr4_safe()
thread_info: Use unsigned long for flags
x86/alternatives: Add stack frame dependency to alternative_call_2()
x86/dumpstack: Fix show_stack() task pointer regression
x86/dumpstack: Remove dump_trace() and related callbacks
x86/dumpstack: Convert show_trace_log_lvl() to use the new unwinder
oprofile/x86: Convert x86_backtrace() to use the new unwinder
x86/stacktrace: Convert save_stack_trace_*() to use the new unwinder
perf/x86: Convert perf_callchain_kernel() to use the new unwinder
x86/unwind: Add new unwind interface and implementations
x86/dumpstack: Remove NULL task pointer convention
fork: Optimize task creation by caching two thread stacks per CPU if CONFIG_VMAP_STACK=y
sched/core: Free the stack early if CONFIG_THREAD_INFO_IN_TASK
lib/syscall: Pin the task stack in collect_syscall()
x86/process: Pin the target stack in get_wchan()
x86/dumpstack: Pin the target stack when dumping it
kthread: Pin the stack via try_get_task_stack()/put_task_stack() in to_live_kthread() function
sched/core: Add try_get_task_stack() and put_task_stack()
x86/entry/64: Fix a minor comment rebase error
iommu/amd: Don't put completion-wait semaphore on stack
...
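Several of the commits above ("Pin the task stack...", "Pin the target stack...") exist because a vmalloc'ed stack can be freed, and its mapping torn down, while another CPU is still walking it. The pattern they introduce is visible in save_stack_trace_tsk() in the diff below: take a reference on the stack around any remote access. As a sketch, with `walk_remote_stack` being a hypothetical caller used only to illustrate the pairing:

```c
/* Hypothetical illustration of the stack-pinning pattern. */
static void walk_remote_stack(struct task_struct *tsk)
{
	/* Grab a reference; fails if the stack is already being freed. */
	if (!try_get_task_stack(tsk))
		return;

	/* ... it is now safe to read tsk's stack pages ... */

	put_task_stack(tsk);	/* Drop the reference; may free the stack. */
}
```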
Diffstat (limited to 'arch/x86/kernel/stacktrace.c')
-rw-r--r-- | arch/x86/kernel/stacktrace.c | 79
1 file changed, 34 insertions(+), 45 deletions(-)
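For reference, the buffer these functions fill is struct stack_trace from <linux/stacktrace.h> as it existed at this point in the kernel's history (the field comment is a paraphrase):

```c
struct stack_trace {
	unsigned int nr_entries, max_entries;
	unsigned long *entries;
	int skip;	/* input argument: how many entries to skip */
};
```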
```diff
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 4738f5e0f2ab..0653788026e2 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -8,80 +8,69 @@
 #include <linux/export.h>
 #include <linux/uaccess.h>
 #include <asm/stacktrace.h>
+#include <asm/unwind.h>
 
-static int save_stack_stack(void *data, char *name)
+static int save_stack_address(struct stack_trace *trace, unsigned long addr,
+			      bool nosched)
 {
-	return 0;
-}
-
-static int
-__save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
-{
-	struct stack_trace *trace = data;
-#ifdef CONFIG_FRAME_POINTER
-	if (!reliable)
-		return 0;
-#endif
 	if (nosched && in_sched_functions(addr))
 		return 0;
+
 	if (trace->skip > 0) {
 		trace->skip--;
 		return 0;
 	}
-	if (trace->nr_entries < trace->max_entries) {
-		trace->entries[trace->nr_entries++] = addr;
-		return 0;
-	} else {
-		return -1; /* no more room, stop walking the stack */
-	}
-}
 
-static int save_stack_address(void *data, unsigned long addr, int reliable)
-{
-	return __save_stack_address(data, addr, reliable, false);
+	if (trace->nr_entries >= trace->max_entries)
+		return -1;
+
+	trace->entries[trace->nr_entries++] = addr;
+	return 0;
 }
 
-static int
-save_stack_address_nosched(void *data, unsigned long addr, int reliable)
+static void __save_stack_trace(struct stack_trace *trace,
+			       struct task_struct *task, struct pt_regs *regs,
+			       bool nosched)
 {
-	return __save_stack_address(data, addr, reliable, true);
-}
+	struct unwind_state state;
+	unsigned long addr;
 
-static const struct stacktrace_ops save_stack_ops = {
-	.stack		= save_stack_stack,
-	.address	= save_stack_address,
-	.walk_stack	= print_context_stack,
-};
+	if (regs)
+		save_stack_address(trace, regs->ip, nosched);
 
-static const struct stacktrace_ops save_stack_ops_nosched = {
-	.stack		= save_stack_stack,
-	.address	= save_stack_address_nosched,
-	.walk_stack	= print_context_stack,
-};
+	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
+	     unwind_next_frame(&state)) {
+		addr = unwind_get_return_address(&state);
+		if (!addr || save_stack_address(trace, addr, nosched))
+			break;
+	}
+
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
 
 /*
  * Save stack-backtrace addresses into a stack_trace buffer.
  */
 void save_stack_trace(struct stack_trace *trace)
 {
-	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
-	if (trace->nr_entries < trace->max_entries)
-		trace->entries[trace->nr_entries++] = ULONG_MAX;
+	__save_stack_trace(trace, current, NULL, false);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
 void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 {
-	dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
-	if (trace->nr_entries < trace->max_entries)
-		trace->entries[trace->nr_entries++] = ULONG_MAX;
+	__save_stack_trace(trace, current, regs, false);
 }
 
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
-	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
-	if (trace->nr_entries < trace->max_entries)
-		trace->entries[trace->nr_entries++] = ULONG_MAX;
+	if (!try_get_task_stack(tsk))
+		return;
+
+	__save_stack_trace(trace, tsk, NULL, true);
+
+	put_task_stack(tsk);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
 
```
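The conversion keeps the callers' contract intact: fill entries, honor skip, and terminate with ULONG_MAX when there is room; only the walking machinery underneath changes from the dump_trace() callbacks to the unwind_*() state machine. A minimal, hypothetical in-kernel caller for orientation (the helper name is illustrative; save_stack_trace() and print_stack_trace() are the real API of this era):

```c
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/stacktrace.h>

/* Hypothetical debugging helper: print the current call chain. */
static void example_print_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 1,	/* drop this helper's own frame */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);	/* 0 = no extra indentation */
}
```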