Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                   |  1
-rw-r--r--  arch/x86/include/asm/ftrace.h      |  1
-rw-r--r--  arch/x86/include/asm/thread_info.h | 29
-rw-r--r--  arch/x86/kernel/ftrace.c           | 29
-rw-r--r--  arch/x86/kernel/stacktrace.c       | 64
5 files changed, 80 insertions, 44 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7a146baaa990..e49a4fd718fe 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -36,6 +36,7 @@ config X86
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_GENERIC_DMA_COHERENT if X86_32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
+	select USER_STACKTRACE_SUPPORT
 
 config ARCH_DEFCONFIG
 	string
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 2bb43b433e07..754a3e082f94 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -29,7 +29,6 @@ struct dyn_arch_ftrace {
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
-#define FTRACE_RET_STACK_SIZE 20
 
 #ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e90e81ef6ab9..0921b4018c11 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -40,36 +40,8 @@ struct thread_info {
 	 */
 	__u8			supervisor_stack[0];
 #endif
-
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	/* Index of current stored adress in ret_stack */
-	int			curr_ret_stack;
-	/* Stack of return addresses for return function tracing */
-	struct ftrace_ret_stack	ret_stack[FTRACE_RET_STACK_SIZE];
-	/*
-	 * Number of functions that haven't been traced
-	 * because of depth overrun.
-	 */
-	atomic_t		trace_overrun;
-#endif
 };
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-#define INIT_THREAD_INFO(tsk)			\
-{						\
-	.task		= &tsk,			\
-	.exec_domain	= &default_exec_domain,	\
-	.flags		= 0,			\
-	.cpu		= 0,			\
-	.preempt_count	= 1,			\
-	.addr_limit	= KERNEL_DS,		\
-	.restart_block	= {			\
-		.fn = do_no_restart_syscall,	\
-	},					\
-	.curr_ret_stack = -1,			\
-	.trace_overrun	= ATOMIC_INIT(0)	\
-}
-#else
 #define INIT_THREAD_INFO(tsk)			\
 {						\
 	.task		= &tsk,			\
@@ -82,7 +54,6 @@ struct thread_info {
 		.fn = do_no_restart_syscall,	\
 	},					\
 }
-#endif
 
 #define init_thread_info	(init_thread_union.thread_info)
 #define init_stack		(init_thread_union.stack)
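Editorial note: the return-tracer state stripped out of thread_info above does not vanish. This diffstat is limited to arch/x86, so the generic half of the patch is not shown; judging from how arch/x86/kernel/ftrace.c below now goes through current, task_struct presumably gains per-task fields roughly like the sketch that follows, with the fixed FTRACE_RET_STACK_SIZE array replaced by a dynamically allocated stack bounded by an assumed generic FTRACE_RETFUNC_DEPTH constant. Field names are taken from the accesses in ftrace.c; this is a sketch, not the actual sched.h hunk.

/* Sketch only - assumed task_struct additions, not part of this arch/x86 diff. */
#ifdef CONFIG_FUNCTION_RET_TRACER
	/* Index of current stored address in ret_stack */
	int			curr_ret_stack;
	/* Stack of return addresses for return function tracing,
	 * now allocated dynamically rather than sized statically
	 * inside thread_info; NULL until it has been set up. */
	struct ftrace_ret_stack	*ret_stack;
	/* Number of functions that haven't been traced
	 * because of depth overrun. */
	atomic_t		trace_overrun;
#endif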
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 356bb1eb6e9a..bb137f7297ed 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -350,19 +350,21 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 				unsigned long func)
 {
 	int index;
-	struct thread_info *ti = current_thread_info();
+
+	if (!current->ret_stack)
+		return -EBUSY;
 
 	/* The return trace stack is full */
-	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
-		atomic_inc(&ti->trace_overrun);
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
 		return -EBUSY;
 	}
 
-	index = ++ti->curr_ret_stack;
+	index = ++current->curr_ret_stack;
 	barrier();
-	ti->ret_stack[index].ret = ret;
-	ti->ret_stack[index].func = func;
-	ti->ret_stack[index].calltime = time;
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
 
 	return 0;
 }
@@ -373,13 +375,12 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
 {
 	int index;
 
-	struct thread_info *ti = current_thread_info();
-	index = ti->curr_ret_stack;
-	*ret = ti->ret_stack[index].ret;
-	*func = ti->ret_stack[index].func;
-	*time = ti->ret_stack[index].calltime;
-	*overrun = atomic_read(&ti->trace_overrun);
-	ti->curr_ret_stack--;
+	index = current->curr_ret_stack;
+	*ret = current->ret_stack[index].ret;
+	*func = current->ret_stack[index].func;
+	*time = current->ret_stack[index].calltime;
+	*overrun = atomic_read(&current->trace_overrun);
+	current->curr_ret_stack--;
 }
 
 /*
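A short aside on the record handled above: push_return_trace() stores the patched return address, the traced function and an entry timestamp per slot, and pop_return_trace() reads them back in LIFO order. The definition of struct ftrace_ret_stack is not part of the hunks shown here; inferred from those accesses it is presumably along these lines (a sketch, field types deduced from the assignments above):

/* Sketch - per-entry record as implied by push_return_trace()/pop_return_trace(). */
struct ftrace_ret_stack {
	unsigned long		ret;		/* original return address */
	unsigned long		func;		/* traced function entry point */
	unsigned long long	calltime;	/* timestamp taken at function entry */
};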
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index a03e7f6d90c3..10786af95545 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -6,6 +6,7 @@
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include <asm/stacktrace.h>
 
 static void save_stack_warning(void *data, char *msg)
@@ -83,3 +84,66 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
+
+struct stack_frame {
+	const void __user	*next_fp;
+	unsigned long		ret_addr;
+};
+
+static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+{
+	int ret;
+
+	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
+		return 0;
+
+	ret = 1;
+	pagefault_disable();
+	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+		ret = 0;
+	pagefault_enable();
+
+	return ret;
+}
+
+static inline void __save_stack_trace_user(struct stack_trace *trace)
+{
+	const struct pt_regs *regs = task_pt_regs(current);
+	const void __user *fp = (const void __user *)regs->bp;
+
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = regs->ip;
+
+	while (trace->nr_entries < trace->max_entries) {
+		struct stack_frame frame;
+
+		frame.next_fp = NULL;
+		frame.ret_addr = 0;
+		if (!copy_stack_frame(fp, &frame))
+			break;
+		if ((unsigned long)fp < regs->sp)
+			break;
+		if (frame.ret_addr) {
+			trace->entries[trace->nr_entries++] =
+				frame.ret_addr;
+		}
+		if (fp == frame.next_fp)
+			break;
+		fp = frame.next_fp;
+	}
+}
+
+void save_stack_trace_user(struct stack_trace *trace)
+{
+	/*
+	 * Trace user stack if we are not a kernel thread
+	 */
+	if (current->mm) {
+		__save_stack_trace_user(trace);
+	}
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
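For context, a minimal hypothetical caller of the new save_stack_trace_user() interface, assuming the usual struct stack_trace fields from <linux/stacktrace.h>: the caller supplies the entry buffer, the walk follows the saved user frame pointers starting at regs->bp, and the trace is terminated with ULONG_MAX when there is room, just as save_stack_trace_tsk() does above. The function and buffer names here are illustrative only.

#include <linux/stacktrace.h>

#define EXAMPLE_DEPTH	64	/* arbitrary buffer size for this sketch */

static void example_capture_user_stack(void)
{
	static unsigned long entries[EXAMPLE_DEPTH];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= EXAMPLE_DEPTH,
		.entries	= entries,
		.skip		= 0,
	};

	/*
	 * Walks current's user-space frame pointers; does nothing for
	 * kernel threads, since they have no mm (current->mm == NULL).
	 */
	save_stack_trace_user(&trace);
}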