aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/ftrace.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--arch/x86/kernel/ftrace.c182
1 files changed, 175 insertions, 7 deletions
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 69149337f2fe..16a571dea2ef 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -14,14 +14,178 @@
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <linux/ftrace.h> 15#include <linux/ftrace.h>
16#include <linux/percpu.h> 16#include <linux/percpu.h>
17#include <linux/sched.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/list.h> 19#include <linux/list.h>
19 20
20#include <asm/ftrace.h> 21#include <asm/ftrace.h>
22#include <linux/ftrace.h>
21#include <asm/nops.h> 23#include <asm/nops.h>
24#include <asm/nmi.h>
22 25
23 26
24static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; 27
28#ifdef CONFIG_FUNCTION_RET_TRACER
29
30/*
31 * These functions are picked from those used on
32 * this page for dynamic ftrace. They have been
33 * simplified to ignore all traces in NMI context.
34 */
35static atomic_t in_nmi;
36
37void ftrace_nmi_enter(void)
38{
39 atomic_inc(&in_nmi);
40}
41
42void ftrace_nmi_exit(void)
43{
44 atomic_dec(&in_nmi);
45}
46
47/*
48 * Synchronize accesses to return addresses stack with
49 * interrupts.
50 */
51static raw_spinlock_t ret_stack_lock;
52
53/* Add a function return address to the trace stack on thread info.*/
54static int push_return_trace(unsigned long ret, unsigned long long time,
55 unsigned long func)
56{
57 int index;
58 struct thread_info *ti;
59 unsigned long flags;
60 int err = 0;
61
62 raw_local_irq_save(flags);
63 __raw_spin_lock(&ret_stack_lock);
64
65 ti = current_thread_info();
66 /* The return trace stack is full */
67 if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
68 err = -EBUSY;
69 goto out;
70 }
71
72 index = ++ti->curr_ret_stack;
73 ti->ret_stack[index].ret = ret;
74 ti->ret_stack[index].func = func;
75 ti->ret_stack[index].calltime = time;
76
77out:
78 __raw_spin_unlock(&ret_stack_lock);
79 raw_local_irq_restore(flags);
80 return err;
81}
82
83/* Retrieve a function return address from the trace stack on thread info.*/
84static void pop_return_trace(unsigned long *ret, unsigned long long *time,
85 unsigned long *func)
86{
87 struct thread_info *ti;
88 int index;
89 unsigned long flags;
90
91 raw_local_irq_save(flags);
92 __raw_spin_lock(&ret_stack_lock);
93
94 ti = current_thread_info();
95 index = ti->curr_ret_stack;
96 *ret = ti->ret_stack[index].ret;
97 *func = ti->ret_stack[index].func;
98 *time = ti->ret_stack[index].calltime;
99 ti->curr_ret_stack--;
100
101 __raw_spin_unlock(&ret_stack_lock);
102 raw_local_irq_restore(flags);
103}
104
105/*
106 * Send the trace to the ring-buffer.
107 * @return the original return address.
108 */
109unsigned long ftrace_return_to_handler(void)
110{
111 struct ftrace_retfunc trace;
112 pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
113 trace.rettime = cpu_clock(raw_smp_processor_id());
114 ftrace_function_return(&trace);
115
116 return trace.ret;
117}
118
119/*
120 * Hook the return address and push it in the stack of return addrs
121 * in current thread info.
122 */
123asmlinkage
124void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
125{
126 unsigned long old;
127 unsigned long long calltime;
128 int faulted;
129 unsigned long return_hooker = (unsigned long)
130 &return_to_handler;
131
132	/* NMIs are currently unsupported */
133 if (atomic_read(&in_nmi))
134 return;
135
136 /*
137 * Protect against fault, even if it shouldn't
138	 * happen. This tool is too intrusive to
139 * ignore such a protection.
140 */
141 asm volatile(
142 "1: movl (%[parent_old]), %[old]\n"
143 "2: movl %[return_hooker], (%[parent_replaced])\n"
144 " movl $0, %[faulted]\n"
145
146 ".section .fixup, \"ax\"\n"
147 "3: movl $1, %[faulted]\n"
148 ".previous\n"
149
150 ".section __ex_table, \"a\"\n"
151 " .long 1b, 3b\n"
152 " .long 2b, 3b\n"
153 ".previous\n"
154
155 : [parent_replaced] "=r" (parent), [old] "=r" (old),
156 [faulted] "=r" (faulted)
157 : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
158 : "memory"
159 );
160
161 if (WARN_ON(faulted)) {
162 unregister_ftrace_return();
163 return;
164 }
165
166 if (WARN_ON(!__kernel_text_address(old))) {
167 unregister_ftrace_return();
168 *parent = old;
169 return;
170 }
171
172 calltime = cpu_clock(raw_smp_processor_id());
173
174 if (push_return_trace(old, calltime, self_addr) == -EBUSY)
175 *parent = old;
176}
177
178static int __init init_ftrace_function_return(void)
179{
180 ret_stack_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
181 return 0;
182}
183device_initcall(init_ftrace_function_return);
184
185
186#endif
187
188#ifdef CONFIG_DYNAMIC_FTRACE
25 189
26union ftrace_code_union { 190union ftrace_code_union {
27 char code[MCOUNT_INSN_SIZE]; 191 char code[MCOUNT_INSN_SIZE];
@@ -31,17 +195,11 @@ union ftrace_code_union {
31 } __attribute__((packed)); 195 } __attribute__((packed));
32}; 196};
33 197
34
35static int ftrace_calc_offset(long ip, long addr) 198static int ftrace_calc_offset(long ip, long addr)
36{ 199{
37 return (int)(addr - ip); 200 return (int)(addr - ip);
38} 201}
39 202
40unsigned char *ftrace_nop_replace(void)
41{
42 return ftrace_nop;
43}
44
45unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 203unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
46{ 204{
47 static union ftrace_code_union calc; 205 static union ftrace_code_union calc;
@@ -183,6 +341,15 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
183} 341}
184 342
185 343
344
345
346static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
347
348unsigned char *ftrace_nop_replace(void)
349{
350 return ftrace_nop;
351}
352
186int 353int
187ftrace_modify_code(unsigned long ip, unsigned char *old_code, 354ftrace_modify_code(unsigned long ip, unsigned char *old_code,
188 unsigned char *new_code) 355 unsigned char *new_code)
@@ -292,3 +459,4 @@ int __init ftrace_dyn_arch_init(void *data)
292 459
293 return 0; 460 return 0;
294} 461}
462#endif