author		Paul Mundt <lethal@linux-sh.org>	2011-01-13 01:06:28 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2011-01-13 01:06:28 -0500
commit		f43dc23d5ea91fca257be02138a255f02d98e806 (patch)
tree		b29722f6e965316e90ac97abf79923ced250dc21 /arch/sh/kernel/ftrace.c
parent		f8e53553f452dcbf67cb89c8cba63a1cd6eb4cc0 (diff)
parent		4162cf64973df51fc885825bc9ca4d055891c49f (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6 into common/serial-rework

Conflicts:
	arch/sh/kernel/cpu/sh2/setup-sh7619.c
	arch/sh/kernel/cpu/sh2a/setup-mxg.c
	arch/sh/kernel/cpu/sh2a/setup-sh7201.c
	arch/sh/kernel/cpu/sh2a/setup-sh7203.c
	arch/sh/kernel/cpu/sh2a/setup-sh7206.c
	arch/sh/kernel/cpu/sh3/setup-sh7705.c
	arch/sh/kernel/cpu/sh3/setup-sh770x.c
	arch/sh/kernel/cpu/sh3/setup-sh7710.c
	arch/sh/kernel/cpu/sh3/setup-sh7720.c
	arch/sh/kernel/cpu/sh4/setup-sh4-202.c
	arch/sh/kernel/cpu/sh4/setup-sh7750.c
	arch/sh/kernel/cpu/sh4/setup-sh7760.c
	arch/sh/kernel/cpu/sh4a/setup-sh7343.c
	arch/sh/kernel/cpu/sh4a/setup-sh7366.c
	arch/sh/kernel/cpu/sh4a/setup-sh7722.c
	arch/sh/kernel/cpu/sh4a/setup-sh7723.c
	arch/sh/kernel/cpu/sh4a/setup-sh7724.c
	arch/sh/kernel/cpu/sh4a/setup-sh7763.c
	arch/sh/kernel/cpu/sh4a/setup-sh7770.c
	arch/sh/kernel/cpu/sh4a/setup-sh7780.c
	arch/sh/kernel/cpu/sh4a/setup-sh7785.c
	arch/sh/kernel/cpu/sh4a/setup-sh7786.c
	arch/sh/kernel/cpu/sh4a/setup-shx3.c
	arch/sh/kernel/cpu/sh5/setup-sh5.c
	drivers/serial/sh-sci.c
	drivers/serial/sh-sci.h
	include/linux/serial_sci.h
Diffstat (limited to 'arch/sh/kernel/ftrace.c')
 -rw-r--r--  arch/sh/kernel/ftrace.c  270
 1 file changed, 269 insertions(+), 1 deletion(-)
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 066f37dc32a9..30e13196d35b 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -16,9 +16,13 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <asm/ftrace.h>
 #include <asm/cacheflush.h>
+#include <asm/unistd.h>
+#include <trace/syscall.h>
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];
 
 static unsigned char ftrace_nop[4];
@@ -58,6 +62,150 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	return ftrace_replaced_code;
 }
 
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU,
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUs from executing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: an IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ *    and the new code into the "code" buffer.
+ * 2) Wait for any running NMIs to finish and set a flag that says
+ *    we are modifying code; this is done in an atomic operation.
+ * 3) Write the code.
+ * 4) Clear the flag.
+ * 5) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is call
+ * "ftrace_nmi_enter". This checks whether the flag is set to write,
+ * and if it is, it writes what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code,
+ * it is OK to write to that code location if the contents being
+ * written are the same as what already exists.
+ */
+#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
+static atomic_t nmi_running = ATOMIC_INIT(0);
+static int mod_code_status;		/* holds return value of text write */
+static void *mod_code_ip;		/* holds the IP to write to */
+static void *mod_code_newcode;	/* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+	int r;
+
+	r = snprintf(buf, size, "%u %u",
+		     nmi_wait_count,
+		     atomic_read(&nmi_update_count));
+	return r;
+}
+
+static void clear_mod_flag(void)
+{
+	int old = atomic_read(&nmi_running);
+
+	for (;;) {
+		int new = old & ~MOD_CODE_WRITE_FLAG;
+
+		if (old == new)
+			break;
+
+		old = atomic_cmpxchg(&nmi_running, old, new);
+	}
+}
+
+static void ftrace_mod_code(void)
+{
+	/*
+	 * Yes, more than one CPU process can be writing to mod_code_status.
+	 * (and the code itself)
+	 * But if one were to fail, then they all should, and if one were
+	 * to succeed, then they all should.
+	 */
+	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+					     MCOUNT_INSN_SIZE);
+
+	/* if we fail, then kill any new writers */
+	if (mod_code_status)
+		clear_mod_flag();
+}
+
+void ftrace_nmi_enter(void)
+{
+	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+		smp_rmb();
+		ftrace_mod_code();
+		atomic_inc(&nmi_update_count);
+	}
+	/* Must have previous changes seen before executions */
+	smp_mb();
+}
+
+void ftrace_nmi_exit(void)
+{
+	/* Finish all executions before clearing nmi_running */
+	smp_mb();
+	atomic_dec(&nmi_running);
+}
+
+static void wait_for_nmi_and_set_mod_flag(void)
+{
+	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
+		return;
+
+	do {
+		cpu_relax();
+	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
+
+	nmi_wait_count++;
+}
+
+static void wait_for_nmi(void)
+{
+	if (!atomic_read(&nmi_running))
+		return;
+
+	do {
+		cpu_relax();
+	} while (atomic_read(&nmi_running));
+
+	nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+	mod_code_ip = (void *)ip;
+	mod_code_newcode = new_code;
+
+	/* The buffers need to be visible before we let NMIs write them */
+	smp_mb();
+
+	wait_for_nmi_and_set_mod_flag();
+
+	/* Make sure all running NMIs have finished before we write the code */
+	smp_mb();
+
+	ftrace_mod_code();
+
+	/* Make sure the write happens before clearing the bit */
+	smp_mb();
+
+	clear_mod_flag();
+	wait_for_nmi();
+
+	return mod_code_status;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 			      unsigned char *new_code)
 {
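The five-step handshake described in the comment above is easy to lose among the kernel details. As a rough illustration only, here is a user-space sketch of the same protocol using C11 atomics; every name (patch_site, nmi_enter_sim, and so on) is a hypothetical stand-in for the kernel's nmi_running, mod_code_ip, and mod_code_newcode machinery, and plain memcpy stands in for probe_kernel_write():

	#include <stdatomic.h>
	#include <stddef.h>
	#include <string.h>

	#define MOD_WRITE_FLAG (1u << 31)	/* mirrors MOD_CODE_WRITE_FLAG */

	static atomic_uint nmi_running_sim;
	static void *patch_site;		/* mirrors mod_code_ip */
	static const void *patch_bytes;		/* mirrors mod_code_newcode */
	static size_t patch_len;

	/* "NMI" entry: if a write is pending, perform the same write ourselves. */
	static void nmi_enter_sim(void)
	{
		if ((atomic_fetch_add(&nmi_running_sim, 1) + 1) & MOD_WRITE_FLAG)
			memcpy(patch_site, patch_bytes, patch_len);
		atomic_thread_fence(memory_order_seq_cst);
	}

	static void nmi_exit_sim(void)
	{
		atomic_thread_fence(memory_order_seq_cst);
		atomic_fetch_sub(&nmi_running_sim, 1);
	}

	/* The patching side: steps 1-5 from the comment above. */
	static void patch_text_sim(void *ip, const void *code, size_t len)
	{
		unsigned int expected = 0;

		patch_site = ip;		/* step 1: fill the buffers */
		patch_bytes = code;
		patch_len = len;
		atomic_thread_fence(memory_order_seq_cst);

		/* step 2: wait for "NMIs" to drain, then set the flag atomically */
		while (!atomic_compare_exchange_weak(&nmi_running_sim, &expected,
						     MOD_WRITE_FLAG))
			expected = 0;

		memcpy(ip, code, len);		/* step 3: write the code */

		atomic_fetch_and(&nmi_running_sim, ~MOD_WRITE_FLAG);	/* step 4 */

		while (atomic_load(&nmi_running_sim))	/* step 5: drain again */
			;
	}

Racing writers are harmless here for exactly the reason the comment gives: everyone writes identical bytes to the same location.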
@@ -82,7 +230,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		return -EINVAL;
 
 	/* replace the text with the new text */
-	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+	if (do_ftrace_mod_code(ip, new_code))
 		return -EPERM;
 
 	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
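For context that the hunk above elides: ftrace_modify_code first reads back the bytes at ip and compares them against the expected old code, so a failed expectation aborts with -EINVAL rather than patching blindly. A condensed sketch of that read-verify-write pattern, with plain memcpy/memcmp as hypothetical stand-ins for the kernel's probe_kernel_read() and the NMI-safe do_ftrace_mod_code():

	#include <errno.h>
	#include <string.h>

	#define INSN_SIZE 4	/* stands in for MCOUNT_INSN_SIZE */

	static int modify_code_sketch(unsigned char *ip,
				      const unsigned char *old_code,
				      const unsigned char *new_code)
	{
		unsigned char cur[INSN_SIZE];

		memcpy(cur, ip, INSN_SIZE);		/* read what is there now */

		if (memcmp(cur, old_code, INSN_SIZE))	/* refuse to patch the unexpected */
			return -EINVAL;

		memcpy(ip, new_code, INSN_SIZE);	/* the kernel routes this write
							 * through the NMI-safe path above */
		return 0;				/* caller then flushes the icache */
	}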
@@ -131,3 +279,123 @@ int __init ftrace_dyn_arch_init(void *data)
 
 	return 0;
 }
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod(unsigned long ip, unsigned long old_addr,
+		      unsigned long new_addr)
+{
+	unsigned char code[MCOUNT_INSN_SIZE];
+
+	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	if (old_addr != __raw_readl((unsigned long *)code))
+		return -EINVAL;
+
+	__raw_writel(new_addr, ip);
+	return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip, old_addr, new_addr;
+
+	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+	old_addr = (unsigned long)(&skip_trace);
+	new_addr = (unsigned long)(&ftrace_graph_caller);
+
+	return ftrace_mod(ip, old_addr, new_addr);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip, old_addr, new_addr;
+
+	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+	old_addr = (unsigned long)(&ftrace_graph_caller);
+	new_addr = (unsigned long)(&skip_trace);
+
+	return ftrace_mod(ip, old_addr, new_addr);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it onto the stack of return
+ * addresses in the current thread_info.
+ *
+ * This is the main routine for the function graph tracer. The function
+ * graph tracer essentially works like this:
+ *
+ * parent is the stack address containing self_addr's return address.
+ * We pull the real return address out of parent and store it in
+ * current's ret_stack. Then, we replace the return address on the stack
+ * with the address of return_to_handler. self_addr is the function that
+ * called mcount.
+ *
+ * When self_addr returns, it will jump to return_to_handler, which calls
+ * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
+ * return address off of current's ret_stack and jump to it.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	int faulted, err;
+	struct ftrace_graph_ent trace;
+	unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	/*
+	 * Protect against a fault, even if it shouldn't
+	 * happen. This tool is too intrusive to
+	 * ignore such a protection.
+	 */
+	__asm__ __volatile__(
+		"1:				\n\t"
+		"mov.l	@%2, %0			\n\t"
+		"2:				\n\t"
+		"mov.l	%3, @%2			\n\t"
+		"mov	#0, %1			\n\t"
+		"3:				\n\t"
+		".section .fixup, \"ax\"	\n\t"
+		"4:				\n\t"
+		"mov.l	5f, %0			\n\t"
+		"jmp	@%0			\n\t"
+		" mov	#1, %1			\n\t"
+		".balign 4			\n\t"
+		"5:	.long 3b		\n\t"
+		".previous			\n\t"
+		".section __ex_table,\"a\"	\n\t"
+		".long 1b, 4b			\n\t"
+		".long 2b, 4b			\n\t"
+		".previous			\n\t"
+		: "=&r" (old), "=r" (faulted)
+		: "r" (parent), "r" (return_hooker)
+	);
+
+	if (unlikely(faulted)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		return;
+	}
+
+	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
+	if (err == -EBUSY) {
+		__raw_writel(old, parent);
+		return;
+	}
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		__raw_writel(old, parent);
+	}
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
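The return-address hooking that prepare_ftrace_return performs with SH-specific assembly can be modelled in portable C. A toy sketch, with hypothetical names throughout (frame_slot plays the role of parent, return_to_handler_sim the role of return_to_handler); it illustrates only the pointer swap, not the ret_stack bookkeeping or the fault protection:

	#include <stdio.h>

	static unsigned long saved_ret;	/* toy stand-in for current's ret_stack */

	/* Trampoline that the diverted return lands on. */
	static void return_to_handler_sim(void)
	{
		printf("traced function returned; real return address was %#lx\n",
		       saved_ret);
	}

	/* Pull the real return address out of *parent and divert it. */
	static void prepare_return_sim(unsigned long *parent, unsigned long self_addr)
	{
		saved_ret = *parent;
		*parent = (unsigned long)&return_to_handler_sim;
		printf("hooked return of function at %#lx\n", self_addr);
	}

	int main(void)
	{
		/* pretend this is the stack slot holding a return address */
		unsigned long frame_slot = 0x8c0123f0UL;

		prepare_return_sim(&frame_slot, 0x8c001000UL);

		/* the "return": control now goes to the trampoline instead */
		((void (*)(void))frame_slot)();
		return 0;
	}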