Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--	arch/x86/kernel/ftrace.c	143
1 file changed, 30 insertions(+), 113 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 231bdd3c5b1c..a85da1764b1c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/list.h>
 
+#include <asm/cacheflush.h>
 #include <asm/ftrace.h>
 #include <linux/ftrace.h>
 #include <asm/nops.h>
@@ -26,6 +27,18 @@
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+int ftrace_arch_code_modify_prepare(void)
+{
+	set_kernel_text_rw();
+	return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+	set_kernel_text_ro();
+	return 0;
+}
+
 union ftrace_code_union {
 	char code[MCOUNT_INSN_SIZE];
 	struct {
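
The two hooks added in this hunk let the generic ftrace core drop the read-only protection on kernel text (CONFIG_DEBUG_RODATA) only for the duration of a batch update and restore it afterwards, instead of leaving the text permanently writable. A minimal sketch of the expected call pattern on the core side, assuming the stop_machine()-based update loop of this era; sketch_run_update_code() is an illustrative name, not the actual kernel/trace/ftrace.c implementation:

#include <linux/stop_machine.h>

/* illustrative sketch only; the real caller lives in kernel/trace/ftrace.c */
static void sketch_run_update_code(int command)
{
	/* make kernel text writable (ends up in set_kernel_text_rw()) */
	if (ftrace_arch_code_modify_prepare())
		return;

	/* rewrite every mcount call site while the text is writable */
	stop_machine(__ftrace_modify_code, &command, NULL);

	/* seal the text read-only again (ends up in set_kernel_text_ro()) */
	ftrace_arch_code_modify_post_process();
}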
@@ -82,7 +95,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  * are the same as what exists.
  */
 
-static atomic_t in_nmi = ATOMIC_INIT(0);
+static atomic_t nmi_running = ATOMIC_INIT(0);
 static int mod_code_status;		/* holds return value of text write */
 static int mod_code_write;		/* set when NMI should do the write */
 static void *mod_code_ip;		/* holds the IP to write to */
@@ -111,12 +124,16 @@ static void ftrace_mod_code(void)
 	 */
 	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
 					     MCOUNT_INSN_SIZE);
+
+	/* if we fail, then kill any new writers */
+	if (mod_code_status)
+		mod_code_write = 0;
 }
 
 void ftrace_nmi_enter(void)
 {
-	atomic_inc(&in_nmi);
-	/* Must have in_nmi seen before reading write flag */
+	atomic_inc(&nmi_running);
+	/* Must have nmi_running seen before reading write flag */
 	smp_mb();
 	if (mod_code_write) {
 		ftrace_mod_code();
@@ -126,22 +143,21 @@ void ftrace_nmi_enter(void)
 
 void ftrace_nmi_exit(void)
 {
-	/* Finish all executions before clearing in_nmi */
+	/* Finish all executions before clearing nmi_running */
 	smp_wmb();
-	atomic_dec(&in_nmi);
+	atomic_dec(&nmi_running);
 }
 
 static void wait_for_nmi(void)
 {
-	int waited = 0;
+	if (!atomic_read(&nmi_running))
+		return;
 
-	while (atomic_read(&in_nmi)) {
-		waited = 1;
+	do {
 		cpu_relax();
-	}
+	} while (atomic_read(&nmi_running));
 
-	if (waited)
-		nmi_wait_count++;
+	nmi_wait_count++;
 }
 
 static int
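
Why all this machinery: text patching can race with an NMI, and an NMI handler must not spin on a lock. The scheme visible above is that the writer publishes the pending write and any NMI arriving mid-update performs it itself via ftrace_mod_code(); the new `if (mod_code_status)` check keeps later NMIs from re-attempting a write that has already failed, and wait_for_nmi() now bumps nmi_wait_count on every actual wait instead of tracking a separate `waited` flag. For orientation, a writer-side sketch of the handshake, paraphrasing the do_ftrace_mod_code() already in this file (sketch_do_ftrace_mod_code() is an illustrative name and the barrier comments are condensed):

/* paraphrase of the writer side; not a drop-in replacement */
static int sketch_do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;	/* publish the pending write */
	mod_code_newcode = new_code;

	/* the data must be visible before the write flag is set */
	smp_wmb();
	mod_code_write = 1;

	/* the flag must be visible before we wait on NMIs */
	smp_mb();
	wait_for_nmi();

	/* either we write here, or an in-flight NMI already did */
	ftrace_mod_code();

	/* the write must complete before the flag is cleared */
	smp_wmb();
	mod_code_write = 0;

	smp_mb();
	wait_for_nmi();			/* let late NMIs drain out */

	return mod_code_status;
}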
@@ -368,100 +384,8 @@ int ftrace_disable_ftrace_graph_caller(void)
 	return ftrace_mod_jmp(ip, old_offset, new_offset);
 }
 
-#else /* CONFIG_DYNAMIC_FTRACE */
-
-/*
- * These functions are picked from those used on
- * this page for dynamic ftrace. They have been
- * simplified to ignore all traces in NMI context.
- */
-static atomic_t in_nmi;
-
-void ftrace_nmi_enter(void)
-{
-	atomic_inc(&in_nmi);
-}
-
-void ftrace_nmi_exit(void)
-{
-	atomic_dec(&in_nmi);
-}
-
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func, int *depth)
-{
-	int index;
-
-	if (!current->ret_stack)
-		return -EBUSY;
-
-	/* The return trace stack is full */
-	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-		atomic_inc(&current->trace_overrun);
-		return -EBUSY;
-	}
-
-	index = ++current->curr_ret_stack;
-	barrier();
-	current->ret_stack[index].ret = ret;
-	current->ret_stack[index].func = func;
-	current->ret_stack[index].calltime = time;
-	*depth = index;
-
-	return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-	int index;
-
-	index = current->curr_ret_stack;
-
-	if (unlikely(index < 0)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic, otherwise we have no where to go */
-		*ret = (unsigned long)panic;
-		return;
-	}
-
-	*ret = current->ret_stack[index].ret;
-	trace->func = current->ret_stack[index].func;
-	trace->calltime = current->ret_stack[index].calltime;
-	trace->overrun = atomic_read(&current->trace_overrun);
-	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-	struct ftrace_graph_ret trace;
-	unsigned long ret;
-
-	pop_return_trace(&trace, &ret);
-	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_graph_return(&trace);
-
-	if (unlikely(!ret)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic. What else to do? */
-		ret = (unsigned long)panic;
-	}
-
-	return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
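
The roughly ninety lines removed here are not lost: push_return_trace(), pop_return_trace() and ftrace_return_to_handler() move to the generic function-graph code (kernel/trace/trace_functions_graph.c) so other architectures can share them, and the !CONFIG_DYNAMIC_FTRACE stub in_nmi counter becomes unnecessary once the generic in_nmi() test is used (see the last hunk). What the remaining arch code relies on, roughly as declared in linux/ftrace.h of this era, with the prototype inferred from the call sites in this diff:

/* generic replacements for the helpers deleted above */
extern int ftrace_push_return_trace(unsigned long ret,
				    unsigned long long time,
				    unsigned long func, int *depth);
extern unsigned long ftrace_return_to_handler(void);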
@@ -476,7 +400,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		&return_to_handler;
 
 	/* Nmi's are currently unsupported */
-	if (unlikely(atomic_read(&in_nmi)))
+	if (unlikely(in_nmi()))
 		return;
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
@@ -512,16 +436,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		return;
 	}
 
-	if (unlikely(!__kernel_text_address(old))) {
-		ftrace_graph_stop();
-		*parent = old;
-		WARN_ON(1);
-		return;
-	}
-
-	calltime = cpu_clock(raw_smp_processor_id());
+	calltime = trace_clock_local();
 
-	if (push_return_trace(old, calltime,
+	if (ftrace_push_return_trace(old, calltime,
 				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
 		return;
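
The final hunk makes three simplifications: the private atomic in_nmi counter gives way to the generic in_nmi() test, the __kernel_text_address() sanity check on the saved return address is dropped outright, and cpu_clock(raw_smp_processor_id()) is replaced by trace_clock_local(). Roughly what the two swapped-in helpers amount to, paraphrased from linux/hardirq.h and kernel/trace/trace_clock.c of this era (the sketch_ names are illustrative):

/* in_nmi() tests the NMI bits that nmi_enter() sets in preempt_count() */
#define sketch_in_nmi()		(preempt_count() & NMI_MASK)

/* trace_clock_local() is essentially sched_clock() with IRQs held off */
static u64 sketch_trace_clock_local(void)
{
	unsigned long flags;
	u64 clock;

	raw_local_irq_save(flags);
	clock = sched_clock();
	raw_local_irq_restore(flags);

	return clock;
}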