author    Ingo Molnar <mingo@elte.hu>  2009-02-19 06:13:33 -0500
committer Ingo Molnar <mingo@elte.hu>  2009-02-19 06:13:33 -0500
commit    4cd0332db7e8f57cc082bab11d82c064a9721737
tree      b6de7771d67c5bf6eeb890fa0f5a901365104b98 /arch/x86
parent    40999096e8b9872199bf56ecd0c4d98397ccaf2f
parent    712406a6bf59ebf4a00358bb59a4a2a1b2953d90
Merge branch 'mainline/function-graph' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/function-graph-tracer
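The code removed from arch/x86 below is the function-graph tracer's per-task return-address bookkeeping: prepare_ftrace_return() swaps a function's return address for the return_to_handler trampoline and pushes the real address onto current->ret_stack, and when the traced function returns, the trampoline calls ftrace_return_to_handler() to pop it back and hand the timing data to the tracer. After this merge the x86-private push/pop helpers are gone and the arch code calls the generic ftrace_push_return_trace() instead. The following is only a self-contained userspace sketch of that push/pop discipline; the struct layout and depth limit mirror the removed kernel code, but the names push_return, pop_return, and RETFUNC_DEPTH are illustrative stand-ins, not kernel APIs.

#include <stdio.h>

#define RETFUNC_DEPTH 50                  /* stand-in for FTRACE_RETFUNC_DEPTH */

struct ret_entry {                        /* mirrors the removed ftrace_ret_stack */
        unsigned long ret;                /* original return address */
        unsigned long func;               /* address of the traced function */
        unsigned long long calltime;      /* timestamp taken at function entry */
};

static struct ret_entry ret_stack[RETFUNC_DEPTH];
static int curr_ret_stack = -1;           /* -1 == empty, as in the kernel code */
static unsigned long trace_overrun;

/* Push one frame; fail (the kernel returns -EBUSY) when the fixed-size stack is full. */
static int push_return(unsigned long ret, unsigned long func,
                       unsigned long long time)
{
        if (curr_ret_stack == RETFUNC_DEPTH - 1) {
                trace_overrun++;
                return -1;
        }
        curr_ret_stack++;
        ret_stack[curr_ret_stack].ret = ret;
        ret_stack[curr_ret_stack].func = func;
        ret_stack[curr_ret_stack].calltime = time;
        return 0;
}

/* Pop the most recent frame and hand back the saved return address. */
static unsigned long pop_return(void)
{
        unsigned long ret;

        if (curr_ret_stack < 0)           /* kernel WARNs and falls back to panic() */
                return 0;
        ret = ret_stack[curr_ret_stack].ret;
        curr_ret_stack--;
        return ret;
}

int main(void)
{
        /* Two nested "calls": addresses come back in LIFO order on return. */
        push_return(0x1000, 0xa000, 10);
        push_return(0x2000, 0xb000, 20);
        printf("inner return -> %#lx\n", pop_return());   /* 0x2000 */
        printf("outer return -> %#lx\n", pop_return());   /* 0x1000 */
        return 0;
}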
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/ftrace.h   25
-rw-r--r--  arch/x86/kernel/dumpstack.c      1
-rw-r--r--  arch/x86/kernel/ftrace.c        75
3 files changed, 2 insertions, 99 deletions
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index b55b4a7fbefd..db24c2278be0 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -55,29 +55,4 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-#ifndef __ASSEMBLY__
-
-/*
- * Stack of return addresses for functions
- * of a thread.
- * Used in struct thread_info
- */
-struct ftrace_ret_stack {
-        unsigned long ret;
-        unsigned long func;
-        unsigned long long calltime;
-};
-
-/*
- * Primary handler of a function return.
- * It relays on ftrace_return_to_handler.
- * Defined in entry_32/64.S
- */
-extern void return_to_handler(void);
-
-#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
 #endif /* _ASM_X86_FTRACE_H */
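The struct and the return_to_handler() declaration removed above do not disappear; per the merged function-graph branch they are consolidated in the architecture-independent ftrace header so every arch can share the same return-stack handling, which is also why dumpstack.c below gains a #include <linux/ftrace.h>. As a rough sketch only, assuming the fields are carried over unchanged, the generic header would now provide something along these lines:

/* Presumed shape in <linux/ftrace.h> after this merge -- an assumption,
 * since this diff is limited to arch/x86 and does not show the generic side. */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
struct ftrace_ret_stack {
        unsigned long ret;
        unsigned long func;
        unsigned long long calltime;
};

/* Trampoline that recovers the saved return address via ftrace_return_to_handler(). */
extern void return_to_handler(void);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */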
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 077c9ea655fc..4325165753d8 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -10,6 +10,7 @@
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
+#include <linux/ftrace.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 2f9c0c8cb4c7..c2e057d9f88c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -369,79 +369,6 @@ int ftrace_disable_ftrace_graph_caller(void)
 
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-                        unsigned long func, int *depth)
-{
-        int index;
-
-        if (!current->ret_stack)
-                return -EBUSY;
-
-        /* The return trace stack is full */
-        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-                atomic_inc(&current->trace_overrun);
-                return -EBUSY;
-        }
-
-        index = ++current->curr_ret_stack;
-        barrier();
-        current->ret_stack[index].ret = ret;
-        current->ret_stack[index].func = func;
-        current->ret_stack[index].calltime = time;
-        *depth = index;
-
-        return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-        int index;
-
-        index = current->curr_ret_stack;
-
-        if (unlikely(index < 0)) {
-                ftrace_graph_stop();
-                WARN_ON(1);
-                /* Might as well panic, otherwise we have no where to go */
-                *ret = (unsigned long)panic;
-                return;
-        }
-
-        *ret = current->ret_stack[index].ret;
-        trace->func = current->ret_stack[index].func;
-        trace->calltime = current->ret_stack[index].calltime;
-        trace->overrun = atomic_read(&current->trace_overrun);
-        trace->depth = index;
-        barrier();
-        current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-        struct ftrace_graph_ret trace;
-        unsigned long ret;
-
-        pop_return_trace(&trace, &ret);
-        trace.rettime = cpu_clock(raw_smp_processor_id());
-        ftrace_graph_return(&trace);
-
-        if (unlikely(!ret)) {
-                ftrace_graph_stop();
-                WARN_ON(1);
-                /* Might as well panic. What else to do? */
-                ret = (unsigned long)panic;
-        }
-
-        return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
@@ -494,7 +421,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 
         calltime = cpu_clock(raw_smp_processor_id());
 
-        if (push_return_trace(old, calltime,
+        if (ftrace_push_return_trace(old, calltime,
                                 self_addr, &trace.depth) == -EBUSY) {
                 *parent = old;
                 return;