author     Frederic Weisbecker <fweisbec@gmail.com>  2008-11-25 15:07:04 -0500
committer  Ingo Molnar <mingo@elte.hu>               2008-11-25 19:59:45 -0500
commit     fb52607afcd0629776f1dc9e657647ceae81dd50 (patch)
tree       7bf43b41ff8510d3098c089913cce56a9049f0fd
parent     509dceef6470442d8c7b8a43ec34125205840b3c (diff)
tracing/function-return-tracer: change the name into function-graph-tracer
Impact: cleanup

This patch changes the name of the "return function tracer" into
function-graph-tracer, a more suitable name for a tracer that lets one
retrieve the ordered call stack during the code flow.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/x86/Kconfig                      |  2
-rw-r--r--  arch/x86/include/asm/ftrace.h         |  4
-rw-r--r--  arch/x86/kernel/Makefile              |  4
-rw-r--r--  arch/x86/kernel/entry_32.S            | 12
-rw-r--r--  arch/x86/kernel/ftrace.c              | 12
-rw-r--r--  include/linux/ftrace.h                | 24
-rw-r--r--  include/linux/ftrace_irq.h            |  2
-rw-r--r--  include/linux/sched.h                 |  2
-rw-r--r--  kernel/Makefile                       |  2
-rw-r--r--  kernel/fork.c                         |  4
-rw-r--r--  kernel/sched.c                        |  2
-rw-r--r--  kernel/trace/Kconfig                  | 19
-rw-r--r--  kernel/trace/Makefile                 |  2
-rw-r--r--  kernel/trace/ftrace.c                 | 26
-rw-r--r--  kernel/trace/trace.c                  | 18
-rw-r--r--  kernel/trace/trace.h                  | 12
-rw-r--r--  kernel/trace/trace_functions_graph.c  | 98
17 files changed, 173 insertions, 72 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e49a4fd718fe..0842b1127684 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,7 +29,7 @@ config X86
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_RET_TRACER if X86_32
+	select HAVE_FUNCTION_GRAPH_TRACER if X86_32
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 754a3e082f94..7e61b4ceb9a4 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -28,7 +28,7 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 #ifndef __ASSEMBLY__
 
@@ -51,6 +51,6 @@ struct ftrace_ret_stack {
 extern void return_to_handler(void);
 
 #endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index af2bc36ca1c4..64939a0c3986 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -14,7 +14,7 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
-ifdef CONFIG_FUNCTION_RET_TRACER
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
 # Don't trace __switch_to() but let it for function tracer
 CFLAGS_REMOVE_process_32.o = -pg
 endif
@@ -70,7 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
 obj-$(CONFIG_X86_IO_APIC) += io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_FUNCTION_RET_TRACER) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 74defe21ba42..2b1f0f081a6b 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1188,9 +1188,9 @@ ENTRY(mcount)
 
 	cmpl $ftrace_stub, ftrace_trace_function
 	jnz trace
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	cmpl $ftrace_stub, ftrace_function_return
-	jnz ftrace_return_caller
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	cmpl $ftrace_stub, ftrace_graph_function
+	jnz ftrace_graph_caller
 #endif
 .globl ftrace_stub
 ftrace_stub:
@@ -1215,8 +1215,8 @@ END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-ENTRY(ftrace_return_caller)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
 	cmpl $0, function_trace_stop
 	jne ftrace_stub
 
@@ -1230,7 +1230,7 @@ ENTRY(ftrace_return_caller)
 	popl %ecx
 	popl %eax
 	ret
-END(ftrace_return_caller)
+END(ftrace_graph_caller)
 
 .globl return_to_handler
 return_to_handler:
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index bb137f7297ed..3595a4c14aba 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -323,7 +323,7 @@ int __init ftrace_dyn_arch_init(void *data)
 }
 #endif
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 #ifndef CONFIG_DYNAMIC_FTRACE
 
@@ -389,11 +389,11 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
  */
 unsigned long ftrace_return_to_handler(void)
 {
-	struct ftrace_retfunc trace;
+	struct ftrace_graph_ret trace;
 	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
 				&trace.overrun);
 	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_function_return(&trace);
+	ftrace_graph_function(&trace);
 
 	return trace.ret;
 }
@@ -440,12 +440,12 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	);
 
 	if (WARN_ON(faulted)) {
-		unregister_ftrace_return();
+		unregister_ftrace_graph();
 		return;
 	}
 
 	if (WARN_ON(!__kernel_text_address(old))) {
-		unregister_ftrace_return();
+		unregister_ftrace_graph();
 		*parent = old;
 		return;
 	}
@@ -456,4 +456,4 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	*parent = old;
 }
 
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 7854d87b97b2..b4ac734ad8d6 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -115,8 +115,8 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
-#ifdef CONFIG_FUNCTION_RET_TRACER
-extern void ftrace_return_caller(void);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern void ftrace_graph_caller(void);
 #endif
 
 /**
@@ -315,7 +315,7 @@ ftrace_init_module(struct module *mod,
 /*
  * Structure that defines a return function trace.
  */
-struct ftrace_retfunc {
+struct ftrace_graph_ret {
 	unsigned long ret; /* Return address */
 	unsigned long func; /* Current function */
 	unsigned long long calltime;
@@ -324,22 +324,22 @@ struct ftrace_retfunc {
 	unsigned long overrun;
 };
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
 /* Type of a callback handler of tracing return function */
-typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
+typedef void (*trace_function_graph_t)(struct ftrace_graph_ret *);
 
-extern int register_ftrace_return(trace_function_return_t func);
+extern int register_ftrace_graph(trace_function_graph_t func);
 /* The current handler in use */
-extern trace_function_return_t ftrace_function_return;
-extern void unregister_ftrace_return(void);
+extern trace_function_graph_t ftrace_graph_function;
+extern void unregister_ftrace_graph(void);
 
-extern void ftrace_retfunc_init_task(struct task_struct *t);
-extern void ftrace_retfunc_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_task(struct task_struct *t);
+extern void ftrace_graph_exit_task(struct task_struct *t);
 #else
-static inline void ftrace_retfunc_init_task(struct task_struct *t) { }
-static inline void ftrace_retfunc_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_task(struct task_struct *t) { }
+static inline void ftrace_graph_exit_task(struct task_struct *t) { }
 #endif
 
 #endif /* _LINUX_FTRACE_H */
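
For orientation, the hunk above is the whole public surface of the renamed API:
register_ftrace_graph(), unregister_ftrace_graph(), and the trace_function_graph_t
callback type. The following is a minimal, hypothetical sketch of an in-kernel
client of that API; the demo_* names are invented for illustration and are not
part of this patch, and it is written as built-in code (device_initcall) because
the symbols are not exported to modules. It assumes nothing beyond the
declarations shown above.

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/init.h>

/* Hypothetical callback: called on each traced function exit with the
 * ftrace_graph_ret record that ftrace_return_to_handler() filled in. */
static void demo_graph_return(struct ftrace_graph_ret *trace)
{
	/* Duration of the call, from the entry and exit timestamps. */
	unsigned long long duration = trace->rettime - trace->calltime;

	pr_debug("return from %pF after %llu ns (overruns: %lu)\n",
		 (void *)trace->func, duration, trace->overrun);
}

static __init int demo_graph_init(void)
{
	/* Allocates the per-task return stacks and redirects mcount to
	 * ftrace_graph_caller; see the kernel/trace/ftrace.c hunks below. */
	return register_ftrace_graph(&demo_graph_return);
}

device_initcall(demo_graph_init);

The in-tree user wired up by this patch is trace_function_graph() in
kernel/trace/trace.c, registered from graph_trace_init() in the new
trace_functions_graph.c at the end of the diff.
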
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 0b4df55d7a74..366a054d0b05 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
 #define _LINUX_FTRACE_IRQ_H
 
 
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_RET_TRACER)
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
 extern void ftrace_nmi_enter(void);
 extern void ftrace_nmi_exit(void);
 #else
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d02a0ca70ee9..7ad48f2a2758 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1365,7 +1365,7 @@ struct task_struct {
 	unsigned long default_timer_slack_ns;
 
 	struct list_head *scm_work_list;
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	/* Index of current stored adress in ret_stack */
 	int curr_ret_stack;
 	/* Stack of return addresses for return function tracing */
diff --git a/kernel/Makefile b/kernel/Makefile
index 03a45e7e87b7..703cf3b7389c 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -21,7 +21,7 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_sched.o = -pg
 endif
-ifdef CONFIG_FUNCTION_RET_TRACER
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
 CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
 CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
 endif
diff --git a/kernel/fork.c b/kernel/fork.c
index d6e1a3205f62..5f82a999c032 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -140,7 +140,7 @@ void free_task(struct task_struct *tsk)
 	prop_local_destroy_single(&tsk->dirties);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
-	ftrace_retfunc_exit_task(tsk);
+	ftrace_graph_exit_task(tsk);
 	free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
@@ -1271,7 +1271,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	total_forks++;
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
-	ftrace_retfunc_init_task(p);
+	ftrace_graph_init_task(p);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
 	return p;
diff --git a/kernel/sched.c b/kernel/sched.c
index 388d9db044ab..52490bf6b884 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5901,7 +5901,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 * The idle tasks have their own, simple scheduling class:
 	 */
 	idle->sched_class = &idle_sched_class;
-	ftrace_retfunc_init_task(idle);
+	ftrace_graph_init_task(idle);
 }
 
 /*
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 620feadff67a..eb9b901e0777 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -12,7 +12,7 @@ config NOP_TRACER
 config HAVE_FUNCTION_TRACER
 	bool
 
-config HAVE_FUNCTION_RET_TRACER
+config HAVE_FUNCTION_GRAPH_TRACER
 	bool
 
 config HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -63,15 +63,18 @@ config FUNCTION_TRACER
 	  (the bootup default), then the overhead of the instructions is very
 	  small and not measurable even in micro-benchmarks.
 
-config FUNCTION_RET_TRACER
-	bool "Kernel Function return Tracer"
-	depends on HAVE_FUNCTION_RET_TRACER
+config FUNCTION_GRAPH_TRACER
+	bool "Kernel Function Graph Tracer"
+	depends on HAVE_FUNCTION_GRAPH_TRACER
 	depends on FUNCTION_TRACER
 	help
-	  Enable the kernel to trace a function at its return.
-	  It's first purpose is to trace the duration of functions.
-	  This is done by setting the current return address on the thread
-	  info structure of the current task.
+	  Enable the kernel to trace a function at both its return
+	  and its entry.
+	  It's first purpose is to trace the duration of functions and
+	  draw a call graph for each thread with some informations like
+	  the return value.
+	  This is done by setting the current return address on the current
+	  task structure into a stack of calls.
 
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
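
The new help text above describes the core trick: on entry the tracer swaps the
real return address of an instrumented function for its own handler and
remembers the original in a small per-task stack (the curr_ret_stack index and
ret_stack pointer added to task_struct in the sched.h hunk earlier). The
standalone C sketch below illustrates only that bookkeeping; the ret_slot,
push_return and pop_return names are invented for illustration and are not part
of the patch, and the fixed depth mirrors FTRACE_RETFUNC_DEPTH.

#include <stdio.h>

#define DEPTH 50	/* mirrors FTRACE_RETFUNC_DEPTH */

struct ret_slot {
	unsigned long ret;		/* original return address */
	unsigned long func;		/* traced function */
	unsigned long long calltime;	/* timestamp taken at entry */
};

/* Per-task state, analogous to the fields added to task_struct. */
struct task_stub {
	int curr_ret_stack;		/* index of the last stored address */
	struct ret_slot ret_stack[DEPTH];
};

/* On function entry: save the real return address before redirecting it. */
static int push_return(struct task_stub *t, unsigned long ret,
		       unsigned long func, unsigned long long now)
{
	if (t->curr_ret_stack >= DEPTH - 1)
		return -1;	/* stack full: give up on tracing this call */
	t->curr_ret_stack++;
	t->ret_stack[t->curr_ret_stack] =
		(struct ret_slot){ .ret = ret, .func = func, .calltime = now };
	return 0;
}

/* On function exit: recover the saved address so the task can really return. */
static unsigned long pop_return(struct task_stub *t, unsigned long long *calltime)
{
	struct ret_slot *slot = &t->ret_stack[t->curr_ret_stack--];

	*calltime = slot->calltime;
	return slot->ret;
}

int main(void)
{
	struct task_stub t = { .curr_ret_stack = -1 };
	unsigned long long calltime;

	push_return(&t, 0xc0100000UL, 0xc0200000UL, 1000ULL);
	printf("return to %#lx (entered at %llu)\n",
	       pop_return(&t, &calltime), calltime);
	return 0;
}

In the patch itself this bookkeeping lives in prepare_ftrace_return() and
pop_return_trace() in arch/x86/kernel/ftrace.c, operating on the ret_stack
array that ftrace_graph_init_task() allocates for each task.
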
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index cef4bcb4e822..08c5fe6ddc09 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
-obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
 obj-$(CONFIG_BTS_TRACER) += trace_bts.o
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 53042f118f23..9e19976af727 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -395,11 +395,11 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 	unsigned long ip, fl;
 	unsigned long ftrace_addr;
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
 		ftrace_addr = (unsigned long)ftrace_caller;
 	else
-		ftrace_addr = (unsigned long)ftrace_return_caller;
+		ftrace_addr = (unsigned long)ftrace_graph_caller;
 #else
 	ftrace_addr = (unsigned long)ftrace_caller;
 #endif
@@ -1496,13 +1496,13 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	return ret;
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 static atomic_t ftrace_retfunc_active;
 
 /* The callback that hooks the return of a function */
-trace_function_return_t ftrace_function_return =
-			(trace_function_return_t)ftrace_stub;
+trace_function_graph_t ftrace_graph_function =
+			(trace_function_graph_t)ftrace_stub;
 
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
@@ -1549,7 +1549,7 @@ free:
 }
 
 /* Allocate a return stack for each task */
-static int start_return_tracing(void)
+static int start_graph_tracing(void)
 {
 	struct ftrace_ret_stack **ret_stack_list;
 	int ret;
@@ -1569,7 +1569,7 @@ static int start_return_tracing(void)
 	return ret;
 }
 
-int register_ftrace_return(trace_function_return_t func)
+int register_ftrace_graph(trace_function_graph_t func)
 {
 	int ret = 0;
 
@@ -1584,13 +1584,13 @@ int register_ftrace_return(trace_function_return_t func)
 		goto out;
 	}
 	atomic_inc(&ftrace_retfunc_active);
-	ret = start_return_tracing();
+	ret = start_graph_tracing();
 	if (ret) {
 		atomic_dec(&ftrace_retfunc_active);
 		goto out;
 	}
 	ftrace_tracing_type = FTRACE_TYPE_RETURN;
-	ftrace_function_return = func;
+	ftrace_graph_function = func;
 	ftrace_startup();
 
 out:
@@ -1598,12 +1598,12 @@ out:
 	return ret;
 }
 
-void unregister_ftrace_return(void)
+void unregister_ftrace_graph(void)
 {
 	mutex_lock(&ftrace_sysctl_lock);
 
 	atomic_dec(&ftrace_retfunc_active);
-	ftrace_function_return = (trace_function_return_t)ftrace_stub;
+	ftrace_graph_function = (trace_function_graph_t)ftrace_stub;
 	ftrace_shutdown();
 	/* Restore normal tracing type */
 	ftrace_tracing_type = FTRACE_TYPE_ENTER;
@@ -1612,7 +1612,7 @@ void unregister_ftrace_return(void)
 }
 
 /* Allocate a return stack for newly created task */
-void ftrace_retfunc_init_task(struct task_struct *t)
+void ftrace_graph_init_task(struct task_struct *t)
 {
 	if (atomic_read(&ftrace_retfunc_active)) {
 		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
@@ -1626,7 +1626,7 @@ void ftrace_retfunc_init_task(struct task_struct *t)
 		t->ret_stack = NULL;
 }
 
-void ftrace_retfunc_exit_task(struct task_struct *t)
+void ftrace_graph_exit_task(struct task_struct *t)
 {
 	struct ftrace_ret_stack *ret_stack = t->ret_stack;
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8df8fdd69c95..f21ab2c68fd4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -878,15 +878,15 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-static void __trace_function_return(struct trace_array *tr,
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void __trace_function_graph(struct trace_array *tr,
 				struct trace_array_cpu *data,
-				struct ftrace_retfunc *trace,
+				struct ftrace_graph_ret *trace,
 				unsigned long flags,
 				int pc)
 {
 	struct ring_buffer_event *event;
-	struct ftrace_ret_entry *entry;
+	struct ftrace_graph_entry *entry;
 	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
@@ -1177,8 +1177,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-void trace_function_return(struct ftrace_retfunc *trace)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void trace_function_graph(struct ftrace_graph_ret *trace)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -1193,12 +1193,12 @@ void trace_function_return(struct ftrace_retfunc *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_function_return(tr, data, trace, flags, pc);
+		__trace_function_graph(tr, data, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
 	raw_local_irq_restore(flags);
 }
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 static struct ftrace_ops trace_ops __read_mostly =
 {
@@ -2001,7 +2001,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 		break;
 	}
 	case TRACE_FN_RET: {
-		return print_return_function(iter);
+		return print_graph_function(iter);
 		break;
 	}
 	case TRACE_BRANCH: {
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3abd645e8af2..72b5ef868765 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -57,7 +57,7 @@ struct ftrace_entry {
 };
 
 /* Function return entry */
-struct ftrace_ret_entry {
+struct ftrace_graph_entry {
 	struct trace_entry ent;
 	unsigned long ip;
 	unsigned long parent_ip;
@@ -264,7 +264,7 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
 		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
 		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
-		IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
+		IF_ASSIGN(var, ent, struct ftrace_graph_entry, TRACE_FN_RET);\
 		IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
 		__ftrace_bad_type();	\
 	} while (0)
@@ -398,7 +398,7 @@ void trace_function(struct trace_array *tr,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
 void
-trace_function_return(struct ftrace_retfunc *trace);
+trace_function_graph(struct ftrace_graph_ret *trace);
 
 void trace_bts(struct trace_array *tr,
 	       unsigned long from,
@@ -489,11 +489,11 @@ extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 extern unsigned long trace_flags;
 
 /* Standard output formatting function used for function return traces */
-#ifdef CONFIG_FUNCTION_RET_TRACER
-extern enum print_line_t print_return_function(struct trace_iterator *iter);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern enum print_line_t print_graph_function(struct trace_iterator *iter);
 #else
 static inline enum print_line_t
-print_return_function(struct trace_iterator *iter)
+print_graph_function(struct trace_iterator *iter)
 {
 	return TRACE_TYPE_UNHANDLED;
 }
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
new file mode 100644
index 000000000000..f5bad4624d2b
--- /dev/null
+++ b/kernel/trace/trace_functions_graph.c
@@ -0,0 +1,98 @@
+/*
+ *
+ * Function graph tracer.
+ * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ * Mostly borrowed from function tracer which
+ * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+
+#include "trace.h"
+
+
+#define TRACE_GRAPH_PRINT_OVERRUN	0x1
+static struct tracer_opt trace_opts[] = {
+	/* Display overruns or not */
+	{ TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
+	{ } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+	.val = 0, /* Don't display overruns by default */
+	.opts = trace_opts
+};
+
+
+static int graph_trace_init(struct trace_array *tr)
+{
+	int cpu;
+	for_each_online_cpu(cpu)
+		tracing_reset(tr, cpu);
+
+	return register_ftrace_graph(&trace_function_graph);
+}
+
+static void graph_trace_reset(struct trace_array *tr)
+{
+	unregister_ftrace_graph();
+}
+
+
+enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+	struct trace_seq *s = &iter->seq;
+	struct trace_entry *entry = iter->ent;
+	struct ftrace_graph_entry *field;
+	int ret;
+
+	if (entry->type == TRACE_FN_RET) {
+		trace_assign_type(field, entry);
+		ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+
+		ret = seq_print_ip_sym(s, field->ip,
+					trace_flags & TRACE_ITER_SYM_MASK);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+
+		ret = trace_seq_printf(s, " (%llu ns)",
+					field->rettime - field->calltime);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+
+		if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
+			ret = trace_seq_printf(s, " (Overruns: %lu)",
+						field->overrun);
+			if (!ret)
+				return TRACE_TYPE_PARTIAL_LINE;
+		}
+
+		ret = trace_seq_printf(s, "\n");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+
+		return TRACE_TYPE_HANDLED;
+	}
+	return TRACE_TYPE_UNHANDLED;
+}
+
+static struct tracer graph_trace __read_mostly = {
+	.name = "function-graph",
+	.init = graph_trace_init,
+	.reset = graph_trace_reset,
+	.print_line = print_graph_function,
+	.flags = &tracer_flags,
+};
+
+static __init int init_graph_trace(void)
+{
+	return register_tracer(&graph_trace);
+}
+
+device_initcall(init_graph_trace);