diff options
author | Ingo Molnar <mingo@elte.hu> | 2008-11-12 04:17:09 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-11-12 04:17:09 -0500 |
commit | 60a011c736e7dd09a0b01ca6a051a416f3f52ffb (patch) | |
tree | 20e91db58c3bbfa622d3ed8ec747e4974a807284 | |
parent | d06bbd669539215405874d8fe32ab65105e6c4bb (diff) | |
parent | 19b3e9671c5a219b8c34da2cc66e0ce7c3a501ae (diff) |
Merge branch 'tracing/function-return-tracer' into tracing/fastboot
-rw-r--r-- | arch/x86/Kconfig | 1 | ||||
-rw-r--r-- | arch/x86/include/asm/ftrace.h | 26 | ||||
-rw-r--r-- | arch/x86/include/asm/thread_info.h | 24 | ||||
-rw-r--r-- | arch/x86/kernel/Makefile | 6 | ||||
-rw-r--r-- | arch/x86/kernel/entry_32.S | 33 | ||||
-rw-r--r-- | arch/x86/kernel/ftrace.c | 182 | ||||
-rw-r--r-- | include/linux/ftrace.h | 20 | ||||
-rw-r--r-- | include/linux/ftrace_irq.h | 2 | ||||
-rw-r--r-- | include/linux/sched.h | 11 | ||||
-rw-r--r-- | kernel/Makefile | 4 | ||||
-rw-r--r-- | kernel/trace/Kconfig | 14 | ||||
-rw-r--r-- | kernel/trace/Makefile | 1 | ||||
-rw-r--r-- | kernel/trace/ftrace.c | 16 | ||||
-rw-r--r-- | kernel/trace/trace.c | 65 | ||||
-rw-r--r-- | kernel/trace/trace.h | 35 | ||||
-rw-r--r-- | kernel/trace/trace_functions_return.c | 82 |
16 files changed, 506 insertions, 16 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6ab097fd5241..7780cc8b51d1 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -29,6 +29,7 @@ config X86 | |||
29 | select HAVE_FTRACE_MCOUNT_RECORD | 29 | select HAVE_FTRACE_MCOUNT_RECORD |
30 | select HAVE_DYNAMIC_FTRACE | 30 | select HAVE_DYNAMIC_FTRACE |
31 | select HAVE_FUNCTION_TRACER | 31 | select HAVE_FUNCTION_TRACER |
32 | select HAVE_FUNCTION_RET_TRACER if X86_32 | ||
32 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | 33 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST |
33 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) | 34 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) |
34 | select HAVE_ARCH_KGDB if !X86_VOYAGER | 35 | select HAVE_ARCH_KGDB if !X86_VOYAGER |
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index f8173ed1c970..9b6a1fa19e70 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h | |||
@@ -20,4 +20,30 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) | |||
20 | #endif /* __ASSEMBLY__ */ | 20 | #endif /* __ASSEMBLY__ */ |
21 | #endif /* CONFIG_FUNCTION_TRACER */ | 21 | #endif /* CONFIG_FUNCTION_TRACER */ |
22 | 22 | ||
23 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
24 | #define FTRACE_RET_STACK_SIZE 20 | ||
25 | |||
26 | #ifndef __ASSEMBLY__ | ||
27 | |||
28 | /* | ||
29 | * Stack of return addresses for functions | ||
30 | * of a thread. | ||
31 | * Used in struct thread_info | ||
32 | */ | ||
33 | struct ftrace_ret_stack { | ||
34 | unsigned long ret; | ||
35 | unsigned long func; | ||
36 | unsigned long long calltime; | ||
37 | }; | ||
38 | |||
39 | /* | ||
40 | * Primary handler of a function return. | ||
41 | * It relies on ftrace_return_to_handler. | ||
42 | * Defined in entry_32.S | ||
43 | */ | ||
44 | extern void return_to_handler(void); | ||
45 | |||
46 | #endif /* __ASSEMBLY__ */ | ||
47 | #endif /* CONFIG_FUNCTION_RET_TRACER */ | ||
48 | |||
23 | #endif /* _ASM_X86_FTRACE_H */ | 49 | #endif /* _ASM_X86_FTRACE_H */ |
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index e44d379faad2..a71158369fd4 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -20,6 +20,7 @@ | |||
20 | struct task_struct; | 20 | struct task_struct; |
21 | struct exec_domain; | 21 | struct exec_domain; |
22 | #include <asm/processor.h> | 22 | #include <asm/processor.h> |
23 | #include <asm/ftrace.h> | ||
23 | 24 | ||
24 | struct thread_info { | 25 | struct thread_info { |
25 | struct task_struct *task; /* main task structure */ | 26 | struct task_struct *task; /* main task structure */ |
@@ -38,8 +39,30 @@ struct thread_info { | |||
38 | */ | 39 | */ |
39 | __u8 supervisor_stack[0]; | 40 | __u8 supervisor_stack[0]; |
40 | #endif | 41 | #endif |
42 | |||
43 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
44 | /* Index of current stored address in ret_stack */ | ||
45 | int curr_ret_stack; | ||
46 | /* Stack of return addresses for return function tracing */ | ||
47 | struct ftrace_ret_stack ret_stack[FTRACE_RET_STACK_SIZE]; | ||
48 | #endif | ||
41 | }; | 49 | }; |
42 | 50 | ||
51 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
52 | #define INIT_THREAD_INFO(tsk) \ | ||
53 | { \ | ||
54 | .task = &tsk, \ | ||
55 | .exec_domain = &default_exec_domain, \ | ||
56 | .flags = 0, \ | ||
57 | .cpu = 0, \ | ||
58 | .preempt_count = 1, \ | ||
59 | .addr_limit = KERNEL_DS, \ | ||
60 | .restart_block = { \ | ||
61 | .fn = do_no_restart_syscall, \ | ||
62 | }, \ | ||
63 | .curr_ret_stack = -1,\ | ||
64 | } | ||
65 | #else | ||
43 | #define INIT_THREAD_INFO(tsk) \ | 66 | #define INIT_THREAD_INFO(tsk) \ |
44 | { \ | 67 | { \ |
45 | .task = &tsk, \ | 68 | .task = &tsk, \ |
@@ -52,6 +75,7 @@ struct thread_info { | |||
52 | .fn = do_no_restart_syscall, \ | 75 | .fn = do_no_restart_syscall, \ |
53 | }, \ | 76 | }, \ |
54 | } | 77 | } |
78 | #endif | ||
55 | 79 | ||
56 | #define init_thread_info (init_thread_union.thread_info) | 80 | #define init_thread_info (init_thread_union.thread_info) |
57 | #define init_stack (init_thread_union.stack) | 81 | #define init_stack (init_thread_union.stack) |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index e489ff9cb3e2..1d8ed95da846 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -14,6 +14,11 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg | |||
14 | CFLAGS_REMOVE_ftrace.o = -pg | 14 | CFLAGS_REMOVE_ftrace.o = -pg |
15 | endif | 15 | endif |
16 | 16 | ||
17 | ifdef CONFIG_FUNCTION_RET_TRACER | ||
18 | # Don't trace __switch_to() but let it for function tracer | ||
19 | CFLAGS_REMOVE_process_32.o = -pg | ||
20 | endif | ||
21 | |||
17 | # | 22 | # |
18 | # vsyscalls (which work on the user stack) should have | 23 | # vsyscalls (which work on the user stack) should have |
19 | # no stack-protector checks: | 24 | # no stack-protector checks: |
@@ -65,6 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o | |||
65 | obj-$(CONFIG_X86_IO_APIC) += io_apic.o | 70 | obj-$(CONFIG_X86_IO_APIC) += io_apic.o |
66 | obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o | 71 | obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o |
67 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 72 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
73 | obj-$(CONFIG_FUNCTION_RET_TRACER) += ftrace.o | ||
68 | obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o | 74 | obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o |
69 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o | 75 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o |
70 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o | 76 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 9134de814c97..9a0ac85946db 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -1188,6 +1188,10 @@ ENTRY(mcount) | |||
1188 | 1188 | ||
1189 | cmpl $ftrace_stub, ftrace_trace_function | 1189 | cmpl $ftrace_stub, ftrace_trace_function |
1190 | jnz trace | 1190 | jnz trace |
1191 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
1192 | cmpl $ftrace_stub, ftrace_function_return | ||
1193 | jnz trace_return | ||
1194 | #endif | ||
1191 | .globl ftrace_stub | 1195 | .globl ftrace_stub |
1192 | ftrace_stub: | 1196 | ftrace_stub: |
1193 | ret | 1197 | ret |
@@ -1206,8 +1210,37 @@ trace: | |||
1206 | popl %edx | 1210 | popl %edx |
1207 | popl %ecx | 1211 | popl %ecx |
1208 | popl %eax | 1212 | popl %eax |
1213 | jmp ftrace_stub | ||
1209 | 1214 | ||
1215 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
1216 | trace_return: | ||
1217 | pushl %eax | ||
1218 | pushl %ecx | ||
1219 | pushl %edx | ||
1220 | movl 0xc(%esp), %eax | ||
1221 | pushl %eax | ||
1222 | lea 0x4(%ebp), %eax | ||
1223 | pushl %eax | ||
1224 | call prepare_ftrace_return | ||
1225 | addl $8, %esp | ||
1226 | popl %edx | ||
1227 | popl %ecx | ||
1228 | popl %eax | ||
1210 | jmp ftrace_stub | 1229 | jmp ftrace_stub |
1230 | |||
1231 | .globl return_to_handler | ||
1232 | return_to_handler: | ||
1233 | pushl $0 | ||
1234 | pushl %eax | ||
1235 | pushl %ecx | ||
1236 | pushl %edx | ||
1237 | call ftrace_return_to_handler | ||
1238 | movl %eax, 0xc(%esp) | ||
1239 | popl %edx | ||
1240 | popl %ecx | ||
1241 | popl %eax | ||
1242 | ret | ||
1243 | #endif /* CONFIG_FUNCTION_RET_TRACER */ | ||
1211 | END(mcount) | 1244 | END(mcount) |
1212 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 1245 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
1213 | #endif /* CONFIG_FUNCTION_TRACER */ | 1246 | #endif /* CONFIG_FUNCTION_TRACER */ |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 69149337f2fe..16a571dea2ef 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -14,14 +14,178 @@ | |||
14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
15 | #include <linux/ftrace.h> | 15 | #include <linux/ftrace.h> |
16 | #include <linux/percpu.h> | 16 | #include <linux/percpu.h> |
17 | #include <linux/sched.h> | ||
17 | #include <linux/init.h> | 18 | #include <linux/init.h> |
18 | #include <linux/list.h> | 19 | #include <linux/list.h> |
19 | 20 | ||
20 | #include <asm/ftrace.h> | 21 | #include <asm/ftrace.h> |
22 | #include <linux/ftrace.h> | ||
21 | #include <asm/nops.h> | 23 | #include <asm/nops.h> |
24 | #include <asm/nmi.h> | ||
22 | 25 | ||
23 | 26 | ||
24 | static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; | 27 | |
28 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
29 | |||
30 | /* | ||
31 | * These functions are picked from those used on | ||
32 | * this page for dynamic ftrace. They have been | ||
33 | * simplified to ignore all traces in NMI context. | ||
34 | */ | ||
35 | static atomic_t in_nmi; | ||
36 | |||
37 | void ftrace_nmi_enter(void) | ||
38 | { | ||
39 | atomic_inc(&in_nmi); | ||
40 | } | ||
41 | |||
42 | void ftrace_nmi_exit(void) | ||
43 | { | ||
44 | atomic_dec(&in_nmi); | ||
45 | } | ||
46 | |||
47 | /* | ||
48 | * Synchronize accesses to return addresses stack with | ||
49 | * interrupts. | ||
50 | */ | ||
51 | static raw_spinlock_t ret_stack_lock; | ||
52 | |||
53 | /* Add a function return address to the trace stack on thread info.*/ | ||
54 | static int push_return_trace(unsigned long ret, unsigned long long time, | ||
55 | unsigned long func) | ||
56 | { | ||
57 | int index; | ||
58 | struct thread_info *ti; | ||
59 | unsigned long flags; | ||
60 | int err = 0; | ||
61 | |||
62 | raw_local_irq_save(flags); | ||
63 | __raw_spin_lock(&ret_stack_lock); | ||
64 | |||
65 | ti = current_thread_info(); | ||
66 | /* The return trace stack is full */ | ||
67 | if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) { | ||
68 | err = -EBUSY; | ||
69 | goto out; | ||
70 | } | ||
71 | |||
72 | index = ++ti->curr_ret_stack; | ||
73 | ti->ret_stack[index].ret = ret; | ||
74 | ti->ret_stack[index].func = func; | ||
75 | ti->ret_stack[index].calltime = time; | ||
76 | |||
77 | out: | ||
78 | __raw_spin_unlock(&ret_stack_lock); | ||
79 | raw_local_irq_restore(flags); | ||
80 | return err; | ||
81 | } | ||
82 | |||
83 | /* Retrieve a function return address from the trace stack on thread info.*/ | ||
84 | static void pop_return_trace(unsigned long *ret, unsigned long long *time, | ||
85 | unsigned long *func) | ||
86 | { | ||
87 | struct thread_info *ti; | ||
88 | int index; | ||
89 | unsigned long flags; | ||
90 | |||
91 | raw_local_irq_save(flags); | ||
92 | __raw_spin_lock(&ret_stack_lock); | ||
93 | |||
94 | ti = current_thread_info(); | ||
95 | index = ti->curr_ret_stack; | ||
96 | *ret = ti->ret_stack[index].ret; | ||
97 | *func = ti->ret_stack[index].func; | ||
98 | *time = ti->ret_stack[index].calltime; | ||
99 | ti->curr_ret_stack--; | ||
100 | |||
101 | __raw_spin_unlock(&ret_stack_lock); | ||
102 | raw_local_irq_restore(flags); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Send the trace to the ring-buffer. | ||
107 | * @return the original return address. | ||
108 | */ | ||
109 | unsigned long ftrace_return_to_handler(void) | ||
110 | { | ||
111 | struct ftrace_retfunc trace; | ||
112 | pop_return_trace(&trace.ret, &trace.calltime, &trace.func); | ||
113 | trace.rettime = cpu_clock(raw_smp_processor_id()); | ||
114 | ftrace_function_return(&trace); | ||
115 | |||
116 | return trace.ret; | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Hook the return address and push it in the stack of return addrs | ||
121 | * in current thread info. | ||
122 | */ | ||
123 | asmlinkage | ||
124 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | ||
125 | { | ||
126 | unsigned long old; | ||
127 | unsigned long long calltime; | ||
128 | int faulted; | ||
129 | unsigned long return_hooker = (unsigned long) | ||
130 | &return_to_handler; | ||
131 | |||
132 | /* NMIs are currently unsupported */ | ||
133 | if (atomic_read(&in_nmi)) | ||
134 | return; | ||
135 | |||
136 | /* | ||
137 | * Protect against fault, even if it shouldn't | ||
138 | * happen. This tool is too intrusive to | ||
139 | * ignore such a protection. | ||
140 | */ | ||
141 | asm volatile( | ||
142 | "1: movl (%[parent_old]), %[old]\n" | ||
143 | "2: movl %[return_hooker], (%[parent_replaced])\n" | ||
144 | " movl $0, %[faulted]\n" | ||
145 | |||
146 | ".section .fixup, \"ax\"\n" | ||
147 | "3: movl $1, %[faulted]\n" | ||
148 | ".previous\n" | ||
149 | |||
150 | ".section __ex_table, \"a\"\n" | ||
151 | " .long 1b, 3b\n" | ||
152 | " .long 2b, 3b\n" | ||
153 | ".previous\n" | ||
154 | |||
155 | : [parent_replaced] "=r" (parent), [old] "=r" (old), | ||
156 | [faulted] "=r" (faulted) | ||
157 | : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker) | ||
158 | : "memory" | ||
159 | ); | ||
160 | |||
161 | if (WARN_ON(faulted)) { | ||
162 | unregister_ftrace_return(); | ||
163 | return; | ||
164 | } | ||
165 | |||
166 | if (WARN_ON(!__kernel_text_address(old))) { | ||
167 | unregister_ftrace_return(); | ||
168 | *parent = old; | ||
169 | return; | ||
170 | } | ||
171 | |||
172 | calltime = cpu_clock(raw_smp_processor_id()); | ||
173 | |||
174 | if (push_return_trace(old, calltime, self_addr) == -EBUSY) | ||
175 | *parent = old; | ||
176 | } | ||
177 | |||
178 | static int __init init_ftrace_function_return(void) | ||
179 | { | ||
180 | ret_stack_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
181 | return 0; | ||
182 | } | ||
183 | device_initcall(init_ftrace_function_return); | ||
184 | |||
185 | |||
186 | #endif | ||
187 | |||
188 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
25 | 189 | ||
26 | union ftrace_code_union { | 190 | union ftrace_code_union { |
27 | char code[MCOUNT_INSN_SIZE]; | 191 | char code[MCOUNT_INSN_SIZE]; |
@@ -31,17 +195,11 @@ union ftrace_code_union { | |||
31 | } __attribute__((packed)); | 195 | } __attribute__((packed)); |
32 | }; | 196 | }; |
33 | 197 | ||
34 | |||
35 | static int ftrace_calc_offset(long ip, long addr) | 198 | static int ftrace_calc_offset(long ip, long addr) |
36 | { | 199 | { |
37 | return (int)(addr - ip); | 200 | return (int)(addr - ip); |
38 | } | 201 | } |
39 | 202 | ||
40 | unsigned char *ftrace_nop_replace(void) | ||
41 | { | ||
42 | return ftrace_nop; | ||
43 | } | ||
44 | |||
45 | unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) | 203 | unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) |
46 | { | 204 | { |
47 | static union ftrace_code_union calc; | 205 | static union ftrace_code_union calc; |
@@ -183,6 +341,15 @@ do_ftrace_mod_code(unsigned long ip, void *new_code) | |||
183 | } | 341 | } |
184 | 342 | ||
185 | 343 | ||
344 | |||
345 | |||
346 | static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; | ||
347 | |||
348 | unsigned char *ftrace_nop_replace(void) | ||
349 | { | ||
350 | return ftrace_nop; | ||
351 | } | ||
352 | |||
186 | int | 353 | int |
187 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, | 354 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, |
188 | unsigned char *new_code) | 355 | unsigned char *new_code) |
@@ -292,3 +459,4 @@ int __init ftrace_dyn_arch_init(void *data) | |||
292 | 459 | ||
293 | return 0; | 460 | return 0; |
294 | } | 461 | } |
462 | #endif | ||
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 1f5608c11023..dcbbf72a88b1 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -268,6 +268,26 @@ ftrace_init_module(unsigned long *start, unsigned long *end) { } | |||
268 | 268 | ||
269 | 269 | ||
270 | /* | 270 | /* |
271 | * Structure that defines a return function trace. | ||
272 | */ | ||
273 | struct ftrace_retfunc { | ||
274 | unsigned long ret; /* Return address */ | ||
275 | unsigned long func; /* Current function */ | ||
276 | unsigned long long calltime; | ||
277 | unsigned long long rettime; | ||
278 | }; | ||
279 | |||
280 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
281 | /* Type of a callback handler of tracing return function */ | ||
282 | typedef void (*trace_function_return_t)(struct ftrace_retfunc *); | ||
283 | |||
284 | extern void register_ftrace_return(trace_function_return_t func); | ||
285 | /* The current handler in use */ | ||
286 | extern trace_function_return_t ftrace_function_return; | ||
287 | extern void unregister_ftrace_return(void); | ||
288 | #endif | ||
289 | |||
290 | /* | ||
271 | * Structure which defines the trace of an initcall. | 291 | * Structure which defines the trace of an initcall. |
272 | * You don't have to fill the func field since it is | 292 | * You don't have to fill the func field since it is |
273 | * only used internally by the tracer. | 293 | * only used internally by the tracer. |
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h index b1299d6729f2..0b4df55d7a74 100644 --- a/include/linux/ftrace_irq.h +++ b/include/linux/ftrace_irq.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _LINUX_FTRACE_IRQ_H | 2 | #define _LINUX_FTRACE_IRQ_H |
3 | 3 | ||
4 | 4 | ||
5 | #ifdef CONFIG_DYNAMIC_FTRACE | 5 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_RET_TRACER) |
6 | extern void ftrace_nmi_enter(void); | 6 | extern void ftrace_nmi_enter(void); |
7 | extern void ftrace_nmi_exit(void); | 7 | extern void ftrace_nmi_exit(void); |
8 | #else | 8 | #else |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 644ffbda17ca..61c8cc36028a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -2006,6 +2006,17 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct | |||
2006 | { | 2006 | { |
2007 | *task_thread_info(p) = *task_thread_info(org); | 2007 | *task_thread_info(p) = *task_thread_info(org); |
2008 | task_thread_info(p)->task = p; | 2008 | task_thread_info(p)->task = p; |
2009 | |||
2010 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
2011 | /* | ||
2012 | * When fork() creates a child process, this function is called. | ||
2013 | * But the child task may not inherit the return addresses traced | ||
2014 | * by the return function tracer because it will directly execute | ||
2015 | * in userspace and will not return to kernel functions its parent | ||
2016 | * used. | ||
2017 | */ | ||
2018 | task_thread_info(p)->curr_ret_stack = -1; | ||
2019 | #endif | ||
2009 | } | 2020 | } |
2010 | 2021 | ||
2011 | static inline unsigned long *end_of_stack(struct task_struct *p) | 2022 | static inline unsigned long *end_of_stack(struct task_struct *p) |
diff --git a/kernel/Makefile b/kernel/Makefile index 9a3ec66a9d84..af3be57acbbb 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -23,6 +23,10 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg | |||
23 | CFLAGS_REMOVE_sched_clock.o = -pg | 23 | CFLAGS_REMOVE_sched_clock.o = -pg |
24 | CFLAGS_REMOVE_sched.o = -mno-spe -pg | 24 | CFLAGS_REMOVE_sched.o = -mno-spe -pg |
25 | endif | 25 | endif |
26 | ifdef CONFIG_FUNCTION_RET_TRACER | ||
27 | CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address() | ||
28 | CFLAGS_REMOVE_module.o = -pg # For __module_text_address() | ||
29 | endif | ||
26 | 30 | ||
27 | obj-$(CONFIG_FREEZER) += freezer.o | 31 | obj-$(CONFIG_FREEZER) += freezer.o |
28 | obj-$(CONFIG_PROFILING) += profile.o | 32 | obj-$(CONFIG_PROFILING) += profile.o |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index fc4febc3334a..d986216c8327 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -9,6 +9,9 @@ config NOP_TRACER | |||
9 | config HAVE_FUNCTION_TRACER | 9 | config HAVE_FUNCTION_TRACER |
10 | bool | 10 | bool |
11 | 11 | ||
12 | config HAVE_FUNCTION_RET_TRACER | ||
13 | bool | ||
14 | |||
12 | config HAVE_FUNCTION_TRACE_MCOUNT_TEST | 15 | config HAVE_FUNCTION_TRACE_MCOUNT_TEST |
13 | bool | 16 | bool |
14 | help | 17 | help |
@@ -54,6 +57,17 @@ config FUNCTION_TRACER | |||
54 | (the bootup default), then the overhead of the instructions is very | 57 | (the bootup default), then the overhead of the instructions is very |
55 | small and not measurable even in micro-benchmarks. | 58 | small and not measurable even in micro-benchmarks. |
56 | 59 | ||
60 | config FUNCTION_RET_TRACER | ||
61 | bool "Kernel Function return Tracer" | ||
62 | depends on !DYNAMIC_FTRACE | ||
63 | depends on HAVE_FUNCTION_RET_TRACER | ||
64 | depends on FUNCTION_TRACER | ||
65 | help | ||
66 | Enable the kernel to trace a function at its return. | ||
67 | Its first purpose is to trace the duration of functions. | ||
68 | This is done by setting the current return address on the thread | ||
69 | info structure of the current task. | ||
70 | |||
57 | config IRQSOFF_TRACER | 71 | config IRQSOFF_TRACER |
58 | bool "Interrupts-off Latency Tracer" | 72 | bool "Interrupts-off Latency Tracer" |
59 | default n | 73 | default n |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index c8228b1a49e9..3e1f361bbc17 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
@@ -24,5 +24,6 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o | |||
24 | obj-$(CONFIG_STACK_TRACER) += trace_stack.o | 24 | obj-$(CONFIG_STACK_TRACER) += trace_stack.o |
25 | obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o | 25 | obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o |
26 | obj-$(CONFIG_BOOT_TRACER) += trace_boot.o | 26 | obj-$(CONFIG_BOOT_TRACER) += trace_boot.o |
27 | obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o | ||
27 | 28 | ||
28 | libftrace-y := ftrace.o | 29 | libftrace-y := ftrace.o |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 25b803559f17..beb21a51e1ef 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1480,3 +1480,19 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
1480 | return ret; | 1480 | return ret; |
1481 | } | 1481 | } |
1482 | 1482 | ||
1483 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
1484 | trace_function_return_t ftrace_function_return = | ||
1485 | (trace_function_return_t)ftrace_stub; | ||
1486 | void register_ftrace_return(trace_function_return_t func) | ||
1487 | { | ||
1488 | ftrace_function_return = func; | ||
1489 | } | ||
1490 | |||
1491 | void unregister_ftrace_return(void) | ||
1492 | { | ||
1493 | ftrace_function_return = (trace_function_return_t)ftrace_stub; | ||
1494 | } | ||
1495 | #endif | ||
1496 | |||
1497 | |||
1498 | |||
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 216bbe7547a4..a3f7ae9cd8e1 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -244,13 +244,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs) | |||
244 | return nsecs / 1000; | 244 | return nsecs / 1000; |
245 | } | 245 | } |
246 | 246 | ||
247 | /* | ||
248 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that | ||
249 | * control the output of kernel symbols. | ||
250 | */ | ||
251 | #define TRACE_ITER_SYM_MASK \ | ||
252 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) | ||
253 | |||
254 | /* These must match the bit postions in trace_iterator_flags */ | 247 | /* These must match the bit postions in trace_iterator_flags */ |
255 | static const char *trace_options[] = { | 248 | static const char *trace_options[] = { |
256 | "print-parent", | 249 | "print-parent", |
@@ -810,6 +803,35 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, | |||
810 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 803 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
811 | } | 804 | } |
812 | 805 | ||
806 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
807 | static void __trace_function_return(struct trace_array *tr, | ||
808 | struct trace_array_cpu *data, | ||
809 | struct ftrace_retfunc *trace, | ||
810 | unsigned long flags, | ||
811 | int pc) | ||
812 | { | ||
813 | struct ring_buffer_event *event; | ||
814 | struct ftrace_ret_entry *entry; | ||
815 | unsigned long irq_flags; | ||
816 | |||
817 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | ||
818 | return; | ||
819 | |||
820 | event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), | ||
821 | &irq_flags); | ||
822 | if (!event) | ||
823 | return; | ||
824 | entry = ring_buffer_event_data(event); | ||
825 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
826 | entry->ent.type = TRACE_FN_RET; | ||
827 | entry->ip = trace->func; | ||
828 | entry->parent_ip = trace->ret; | ||
829 | entry->rettime = trace->rettime; | ||
830 | entry->calltime = trace->calltime; | ||
831 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); | ||
832 | } | ||
833 | #endif | ||
834 | |||
813 | void | 835 | void |
814 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, | 836 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, |
815 | unsigned long ip, unsigned long parent_ip, unsigned long flags, | 837 | unsigned long ip, unsigned long parent_ip, unsigned long flags, |
@@ -1038,6 +1060,29 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
1038 | raw_local_irq_restore(flags); | 1060 | raw_local_irq_restore(flags); |
1039 | } | 1061 | } |
1040 | 1062 | ||
1063 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
1064 | void trace_function_return(struct ftrace_retfunc *trace) | ||
1065 | { | ||
1066 | struct trace_array *tr = &global_trace; | ||
1067 | struct trace_array_cpu *data; | ||
1068 | unsigned long flags; | ||
1069 | long disabled; | ||
1070 | int cpu; | ||
1071 | int pc; | ||
1072 | |||
1073 | raw_local_irq_save(flags); | ||
1074 | cpu = raw_smp_processor_id(); | ||
1075 | data = tr->data[cpu]; | ||
1076 | disabled = atomic_inc_return(&data->disabled); | ||
1077 | if (likely(disabled == 1)) { | ||
1078 | pc = preempt_count(); | ||
1079 | __trace_function_return(tr, data, trace, flags, pc); | ||
1080 | } | ||
1081 | atomic_dec(&data->disabled); | ||
1082 | raw_local_irq_restore(flags); | ||
1083 | } | ||
1084 | #endif /* CONFIG_FUNCTION_RET_TRACER */ | ||
1085 | |||
1041 | static struct ftrace_ops trace_ops __read_mostly = | 1086 | static struct ftrace_ops trace_ops __read_mostly = |
1042 | { | 1087 | { |
1043 | .func = function_trace_call, | 1088 | .func = function_trace_call, |
@@ -1285,7 +1330,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt, | |||
1285 | # define IP_FMT "%016lx" | 1330 | # define IP_FMT "%016lx" |
1286 | #endif | 1331 | #endif |
1287 | 1332 | ||
1288 | static int | 1333 | int |
1289 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | 1334 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) |
1290 | { | 1335 | { |
1291 | int ret; | 1336 | int ret; |
@@ -1738,6 +1783,10 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
1738 | trace_seq_print_cont(s, iter); | 1783 | trace_seq_print_cont(s, iter); |
1739 | break; | 1784 | break; |
1740 | } | 1785 | } |
1786 | case TRACE_FN_RET: { | ||
1787 | return print_return_function(iter); | ||
1788 | break; | ||
1789 | } | ||
1741 | } | 1790 | } |
1742 | return TRACE_TYPE_HANDLED; | 1791 | return TRACE_TYPE_HANDLED; |
1743 | } | 1792 | } |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 978145088fb8..e40ce0c14690 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -22,6 +22,7 @@ enum trace_type { | |||
22 | TRACE_MMIO_RW, | 22 | TRACE_MMIO_RW, |
23 | TRACE_MMIO_MAP, | 23 | TRACE_MMIO_MAP, |
24 | TRACE_BOOT, | 24 | TRACE_BOOT, |
25 | TRACE_FN_RET, | ||
25 | 26 | ||
26 | __TRACE_LAST_TYPE | 27 | __TRACE_LAST_TYPE |
27 | }; | 28 | }; |
@@ -48,6 +49,15 @@ struct ftrace_entry { | |||
48 | unsigned long ip; | 49 | unsigned long ip; |
49 | unsigned long parent_ip; | 50 | unsigned long parent_ip; |
50 | }; | 51 | }; |
52 | |||
53 | /* Function return entry */ | ||
54 | struct ftrace_ret_entry { | ||
55 | struct trace_entry ent; | ||
56 | unsigned long ip; | ||
57 | unsigned long parent_ip; | ||
58 | unsigned long long calltime; | ||
59 | unsigned long long rettime; | ||
60 | }; | ||
51 | extern struct tracer boot_tracer; | 61 | extern struct tracer boot_tracer; |
52 | 62 | ||
53 | /* | 63 | /* |
@@ -218,6 +228,7 @@ extern void __ftrace_bad_type(void); | |||
218 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ | 228 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ |
219 | TRACE_MMIO_MAP); \ | 229 | TRACE_MMIO_MAP); \ |
220 | IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT); \ | 230 | IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT); \ |
231 | IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET); \ | ||
221 | __ftrace_bad_type(); \ | 232 | __ftrace_bad_type(); \ |
222 | } while (0) | 233 | } while (0) |
223 | 234 | ||
@@ -321,6 +332,8 @@ void trace_function(struct trace_array *tr, | |||
321 | unsigned long ip, | 332 | unsigned long ip, |
322 | unsigned long parent_ip, | 333 | unsigned long parent_ip, |
323 | unsigned long flags, int pc); | 334 | unsigned long flags, int pc); |
335 | void | ||
336 | trace_function_return(struct ftrace_retfunc *trace); | ||
324 | 337 | ||
325 | void tracing_start_cmdline_record(void); | 338 | void tracing_start_cmdline_record(void); |
326 | void tracing_stop_cmdline_record(void); | 339 | void tracing_stop_cmdline_record(void); |
@@ -393,6 +406,10 @@ extern void *head_page(struct trace_array_cpu *data); | |||
393 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); | 406 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); |
394 | extern void trace_seq_print_cont(struct trace_seq *s, | 407 | extern void trace_seq_print_cont(struct trace_seq *s, |
395 | struct trace_iterator *iter); | 408 | struct trace_iterator *iter); |
409 | |||
410 | extern int | ||
411 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, | ||
412 | unsigned long sym_flags); | ||
396 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | 413 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, |
397 | size_t cnt); | 414 | size_t cnt); |
398 | extern long ns2usecs(cycle_t nsec); | 415 | extern long ns2usecs(cycle_t nsec); |
@@ -400,6 +417,17 @@ extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args); | |||
400 | 417 | ||
401 | extern unsigned long trace_flags; | 418 | extern unsigned long trace_flags; |
402 | 419 | ||
420 | /* Standard output formatting function used for function return traces */ | ||
421 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
422 | extern enum print_line_t print_return_function(struct trace_iterator *iter); | ||
423 | #else | ||
424 | static inline enum print_line_t | ||
425 | print_return_function(struct trace_iterator *iter) | ||
426 | { | ||
427 | return TRACE_TYPE_UNHANDLED; | ||
428 | } | ||
429 | #endif | ||
430 | |||
403 | /* | 431 | /* |
404 | * trace_iterator_flags is an enumeration that defines bit | 432 | * trace_iterator_flags is an enumeration that defines bit |
405 | * positions into trace_flags that controls the output. | 433 | * positions into trace_flags that controls the output. |
@@ -422,6 +450,13 @@ enum trace_iterator_flags { | |||
422 | TRACE_ITER_PREEMPTONLY = 0x800, | 450 | TRACE_ITER_PREEMPTONLY = 0x800, |
423 | }; | 451 | }; |
424 | 452 | ||
453 | /* | ||
454 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that | ||
455 | * control the output of kernel symbols. | ||
456 | */ | ||
457 | #define TRACE_ITER_SYM_MASK \ | ||
458 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) | ||
459 | |||
425 | extern struct tracer nop_trace; | 460 | extern struct tracer nop_trace; |
426 | 461 | ||
427 | /** | 462 | /** |
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c new file mode 100644 index 000000000000..7680b21537dd --- /dev/null +++ b/kernel/trace/trace_functions_return.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Function return tracer. | ||
4 | * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
5 | * Mostly borrowed from function tracer which | ||
6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | ||
7 | * | ||
8 | */ | ||
9 | #include <linux/debugfs.h> | ||
10 | #include <linux/uaccess.h> | ||
11 | #include <linux/ftrace.h> | ||
12 | #include <linux/fs.h> | ||
13 | |||
14 | #include "trace.h" | ||
15 | |||
16 | |||
17 | static void start_return_trace(struct trace_array *tr) | ||
18 | { | ||
19 | register_ftrace_return(&trace_function_return); | ||
20 | } | ||
21 | |||
22 | static void stop_return_trace(struct trace_array *tr) | ||
23 | { | ||
24 | unregister_ftrace_return(); | ||
25 | } | ||
26 | |||
27 | static void return_trace_init(struct trace_array *tr) | ||
28 | { | ||
29 | int cpu; | ||
30 | for_each_online_cpu(cpu) | ||
31 | tracing_reset(tr, cpu); | ||
32 | |||
33 | start_return_trace(tr); | ||
34 | } | ||
35 | |||
36 | static void return_trace_reset(struct trace_array *tr) | ||
37 | { | ||
38 | stop_return_trace(tr); | ||
39 | } | ||
40 | |||
41 | |||
42 | enum print_line_t | ||
43 | print_return_function(struct trace_iterator *iter) | ||
44 | { | ||
45 | struct trace_seq *s = &iter->seq; | ||
46 | struct trace_entry *entry = iter->ent; | ||
47 | struct ftrace_ret_entry *field; | ||
48 | int ret; | ||
49 | |||
50 | if (entry->type == TRACE_FN_RET) { | ||
51 | trace_assign_type(field, entry); | ||
52 | ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip); | ||
53 | if (!ret) | ||
54 | return TRACE_TYPE_PARTIAL_LINE; | ||
55 | ret = seq_print_ip_sym(s, field->ip, | ||
56 | trace_flags & TRACE_ITER_SYM_MASK); | ||
57 | if (!ret) | ||
58 | return TRACE_TYPE_PARTIAL_LINE; | ||
59 | ret = trace_seq_printf(s, " (%llu ns)\n", | ||
60 | field->rettime - field->calltime); | ||
61 | if (!ret) | ||
62 | return TRACE_TYPE_PARTIAL_LINE; | ||
63 | else | ||
64 | return TRACE_TYPE_HANDLED; | ||
65 | } | ||
66 | return TRACE_TYPE_UNHANDLED; | ||
67 | } | ||
68 | |||
69 | static struct tracer return_trace __read_mostly = | ||
70 | { | ||
71 | .name = "return", | ||
72 | .init = return_trace_init, | ||
73 | .reset = return_trace_reset, | ||
74 | .print_line = print_return_function | ||
75 | }; | ||
76 | |||
77 | static __init int init_return_trace(void) | ||
78 | { | ||
79 | return register_tracer(&return_trace); | ||
80 | } | ||
81 | |||
82 | device_initcall(init_return_trace); | ||