Diffstat:
 arch/x86/include/asm/ftrace.h         |   8
 arch/x86/kernel/entry_32.S            |  18
 arch/x86/kernel/ftrace.c              | 286
 include/linux/ftrace.h                |  69
 kernel/module.c                       |   2
 kernel/trace/Kconfig                  |   1
 kernel/trace/ftrace.c                 | 304
 kernel/trace/trace.c                  |  23
 kernel/trace/trace.h                  |   3
 kernel/trace/trace_boot.c             |   3
 kernel/trace/trace_branch.c           |   7
 kernel/trace/trace_functions.c        |   3
 kernel/trace/trace_functions_return.c |  16
 kernel/trace/trace_irqsoff.c          |   9
 kernel/trace/trace_mmiotrace.c        |   3
 kernel/trace/trace_nop.c              |   3
 kernel/trace/trace_sched_switch.c     |   3
 kernel/trace/trace_sched_wakeup.c     |   3
 kernel/trace/trace_selftest.c         |  70
 kernel/trace/trace_sysprof.c          |   3
 20 files changed, 509 insertions(+), 328 deletions(-)
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 9b6a1fa19e70..2bb43b433e07 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -17,6 +17,14 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 	 */
 	return addr - 1;
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+struct dyn_arch_ftrace {
+	/* No extra data needed for x86 */
+};
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f97621149839..74defe21ba42 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1190,7 +1190,7 @@ ENTRY(mcount)
 	jnz trace
 #ifdef CONFIG_FUNCTION_RET_TRACER
 	cmpl $ftrace_stub, ftrace_function_return
-	jnz trace_return
+	jnz ftrace_return_caller
 #endif
 .globl ftrace_stub
 ftrace_stub:
@@ -1211,9 +1211,15 @@ trace:
 	popl %ecx
 	popl %eax
 	jmp ftrace_stub
+END(mcount)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
-trace_return:
+ENTRY(ftrace_return_caller)
+	cmpl $0, function_trace_stop
+	jne ftrace_stub
+
 	pushl %eax
 	pushl %ecx
 	pushl %edx
@@ -1223,7 +1229,8 @@ trace_return:
 	popl %edx
 	popl %ecx
 	popl %eax
-	jmp ftrace_stub
+	ret
+END(ftrace_return_caller)
 
 .globl return_to_handler
 return_to_handler:
@@ -1237,10 +1244,7 @@ return_to_handler:
 	popl %ecx
 	popl %eax
 	ret
-#endif /* CONFIG_FUNCTION_RET_TRACER */
-END(mcount)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_TRACER */
+#endif
 
 .section .rodata,"a"
 #include "syscall_table_32.S"
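
For orientation: mcount (or the patched call site, under dynamic ftrace) jumps to ftrace_return_caller, which calls prepare_ftrace_return() to replace the parent's return address with return_to_handler; when the traced function returns, the trampoline asks ftrace_return_to_handler() for the real address. A minimal userspace C sketch of that flow (illustrative only, not part of the patch; it swaps a pointer in a fake stack slot rather than rewriting a live call frame, and every name is a stand-in for the kernel symbol of the same name):

#include <stdio.h>

static void *real_parent;			/* one-deep stand-in for the ret stack */

static void return_to_handler(void) { }	/* stand-in trampoline */

static void prepare_ftrace_return(void **parent)
{
	real_parent = *parent;			/* remember where to really go */
	*parent = (void *)return_to_handler;	/* divert the return */
}

static void *ftrace_return_to_handler(void)
{
	return real_parent;			/* trampoline jumps back here */
}

int main(void)
{
	void *slot = (void *)0xc0100040;	/* fake "parent ip" stack slot */

	prepare_ftrace_return(&slot);
	printf("diverted to %p, real return target %p\n",
	       slot, ftrace_return_to_handler());
	return 0;
}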
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index fe832738e1e2..924153edd973 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -24,133 +24,6 @@
 #include <asm/nmi.h>
 
 
-
-#ifdef CONFIG_FUNCTION_RET_TRACER
-
-/*
- * These functions are picked from those used on
- * this page for dynamic ftrace. They have been
- * simplified to ignore all traces in NMI context.
- */
-static atomic_t in_nmi;
-
-void ftrace_nmi_enter(void)
-{
-	atomic_inc(&in_nmi);
-}
-
-void ftrace_nmi_exit(void)
-{
-	atomic_dec(&in_nmi);
-}
-
-/* Add a function return address to the trace stack on thread info. */
-static int push_return_trace(unsigned long ret, unsigned long long time,
-			     unsigned long func)
-{
-	int index;
-	struct thread_info *ti = current_thread_info();
-
-	/* The return trace stack is full */
-	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
-		return -EBUSY;
-
-	index = ++ti->curr_ret_stack;
-	ti->ret_stack[index].ret = ret;
-	ti->ret_stack[index].func = func;
-	ti->ret_stack[index].calltime = time;
-
-	return 0;
-}
-
-/* Retrieve a function return address from the trace stack on thread info. */
-static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-			     unsigned long *func)
-{
-	int index;
-
-	struct thread_info *ti = current_thread_info();
-	index = ti->curr_ret_stack;
-	*ret = ti->ret_stack[index].ret;
-	*func = ti->ret_stack[index].func;
-	*time = ti->ret_stack[index].calltime;
-	ti->curr_ret_stack--;
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-	struct ftrace_retfunc trace;
-	pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
-	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_function_return(&trace);
-
-	return trace.ret;
-}
-
-/*
- * Hook the return address and push it in the stack of return addrs
- * in current thread info.
- */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
-{
-	unsigned long old;
-	unsigned long long calltime;
-	int faulted;
-	unsigned long return_hooker = (unsigned long)
-				&return_to_handler;
-
-	/* NMIs are currently unsupported */
-	if (atomic_read(&in_nmi))
-		return;
-
-	/*
-	 * Protect against a fault, even if it shouldn't
-	 * happen. This tool is too intrusive to
-	 * go without such a protection.
-	 */
-	asm volatile(
-		"1: movl (%[parent_old]), %[old]\n"
-		"2: movl %[return_hooker], (%[parent_replaced])\n"
-		"   movl $0, %[faulted]\n"
-
-		".section .fixup, \"ax\"\n"
-		"3: movl $1, %[faulted]\n"
-		".previous\n"
-
-		".section __ex_table, \"a\"\n"
-		"   .long 1b, 3b\n"
-		"   .long 2b, 3b\n"
-		".previous\n"
-
-		: [parent_replaced] "=r" (parent), [old] "=r" (old),
-		  [faulted] "=r" (faulted)
-		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
-		: "memory"
-	);
-
-	if (WARN_ON(faulted)) {
-		unregister_ftrace_return();
-		return;
-	}
-
-	if (WARN_ON(!__kernel_text_address(old))) {
-		unregister_ftrace_return();
-		*parent = old;
-		return;
-	}
-
-	calltime = cpu_clock(raw_smp_processor_id());
-
-	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
-		*parent = old;
-}
-
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 union ftrace_code_union {
@@ -166,7 +39,7 @@ static int ftrace_calc_offset(long ip, long addr)
 	return (int)(addr - ip);
 }
 
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
 	static union ftrace_code_union calc;
 
@@ -311,12 +184,12 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
 
 static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
 
-unsigned char *ftrace_nop_replace(void)
+static unsigned char *ftrace_nop_replace(void)
 {
 	return ftrace_nop;
 }
 
-int
+static int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
 {
@@ -349,6 +222,29 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	return 0;
 }
 
+int ftrace_make_nop(struct module *mod,
+		    struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *new, *old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_call_replace(ip, addr);
+	new = ftrace_nop_replace();
+
+	return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *new, *old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_nop_replace();
+	new = ftrace_call_replace(ip, addr);
+
+	return ftrace_modify_code(rec->ip, old, new);
+}
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long ip = (unsigned long)(&ftrace_call);
@@ -426,3 +322,133 @@ int __init ftrace_dyn_arch_init(void *data)
 	return 0;
 }
 #endif
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+
+/*
+ * These functions are picked from those used on
+ * this page for dynamic ftrace. They have been
+ * simplified to ignore all traces in NMI context.
+ */
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
+{
+	atomic_inc(&in_nmi);
+}
+
+void ftrace_nmi_exit(void)
+{
+	atomic_dec(&in_nmi);
+}
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/* Add a function return address to the trace stack on thread info. */
+static int push_return_trace(unsigned long ret, unsigned long long time,
+			     unsigned long func)
+{
+	int index;
+	struct thread_info *ti = current_thread_info();
+
+	/* The return trace stack is full */
+	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
+		return -EBUSY;
+
+	index = ++ti->curr_ret_stack;
+	barrier();
+	ti->ret_stack[index].ret = ret;
+	ti->ret_stack[index].func = func;
+	ti->ret_stack[index].calltime = time;
+
+	return 0;
+}
+
+/* Retrieve a function return address from the trace stack on thread info. */
+static void pop_return_trace(unsigned long *ret, unsigned long long *time,
+			     unsigned long *func)
+{
+	int index;
+
+	struct thread_info *ti = current_thread_info();
+	index = ti->curr_ret_stack;
+	*ret = ti->ret_stack[index].ret;
+	*func = ti->ret_stack[index].func;
+	*time = ti->ret_stack[index].calltime;
+	ti->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_retfunc trace;
+	pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_function_return(&trace);
+
+	return trace.ret;
+}
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	unsigned long long calltime;
+	int faulted;
+	unsigned long return_hooker = (unsigned long)
+				&return_to_handler;
+
+	/* NMIs are currently unsupported */
+	if (atomic_read(&in_nmi))
+		return;
+
+	/*
+	 * Protect against a fault, even if it shouldn't
+	 * happen. This tool is too intrusive to
+	 * go without such a protection.
+	 */
+	asm volatile(
+		"1: movl (%[parent_old]), %[old]\n"
+		"2: movl %[return_hooker], (%[parent_replaced])\n"
+		"   movl $0, %[faulted]\n"
+
+		".section .fixup, \"ax\"\n"
+		"3: movl $1, %[faulted]\n"
+		".previous\n"
+
+		".section __ex_table, \"a\"\n"
+		"   .long 1b, 3b\n"
+		"   .long 2b, 3b\n"
+		".previous\n"
+
+		: [parent_replaced] "=r" (parent), [old] "=r" (old),
+		  [faulted] "=r" (faulted)
+		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+		: "memory"
+	);
+
+	if (WARN_ON(faulted)) {
+		unregister_ftrace_return();
+		return;
+	}
+
+	if (WARN_ON(!__kernel_text_address(old))) {
+		unregister_ftrace_return();
+		*parent = old;
+		return;
+	}
+
+	calltime = cpu_clock(raw_smp_processor_id());
+
+	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+		*parent = old;
+}
+
+#endif /* CONFIG_FUNCTION_RET_TRACER */
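
The per-thread return stack above is the heart of the return tracer: function entry pushes {ret, func, calltime}, function exit pops it and emits the trace event. A self-contained userspace model of just that bookkeeping (a sketch, not kernel code; the names and the -EBUSY overflow convention mirror the patch, the timestamps are stubbed out):

#include <stdio.h>

#define FTRACE_RET_STACK_SIZE 32

struct ret_entry {
	unsigned long ret;		/* real return address */
	unsigned long func;		/* traced function */
	unsigned long long calltime;
};

static struct ret_entry ret_stack[FTRACE_RET_STACK_SIZE];
static int curr_ret_stack = -1;	/* lives in thread_info in the kernel */

static int push_return_trace(unsigned long ret, unsigned long long time,
			     unsigned long func)
{
	if (curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
		return -1;	/* -EBUSY: stack full, leave the call untraced */
	curr_ret_stack++;
	ret_stack[curr_ret_stack].ret = ret;
	ret_stack[curr_ret_stack].func = func;
	ret_stack[curr_ret_stack].calltime = time;
	return 0;
}

static unsigned long pop_return_trace(unsigned long long *time,
				      unsigned long *func)
{
	int index = curr_ret_stack--;

	*time = ret_stack[index].calltime;
	*func = ret_stack[index].func;
	return ret_stack[index].ret;	/* where to really return */
}

int main(void)
{
	unsigned long long t;
	unsigned long func, ret;

	/* "function entry": hijack the return address, remember it */
	push_return_trace(0xc0123456UL, 100ULL, 0xc0abcdefUL);
	/* "function exit": the trampoline asks for the original back */
	ret = pop_return_trace(&t, &func);
	printf("return to %#lx (func %#lx, calltime %llu)\n", ret, func, t);
	return 0;
}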
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 4fbc4a8b86a5..f1af1aab00e6 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -25,6 +25,17 @@ struct ftrace_ops {
 
 extern int function_trace_stop;
 
+/*
+ * Type of the current tracing.
+ */
+enum ftrace_tracing_type_t {
+	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
+	FTRACE_TYPE_RETURN,    /* Hook the return of the function */
+};
+
+/* Current tracing type, default is FTRACE_TYPE_ENTER */
+extern enum ftrace_tracing_type_t ftrace_tracing_type;
+
 /**
  * ftrace_stop - stop function tracer.
  *
@@ -74,6 +85,9 @@ static inline void ftrace_start(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
+/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
+#include <asm/ftrace.h>
+
 enum {
 	FTRACE_FL_FREE = (1 << 0),
 	FTRACE_FL_FAILED = (1 << 1),
@@ -88,6 +102,7 @@ struct dyn_ftrace {
 	struct list_head list;
 	unsigned long ip; /* address of mcount call-site */
 	unsigned long flags;
+	struct dyn_arch_ftrace arch;
 };
 
 int ftrace_force_update(void);
@@ -95,22 +110,43 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset);
 
 /* defined in arch */
 extern int ftrace_ip_converted(unsigned long ip);
-extern unsigned char *ftrace_nop_replace(void);
-extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
 extern int ftrace_dyn_arch_init(void *data);
 extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
+#ifdef CONFIG_FUNCTION_RET_TRACER
+extern void ftrace_return_caller(void);
+#endif
 
-/* May be defined in arch */
-extern int ftrace_arch_read_dyn_info(char *buf, int size);
+/**
+ * ftrace_make_nop - convert code into nop
+ * @mod: module structure if called by module load initialization
+ * @rec: the mcount call site record
+ * @addr: the address that the call site should be calling
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should be a caller to @addr
+ *
+ * Return must be:
+ *   0 on success
+ *   -EFAULT on error reading the location
+ *   -EINVAL on a failed compare of the contents
+ *   -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+extern int ftrace_make_nop(struct module *mod,
+			   struct dyn_ftrace *rec, unsigned long addr);
 
 /**
- * ftrace_modify_code - modify code segment
- * @ip: the address of the code segment
- * @old_code: the contents of what is expected to be there
- * @new_code: the code to patch in
+ * ftrace_make_call - convert a nop call site into a call to addr
+ * @rec: the mcount call site record
+ * @addr: the address that the call site should call
  *
  * This is a very sensitive operation and great care needs
  * to be taken by the arch. The operation should carefully
@@ -118,6 +154,8 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size);
  * what we expect it to be, and then on success of the compare,
  * it should write to the location.
  *
+ * The code segment at @rec->ip should be a nop
+ *
 * Return must be:
  *   0 on success
  *   -EFAULT on error reading the location
@@ -125,8 +163,11 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size);
  *   -EPERM on error writing to the location
  * Any other value will be considered a failure.
  */
-extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
-			      unsigned char *new_code);
+extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
+
+
+/* May be defined in arch */
+extern int ftrace_arch_read_dyn_info(char *buf, int size);
 
 extern int skip_trace(unsigned long ip);
 
@@ -259,11 +300,13 @@ static inline void ftrace_dump(void) { }
 
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 extern void ftrace_init(void);
-extern void ftrace_init_module(unsigned long *start, unsigned long *end);
+extern void ftrace_init_module(struct module *mod,
+			       unsigned long *start, unsigned long *end);
 #else
 static inline void ftrace_init(void) { }
 static inline void
-ftrace_init_module(unsigned long *start, unsigned long *end) { }
+ftrace_init_module(struct module *mod,
+		   unsigned long *start, unsigned long *end) { }
 #endif
 
 
@@ -281,7 +324,7 @@ struct ftrace_retfunc {
 /* Type of a callback handler of tracing return function */
 typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
 
-extern void register_ftrace_return(trace_function_return_t func);
+extern int register_ftrace_return(trace_function_return_t func);
 /* The current handler in use */
 extern trace_function_return_t ftrace_function_return;
 extern void unregister_ftrace_return(void);
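
The kernel-doc above pins down the arch contract: both helpers must compare before patching and report -EFAULT/-EINVAL/-EPERM distinctly, which is what lets the core print a precise diagnosis on failure. A hedged userspace sketch of that compare-then-patch shape (the byte values and sizes are invented for the example; only the return-code convention follows the header):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define INSN_SIZE 5	/* invented size for the example */

static unsigned char text[INSN_SIZE] = { 0xe8, 1, 2, 3, 4 }; /* fake call */

static int modify_code(unsigned char *ip, const unsigned char *expect,
		       const unsigned char *repl)
{
	/* the kernel's read can fault: that path would return -EFAULT */
	if (memcmp(ip, expect, INSN_SIZE) != 0)
		return -EINVAL;		/* not what we expected: refuse */
	/* the write can also fail in the kernel: that would be -EPERM */
	memcpy(ip, repl, INSN_SIZE);
	return 0;
}

int main(void)
{
	unsigned char call[INSN_SIZE] = { 0xe8, 1, 2, 3, 4 };
	unsigned char nop[INSN_SIZE]  = { 0x90, 0x90, 0x90, 0x90, 0x90 };
	int ret;

	/* ftrace_make_nop()-like step: expect the call, patch in the nop */
	ret = modify_code(text, call, nop);
	printf("make_nop: %d, first byte now %#x\n", ret, text[0]);

	/* second attempt must fail the compare: the call is already gone */
	ret = modify_code(text, call, nop);
	printf("again:    %d (-EINVAL is %d)\n", ret, -EINVAL);
	return 0;
}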
diff --git a/kernel/module.c b/kernel/module.c
index fc1dff9a178c..89bcf7c1327d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2192,7 +2192,7 @@ static noinline struct module *load_module(void __user *umod,
 	/* sechdrs[0].sh_size is always zero */
 	mseg = section_objs(hdr, sechdrs, secstrings, "__mcount_loc",
 			    sizeof(*mseg), &num_mcount);
-	ftrace_init_module(mseg, mseg + num_mcount);
+	ftrace_init_module(mod, mseg, mseg + num_mcount);
 
 	err = module_finalize(hdr, sechdrs, mod);
 	if (err < 0)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 9c89526b6b7c..b8378fad29a3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -59,7 +59,6 @@ config FUNCTION_TRACER
 
 config FUNCTION_RET_TRACER
 	bool "Kernel Function return Tracer"
-	depends on !DYNAMIC_FTRACE
 	depends on HAVE_FUNCTION_RET_TRACER
 	depends on FUNCTION_TRACER
 	help
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 54cb9a7d15e5..f212da486689 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -50,6 +50,9 @@ static int last_ftrace_enabled;
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
+/* By default, current tracing type is normal tracing. */
+enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -334,7 +337,7 @@ ftrace_record_ip(unsigned long ip)
 {
 	struct dyn_ftrace *rec;
 
-	if (!ftrace_enabled || ftrace_disabled)
+	if (ftrace_disabled)
 		return NULL;
 
 	rec = ftrace_alloc_dyn_node(ip);
@@ -348,107 +351,138 @@ ftrace_record_ip(unsigned long ip)
 	return rec;
 }
 
-#define FTRACE_ADDR ((long)(ftrace_caller))
+static void print_ip_ins(const char *fmt, unsigned char *p)
+{
+	int i;
+
+	printk(KERN_CONT "%s", fmt);
+
+	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
+		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+}
+
+static void ftrace_bug(int failed, unsigned long ip)
+{
+	switch (failed) {
+	case -EFAULT:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on modifying ");
+		print_ip_sym(ip);
+		break;
+	case -EINVAL:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace failed to modify ");
+		print_ip_sym(ip);
+		print_ip_ins(" actual: ", (unsigned char *)ip);
+		printk(KERN_CONT "\n");
+		break;
+	case -EPERM:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on writing ");
+		print_ip_sym(ip);
+		break;
+	default:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on unknown error ");
+		print_ip_sym(ip);
+	}
+}
+
 
 static int
-__ftrace_replace_code(struct dyn_ftrace *rec,
-		      unsigned char *old, unsigned char *new, int enable)
+__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
 	unsigned long ip, fl;
+	unsigned long ftrace_addr;
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
+		ftrace_addr = (unsigned long)ftrace_caller;
+	else
+		ftrace_addr = (unsigned long)ftrace_return_caller;
+#else
+	ftrace_addr = (unsigned long)ftrace_caller;
+#endif
 
 	ip = rec->ip;
 
-	if (ftrace_filtered && enable) {
+	/*
+	 * If this record is not to be traced and
+	 * it is not enabled then do nothing.
+	 *
+	 * If this record is not to be traced and
+	 * it is enabled then disable it.
+	 *
+	 */
+	if (rec->flags & FTRACE_FL_NOTRACE) {
+		if (rec->flags & FTRACE_FL_ENABLED)
+			rec->flags &= ~FTRACE_FL_ENABLED;
+		else
+			return 0;
+
+	} else if (ftrace_filtered && enable) {
 		/*
-		 * If filtering is on:
-		 *
-		 * If this record is set to be filtered and
-		 * is enabled then do nothing.
-		 *
-		 * If this record is set to be filtered and
-		 * it is not enabled, enable it.
-		 *
-		 * If this record is not set to be filtered
-		 * and it is not enabled do nothing.
-		 *
-		 * If this record is set not to trace then
-		 * do nothing.
-		 *
-		 * If this record is set not to trace and
-		 * it is enabled then disable it.
-		 *
-		 * If this record is not set to be filtered and
-		 * it is enabled, disable it.
+		 * Filtering is on:
 		 */
 
-		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
-				   FTRACE_FL_ENABLED);
+		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
 
-		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
-		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
-		    !fl || (fl == FTRACE_FL_NOTRACE))
+		/* Record is filtered and enabled, do nothing */
+		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
 			return 0;
 
-		/*
-		 * If it is enabled disable it,
-		 * otherwise enable it!
-		 */
-		if (fl & FTRACE_FL_ENABLED) {
-			/* swap new and old */
-			new = old;
-			old = ftrace_call_replace(ip, FTRACE_ADDR);
+		/* Record is not filtered and is not enabled, do nothing */
+		if (!fl)
+			return 0;
+
+		/* Record is not filtered but enabled, disable it */
+		if (fl == FTRACE_FL_ENABLED)
 			rec->flags &= ~FTRACE_FL_ENABLED;
-		} else {
-			new = ftrace_call_replace(ip, FTRACE_ADDR);
+		else
+			/* Otherwise record is filtered but not enabled, enable it */
 			rec->flags |= FTRACE_FL_ENABLED;
-		}
 	} else {
+		/* Disable or not filtered */
 
 		if (enable) {
-			/*
-			 * If this record is set not to trace and is
-			 * not enabled, do nothing.
-			 */
-			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
-			if (fl == FTRACE_FL_NOTRACE)
-				return 0;
-
-			new = ftrace_call_replace(ip, FTRACE_ADDR);
-		} else
-			old = ftrace_call_replace(ip, FTRACE_ADDR);
-
-		if (enable) {
+			/* if record is enabled, do nothing */
 			if (rec->flags & FTRACE_FL_ENABLED)
 				return 0;
+
 			rec->flags |= FTRACE_FL_ENABLED;
+
 		} else {
+
+			/* if record is not enabled, do nothing */
 			if (!(rec->flags & FTRACE_FL_ENABLED))
 				return 0;
+
 			rec->flags &= ~FTRACE_FL_ENABLED;
 		}
 	}
 
-	return ftrace_modify_code(ip, old, new);
+	if (rec->flags & FTRACE_FL_ENABLED)
+		return ftrace_make_call(rec, ftrace_addr);
+	else
+		return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
 static void ftrace_replace_code(int enable)
 {
 	int i, failed;
-	unsigned char *new = NULL, *old = NULL;
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
 
-	if (enable)
-		old = ftrace_nop_replace();
-	else
-		new = ftrace_nop_replace();
-
 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
 		for (i = 0; i < pg->index; i++) {
 			rec = &pg->records[i];
 
-			/* don't modify code that has already faulted */
-			if (rec->flags & FTRACE_FL_FAILED)
+			/*
+			 * Skip over free records and records that have
+			 * failed.
+			 */
+			if (rec->flags & FTRACE_FL_FREE ||
+			    rec->flags & FTRACE_FL_FAILED)
 				continue;
 
 			/* ignore updates to this record's mcount site */
@@ -459,68 +493,30 @@ static void ftrace_replace_code(int enable)
 				unfreeze_record(rec);
 			}
 
-			failed = __ftrace_replace_code(rec, old, new, enable);
+			failed = __ftrace_replace_code(rec, enable);
 			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
 				rec->flags |= FTRACE_FL_FAILED;
 				if ((system_state == SYSTEM_BOOTING) ||
 				    !core_kernel_text(rec->ip)) {
 					ftrace_free_rec(rec);
-				}
+				} else
+					ftrace_bug(failed, rec->ip);
 			}
 		}
 	}
 }
 
-static void print_ip_ins(const char *fmt, unsigned char *p)
-{
-	int i;
-
-	printk(KERN_CONT "%s", fmt);
-
-	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
-		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
-}
-
 static int
-ftrace_code_disable(struct dyn_ftrace *rec)
+ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
 {
 	unsigned long ip;
-	unsigned char *nop, *call;
 	int ret;
 
 	ip = rec->ip;
 
-	nop = ftrace_nop_replace();
-	call = ftrace_call_replace(ip, mcount_addr);
-
-	ret = ftrace_modify_code(ip, call, nop);
+	ret = ftrace_make_nop(mod, rec, mcount_addr);
 	if (ret) {
-		switch (ret) {
-		case -EFAULT:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace faulted on modifying ");
-			print_ip_sym(ip);
-			break;
-		case -EINVAL:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace failed to modify ");
-			print_ip_sym(ip);
-			print_ip_ins(" expected: ", call);
-			print_ip_ins(" actual: ", (unsigned char *)ip);
-			print_ip_ins(" replace: ", nop);
-			printk(KERN_CONT "\n");
-			break;
-		case -EPERM:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace faulted on writing ");
-			print_ip_sym(ip);
-			break;
-		default:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace faulted on unknown error ");
-			print_ip_sym(ip);
-		}
-
+		ftrace_bug(ret, ip);
 		rec->flags |= FTRACE_FL_FAILED;
 		return 0;
 	}
@@ -560,8 +556,7 @@ static void ftrace_startup(void)
 
 	mutex_lock(&ftrace_start_lock);
 	ftrace_start_up++;
-	if (ftrace_start_up == 1)
-		command |= FTRACE_ENABLE_CALLS;
+	command |= FTRACE_ENABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
 		saved_ftrace_func = ftrace_trace_function;
@@ -639,7 +634,7 @@ static cycle_t ftrace_update_time;
 static unsigned long ftrace_update_cnt;
 unsigned long ftrace_update_tot_cnt;
 
-static int ftrace_update_code(void)
+static int ftrace_update_code(struct module *mod)
 {
 	struct dyn_ftrace *p, *t;
 	cycle_t start, stop;
@@ -656,7 +651,7 @@ static int ftrace_update_code(void)
 		list_del_init(&p->list);
 
 		/* convert record (i.e., patch mcount-call with NOP) */
-		if (ftrace_code_disable(p)) {
+		if (ftrace_code_disable(mod, p)) {
 			p->flags |= FTRACE_FL_CONVERTED;
 			ftrace_update_cnt++;
 		} else
@@ -699,7 +694,7 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 
 	cnt = num_to_init / ENTRIES_PER_PAGE;
 	pr_info("ftrace: allocating %ld entries in %d pages\n",
-		num_to_init, cnt);
+		num_to_init, cnt + 1);
 
 	for (i = 0; i < cnt; i++) {
 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@@ -782,13 +777,11 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	void *p = NULL;
 	loff_t l = -1;
 
-	if (*pos != iter->pos) {
-		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
-			;
-	} else {
-		l = *pos;
-		p = t_next(m, p, &l);
-	}
+	if (*pos > iter->pos)
+		*pos = iter->pos;
+
+	l = *pos;
+	p = t_next(m, p, &l);
 
 	return p;
 }
@@ -799,15 +792,21 @@ static void t_stop(struct seq_file *m, void *p)
 
 static int t_show(struct seq_file *m, void *v)
 {
+	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec = v;
 	char str[KSYM_SYMBOL_LEN];
+	int ret = 0;
 
 	if (!rec)
 		return 0;
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 
-	seq_printf(m, "%s\n", str);
+	ret = seq_printf(m, "%s\n", str);
+	if (ret < 0) {
+		iter->pos--;
+		iter->idx--;
+	}
 
 	return 0;
 }
@@ -833,7 +832,7 @@ ftrace_avail_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 
 	iter->pg = ftrace_pages_start;
-	iter->pos = -1;
+	iter->pos = 0;
 
 	ret = seq_open(file, &show_ftrace_seq_ops);
 	if (!ret) {
@@ -920,7 +919,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
-		iter->pos = -1;
+		iter->pos = 0;
 		iter->flags = enable ? FTRACE_ITER_FILTER :
 			FTRACE_ITER_NOTRACE;
 
@@ -1211,7 +1210,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	mutex_lock(&ftrace_start_lock);
-	if (iter->filtered && ftrace_start_up && ftrace_enabled)
+	if (ftrace_start_up && ftrace_enabled)
 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
 	mutex_unlock(&ftrace_start_lock);
 	mutex_unlock(&ftrace_sysctl_lock);
@@ -1298,7 +1297,8 @@ static __init int ftrace_init_debugfs(void)
 
 fs_initcall(ftrace_init_debugfs);
 
-static int ftrace_convert_nops(unsigned long *start,
+static int ftrace_convert_nops(struct module *mod,
+			       unsigned long *start,
 			       unsigned long *end)
 {
 	unsigned long *p;
@@ -1309,23 +1309,32 @@ static int ftrace_convert_nops(unsigned long *start,
 	p = start;
 	while (p < end) {
 		addr = ftrace_call_adjust(*p++);
+		/*
+		 * Some architecture linkers will pad between
+		 * the different mcount_loc sections of different
+		 * object files to satisfy alignments.
+		 * Skip any NULL pointers.
+		 */
+		if (!addr)
+			continue;
 		ftrace_record_ip(addr);
 	}
 
 	/* disable interrupts to prevent kstop machine */
 	local_irq_save(flags);
-	ftrace_update_code();
+	ftrace_update_code(mod);
 	local_irq_restore(flags);
 	mutex_unlock(&ftrace_start_lock);
 
 	return 0;
 }
 
-void ftrace_init_module(unsigned long *start, unsigned long *end)
+void ftrace_init_module(struct module *mod,
+			unsigned long *start, unsigned long *end)
 {
 	if (ftrace_disabled || start == end)
 		return;
-	ftrace_convert_nops(start, end);
+	ftrace_convert_nops(mod, start, end);
 }
 
 extern unsigned long __start_mcount_loc[];
@@ -1355,7 +1364,8 @@ void __init ftrace_init(void)
 
 	last_ftrace_enabled = ftrace_enabled = 1;
 
-	ret = ftrace_convert_nops(__start_mcount_loc,
+	ret = ftrace_convert_nops(NULL,
+				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
 	return;
@@ -1411,10 +1421,17 @@ int register_ftrace_function(struct ftrace_ops *ops)
 		return -1;
 
 	mutex_lock(&ftrace_sysctl_lock);
+
+	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	ret = __register_ftrace_function(ops);
 	ftrace_startup();
-	mutex_unlock(&ftrace_sysctl_lock);
 
+out:
+	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
 
@@ -1480,16 +1497,45 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
+
+/* The callback that hooks the return of a function */
 trace_function_return_t ftrace_function_return =
 	(trace_function_return_t)ftrace_stub;
-void register_ftrace_return(trace_function_return_t func)
+
+int register_ftrace_return(trace_function_return_t func)
 {
+	int ret = 0;
+
+	mutex_lock(&ftrace_sysctl_lock);
+
+	/*
+	 * Don't launch return tracing if normal function
+	 * tracing is already running.
+	 */
+	if (ftrace_trace_function != ftrace_stub) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ftrace_tracing_type = FTRACE_TYPE_RETURN;
 	ftrace_function_return = func;
+	ftrace_startup();
+
+out:
+	mutex_unlock(&ftrace_sysctl_lock);
+	return ret;
 }
 
 void unregister_ftrace_return(void)
 {
+	mutex_lock(&ftrace_sysctl_lock);
+
 	ftrace_function_return = (trace_function_return_t)ftrace_stub;
+	ftrace_shutdown();
+	/* Restore normal tracing type */
+	ftrace_tracing_type = FTRACE_TYPE_ENTER;
+
+	mutex_unlock(&ftrace_sysctl_lock);
 }
 #endif
 
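
register_ftrace_function() and register_ftrace_return() above make entry tracing and return tracing mutually exclusive, since both want to own the same patched call sites. A condensed userspace model of that handshake (illustrative; the kernel versions additionally take ftrace_sysctl_lock and call ftrace_startup()/ftrace_shutdown()):

#include <errno.h>
#include <stdio.h>

enum ftrace_tracing_type_t { FTRACE_TYPE_ENTER, FTRACE_TYPE_RETURN };

static enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
static int entry_tracer_running;

static int register_function(void)	/* models register_ftrace_function() */
{
	if (ftrace_tracing_type == FTRACE_TYPE_RETURN)
		return -EBUSY;		/* the return tracer owns the hook */
	entry_tracer_running = 1;
	return 0;
}

static int register_return(void)	/* models register_ftrace_return() */
{
	if (entry_tracer_running)
		return -EBUSY;		/* normal tracing already running */
	ftrace_tracing_type = FTRACE_TYPE_RETURN;
	return 0;
}

int main(void)
{
	printf("return tracer: %d\n", register_return());	/* 0 */
	printf("entry tracer:  %d\n", register_function());	/* -EBUSY */
	return 0;
}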
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4a904623e05d..396fda034e3f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1051,7 +1051,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	 * Need to use raw, since this must be called before the
 	 * recursive protection is performed.
 	 */
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
@@ -1062,7 +1062,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	}
 
 	atomic_dec(&data->disabled);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
@@ -2638,8 +2638,11 @@ static int tracing_set_tracer(char *buf)
 		current_trace->reset(tr);
 
 	current_trace = t;
-	if (t->init)
-		t->init(tr);
+	if (t->init) {
+		ret = t->init(tr);
+		if (ret)
+			goto out;
+	}
 
 	trace_branch_enable(tr);
  out:
@@ -2655,6 +2658,9 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 	char buf[max_tracer_type_len+1];
 	int i;
 	size_t ret;
+	int err;
+
+	ret = cnt;
 
 	if (cnt > max_tracer_type_len)
 		cnt = max_tracer_type_len;
@@ -2668,12 +2674,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
 		buf[i] = 0;
 
-	ret = tracing_set_tracer(buf);
-	if (!ret)
-		ret = cnt;
+	err = tracing_set_tracer(buf);
+	if (err)
+		return err;
 
-	if (ret > 0)
-		filp->f_pos += ret;
+	filp->f_pos += ret;
 
 	return ret;
 }
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 790ea8c0e1f3..cdbd5cc22be8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -264,7 +264,8 @@ enum print_line_t {
  */
 struct tracer {
 	const char *name;
-	void (*init)(struct trace_array *tr);
+	/* Your tracer should raise a warning if init fails */
+	int (*init)(struct trace_array *tr);
 	void (*reset)(struct trace_array *tr);
 	void (*start)(struct trace_array *tr);
 	void (*stop)(struct trace_array *tr);
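
Since init() can now fail, tracing_set_tracer() (see the trace.c hunks above) must check the result and refuse to enable the tracer. A small self-contained sketch of the new contract, with a hypothetical tracer whose init fails:

#include <errno.h>
#include <stdio.h>

struct trace_array { int dummy; };

struct tracer {
	const char *name;
	/* may fail now; the core must check the result */
	int (*init)(struct trace_array *tr);
};

static int demo_init(struct trace_array *tr)
{
	(void)tr;
	return -ENOMEM;		/* pretend a buffer allocation failed */
}

static struct tracer demo = { "demo", demo_init };

/* models the tracing_set_tracer() hunk: propagate init failure */
static int tracing_set_tracer(struct tracer *t, struct trace_array *tr)
{
	if (t->init) {
		int ret = t->init(tr);
		if (ret)
			return ret;	/* do not enable a broken tracer */
	}
	return 0;
}

int main(void)
{
	struct trace_array tr = { 0 };

	printf("set_tracer(%s) = %d\n", demo.name,
	       tracing_set_tracer(&demo, &tr));
	return 0;
}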
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index cb333b7fd113..a4fa2c57e34e 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -47,7 +47,7 @@ static void reset_boot_trace(struct trace_array *tr)
 		tracing_reset(tr, cpu);
 }
 
-static void boot_trace_init(struct trace_array *tr)
+static int boot_trace_init(struct trace_array *tr)
 {
 	int cpu;
 	boot_trace = tr;
@@ -56,6 +56,7 @@ static void boot_trace_init(struct trace_array *tr)
 		tracing_reset(tr, cpu);
 
 	tracing_sched_switch_assign_trace(tr);
+	return 0;
 }
 
 static enum print_line_t
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 85265553918f..23f9b02ce967 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -41,7 +41,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	if (unlikely(!tr))
 		return;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
 		goto out;
@@ -73,7 +73,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 }
 
 static inline
@@ -125,7 +125,7 @@ static void stop_branch_trace(struct trace_array *tr)
 	disable_branch_tracing();
 }
 
-static void branch_trace_init(struct trace_array *tr)
+static int branch_trace_init(struct trace_array *tr)
 {
 	int cpu;
 
@@ -133,6 +133,7 @@ static void branch_trace_init(struct trace_array *tr)
 		tracing_reset(tr, cpu);
 
 	start_branch_trace(tr);
+	return 0;
 }
 
 static void branch_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 8693b7a0a5b2..e74f6d0a3216 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -42,9 +42,10 @@ static void stop_function_trace(struct trace_array *tr)
 	tracing_stop_cmdline_record();
 }
 
-static void function_trace_init(struct trace_array *tr)
+static int function_trace_init(struct trace_array *tr)
 {
 	start_function_trace(tr);
+	return 0;
 }
 
 static void function_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c
index 7680b21537dd..a68564af022b 100644
--- a/kernel/trace/trace_functions_return.c
+++ b/kernel/trace/trace_functions_return.c
@@ -14,28 +14,18 @@
 #include "trace.h"
 
 
-static void start_return_trace(struct trace_array *tr)
-{
-	register_ftrace_return(&trace_function_return);
-}
-
-static void stop_return_trace(struct trace_array *tr)
-{
-	unregister_ftrace_return();
-}
-
-static void return_trace_init(struct trace_array *tr)
+static int return_trace_init(struct trace_array *tr)
 {
 	int cpu;
 	for_each_online_cpu(cpu)
 		tracing_reset(tr, cpu);
 
-	start_return_trace(tr);
+	return register_ftrace_return(&trace_function_return);
 }
 
 static void return_trace_reset(struct trace_array *tr)
 {
-	stop_return_trace(tr);
+	unregister_ftrace_return();
 }
 
 
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d919d4eaa7cc..7c2e326bbc8b 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -416,11 +416,12 @@ static void irqsoff_tracer_close(struct trace_iterator *iter)
 }
 
 #ifdef CONFIG_IRQSOFF_TRACER
-static void irqsoff_tracer_init(struct trace_array *tr)
+static int irqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF;
 
 	__irqsoff_tracer_init(tr);
+	return 0;
 }
 static struct tracer irqsoff_tracer __read_mostly =
 {
@@ -442,11 +443,12 @@ static struct tracer irqsoff_tracer __read_mostly =
 #endif
 
 #ifdef CONFIG_PREEMPT_TRACER
-static void preemptoff_tracer_init(struct trace_array *tr)
+static int preemptoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_PREEMPT_OFF;
 
 	__irqsoff_tracer_init(tr);
+	return 0;
 }
 
 static struct tracer preemptoff_tracer __read_mostly =
@@ -471,11 +473,12 @@ static struct tracer preemptoff_tracer __read_mostly =
 #if defined(CONFIG_IRQSOFF_TRACER) && \
 	defined(CONFIG_PREEMPT_TRACER)
 
-static void preemptirqsoff_tracer_init(struct trace_array *tr)
+static int preemptirqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
 
 	__irqsoff_tracer_init(tr);
+	return 0;
 }
 
 static struct tracer preemptirqsoff_tracer __read_mostly =
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 51bcf370215e..433d650eda9f 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -30,13 +30,14 @@ static void mmio_reset_data(struct trace_array *tr)
 		tracing_reset(tr, cpu);
 }
 
-static void mmio_trace_init(struct trace_array *tr)
+static int mmio_trace_init(struct trace_array *tr)
 {
 	pr_debug("in %s\n", __func__);
 	mmio_trace_array = tr;
 
 	mmio_reset_data(tr);
 	enable_mmiotrace();
+	return 0;
 }
 
 static void mmio_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index 2ef1d227e7d8..0e77415caed3 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -24,7 +24,7 @@ static void stop_nop_trace(struct trace_array *tr)
 	/* Nothing to do! */
 }
 
-static void nop_trace_init(struct trace_array *tr)
+static int nop_trace_init(struct trace_array *tr)
 {
 	int cpu;
 	ctx_trace = tr;
@@ -33,6 +33,7 @@ static void nop_trace_init(struct trace_array *tr)
 		tracing_reset(tr, cpu);
 
 	start_nop_trace(tr);
+	return 0;
 }
 
 static void nop_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index be35bdfe2e38..863390557b44 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -206,10 +206,11 @@ static void stop_sched_trace(struct trace_array *tr)
 	tracing_stop_sched_switch_record();
 }
 
-static void sched_switch_trace_init(struct trace_array *tr)
+static int sched_switch_trace_init(struct trace_array *tr)
 {
 	ctx_trace = tr;
 	start_sched_trace(tr);
+	return 0;
 }
 
 static void sched_switch_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 983f2b1478c9..0067b49746c1 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -331,10 +331,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
 	unregister_trace_sched_wakeup(probe_wakeup);
 }
 
-static void wakeup_tracer_init(struct trace_array *tr)
+static int wakeup_tracer_init(struct trace_array *tr)
 {
 	wakeup_trace = tr;
 	start_wakeup_tracer(tr);
+	return 0;
 }
 
 static void wakeup_tracer_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 24e6e075e6d6..88c8eb70f54a 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -52,7 +52,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	int cpu, ret = 0;
 
 	/* Don't allow flipping of max traces now */
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&ftrace_max_lock);
 
 	cnt = ring_buffer_entries(tr->buffer);
@@ -63,7 +63,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 			break;
 	}
 	__raw_spin_unlock(&ftrace_max_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
 	if (count)
 		*count = cnt;
@@ -71,6 +71,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	return ret;
 }
 
+static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
+{
+	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
+	       trace->name, init_ret);
+}
 #ifdef CONFIG_FUNCTION_TRACER
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -111,7 +116,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	ftrace_set_filter(func_name, strlen(func_name), 1);
 
 	/* enable tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		goto out;
+	}
 
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
@@ -181,7 +190,12 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 	ftrace_enabled = 1;
 	tracer_enabled = 1;
 
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		goto out;
+	}
+
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 	/* stop the tracing. */
@@ -223,7 +237,12 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 	int ret;
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* reset the max latency */
 	tracing_max_latency = 0;
 	/* disable interrupts for a bit */
@@ -272,7 +291,12 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* reset the max latency */
 	tracing_max_latency = 0;
 	/* disable preemption for a bit */
@@ -321,7 +345,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	}
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		goto out;
+	}
 
 	/* reset the max latency */
 	tracing_max_latency = 0;
@@ -449,7 +477,12 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	wait_for_completion(&isrt);
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* reset the max latency */
 	tracing_max_latency = 0;
 
@@ -505,7 +538,12 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
 	int ret;
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 	/* stop the tracing. */
@@ -532,7 +570,12 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
 	int ret;
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return 0;
+	}
+
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 	/* stop the tracing. */
@@ -554,7 +597,12 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 	int ret;
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 	/* stop the tracing. */
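
Every selftest above now follows the same shape: call init(), and on failure warn via the new warn_failed_init_tracer() helper and bail out instead of exercising a tracer that never started. Condensed into a runnable userspace sketch (the tracer here is a made-up stub):

#include <stdio.h>

struct trace_array { int dummy; };
struct tracer {
	const char *name;
	int (*init)(struct trace_array *tr);
};

static void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printf("Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}

static int selftest(struct tracer *trace, struct trace_array *tr)
{
	int ret = trace->init(tr);

	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;	/* don't test a tracer that never started */
	}
	/* ... run the workload, stop tracing, check the buffer ... */
	return 0;
}

static int broken_init(struct trace_array *tr)
{
	(void)tr;
	return -1;
}

int main(void)
{
	struct trace_array tr = { 0 };
	struct tracer t = { "demo", broken_init };

	return selftest(&t, &tr) ? 1 : 0;
}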
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 05f753422aea..54960edb96d0 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -261,11 +261,12 @@ static void stop_stack_trace(struct trace_array *tr)
 	mutex_unlock(&sample_timer_lock);
 }
 
-static void stack_trace_init(struct trace_array *tr)
+static int stack_trace_init(struct trace_array *tr)
 {
 	sysprof_trace = tr;
 
 	start_stack_trace(tr);
+	return 0;
 }
 
 static void stack_trace_reset(struct trace_array *tr)