 arch/x86/kernel/entry_32.S            |  18 +-
 arch/x86/kernel/ftrace.c              | 258 ++++++------
 include/linux/ftrace.h                |  16 +-
 kernel/trace/Kconfig                  |   1 -
 kernel/trace/ftrace.c                 |  58 ++++-
 kernel/trace/trace_functions_return.c |  15 +--
 6 files changed, 211 insertions(+), 155 deletions(-)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f97621149839..74defe21ba42 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1190,7 +1190,7 @@ ENTRY(mcount)
 	jnz trace
 #ifdef CONFIG_FUNCTION_RET_TRACER
 	cmpl $ftrace_stub, ftrace_function_return
-	jnz trace_return
+	jnz ftrace_return_caller
 #endif
 .globl ftrace_stub
 ftrace_stub:
@@ -1211,9 +1211,15 @@ trace:
 	popl %ecx
 	popl %eax
 	jmp ftrace_stub
+END(mcount)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
-trace_return:
+ENTRY(ftrace_return_caller)
+	cmpl $0, function_trace_stop
+	jne ftrace_stub
+
 	pushl %eax
 	pushl %ecx
 	pushl %edx
@@ -1223,7 +1229,8 @@ trace_return:
 	popl %edx
 	popl %ecx
 	popl %eax
-	jmp ftrace_stub
+	ret
+END(ftrace_return_caller)
 
 .globl return_to_handler
 return_to_handler:
@@ -1237,10 +1244,7 @@ return_to_handler:
 	popl %ecx
 	popl %eax
 	ret
-#endif /* CONFIG_FUNCTION_RET_TRACER */
-END(mcount)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_TRACER */
+#endif
 
 .section .rodata,"a"
 #include "syscall_table_32.S"
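
Read together with the C hunks below, the trampolines above implement a simple round trip. A rough illustrative C model (not kernel code; the model function names are hypothetical, while prepare_ftrace_return() and ftrace_return_to_handler() are the real helpers from arch/x86/kernel/ftrace.c):

	/* Illustrative model only, assuming the kernel helpers above. */

	/* Entry side: ftrace_return_caller hands over the address of the
	 * stack slot holding the caller's return address. The hook swaps
	 * it for return_to_handler and records the original on a per-task
	 * stack. */
	void entry_side_model(unsigned long *parent, unsigned long self_addr)
	{
		prepare_ftrace_return(parent, self_addr);
	}

	/* Exit side: the traced function "returns" into return_to_handler,
	 * which logs the exit and recovers the real return address. */
	unsigned long exit_side_model(void)
	{
		return ftrace_return_to_handler();
	}
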
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index d98b5a8ecf4c..924153edd973 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -24,134 +24,6 @@
 #include <asm/nmi.h>
 
 
-
-#ifdef CONFIG_FUNCTION_RET_TRACER
-
-/*
- * These functions are picked from those used on
- * this page for dynamic ftrace. They have been
- * simplified to ignore all traces in NMI context.
- */
-static atomic_t in_nmi;
-
-void ftrace_nmi_enter(void)
-{
-	atomic_inc(&in_nmi);
-}
-
-void ftrace_nmi_exit(void)
-{
-	atomic_dec(&in_nmi);
-}
-
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func)
-{
-	int index;
-	struct thread_info *ti = current_thread_info();
-
-	/* The return trace stack is full */
-	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
-		return -EBUSY;
-
-	index = ++ti->curr_ret_stack;
-	barrier();
-	ti->ret_stack[index].ret = ret;
-	ti->ret_stack[index].func = func;
-	ti->ret_stack[index].calltime = time;
-
-	return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-				unsigned long *func)
-{
-	int index;
-
-	struct thread_info *ti = current_thread_info();
-	index = ti->curr_ret_stack;
-	*ret = ti->ret_stack[index].ret;
-	*func = ti->ret_stack[index].func;
-	*time = ti->ret_stack[index].calltime;
-	ti->curr_ret_stack--;
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-	struct ftrace_retfunc trace;
-	pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
-	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_function_return(&trace);
-
-	return trace.ret;
-}
-
-/*
- * Hook the return address and push it in the stack of return addrs
- * in current thread info.
- */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
-{
-	unsigned long old;
-	unsigned long long calltime;
-	int faulted;
-	unsigned long return_hooker = (unsigned long)
-				&return_to_handler;
-
-	/* Nmi's are currently unsupported */
-	if (atomic_read(&in_nmi))
-		return;
-
-	/*
-	 * Protect against fault, even if it shouldn't
-	 * happen. This tool is too much intrusive to
-	 * ignore such a protection.
-	 */
-	asm volatile(
-		"1: movl (%[parent_old]), %[old]\n"
-		"2: movl %[return_hooker], (%[parent_replaced])\n"
-		"   movl $0, %[faulted]\n"
-
-		".section .fixup, \"ax\"\n"
-		"3: movl $1, %[faulted]\n"
-		".previous\n"
-
-		".section __ex_table, \"a\"\n"
-		"   .long 1b, 3b\n"
-		"   .long 2b, 3b\n"
-		".previous\n"
-
-		: [parent_replaced] "=r" (parent), [old] "=r" (old),
-		  [faulted] "=r" (faulted)
-		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
-		: "memory"
-	);
-
-	if (WARN_ON(faulted)) {
-		unregister_ftrace_return();
-		return;
-	}
-
-	if (WARN_ON(!__kernel_text_address(old))) {
-		unregister_ftrace_return();
-		*parent = old;
-		return;
-	}
-
-	calltime = cpu_clock(raw_smp_processor_id());
-
-	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
-		*parent = old;
-}
-
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 union ftrace_code_union {
@@ -450,3 +322,133 @@ int __init ftrace_dyn_arch_init(void *data)
 	return 0;
 }
 #endif
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+
+/*
+ * These functions are picked from those used on
+ * this page for dynamic ftrace. They have been
+ * simplified to ignore all traces in NMI context.
+ */
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
+{
+	atomic_inc(&in_nmi);
+}
+
+void ftrace_nmi_exit(void)
+{
+	atomic_dec(&in_nmi);
+}
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/* Add a function return address to the trace stack on thread info.*/
+static int push_return_trace(unsigned long ret, unsigned long long time,
+				unsigned long func)
+{
+	int index;
+	struct thread_info *ti = current_thread_info();
+
+	/* The return trace stack is full */
+	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
+		return -EBUSY;
+
+	index = ++ti->curr_ret_stack;
+	barrier();
+	ti->ret_stack[index].ret = ret;
+	ti->ret_stack[index].func = func;
+	ti->ret_stack[index].calltime = time;
+
+	return 0;
+}
+
+/* Retrieve a function return address to the trace stack on thread info.*/
+static void pop_return_trace(unsigned long *ret, unsigned long long *time,
+				unsigned long *func)
+{
+	int index;
+
+	struct thread_info *ti = current_thread_info();
+	index = ti->curr_ret_stack;
+	*ret = ti->ret_stack[index].ret;
+	*func = ti->ret_stack[index].func;
+	*time = ti->ret_stack[index].calltime;
+	ti->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_retfunc trace;
+	pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_function_return(&trace);
+
+	return trace.ret;
+}
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	unsigned long long calltime;
+	int faulted;
+	unsigned long return_hooker = (unsigned long)
+				&return_to_handler;
+
+	/* Nmi's are currently unsupported */
+	if (atomic_read(&in_nmi))
+		return;
+
+	/*
+	 * Protect against fault, even if it shouldn't
+	 * happen. This tool is too much intrusive to
+	 * ignore such a protection.
+	 */
+	asm volatile(
+		"1: movl (%[parent_old]), %[old]\n"
+		"2: movl %[return_hooker], (%[parent_replaced])\n"
+		"   movl $0, %[faulted]\n"
+
+		".section .fixup, \"ax\"\n"
+		"3: movl $1, %[faulted]\n"
+		".previous\n"
+
+		".section __ex_table, \"a\"\n"
+		"   .long 1b, 3b\n"
+		"   .long 2b, 3b\n"
+		".previous\n"
+
+		: [parent_replaced] "=r" (parent), [old] "=r" (old),
+		  [faulted] "=r" (faulted)
+		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+		: "memory"
+	);
+
+	if (WARN_ON(faulted)) {
+		unregister_ftrace_return();
+		return;
+	}
+
+	if (WARN_ON(!__kernel_text_address(old))) {
+		unregister_ftrace_return();
+		*parent = old;
+		return;
+	}
+
+	calltime = cpu_clock(raw_smp_processor_id());
+
+	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+		*parent = old;
+}
+
+#endif /* CONFIG_FUNCTION_RET_TRACER */
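
The push_return_trace()/pop_return_trace() pair above is a bounded per-task LIFO: nested entries push in call order and returns pop in reverse order, so one array per task suffices. A minimal user-space sketch of the same idea (names and the size constant are stand-ins; the real stack lives on struct thread_info and is bounded by FTRACE_RET_STACK_SIZE):

	#include <stdio.h>

	#define RET_STACK_SIZE 8	/* stand-in for FTRACE_RET_STACK_SIZE */

	struct ret_entry {
		unsigned long ret;		/* original return address */
		unsigned long func;		/* traced function */
		unsigned long long calltime;
	};

	static struct ret_entry ret_stack[RET_STACK_SIZE];
	static int curr_ret_stack = -1;		/* -1 == empty */

	static int push_ret(unsigned long ret, unsigned long func,
			    unsigned long long t)
	{
		if (curr_ret_stack == RET_STACK_SIZE - 1)
			return -1;	/* full: this call is not hooked */
		curr_ret_stack++;
		ret_stack[curr_ret_stack].ret = ret;
		ret_stack[curr_ret_stack].func = func;
		ret_stack[curr_ret_stack].calltime = t;
		return 0;
	}

	static unsigned long pop_ret(void)
	{
		unsigned long r = ret_stack[curr_ret_stack].ret;
		curr_ret_stack--;
		return r;
	}

	int main(void)
	{
		push_ret(0x1000, 0xa000, 1);	/* outer function entered */
		push_ret(0x2000, 0xb000, 2);	/* nested function entered */
		printf("inner returns to %#lx\n", pop_ret());	/* 0x2000 */
		printf("outer returns to %#lx\n", pop_ret());	/* 0x1000 */
		return 0;
	}
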
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 166a2070ef65..f1af1aab00e6 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -25,6 +25,17 @@ struct ftrace_ops {
 
 extern int function_trace_stop;
 
+/*
+ * Type of the current tracing.
+ */
+enum ftrace_tracing_type_t {
+	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
+	FTRACE_TYPE_RETURN, /* Hook the return of the function */
+};
+
+/* Current tracing type, default is FTRACE_TYPE_ENTER */
+extern enum ftrace_tracing_type_t ftrace_tracing_type;
+
 /**
  * ftrace_stop - stop function tracer.
  *
@@ -104,6 +115,9 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
+#ifdef CONFIG_FUNCTION_RET_TRACER
+extern void ftrace_return_caller(void);
+#endif
 
 /**
  * ftrace_make_nop - convert code into top
@@ -310,7 +324,7 @@ struct ftrace_retfunc {
 /* Type of a callback handler of tracing return function */
 typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
 
-extern void register_ftrace_return(trace_function_return_t func);
+extern int register_ftrace_return(trace_function_return_t func);
 /* The current handler in use */
 extern trace_function_return_t ftrace_function_return;
 extern void unregister_ftrace_return(void);
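
A usage sketch of the API declared above, from a tracer's point of view; my_return_handler and the init/exit pair are hypothetical, and the int return type of register_ftrace_return() is new in this patch (registration can now fail):

	#include <linux/ftrace.h>

	/* Hypothetical callback: runs for each traced function return. */
	static void my_return_handler(struct ftrace_retfunc *trace)
	{
		/* trace->func entered at trace->calltime and returned to
		 * trace->ret at trace->rettime. */
	}

	static int my_tracer_init(void)
	{
		/* Fails with -EBUSY if normal function tracing is running. */
		return register_ftrace_return(&my_return_handler);
	}

	static void my_tracer_exit(void)
	{
		unregister_ftrace_return();
	}
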
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 9c89526b6b7c..b8378fad29a3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -59,7 +59,6 @@ config FUNCTION_TRACER
 
 config FUNCTION_RET_TRACER
 	bool "Kernel Function return Tracer"
-	depends on !DYNAMIC_FTRACE
 	depends on HAVE_FUNCTION_RET_TRACER
 	depends on FUNCTION_TRACER
 	help
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b42ec1de546b..2f78a45aac14 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -50,6 +50,9 @@ static int last_ftrace_enabled;
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
+/* By default, current tracing type is normal tracing. */
+enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -385,12 +388,21 @@ static void ftrace_bug(int failed, unsigned long ip)
 	}
 }
 
-#define FTRACE_ADDR ((long)(ftrace_caller))
 
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
 	unsigned long ip, fl;
+	unsigned long ftrace_addr;
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
+		ftrace_addr = (unsigned long)ftrace_caller;
+	else
+		ftrace_addr = (unsigned long)ftrace_return_caller;
+#else
+	ftrace_addr = (unsigned long)ftrace_caller;
+#endif
 
 	ip = rec->ip;
 
@@ -450,9 +462,9 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 	}
 
 	if (rec->flags & FTRACE_FL_ENABLED)
-		return ftrace_make_call(rec, FTRACE_ADDR);
+		return ftrace_make_call(rec, ftrace_addr);
 	else
-		return ftrace_make_nop(NULL, rec, FTRACE_ADDR);
+		return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
 static void ftrace_replace_code(int enable)
@@ -1405,10 +1417,17 @@ int register_ftrace_function(struct ftrace_ops *ops)
 		return -1;
 
 	mutex_lock(&ftrace_sysctl_lock);
+
+	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	ret = __register_ftrace_function(ops);
 	ftrace_startup();
-	mutex_unlock(&ftrace_sysctl_lock);
 
+out:
+	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
 
@@ -1474,16 +1493,45 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
+
+/* The callback that hooks the return of a function */
 trace_function_return_t ftrace_function_return =
 			(trace_function_return_t)ftrace_stub;
-void register_ftrace_return(trace_function_return_t func)
+
+int register_ftrace_return(trace_function_return_t func)
 {
+	int ret = 0;
+
+	mutex_lock(&ftrace_sysctl_lock);
+
+	/*
+	 * Don't launch return tracing if normal function
+	 * tracing is already running.
+	 */
+	if (ftrace_trace_function != ftrace_stub) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ftrace_tracing_type = FTRACE_TYPE_RETURN;
 	ftrace_function_return = func;
+	ftrace_startup();
+
+out:
+	mutex_unlock(&ftrace_sysctl_lock);
+	return ret;
 }
 
 void unregister_ftrace_return(void)
 {
+	mutex_lock(&ftrace_sysctl_lock);
+
 	ftrace_function_return = (trace_function_return_t)ftrace_stub;
+	ftrace_shutdown();
+	/* Restore normal tracing type */
+	ftrace_tracing_type = FTRACE_TYPE_ENTER;
+
+	mutex_unlock(&ftrace_sysctl_lock);
 }
 #endif
 
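
The net effect of the two registration paths above: entry tracing and return tracing now exclude each other under ftrace_sysctl_lock. A hedged illustration with hypothetical handlers:

	/* Hypothetical handlers, for illustration only. */
	static void my_entry_handler(unsigned long ip, unsigned long parent_ip)
	{
	}

	static void my_return_handler(struct ftrace_retfunc *trace)
	{
	}

	static struct ftrace_ops my_ops = { .func = my_entry_handler };

	static void exclusion_demo(void)
	{
		register_ftrace_function(&my_ops);	/* entry tracing on */

		/* refused: ftrace_trace_function != ftrace_stub */
		WARN_ON(register_ftrace_return(&my_return_handler) != -EBUSY);

		unregister_ftrace_function(&my_ops);
		WARN_ON(register_ftrace_return(&my_return_handler) != 0);

		/* refused the other way: ftrace_tracing_type == FTRACE_TYPE_RETURN */
		WARN_ON(register_ftrace_function(&my_ops) != -EBUSY);

		unregister_ftrace_return();
	}
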
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c
index 61185f756a13..a68564af022b 100644
--- a/kernel/trace/trace_functions_return.c
+++ b/kernel/trace/trace_functions_return.c
@@ -14,29 +14,18 @@
 #include "trace.h"
 
 
-static void start_return_trace(struct trace_array *tr)
-{
-	register_ftrace_return(&trace_function_return);
-}
-
-static void stop_return_trace(struct trace_array *tr)
-{
-	unregister_ftrace_return();
-}
-
 static int return_trace_init(struct trace_array *tr)
 {
 	int cpu;
 	for_each_online_cpu(cpu)
 		tracing_reset(tr, cpu);
 
-	start_return_trace(tr);
-	return 0;
+	return register_ftrace_return(&trace_function_return);
 }
 
 static void return_trace_reset(struct trace_array *tr)
 {
-	stop_return_trace(tr);
+	unregister_ftrace_return();
 }
 
 