author     Frederic Weisbecker <fweisbec@gmail.com>    2008-11-16 00:02:06 -0500
committer  Ingo Molnar <mingo@elte.hu>                 2008-11-16 01:57:38 -0500
commit     e7d3737ea1b102030f44e96c97754101e41515f0
tree       44fdc3dea100d1fa639e6ba3cb1bfca2ab40e70b    /arch/x86/kernel/ftrace.c
parent     b01c746617da5e260803eb10ed64ca043e9a1241
tracing/function-return-tracer: support for dynamic ftrace on function return tracer
This patch adds support for dynamic tracing on the function return tracer. The difference from normal dynamic function tracing is that we don't need to hook a particular callback: all we want is to dynamically nop out or restore the calls to ftrace_caller (which is ftrace_return_caller here). Some safety checks ensure that we are not trying to launch dynamic tracing for return tracing while normal function tracing is already running.

An example of a trace with getnstimeofday set as a filter:

ktime_get_ts+0x22/0x50 -> getnstimeofday (2283 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1396 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1382 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1825 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1426 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1464 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1524 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1382 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1382 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1434 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1464 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1502 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1404 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1397 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1051 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1314 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1344 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1163 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1390 ns)
ktime_get_ts+0x22/0x50 -> getnstimeofday (1374 ns)

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
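For readers of the trace above: each duration is the difference between the return-time and call-time stamps that ftrace_return_to_handler() stores in a struct ftrace_retfunc (fields ret, func, calltime, rettime, as used in the code below). A minimal sketch of a consumer that would print lines in that format follows; register_ftrace_return() and the callback prototype are assumptions inferred from the unregister_ftrace_return() calls in this patch, not something this patch defines:

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Hypothetical handler: print "caller -> callee (duration ns)" records
 * like the example trace above. Field names follow what this patch
 * fills in via pop_return_trace() and ftrace_return_to_handler(). */
static void demo_return_handler(struct ftrace_retfunc *trace)
{
        unsigned long long duration = trace->rettime - trace->calltime;

        printk(KERN_INFO "%lx -> %lx (%llu ns)\n",
               trace->ret, trace->func, duration);
}

static int __init demo_init(void)
{
        /* Assumed counterpart of the unregister_ftrace_return() used below. */
        return register_ftrace_return(demo_return_handler);
}

static void __exit demo_exit(void)
{
        unregister_ftrace_return();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");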
Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--   arch/x86/kernel/ftrace.c   258
1 file changed, 130 insertions, 128 deletions
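The "nop or set dynamically the calls" mentioned in the commit message comes down to rewriting the 5-byte mcount call sites: dynamic ftrace either overwrites a site with a 5-byte nop or points it at ftrace_return_caller. A rough, self-contained sketch of how such a call instruction is composed on x86; it mirrors the ftrace_code_union idea already present in this file, but make_call() and the constants here are illustrative, not the kernel's helpers:

#include <stdint.h>
#include <string.h>

#define MCOUNT_INSN_SIZE 5              /* size of "call rel32" on x86 */

union code_union {
        unsigned char code[MCOUNT_INSN_SIZE];
        struct {
                unsigned char e8;       /* call opcode */
                int32_t offset;         /* rel32, relative to the next insn */
        } __attribute__((packed));
};

/* Build the 5 bytes for "call target" located at address ip. */
static void make_call(unsigned long ip, unsigned long target,
                      unsigned char out[MCOUNT_INSN_SIZE])
{
        union code_union c;

        c.e8 = 0xe8;
        c.offset = (int32_t)(target - (ip + MCOUNT_INSN_SIZE));
        memcpy(out, c.code, MCOUNT_INSN_SIZE);
}

Enabling return tracing for a filtered function then amounts to swapping that site's nop bytes for the call built by make_call(), and disabling it means writing the nop back.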
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index d98b5a8ecf4c..924153edd973 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -24,134 +24,6 @@
 #include <asm/nmi.h>
 
 
-
-#ifdef CONFIG_FUNCTION_RET_TRACER
-
-/*
- * These functions are picked from those used on
- * this page for dynamic ftrace. They have been
- * simplified to ignore all traces in NMI context.
- */
-static atomic_t in_nmi;
-
-void ftrace_nmi_enter(void)
-{
-        atomic_inc(&in_nmi);
-}
-
-void ftrace_nmi_exit(void)
-{
-        atomic_dec(&in_nmi);
-}
-
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-                                unsigned long func)
-{
-        int index;
-        struct thread_info *ti = current_thread_info();
-
-        /* The return trace stack is full */
-        if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
-                return -EBUSY;
-
-        index = ++ti->curr_ret_stack;
-        barrier();
-        ti->ret_stack[index].ret = ret;
-        ti->ret_stack[index].func = func;
-        ti->ret_stack[index].calltime = time;
-
-        return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-                                unsigned long *func)
-{
-        int index;
-
-        struct thread_info *ti = current_thread_info();
-        index = ti->curr_ret_stack;
-        *ret = ti->ret_stack[index].ret;
-        *func = ti->ret_stack[index].func;
-        *time = ti->ret_stack[index].calltime;
-        ti->curr_ret_stack--;
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-        struct ftrace_retfunc trace;
-        pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
-        trace.rettime = cpu_clock(raw_smp_processor_id());
-        ftrace_function_return(&trace);
-
-        return trace.ret;
-}
-
-/*
- * Hook the return address and push it in the stack of return addrs
- * in current thread info.
- */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
-{
-        unsigned long old;
-        unsigned long long calltime;
-        int faulted;
-        unsigned long return_hooker = (unsigned long)
-                                &return_to_handler;
-
-        /* Nmi's are currently unsupported */
-        if (atomic_read(&in_nmi))
-                return;
-
-        /*
-         * Protect against fault, even if it shouldn't
-         * happen. This tool is too much intrusive to
-         * ignore such a protection.
-         */
-        asm volatile(
-                "1: movl (%[parent_old]), %[old]\n"
-                "2: movl %[return_hooker], (%[parent_replaced])\n"
-                "   movl $0, %[faulted]\n"
-
-                ".section .fixup, \"ax\"\n"
-                "3: movl $1, %[faulted]\n"
-                ".previous\n"
-
-                ".section __ex_table, \"a\"\n"
-                "   .long 1b, 3b\n"
-                "   .long 2b, 3b\n"
-                ".previous\n"
-
-                : [parent_replaced] "=r" (parent), [old] "=r" (old),
-                  [faulted] "=r" (faulted)
-                : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
-                : "memory"
-        );
-
-        if (WARN_ON(faulted)) {
-                unregister_ftrace_return();
-                return;
-        }
-
-        if (WARN_ON(!__kernel_text_address(old))) {
-                unregister_ftrace_return();
-                *parent = old;
-                return;
-        }
-
-        calltime = cpu_clock(raw_smp_processor_id());
-
-        if (push_return_trace(old, calltime, self_addr) == -EBUSY)
-                *parent = old;
-}
-
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 union ftrace_code_union {
@@ -450,3 +322,133 @@ int __init ftrace_dyn_arch_init(void *data)
         return 0;
 }
 #endif
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+
+/*
+ * These functions are picked from those used on
+ * this page for dynamic ftrace. They have been
+ * simplified to ignore all traces in NMI context.
+ */
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
+{
+        atomic_inc(&in_nmi);
+}
+
+void ftrace_nmi_exit(void)
+{
+        atomic_dec(&in_nmi);
+}
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/* Add a function return address to the trace stack on thread info.*/
+static int push_return_trace(unsigned long ret, unsigned long long time,
+                                unsigned long func)
+{
+        int index;
+        struct thread_info *ti = current_thread_info();
+
+        /* The return trace stack is full */
+        if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
+                return -EBUSY;
+
+        index = ++ti->curr_ret_stack;
+        barrier();
+        ti->ret_stack[index].ret = ret;
+        ti->ret_stack[index].func = func;
+        ti->ret_stack[index].calltime = time;
+
+        return 0;
+}
+
+/* Retrieve a function return address to the trace stack on thread info.*/
+static void pop_return_trace(unsigned long *ret, unsigned long long *time,
+                                unsigned long *func)
+{
+        int index;
+
+        struct thread_info *ti = current_thread_info();
+        index = ti->curr_ret_stack;
+        *ret = ti->ret_stack[index].ret;
+        *func = ti->ret_stack[index].func;
+        *time = ti->ret_stack[index].calltime;
+        ti->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+        struct ftrace_retfunc trace;
+        pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
+        trace.rettime = cpu_clock(raw_smp_processor_id());
+        ftrace_function_return(&trace);
+
+        return trace.ret;
+}
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+        unsigned long old;
+        unsigned long long calltime;
+        int faulted;
+        unsigned long return_hooker = (unsigned long)
+                                &return_to_handler;
+
+        /* Nmi's are currently unsupported */
+        if (atomic_read(&in_nmi))
+                return;
+
+        /*
+         * Protect against fault, even if it shouldn't
+         * happen. This tool is too much intrusive to
+         * ignore such a protection.
+         */
+        asm volatile(
+                "1: movl (%[parent_old]), %[old]\n"
+                "2: movl %[return_hooker], (%[parent_replaced])\n"
+                "   movl $0, %[faulted]\n"
+
+                ".section .fixup, \"ax\"\n"
+                "3: movl $1, %[faulted]\n"
+                ".previous\n"
+
+                ".section __ex_table, \"a\"\n"
+                "   .long 1b, 3b\n"
+                "   .long 2b, 3b\n"
+                ".previous\n"
+
+                : [parent_replaced] "=r" (parent), [old] "=r" (old),
+                  [faulted] "=r" (faulted)
+                : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+                : "memory"
+        );
+
+        if (WARN_ON(faulted)) {
+                unregister_ftrace_return();
+                return;
+        }
+
+        if (WARN_ON(!__kernel_text_address(old))) {
+                unregister_ftrace_return();
+                *parent = old;
+                return;
+        }
+
+        calltime = cpu_clock(raw_smp_processor_id());
+
+        if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+                *parent = old;
+}
+
+#endif /* CONFIG_FUNCTION_RET_TRACER */
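Note on the helpers added above: push_return_trace() and pop_return_trace() index a small per-thread array reserved in struct thread_info, which is defined outside this file. For reading convenience, here is a rough sketch of the layout they assume; the structure and field names follow the usage above, while the array depth and the exact home of the definitions (the x86 thread_info/ftrace headers) are assumptions:

/* Sketch only -- not part of this patch. */
#define FTRACE_RET_STACK_SIZE   32      /* depth value is an assumption */

struct ftrace_ret_stack {
        unsigned long           ret;            /* caller's return address */
        unsigned long           func;           /* address of the traced function */
        unsigned long long      calltime;       /* cpu_clock() timestamp at entry */
};

/* struct thread_info gains roughly these members when
 * CONFIG_FUNCTION_RET_TRACER is enabled: */
struct thread_info_sketch {
        /* ... existing thread_info fields ... */
        int                     curr_ret_stack; /* index of last pushed entry, -1 when empty */
        struct ftrace_ret_stack ret_stack[FTRACE_RET_STACK_SIZE];
};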