author	Steven Rostedt <srostedt@redhat.com>	2008-05-12 15:20:43 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 14:33:47 -0400
commit	d61f82d06672f57fca410da6f7fffd15867db622 (patch)
tree	62ef5573934eaa638c0d39a45d789691aecbd7d3 /arch
parent	3c1720f00bb619302ba19d55986ab565e74d06db (diff)
ftrace: use dynamic patching for updating mcount calls
This patch replaces the indirect call to the mcount function pointer with a direct call that will be patched by the dynamic ftrace routines.

On boot up, the mcount function calls the ftrace_stub function. When the dynamic ftrace code is initialized, ftrace_stub is replaced with a call to ftrace_record_ip, which records the instruction pointers of the locations that call it. Later, the ftraced daemon calls kstop_machine and patches all of those locations to nops.

When ftrace is enabled, the original calls to mcount are instead set to call ftrace_caller, which makes a direct call to the registered ftrace function. This direct call is also patched whenever the function to be called is updated.

All patching is performed by a kstop_machine routine to prevent the race conditions associated with modifying code on the fly.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
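For reference, the patching described above rewrites a 5-byte x86 "call rel32" instruction in place. The sketch below is illustrative only: it shows roughly how a helper such as ftrace_call_replace() (used in the arch/x86/kernel/ftrace.c hunk below, but defined outside this diff) could assemble the replacement bytes. The union layout and function name here are simplified assumptions, not the kernel's exact code.

/*
 * Illustrative sketch only -- not the kernel's exact implementation.
 * A near relative call on x86 is 5 bytes: the 0xe8 opcode followed by
 * a signed 32-bit displacement measured from the end of the instruction.
 */
union call_insn {
	unsigned char code[5];
	struct {
		unsigned char opcode;	/* 0xe8 = call rel32 */
		int offset;		/* target - (ip + 5) */
	} __attribute__((packed));
};

static union call_insn insn;

/* Build the 5 bytes that redirect the call site at 'ip' to 'addr'. */
static unsigned char *sketch_call_replace(unsigned long ip, unsigned long addr)
{
	insn.opcode = 0xe8;
	insn.offset = (int)(addr - (ip + 5));
	return insn.code;
}

Something like ftrace_modify_code() would then compare the bytes currently at the call site against the expected old sequence and, only if they match, write the new bytes, with the whole exchange running under kstop_machine as the functions in the ftrace.c hunk below suggest.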
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/entry_32.S |  47
-rw-r--r--	arch/x86/kernel/entry_64.S |  67
-rw-r--r--	arch/x86/kernel/ftrace.c   |  41
3 files changed, 150 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f47b9b5440d2..e6517ce0b824 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1110,10 +1110,50 @@ ENDPROC(xen_failsafe_callback)
 #endif /* CONFIG_XEN */
 
 #ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+ENTRY(mcount)
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	movl 0xc(%esp), %eax
+
+.globl mcount_call
+mcount_call:
+	call ftrace_stub
+
+	popl %edx
+	popl %ecx
+	popl %eax
+
+	ret
+END(mcount)
+
+ENTRY(ftrace_caller)
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	movl 0xc(%esp), %eax
+	movl 0x4(%ebp), %edx
+
+.globl ftrace_call
+ftrace_call:
+	call ftrace_stub
+
+	popl %edx
+	popl %ecx
+	popl %eax
+
+.globl ftrace_stub
+ftrace_stub:
+	ret
+END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
 ENTRY(mcount)
 	cmpl $ftrace_stub, ftrace_trace_function
 	jnz trace
-
 .globl ftrace_stub
 ftrace_stub:
 	ret
@@ -1126,7 +1166,7 @@ trace:
 	movl 0xc(%esp), %eax
 	movl 0x4(%ebp), %edx
 
-	call *ftrace_trace_function
+	call *ftrace_trace_function
 
 	popl %edx
 	popl %ecx
@@ -1134,7 +1174,8 @@ trace:
 
 	jmp ftrace_stub
 END(mcount)
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FTRACE */
 
 .section .rodata,"a"
 #include "syscall_table_32.S"
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index f046e0c64883..fe25e5febca3 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -55,6 +55,70 @@
 	.code64
 
 #ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(mcount)
+
+	subq $0x38, %rsp
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+
+	movq 0x38(%rsp), %rdi
+
+.globl mcount_call
+mcount_call:
+	call ftrace_stub
+
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $0x38, %rsp
+
+	retq
+END(mcount)
+
+ENTRY(ftrace_caller)
+
+	/* taken from glibc */
+	subq $0x38, %rsp
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+
+	movq 0x38(%rsp), %rdi
+	movq 8(%rbp), %rsi
+
+.globl ftrace_call
+ftrace_call:
+	call ftrace_stub
+
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $0x38, %rsp
+
+.globl ftrace_stub
+ftrace_stub:
+	retq
+END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
 ENTRY(mcount)
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace
@@ -89,7 +153,8 @@ trace:
 
 	jmp ftrace_stub
 END(mcount)
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FTRACE */
 
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index b69795efa226..9f44623e0072 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -109,10 +109,49 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	return faulted;
 }
 
-int __init ftrace_dyn_arch_init(void)
+notrace int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned long ip = (unsigned long)(&ftrace_call);
+	unsigned char old[5], *new;
+	int ret;
+
+	ip += CALL_BACK;
+
+	memcpy(old, &ftrace_call, 5);
+	new = ftrace_call_replace(ip, (unsigned long)func);
+	ret = ftrace_modify_code(ip, old, new);
+
+	return ret;
+}
+
+notrace int ftrace_mcount_set(unsigned long *data)
+{
+	unsigned long ip = (long)(&mcount_call);
+	unsigned long *addr = data;
+	unsigned char old[5], *new;
+
+	/* ip is at the location, but modify code will subtract this */
+	ip += CALL_BACK;
+
+	/*
+	 * Replace the mcount stub with a pointer to the
+	 * ip recorder function.
+	 */
+	memcpy(old, &mcount_call, 5);
+	new = ftrace_call_replace(ip, *addr);
+	*addr = ftrace_modify_code(ip, old, new);
+
+	return 0;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
 {
 	const unsigned char *const *noptable = find_nop_table();
 
+	/* This is running in kstop_machine */
+
+	ftrace_mcount_set(data);
+
 	ftrace_nop = (unsigned long *)noptable[CALL_BACK];
 
 	return 0;