author		Steven Rostedt <srostedt@redhat.com>	2008-05-12 15:20:43 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 14:33:47 -0400
commit		d61f82d06672f57fca410da6f7fffd15867db622
tree		62ef5573934eaa638c0d39a45d789691aecbd7d3
parent		3c1720f00bb619302ba19d55986ab565e74d06db
ftrace: use dynamic patching for updating mcount calls
This patch replaces the indirect call to the mcount function
pointer with a direct call that will be patched by the
dynamic ftrace routines.
On boot up, the mcount function calls the ftrace_stub function.
When the dynamic ftrace code is initialized, the ftrace_stub
is replaced with a call to ftrace_record_ip, which records
the instruction pointers of the locations that call it.
Later, the ftraced daemon will call kstop_machine and patch all
the locations to nops.
When ftrace is enabled, the original calls to mcount will now
be set to call ftrace_caller, which will do a direct call
to the registered ftrace function. This direct call is also
patched when the function that should be called is updated.
All patching is performed by a kstop_machine routine to prevent
the race conditions associated with modifying code on the fly.
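Purely as an illustration (not part of this patch): each patched site is
a 5-byte x86 call instruction, so redirecting one boils down to rewriting
the 0xe8 opcode and its 32-bit PC-relative displacement. A minimal sketch
of that encoding, using a hypothetical build_call_insn() helper in place
of the arch's ftrace_call_replace():

	#include <stdint.h>
	#include <string.h>

	/*
	 * Illustrative only: encode "call target" as 0xe8 followed by a
	 * displacement relative to the end of the instruction (ip + 5).
	 * The patch itself builds these bytes with ftrace_call_replace()
	 * and swaps them in under kstop_machine via ftrace_modify_code().
	 */
	static void build_call_insn(unsigned char insn[5],
				    unsigned long ip, unsigned long target)
	{
		int32_t rel = (int32_t)(target - (ip + 5));

		insn[0] = 0xe8;			/* CALL rel32 */
		memcpy(&insn[1], &rel, sizeof(rel));
	}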
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	arch/x86/kernel/entry_32.S	 47
-rw-r--r--	arch/x86/kernel/entry_64.S	 67
-rw-r--r--	arch/x86/kernel/ftrace.c	 41
-rw-r--r--	include/linux/ftrace.h		  7
-rw-r--r--	kernel/trace/ftrace.c		183
5 files changed, 261 insertions, 84 deletions
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f47b9b5440d2..e6517ce0b824 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1110,10 +1110,50 @@ ENDPROC(xen_failsafe_callback)
 #endif /* CONFIG_XEN */
 
 #ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+ENTRY(mcount)
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	movl 0xc(%esp), %eax
+
+.globl mcount_call
+mcount_call:
+	call ftrace_stub
+
+	popl %edx
+	popl %ecx
+	popl %eax
+
+	ret
+END(mcount)
+
+ENTRY(ftrace_caller)
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	movl 0xc(%esp), %eax
+	movl 0x4(%ebp), %edx
+
+.globl ftrace_call
+ftrace_call:
+	call ftrace_stub
+
+	popl %edx
+	popl %ecx
+	popl %eax
+
+.globl ftrace_stub
+ftrace_stub:
+	ret
+END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
 ENTRY(mcount)
 	cmpl $ftrace_stub, ftrace_trace_function
 	jnz trace
-
 .globl ftrace_stub
 ftrace_stub:
 	ret
@@ -1126,7 +1166,7 @@ trace:
 	movl 0xc(%esp), %eax
 	movl 0x4(%ebp), %edx
 
-	call *ftrace_trace_function
+	call *ftrace_trace_function
 
 	popl %edx
 	popl %ecx
@@ -1134,7 +1174,8 @@ trace:
 
 	jmp ftrace_stub
 END(mcount)
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FTRACE */
 
 .section .rodata,"a"
 #include "syscall_table_32.S"
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index f046e0c64883..fe25e5febca3 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -55,6 +55,70 @@
 	.code64
 
 #ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(mcount)
+
+	subq $0x38, %rsp
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+
+	movq 0x38(%rsp), %rdi
+
+.globl mcount_call
+mcount_call:
+	call ftrace_stub
+
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $0x38, %rsp
+
+	retq
+END(mcount)
+
+ENTRY(ftrace_caller)
+
+	/* taken from glibc */
+	subq $0x38, %rsp
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+
+	movq 0x38(%rsp), %rdi
+	movq 8(%rbp), %rsi
+
+.globl ftrace_call
+ftrace_call:
+	call ftrace_stub
+
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $0x38, %rsp
+
+.globl ftrace_stub
+ftrace_stub:
+	retq
+END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
 ENTRY(mcount)
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace
@@ -89,7 +153,8 @@ trace:
 
 	jmp ftrace_stub
 END(mcount)
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FTRACE */
 
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index b69795efa226..9f44623e0072 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -109,10 +109,49 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	return faulted;
 }
 
-int __init ftrace_dyn_arch_init(void)
+notrace int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned long ip = (unsigned long)(&ftrace_call);
+	unsigned char old[5], *new;
+	int ret;
+
+	ip += CALL_BACK;
+
+	memcpy(old, &ftrace_call, 5);
+	new = ftrace_call_replace(ip, (unsigned long)func);
+	ret = ftrace_modify_code(ip, old, new);
+
+	return ret;
+}
+
+notrace int ftrace_mcount_set(unsigned long *data)
+{
+	unsigned long ip = (long)(&mcount_call);
+	unsigned long *addr = data;
+	unsigned char old[5], *new;
+
+	/* ip is at the location, but modify code will subtact this */
+	ip += CALL_BACK;
+
+	/*
+	 * Replace the mcount stub with a pointer to the
+	 * ip recorder function.
+	 */
+	memcpy(old, &mcount_call, 5);
+	new = ftrace_call_replace(ip, *addr);
+	*addr = ftrace_modify_code(ip, old, new);
+
+	return 0;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
 {
 	const unsigned char *const *noptable = find_nop_table();
 
+	/* This is running in kstop_machine */
+
+	ftrace_mcount_set(data);
+
 	ftrace_nop = (unsigned long *)noptable[CALL_BACK];
 
 	return 0;
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index d509ad6c9cb8..b0dd0093058f 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -56,9 +56,14 @@ struct dyn_ftrace {
 extern int ftrace_ip_converted(unsigned long ip);
 extern unsigned char *ftrace_nop_replace(void);
 extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
-extern int ftrace_dyn_arch_init(void);
+extern int ftrace_dyn_arch_init(void *data);
+extern int ftrace_mcount_set(unsigned long *data);
 extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 			      unsigned char *new_code);
+extern int ftrace_update_ftrace_func(ftrace_func_t func);
+extern void ftrace_caller(void);
+extern void ftrace_call(void);
+extern void mcount_call(void);
 #endif
 
 #ifdef CONFIG_FRAME_POINTER
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f6d9af3bf66b..88544f9bc0ed 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -26,14 +26,8 @@
 
 #include "trace.h"
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-# define FTRACE_ENABLED_INIT 1
-#else
-# define FTRACE_ENABLED_INIT 0
-#endif
-
-int ftrace_enabled = FTRACE_ENABLED_INIT;
-static int last_ftrace_enabled = FTRACE_ENABLED_INIT;
+int ftrace_enabled;
+static int last_ftrace_enabled;
 
 static DEFINE_SPINLOCK(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
@@ -149,6 +143,14 @@ static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+enum {
+	FTRACE_ENABLE_CALLS = (1 << 0),
+	FTRACE_DISABLE_CALLS = (1 << 1),
+	FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
+	FTRACE_ENABLE_MCOUNT = (1 << 3),
+	FTRACE_DISABLE_MCOUNT = (1 << 4),
+};
+
 static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
 
 static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
@@ -199,12 +201,8 @@ ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
 	hlist_add_head(&node->node, &ftrace_hash[key]);
 }
 
-static notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
+static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 {
-	/* If this was already converted, skip it */
-	if (ftrace_ip_converted(ip))
-		return NULL;
-
 	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
 		if (!ftrace_pages->next)
 			return NULL;
@@ -215,7 +213,7 @@ static notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
 }
 
 static void notrace
-ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
+ftrace_record_ip(unsigned long ip)
 {
 	struct dyn_ftrace *node;
 	unsigned long flags;
@@ -223,6 +221,9 @@ ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
 	int resched;
 	int atomic;
 
+	if (!ftrace_enabled)
+		return;
+
 	resched = need_resched();
 	preempt_disable_notrace();
 
@@ -251,11 +252,12 @@ ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
 
 	/*
 	 * There's a slight race that the ftraced will update the
-	 * hash and reset here. The arch alloc is responsible
-	 * for seeing if the IP has already changed, and if
-	 * it has, the alloc will fail.
+	 * hash and reset here. If it is already converted, skip it.
 	 */
-	node = ftrace_alloc_shutdown_node(ip);
+	if (ftrace_ip_converted(ip))
+		goto out_unlock;
+
+	node = ftrace_alloc_dyn_node(ip);
 	if (!node)
 		goto out_unlock;
 
@@ -277,11 +279,7 @@ ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
 	preempt_enable_notrace();
 }
 
-static struct ftrace_ops ftrace_shutdown_ops __read_mostly =
-{
-	.func = ftrace_record_ip,
-};
-
+#define FTRACE_ADDR ((long)(&ftrace_caller))
 #define MCOUNT_ADDR ((long)(&mcount))
 
 static void notrace ftrace_replace_code(int saved)
@@ -309,9 +307,9 @@ static void notrace ftrace_replace_code(int saved)
 		ip = rec->ip;
 
 		if (saved)
-			new = ftrace_call_replace(ip, MCOUNT_ADDR);
+			new = ftrace_call_replace(ip, FTRACE_ADDR);
 		else
-			old = ftrace_call_replace(ip, MCOUNT_ADDR);
+			old = ftrace_call_replace(ip, FTRACE_ADDR);
 
 		failed = ftrace_modify_code(ip, old, new);
 		if (failed)
@@ -320,16 +318,6 @@ static void notrace ftrace_replace_code(int saved)
 	}
 }
 
-static notrace void ftrace_startup_code(void)
-{
-	ftrace_replace_code(1);
-}
-
-static notrace void ftrace_shutdown_code(void)
-{
-	ftrace_replace_code(0);
-}
-
 static notrace void ftrace_shutdown_replenish(void)
 {
 	if (ftrace_pages->next)
@@ -339,16 +327,8 @@ static notrace void ftrace_shutdown_replenish(void)
 	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
 }
 
-static int notrace __ftrace_modify_code(void *data)
-{
-	void (*func)(void) = data;
-
-	func();
-	return 0;
-}
-
 static notrace void
-ftrace_code_disable(struct dyn_ftrace *rec, unsigned long addr)
+ftrace_code_disable(struct dyn_ftrace *rec)
 {
 	unsigned long ip;
 	unsigned char *nop, *call;
@@ -357,67 +337,113 @@ ftrace_code_disable(struct dyn_ftrace *rec, unsigned long addr)
 	ip = rec->ip;
 
 	nop = ftrace_nop_replace();
-	call = ftrace_call_replace(ip, addr);
+	call = ftrace_call_replace(ip, MCOUNT_ADDR);
 
 	failed = ftrace_modify_code(ip, call, nop);
 	if (failed)
 		rec->flags |= FTRACE_FL_FAILED;
 }
 
-static void notrace ftrace_run_startup_code(void)
+static int notrace __ftrace_modify_code(void *data)
 {
-	stop_machine_run(__ftrace_modify_code, ftrace_startup_code, NR_CPUS);
+	unsigned long addr;
+	int *command = data;
+
+	if (*command & FTRACE_ENABLE_CALLS)
+		ftrace_replace_code(1);
+	else if (*command & FTRACE_DISABLE_CALLS)
+		ftrace_replace_code(0);
+
+	if (*command & FTRACE_UPDATE_TRACE_FUNC)
+		ftrace_update_ftrace_func(ftrace_trace_function);
+
+	if (*command & FTRACE_ENABLE_MCOUNT) {
+		addr = (unsigned long)ftrace_record_ip;
+		ftrace_mcount_set(&addr);
+	} else if (*command & FTRACE_DISABLE_MCOUNT) {
+		addr = (unsigned long)ftrace_stub;
+		ftrace_mcount_set(&addr);
+	}
+
+	return 0;
 }
 
-static void notrace ftrace_run_shutdown_code(void)
+static void notrace ftrace_run_update_code(int command)
 {
-	stop_machine_run(__ftrace_modify_code, ftrace_shutdown_code, NR_CPUS);
+	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
 }
 
+static ftrace_func_t saved_ftrace_func;
+
 static void notrace ftrace_startup(void)
 {
+	int command = 0;
+
 	mutex_lock(&ftraced_lock);
 	ftraced_suspend++;
-	if (ftraced_suspend != 1)
+	if (ftraced_suspend == 1)
+		command |= FTRACE_ENABLE_CALLS;
+
+	if (saved_ftrace_func != ftrace_trace_function) {
+		saved_ftrace_func = ftrace_trace_function;
+		command |= FTRACE_UPDATE_TRACE_FUNC;
+	}
+
+	if (!command || !ftrace_enabled)
 		goto out;
-	__unregister_ftrace_function(&ftrace_shutdown_ops);
 
-	if (ftrace_enabled)
-		ftrace_run_startup_code();
+	ftrace_run_update_code(command);
 out:
 	mutex_unlock(&ftraced_lock);
 }
 
 static void notrace ftrace_shutdown(void)
 {
+	int command = 0;
+
 	mutex_lock(&ftraced_lock);
 	ftraced_suspend--;
-	if (ftraced_suspend)
-		goto out;
+	if (!ftraced_suspend)
+		command |= FTRACE_DISABLE_CALLS;
 
-	if (ftrace_enabled)
-		ftrace_run_shutdown_code();
+	if (saved_ftrace_func != ftrace_trace_function) {
+		saved_ftrace_func = ftrace_trace_function;
+		command |= FTRACE_UPDATE_TRACE_FUNC;
+	}
 
-	__register_ftrace_function(&ftrace_shutdown_ops);
+	if (!command || !ftrace_enabled)
+		goto out;
+
+	ftrace_run_update_code(command);
 out:
 	mutex_unlock(&ftraced_lock);
 }
 
 static void notrace ftrace_startup_sysctl(void)
 {
+	int command = FTRACE_ENABLE_MCOUNT;
+
 	mutex_lock(&ftraced_lock);
+	/* Force update next time */
+	saved_ftrace_func = NULL;
 	/* ftraced_suspend is true if we want ftrace running */
 	if (ftraced_suspend)
-		ftrace_run_startup_code();
+		command |= FTRACE_ENABLE_CALLS;
+
+	ftrace_run_update_code(command);
 	mutex_unlock(&ftraced_lock);
 }
 
 static void notrace ftrace_shutdown_sysctl(void)
 {
+	int command = FTRACE_DISABLE_MCOUNT;
+
 	mutex_lock(&ftraced_lock);
 	/* ftraced_suspend is true if ftrace is running */
 	if (ftraced_suspend)
-		ftrace_run_shutdown_code();
+		command |= FTRACE_DISABLE_CALLS;
+
+	ftrace_run_update_code(command);
 	mutex_unlock(&ftraced_lock);
 }
 
@@ -430,11 +456,13 @@ static int notrace __ftrace_update_code(void *ignore)
 	struct dyn_ftrace *p;
 	struct hlist_head head;
 	struct hlist_node *t;
+	int save_ftrace_enabled;
 	cycle_t start, stop;
 	int i;
 
-	/* Don't be calling ftrace ops now */
-	__unregister_ftrace_function(&ftrace_shutdown_ops);
+	/* Don't be recording funcs now */
+	save_ftrace_enabled = ftrace_enabled;
+	ftrace_enabled = 0;
 
 	start = now(raw_smp_processor_id());
 	ftrace_update_cnt = 0;
@@ -449,7 +477,7 @@ static int notrace __ftrace_update_code(void *ignore)
 
 		/* all CPUS are stopped, we are safe to modify code */
 		hlist_for_each_entry(p, t, &head, node) {
-			ftrace_code_disable(p, MCOUNT_ADDR);
+			ftrace_code_disable(p);
 			ftrace_update_cnt++;
 		}
 
@@ -459,7 +487,7 @@ static int notrace __ftrace_update_code(void *ignore)
 	ftrace_update_time = stop - start;
 	ftrace_update_tot_cnt += ftrace_update_cnt;
 
-	__register_ftrace_function(&ftrace_shutdown_ops);
+	ftrace_enabled = save_ftrace_enabled;
 
 	return 0;
 }
@@ -515,11 +543,6 @@ static int __init ftrace_dyn_table_alloc(void)
 	struct ftrace_page *pg;
 	int cnt;
 	int i;
-	int ret;
-
-	ret = ftrace_dyn_arch_init();
-	if (ret)
-		return ret;
 
 	/* allocate a few pages */
 	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
@@ -557,11 +580,19 @@ static int __init ftrace_dyn_table_alloc(void)
 	return 0;
 }
 
-static int __init notrace ftrace_shutdown_init(void)
+static int __init notrace ftrace_dynamic_init(void)
 {
 	struct task_struct *p;
+	unsigned long addr;
 	int ret;
 
+	addr = (unsigned long)ftrace_record_ip;
+	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
+
+	/* ftrace_dyn_arch_init places the return code in addr */
+	if (addr)
+		return addr;
+
 	ret = ftrace_dyn_table_alloc();
 	if (ret)
 		return ret;
@@ -570,12 +601,12 @@ static int __init notrace ftrace_shutdown_init(void)
 	if (IS_ERR(p))
 		return -1;
 
-	__register_ftrace_function(&ftrace_shutdown_ops);
+	last_ftrace_enabled = ftrace_enabled = 1;
 
 	return 0;
 }
 
-core_initcall(ftrace_shutdown_init);
+core_initcall(ftrace_dynamic_init);
 #else
 # define ftrace_startup() do { } while (0)
 # define ftrace_shutdown() do { } while (0)
@@ -599,9 +630,8 @@ int register_ftrace_function(struct ftrace_ops *ops)
 	int ret;
 
 	mutex_lock(&ftrace_sysctl_lock);
-	ftrace_startup();
-
 	ret = __register_ftrace_function(ops);
+	ftrace_startup();
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	return ret;
@@ -619,10 +649,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	ret = __unregister_ftrace_function(ops);
-
-	if (ftrace_list == &ftrace_list_end)
-		ftrace_shutdown();
-
+	ftrace_shutdown();
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	return ret;