about | summary | refs | log | tree | commit | diff | stats
path: root/arch/x86/kernel/ftrace.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--  arch/x86/kernel/ftrace.c  101
1 file changed, 22 insertions(+), 79 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index cd37469b54ee..c9a281f272fd 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/list.h>
+#include <linux/module.h>
 
 #include <trace/syscall.h>
 
@@ -49,6 +50,7 @@ static DEFINE_PER_CPU(int, save_modifying_code);
 int ftrace_arch_code_modify_prepare(void)
 {
 	set_kernel_text_rw();
+	set_all_modules_text_rw();
 	modifying_code = 1;
 	return 0;
 }
@@ -56,6 +58,7 @@ int ftrace_arch_code_modify_prepare(void)
 int ftrace_arch_code_modify_post_process(void)
 {
 	modifying_code = 0;
+	set_all_modules_text_ro();
 	set_kernel_text_ro();
 	return 0;
 }
@@ -120,7 +123,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 static atomic_t nmi_running = ATOMIC_INIT(0);
 static int mod_code_status;		/* holds return value of text write */
 static void *mod_code_ip;		/* holds the IP to write to */
-static void *mod_code_newcode;		/* holds the text to write to the IP */
+static const void *mod_code_newcode;	/* holds the text to write to the IP */
 
 static unsigned nmi_wait_count;
 static atomic_t nmi_update_count = ATOMIC_INIT(0);
@@ -167,9 +170,9 @@ static void ftrace_mod_code(void)
 
 void ftrace_nmi_enter(void)
 {
-	__get_cpu_var(save_modifying_code) = modifying_code;
+	__this_cpu_write(save_modifying_code, modifying_code);
 
-	if (!__get_cpu_var(save_modifying_code))
+	if (!__this_cpu_read(save_modifying_code))
 		return;
 
 	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
@@ -183,7 +186,7 @@ void ftrace_nmi_enter(void)
 
 void ftrace_nmi_exit(void)
 {
-	if (!__get_cpu_var(save_modifying_code))
+	if (!__this_cpu_read(save_modifying_code))
 		return;
 
 	/* Finish all executions before clearing nmi_running */
@@ -222,7 +225,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
 }
 
 static int
-do_ftrace_mod_code(unsigned long ip, void *new_code)
+do_ftrace_mod_code(unsigned long ip, const void *new_code)
 {
 	/*
 	 * On x86_64, kernel text mappings are mapped read-only with
@@ -257,19 +260,14 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
 	return mod_code_status;
 }
 
-
-
-
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
-
-static unsigned char *ftrace_nop_replace(void)
+static const unsigned char *ftrace_nop_replace(void)
 {
-	return ftrace_nop;
+	return ideal_nops[NOP_ATOMIC5];
 }
 
 static int
-ftrace_modify_code(unsigned long ip, unsigned char *old_code,
-		   unsigned char *new_code)
+ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+		   unsigned const char *new_code)
 {
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 
@@ -303,7 +301,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned char *new, *old;
+	unsigned const char *new, *old;
 	unsigned long ip = rec->ip;
 
 	old = ftrace_call_replace(ip, addr);
@@ -314,7 +312,7 @@ int ftrace_make_nop(struct module *mod,
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned char *new, *old;
+	unsigned const char *new, *old;
 	unsigned long ip = rec->ip;
 
 	old = ftrace_nop_replace();
@@ -338,62 +336,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-	extern const unsigned char ftrace_test_p6nop[];
-	extern const unsigned char ftrace_test_nop5[];
-	extern const unsigned char ftrace_test_jmp[];
-	int faulted = 0;
-
-	/*
-	 * There is no good nop for all x86 archs.
-	 * We will default to using the P6_NOP5, but first we
-	 * will test to make sure that the nop will actually
-	 * work on this CPU. If it faults, we will then
-	 * go to a lesser efficient 5 byte nop. If that fails
-	 * we then just use a jmp as our nop. This isn't the most
-	 * efficient nop, but we can not use a multi part nop
-	 * since we would then risk being preempted in the middle
-	 * of that nop, and if we enabled tracing then, it might
-	 * cause a system crash.
-	 *
-	 * TODO: check the cpuid to determine the best nop.
-	 */
-	asm volatile (
-		"ftrace_test_jmp:"
-		"jmp ftrace_test_p6nop\n"
-		"nop\n"
-		"nop\n"
-		"nop\n"  /* 2 byte jmp + 3 bytes */
-		"ftrace_test_p6nop:"
-		P6_NOP5
-		"jmp 1f\n"
-		"ftrace_test_nop5:"
-		".byte 0x66,0x66,0x66,0x66,0x90\n"
-		"1:"
-		".section .fixup, \"ax\"\n"
-		"2:	movl $1, %0\n"
-		"	jmp ftrace_test_nop5\n"
-		"3:	movl $2, %0\n"
-		"	jmp 1b\n"
-		".previous\n"
-		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
-		_ASM_EXTABLE(ftrace_test_nop5, 3b)
-		: "=r"(faulted) : "0" (faulted));
-
-	switch (faulted) {
-	case 0:
-		pr_info("converting mcount calls to 0f 1f 44 00 00\n");
-		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
-		break;
-	case 1:
-		pr_info("converting mcount calls to 66 66 66 66 90\n");
-		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
-		break;
-	case 2:
-		pr_info("converting mcount calls to jmp . + 5\n");
-		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
-		break;
-	}
-
 	/* The return code is retured via data */
 	*(unsigned long *)data = 0;
 
@@ -495,18 +437,19 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 		return;
 	}
 
-	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
-				    frame_pointer) == -EBUSY) {
-		*parent = old;
-		return;
-	}
-
 	trace.func = self_addr;
+	trace.depth = current->curr_ret_stack + 1;
 
 	/* Only trace if the calling function expects to */
 	if (!ftrace_graph_entry(&trace)) {
-		current->curr_ret_stack--;
 		*parent = old;
+		return;
+	}
+
+	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+				    frame_pointer) == -EBUSY) {
+		*parent = old;
+		return;
 	}
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */