Diffstat (limited to 'arch/x86/kernel/ftrace.c')
 arch/x86/kernel/ftrace.c | 390 ++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 380 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 50ea0ac8c9bf..1b43086b097a 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -14,14 +14,17 @@
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
+#include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/list.h>
 
 #include <asm/ftrace.h>
+#include <linux/ftrace.h>
 #include <asm/nops.h>
+#include <asm/nmi.h>
 
 
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+#ifdef CONFIG_DYNAMIC_FTRACE
 
 union ftrace_code_union {
 	char code[MCOUNT_INSN_SIZE];
@@ -31,18 +34,12 @@ union ftrace_code_union {
 	} __attribute__((packed));
 };
 
-
 static int ftrace_calc_offset(long ip, long addr)
 {
 	return (int)(addr - ip);
 }
 
-unsigned char *ftrace_nop_replace(void)
-{
-	return ftrace_nop;
-}
-
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
 	static union ftrace_code_union calc;
 
@@ -56,7 +53,142 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	return calc.code;
 }
 
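ftrace_call_replace() above builds the 5-byte x86 call: opcode 0xe8 (CALL rel32) followed by a 32-bit displacement measured from the end of the instruction. A minimal userspace sketch of the same encoding, using made-up addresses for the call site and for mcount:

#include <stdio.h>

#define MCOUNT_INSN_SIZE 5	/* 1 opcode byte + 4 displacement bytes */

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;	/* 0xe8 = CALL rel32 */
		int offset;
	} __attribute__((packed));
};

int main(void)
{
	union ftrace_code_union calc;
	unsigned long ip   = 0xffffffff81000000UL;	/* hypothetical call site */
	unsigned long addr = 0xffffffff81005000UL;	/* hypothetical mcount */
	int i;

	calc.e8 = 0xe8;
	/* rel32 is relative to the *next* instruction, hence ip + 5 */
	calc.offset = (int)(addr - (ip + MCOUNT_INSN_SIZE));

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printf("%02x ", (unsigned char)calc.code[i]);
	printf("\n");	/* prints: e8 fb 4f 00 00 */
	return 0;
}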
-int
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUs from executing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: an IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ *    and the new code into the "code" buffer.
+ * 2) Set a flag that says we are modifying code.
+ * 3) Wait for any running NMIs to finish.
+ * 4) Write the code.
+ * 5) Clear the flag.
+ * 6) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+
+static atomic_t in_nmi = ATOMIC_INIT(0);
+static int mod_code_status;		/* holds return value of text write */
+static int mod_code_write;		/* set when NMI should do the write */
+static void *mod_code_ip;		/* holds the IP to write to */
+static void *mod_code_newcode;		/* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+	int r;
+
+	r = snprintf(buf, size, "%u %u",
+		     nmi_wait_count,
+		     atomic_read(&nmi_update_count));
+	return r;
+}
+
+static void ftrace_mod_code(void)
+{
+	/*
+	 * Yes, more than one CPU can be writing to mod_code_status
+	 * (and the code itself).
+	 * But if one were to fail, then they all should, and if one were
+	 * to succeed, then they all should.
+	 */
+	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+					     MCOUNT_INSN_SIZE);
+}
+
+void ftrace_nmi_enter(void)
+{
+	atomic_inc(&in_nmi);
+	/* in_nmi must be seen by others before we read the write flag */
+	smp_mb();
+	if (mod_code_write) {
+		ftrace_mod_code();
+		atomic_inc(&nmi_update_count);
+	}
+}
+
+void ftrace_nmi_exit(void)
+{
+	/* Finish all executions before clearing in_nmi */
+	smp_wmb();
+	atomic_dec(&in_nmi);
+}
+
+static void wait_for_nmi(void)
+{
+	int waited = 0;
+
+	while (atomic_read(&in_nmi)) {
+		waited = 1;
+		cpu_relax();
+	}
+
+	if (waited)
+		nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+	mod_code_ip = (void *)ip;
+	mod_code_newcode = new_code;
+
+	/* The buffers need to be visible before we let NMIs write them */
+	smp_wmb();
+
+	mod_code_write = 1;
+
+	/* Make sure write bit is visible before we wait on NMIs */
+	smp_mb();
+
+	wait_for_nmi();
+
+	/* Make sure all running NMIs have finished before we write the code */
+	smp_mb();
+
+	ftrace_mod_code();
+
+	/* Make sure the write happens before clearing the bit */
+	smp_wmb();
+
+	mod_code_write = 0;
+
+	/* make sure NMIs see the cleared bit */
+	smp_mb();
+
+	wait_for_nmi();
+
+	return mod_code_status;
+}
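Taken together, do_ftrace_mod_code() and the ftrace_nmi_enter()/ftrace_nmi_exit() hooks implement the protocol from the comment above: publish the buffers, raise the flag, wait out NMIs on both sides of the write. A userspace analogue, with an ordinary thread standing in for the NMI handler, C11 seq-cst atomics standing in for the explicit smp_mb()/smp_wmb() calls, and memcpy() for probe_kernel_write(); the "both sides may store the same bytes" trick relies on x86 semantics, not anything plain C guarantees, so treat this strictly as an illustration:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define INSN_SIZE 5

static unsigned char text[INSN_SIZE];	/* stands in for kernel text */
static atomic_int in_nmi;
static atomic_int mod_code_write;
static void *mod_code_ip;
static const void *mod_code_newcode;

static void mod_code(void)
{
	memcpy(mod_code_ip, mod_code_newcode, INSN_SIZE);
}

static void *nmi_handler(void *arg)
{
	atomic_fetch_add(&in_nmi, 1);		/* ftrace_nmi_enter() */
	if (atomic_load(&mod_code_write))
		mod_code();			/* same bytes: idempotent */
	atomic_fetch_sub(&in_nmi, 1);		/* ftrace_nmi_exit() */
	return NULL;
}

static void do_mod_code(void *ip, const void *new_code)
{
	mod_code_ip = ip;
	mod_code_newcode = new_code;
	atomic_store(&mod_code_write, 1);	/* also publishes the buffers */
	while (atomic_load(&in_nmi))		/* wait_for_nmi() */
		;
	mod_code();
	atomic_store(&mod_code_write, 0);
	while (atomic_load(&in_nmi))
		;
}

int main(void)
{
	static const unsigned char nop5[INSN_SIZE] =
		{ 0x0f, 0x1f, 0x44, 0x00, 0x00 };
	pthread_t t;

	pthread_create(&t, NULL, nmi_handler, NULL);
	do_mod_code(text, nop5);
	pthread_join(&t, NULL);
	printf("%02x %02x %02x %02x %02x\n",
	       text[0], text[1], text[2], text[3], text[4]);
	return 0;
}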
+
+
+
+
+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+
+static unsigned char *ftrace_nop_replace(void)
+{
+	return ftrace_nop;
+}
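ftrace_nop itself is filled in at boot: ftrace_dyn_arch_init() (unchanged, further down this file) probes whether the CPU safely executes the single 5-byte P6 NOP and falls back to a K8-style NOP otherwise. A sketch assuming the P6 encoding 0f 1f 44 00 00 (nopl 0x0(%rax,%rax,1)):

#include <stdio.h>
#include <string.h>

#define MCOUNT_INSN_SIZE 5

static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

static unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop;
}

int main(void)
{
	/* boot-time choice; the P6 5-byte NOP is assumed here */
	static const unsigned char p6_nop[] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
	unsigned char *nop;
	int i;

	memcpy(ftrace_nop, p6_nop, MCOUNT_INSN_SIZE);
	nop = ftrace_nop_replace();
	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printf("%02x ", nop[i]);
	printf("\n");
	return 0;
}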
+
+static int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
 {
@@ -81,7 +213,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		return -EINVAL;
 
 	/* replace the text with the new text */
-	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+	if (do_ftrace_mod_code(ip, new_code))
 		return -EPERM;
 
 	sync_core();
@@ -89,6 +221,29 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	return 0;
 }
 
+int ftrace_make_nop(struct module *mod,
+		    struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *new, *old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_call_replace(ip, addr);
+	new = ftrace_nop_replace();
+
+	return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *new, *old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_nop_replace();
+	new = ftrace_call_replace(ip, addr);
+
+	return ftrace_modify_code(rec->ip, old, new);
+}
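The two helpers are mirror images: compute the bytes that should currently sit at the call site, compute the replacement, and let ftrace_modify_code() refuse the write when the site does not contain what we expect. A userspace sketch of that verify-then-write contract, with memcmp()/memcpy() standing in for probe_kernel_read()/do_ftrace_mod_code() and made-up byte values:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MCOUNT_INSN_SIZE 5

static int modify_code(unsigned char *ip, const unsigned char *old_code,
		       const unsigned char *new_code)
{
	if (memcmp(ip, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;			/* unexpected bytes: refuse */
	memcpy(ip, new_code, MCOUNT_INSN_SIZE);	/* kernel: do_ftrace_mod_code */
	return 0;
}

int main(void)
{
	unsigned char site[MCOUNT_INSN_SIZE] = { 0xe8, 0xfb, 0x4f, 0x00, 0x00 };
	const unsigned char call[MCOUNT_INSN_SIZE] = { 0xe8, 0xfb, 0x4f, 0x00, 0x00 };
	const unsigned char nop[MCOUNT_INSN_SIZE] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

	/* ftrace_make_nop(): expect the call, install the nop */
	printf("make_nop:  %d\n", modify_code(site, call, nop));
	/* a second attempt fails with -EINVAL: the call is already gone */
	printf("once more: %d\n", modify_code(site, call, nop));
	return 0;
}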
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long ip = (unsigned long)(&ftrace_call);
@@ -165,3 +320,218 @@ int __init ftrace_dyn_arch_init(void *data)
 
 	return 0;
 }
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod_jmp(unsigned long ip,
+			  int old_offset, int new_offset)
+{
+	unsigned char code[MCOUNT_INSN_SIZE];
+
+	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
+		return -EINVAL;
+
+	*(int *)(&code[1]) = new_offset;
+
+	if (do_ftrace_mod_code(ip, &code))
+		return -EPERM;
+
+	return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	int old_offset, new_offset;
+
+	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+
+	return ftrace_mod_jmp(ip, old_offset, new_offset);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	int old_offset, new_offset;
+
+	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+
+	return ftrace_mod_jmp(ip, old_offset, new_offset);
+}
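ftrace_mod_jmp() checks for opcode 0xe9 (JMP rel32) and patches only the 4 displacement bytes; the two wrappers just compute the old and new displacements relative to the end of the 5-byte jump. A sketch of that arithmetic, with hypothetical addresses standing in for ftrace_graph_call, ftrace_stub and ftrace_graph_caller:

#include <stdio.h>

#define MCOUNT_INSN_SIZE 5

int main(void)
{
	unsigned long ip     = 0xffffffff81001000UL;	/* ftrace_graph_call */
	unsigned long stub   = 0xffffffff81001040UL;	/* ftrace_stub */
	unsigned long caller = 0xffffffff81002000UL;	/* ftrace_graph_caller */

	/* rel32 displacements are measured from the end of the jmp */
	int old_offset = (int)(stub - (ip + MCOUNT_INSN_SIZE));
	int new_offset = (int)(caller - (ip + MCOUNT_INSN_SIZE));

	/* enable: rewrite "jmp ftrace_stub" into "jmp ftrace_graph_caller" */
	printf("old: e9 %08x\nnew: e9 %08x\n", old_offset, new_offset);
	return 0;
}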
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * These functions are adapted from the ones used above for
+ * dynamic ftrace. They have been simplified here to ignore
+ * all traces in NMI context.
+ */
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
+{
+	atomic_inc(&in_nmi);
+}
+
+void ftrace_nmi_exit(void)
+{
+	atomic_dec(&in_nmi);
+}
+
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/* Add a function return address to the trace stack kept on the task. */
+static int push_return_trace(unsigned long ret, unsigned long long time,
+				unsigned long func, int *depth)
+{
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
+	*depth = index;
+
+	return 0;
+}
+
+/* Retrieve a function return address from the trace stack on the task. */
+static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+
+	if (unlikely(index < 0)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic, otherwise we have nowhere to go */
+		*ret = (unsigned long)panic;
+		return;
+	}
+
+	*ret = current->ret_stack[index].ret;
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
+	barrier();
+	current->curr_ret_stack--;
+}
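push_return_trace() and pop_return_trace() treat ret_stack as a per-task shadow stack indexed by curr_ret_stack. A self-contained userspace analogue, with a static array standing in for the per-task state hanging off current:

#include <stdio.h>

#define FTRACE_RETFUNC_DEPTH 50

struct ret_entry {
	unsigned long ret;		/* original return address */
	unsigned long func;		/* function being traced */
	unsigned long long calltime;
};

static struct ret_entry ret_stack[FTRACE_RETFUNC_DEPTH];
static int curr_ret_stack = -1;		/* -1 means empty */
static unsigned long trace_overrun;

static int push_return_trace(unsigned long ret, unsigned long long time,
			     unsigned long func, int *depth)
{
	if (curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		trace_overrun++;	/* stack full: caller skips the hook */
		return -1;
	}

	int index = ++curr_ret_stack;
	ret_stack[index].ret = ret;
	ret_stack[index].func = func;
	ret_stack[index].calltime = time;
	*depth = index;
	return 0;
}

static unsigned long pop_return_trace(void)
{
	unsigned long ret = ret_stack[curr_ret_stack].ret;

	curr_ret_stack--;
	return ret;
}

int main(void)
{
	int depth;

	push_return_trace(0x1000, 42ULL, 0x2000, &depth);
	printf("depth=%d popped ret=%#lx\n", depth, pop_return_trace());
	return 0;
}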
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	pop_return_trace(&trace, &ret);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_graph_return(&trace);
+
+	if (unlikely(!ret)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic. What else to do? */
+		ret = (unsigned long)panic;
+	}
+
+	return ret;
+}
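The handler timestamps the return, and the graph tracer derives each function's duration from the two timestamps. A sketch of that subtraction (the struct fields mirror ftrace_graph_ret above; the values are invented):

#include <stdio.h>

struct graph_ret {			/* abbreviated ftrace_graph_ret */
	unsigned long func;
	unsigned long long calltime;	/* ns, set in prepare_ftrace_return */
	unsigned long long rettime;	/* ns, set in ftrace_return_to_handler */
};

int main(void)
{
	struct graph_ret trace = {
		.func     = 0xffffffff81001000UL,
		.calltime = 1000000ULL,
		.rettime  = 1003500ULL,
	};

	printf("func %#lx took %llu ns\n",
	       trace.func, trace.rettime - trace.calltime);
	return 0;
}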
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in the current task.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	unsigned long long calltime;
+	int faulted;
+	struct ftrace_graph_ent trace;
+	unsigned long return_hooker = (unsigned long)
+				&return_to_handler;
+
+	/* NMIs are currently unsupported */
+	if (unlikely(atomic_read(&in_nmi)))
+		return;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	/*
+	 * Protect against a fault, even though one should not
+	 * happen. This tool is too intrusive to go without such
+	 * protection.
+	 */
+	asm volatile(
+		"1: " _ASM_MOV " (%[parent_old]), %[old]\n"
+		"2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n"
+		"   movl $0, %[faulted]\n"
+
+		".section .fixup, \"ax\"\n"
+		"3: movl $1, %[faulted]\n"
+		".previous\n"
+
+		_ASM_EXTABLE(1b, 3b)
+		_ASM_EXTABLE(2b, 3b)
+
+		: [parent_replaced] "=r" (parent), [old] "=r" (old),
+		  [faulted] "=r" (faulted)
+		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+		: "memory"
+	);
+
+	if (unlikely(faulted)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		return;
+	}
+
+	if (unlikely(!__kernel_text_address(old))) {
+		ftrace_graph_stop();
+		*parent = old;
+		WARN_ON(1);
+		return;
+	}
+
+	calltime = cpu_clock(raw_smp_processor_id());
+
+	if (push_return_trace(old, calltime,
+				self_addr, &trace.depth) == -EBUSY) {
+		*parent = old;
+		return;
+	}
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		*parent = old;
+	}
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
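Finally, the asm block in prepare_ftrace_return() is, apart from the exception-table protection, just a guarded load/store pair on the stack slot that holds the saved return address. A userspace restatement, where a local variable plays the stack slot and return_to_handler is a stand-in rather than the real trampoline:

#include <stdio.h>

static void return_to_handler(void) { }		/* stand-in trampoline */

static void hook_return_address(unsigned long *parent, unsigned long *old)
{
	*old = *parent;				/* 1: save the original */
	*parent = (unsigned long)return_to_handler; /* 2: redirect the return */
	/*
	 * The kernel wraps both accesses in _ASM_EXTABLE fixups because
	 * *parent may fault; here a plain access suffices.
	 */
}

int main(void)
{
	unsigned long stack_slot = 0xdeadbeef;	/* pretend return address */
	unsigned long old;

	hook_return_address(&stack_slot, &old);
	printf("old=%#lx parent now=%#lx\n", old, stack_slot);
	return 0;
}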
