diff options
Diffstat (limited to 'arch/powerpc/kernel/ftrace.c')
-rw-r--r-- | arch/powerpc/kernel/ftrace.c | 73 |
1 files changed, 15 insertions, 58 deletions
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index e66af6d265e8..44d4d8eb3c85 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c | |||
@@ -510,79 +510,36 @@ int ftrace_disable_ftrace_graph_caller(void) | |||
510 | } | 510 | } |
511 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 511 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
512 | 512 | ||
513 | #ifdef CONFIG_PPC64 | ||
514 | extern void mod_return_to_handler(void); | ||
515 | #endif | ||
516 | |||
517 | /* | 513 | /* |
518 | * Hook the return address and push it in the stack of return addrs | 514 | * Hook the return address and push it in the stack of return addrs |
519 | * in current thread info. | 515 | * in current thread info. Return the address we want to divert to. |
520 | */ | 516 | */ |
521 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | 517 | unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) |
522 | { | 518 | { |
523 | unsigned long old; | ||
524 | int faulted; | ||
525 | struct ftrace_graph_ent trace; | 519 | struct ftrace_graph_ent trace; |
526 | unsigned long return_hooker = (unsigned long)&return_to_handler; | 520 | unsigned long return_hooker; |
527 | 521 | ||
528 | if (unlikely(ftrace_graph_is_dead())) | 522 | if (unlikely(ftrace_graph_is_dead())) |
529 | return; | 523 | goto out; |
530 | 524 | ||
531 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 525 | if (unlikely(atomic_read(&current->tracing_graph_pause))) |
532 | return; | 526 | goto out; |
533 | |||
534 | #ifdef CONFIG_PPC64 | ||
535 | /* non core kernel code needs to save and restore the TOC */ | ||
536 | if (REGION_ID(self_addr) != KERNEL_REGION_ID) | ||
537 | return_hooker = (unsigned long)&mod_return_to_handler; | ||
538 | #endif | ||
539 | |||
540 | return_hooker = ppc_function_entry((void *)return_hooker); | ||
541 | 527 | ||
542 | /* | 528 | return_hooker = ppc_function_entry(return_to_handler); |
543 | * Protect against fault, even if it shouldn't | ||
544 | * happen. This tool is too much intrusive to | ||
545 | * ignore such a protection. | ||
546 | */ | ||
547 | asm volatile( | ||
548 | "1: " PPC_LL "%[old], 0(%[parent])\n" | ||
549 | "2: " PPC_STL "%[return_hooker], 0(%[parent])\n" | ||
550 | " li %[faulted], 0\n" | ||
551 | "3:\n" | ||
552 | |||
553 | ".section .fixup, \"ax\"\n" | ||
554 | "4: li %[faulted], 1\n" | ||
555 | " b 3b\n" | ||
556 | ".previous\n" | ||
557 | |||
558 | ".section __ex_table,\"a\"\n" | ||
559 | PPC_LONG_ALIGN "\n" | ||
560 | PPC_LONG "1b,4b\n" | ||
561 | PPC_LONG "2b,4b\n" | ||
562 | ".previous" | ||
563 | |||
564 | : [old] "=&r" (old), [faulted] "=r" (faulted) | ||
565 | : [parent] "r" (parent), [return_hooker] "r" (return_hooker) | ||
566 | : "memory" | ||
567 | ); | ||
568 | |||
569 | if (unlikely(faulted)) { | ||
570 | ftrace_graph_stop(); | ||
571 | WARN_ON(1); | ||
572 | return; | ||
573 | } | ||
574 | 529 | ||
575 | trace.func = self_addr; | 530 | trace.func = ip; |
576 | trace.depth = current->curr_ret_stack + 1; | 531 | trace.depth = current->curr_ret_stack + 1; |
577 | 532 | ||
578 | /* Only trace if the calling function expects to */ | 533 | /* Only trace if the calling function expects to */ |
579 | if (!ftrace_graph_entry(&trace)) { | 534 | if (!ftrace_graph_entry(&trace)) |
580 | *parent = old; | 535 | goto out; |
581 | return; | 536 | |
582 | } | 537 | if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY) |
538 | goto out; | ||
583 | 539 | ||
584 | if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) | 540 | parent = return_hooker; |
585 | *parent = old; | 541 | out: |
542 | return parent; | ||
586 | } | 543 | } |
587 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 544 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
588 | 545 | ||