Diffstat (limited to 'arch/arm/kernel/ftrace.c')
 -rw-r--r--  arch/arm/kernel/ftrace.c  116
 1 file changed, 116 insertions(+), 0 deletions(-)
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
new file mode 100644
index 000000000000..76d50e6091bc
--- /dev/null
+++ b/arch/arm/kernel/ftrace.c
@@ -0,0 +1,116 @@
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched lazily with a NOP until they are
 * enabled. All code mutation routines here take effect atomically.
 */

#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/ftrace.h>

#define PC_OFFSET      8          /* ARM reads the PC as the current instruction + 8 */
#define BL_OPCODE      0xeb000000 /* BL (condition AL) with an empty offset field */
#define BL_OFFSET_MASK 0x00ffffff /* 24-bit signed word-offset immediate */

/* holds the most recently constructed BL instruction */
static unsigned long bl_insn;
static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */

unsigned char *ftrace_nop_replace(void)
{
	return (unsigned char *)&NOP;
}

/* construct a branch (BL) instruction to addr */
unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	long offset;

	offset = (long)addr - (long)(pc + PC_OFFSET);
	if (unlikely(offset < -33554432 || offset > 33554428)) {
		/* A BL can't reach that far (+/-32 MB, see the ARM ARM).
		 * Ftrace doesn't generate branches outside of kernel text.
		 */
		WARN_ON_ONCE(1);
		return NULL;
	}
	/* signed 24-bit word offset, relative to pc + 8 */
	offset = (offset >> 2) & BL_OFFSET_MASK;
	bl_insn = BL_OPCODE | offset;
	return (unsigned char *)&bl_insn;
}

int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
		       unsigned char *new_code)
{
	unsigned long err = 0, replaced = 0, old, new;

	old = *(unsigned long *)old_code;
	new = *(unsigned long *)new_code;

	/*
	 * Load the instruction at pc and store the new one only if the
	 * old one is still in place. A fault on either access branches
	 * to the fixup via the exception table and sets err to 1;
	 * finding an instruction that is neither old nor new sets err
	 * to 2.
	 */
	__asm__ __volatile__ (
		"1:  ldr    %1, [%2]  \n"
		"    cmp    %1, %4    \n"
		"2:  streq  %3, [%2]  \n"
		"    cmpne  %1, %3    \n"
		"    movne  %0, #2    \n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4:  mov  %0, #1  \n"
		"    b    3b      \n"
		".previous\n"

		".section __ex_table, \"a\"\n"
		"    .long 1b, 4b \n"
		"    .long 2b, 4b \n"
		".previous\n"

		: "=r"(err), "=r"(replaced)
		: "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
		: "memory");

	/* the word was actually rewritten: make the change visible to the icache */
	if (!err && (replaced == old))
		flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return err;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret;
	unsigned long pc, old;
	unsigned char *new;

	pc = (unsigned long)&ftrace_call;
	memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(pc, (unsigned long)func);
	ret = ftrace_modify_code(pc, (unsigned char *)&old, new);
	return ret;
}

int ftrace_mcount_set(unsigned long *data)
{
	unsigned long pc, old;
	unsigned long *addr = data;
	unsigned char *new;

	pc = (unsigned long)&mcount_call;
	memcpy(&old, &mcount_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(pc, *addr);
	*addr = ftrace_modify_code(pc, (unsigned char *)&old, new);
	return 0;
}

/* run from kstop_machine */
int __init ftrace_dyn_arch_init(void *data)
{
	ftrace_mcount_set(data);
	return 0;
}
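
For context, the sketch below shows how the generic ftrace code is expected to drive the three arch hooks above when it flips a single mcount call site between its NOP and BL forms, as described in the file's header comment. Only ftrace_nop_replace(), ftrace_call_replace() and ftrace_modify_code() come from the patch; patch_one_site() and its return convention are purely illustrative.

/* illustrative only: how one mcount call site might be flipped */
extern unsigned char *ftrace_nop_replace(void);
extern unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr);
extern int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
			      unsigned char *new_code);

int patch_one_site(unsigned long pc, unsigned long tracer, int enable)
{
	unsigned char *nop  = ftrace_nop_replace();
	unsigned char *call = ftrace_call_replace(pc, tracer);

	if (!call)
		return -1;	/* tracer is out of BL range */

	/*
	 * Enabling expects the site to still hold the NOP and writes the
	 * BL; disabling expects the BL and writes the NOP back.
	 */
	return enable ? ftrace_modify_code(pc, nop, call)
		      : ftrace_modify_code(pc, call, nop);
}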
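
ftrace_call_replace() relies on the ARM BL encoding: the instruction carries a signed 24-bit word offset that the CPU applies relative to pc + 8, which is where PC_OFFSET, the >> 2 shift and the +/-32 MB range check come from. A small user-space sketch of the same arithmetic, with made-up addresses chosen only to show the numbers:

#include <stdio.h>

#define PC_OFFSET      8
#define BL_OPCODE      0xeb000000UL
#define BL_OFFSET_MASK 0x00ffffffUL

/* mirrors the encoding in ftrace_call_replace(); returns 0 if out of range */
static unsigned long encode_bl(unsigned long pc, unsigned long addr)
{
	long offset = (long)addr - (long)(pc + PC_OFFSET);

	if (offset < -33554432 || offset > 33554428)	/* +/-32 MB */
		return 0;

	/* keep the low 24 bits of the word offset */
	return BL_OPCODE | (((unsigned long)offset >> 2) & BL_OFFSET_MASK);
}

int main(void)
{
	/* hypothetical call site and tracer addresses */
	unsigned long pc   = 0xc0008000UL;
	unsigned long addr = 0xc0108000UL;

	/* offset = 0xc0108000 - 0xc0008008 = 0xffff8; >> 2 = 0x3fffe */
	printf("0x%08lx\n", encode_bl(pc, addr));	/* prints 0xeb03fffe */
	return 0;
}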
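
The inline assembly in ftrace_modify_code() is essentially a one-shot compare-and-exchange on the instruction word, with the exception table turning a faulting access into err = 1. Ignoring the fault handling and the icache flush, its decision logic corresponds to the plain C below; modify_code_sketch() is illustrative, not part of the patch.

/* illustrative C rendering of the asm's compare-and-exchange logic */
int modify_code_sketch(unsigned long pc, unsigned long old, unsigned long new)
{
	unsigned long replaced = *(unsigned long *)pc;	/* "1: ldr" */

	if (replaced == old) {
		*(unsigned long *)pc = new;		/* "2: streq" */
		return 0;
	}
	if (replaced == new)	/* already patched: not an error */
		return 0;
	return 2;		/* unexpected instruction at the call site */
}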