diff options
author | Rabin Vincent <rabin@rab.in> | 2010-08-10 14:43:28 -0400 |
---|---|---|
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2010-09-02 10:27:40 -0400 |
commit | 3b6c223b1b97ad60bbb0f4efda57d649414ac2a2 (patch) | |
tree | 291dcb285e8cb64415a82ed1c65dc9681921a257 /arch/arm/kernel | |
parent | f9810a82536e0c730c57844753e6c08cc7f77881 (diff) |
ARM: 6318/1: ftrace: fix and update dynamic ftrace
This adds mcount recording and updates dynamic ftrace for ARM to work
with the new ftrace dynamic tracing implementation. It also adds support
for the mcount format used by newer ARM compilers.
With dynamic tracing, mcount() is implemented as a nop. Callsites are
patched on startup with nops, and dynamically patched to call to the
ftrace_caller() routine as needed.
Acked-by: Steven Rostedt <rostedt@goodmis.org> [recordmcount.pl change]
Signed-off-by: Rabin Vincent <rabin@rab.in>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r-- | arch/arm/kernel/entry-common.S | 37 | ||||
-rw-r--r-- | arch/arm/kernel/ftrace.c | 155 |
2 files changed, 135 insertions, 57 deletions
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index f5e75de0203e..e02790f28879 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S | |||
@@ -127,6 +127,10 @@ ENDPROC(ret_from_fork) | |||
127 | * clobber the ip register. This is OK because the ARM calling convention | 127 | * clobber the ip register. This is OK because the ARM calling convention |
128 | * allows it to be clobbered in subroutines and doesn't use it to hold | 128 | * allows it to be clobbered in subroutines and doesn't use it to hold |
129 | * parameters.) | 129 | * parameters.) |
130 | * | ||
131 | * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0" | ||
132 | * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see | ||
133 | * arch/arm/kernel/ftrace.c). | ||
130 | */ | 134 | */ |
131 | 135 | ||
132 | #ifndef CONFIG_OLD_MCOUNT | 136 | #ifndef CONFIG_OLD_MCOUNT |
@@ -136,30 +140,45 @@ ENDPROC(ret_from_fork) | |||
136 | #endif | 140 | #endif |
137 | 141 | ||
138 | #ifdef CONFIG_DYNAMIC_FTRACE | 142 | #ifdef CONFIG_DYNAMIC_FTRACE |
139 | ENTRY(mcount) | 143 | ENTRY(__gnu_mcount_nc) |
144 | mov ip, lr | ||
145 | ldmia sp!, {lr} | ||
146 | mov pc, ip | ||
147 | ENDPROC(__gnu_mcount_nc) | ||
148 | |||
149 | ENTRY(ftrace_caller) | ||
140 | stmdb sp!, {r0-r3, lr} | 150 | stmdb sp!, {r0-r3, lr} |
141 | mov r0, lr | 151 | mov r0, lr |
142 | sub r0, r0, #MCOUNT_INSN_SIZE | 152 | sub r0, r0, #MCOUNT_INSN_SIZE |
153 | ldr r1, [sp, #20] | ||
143 | 154 | ||
144 | .globl mcount_call | 155 | .global ftrace_call |
145 | mcount_call: | 156 | ftrace_call: |
146 | bl ftrace_stub | 157 | bl ftrace_stub |
147 | ldr lr, [fp, #-4] @ restore lr | 158 | ldmia sp!, {r0-r3, ip, lr} |
148 | ldmia sp!, {r0-r3, pc} | 159 | mov pc, ip |
160 | ENDPROC(ftrace_caller) | ||
161 | |||
162 | #ifdef CONFIG_OLD_MCOUNT | ||
163 | ENTRY(mcount) | ||
164 | stmdb sp!, {lr} | ||
165 | ldr lr, [fp, #-4] | ||
166 | ldmia sp!, {pc} | ||
149 | ENDPROC(mcount) | 167 | ENDPROC(mcount) |
150 | 168 | ||
151 | ENTRY(ftrace_caller) | 169 | ENTRY(ftrace_caller_old) |
152 | stmdb sp!, {r0-r3, lr} | 170 | stmdb sp!, {r0-r3, lr} |
153 | ldr r1, [fp, #-4] | 171 | ldr r1, [fp, #-4] |
154 | mov r0, lr | 172 | mov r0, lr |
155 | sub r0, r0, #MCOUNT_INSN_SIZE | 173 | sub r0, r0, #MCOUNT_INSN_SIZE |
156 | 174 | ||
157 | .globl ftrace_call | 175 | .globl ftrace_call_old |
158 | ftrace_call: | 176 | ftrace_call_old: |
159 | bl ftrace_stub | 177 | bl ftrace_stub |
160 | ldr lr, [fp, #-4] @ restore lr | 178 | ldr lr, [fp, #-4] @ restore lr |
161 | ldmia sp!, {r0-r3, pc} | 179 | ldmia sp!, {r0-r3, pc} |
162 | ENDPROC(ftrace_caller) | 180 | ENDPROC(ftrace_caller_old) |
181 | #endif | ||
163 | 182 | ||
164 | #else | 183 | #else |
165 | 184 | ||
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 0298286ad4ad..f09014cfbf2c 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c | |||
@@ -2,102 +2,161 @@ | |||
2 | * Dynamic function tracing support. | 2 | * Dynamic function tracing support. |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com> | 4 | * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com> |
5 | * Copyright (C) 2010 Rabin Vincent <rabin@rab.in> | ||
5 | * | 6 | * |
6 | * For licencing details, see COPYING. | 7 | * For licencing details, see COPYING. |
7 | * | 8 | * |
8 | * Defines low-level handling of mcount calls when the kernel | 9 | * Defines low-level handling of mcount calls when the kernel |
9 | * is compiled with the -pg flag. When using dynamic ftrace, the | 10 | * is compiled with the -pg flag. When using dynamic ftrace, the |
10 | * mcount call-sites get patched lazily with NOP till they are | 11 | * mcount call-sites get patched with NOP till they are enabled. |
11 | * enabled. All code mutation routines here take effect atomically. | 12 | * All code mutation routines here are called under stop_machine(). |
12 | */ | 13 | */ |
13 | 14 | ||
14 | #include <linux/ftrace.h> | 15 | #include <linux/ftrace.h> |
16 | #include <linux/uaccess.h> | ||
15 | 17 | ||
16 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
17 | #include <asm/ftrace.h> | 19 | #include <asm/ftrace.h> |
18 | 20 | ||
19 | #define PC_OFFSET 8 | 21 | #define NOP 0xe8bd4000 /* pop {lr} */ |
20 | #define BL_OPCODE 0xeb000000 | ||
21 | #define BL_OFFSET_MASK 0x00ffffff | ||
22 | 22 | ||
23 | static unsigned long bl_insn; | 23 | #ifdef CONFIG_OLD_MCOUNT |
24 | static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */ | 24 | #define OLD_MCOUNT_ADDR ((unsigned long) mcount) |
25 | #define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old) | ||
25 | 26 | ||
26 | unsigned char *ftrace_nop_replace(void) | 27 | #define OLD_NOP 0xe1a00000 /* mov r0, r0 */ |
28 | |||
29 | static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) | ||
30 | { | ||
31 | return rec->arch.old_mcount ? OLD_NOP : NOP; | ||
32 | } | ||
33 | |||
34 | static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) | ||
35 | { | ||
36 | if (!rec->arch.old_mcount) | ||
37 | return addr; | ||
38 | |||
39 | if (addr == MCOUNT_ADDR) | ||
40 | addr = OLD_MCOUNT_ADDR; | ||
41 | else if (addr == FTRACE_ADDR) | ||
42 | addr = OLD_FTRACE_ADDR; | ||
43 | |||
44 | return addr; | ||
45 | } | ||
46 | #else | ||
47 | static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) | ||
48 | { | ||
49 | return NOP; | ||
50 | } | ||
51 | |||
52 | static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) | ||
27 | { | 53 | { |
28 | return (char *)&NOP; | 54 | return addr; |
29 | } | 55 | } |
56 | #endif | ||
30 | 57 | ||
31 | /* construct a branch (BL) instruction to addr */ | 58 | /* construct a branch (BL) instruction to addr */ |
32 | unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr) | 59 | static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) |
33 | { | 60 | { |
34 | long offset; | 61 | long offset; |
35 | 62 | ||
36 | offset = (long)addr - (long)(pc + PC_OFFSET); | 63 | offset = (long)addr - (long)(pc + 8); |
37 | if (unlikely(offset < -33554432 || offset > 33554428)) { | 64 | if (unlikely(offset < -33554432 || offset > 33554428)) { |
38 | /* Can't generate branches that far (from ARM ARM). Ftrace | 65 | /* Can't generate branches that far (from ARM ARM). Ftrace |
39 | * doesn't generate branches outside of kernel text. | 66 | * doesn't generate branches outside of kernel text. |
40 | */ | 67 | */ |
41 | WARN_ON_ONCE(1); | 68 | WARN_ON_ONCE(1); |
42 | return NULL; | 69 | return 0; |
43 | } | 70 | } |
44 | offset = (offset >> 2) & BL_OFFSET_MASK; | ||
45 | bl_insn = BL_OPCODE | offset; | ||
46 | return (unsigned char *)&bl_insn; | ||
47 | } | ||
48 | 71 | ||
49 | int ftrace_modify_code(unsigned long pc, unsigned char *old_code, | 72 | offset = (offset >> 2) & 0x00ffffff; |
50 | unsigned char *new_code) | ||
51 | { | ||
52 | unsigned long err = 0, replaced = 0, old, new; | ||
53 | 73 | ||
54 | old = *(unsigned long *)old_code; | 74 | return 0xeb000000 | offset; |
55 | new = *(unsigned long *)new_code; | 75 | } |
56 | 76 | ||
57 | __asm__ __volatile__ ( | 77 | static int ftrace_modify_code(unsigned long pc, unsigned long old, |
58 | "1: ldr %1, [%2] \n" | 78 | unsigned long new) |
59 | " cmp %1, %4 \n" | 79 | { |
60 | "2: streq %3, [%2] \n" | 80 | unsigned long replaced; |
61 | " cmpne %1, %3 \n" | ||
62 | " movne %0, #2 \n" | ||
63 | "3:\n" | ||
64 | 81 | ||
65 | ".pushsection .fixup, \"ax\"\n" | 82 | if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE)) |
66 | "4: mov %0, #1 \n" | 83 | return -EFAULT; |
67 | " b 3b \n" | ||
68 | ".popsection\n" | ||
69 | 84 | ||
70 | ".pushsection __ex_table, \"a\"\n" | 85 | if (replaced != old) |
71 | " .long 1b, 4b \n" | 86 | return -EINVAL; |
72 | " .long 2b, 4b \n" | ||
73 | ".popsection\n" | ||
74 | 87 | ||
75 | : "=r"(err), "=r"(replaced) | 88 | if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE)) |
76 | : "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced) | 89 | return -EPERM; |
77 | : "memory"); | ||
78 | 90 | ||
79 | if (!err && (replaced == old)) | 91 | flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); |
80 | flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); | ||
81 | 92 | ||
82 | return err; | 93 | return 0; |
83 | } | 94 | } |
84 | 95 | ||
85 | int ftrace_update_ftrace_func(ftrace_func_t func) | 96 | int ftrace_update_ftrace_func(ftrace_func_t func) |
86 | { | 97 | { |
87 | int ret; | ||
88 | unsigned long pc, old; | 98 | unsigned long pc, old; |
89 | unsigned char *new; | 99 | unsigned long new; |
100 | int ret; | ||
90 | 101 | ||
91 | pc = (unsigned long)&ftrace_call; | 102 | pc = (unsigned long)&ftrace_call; |
92 | memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE); | 103 | memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE); |
93 | new = ftrace_call_replace(pc, (unsigned long)func); | 104 | new = ftrace_call_replace(pc, (unsigned long)func); |
94 | ret = ftrace_modify_code(pc, (unsigned char *)&old, new); | 105 | |
106 | ret = ftrace_modify_code(pc, old, new); | ||
107 | |||
108 | #ifdef CONFIG_OLD_MCOUNT | ||
109 | if (!ret) { | ||
110 | pc = (unsigned long)&ftrace_call_old; | ||
111 | memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE); | ||
112 | new = ftrace_call_replace(pc, (unsigned long)func); | ||
113 | |||
114 | ret = ftrace_modify_code(pc, old, new); | ||
115 | } | ||
116 | #endif | ||
117 | |||
118 | return ret; | ||
119 | } | ||
120 | |||
121 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | ||
122 | { | ||
123 | unsigned long new, old; | ||
124 | unsigned long ip = rec->ip; | ||
125 | |||
126 | old = ftrace_nop_replace(rec); | ||
127 | new = ftrace_call_replace(ip, adjust_address(rec, addr)); | ||
128 | |||
129 | return ftrace_modify_code(rec->ip, old, new); | ||
130 | } | ||
131 | |||
132 | int ftrace_make_nop(struct module *mod, | ||
133 | struct dyn_ftrace *rec, unsigned long addr) | ||
134 | { | ||
135 | unsigned long ip = rec->ip; | ||
136 | unsigned long old; | ||
137 | unsigned long new; | ||
138 | int ret; | ||
139 | |||
140 | old = ftrace_call_replace(ip, adjust_address(rec, addr)); | ||
141 | new = ftrace_nop_replace(rec); | ||
142 | ret = ftrace_modify_code(ip, old, new); | ||
143 | |||
144 | #ifdef CONFIG_OLD_MCOUNT | ||
145 | if (ret == -EINVAL && addr == MCOUNT_ADDR) { | ||
146 | rec->arch.old_mcount = true; | ||
147 | |||
148 | old = ftrace_call_replace(ip, adjust_address(rec, addr)); | ||
149 | new = ftrace_nop_replace(rec); | ||
150 | ret = ftrace_modify_code(ip, old, new); | ||
151 | } | ||
152 | #endif | ||
153 | |||
95 | return ret; | 154 | return ret; |
96 | } | 155 | } |
97 | 156 | ||
98 | /* run from ftrace_init with irqs disabled */ | ||
99 | int __init ftrace_dyn_arch_init(void *data) | 157 | int __init ftrace_dyn_arch_init(void *data) |
100 | { | 158 | { |
101 | ftrace_mcount_set(data); | 159 | *(unsigned long *)data = 0; |
160 | |||
102 | return 0; | 161 | return 0; |
103 | } | 162 | } |