Diffstat (limited to 'arch/mips/kernel/ftrace.c')
-rw-r--r--	arch/mips/kernel/ftrace.c | 184
1 file changed, 112 insertions(+), 72 deletions(-)
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index e9e64e0ff7a..5a84a1f1123 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -2,7 +2,7 @@
  * Code for replacing ftrace calls with jumps.
  *
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
- * Copyright (C) 2009 DSLab, Lanzhou University, China
+ * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
  * Author: Wu Zhangjin <wuzhangjin@gmail.com>
  *
  * Thanks goes to Steven Rostedt for writing the original x86 version.
@@ -12,18 +12,62 @@
 #include <linux/init.h>
 #include <linux/ftrace.h>
 
-#include <asm/cacheflush.h>
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/uasm.h>
+
+/*
+ * If the Instruction Pointer is in module space (0xc0000000), return true;
+ * otherwise, it is in kernel space (0x80000000), return false.
+ *
+ * FIXME: This will not work when the kernel space and module space are the
+ * same. If they are the same, we need to modify scripts/recordmcount.pl,
+ * ftrace_make_nop/call() and the other related parts to ensure the
+ * enabling/disabling of the calling site to _mcount is right for both kernel
+ * and module.
+ */
+
+static inline int in_module(unsigned long ip)
+{
+	return ip & 0x40000000;
+}
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
 #define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */
-#define jump_insn_encode(op_code, addr) \
-	((unsigned int)((op_code) | (((addr) >> 2) & ADDR_MASK)))
 
-static unsigned int ftrace_nop = 0x00000000;
+#define INSN_B_1F_4 0x10000004	/* b 1f; offset = 4 */
+#define INSN_B_1F_5 0x10000005	/* b 1f; offset = 5 */
+#define INSN_NOP 0x00000000	/* nop */
+#define INSN_JAL(addr)	\
+	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
+
+static unsigned int insn_jal_ftrace_caller __read_mostly;
+static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
+static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
+
+static inline void ftrace_dyn_arch_init_insns(void)
+{
+	u32 *buf;
+	unsigned int v1;
+
+	/* lui v1, hi16_mcount */
+	v1 = 3;
+	buf = (u32 *)&insn_lui_v1_hi16_mcount;
+	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);
+
+	/* jal (ftrace_caller + 8), jump over the first two instruction */
+	buf = (u32 *)&insn_jal_ftrace_caller;
+	uasm_i_jal(&buf, (FTRACE_ADDR + 8));
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* j ftrace_graph_caller */
+	buf = (u32 *)&insn_j_ftrace_graph_caller;
+	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
+#endif
+}
 
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
@@ -40,67 +84,56 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 	return 0;
 }
 
-static int lui_v1;
-static int jal_mcount;
-
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
 	unsigned int new;
-	int faulted;
 	unsigned long ip = rec->ip;
 
-	/* We have compiled module with -mlong-calls, but compiled the kernel
-	 * without it, we need to cope with them respectively. */
-	if (ip & 0x40000000) {
-		/* record it for ftrace_make_call */
-		if (lui_v1 == 0) {
-			/* lui_v1 = *(unsigned int *)ip; */
-			safe_load_code(lui_v1, ip, faulted);
-
-			if (unlikely(faulted))
-				return -EFAULT;
-		}
-
-		/* lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+	/*
+	 * We have compiled module with -mlong-calls, but compiled the kernel
+	 * without it, we need to cope with them respectively.
+	 */
+	if (in_module(ip)) {
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+		/*
+		 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
+		 * addiu v1, v1, low_16bit_of_mcount
+		 * move at, ra
+		 * move $12, ra_address
+		 * jalr v1
+		 * sub sp, sp, 8
+		 *                                  1: offset = 5 instructions
+		 */
+		new = INSN_B_1F_5;
+#else
+		/*
+		 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
 		 * addiu v1, v1, low_16bit_of_mcount
 		 * move at, ra
 		 * jalr v1
-		 * nop
-		 * 1f: (ip + 12)
+		 * nop | move $12, ra_address | sub sp, sp, 8
+		 *                                  1: offset = 4 instructions
 		 */
-		new = 0x10000004;
+		new = INSN_B_1F_4;
+#endif
 	} else {
-		/* record/calculate it for ftrace_make_call */
-		if (jal_mcount == 0) {
-			/* We can record it directly like this:
-			 *     jal_mcount = *(unsigned int *)ip;
-			 * Herein, jump over the first two nop instructions */
-			jal_mcount = jump_insn_encode(JAL, (MCOUNT_ADDR + 8));
-		}
-
-		/* move at, ra
-		 * jalr v1 --> nop
+		/*
+		 * move at, ra
+		 * jal _mcount --> nop
 		 */
-		new = ftrace_nop;
+		new = INSN_NOP;
 	}
 	return ftrace_modify_code(ip, new);
 }
 
-static int modified;	/* initialized as 0 by default */
-
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
-	/* We just need to remove the "b ftrace_stub" at the fist time! */
-	if (modified == 0) {
-		modified = 1;
-		ftrace_modify_code(addr, ftrace_nop);
-	}
 	/* ip, module: 0xc0000000, kernel: 0x80000000 */
-	new = (ip & 0x40000000) ? lui_v1 : jal_mcount;
+	new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
 
 	return ftrace_modify_code(ip, new);
 }
@@ -111,44 +144,48 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned int new;
 
-	new = jump_insn_encode(JAL, (unsigned long)func);
+	new = INSN_JAL((unsigned long)func);
 
 	return ftrace_modify_code(FTRACE_CALL_IP, new);
 }
 
 int __init ftrace_dyn_arch_init(void *data)
 {
+	/* Encode the instructions when booting */
+	ftrace_dyn_arch_init_insns();
+
+	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
+	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
+
 	/* The return code is retured via data */
 	*(unsigned long *)data = 0;
 
 	return 0;
 }
 #endif	/* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 extern void ftrace_graph_call(void);
-#define JMP	0x08000000	/* jump to target directly */
-#define CALL_FTRACE_GRAPH_CALLER \
-	jump_insn_encode(JMP, (unsigned long)(&ftrace_graph_caller))
 #define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
 
 int ftrace_enable_ftrace_graph_caller(void)
 {
 	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
-			CALL_FTRACE_GRAPH_CALLER);
+			insn_j_ftrace_graph_caller);
 }
 
 int ftrace_disable_ftrace_graph_caller(void)
 {
-	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, ftrace_nop);
+	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
 }
 
-#endif	/* !CONFIG_DYNAMIC_FTRACE */
+#endif	/* CONFIG_DYNAMIC_FTRACE */
 
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
+
 #define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
 #define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
 #define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
@@ -162,17 +199,17 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
 	unsigned int code;
 	int faulted;
 
-	/* in module or kernel? */
-	if (self_addr & 0x40000000) {
-		/* module: move to the instruction "lui v1, HI_16BIT_OF_MCOUNT" */
-		ip = self_addr - 20;
-	} else {
-		/* kernel: move to the instruction "move ra, at" */
-		ip = self_addr - 12;
-	}
+	/*
+	 * For module, move the ip from calling site of mcount to the
+	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
+	 * kernel, move to the instruction "move ra, at"(offset is 12)
+	 */
+	ip = self_addr - (in_module(self_addr) ? 20 : 12);
 
-	/* search the text until finding the non-store instruction or "s{d,w}
-	 * ra, offset(sp)" instruction */
+	/*
+	 * search the text until finding the non-store instruction or "s{d,w}
+	 * ra, offset(sp)" instruction
+	 */
 	do {
 		ip -= 4;
 
@@ -181,10 +218,11 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
 
 		if (unlikely(faulted))
 			return 0;
-
-		/* If we hit the non-store instruction before finding where the
+		/*
+		 * If we hit the non-store instruction before finding where the
 		 * ra is stored, then this is a leaf function and it does not
-		 * store the ra on the stack. */
+		 * store the ra on the stack
+		 */
 		if ((code & S_R_SP) != S_R_SP)
 			return parent_addr;
 
@@ -202,7 +240,7 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
 	return 0;
 }
 
-#endif
+#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */
 
 /*
  * Hook the return address and push it in the stack of return addrs
@@ -220,7 +258,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
-	/* "parent" is the stack address saved the return address of the caller
+	/*
+	 * "parent" is the stack address saved the return address of the caller
 	 * of _mcount.
 	 *
 	 * if the gcc < 4.5, a leaf function does not save the return address
@@ -242,10 +281,11 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 		goto out;
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
 	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
-							 (unsigned long)parent,
-							 fp);
-	/* If fails when getting the stack address of the non-leaf function's
-	 * ra, stop function graph tracer and return */
+			(unsigned long)parent, fp);
+	/*
+	 * If fails when getting the stack address of the non-leaf function's
+	 * ra, stop function graph tracer and return
+	 */
 	if (parent == 0)
 		goto out;
 #endif
@@ -272,4 +312,4 @@ out:
 	ftrace_graph_stop();
 	WARN_ON(1);
 }
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */
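
For reference, the address encoding used by the INSN_JAL() macro introduced above can be checked with a small stand-alone user-space program. This is only a sketch, not part of the patch; the target address 0x80123450 is a made-up stand-in for wherever _mcount actually links.

/*
 * Sketch: how INSN_JAL() packs a target address into a MIPS "jal"
 * instruction word.  The low 26 bits hold target >> 2; the hardware
 * supplies the top 4 bits from the PC of the delay slot, which is
 * assumed here to sit in the same 256 MB region as the target.
 */
#include <stdio.h>

#define JAL		0x0c000000u	/* jump & link opcode */
#define ADDR_MASK	0x03ffffffu	/* 26-bit target field */
#define INSN_JAL(addr)	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

int main(void)
{
	unsigned int target = 0x80123450u;	/* hypothetical _mcount address */
	unsigned int insn = INSN_JAL(target);

	/* Rebuild the destination: region bits plus the field shifted back. */
	unsigned int dest = (target & 0xf0000000u) | ((insn & ADDR_MASK) << 2);

	printf("insn = 0x%08x, decoded destination = 0x%08x\n", insn, dest);
	return 0;
}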