author		Ingo Molnar <mingo@elte.hu>	2008-11-23 07:47:54 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-23 07:47:54 -0500
commit		813b8520f5c240c71df55d14095a7b171de264ce
tree		de28c22c9d35fa25a7fb29a313cdf1befb11486c
parent		ed313489badef16d700f5a3be50e8fd8f8294bc8
parent		7cc45e64323c8a1042f56e6a8d1dc982f98d52a8
Merge branch 'ppc/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/powerpc
-rw-r--r--	arch/powerpc/include/asm/ftrace.h	|  14
-rw-r--r--	arch/powerpc/include/asm/module.h	|  16
-rw-r--r--	arch/powerpc/kernel/ftrace.c		| 473
-rw-r--r--	arch/powerpc/kernel/idle.c		|   5
-rw-r--r--	arch/powerpc/kernel/module_32.c		|  10
-rw-r--r--	arch/powerpc/kernel/module_64.c		|  13
6 files changed, 489 insertions(+), 42 deletions(-)
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index b298f7a631e6..e5f2ae8362f7 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -7,7 +7,19 @@
 
 #ifndef __ASSEMBLY__
 extern void _mcount(void);
-#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	/* reloction of mcount call site is the same as the address */
+	return addr;
+}
+
+struct dyn_arch_ftrace {
+	struct module *mod;
+};
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* __ASSEMBLY__ */
 
 #endif
 
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index e5f14b13ccf0..08454880a2c0 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -34,11 +34,19 @@ struct mod_arch_specific {
 #ifdef __powerpc64__
 	unsigned int stubs_section;	/* Index of stubs section in module */
 	unsigned int toc_section;	/* What section is the TOC? */
-#else
+#ifdef CONFIG_DYNAMIC_FTRACE
+	unsigned long toc;
+	unsigned long tramp;
+#endif
+
+#else /* powerpc64 */
 	/* Indices of PLT sections within module. */
 	unsigned int core_plt_section;
 	unsigned int init_plt_section;
+#ifdef CONFIG_DYNAMIC_FTRACE
+	unsigned long tramp;
+#endif
 #endif
+#endif /* powerpc64 */
 
 	/* List of BUG addresses, source line numbers and filenames */
 	struct list_head bug_list;
@@ -68,6 +76,12 @@ struct mod_arch_specific {
 # endif /* MODULE */
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+# ifdef MODULE
+	asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous");
+# endif /* MODULE */
+#endif
+
 
 struct exception_table_entry;
 void sort_ex_table(struct exception_table_entry *start,
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index f4b006ed0ab1..3271cd698e4c 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -9,22 +9,30 @@
 
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <linux/list.h>
 
 #include <asm/cacheflush.h>
+#include <asm/code-patching.h>
 #include <asm/ftrace.h>
 
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt , ...)	do { } while (0)
+#endif
 
-static unsigned int ftrace_nop = 0x60000000;
+static unsigned int ftrace_nop = PPC_NOP_INSTR;
 
 #ifdef CONFIG_PPC32
 # define GET_ADDR(addr) addr
 #else
 /* PowerPC64's functions are data that points to the functions */
-# define GET_ADDR(addr) *(unsigned long *)addr
+# define GET_ADDR(addr) (*(unsigned long *)addr)
 #endif
 
 
@@ -33,12 +41,12 @@ static unsigned int ftrace_calc_offset(long ip, long addr)
 	return (int)(addr - ip);
 }
 
-unsigned char *ftrace_nop_replace(void)
+static unsigned char *ftrace_nop_replace(void)
 {
 	return (char *)&ftrace_nop;
 }
 
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
 	static unsigned int op;
 
@@ -68,49 +76,434 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 # define _ASM_PTR	" .long "
 #endif
 
-int
+static int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
 {
-	unsigned replaced;
-	unsigned old = *(unsigned *)old_code;
-	unsigned new = *(unsigned *)new_code;
-	int faulted = 0;
+	unsigned char replaced[MCOUNT_INSN_SIZE];
 
 	/*
 	 * Note: Due to modules and __init, code can
 	 * disappear and change, we need to protect against faulting
-	 * as well as code changing.
+	 * as well as code changing. We do this by using the
+	 * probe_kernel_* functions.
 	 *
 	 * No real locking needed, this code is run through
-	 * kstop_machine.
+	 * kstop_machine, or before SMP starts.
 	 */
-	asm volatile (
-		"1: lwz		%1, 0(%2)\n"
-		"   cmpw	%1, %5\n"
-		"   bne		2f\n"
-		"   stwu	%3, 0(%2)\n"
-		"2:\n"
-		".section .fixup, \"ax\"\n"
-		"3:	li %0, 1\n"
-		"	b 2b\n"
-		".previous\n"
-		".section __ex_table,\"a\"\n"
-		_ASM_ALIGN "\n"
-		_ASM_PTR "1b, 3b\n"
-		".previous"
-		: "=r"(faulted), "=r"(replaced)
-		: "r"(ip), "r"(new),
-		  "0"(faulted), "r"(old)
-		: "memory");
-
-	if (replaced != old && replaced != new)
-		faulted = 2;
-
-	if (!faulted)
-		flush_icache_range(ip, ip + 8);
-
-	return faulted;
+
+	/* read the text we want to modify */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* Make sure it is what we expect it to be */
+	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+		return -EINVAL;
+
+	/* replace the text with the new text */
+	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+		return -EPERM;
+
+	flush_icache_range(ip, ip + 8);
+
+	return 0;
+}
+
+/*
+ * Helper functions that are the same for both PPC64 and PPC32.
+ */
+static int test_24bit_addr(unsigned long ip, unsigned long addr)
+{
+	long diff;
+
+	/*
+	 * Can we get to addr from ip in 24 bits?
+	 * (26 really, since we mulitply by 4 for 4 byte alignment)
+	 */
+	diff = addr - ip;
+
+	/*
+	 * Return true if diff is less than 1 << 25
+	 * and greater than -1 << 26.
+	 */
+	return (diff < (1 << 25)) && (diff > (-1 << 26));
+}
+
+static int is_bl_op(unsigned int op)
+{
+	return (op & 0xfc000003) == 0x48000001;
+}
+
+static int test_offset(unsigned long offset)
+{
+	return (offset + 0x2000000 > 0x3ffffff) || ((offset & 3) != 0);
+}
+
+static unsigned long find_bl_target(unsigned long ip, unsigned int op)
+{
+	static int offset;
+
+	offset = (op & 0x03fffffc);
+	/* make it signed */
+	if (offset & 0x02000000)
+		offset |= 0xfe000000;
+
+	return ip + (long)offset;
+}
+
+static unsigned int branch_offset(unsigned long offset)
+{
+	/* return "bl ip+offset" */
+	return 0x48000001 | (offset & 0x03fffffc);
+}
+
+#ifdef CONFIG_PPC64
+static int
+__ftrace_make_nop(struct module *mod,
+		  struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char replaced[MCOUNT_INSN_SIZE * 2];
+	unsigned int *op = (unsigned *)&replaced;
+	unsigned char jmp[8];
+	unsigned long *ptr = (unsigned long *)&jmp;
+	unsigned long ip = rec->ip;
+	unsigned long tramp;
+	int offset;
+
+	/* read where this goes */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* Make sure that that this is still a 24bit jump */
+	if (!is_bl_op(*op)) {
+		printk(KERN_ERR "Not expected bl: opcode is %x\n", *op);
+		return -EINVAL;
+	}
+
+	/* lets find where the pointer goes */
+	tramp = find_bl_target(ip, *op);
+
+	/*
+	 * On PPC64 the trampoline looks like:
+	 *  0x3d, 0x82, 0x00, 0x00,    addis   r12,r2, <high>
+	 *  0x39, 0x8c, 0x00, 0x00,    addi    r12,r12, <low>
+	 *   Where the bytes 2,3,6 and 7 make up the 32bit offset
+	 *   to the TOC that holds the pointer.
+	 *   to jump to.
+	 *  0xf8, 0x41, 0x00, 0x28,    std     r2,40(r1)
+	 *  0xe9, 0x6c, 0x00, 0x20,    ld      r11,32(r12)
+	 *   The actually address is 32 bytes from the offset
+	 *   into the TOC.
+	 *  0xe8, 0x4c, 0x00, 0x28,    ld      r2,40(r12)
+	 */
+
+	DEBUGP("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
+
+	/* Find where the trampoline jumps to */
+	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
+		printk(KERN_ERR "Failed to read %lx\n", tramp);
+		return -EFAULT;
+	}
+
+	DEBUGP(" %08x %08x",
+	       (unsigned)(*ptr >> 32),
+	       (unsigned)*ptr);
+
+	offset = (unsigned)jmp[2] << 24 |
+		(unsigned)jmp[3] << 16 |
+		(unsigned)jmp[6] << 8 |
+		(unsigned)jmp[7];
+
+	DEBUGP(" %x ", offset);
+
+	/* get the address this jumps too */
+	tramp = mod->arch.toc + offset + 32;
+	DEBUGP("toc: %lx", tramp);
+
+	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
+		printk(KERN_ERR "Failed to read %lx\n", tramp);
+		return -EFAULT;
+	}
+
+	DEBUGP(" %08x %08x\n",
+	       (unsigned)(*ptr >> 32),
+	       (unsigned)*ptr);
+
+	/* This should match what was called */
+	if (*ptr != GET_ADDR(addr)) {
+		printk(KERN_ERR "addr does not match %lx\n", *ptr);
+		return -EINVAL;
+	}
+
+	/*
+	 * We want to nop the line, but the next line is
+	 *  0xe8, 0x41, 0x00, 0x28   ld r2,40(r1)
+	 * This needs to be turned to a nop too.
+	 */
+	if (probe_kernel_read(replaced, (void *)(ip+4), MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	if (*op != 0xe8410028) {
+		printk(KERN_ERR "Next line is not ld! (%08x)\n", *op);
+		return -EINVAL;
+	}
+
+	/*
+	 * Milton Miller pointed out that we can not blindly do nops.
+	 * If a task was preempted when calling a trace function,
+	 * the nops will remove the way to restore the TOC in r2
+	 * and the r2 TOC will get corrupted.
+	 */
+
+	/*
+	 * Replace:
+	 *   bl <tramp>  <==== will be replaced with "b 1f"
+	 *   ld r2,40(r1)
+	 *  1:
+	 */
+	op[0] = 0x48000008;	/* b +8 */
+
+	if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
+		return -EPERM;
+
+	return 0;
+}
+
+#else /* !PPC64 */
+static int
+__ftrace_make_nop(struct module *mod,
+		  struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char replaced[MCOUNT_INSN_SIZE];
+	unsigned int *op = (unsigned *)&replaced;
+	unsigned char jmp[8];
+	unsigned int *ptr = (unsigned int *)&jmp;
+	unsigned long ip = rec->ip;
+	unsigned long tramp;
+	int offset;
+
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* Make sure that that this is still a 24bit jump */
+	if (!is_bl_op(*op)) {
+		printk(KERN_ERR "Not expected bl: opcode is %x\n", *op);
+		return -EINVAL;
+	}
+
+	/* lets find where the pointer goes */
+	tramp = find_bl_target(ip, *op);
+
+	/*
+	 * On PPC32 the trampoline looks like:
+	 *  lis r11,sym@ha
+	 *  addi r11,r11,sym@l
+	 *  mtctr r11
+	 *  bctr
+	 */
+
+	DEBUGP("ip:%lx jumps to %lx", ip, tramp);
+
+	/* Find where the trampoline jumps to */
+	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
+		printk(KERN_ERR "Failed to read %lx\n", tramp);
+		return -EFAULT;
+	}
+
+	DEBUGP(" %08x %08x ", ptr[0], ptr[1]);
+
+	tramp = (ptr[1] & 0xffff) |
+		((ptr[0] & 0xffff) << 16);
+	if (tramp & 0x8000)
+		tramp -= 0x10000;
+
+	DEBUGP(" %x ", tramp);
+
+	if (tramp != addr) {
+		printk(KERN_ERR
+		       "Trampoline location %08lx does not match addr\n",
+		       tramp);
+		return -EINVAL;
+	}
+
+	op[0] = PPC_NOP_INSTR;
+
+	if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
+		return -EPERM;
+
+	return 0;
+}
+#endif /* PPC64 */
+
+int ftrace_make_nop(struct module *mod,
+		    struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *old, *new;
+	unsigned long ip = rec->ip;
+
+	/*
+	 * If the calling address is more that 24 bits away,
+	 * then we had to use a trampoline to make the call.
+	 * Otherwise just update the call site.
+	 */
+	if (test_24bit_addr(ip, addr)) {
+		/* within range */
+		old = ftrace_call_replace(ip, addr);
+		new = ftrace_nop_replace();
+		return ftrace_modify_code(ip, old, new);
+	}
+
+	/*
+	 * Out of range jumps are called from modules.
+	 * We should either already have a pointer to the module
+	 * or it has been passed in.
+	 */
+	if (!rec->arch.mod) {
+		if (!mod) {
+			printk(KERN_ERR "No module loaded addr=%lx\n",
+			       addr);
+			return -EFAULT;
+		}
+		rec->arch.mod = mod;
+	} else if (mod) {
+		if (mod != rec->arch.mod) {
+			printk(KERN_ERR
+			       "Record mod %p not equal to passed in mod %p\n",
+			       rec->arch.mod, mod);
+			return -EINVAL;
+		}
+		/* nothing to do if mod == rec->arch.mod */
+	} else
+		mod = rec->arch.mod;
+
+	return __ftrace_make_nop(mod, rec, addr);
+
+}
+
+#ifdef CONFIG_PPC64
+static int
+__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char replaced[MCOUNT_INSN_SIZE * 2];
+	unsigned int *op = (unsigned *)&replaced;
+	unsigned long ip = rec->ip;
+	unsigned long offset;
+
+	/* read where this goes */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE * 2))
+		return -EFAULT;
+
+	/*
+	 * It should be pointing to two nops or
+	 *  b +8; ld r2,40(r1)
+	 */
+	if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
+	    ((op[0] != PPC_NOP_INSTR) || (op[1] != PPC_NOP_INSTR))) {
+		printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
+		return -EINVAL;
+	}
+
+	/* If we never set up a trampoline to ftrace_caller, then bail */
+	if (!rec->arch.mod->arch.tramp) {
+		printk(KERN_ERR "No ftrace trampoline\n");
+		return -EINVAL;
+	}
+
+	/* now calculate a jump to the ftrace caller trampoline */
+	offset = rec->arch.mod->arch.tramp - ip;
+
+	if (test_offset(offset)) {
+		printk(KERN_ERR "REL24 %li out of range!\n",
+		       (long int)offset);
+		return -EINVAL;
+	}
+
+	/* Set to "bl addr" */
+	op[0] = branch_offset(offset);
+	/* ld r2,40(r1) */
+	op[1] = 0xe8410028;
+
+	DEBUGP("write to %lx\n", rec->ip);
+
+	if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE * 2))
+		return -EPERM;
+
+	return 0;
+}
+#else
+static int
+__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char replaced[MCOUNT_INSN_SIZE];
+	unsigned int *op = (unsigned *)&replaced;
+	unsigned long ip = rec->ip;
+	unsigned long offset;
+
+	/* read where this goes */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* It should be pointing to a nop */
+	if (op[0] != PPC_NOP_INSTR) {
+		printk(KERN_ERR "Expected NOP but have %x\n", op[0]);
+		return -EINVAL;
+	}
+
+	/* If we never set up a trampoline to ftrace_caller, then bail */
+	if (!rec->arch.mod->arch.tramp) {
+		printk(KERN_ERR "No ftrace trampoline\n");
+		return -EINVAL;
+	}
+
+	/* now calculate a jump to the ftrace caller trampoline */
+	offset = rec->arch.mod->arch.tramp - ip;
+
+	if (test_offset(offset)) {
+		printk(KERN_ERR "REL24 %li out of range!\n",
+		       (long int)offset);
+		return -EINVAL;
+	}
+
+	/* Set to "bl addr" */
+	op[0] = branch_offset(offset);
+
+	DEBUGP("write to %lx\n", rec->ip);
+
+	if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
+		return -EPERM;
+
+	return 0;
+}
+#endif /* CONFIG_PPC64 */
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *old, *new;
+	unsigned long ip = rec->ip;
+
+	/*
+	 * If the calling address is more that 24 bits away,
+	 * then we had to use a trampoline to make the call.
+	 * Otherwise just update the call site.
+	 */
+	if (test_24bit_addr(ip, addr)) {
+		/* within range */
+		old = ftrace_nop_replace();
+		new = ftrace_call_replace(ip, addr);
+		return ftrace_modify_code(ip, old, new);
+	}
+
+	/*
+	 * Out of range jumps are called from modules.
+	 * Being that we are converting from nop, it had better
+	 * already have a module defined.
+	 */
+	if (!rec->arch.mod) {
+		printk(KERN_ERR "No module loaded\n");
+		return -EINVAL;
+	}
+
+	return __ftrace_make_call(rec, addr);
 }
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
@@ -128,10 +521,10 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-	/* This is running in kstop_machine */
+	/* caller expects data to be zero */
+	unsigned long *p = data;
 
-	ftrace_mcount_set(data);
+	*p = 0;
 
 	return 0;
 }
-
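
Note: the helpers added above (test_24bit_addr(), find_bl_target(), branch_offset()) all revolve around the PowerPC "bl" encoding: opcode 0x48000001 with a signed, word-aligned 26-bit displacement in the 0x03fffffc field. The standalone sketch below is not part of the patch; it is plain user-space C with made-up addresses, showing the same arithmetic round-tripping an encode and a decode.

#include <stdio.h>

/* Can "ip" reach "addr" with a single relative bl? */
static int in_bl_range(unsigned long ip, unsigned long addr)
{
	long diff = (long)(addr - ip);

	return (diff < (1L << 25)) && (diff > -(1L << 26));
}

/* Encode "bl addr" to be placed at ip (assumes in_bl_range() holds). */
static unsigned int encode_bl(unsigned long ip, unsigned long addr)
{
	return 0x48000001 | ((addr - ip) & 0x03fffffc);
}

/* Decode the target of a "bl" instruction found at ip. */
static unsigned long decode_bl(unsigned long ip, unsigned int insn)
{
	long offset = insn & 0x03fffffc;

	if (offset & 0x02000000)	/* sign-extend the 26-bit field */
		offset -= 0x04000000;
	return ip + offset;
}

int main(void)
{
	unsigned long ip = 0xc0000000001000ul, target = 0xc0000000009abcul;
	unsigned int insn = encode_bl(ip, target);

	printf("bl opcode: %08x, decodes back to %lx (range ok: %d)\n",
	       insn, decode_bl(ip, insn), in_bl_range(ip, target));
	return 0;
}

Compiled as a normal user-space program this prints the opcode and confirms the round trip; in the kernel, the same mask and alignment constraints are what is_bl_op() and test_offset() check before any text is rewritten.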
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 31982d05d81a..88d9c1d5e5fb 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -69,10 +69,15 @@ void cpu_idle(void)
 			smp_mb();
 			local_irq_disable();
 
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+
 			/* check again after disabling irqs */
 			if (!need_resched() && !cpu_should_die())
 				ppc_md.power_save();
 
+			start_critical_timings();
+
 			local_irq_enable();
 			set_thread_flag(TIF_POLLING_NRFLAG);
 
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 2df91a03462a..f832773fc28e 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -22,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/ftrace.h>
 #include <linux/cache.h>
 #include <linux/bug.h>
 #include <linux/sort.h>
@@ -53,6 +54,9 @@ static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
 			r_addend = rela[i].r_addend;
 		}
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+	_count_relocs++;	/* add one for ftrace_caller */
+#endif
 	return _count_relocs;
 }
 
@@ -306,5 +310,11 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 			return -ENOEXEC;
 		}
 	}
+#ifdef CONFIG_DYNAMIC_FTRACE
+	module->arch.tramp =
+		do_plt_call(module->module_core,
+			    (unsigned long)ftrace_caller,
+			    sechdrs, module);
+#endif
 	return 0;
 }
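
Note: on PPC32, the trampoline that do_plt_call() sets up here is the lis/addi/mtctr/bctr sequence described in the ftrace.c comment above, and __ftrace_make_nop() later recovers its target by combining the two 16-bit immediates and correcting for the @ha rounding. The sketch below is not from the patch; the r11 encodings and the sample address are illustrative only.

#include <stdio.h>

/* Split an address the way the linker computes sym@ha and sym@l. */
static void split_ha_l(unsigned int addr, unsigned short *ha, short *l)
{
	*l = (short)(addr & 0xffff);	/* sign-extended low half */
	*ha = (addr + 0x8000) >> 16;	/* high half, compensated for the sign of @l */
}

/* Recover the address from the lis/addi pair, as __ftrace_make_nop() does. */
static unsigned int stub_target(unsigned int lis_insn, unsigned int addi_insn)
{
	unsigned int addr = (addi_insn & 0xffff) | ((lis_insn & 0xffff) << 16);

	if (addr & 0x8000)	/* low half was negative: undo @ha's rounding */
		addr -= 0x10000;
	return addr;
}

int main(void)
{
	unsigned int sym = 0xc0089abc;	/* low half has bit 15 set */
	unsigned short ha;
	short l;

	split_ha_l(sym, &ha, &l);
	/* lis r11,ha -> 0x3d60xxxx ; addi r11,r11,l -> 0x396bxxxx */
	printf("ha=%04hx l=%04hx recovered=%08x\n",
	       ha, (unsigned short)l,
	       stub_target(0x3d600000 | ha, 0x396b0000 | (unsigned short)l));
	return 0;
}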
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 1af2377e4992..8992b031a7b6 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -20,6 +20,7 @@
 #include <linux/moduleloader.h>
 #include <linux/err.h>
 #include <linux/vmalloc.h>
+#include <linux/ftrace.h>
 #include <linux/bug.h>
 #include <asm/module.h>
 #include <asm/firmware.h>
@@ -163,6 +164,11 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
 		}
 	}
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+	/* make the trampoline to the ftrace_caller */
+	relocs++;
+#endif
+
 	DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
 	return relocs * sizeof(struct ppc64_stub_entry);
 }
@@ -441,5 +447,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		}
 	}
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+	me->arch.toc = my_r2(sechdrs, me);
+	me->arch.tramp = stub_for_addr(sechdrs,
+				       (unsigned long)ftrace_caller,
+				       me);
+#endif
+
 	return 0;
 }
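
Note: on PPC64, stub_for_addr() emits the addis r12,r2 / addi r12,r12 pair whose immediates __ftrace_make_nop() reassembles into a TOC offset (bytes 2, 3, 6 and 7 of the instruction pair, on the big-endian layout the code assumes), with the pointer sitting 32 bytes past my_r2() + offset. The sketch below is not from the patch; the stub bytes and TOC value are invented for illustration.

#include <stdio.h>

/* Reassemble the stub's TOC-relative offset and return the slot address. */
static unsigned long stub_toc_slot(const unsigned char insns[8],
				   unsigned long toc)
{
	int offset = (unsigned)insns[2] << 24 |
		     (unsigned)insns[3] << 16 |
		     (unsigned)insns[6] << 8  |
		     (unsigned)insns[7];

	return toc + offset + 32;
}

int main(void)
{
	/* addis r12,r2,0x0001 ; addi r12,r12,0x0040  (big-endian bytes) */
	unsigned char stub[8] = { 0x3d, 0x82, 0x00, 0x01,
				  0x39, 0x8c, 0x00, 0x40 };
	unsigned long toc = 0xc000000000d00000ul;

	printf("TOC slot holding the target pointer: %lx\n",
	       stub_toc_slot(stub, toc));
	return 0;
}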