Diffstat (limited to 'arch/powerpc/kernel/ftrace.c')
-rw-r--r--  arch/powerpc/kernel/ftrace.c | 218
1 file changed, 160 insertions(+), 58 deletions(-)
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 60c60ccf5e3c..70e2a736be1f 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -5,6 +5,9 @@
  *
  * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
  *
+ * Added function graph tracer code, taken from x86 that was written
+ * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
+ *
  */
 
 #include <linux/spinlock.h>
@@ -20,14 +23,6 @@
 #include <asm/code-patching.h>
 #include <asm/ftrace.h>
 
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(fmt , ...) do { } while (0)
-#endif
-
-static unsigned int ftrace_nop = PPC_NOP_INSTR;
-
 #ifdef CONFIG_PPC32
 # define GET_ADDR(addr) addr
 #else
@@ -35,37 +30,23 @@ static unsigned int ftrace_nop = PPC_NOP_INSTR;
 # define GET_ADDR(addr) (*(unsigned long *)addr)
 #endif
 
-
-static unsigned int ftrace_calc_offset(long ip, long addr)
+#ifdef CONFIG_DYNAMIC_FTRACE
+static unsigned int ftrace_nop_replace(void)
 {
-	return (int)(addr - ip);
+	return PPC_INST_NOP;
 }
 
-static unsigned char *ftrace_nop_replace(void)
+static unsigned int
+ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
 {
-	return (char *)&ftrace_nop;
-}
-
-static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
-{
-	static unsigned int op;
+	unsigned int op;
 
-	/*
-	 * It would be nice to just use create_function_call, but that will
-	 * update the code itself. Here we need to just return the
-	 * instruction that is going to be modified, without modifying the
-	 * code.
-	 */
 	addr = GET_ADDR(addr);
 
-	/* Set to "bl addr" */
-	op = 0x48000001 | (ftrace_calc_offset(ip, addr) & 0x03fffffc);
+	/* if (link) set op to 'bl' else 'b' */
+	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);
 
-	/*
-	 * No locking needed, this must be called via kstop_machine
-	 * which in essence is like running on a uniprocessor machine.
-	 */
-	return (unsigned char *)&op;
+	return op;
 }
 
 #ifdef CONFIG_PPC64
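Note on the hunk above: the removed code assembled the "bl" instruction by hand, while the new ftrace_call_replace() delegates to create_branch() from asm/code-patching.h and gains a link parameter so the same helper can emit either "b" or "bl". For reference, a PowerPC I-form branch packs major opcode 18 (0x48000000 once assembled) with a signed 24-bit word offset and the AA/LK flags in the two low bits, which is why "bl" is 0x48000001 and why is_bl_op() later in the file masks with 0xfc000003. A minimal sketch of that encoding, purely illustrative (the helper name is made up; the patch itself relies on create_branch()):

/* Illustrative only: encode a PowerPC I-form "bl addr" at ip.
 * This mirrors the removed ftrace_calc_offset()/0x48000001 logic;
 * the patched code uses create_branch() from asm/code-patching.h instead.
 */
static unsigned int example_bl_encode(unsigned long ip, unsigned long addr)
{
	long offset = (long)addr - (long)ip;	/* branch is ip-relative */

	/* the offset must fit in the signed, word-aligned 26-bit field */
	if (offset < -0x2000000 || offset > 0x1fffffc || (offset & 3))
		return 0;	/* out of range for a 24-bit branch */

	/* 0x48000001 = opcode 18 (b) with LK=1 (bl); 0x03fffffc keeps the offset */
	return 0x48000001 | ((unsigned int)offset & 0x03fffffc);
}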
@@ -77,10 +58,9 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 #endif
 
 static int
-ftrace_modify_code(unsigned long ip, unsigned char *old_code,
-		   unsigned char *new_code)
+ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
 {
-	unsigned char replaced[MCOUNT_INSN_SIZE];
+	unsigned int replaced;
 
 	/*
 	 * Note: Due to modules and __init, code can
@@ -93,15 +73,15 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	 */
 
 	/* read the text we want to modify */
-	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
 		return -EFAULT;
 
 	/* Make sure it is what we expect it to be */
-	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+	if (replaced != old)
 		return -EINVAL;
 
 	/* replace the text with the new text */
-	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+	if (probe_kernel_write((void *)ip, &new, MCOUNT_INSN_SIZE))
 		return -EPERM;
 
 	flush_icache_range(ip, ip + 8);
@@ -119,6 +99,8 @@ static int test_24bit_addr(unsigned long ip, unsigned long addr)
 	return create_branch((unsigned int *)ip, addr, 0);
 }
 
+#ifdef CONFIG_MODULES
+
 static int is_bl_op(unsigned int op)
 {
 	return (op & 0xfc000003) == 0x48000001;
@@ -175,7 +157,7 @@ __ftrace_make_nop(struct module *mod,
 	 * 0xe8, 0x4c, 0x00, 0x28,	ld	r2,40(r12)
 	 */
 
-	DEBUGP("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
+	pr_debug("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
 
 	/* Find where the trampoline jumps to */
 	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
@@ -183,7 +165,7 @@ __ftrace_make_nop(struct module *mod,
 		return -EFAULT;
 	}
 
-	DEBUGP(" %08x %08x", jmp[0], jmp[1]);
+	pr_debug(" %08x %08x", jmp[0], jmp[1]);
 
 	/* verify that this is what we expect it to be */
 	if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
@@ -199,18 +181,18 @@ __ftrace_make_nop(struct module *mod,
 	offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
 		(int)((short)jmp[1]);
 
-	DEBUGP(" %x ", offset);
+	pr_debug(" %x ", offset);
 
 	/* get the address this jumps too */
 	tramp = mod->arch.toc + offset + 32;
-	DEBUGP("toc: %lx", tramp);
+	pr_debug("toc: %lx", tramp);
 
 	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
 		printk(KERN_ERR "Failed to read %lx\n", tramp);
 		return -EFAULT;
 	}
 
-	DEBUGP(" %08x %08x\n", jmp[0], jmp[1]);
+	pr_debug(" %08x %08x\n", jmp[0], jmp[1]);
 
 	ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
 
@@ -287,7 +269,7 @@ __ftrace_make_nop(struct module *mod,
 	 * 0x4e, 0x80, 0x04, 0x20	bctr
 	 */
 
-	DEBUGP("ip:%lx jumps to %lx", ip, tramp);
+	pr_debug("ip:%lx jumps to %lx", ip, tramp);
 
 	/* Find where the trampoline jumps to */
 	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
@@ -295,7 +277,7 @@ __ftrace_make_nop(struct module *mod,
 		return -EFAULT;
 	}
 
-	DEBUGP(" %08x %08x ", jmp[0], jmp[1]);
+	pr_debug(" %08x %08x ", jmp[0], jmp[1]);
 
 	/* verify that this is what we expect it to be */
 	if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
@@ -311,7 +293,7 @@ __ftrace_make_nop(struct module *mod,
 	if (tramp & 0x8000)
 		tramp -= 0x10000;
 
-	DEBUGP(" %x ", tramp);
+	pr_debug(" %lx ", tramp);
 
 	if (tramp != addr) {
 		printk(KERN_ERR
@@ -320,7 +302,7 @@ __ftrace_make_nop(struct module *mod,
 		return -EINVAL;
 	}
 
-	op = PPC_NOP_INSTR;
+	op = PPC_INST_NOP;
 
 	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
 		return -EPERM;
@@ -330,12 +312,13 @@ __ftrace_make_nop(struct module *mod,
 	return 0;
 }
 #endif /* PPC64 */
+#endif /* CONFIG_MODULES */
 
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned char *old, *new;
 	unsigned long ip = rec->ip;
+	unsigned int old, new;
 
 	/*
 	 * If the calling address is more that 24 bits away,
@@ -344,11 +327,12 @@ int ftrace_make_nop(struct module *mod,
 	 */
 	if (test_24bit_addr(ip, addr)) {
 		/* within range */
-		old = ftrace_call_replace(ip, addr);
+		old = ftrace_call_replace(ip, addr, 1);
 		new = ftrace_nop_replace();
 		return ftrace_modify_code(ip, old, new);
 	}
 
+#ifdef CONFIG_MODULES
 	/*
 	 * Out of range jumps are called from modules.
 	 * We should either already have a pointer to the module
@@ -373,9 +357,13 @@ int ftrace_make_nop(struct module *mod,
 	mod = rec->arch.mod;
 
 	return __ftrace_make_nop(mod, rec, addr);
-
+#else
+	/* We should not get here without modules */
+	return -EINVAL;
+#endif /* CONFIG_MODULES */
 }
 
+#ifdef CONFIG_MODULES
 #ifdef CONFIG_PPC64
 static int
 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
@@ -392,7 +380,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	 * b +8; ld r2,40(r1)
 	 */
 	if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
-	    ((op[0] != PPC_NOP_INSTR) || (op[1] != PPC_NOP_INSTR))) {
+	    ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) {
 		printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
 		return -EINVAL;
 	}
@@ -414,7 +402,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	/* ld r2,40(r1) */
 	op[1] = 0xe8410028;
 
-	DEBUGP("write to %lx\n", rec->ip);
+	pr_debug("write to %lx\n", rec->ip);
 
 	if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
 		return -EPERM;
@@ -435,7 +423,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		return -EFAULT;
 
 	/* It should be pointing to a nop */
-	if (op != PPC_NOP_INSTR) {
+	if (op != PPC_INST_NOP) {
 		printk(KERN_ERR "Expected NOP but have %x\n", op);
 		return -EINVAL;
 	}
@@ -454,7 +442,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		return -EINVAL;
 	}
 
-	DEBUGP("write to %lx\n", rec->ip);
+	pr_debug("write to %lx\n", rec->ip);
 
 	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
 		return -EPERM;
@@ -464,11 +452,12 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	return 0;
 }
 #endif /* CONFIG_PPC64 */
+#endif /* CONFIG_MODULES */
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned char *old, *new;
 	unsigned long ip = rec->ip;
+	unsigned int old, new;
 
 	/*
 	 * If the calling address is more that 24 bits away,
@@ -478,10 +467,11 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	if (test_24bit_addr(ip, addr)) {
 		/* within range */
 		old = ftrace_nop_replace();
-		new = ftrace_call_replace(ip, addr);
+		new = ftrace_call_replace(ip, addr, 1);
 		return ftrace_modify_code(ip, old, new);
 	}
 
+#ifdef CONFIG_MODULES
 	/*
 	 * Out of range jumps are called from modules.
 	 * Being that we are converting from nop, it had better
@@ -493,16 +483,20 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	}
 
 	return __ftrace_make_call(rec, addr);
+#else
+	/* We should not get here without modules */
+	return -EINVAL;
+#endif /* CONFIG_MODULES */
 }
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long ip = (unsigned long)(&ftrace_call);
-	unsigned char old[MCOUNT_INSN_SIZE], *new;
+	unsigned int old, new;
 	int ret;
 
-	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
-	new = ftrace_call_replace(ip, (unsigned long)func);
+	old = *(unsigned int *)&ftrace_call;
+	new = ftrace_call_replace(ip, (unsigned long)func, 1);
 	ret = ftrace_modify_code(ip, old, new);
 
 	return ret;
@@ -517,3 +511,111 @@ int __init ftrace_dyn_arch_init(void *data)
 
 	return 0;
 }
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+extern void ftrace_graph_stub(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
+	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
+	unsigned int old, new;
+
+	old = ftrace_call_replace(ip, stub, 0);
+	new = ftrace_call_replace(ip, addr, 0);
+
+	return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
+	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
+	unsigned int old, new;
+
+	old = ftrace_call_replace(ip, addr, 0);
+	new = ftrace_call_replace(ip, stub, 0);
+
+	return ftrace_modify_code(ip, old, new);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_PPC64
+extern void mod_return_to_handler(void);
+#endif
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	int faulted;
+	struct ftrace_graph_ent trace;
+	unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+#ifdef CONFIG_PPC64
+	/* non core kernel code needs to save and restore the TOC */
+	if (REGION_ID(self_addr) != KERNEL_REGION_ID)
+		return_hooker = (unsigned long)&mod_return_to_handler;
+#endif
+
+	return_hooker = GET_ADDR(return_hooker);
+
+	/*
+	 * Protect against fault, even if it shouldn't
+	 * happen. This tool is too much intrusive to
+	 * ignore such a protection.
+	 */
+	asm volatile(
+		"1: " PPC_LL "%[old], 0(%[parent])\n"
+		"2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
+		"   li %[faulted], 0\n"
+		"3:\n"
+
+		".section .fixup, \"ax\"\n"
+		"4: li %[faulted], 1\n"
+		"   b 3b\n"
+		".previous\n"
+
+		".section __ex_table,\"a\"\n"
+			PPC_LONG_ALIGN "\n"
+			PPC_LONG "1b,4b\n"
+			PPC_LONG "2b,4b\n"
+		".previous"
+
+		: [old] "=r" (old), [faulted] "=r" (faulted)
+		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
+		: "memory"
+	);
+
+	if (unlikely(faulted)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		return;
+	}
+
+	if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
+		*parent = old;
+		return;
+	}
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		*parent = old;
+	}
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
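A closing note on prepare_ftrace_return() above: the fault-protected asm only swaps the caller's saved return address (at *parent) with return_to_handler, keeping the original value in old so it can be restored if the push fails. A rough C equivalent, purely illustrative and not part of the patch (it reuses the probe_kernel_* helpers seen earlier in this file, and the helper name is made up; the asm form presumably keeps the access and its exception-table fixup inline on the hot path):

/* Illustrative sketch only: what the fault-protected asm accomplishes.
 * Would need <linux/uaccess.h> for probe_kernel_read()/probe_kernel_write().
 */
static int example_hook_parent(unsigned long *parent,
			       unsigned long return_hooker,
			       unsigned long *old)
{
	/* read the saved return address (the caller's LR slot) */
	if (probe_kernel_read(old, parent, sizeof(*old)))
		return -EFAULT;

	/* redirect the return through return_to_handler */
	if (probe_kernel_write(parent, &return_hooker, sizeof(return_hooker)))
		return -EFAULT;

	return 0;
}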