| author | Steven Rostedt <srostedt@redhat.com> | 2008-11-14 19:21:20 -0500 |
| --- | --- | --- |
| committer | Steven Rostedt <srostedt@redhat.com> | 2008-11-20 13:52:04 -0500 |
| commit | e4486fe316895e87672a563c4f36393218f84ff1 (patch) | |
| tree | 8cb8e68228e9a9a7c6acf8455d79b3484c8a78a4 /arch/powerpc/kernel/ftrace.c | |
| parent | 8fd6e5a8c81e2e9b912ea33c8425a10729db469b (diff) | |
powerpc: ftrace, use probe_kernel API to modify code
Impact: use cleaner probe_kernel API over assembly
Using the probe_kernel_read/write interface is a much cleaner approach
than the current assembly version.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
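
For readers unfamiliar with the accessors named above: probe_kernel_read() and probe_kernel_write() are the generic fault-safe copy helpers declared in <linux/uaccess.h> (implemented in mm/maccess.c); they return -EFAULT instead of oopsing when the kernel address is unmapped. The following is a minimal sketch of the read/compare/write pattern this patch adopts; the function name patch_insn and its parameters are illustrative, not part of the patch:

```c
#include <linux/uaccess.h>	/* probe_kernel_read/write */
#include <linux/errno.h>

/*
 * Illustrative only: patch one instruction word at 'ip', but
 * only if it still contains 'expect'.  Mirrors the pattern
 * used by ftrace_modify_code() below.
 */
static long patch_insn(unsigned long ip, unsigned int expect,
		       unsigned int repl)
{
	unsigned int cur;

	/* read may fault if the text (e.g. a module) is gone */
	if (probe_kernel_read(&cur, (void *)ip, sizeof(cur)))
		return -EFAULT;

	/* refuse to patch code that is not what we expect */
	if (cur != expect)
		return -EINVAL;

	/* write through the same fault-safe interface */
	if (probe_kernel_write((void *)ip, &repl, sizeof(repl)))
		return -EPERM;

	return 0;
}
```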
Diffstat (limited to 'arch/powerpc/kernel/ftrace.c')
-rw-r--r-- | arch/powerpc/kernel/ftrace.c | 53
1 file changed, 21 insertions(+), 32 deletions(-)
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 24c023a5cae8..1adfbb268d8e 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -9,6 +9,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/init.h>
@@ -72,45 +73,33 @@ static int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
 {
-	unsigned replaced;
-	unsigned old = *(unsigned *)old_code;
-	unsigned new = *(unsigned *)new_code;
-	int faulted = 0;
+	unsigned char replaced[MCOUNT_INSN_SIZE];
 
 	/*
 	 * Note: Due to modules and __init, code can
 	 * disappear and change, we need to protect against faulting
-	 * as well as code changing.
+	 * as well as code changing. We do this by using the
+	 * probe_kernel_* functions.
 	 *
 	 * No real locking needed, this code is run through
-	 * kstop_machine.
+	 * kstop_machine, or before SMP starts.
 	 */
-	asm volatile (
-		"1: lwz		%1, 0(%2)\n"
-		"   cmpw	%1, %5\n"
-		"   bne		2f\n"
-		"   stwu	%3, 0(%2)\n"
-		"2:\n"
-		".section .fixup, \"ax\"\n"
-		"3:	li %0, 1\n"
-		"	b 2b\n"
-		".previous\n"
-		".section __ex_table,\"a\"\n"
-		_ASM_ALIGN "\n"
-		_ASM_PTR "1b, 3b\n"
-		".previous"
-		: "=r"(faulted), "=r"(replaced)
-		: "r"(ip), "r"(new),
-		  "0"(faulted), "r"(old)
-		: "memory");
-
-	if (replaced != old && replaced != new)
-		faulted = 2;
-
-	if (!faulted)
-		flush_icache_range(ip, ip + 8);
-
-	return faulted;
+
+	/* read the text we want to modify */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* Make sure it is what we expect it to be */
+	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+		return -EINVAL;
+
+	/* replace the text with the new text */
+	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+		return -EPERM;
+
+	flush_icache_range(ip, ip + 8);
+
+	return 0;
 }
 
 static int test_24bit_addr(unsigned long ip, unsigned long addr)
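
As a usage illustration (not part of this commit), a caller in the arch ftrace code hands ftrace_modify_code() the expected old instruction bytes and the replacement bytes. The helper below, including its name and the hard-coded nop encoding, is a hypothetical sketch:

```c
/*
 * Hypothetical caller sketch: turn the call at 'ip' into a nop,
 * but only if the site still holds 'expected'.  0x60000000 is
 * the PowerPC nop ("ori 0,0,0"); MCOUNT_INSN_SIZE is 4 here.
 */
static int nop_out_call(unsigned long ip, unsigned int expected)
{
	unsigned int nop = 0x60000000;

	return ftrace_modify_code(ip, (unsigned char *)&expected,
				  (unsigned char *)&nop);
}
```

The distinct return values let callers tell the failure modes apart: -EFAULT means the text was unreadable (e.g. an unloaded module), -EINVAL means the site no longer matched the expected instruction, and -EPERM means the write itself faulted.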