author     Rabin Vincent <rabin@rab.in>                 2012-02-18 11:50:51 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2012-03-24 05:38:55 -0400
commit     b21d55e98ac2bbcbbeec9a8cb091f717fd95b072
tree       ad3eb60af18816acef347bc22e43059c0c0d6873 /arch/arm/kernel/kprobes.c
parent     d82227cf8f0b42ff42c21ed47025fdf54cb1698d
ARM: 7332/1: extract out code patch function from kprobes
Extract out the code patching code from kprobes so that it can be used
from the jump label code. Additionally, the separated code:
- Uses the IS_ENABLED() macro instead of #ifdefs for THUMB2
  support
- Unifies the two separate functions in kprobes, providing one function
that uses stop_machine() internally, and one that can be called from
stop_machine() directly
- Patches the text on all CPUs only on processors requiring software
broadcasting of cache operations
Acked-by: Jon Medhurst <tixy@yxit.co.uk>
Tested-by: Jon Medhurst <tixy@yxit.co.uk>
Signed-off-by: Rabin Vincent <rabin@rab.in>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/kernel/kprobes.c')
-rw-r--r--   arch/arm/kernel/kprobes.c   86
1 file changed, 24 insertions(+), 62 deletions(-)
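Since this view is limited to kprobes.c, the patch_text()/__patch_text() helpers that the new code below calls are not shown here; they are added by the same commit in arch/arm/kernel/patch.c and patch.h. The following is only a sketch of that interface as implied by the commit message and the call sites below, not the actual patch.c: the struct and callback names are illustrative, the Thumb-2 halfword ordering is omitted, and the real helper must also keep the old set_t32_breakpoint() behaviour of using stop_machine() for a Thumb-32 breakpoint that straddles a word boundary.

/*
 * Sketch of the extracted interface; names other than
 * patch_text()/__patch_text() are illustrative assumptions.
 */
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

struct patch {
        void *addr;
        unsigned int insn;
};

/* Raw write plus I-cache sync; callers must quiesce other CPUs themselves. */
void __patch_text(void *addr, unsigned int insn)
{
        *(u32 *)addr = insn;    /* Thumb-2 halfword ordering omitted in this sketch */
        flush_icache_range((uintptr_t)addr, (uintptr_t)addr + sizeof(u32));
}

static int patch_text_stop_machine(void *data)
{
        struct patch *p = data;

        __patch_text(p->addr, p->insn);
        return 0;
}

/* Safe to call directly; synchronizes all CPUs only when required. */
void patch_text(void *addr, unsigned int insn)
{
        struct patch p = { .addr = addr, .insn = insn };

        if (cache_ops_need_broadcast())
                /* Cache ops need software broadcast: patch under stop_machine(). */
                stop_machine(patch_text_stop_machine, &p, cpu_online_mask);
        else
                __patch_text(addr, insn);
}

The key design point is that __patch_text() performs the raw write and instruction-cache maintenance and may only run while other CPUs are quiesced, while patch_text() decides whether that quiescing is needed at all.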
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 129c1163248b..ab1869dac97a 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -29,6 +29,7 @@
 #include <asm/cacheflush.h>
 
 #include "kprobes.h"
+#include "patch.h"
 
 #define MIN_STACK_SIZE(addr) \
         min((unsigned long)MAX_STACK_SIZE, \
@@ -103,57 +104,33 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
         return 0;
 }
 
-#ifdef CONFIG_THUMB2_KERNEL
-
-/*
- * For a 32-bit Thumb breakpoint spanning two memory words we need to take
- * special precautions to insert the breakpoint atomically, especially on SMP
- * systems. This is achieved by calling this arming function using stop_machine.
- */
-static int __kprobes set_t32_breakpoint(void *addr)
-{
-        ((u16 *)addr)[0] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION >> 16;
-        ((u16 *)addr)[1] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION & 0xffff;
-        flush_insns(addr, 2*sizeof(u16));
-        return 0;
-}
-
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
-        uintptr_t addr = (uintptr_t)p->addr & ~1; /* Remove any Thumb flag */
-
-        if (!is_wide_instruction(p->opcode)) {
-                *(u16 *)addr = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
-                flush_insns(addr, sizeof(u16));
-        } else if (addr & 2) {
-                /* A 32-bit instruction spanning two words needs special care */
-                stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map);
+        unsigned int brkp;
+        void *addr;
+
+        if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+                /* Remove any Thumb flag */
+                addr = (void *)((uintptr_t)p->addr & ~1);
+
+                if (is_wide_instruction(p->opcode))
+                        brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
+                else
+                        brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
         } else {
-                /* Word aligned 32-bit instruction can be written atomically */
-                u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
-#ifndef __ARMEB__ /* Swap halfwords for little-endian */
-                bkp = (bkp >> 16) | (bkp << 16);
-#endif
-                *(u32 *)addr = bkp;
-                flush_insns(addr, sizeof(u32));
-        }
-}
+                kprobe_opcode_t insn = p->opcode;
 
-#else /* !CONFIG_THUMB2_KERNEL */
+                addr = p->addr;
+                brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
 
-void __kprobes arch_arm_kprobe(struct kprobe *p)
-{
-        kprobe_opcode_t insn = p->opcode;
-        kprobe_opcode_t brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
-        if (insn >= 0xe0000000)
-                brkp |= 0xe0000000; /* Unconditional instruction */
-        else
-                brkp |= insn & 0xf0000000; /* Copy condition from insn */
-        *p->addr = brkp;
-        flush_insns(p->addr, sizeof(p->addr[0]));
-}
+                if (insn >= 0xe0000000)
+                        brkp |= 0xe0000000; /* Unconditional instruction */
+                else
+                        brkp |= insn & 0xf0000000; /* Copy condition from insn */
+        }
 
-#endif /* !CONFIG_THUMB2_KERNEL */
+        patch_text(addr, brkp);
+}
 
 /*
  * The actual disarming is done here on each CPU and synchronized using
@@ -166,25 +143,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 int __kprobes __arch_disarm_kprobe(void *p)
 {
         struct kprobe *kp = p;
-#ifdef CONFIG_THUMB2_KERNEL
-        u16 *addr = (u16 *)((uintptr_t)kp->addr & ~1);
-        kprobe_opcode_t insn = kp->opcode;
-        unsigned int len;
+        void *addr = (void *)((uintptr_t)kp->addr & ~1);
 
-        if (is_wide_instruction(insn)) {
-                ((u16 *)addr)[0] = insn>>16;
-                ((u16 *)addr)[1] = insn;
-                len = 2*sizeof(u16);
-        } else {
-                ((u16 *)addr)[0] = insn;
-                len = sizeof(u16);
-        }
-        flush_insns(addr, len);
+        __patch_text(addr, kp->opcode);
 
-#else /* !CONFIG_THUMB2_KERNEL */
-        *kp->addr = kp->opcode;
-        flush_insns(kp->addr, sizeof(kp->addr[0]));
-#endif
         return 0;
 }
 
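The comment retained above __arch_disarm_kprobe() ("The actual disarming is done here on each CPU and synchronized using ...") refers to its caller, which this patch does not touch. For orientation only, that caller looks roughly like the sketch below (it is not part of this diff, and the exact cpumask argument may differ in this kernel version). Because it already runs the callback under stop_machine(), the disarm path switches to the bare __patch_text(), while arch_arm_kprobe() above uses patch_text() and lets it decide whether stop_machine() is needed.

/* Existing caller elsewhere in kprobes.c, unchanged by this patch (sketch). */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        /* Remove the breakpoint on all CPUs atomically. */
        stop_machine(__arch_disarm_kprobe, p, cpu_online_mask);
}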