author     Balbir Singh <bsingharora@gmail.com>      2017-06-06 00:29:39 -0400
committer  Michael Ellerman <mpe@ellerman.id.au>     2017-07-03 09:12:19 -0400
commit     f3eca956389316acd1a132fad1ad0b6f2ca78a61 (patch)
tree       d5c198f0c0b1a69263d81f5ff5f9c19beea55af1
parent     d07df82c43be82ab6972662180e89e6ba2a828ad (diff)
powerpc/kprobes/optprobes: Use patch_instruction()
So that we can implement STRICT_RWX, use patch_instruction() in
optprobes.
Signed-off-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
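
For context, here is a minimal sketch of the idiom this patch applies throughout optprobes.c: every direct store of an instruction word into the trampoline buffer goes through patch_instruction() instead, so the writes keep working once STRICT_RWX makes those mappings read-only. The helper example_load_imm32() below is invented for illustration; only patch_instruction(), the PPC_INST_* opcodes and the ___PPC_* field macros come from the kernel headers named in the includes.

/*
 * Illustration only, not part of the patch: load a 32-bit immediate
 * into r4 at 'addr' by patching two instructions.
 */
#include <asm/code-patching.h>  /* patch_instruction() */
#include <asm/ppc-opcode.h>     /* PPC_INST_*, ___PPC_RT(), ___PPC_RA(), ___PPC_RS() */

static int example_load_imm32(unsigned int *addr, unsigned int val)
{
        int err;

        /*
         * Old style, as removed by this patch:
         *      *addr++ = PPC_INST_ADDIS | ___PPC_RT(4) | ((val >> 16) & 0xffff);
         * A plain store like this would fault once the slot's mapping is read-only.
         */

        /* addis r4,0,val@h */
        err = patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
                                ((val >> 16) & 0xffff));
        if (err)
                return err;

        /* ori r4,r4,val@l */
        return patch_instruction(addr + 1, PPC_INST_ORI | ___PPC_RA(4) |
                                 ___PPC_RS(4) | (val & 0xffff));
}

The functions touched below (patch_imm32_load_insns(), patch_imm64_load_insns() and arch_prepare_optimized_kprobe()) follow the same pattern, as the diff shows.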
-rw-r--r--  arch/powerpc/kernel/optprobes.c | 53
1 file changed, 32 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index ec60ed0d4aad..6f8273f5e988 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -158,12 +158,13 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
 void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
 {
         /* addis r4,0,(insn)@h */
-        *addr++ = PPC_INST_ADDIS | ___PPC_RT(4) |
-                  ((val >> 16) & 0xffff);
+        patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
+                          ((val >> 16) & 0xffff));
+        addr++;
 
         /* ori r4,r4,(insn)@l */
-        *addr = PPC_INST_ORI | ___PPC_RA(4) | ___PPC_RS(4) |
-                (val & 0xffff);
+        patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
+                          ___PPC_RS(4) | (val & 0xffff));
 }
 
 /*
@@ -173,24 +174,28 @@ void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
 void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
 {
         /* lis r3,(op)@highest */
-        *addr++ = PPC_INST_ADDIS | ___PPC_RT(3) |
-                  ((val >> 48) & 0xffff);
+        patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
+                          ((val >> 48) & 0xffff));
+        addr++;
 
         /* ori r3,r3,(op)@higher */
-        *addr++ = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
-                  ((val >> 32) & 0xffff);
+        patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
+                          ___PPC_RS(3) | ((val >> 32) & 0xffff));
+        addr++;
 
         /* rldicr r3,r3,32,31 */
-        *addr++ = PPC_INST_RLDICR | ___PPC_RA(3) | ___PPC_RS(3) |
-                  __PPC_SH64(32) | __PPC_ME64(31);
+        patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
+                          ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
+        addr++;
 
         /* oris r3,r3,(op)@h */
-        *addr++ = PPC_INST_ORIS | ___PPC_RA(3) | ___PPC_RS(3) |
-                  ((val >> 16) & 0xffff);
+        patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
+                          ___PPC_RS(3) | ((val >> 16) & 0xffff));
+        addr++;
 
         /* ori r3,r3,(op)@l */
-        *addr = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
-                (val & 0xffff);
+        patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
+                          ___PPC_RS(3) | (val & 0xffff));
 }
 
 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
@@ -198,7 +203,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
         kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
         kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
         long b_offset;
-        unsigned long nip;
+        unsigned long nip, size;
+        int rc, i;
 
         kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
 
@@ -231,8 +237,14 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
                 goto error;
 
         /* Setup template */
-        memcpy(buff, optprobe_template_entry,
-                        TMPL_END_IDX * sizeof(kprobe_opcode_t));
+        /* We can optimize this via patch_instruction_window later */
+        size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
+        pr_devel("Copying template to %p, size %lu\n", buff, size);
+        for (i = 0; i < size; i++) {
+                rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
+                if (rc < 0)
+                        goto error;
+        }
 
         /*
          * Fixup the template with instructions to:
@@ -261,8 +273,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
         if (!branch_op_callback || !branch_emulate_step)
                 goto error;
 
-        buff[TMPL_CALL_HDLR_IDX] = branch_op_callback;
-        buff[TMPL_EMULATE_IDX] = branch_emulate_step;
+        patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
+        patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);
 
         /*
          * 3. load instruction to be emulated into relevant register, and
@@ -272,8 +284,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
         /*
          * 4. branch back from trampoline
          */
-        buff[TMPL_RET_IDX] = create_branch((unsigned int *)buff + TMPL_RET_IDX,
-                                (unsigned long)nip, 0);
+        patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);
 
         flush_icache_range((unsigned long)buff,
                            (unsigned long)(&buff[TMPL_END_IDX]));