author      Masami Hiramatsu <mhiramat@redhat.com>    2010-02-25 08:34:46 -0500
committer   Ingo Molnar <mingo@elte.hu>               2010-02-25 11:49:26 -0500
commit      c0f7ac3a9edde786bc129d37627953a8b8abefdf (patch)
tree        8d6f2df2d1a08f2893327dd2a8b6e56525705456 /arch/x86/kernel/kprobes.c
parent      3d55cc8a058ee96291d6d45b1e35121b9920eca3 (diff)
kprobes/x86: Support kprobes jump optimization on x86
Introduce x86 arch-specific optimization code, which supports both
x86-32 and x86-64.

This code also supports safety checking, which decodes the whole of
the function in which a probe is inserted and checks the following
conditions before optimization:
- The instructions to be replaced by a jump instruction don't
  straddle the function boundary.
- There is no indirect jump instruction in the function, because it
  may jump into the address range that is replaced by the jump
  operand.
- There is no jump/loop instruction that jumps into the address range
  replaced by the jump operand (see the sketch after this list).
- The probe is not in a function into which fixup code may jump.
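
To make the jump-into-range condition concrete, here is a minimal
user-space sketch that mirrors the insn_jump_into_range() check added
by this patch. struct toy_insn, jumps_into_range(), and the example
addresses are illustrative stand-ins (the two-byte 0x0f 8x jcc forms
are omitted for brevity); this is not kernel code:

#include <stdio.h>

struct toy_insn {
        unsigned char opcode;           /* first opcode byte */
        long imm;                       /* signed relative displacement */
        unsigned long next_byte;        /* address of the next instruction */
};

/* Does a relative jump/loop land inside [start, start + len]? */
static int jumps_into_range(const struct toy_insn *insn,
                            unsigned long start, int len)
{
        unsigned long target;

        switch (insn->opcode) {
        case 0xe0:      /* loopne */
        case 0xe1:      /* loope */
        case 0xe2:      /* loop */
        case 0xe3:      /* jcxz */
        case 0xe9:      /* near relative jump */
        case 0xeb:      /* short relative jump */
                break;
        default:
                if ((insn->opcode & 0xf0) == 0x70)      /* jcc short */
                        break;
                return 0;       /* not a relative jump/loop */
        }
        /* x86 relative jumps are relative to the next instruction. */
        target = insn->next_byte + insn->imm;
        return start <= target && target <= start + len;
}

int main(void)
{
        /* 2-byte "jmp rel8" at 0x1000, jumping back 3 bytes to 0xfff */
        struct toy_insn jmp = { 0xeb, -3, 0x1002 };

        /* the 4 relative-address bytes being overwritten start at 0xfff */
        printf("%d\n", jumps_into_range(&jmp, 0xfff, 4));       /* prints 1 */
        return 0;
}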
This uses text_poke_smp(), which doesn't support modifying code in
NMI/MCE handlers. However, since kprobes itself doesn't support
probing NMI/MCE code, this is not a problem.
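
For reference, here is a minimal user-space sketch of how the 5-byte
relative jump is synthesized and of the 2GB displacement check done
when preparing the detour slot (cf. __synthesize_relative_insn() and
arch_prepare_optimized_kprobe() in the diff below). The buffers and
the main() driver are made up for the example; the real code patches
live kernel text instead:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define RELATIVEJUMP_OPCODE 0xe9        /* jmp rel32 */
#define RELATIVEJUMP_SIZE 5

/* opcode byte followed by a signed 32-bit displacement */
struct relative_insn {
        uint8_t op;
        int32_t raddr;
} __attribute__((packed));

static void synthesize_reljump(void *from, void *to)
{
        struct relative_insn *insn = from;

        /* the displacement is relative to the end of the 5-byte insn */
        insn->raddr = (int32_t)((long)to - ((long)from + 5));
        insn->op = RELATIVEJUMP_OPCODE;
}

int main(void)
{
        uint8_t probe_site[RELATIVEJUMP_SIZE], detour[16];
        long rel = (long)detour - (long)probe_site + RELATIVEJUMP_SIZE;
        int32_t disp;

        /* same 2GB sanity check the patch applies before optimizing */
        if (labs(rel) > 0x7fffffff) {
                fprintf(stderr, "detour out of rel32 range\n");
                return 1;
        }
        synthesize_reljump(probe_site, detour);
        memcpy(&disp, probe_site + 1, sizeof(disp));
        printf("opcode=%#x disp=%d\n", probe_site[0], disp);
        return 0;
}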
Changes in v9:
- Use *_text_reserved() to check whether the probe can be optimized.
- Verify that the jump address is within the 2GB range when preparing
  the slot.
- Back up the original code when switching to the optimized buffer
  instead of when preparing the buffer, because int3s of other probes
  may be inserted during the preparation phase.
- Check that the kprobe is disabled in arch_check_optimized_kprobe().
- Strictly check the indirect jump opcodes (ff /4, ff /5).
Changes in v6:
- Split stop_machine-based jump patching code.
- Update comments and coding style.
Changes in v5:
- Introduce stop_machine-based jump replacing.
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: systemtap <systemtap@sources.redhat.com>
Cc: DLE <dle-develop@lists.sourceforge.net>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Anders Kaseorg <andersk@ksplice.com>
Cc: Tim Abbott <tabbott@ksplice.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <compudj@krystal.dyndns.org>
LKML-Reference: <20100225133446.6725.78994.stgit@localhost6.localdomain6>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/kprobes.c')
-rw-r--r--  arch/x86/kernel/kprobes.c | 433
1 file changed, 411 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 4ae95befd0e..b43bbaebe2c 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -49,6 +49,7 @@
 #include <linux/module.h>
 #include <linux/kdebug.h>
 #include <linux/kallsyms.h>
+#include <linux/ftrace.h>
 
 #include <asm/cacheflush.h>
 #include <asm/desc.h>
@@ -106,16 +107,22 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
 };
 const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
 
-/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-static void __kprobes set_jmp_op(void *from, void *to)
+static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
 {
-        struct __arch_jmp_op {
-                char op;
+        struct __arch_relative_insn {
+                u8 op;
                 s32 raddr;
-        } __attribute__((packed)) * jop;
-        jop = (struct __arch_jmp_op *)from;
-        jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
-        jop->op = RELATIVEJUMP_OPCODE;
+        } __attribute__((packed)) *insn;
+
+        insn = (struct __arch_relative_insn *)from;
+        insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
+        insn->op = op;
+}
+
+/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
+static void __kprobes synthesize_reljump(void *from, void *to)
+{
+        __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
 }
 
 /*
@@ -202,7 +209,7 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
         /*
          * Basically, kp->ainsn.insn has an original instruction.
          * However, RIP-relative instruction can not do single-stepping
-         * at different place, fix_riprel() tweaks the displacement of
+         * at different place, __copy_instruction() tweaks the displacement of
          * that instruction. In that case, we can't recover the instruction
          * from the kp->ainsn.insn.
          *
@@ -284,21 +291,37 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
 }
 
 /*
- * Adjust the displacement if the instruction uses the %rip-relative
- * addressing mode.
+ * Copy an instruction and adjust the displacement if the instruction
+ * uses the %rip-relative addressing mode.
  * If it does, Return the address of the 32-bit displacement word.
  * If not, return null.
  * Only applicable to 64-bit x86.
  */
-static void __kprobes fix_riprel(struct kprobe *p)
+static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
 {
-#ifdef CONFIG_X86_64
         struct insn insn;
-        kernel_insn_init(&insn, p->ainsn.insn);
+        int ret;
+        kprobe_opcode_t buf[MAX_INSN_SIZE];
 
+        kernel_insn_init(&insn, src);
+        if (recover) {
+                insn_get_opcode(&insn);
+                if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
+                        ret = recover_probed_instruction(buf,
+                                                         (unsigned long)src);
+                        if (ret)
+                                return 0;
+                        kernel_insn_init(&insn, buf);
+                }
+        }
+        insn_get_length(&insn);
+        memcpy(dest, insn.kaddr, insn.length);
+
+#ifdef CONFIG_X86_64
         if (insn_rip_relative(&insn)) {
                 s64 newdisp;
                 u8 *disp;
+                kernel_insn_init(&insn, dest);
                 insn_get_displacement(&insn);
                 /*
                  * The copied instruction uses the %rip-relative addressing
@@ -312,20 +335,23 @@ static void __kprobes fix_riprel(struct kprobe *p)
                  * extension of the original signed 32-bit displacement would
                  * have given.
                  */
-                newdisp = (u8 *) p->addr + (s64) insn.displacement.value -
-                          (u8 *) p->ainsn.insn;
+                newdisp = (u8 *) src + (s64) insn.displacement.value -
+                          (u8 *) dest;
                 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
-                disp = (u8 *) p->ainsn.insn + insn_offset_displacement(&insn);
+                disp = (u8 *) dest + insn_offset_displacement(&insn);
                 *(s32 *) disp = (s32) newdisp;
         }
 #endif
+        return insn.length;
 }
 
 static void __kprobes arch_copy_kprobe(struct kprobe *p)
 {
-        memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-
-        fix_riprel(p);
+        /*
+         * Copy an instruction without recovering int3, because it will be
+         * put by another subsystem.
+         */
+        __copy_instruction(p->ainsn.insn, p->addr, 0);
 
         if (can_boost(p->addr))
                 p->ainsn.boostable = 0;
@@ -417,9 +443,20 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
         *sara = (unsigned long) &kretprobe_trampoline;
 }
 
+#ifdef CONFIG_OPTPROBES
+static int __kprobes setup_detour_execution(struct kprobe *p,
+                                            struct pt_regs *regs,
+                                            int reenter);
+#else
+#define setup_detour_execution(p, regs, reenter) (0)
+#endif
+
 static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
                                        struct kprobe_ctlblk *kcb, int reenter)
 {
+        if (setup_detour_execution(p, regs, reenter))
+                return;
+
 #if !defined(CONFIG_PREEMPT)
         if (p->ainsn.boostable == 1 && !p->post_handler) {
                 /* Boost up -- we can execute copied instructions directly */
@@ -815,8 +852,8 @@ static void __kprobes resume_execution(struct kprobe *p,
                  * These instructions can be executed directly if it
                  * jumps back to correct address.
                  */
-                set_jmp_op((void *)regs->ip,
-                           (void *)orig_ip + (regs->ip - copy_ip));
+                synthesize_reljump((void *)regs->ip,
+                                   (void *)orig_ip + (regs->ip - copy_ip));
                 p->ainsn.boostable = 1;
         } else {
                 p->ainsn.boostable = -1;
@@ -1043,6 +1080,358 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
         return 0;
 }
 
+
+#ifdef CONFIG_OPTPROBES
+
+/* Insert a call instruction at address 'from', which calls address 'to'.*/
+static void __kprobes synthesize_relcall(void *from, void *to)
+{
+        __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
+}
+
+/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
+static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
+                                          unsigned long val)
+{
+#ifdef CONFIG_X86_64
+        *addr++ = 0x48;
+        *addr++ = 0xbf;
+#else
+        *addr++ = 0xb8;
+#endif
+        *(unsigned long *)addr = val;
+}
+
+void __kprobes kprobes_optinsn_template_holder(void)
+{
+        asm volatile (
+                        ".global optprobe_template_entry\n"
+                        "optprobe_template_entry: \n"
+#ifdef CONFIG_X86_64
+                        /* We don't bother saving the ss register */
+                        " pushq %rsp\n"
+                        " pushfq\n"
+                        SAVE_REGS_STRING
+                        " movq %rsp, %rsi\n"
+                        ".global optprobe_template_val\n"
+                        "optprobe_template_val: \n"
+                        ASM_NOP5
+                        ASM_NOP5
+                        ".global optprobe_template_call\n"
+                        "optprobe_template_call: \n"
+                        ASM_NOP5
+                        /* Move flags to rsp */
+                        " movq 144(%rsp), %rdx\n"
+                        " movq %rdx, 152(%rsp)\n"
+                        RESTORE_REGS_STRING
+                        /* Skip flags entry */
+                        " addq $8, %rsp\n"
+                        " popfq\n"
+#else /* CONFIG_X86_32 */
+                        " pushf\n"
+                        SAVE_REGS_STRING
+                        " movl %esp, %edx\n"
+                        ".global optprobe_template_val\n"
+                        "optprobe_template_val: \n"
+                        ASM_NOP5
+                        ".global optprobe_template_call\n"
+                        "optprobe_template_call: \n"
+                        ASM_NOP5
+                        RESTORE_REGS_STRING
+                        " addl $4, %esp\n"      /* skip cs */
+                        " popf\n"
+#endif
+                        ".global optprobe_template_end\n"
+                        "optprobe_template_end: \n");
+}
+
+#define TMPL_MOVE_IDX \
+        ((long)&optprobe_template_val - (long)&optprobe_template_entry)
+#define TMPL_CALL_IDX \
+        ((long)&optprobe_template_call - (long)&optprobe_template_entry)
+#define TMPL_END_IDX \
+        ((long)&optprobe_template_end - (long)&optprobe_template_entry)
+
+#define INT3_SIZE sizeof(kprobe_opcode_t)
+
+/* Optimized kprobe call back function: called from optinsn */
+static void __kprobes optimized_callback(struct optimized_kprobe *op,
+                                         struct pt_regs *regs)
+{
+        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+        preempt_disable();
+        if (kprobe_running()) {
+                kprobes_inc_nmissed_count(&op->kp);
+        } else {
+                /* Save skipped registers */
+#ifdef CONFIG_X86_64
+                regs->cs = __KERNEL_CS;
+#else
+                regs->cs = __KERNEL_CS | get_kernel_rpl();
+                regs->gs = 0;
+#endif
+                regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
+                regs->orig_ax = ~0UL;
+
+                __get_cpu_var(current_kprobe) = &op->kp;
+                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+                opt_pre_handler(&op->kp, regs);
+                __get_cpu_var(current_kprobe) = NULL;
+        }
+        preempt_enable_no_resched();
+}
+
+static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
+{
+        int len = 0, ret;
+
+        while (len < RELATIVEJUMP_SIZE) {
+                ret = __copy_instruction(dest + len, src + len, 1);
+                if (!ret || !can_boost(dest + len))
+                        return -EINVAL;
+                len += ret;
+        }
+        /* Check whether the address range is reserved */
+        if (ftrace_text_reserved(src, src + len - 1) ||
+            alternatives_text_reserved(src, src + len - 1))
+                return -EBUSY;
+
+        return len;
+}
+
+/* Check whether insn is indirect jump */
+static int __kprobes insn_is_indirect_jump(struct insn *insn)
+{
+        return ((insn->opcode.bytes[0] == 0xff &&
+                (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
+                insn->opcode.bytes[0] == 0xea); /* Segment based jump */
+}
+
+/* Check whether insn jumps into specified address range */
+static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
+{
+        unsigned long target = 0;
+
+        switch (insn->opcode.bytes[0]) {
+        case 0xe0:      /* loopne */
+        case 0xe1:      /* loope */
+        case 0xe2:      /* loop */
+        case 0xe3:      /* jcxz */
+        case 0xe9:      /* near relative jump */
+        case 0xeb:      /* short relative jump */
+                break;
+        case 0x0f:
+                if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
+                        break;
+                return 0;
+        default:
+                if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
+                        break;
+                return 0;
+        }
+        target = (unsigned long)insn->next_byte + insn->immediate.value;
+
+        return (start <= target && target <= start + len);
+}
+
+/* Decode whole function to ensure any instructions don't jump into target */
+static int __kprobes can_optimize(unsigned long paddr)
+{
+        int ret;
+        unsigned long addr, size = 0, offset = 0;
+        struct insn insn;
+        kprobe_opcode_t buf[MAX_INSN_SIZE];
+        /* Dummy buffers for lookup_symbol_attrs */
+        static char __dummy_buf[KSYM_NAME_LEN];
+
+        /* Lookup symbol including addr */
+        if (!kallsyms_lookup(paddr, &size, &offset, NULL, __dummy_buf))
+                return 0;
+
+        /* Check there is enough space for a relative jump. */
+        if (size - offset < RELATIVEJUMP_SIZE)
+                return 0;
+
+        /* Decode instructions */
+        addr = paddr - offset;
+        while (addr < paddr - offset + size) { /* Decode until function end */
+                if (search_exception_tables(addr))
+                        /*
+                         * Since some fixup code will jumps into this function,
+                         * we can't optimize kprobe in this function.
+                         */
+                        return 0;
+                kernel_insn_init(&insn, (void *)addr);
+                insn_get_opcode(&insn);
+                if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
+                        ret = recover_probed_instruction(buf, addr);
+                        if (ret)
+                                return 0;
+                        kernel_insn_init(&insn, buf);
+                }
+                insn_get_length(&insn);
+                /* Recover address */
+                insn.kaddr = (void *)addr;
+                insn.next_byte = (void *)(addr + insn.length);
+                /* Check any instructions don't jump into target */
+                if (insn_is_indirect_jump(&insn) ||
+                    insn_jump_into_range(&insn, paddr + INT3_SIZE,
+                                         RELATIVE_ADDR_SIZE))
+                        return 0;
+                addr += insn.length;
+        }
+
+        return 1;
+}
+
+/* Check optimized_kprobe can actually be optimized. */
+int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
+{
+        int i;
+        struct kprobe *p;
+
+        for (i = 1; i < op->optinsn.size; i++) {
+                p = get_kprobe(op->kp.addr + i);
+                if (p && !kprobe_disabled(p))
+                        return -EEXIST;
+        }
+
+        return 0;
+}
+
+/* Check the addr is within the optimized instructions. */
+int __kprobes arch_within_optimized_kprobe(struct optimized_kprobe *op,
+                                           unsigned long addr)
+{
+        return ((unsigned long)op->kp.addr <= addr &&
+                (unsigned long)op->kp.addr + op->optinsn.size > addr);
+}
+
+/* Free optimized instruction slot */
+static __kprobes
+void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
+{
+        if (op->optinsn.insn) {
+                free_optinsn_slot(op->optinsn.insn, dirty);
+                op->optinsn.insn = NULL;
+                op->optinsn.size = 0;
+        }
+}
+
+void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
+{
+        __arch_remove_optimized_kprobe(op, 1);
+}
+
+/*
+ * Copy replacing target instructions
+ * Target instructions MUST be relocatable (checked inside)
+ */
+int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+{
+        u8 *buf;
+        int ret;
+        long rel;
+
+        if (!can_optimize((unsigned long)op->kp.addr))
+                return -EILSEQ;
+
+        op->optinsn.insn = get_optinsn_slot();
+        if (!op->optinsn.insn)
+                return -ENOMEM;
+
+        /*
+         * Verify if the address gap is in 2GB range, because this uses
+         * a relative jump.
+         */
+        rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
+        if (abs(rel) > 0x7fffffff)
+                return -ERANGE;
+
+        buf = (u8 *)op->optinsn.insn;
+
+        /* Copy instructions into the out-of-line buffer */
+        ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
+        if (ret < 0) {
+                __arch_remove_optimized_kprobe(op, 0);
+                return ret;
+        }
+        op->optinsn.size = ret;
+
+        /* Copy arch-dep-instance from template */
+        memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
+
+        /* Set probe information */
+        synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
+
+        /* Set probe function call */
+        synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
+
+        /* Set returning jmp instruction at the tail of out-of-line buffer */
+        synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
+                           (u8 *)op->kp.addr + op->optinsn.size);
+
+        flush_icache_range((unsigned long) buf,
+                           (unsigned long) buf + TMPL_END_IDX +
+                           op->optinsn.size + RELATIVEJUMP_SIZE);
+        return 0;
+}
+
+/* Replace a breakpoint (int3) with a relative jump. */
+int __kprobes arch_optimize_kprobe(struct optimized_kprobe *op)
+{
+        unsigned char jmp_code[RELATIVEJUMP_SIZE];
+        s32 rel = (s32)((long)op->optinsn.insn -
+                        ((long)op->kp.addr + RELATIVEJUMP_SIZE));
+
+        /* Backup instructions which will be replaced by jump address */
+        memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
+               RELATIVE_ADDR_SIZE);
+
+        jmp_code[0] = RELATIVEJUMP_OPCODE;
+        *(s32 *)(&jmp_code[1]) = rel;
+
+        /*
+         * text_poke_smp doesn't support NMI/MCE code modifying.
+         * However, since kprobes itself also doesn't support NMI/MCE
+         * code probing, it's not a problem.
+         */
+        text_poke_smp(op->kp.addr, jmp_code, RELATIVEJUMP_SIZE);
+        return 0;
+}
+
+/* Replace a relative jump with a breakpoint (int3). */
+void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
+{
+        u8 buf[RELATIVEJUMP_SIZE];
+
+        /* Set int3 to first byte for kprobes */
+        buf[0] = BREAKPOINT_INSTRUCTION;
+        memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+        text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
+}
+
+static int __kprobes setup_detour_execution(struct kprobe *p,
+                                            struct pt_regs *regs,
+                                            int reenter)
+{
+        struct optimized_kprobe *op;
+
+        if (p->flags & KPROBE_FLAG_OPTIMIZED) {
+                /* This kprobe is really able to run optimized path. */
+                op = container_of(p, struct optimized_kprobe, kp);
+                /* Detour through copied instructions */
+                regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
+                if (!reenter)
+                        reset_current_kprobe();
+                preempt_enable_no_resched();
+                return 1;
+        }
+        return 0;
+}
+#endif
+
 int __init arch_init_kprobes(void)
 {
         return 0;