aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorMasami Hiramatsu <masami.hiramatsu.pt@hitachi.com>2012-03-05 08:32:16 -0500
committerIngo Molnar <mingo@elte.hu>2012-03-06 03:49:49 -0500
commit464846888d9aad186cab3acdae6b654f9eb19772 (patch)
treefdf0b98237845a7d9af8a01cdf4971b43a2a3063 /arch
parent86b4ce3156c0dc140907ad03639564000cde694f (diff)
x86/kprobes: Fix a bug which can modify kernel code permanently
Fix a bug in kprobes which can permanently modify kernel code at run-time. As a result, the kernel can crash when it executes the modified code. This bug can happen when we put two probes close enough together and the first probe is optimized. When the second probe is set up, it copies a byte which has already been modified by the first probe, and executes it when the probe is hit. Even worse, when the first probe and the second probe are removed respectively, the second probe writes back the copied (modified) instruction. To fix this bug, kprobes now always recovers the original code and copies the first byte from the recovered instruction. Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: yrl.pp-manager.tt@hitachi.com Cc: systemtap@sourceware.org Cc: anderson@redhat.com Link: http://lkml.kernel.org/r/20120305133215.5982.31991.stgit@localhost.localdomain Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kernel/kprobes.c33
1 files changed, 15 insertions, 18 deletions
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 6bec22f514b5..ca6d450bee7e 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -361,19 +361,15 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
361 * If not, return null. 361 * If not, return null.
362 * Only applicable to 64-bit x86. 362 * Only applicable to 64-bit x86.
363 */ 363 */
364static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) 364static int __kprobes __copy_instruction(u8 *dest, u8 *src)
365{ 365{
366 struct insn insn; 366 struct insn insn;
367 kprobe_opcode_t buf[MAX_INSN_SIZE]; 367 kprobe_opcode_t buf[MAX_INSN_SIZE];
368 u8 *orig_src = src; /* Back up original src for RIP calculation */
369 368
370 if (recover) 369 kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src));
371 src = (u8 *)recover_probed_instruction(buf, (unsigned long)src);
372
373 kernel_insn_init(&insn, src);
374 insn_get_length(&insn); 370 insn_get_length(&insn);
375 /* Another subsystem puts a breakpoint, failed to recover */ 371 /* Another subsystem puts a breakpoint, failed to recover */
376 if (recover && insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) 372 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
377 return 0; 373 return 0;
378 memcpy(dest, insn.kaddr, insn.length); 374 memcpy(dest, insn.kaddr, insn.length);
379 375
@@ -395,7 +391,7 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
395 * extension of the original signed 32-bit displacement would 391 * extension of the original signed 32-bit displacement would
396 * have given. 392 * have given.
397 */ 393 */
398 newdisp = (u8 *) orig_src + (s64) insn.displacement.value - (u8 *) dest; 394 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
399 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ 395 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
400 disp = (u8 *) dest + insn_offset_displacement(&insn); 396 disp = (u8 *) dest + insn_offset_displacement(&insn);
401 *(s32 *) disp = (s32) newdisp; 397 *(s32 *) disp = (s32) newdisp;
@@ -406,18 +402,20 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
406 402
407static void __kprobes arch_copy_kprobe(struct kprobe *p) 403static void __kprobes arch_copy_kprobe(struct kprobe *p)
408{ 404{
405 /* Copy an instruction with recovering if other optprobe modifies it.*/
406 __copy_instruction(p->ainsn.insn, p->addr);
407
409 /* 408 /*
410 * Copy an instruction without recovering int3, because it will be 409 * __copy_instruction can modify the displacement of the instruction,
411 * put by another subsystem. 410 * but it doesn't affect boostable check.
412 */ 411 */
413 __copy_instruction(p->ainsn.insn, p->addr, 0); 412 if (can_boost(p->ainsn.insn))
414
415 if (can_boost(p->addr))
416 p->ainsn.boostable = 0; 413 p->ainsn.boostable = 0;
417 else 414 else
418 p->ainsn.boostable = -1; 415 p->ainsn.boostable = -1;
419 416
420 p->opcode = *p->addr; 417 /* Also, displacement change doesn't affect the first byte */
418 p->opcode = p->ainsn.insn[0];
421} 419}
422 420
423int __kprobes arch_prepare_kprobe(struct kprobe *p) 421int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -1276,7 +1274,7 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
1276 int len = 0, ret; 1274 int len = 0, ret;
1277 1275
1278 while (len < RELATIVEJUMP_SIZE) { 1276 while (len < RELATIVEJUMP_SIZE) {
1279 ret = __copy_instruction(dest + len, src + len, 1); 1277 ret = __copy_instruction(dest + len, src + len);
1280 if (!ret || !can_boost(dest + len)) 1278 if (!ret || !can_boost(dest + len))
1281 return -EINVAL; 1279 return -EINVAL;
1282 len += ret; 1280 len += ret;
@@ -1328,7 +1326,7 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
1328/* Decode whole function to ensure any instructions don't jump into target */ 1326/* Decode whole function to ensure any instructions don't jump into target */
1329static int __kprobes can_optimize(unsigned long paddr) 1327static int __kprobes can_optimize(unsigned long paddr)
1330{ 1328{
1331 unsigned long addr, __addr, size = 0, offset = 0; 1329 unsigned long addr, size = 0, offset = 0;
1332 struct insn insn; 1330 struct insn insn;
1333 kprobe_opcode_t buf[MAX_INSN_SIZE]; 1331 kprobe_opcode_t buf[MAX_INSN_SIZE];
1334 1332
@@ -1357,8 +1355,7 @@ static int __kprobes can_optimize(unsigned long paddr)
1357 * we can't optimize kprobe in this function. 1355 * we can't optimize kprobe in this function.
1358 */ 1356 */
1359 return 0; 1357 return 0;
1360 __addr = recover_probed_instruction(buf, addr); 1358 kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr));
1361 kernel_insn_init(&insn, (void *)__addr);
1362 insn_get_length(&insn); 1359 insn_get_length(&insn);
1363 /* Another subsystem puts a breakpoint */ 1360 /* Another subsystem puts a breakpoint */
1364 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) 1361 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)