aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/i386/kernel/kprobes.c92
-rw-r--r--include/asm-i386/kprobes.h6
2 files changed, 96 insertions, 2 deletions
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index b40614f5afe..137bf612141 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -41,6 +41,49 @@ void jprobe_return_end(void);
41DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 41DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
42DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 42DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
43 43
44/* Write a 5-byte relative jmp at 'from' that branches to 'to'. */
45static inline void set_jmp_op(void *from, void *to)
46{
47	struct __arch_jmp_op {
48		char op;	/* opcode byte: RELATIVEJUMP_INSTRUCTION (0xe9) */
49		long raddr;	/* signed 32-bit displacement */
50	} __attribute__((packed)) *jop;
51	jop = (struct __arch_jmp_op *)from;
	/* displacement is relative to the end of the 5-byte jmp instruction */
52	jop->raddr = (long)(to) - ((long)(from) + 5);
53	jop->op = RELATIVEJUMP_INSTRUCTION;
54}
55
56/*
57 * returns non-zero if opcodes can be boosted.
 * A "boostable" instruction can be executed straight from the copied
 * slot and followed by a jmp back to the original stream, avoiding the
 * single-step trap.  NOTE(review): only the first opcode byte is
 * inspected; prefixed and two-byte (0x0f) opcodes fall through to the
 * conservative default.
58 */
59static inline int can_boost(kprobe_opcode_t opcode)
60{
61	switch (opcode & 0xf0 ) {
62	case 0x70:
63		return 0; /* can't boost conditional jump */
64	case 0x90:
65		/* can't boost call far (0x9a) and pushf (0x9c) */
66		return opcode != 0x9a && opcode != 0x9c;
67	case 0xc0:
68		/* can't boost undefined opcodes and software interrupts */
69		return (0xc1 < opcode && opcode < 0xc6) ||
70			(0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf;
71	case 0xd0:
72		/* can boost AA* and XLAT */
73		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
74	case 0xe0:
75		/* can boost in/out and (may be) jmps */
76		return (0xe3 < opcode && opcode != 0xe8);
77	case 0xf0:
78		/* clear and set flags can be boosted */
79		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
80	default:
81		/* currently, can't boost 2-byte opcodes */
82		return opcode != 0x0f;
83	}
84}
85
86
44/* 87/*
45 * returns non-zero if opcode modifies the interrupt flag. 88 * returns non-zero if opcode modifies the interrupt flag.
46 */ 89 */
@@ -65,6 +108,11 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
65 108
66 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 109 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
67 p->opcode = *p->addr; 110 p->opcode = *p->addr;
111 if (can_boost(p->opcode)) {
112 p->ainsn.boostable = 0;
113 } else {
114 p->ainsn.boostable = -1;
115 }
68 return 0; 116 return 0;
69} 117}
70 118
@@ -158,6 +206,9 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
158 kprobe_opcode_t *addr = NULL; 206 kprobe_opcode_t *addr = NULL;
159 unsigned long *lp; 207 unsigned long *lp;
160 struct kprobe_ctlblk *kcb; 208 struct kprobe_ctlblk *kcb;
209#ifdef CONFIG_PREEMPT
210 unsigned pre_preempt_count = preempt_count();
211#endif /* CONFIG_PREEMPT */
161 212
162 /* 213 /*
163 * We don't want to be preempted for the entire 214 * We don't want to be preempted for the entire
@@ -252,6 +303,21 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
252 /* handler has already set things up, so skip ss setup */ 303 /* handler has already set things up, so skip ss setup */
253 return 1; 304 return 1;
254 305
306 if (p->ainsn.boostable == 1 &&
307#ifdef CONFIG_PREEMPT
308		!(pre_preempt_count) && /*
309				       * This enables the booster when the
310				       * direct execution path isn't preempted.
311				       */
312#endif /* CONFIG_PREEMPT */
313 !p->post_handler && !p->break_handler ) {
314 /* Boost up -- we can execute copied instructions directly */
315 reset_current_kprobe();
316 regs->eip = (unsigned long)p->ainsn.insn;
317 preempt_enable_no_resched();
318 return 1;
319 }
320
255ss_probe: 321ss_probe:
256 prepare_singlestep(p, regs); 322 prepare_singlestep(p, regs);
257 kcb->kprobe_status = KPROBE_HIT_SS; 323 kcb->kprobe_status = KPROBE_HIT_SS;
@@ -357,6 +423,8 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
357 * 2) If the single-stepped instruction was a call, the return address 423 * 2) If the single-stepped instruction was a call, the return address
358 * that is atop the stack is the address following the copied instruction. 424 * that is atop the stack is the address following the copied instruction.
359 * We need to make it the address following the original instruction. 425 * We need to make it the address following the original instruction.
426 *
427 * This function also checks instruction size for preparing direct execution.
360 */ 428 */
361static void __kprobes resume_execution(struct kprobe *p, 429static void __kprobes resume_execution(struct kprobe *p,
362 struct pt_regs *regs, struct kprobe_ctlblk *kcb) 430 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
@@ -377,6 +445,7 @@ static void __kprobes resume_execution(struct kprobe *p,
377 case 0xca: 445 case 0xca:
378 case 0xea: /* jmp absolute -- eip is correct */ 446 case 0xea: /* jmp absolute -- eip is correct */
379 /* eip is already adjusted, no more changes required */ 447 /* eip is already adjusted, no more changes required */
448 p->ainsn.boostable = 1;
380 goto no_change; 449 goto no_change;
381 case 0xe8: /* call relative - Fix return addr */ 450 case 0xe8: /* call relative - Fix return addr */
382 *tos = orig_eip + (*tos - copy_eip); 451 *tos = orig_eip + (*tos - copy_eip);
@@ -384,18 +453,37 @@ static void __kprobes resume_execution(struct kprobe *p,
384 case 0xff: 453 case 0xff:
385 if ((p->ainsn.insn[1] & 0x30) == 0x10) { 454 if ((p->ainsn.insn[1] & 0x30) == 0x10) {
386 /* call absolute, indirect */ 455 /* call absolute, indirect */
387 /* Fix return addr; eip is correct. */ 456 /*
457 * Fix return addr; eip is correct.
458 * But this is not boostable
459 */
388 *tos = orig_eip + (*tos - copy_eip); 460 *tos = orig_eip + (*tos - copy_eip);
389 goto no_change; 461 goto no_change;
390 } else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */ 462 } else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
391 ((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */ 463 ((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
392 /* eip is correct. */ 464 /* eip is correct. And this is boostable */
465 p->ainsn.boostable = 1;
393 goto no_change; 466 goto no_change;
394 } 467 }
395 default: 468 default:
396 break; 469 break;
397 } 470 }
398 471
472 if (p->ainsn.boostable == 0) {
473 if ((regs->eip > copy_eip) &&
474 (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
475 /*
476 * These instructions can be executed directly if it
477 * jumps back to correct address.
478 */
479 set_jmp_op((void *)regs->eip,
480 (void *)orig_eip + (regs->eip - copy_eip));
481 p->ainsn.boostable = 1;
482 } else {
483 p->ainsn.boostable = -1;
484 }
485 }
486
399 regs->eip = orig_eip + (regs->eip - copy_eip); 487 regs->eip = orig_eip + (regs->eip - copy_eip);
400 488
401no_change: 489no_change:
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index a0d2d74a7dd..57d157c5cf8 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -34,6 +34,7 @@ struct pt_regs;
34 34
35typedef u8 kprobe_opcode_t; 35typedef u8 kprobe_opcode_t;
36#define BREAKPOINT_INSTRUCTION 0xcc 36#define BREAKPOINT_INSTRUCTION 0xcc
37#define RELATIVEJUMP_INSTRUCTION 0xe9
37#define MAX_INSN_SIZE 16 38#define MAX_INSN_SIZE 16
38#define MAX_STACK_SIZE 64 39#define MAX_STACK_SIZE 64
39#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ 40#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
@@ -51,6 +52,11 @@ void kretprobe_trampoline(void);
51struct arch_specific_insn { 52struct arch_specific_insn {
52 /* copy of the original instruction */ 53 /* copy of the original instruction */
53 kprobe_opcode_t *insn; 54 kprobe_opcode_t *insn;
55	/*
56	 * If this flag is not 0, this kprobe can be boosted when its
57	 * post_handler and break_handler are not set.
58	 */
59 int boostable;
54}; 60};
55 61
56struct prev_kprobe { 62struct prev_kprobe {