about summary refs log tree commit diff stats
path: root/arch/x86
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kernel/kprobes_64.c142
1 files changed, 139 insertions, 3 deletions
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index f6837cd3bed5..bf0e18473677 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -55,6 +55,105 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
55}; 55};
56const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); 56const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
57 57
58/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
59static __always_inline void set_jmp_op(void *from, void *to)
60{
61 struct __arch_jmp_op {
62 char op;
63 s32 raddr;
64 } __attribute__((packed)) * jop;
65 jop = (struct __arch_jmp_op *)from;
66 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
67 jop->op = RELATIVEJUMP_INSTRUCTION;
68}
69
70/*
71 * returns non-zero if opcode is boostable
72 * RIP relative instructions are adjusted at copying time
73 */
74static __always_inline int can_boost(kprobe_opcode_t *opcodes)
75{
76#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
77 (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
78 (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
79 (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
80 (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
81 << (row % 64))
82 /*
83 * Undefined/reserved opcodes, conditional jump, Opcode Extension
84 * Groups, and some special opcodes can not boost.
85 */
86 static const unsigned long twobyte_is_boostable[256 / 64] = {
87 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
88 /* ---------------------------------------------- */
89 W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0)|/* 00 */
90 W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)|/* 10 */
91 W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)|/* 20 */
92 W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),/* 30 */
93 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)|/* 40 */
94 W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)|/* 50 */
95 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1)|/* 60 */
96 W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1),/* 70 */
97 W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)|/* 80 */
98 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)|/* 90 */
99 W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1)|/* a0 */
100 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1),/* b0 */
101 W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1)|/* c0 */
102 W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1)|/* d0 */
103 W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1)|/* e0 */
104 W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0) /* f0 */
105 /* ----------------------------------------------- */
106 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
107 };
108#undef W
109 kprobe_opcode_t opcode;
110 kprobe_opcode_t *orig_opcodes = opcodes;
111
112retry:
113 if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
114 return 0;
115 opcode = *(opcodes++);
116
117 /* 2nd-byte opcode */
118 if (opcode == 0x0f) {
119 if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
120 return 0;
121 return test_bit(*opcodes, twobyte_is_boostable);
122 }
123
124 switch (opcode & 0xf0) {
125 case 0x40:
126 goto retry; /* REX prefix is boostable */
127 case 0x60:
128 if (0x63 < opcode && opcode < 0x67)
129 goto retry; /* prefixes */
130 /* can't boost Address-size override and bound */
131 return (opcode != 0x62 && opcode != 0x67);
132 case 0x70:
133 return 0; /* can't boost conditional jump */
134 case 0xc0:
135 /* can't boost software-interruptions */
136 return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
137 case 0xd0:
138 /* can boost AA* and XLAT */
139 return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
140 case 0xe0:
141 /* can boost in/out and absolute jmps */
142 return ((opcode & 0x04) || opcode == 0xea);
143 case 0xf0:
144 if ((opcode & 0x0c) == 0 && opcode != 0xf1)
145 goto retry; /* lock/rep(ne) prefix */
146 /* clear and set flags are boostable */
147 return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
148 default:
149 /* segment override prefixes are boostable */
150 if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
151 goto retry; /* prefixes */
152 /* CS override prefix and call are not boostable */
153 return (opcode != 0x2e && opcode != 0x9a);
154 }
155}
156
58/* 157/*
59 * returns non-zero if opcode modifies the interrupt flag. 158 * returns non-zero if opcode modifies the interrupt flag.
60 */ 159 */
@@ -86,7 +185,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
86 185
87/* 186/*
88 * Determine if the instruction uses the %rip-relative addressing mode. 187 * Determine if the instruction uses the %rip-relative addressing mode.
 89 * If it does, return the address of the 32-bit displacement word. 188 * If it does, return the address of the 32-bit displacement word.
90 * If not, return null. 189 * If not, return null.
91 */ 190 */
92static s32 __kprobes *is_riprel(u8 *insn) 191static s32 __kprobes *is_riprel(u8 *insn)
@@ -210,6 +309,11 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
210 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */ 309 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
211 *ripdisp = disp; 310 *ripdisp = disp;
212 } 311 }
312 if (can_boost(p->addr)) {
313 p->ainsn.boostable = 0;
314 } else {
315 p->ainsn.boostable = -1;
316 }
213 p->opcode = *p->addr; 317 p->opcode = *p->addr;
214} 318}
215 319
@@ -226,7 +330,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
226void __kprobes arch_remove_kprobe(struct kprobe *p) 330void __kprobes arch_remove_kprobe(struct kprobe *p)
227{ 331{
228 mutex_lock(&kprobe_mutex); 332 mutex_lock(&kprobe_mutex);
229 free_insn_slot(p->ainsn.insn, 0); 333 free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
230 mutex_unlock(&kprobe_mutex); 334 mutex_unlock(&kprobe_mutex);
231} 335}
232 336
@@ -384,6 +488,15 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
384 return 1; 488 return 1;
385 489
386ss_probe: 490ss_probe:
491#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
492 if (p->ainsn.boostable == 1 && !p->post_handler) {
493 /* Boost up -- we can execute copied instructions directly */
494 reset_current_kprobe();
495 regs->ip = (unsigned long)p->ainsn.insn;
496 preempt_enable_no_resched();
497 return 1;
498 }
499#endif
387 prepare_singlestep(p, regs); 500 prepare_singlestep(p, regs);
388 kcb->kprobe_status = KPROBE_HIT_SS; 501 kcb->kprobe_status = KPROBE_HIT_SS;
389 return 1; 502 return 1;
@@ -493,6 +606,11 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
493 * 2) If the single-stepped instruction was a call, the return address 606 * 2) If the single-stepped instruction was a call, the return address
494 * that is atop the stack is the address following the copied instruction. 607 * that is atop the stack is the address following the copied instruction.
495 * We need to make it the address following the original instruction. 608 * We need to make it the address following the original instruction.
609 *
610 * If this is the first time we've single-stepped the instruction at
611 * this probepoint, and the instruction is boostable, boost it: add a
612 * jump instruction after the copied instruction, that jumps to the next
613 * instruction after the probepoint.
496 */ 614 */
497static void __kprobes resume_execution(struct kprobe *p, 615static void __kprobes resume_execution(struct kprobe *p,
498 struct pt_regs *regs, struct kprobe_ctlblk *kcb) 616 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
@@ -519,6 +637,7 @@ static void __kprobes resume_execution(struct kprobe *p,
519 case 0xcf: 637 case 0xcf:
520 case 0xea: /* jmp absolute -- ip is correct */ 638 case 0xea: /* jmp absolute -- ip is correct */
521 /* ip is already adjusted, no more changes required */ 639 /* ip is already adjusted, no more changes required */
640 p->ainsn.boostable = 1;
522 goto no_change; 641 goto no_change;
523 case 0xe8: /* call relative - Fix return addr */ 642 case 0xe8: /* call relative - Fix return addr */
524 *tos = orig_rip + (*tos - copy_rip); 643 *tos = orig_rip + (*tos - copy_rip);
@@ -527,17 +646,34 @@ static void __kprobes resume_execution(struct kprobe *p,
527 if ((insn[1] & 0x30) == 0x10) { 646 if ((insn[1] & 0x30) == 0x10) {
528 /* call absolute, indirect */ 647 /* call absolute, indirect */
529 /* Fix return addr; ip is correct. */ 648 /* Fix return addr; ip is correct. */
649 /* not boostable */
530 *tos = orig_rip + (*tos - copy_rip); 650 *tos = orig_rip + (*tos - copy_rip);
531 goto no_change; 651 goto no_change;
532 } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */ 652 } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
533 ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */ 653 ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
534 /* ip is correct. */ 654 /* ip is correct. And this is boostable */
655 p->ainsn.boostable = 1;
535 goto no_change; 656 goto no_change;
536 } 657 }
537 default: 658 default:
538 break; 659 break;
539 } 660 }
540 661
662 if (p->ainsn.boostable == 0) {
663 if ((regs->ip > copy_rip) &&
664 (regs->ip - copy_rip) + 5 < MAX_INSN_SIZE) {
665 /*
666 * These instructions can be executed directly if it
667 * jumps back to correct address.
668 */
669 set_jmp_op((void *)regs->ip,
670 (void *)orig_rip + (regs->ip - copy_rip));
671 p->ainsn.boostable = 1;
672 } else {
673 p->ainsn.boostable = -1;
674 }
675 }
676
541 regs->ip = orig_rip + (regs->ip - copy_rip); 677 regs->ip = orig_rip + (regs->ip - copy_rip);
542 678
543no_change: 679no_change: