author     Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>   2012-03-05 08:32:22 -0500
committer  Ingo Molnar <mingo@elte.hu>                          2012-03-06 03:49:49 -0500
commit     3f33ab1c0c741bfab2138c14ba1918a7905a1e8b (patch)
tree       a6f299846bf2345af4e43edc6659851e81ee58d5 /arch/x86/kernel
parent     464846888d9aad186cab3acdae6b654f9eb19772 (diff)
x86/kprobes: Split out optprobe related code to kprobes-opt.c
Split out optprobe related code to arch/x86/kernel/kprobes-opt.c for maintainability.

Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Suggested-by: Ingo Molnar <mingo@elte.hu>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: yrl.pp-manager.tt@hitachi.com
Cc: systemtap@sourceware.org
Cc: anderson@redhat.com
Link: http://lkml.kernel.org/r/20120305133222.5982.54794.stgit@localhost.localdomain
[ Tidied up the code a tiny bit ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/Makefile          |   1
-rw-r--r--  arch/x86/kernel/kprobes-common.h  | 102
-rw-r--r--  arch/x86/kernel/kprobes-opt.c     | 512
-rw-r--r--  arch/x86/kernel/kprobes.c         | 625
4 files changed, 646 insertions(+), 594 deletions(-)
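The split hinges on the new kprobes-common.h below: the helpers shared by both files (can_boost(), __copy_instruction(), recover_probed_instruction(), synthesize_reljump()/synthesize_relcall()) are declared once, and the optprobe entry points (arch_init_optprobes(), setup_detour_execution(), __recover_optprobed_insn()) fall back to inline no-op stubs when CONFIG_OPTPROBES is off, so kprobes.c can call them unconditionally instead of carrying its own #ifdef blocks. A minimal user-space sketch of that stub pattern, with illustrative demo_* names rather than the kernel's:

#include <stdio.h>

#define CONFIG_OPTPROBES 1	/* set to 0 to mimic a build without optprobes */

#if CONFIG_OPTPROBES
/* Real implementation lives in its own file (cf. kprobes-opt.c below). */
static int demo_init_optprobes(void)
{
	printf("optprobes initialized\n");
	return 0;
}
#else
/* Compiled-out variant: an inline stub, so callers need no #ifdef. */
static inline int demo_init_optprobes(void)
{
	return 0;
}
#endif

int main(void)
{
	/* The generic init path calls the hook unconditionally. */
	return demo_init_optprobes();
}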
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 5369059c07a..532d2e090e6 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)       += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)  += crash_dump_$(BITS).o
 obj-$(CONFIG_KPROBES)     += kprobes.o
+obj-$(CONFIG_OPTPROBES)   += kprobes-opt.o
 obj-$(CONFIG_MODULES)     += module.o
 obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
 obj-$(CONFIG_KGDB)        += kgdb.o
diff --git a/arch/x86/kernel/kprobes-common.h b/arch/x86/kernel/kprobes-common.h
new file mode 100644
index 00000000000..3230b68ef29
--- /dev/null
+++ b/arch/x86/kernel/kprobes-common.h
@@ -0,0 +1,102 @@
1#ifndef __X86_KERNEL_KPROBES_COMMON_H
2#define __X86_KERNEL_KPROBES_COMMON_H
3
4/* Kprobes and Optprobes common header */
5
6#ifdef CONFIG_X86_64
7#define SAVE_REGS_STRING \
8 /* Skip cs, ip, orig_ax. */ \
9 " subq $24, %rsp\n" \
10 " pushq %rdi\n" \
11 " pushq %rsi\n" \
12 " pushq %rdx\n" \
13 " pushq %rcx\n" \
14 " pushq %rax\n" \
15 " pushq %r8\n" \
16 " pushq %r9\n" \
17 " pushq %r10\n" \
18 " pushq %r11\n" \
19 " pushq %rbx\n" \
20 " pushq %rbp\n" \
21 " pushq %r12\n" \
22 " pushq %r13\n" \
23 " pushq %r14\n" \
24 " pushq %r15\n"
25#define RESTORE_REGS_STRING \
26 " popq %r15\n" \
27 " popq %r14\n" \
28 " popq %r13\n" \
29 " popq %r12\n" \
30 " popq %rbp\n" \
31 " popq %rbx\n" \
32 " popq %r11\n" \
33 " popq %r10\n" \
34 " popq %r9\n" \
35 " popq %r8\n" \
36 " popq %rax\n" \
37 " popq %rcx\n" \
38 " popq %rdx\n" \
39 " popq %rsi\n" \
40 " popq %rdi\n" \
41 /* Skip orig_ax, ip, cs */ \
42 " addq $24, %rsp\n"
43#else
44#define SAVE_REGS_STRING \
45 /* Skip cs, ip, orig_ax and gs. */ \
46 " subl $16, %esp\n" \
47 " pushl %fs\n" \
48 " pushl %es\n" \
49 " pushl %ds\n" \
50 " pushl %eax\n" \
51 " pushl %ebp\n" \
52 " pushl %edi\n" \
53 " pushl %esi\n" \
54 " pushl %edx\n" \
55 " pushl %ecx\n" \
56 " pushl %ebx\n"
57#define RESTORE_REGS_STRING \
58 " popl %ebx\n" \
59 " popl %ecx\n" \
60 " popl %edx\n" \
61 " popl %esi\n" \
62 " popl %edi\n" \
63 " popl %ebp\n" \
64 " popl %eax\n" \
65 /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\
66 " addl $24, %esp\n"
67#endif
68
69/* Check whether the instruction can be boosted */
70extern int can_boost(kprobe_opcode_t *instruction);
71/* Recover instruction if given address is probed */
72extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
73 unsigned long addr);
74/*
75 * Copy an instruction and adjust the displacement if the instruction
76 * uses the %rip-relative addressing mode.
77 */
78extern int __copy_instruction(u8 *dest, u8 *src);
79
80/* Generate a relative-jump/call instruction */
81extern void synthesize_reljump(void *from, void *to);
82extern void synthesize_relcall(void *from, void *to);
83
84#ifdef CONFIG_OPTPROBES
85extern int arch_init_optprobes(void);
86extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter);
87extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr);
88#else /* !CONFIG_OPTPROBES */
89static inline int arch_init_optprobes(void)
90{
91 return 0;
92}
93static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
94{
95 return 0;
96}
97static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
98{
99 return addr;
100}
101#endif
102#endif
diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
new file mode 100644
index 00000000000..c5e410eed40
--- /dev/null
+++ b/arch/x86/kernel/kprobes-opt.c
@@ -0,0 +1,512 @@
1/*
2 * Kernel Probes Jump Optimization (Optprobes)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2002, 2004
19 * Copyright (C) Hitachi Ltd., 2012
20 */
21#include <linux/kprobes.h>
22#include <linux/ptrace.h>
23#include <linux/string.h>
24#include <linux/slab.h>
25#include <linux/hardirq.h>
26#include <linux/preempt.h>
27#include <linux/module.h>
28#include <linux/kdebug.h>
29#include <linux/kallsyms.h>
30#include <linux/ftrace.h>
31
32#include <asm/cacheflush.h>
33#include <asm/desc.h>
34#include <asm/pgtable.h>
35#include <asm/uaccess.h>
36#include <asm/alternative.h>
37#include <asm/insn.h>
38#include <asm/debugreg.h>
39
40#include "kprobes-common.h"
41
42unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
43{
44 struct optimized_kprobe *op;
45 struct kprobe *kp;
46 long offs;
47 int i;
48
49 for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
50 kp = get_kprobe((void *)addr - i);
51 /* This function only handles jump-optimized kprobe */
52 if (kp && kprobe_optimized(kp)) {
53 op = container_of(kp, struct optimized_kprobe, kp);
54 /* If op->list is not empty, op is under optimizing */
55 if (list_empty(&op->list))
56 goto found;
57 }
58 }
59
60 return addr;
61found:
62 /*
63 * If the kprobe is optimized, the original bytes may already have been
64 * overwritten by the jump destination address. In that case, the original
65 * bytes must be recovered from the op->optinsn.copied_insn buffer.
66 */
67 memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
68 if (addr == (unsigned long)kp->addr) {
69 buf[0] = kp->opcode;
70 memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
71 } else {
72 offs = addr - (unsigned long)kp->addr - 1;
73 memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
74 }
75
76 return (unsigned long)buf;
77}
78
79/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
80static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
81{
82#ifdef CONFIG_X86_64
83 *addr++ = 0x48;
84 *addr++ = 0xbf;
85#else
86 *addr++ = 0xb8;
87#endif
88 *(unsigned long *)addr = val;
89}
90
91static void __used __kprobes kprobes_optinsn_template_holder(void)
92{
93 asm volatile (
94 ".global optprobe_template_entry\n"
95 "optprobe_template_entry:\n"
96#ifdef CONFIG_X86_64
97 /* We don't bother saving the ss register */
98 " pushq %rsp\n"
99 " pushfq\n"
100 SAVE_REGS_STRING
101 " movq %rsp, %rsi\n"
102 ".global optprobe_template_val\n"
103 "optprobe_template_val:\n"
104 ASM_NOP5
105 ASM_NOP5
106 ".global optprobe_template_call\n"
107 "optprobe_template_call:\n"
108 ASM_NOP5
109 /* Move flags to rsp */
110 " movq 144(%rsp), %rdx\n"
111 " movq %rdx, 152(%rsp)\n"
112 RESTORE_REGS_STRING
113 /* Skip flags entry */
114 " addq $8, %rsp\n"
115 " popfq\n"
116#else /* CONFIG_X86_32 */
117 " pushf\n"
118 SAVE_REGS_STRING
119 " movl %esp, %edx\n"
120 ".global optprobe_template_val\n"
121 "optprobe_template_val:\n"
122 ASM_NOP5
123 ".global optprobe_template_call\n"
124 "optprobe_template_call:\n"
125 ASM_NOP5
126 RESTORE_REGS_STRING
127 " addl $4, %esp\n" /* skip cs */
128 " popf\n"
129#endif
130 ".global optprobe_template_end\n"
131 "optprobe_template_end:\n");
132}
133
134#define TMPL_MOVE_IDX \
135 ((long)&optprobe_template_val - (long)&optprobe_template_entry)
136#define TMPL_CALL_IDX \
137 ((long)&optprobe_template_call - (long)&optprobe_template_entry)
138#define TMPL_END_IDX \
139 ((long)&optprobe_template_end - (long)&optprobe_template_entry)
140
141#define INT3_SIZE sizeof(kprobe_opcode_t)
142
143/* Optimized kprobe callback function: called from optinsn */
144static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
145{
146 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
147 unsigned long flags;
148
149 /* This is possible if op is under delayed unoptimizing */
150 if (kprobe_disabled(&op->kp))
151 return;
152
153 local_irq_save(flags);
154 if (kprobe_running()) {
155 kprobes_inc_nmissed_count(&op->kp);
156 } else {
157 /* Save skipped registers */
158#ifdef CONFIG_X86_64
159 regs->cs = __KERNEL_CS;
160#else
161 regs->cs = __KERNEL_CS | get_kernel_rpl();
162 regs->gs = 0;
163#endif
164 regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
165 regs->orig_ax = ~0UL;
166
167 __this_cpu_write(current_kprobe, &op->kp);
168 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
169 opt_pre_handler(&op->kp, regs);
170 __this_cpu_write(current_kprobe, NULL);
171 }
172 local_irq_restore(flags);
173}
174
175static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
176{
177 int len = 0, ret;
178
179 while (len < RELATIVEJUMP_SIZE) {
180 ret = __copy_instruction(dest + len, src + len);
181 if (!ret || !can_boost(dest + len))
182 return -EINVAL;
183 len += ret;
184 }
185 /* Check whether the address range is reserved */
186 if (ftrace_text_reserved(src, src + len - 1) ||
187 alternatives_text_reserved(src, src + len - 1) ||
188 jump_label_text_reserved(src, src + len - 1))
189 return -EBUSY;
190
191 return len;
192}
193
194/* Check whether insn is indirect jump */
195static int __kprobes insn_is_indirect_jump(struct insn *insn)
196{
197 return ((insn->opcode.bytes[0] == 0xff &&
198 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
199 insn->opcode.bytes[0] == 0xea); /* Segment based jump */
200}
201
202/* Check whether insn jumps into specified address range */
203static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
204{
205 unsigned long target = 0;
206
207 switch (insn->opcode.bytes[0]) {
208 case 0xe0: /* loopne */
209 case 0xe1: /* loope */
210 case 0xe2: /* loop */
211 case 0xe3: /* jcxz */
212 case 0xe9: /* near relative jump */
213 case 0xeb: /* short relative jump */
214 break;
215 case 0x0f:
216 if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
217 break;
218 return 0;
219 default:
220 if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
221 break;
222 return 0;
223 }
224 target = (unsigned long)insn->next_byte + insn->immediate.value;
225
226 return (start <= target && target <= start + len);
227}
228
229/* Decode whole function to ensure any instructions don't jump into target */
230static int __kprobes can_optimize(unsigned long paddr)
231{
232 unsigned long addr, size = 0, offset = 0;
233 struct insn insn;
234 kprobe_opcode_t buf[MAX_INSN_SIZE];
235
236 /* Lookup symbol including addr */
237 if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
238 return 0;
239
240 /*
241 * Do not optimize in the entry code due to the unstable
242 * stack handling.
243 */
244 if ((paddr >= (unsigned long)__entry_text_start) &&
245 (paddr < (unsigned long)__entry_text_end))
246 return 0;
247
248 /* Check there is enough space for a relative jump. */
249 if (size - offset < RELATIVEJUMP_SIZE)
250 return 0;
251
252 /* Decode instructions */
253 addr = paddr - offset;
254 while (addr < paddr - offset + size) { /* Decode until function end */
255 if (search_exception_tables(addr))
256 /*
257 * Since some fixup code will jump into this function,
258 * we can't optimize kprobe in this function.
259 */
260 return 0;
261 kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr));
262 insn_get_length(&insn);
263 /* Another subsystem puts a breakpoint */
264 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
265 return 0;
266 /* Recover address */
267 insn.kaddr = (void *)addr;
268 insn.next_byte = (void *)(addr + insn.length);
269 /* Check any instructions don't jump into target */
270 if (insn_is_indirect_jump(&insn) ||
271 insn_jump_into_range(&insn, paddr + INT3_SIZE,
272 RELATIVE_ADDR_SIZE))
273 return 0;
274 addr += insn.length;
275 }
276
277 return 1;
278}
279
280/* Check optimized_kprobe can actually be optimized. */
281int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
282{
283 int i;
284 struct kprobe *p;
285
286 for (i = 1; i < op->optinsn.size; i++) {
287 p = get_kprobe(op->kp.addr + i);
288 if (p && !kprobe_disabled(p))
289 return -EEXIST;
290 }
291
292 return 0;
293}
294
295/* Check the addr is within the optimized instructions. */
296int __kprobes
297arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
298{
299 return ((unsigned long)op->kp.addr <= addr &&
300 (unsigned long)op->kp.addr + op->optinsn.size > addr);
301}
302
303/* Free optimized instruction slot */
304static __kprobes
305void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
306{
307 if (op->optinsn.insn) {
308 free_optinsn_slot(op->optinsn.insn, dirty);
309 op->optinsn.insn = NULL;
310 op->optinsn.size = 0;
311 }
312}
313
314void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
315{
316 __arch_remove_optimized_kprobe(op, 1);
317}
318
319/*
320 * Copy replacing target instructions
321 * Target instructions MUST be relocatable (checked inside)
322 * This is called when new aggr(opt)probe is allocated or reused.
323 */
324int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
325{
326 u8 *buf;
327 int ret;
328 long rel;
329
330 if (!can_optimize((unsigned long)op->kp.addr))
331 return -EILSEQ;
332
333 op->optinsn.insn = get_optinsn_slot();
334 if (!op->optinsn.insn)
335 return -ENOMEM;
336
337 /*
338 * Verify if the address gap is in 2GB range, because this uses
339 * a relative jump.
340 */
341 rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
342 if (abs(rel) > 0x7fffffff)
343 return -ERANGE;
344
345 buf = (u8 *)op->optinsn.insn;
346
347 /* Copy instructions into the out-of-line buffer */
348 ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
349 if (ret < 0) {
350 __arch_remove_optimized_kprobe(op, 0);
351 return ret;
352 }
353 op->optinsn.size = ret;
354
355 /* Copy arch-dep-instance from template */
356 memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
357
358 /* Set probe information */
359 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
360
361 /* Set probe function call */
362 synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
363
364 /* Set returning jmp instruction at the tail of out-of-line buffer */
365 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
366 (u8 *)op->kp.addr + op->optinsn.size);
367
368 flush_icache_range((unsigned long) buf,
369 (unsigned long) buf + TMPL_END_IDX +
370 op->optinsn.size + RELATIVEJUMP_SIZE);
371 return 0;
372}
373
374#define MAX_OPTIMIZE_PROBES 256
375static struct text_poke_param *jump_poke_params;
376static struct jump_poke_buffer {
377 u8 buf[RELATIVEJUMP_SIZE];
378} *jump_poke_bufs;
379
380static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
381 u8 *insn_buf,
382 struct optimized_kprobe *op)
383{
384 s32 rel = (s32)((long)op->optinsn.insn -
385 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
386
387 /* Backup instructions which will be replaced by jump address */
388 memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
389 RELATIVE_ADDR_SIZE);
390
391 insn_buf[0] = RELATIVEJUMP_OPCODE;
392 *(s32 *)(&insn_buf[1]) = rel;
393
394 tprm->addr = op->kp.addr;
395 tprm->opcode = insn_buf;
396 tprm->len = RELATIVEJUMP_SIZE;
397}
398
399/*
400 * Replace breakpoints (int3) with relative jumps.
401 * Caller must call with locking kprobe_mutex and text_mutex.
402 */
403void __kprobes arch_optimize_kprobes(struct list_head *oplist)
404{
405 struct optimized_kprobe *op, *tmp;
406 int c = 0;
407
408 list_for_each_entry_safe(op, tmp, oplist, list) {
409 WARN_ON(kprobe_disabled(&op->kp));
410 /* Setup param */
411 setup_optimize_kprobe(&jump_poke_params[c],
412 jump_poke_bufs[c].buf, op);
413 list_del_init(&op->list);
414 if (++c >= MAX_OPTIMIZE_PROBES)
415 break;
416 }
417
418 /*
419 * text_poke_smp doesn't support NMI/MCE code modifying.
420 * However, since kprobes itself also doesn't support NMI/MCE
421 * code probing, it's not a problem.
422 */
423 text_poke_smp_batch(jump_poke_params, c);
424}
425
426static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
427 u8 *insn_buf,
428 struct optimized_kprobe *op)
429{
430 /* Set int3 to first byte for kprobes */
431 insn_buf[0] = BREAKPOINT_INSTRUCTION;
432 memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
433
434 tprm->addr = op->kp.addr;
435 tprm->opcode = insn_buf;
436 tprm->len = RELATIVEJUMP_SIZE;
437}
438
439/*
440 * Recover original instructions and breakpoints from relative jumps.
441 * Caller must call with locking kprobe_mutex.
442 */
443extern void arch_unoptimize_kprobes(struct list_head *oplist,
444 struct list_head *done_list)
445{
446 struct optimized_kprobe *op, *tmp;
447 int c = 0;
448
449 list_for_each_entry_safe(op, tmp, oplist, list) {
450 /* Setup param */
451 setup_unoptimize_kprobe(&jump_poke_params[c],
452 jump_poke_bufs[c].buf, op);
453 list_move(&op->list, done_list);
454 if (++c >= MAX_OPTIMIZE_PROBES)
455 break;
456 }
457
458 /*
459 * text_poke_smp doesn't support NMI/MCE code modifying.
460 * However, since kprobes itself also doesn't support NMI/MCE
461 * code probing, it's not a problem.
462 */
463 text_poke_smp_batch(jump_poke_params, c);
464}
465
466/* Replace a relative jump with a breakpoint (int3). */
467void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
468{
469 u8 buf[RELATIVEJUMP_SIZE];
470
471 /* Set int3 to first byte for kprobes */
472 buf[0] = BREAKPOINT_INSTRUCTION;
473 memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
474 text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
475}
476
477int __kprobes
478setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
479{
480 struct optimized_kprobe *op;
481
482 if (p->flags & KPROBE_FLAG_OPTIMIZED) {
483 /* This kprobe is really able to run optimized path. */
484 op = container_of(p, struct optimized_kprobe, kp);
485 /* Detour through copied instructions */
486 regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
487 if (!reenter)
488 reset_current_kprobe();
489 preempt_enable_no_resched();
490 return 1;
491 }
492 return 0;
493}
494
495int __kprobes arch_init_optprobes(void)
496{
497 /* Allocate code buffer and parameter array */
498 jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
499 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
500 if (!jump_poke_bufs)
501 return -ENOMEM;
502
503 jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
504 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
505 if (!jump_poke_params) {
506 kfree(jump_poke_bufs);
507 jump_poke_bufs = NULL;
508 return -ENOMEM;
509 }
510
511 return 0;
512}
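A note on the displacement arithmetic used throughout this file: setup_optimize_kprobe() and __synthesize_relative_insn() (via synthesize_reljump()/synthesize_relcall()) emit a 5-byte instruction whose signed 32-bit operand is the target minus the address of the following instruction, rel = to - (from + 5), which is why arch_prepare_optimized_kprobe() returns -ERANGE when abs(rel) exceeds 0x7fffffff. A small stand-alone sketch of that encoding, with illustrative demo_* names rather than the kernel's RELATIVEJUMP_* constants:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_RELJUMP_OPCODE 0xe9	/* jmp rel32 */
#define DEMO_RELJUMP_SIZE   5		/* 1 opcode byte + 4 displacement bytes */

/* Encode a 5-byte relative jump from 'from' to 'to' into buf. */
static void demo_encode_reljump(uint8_t *buf, unsigned long from, unsigned long to)
{
	/* The displacement is measured from the end of the 5-byte instruction. */
	int32_t rel = (int32_t)((long)to - ((long)from + DEMO_RELJUMP_SIZE));

	buf[0] = DEMO_RELJUMP_OPCODE;
	memcpy(&buf[1], &rel, sizeof(rel));	/* x86 stores it little-endian */
}

int main(void)
{
	uint8_t insn[DEMO_RELJUMP_SIZE];
	int i;

	/* e.g. a probe at 0x1000 detouring to an out-of-line buffer at 0x2000 */
	demo_encode_reljump(insn, 0x1000, 0x2000);
	for (i = 0; i < DEMO_RELJUMP_SIZE; i++)
		printf("%02x ", insn[i]);
	printf("\n");
	return 0;
}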
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index ca6d450bee7..e213fc8408d 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -30,16 +30,15 @@
30 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi 30 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
31 * <prasanna@in.ibm.com> added function-return probes. 31 * <prasanna@in.ibm.com> added function-return probes.
32 * 2005-May Rusty Lynch <rusty.lynch@intel.com> 32 * 2005-May Rusty Lynch <rusty.lynch@intel.com>
33 * Added function return probes functionality 33 * Added function return probes functionality
34 * 2006-Feb Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added 34 * 2006-Feb Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
35 * kprobe-booster and kretprobe-booster for i386. 35 * kprobe-booster and kretprobe-booster for i386.
36 * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster 36 * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
37 * and kretprobe-booster for x86-64 37 * and kretprobe-booster for x86-64
38 * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven 38 * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
39 * <arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com> 39 * <arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
40 * unified x86 kprobes code. 40 * unified x86 kprobes code.
41 */ 41 */
42
43#include <linux/kprobes.h> 42#include <linux/kprobes.h>
44#include <linux/ptrace.h> 43#include <linux/ptrace.h>
45#include <linux/string.h> 44#include <linux/string.h>
@@ -59,6 +58,8 @@
59#include <asm/insn.h> 58#include <asm/insn.h>
60#include <asm/debugreg.h> 59#include <asm/debugreg.h>
61 60
61#include "kprobes-common.h"
62
62void jprobe_return_end(void); 63void jprobe_return_end(void);
63 64
64DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 65DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -108,6 +109,7 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
108 doesn't switch kernel stack.*/ 109 doesn't switch kernel stack.*/
109 {NULL, NULL} /* Terminator */ 110 {NULL, NULL} /* Terminator */
110}; 111};
112
111const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); 113const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
112 114
113static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) 115static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
@@ -123,11 +125,17 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
123} 125}
124 126
125/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ 127/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
126static void __kprobes synthesize_reljump(void *from, void *to) 128void __kprobes synthesize_reljump(void *from, void *to)
127{ 129{
128 __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE); 130 __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
129} 131}
130 132
133/* Insert a call instruction at address 'from', which calls address 'to'.*/
134void __kprobes synthesize_relcall(void *from, void *to)
135{
136 __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
137}
138
131/* 139/*
132 * Skip the prefixes of the instruction. 140 * Skip the prefixes of the instruction.
133 */ 141 */
@@ -151,7 +159,7 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
151 * Returns non-zero if opcode is boostable. 159 * Returns non-zero if opcode is boostable.
152 * RIP relative instructions are adjusted at copying time in 64 bits mode 160 * RIP relative instructions are adjusted at copying time in 64 bits mode
153 */ 161 */
154static int __kprobes can_boost(kprobe_opcode_t *opcodes) 162int __kprobes can_boost(kprobe_opcode_t *opcodes)
155{ 163{
156 kprobe_opcode_t opcode; 164 kprobe_opcode_t opcode;
157 kprobe_opcode_t *orig_opcodes = opcodes; 165 kprobe_opcode_t *orig_opcodes = opcodes;
@@ -207,8 +215,8 @@ retry:
207 } 215 }
208} 216}
209 217
210static unsigned long __recover_probed_insn(kprobe_opcode_t *buf, 218static unsigned long
211 unsigned long addr) 219__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
212{ 220{
213 struct kprobe *kp; 221 struct kprobe *kp;
214 222
@@ -235,59 +243,12 @@ static unsigned long __recover_probed_insn(kprobe_opcode_t *buf,
235 return (unsigned long)buf; 243 return (unsigned long)buf;
236} 244}
237 245
238#ifdef CONFIG_OPTPROBES
239static unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf,
240 unsigned long addr)
241{
242 struct optimized_kprobe *op;
243 struct kprobe *kp;
244 long offs;
245 int i;
246
247 for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
248 kp = get_kprobe((void *)addr - i);
249 /* This function only handles jump-optimized kprobe */
250 if (kp && kprobe_optimized(kp)) {
251 op = container_of(kp, struct optimized_kprobe, kp);
252 /* If op->list is not empty, op is under optimizing */
253 if (list_empty(&op->list))
254 goto found;
255 }
256 }
257
258 return addr;
259found:
260 /*
261 * If the kprobe can be optimized, original bytes which can be
262 * overwritten by jump destination address. In this case, original
263 * bytes must be recovered from op->optinsn.copied_insn buffer.
264 */
265 memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
266 if (addr == (unsigned long)kp->addr) {
267 buf[0] = kp->opcode;
268 memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
269 } else {
270 offs = addr - (unsigned long)kp->addr - 1;
271 memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
272 }
273
274 return (unsigned long)buf;
275}
276#else
277static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf,
278 unsigned long addr)
279{
280 return addr;
281}
282#endif
283
284/* 246/*
285 * Recover the probed instruction at addr for further analysis. 247 * Recover the probed instruction at addr for further analysis.
286 * Caller must lock kprobes by kprobe_mutex, or disable preemption 248 * Caller must lock kprobes by kprobe_mutex, or disable preemption
287 * for preventing to release referencing kprobes. 249 * for preventing to release referencing kprobes.
288 */ 250 */
289static unsigned long recover_probed_instruction(kprobe_opcode_t *buf, 251unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
290 unsigned long addr)
291{ 252{
292 unsigned long __addr; 253 unsigned long __addr;
293 254
@@ -361,7 +322,7 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
361 * If not, return null. 322 * If not, return null.
362 * Only applicable to 64-bit x86. 323 * Only applicable to 64-bit x86.
363 */ 324 */
364static int __kprobes __copy_instruction(u8 *dest, u8 *src) 325int __kprobes __copy_instruction(u8 *dest, u8 *src)
365{ 326{
366 struct insn insn; 327 struct insn insn;
367 kprobe_opcode_t buf[MAX_INSN_SIZE]; 328 kprobe_opcode_t buf[MAX_INSN_SIZE];
@@ -497,8 +458,8 @@ static void __kprobes restore_btf(void)
497 } 458 }
498} 459}
499 460
500void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 461void __kprobes
501 struct pt_regs *regs) 462arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
502{ 463{
503 unsigned long *sara = stack_addr(regs); 464 unsigned long *sara = stack_addr(regs);
504 465
@@ -508,16 +469,8 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
508 *sara = (unsigned long) &kretprobe_trampoline; 469 *sara = (unsigned long) &kretprobe_trampoline;
509} 470}
510 471
511#ifdef CONFIG_OPTPROBES 472static void __kprobes
512static int __kprobes setup_detour_execution(struct kprobe *p, 473setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter)
513 struct pt_regs *regs,
514 int reenter);
515#else
516#define setup_detour_execution(p, regs, reenter) (0)
517#endif
518
519static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
520 struct kprobe_ctlblk *kcb, int reenter)
521{ 474{
522 if (setup_detour_execution(p, regs, reenter)) 475 if (setup_detour_execution(p, regs, reenter))
523 return; 476 return;
@@ -559,8 +512,8 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
559 * within the handler. We save the original kprobes variables and just single 512 * within the handler. We save the original kprobes variables and just single
560 * step on the instruction of the new probe without calling any user handlers. 513 * step on the instruction of the new probe without calling any user handlers.
561 */ 514 */
562static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, 515static int __kprobes
563 struct kprobe_ctlblk *kcb) 516reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
564{ 517{
565 switch (kcb->kprobe_status) { 518 switch (kcb->kprobe_status) {
566 case KPROBE_HIT_SSDONE: 519 case KPROBE_HIT_SSDONE:
@@ -655,69 +608,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
655 return 0; 608 return 0;
656} 609}
657 610
658#ifdef CONFIG_X86_64
659#define SAVE_REGS_STRING \
660 /* Skip cs, ip, orig_ax. */ \
661 " subq $24, %rsp\n" \
662 " pushq %rdi\n" \
663 " pushq %rsi\n" \
664 " pushq %rdx\n" \
665 " pushq %rcx\n" \
666 " pushq %rax\n" \
667 " pushq %r8\n" \
668 " pushq %r9\n" \
669 " pushq %r10\n" \
670 " pushq %r11\n" \
671 " pushq %rbx\n" \
672 " pushq %rbp\n" \
673 " pushq %r12\n" \
674 " pushq %r13\n" \
675 " pushq %r14\n" \
676 " pushq %r15\n"
677#define RESTORE_REGS_STRING \
678 " popq %r15\n" \
679 " popq %r14\n" \
680 " popq %r13\n" \
681 " popq %r12\n" \
682 " popq %rbp\n" \
683 " popq %rbx\n" \
684 " popq %r11\n" \
685 " popq %r10\n" \
686 " popq %r9\n" \
687 " popq %r8\n" \
688 " popq %rax\n" \
689 " popq %rcx\n" \
690 " popq %rdx\n" \
691 " popq %rsi\n" \
692 " popq %rdi\n" \
693 /* Skip orig_ax, ip, cs */ \
694 " addq $24, %rsp\n"
695#else
696#define SAVE_REGS_STRING \
697 /* Skip cs, ip, orig_ax and gs. */ \
698 " subl $16, %esp\n" \
699 " pushl %fs\n" \
700 " pushl %es\n" \
701 " pushl %ds\n" \
702 " pushl %eax\n" \
703 " pushl %ebp\n" \
704 " pushl %edi\n" \
705 " pushl %esi\n" \
706 " pushl %edx\n" \
707 " pushl %ecx\n" \
708 " pushl %ebx\n"
709#define RESTORE_REGS_STRING \
710 " popl %ebx\n" \
711 " popl %ecx\n" \
712 " popl %edx\n" \
713 " popl %esi\n" \
714 " popl %edi\n" \
715 " popl %ebp\n" \
716 " popl %eax\n" \
717 /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\
718 " addl $24, %esp\n"
719#endif
720
721/* 611/*
722 * When a retprobed function returns, this code saves registers and 612 * When a retprobed function returns, this code saves registers and
723 * calls trampoline_handler() runs, which calls the kretprobe's handler. 613 * calls trampoline_handler() runs, which calls the kretprobe's handler.
@@ -871,8 +761,8 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
871 * jump instruction after the copied instruction, that jumps to the next 761 * jump instruction after the copied instruction, that jumps to the next
872 * instruction after the probepoint. 762 * instruction after the probepoint.
873 */ 763 */
874static void __kprobes resume_execution(struct kprobe *p, 764static void __kprobes
875 struct pt_regs *regs, struct kprobe_ctlblk *kcb) 765resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
876{ 766{
877 unsigned long *tos = stack_addr(regs); 767 unsigned long *tos = stack_addr(regs);
878 unsigned long copy_ip = (unsigned long)p->ainsn.insn; 768 unsigned long copy_ip = (unsigned long)p->ainsn.insn;
@@ -1051,8 +941,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
1051/* 941/*
1052 * Wrapper routine for handling exceptions. 942 * Wrapper routine for handling exceptions.
1053 */ 943 */
1054int __kprobes kprobe_exceptions_notify(struct notifier_block *self, 944int __kprobes
1055 unsigned long val, void *data) 945kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data)
1056{ 946{
1057 struct die_args *args = data; 947 struct die_args *args = data;
1058 int ret = NOTIFY_DONE; 948 int ret = NOTIFY_DONE;
@@ -1162,462 +1052,9 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
1162 return 0; 1052 return 0;
1163} 1053}
1164 1054
1165
1166#ifdef CONFIG_OPTPROBES
1167
1168/* Insert a call instruction at address 'from', which calls address 'to'.*/
1169static void __kprobes synthesize_relcall(void *from, void *to)
1170{
1171 __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
1172}
1173
1174/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
1175static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
1176 unsigned long val)
1177{
1178#ifdef CONFIG_X86_64
1179 *addr++ = 0x48;
1180 *addr++ = 0xbf;
1181#else
1182 *addr++ = 0xb8;
1183#endif
1184 *(unsigned long *)addr = val;
1185}
1186
1187static void __used __kprobes kprobes_optinsn_template_holder(void)
1188{
1189 asm volatile (
1190 ".global optprobe_template_entry\n"
1191 "optprobe_template_entry: \n"
1192#ifdef CONFIG_X86_64
1193 /* We don't bother saving the ss register */
1194 " pushq %rsp\n"
1195 " pushfq\n"
1196 SAVE_REGS_STRING
1197 " movq %rsp, %rsi\n"
1198 ".global optprobe_template_val\n"
1199 "optprobe_template_val: \n"
1200 ASM_NOP5
1201 ASM_NOP5
1202 ".global optprobe_template_call\n"
1203 "optprobe_template_call: \n"
1204 ASM_NOP5
1205 /* Move flags to rsp */
1206 " movq 144(%rsp), %rdx\n"
1207 " movq %rdx, 152(%rsp)\n"
1208 RESTORE_REGS_STRING
1209 /* Skip flags entry */
1210 " addq $8, %rsp\n"
1211 " popfq\n"
1212#else /* CONFIG_X86_32 */
1213 " pushf\n"
1214 SAVE_REGS_STRING
1215 " movl %esp, %edx\n"
1216 ".global optprobe_template_val\n"
1217 "optprobe_template_val: \n"
1218 ASM_NOP5
1219 ".global optprobe_template_call\n"
1220 "optprobe_template_call: \n"
1221 ASM_NOP5
1222 RESTORE_REGS_STRING
1223 " addl $4, %esp\n" /* skip cs */
1224 " popf\n"
1225#endif
1226 ".global optprobe_template_end\n"
1227 "optprobe_template_end: \n");
1228}
1229
1230#define TMPL_MOVE_IDX \
1231 ((long)&optprobe_template_val - (long)&optprobe_template_entry)
1232#define TMPL_CALL_IDX \
1233 ((long)&optprobe_template_call - (long)&optprobe_template_entry)
1234#define TMPL_END_IDX \
1235 ((long)&optprobe_template_end - (long)&optprobe_template_entry)
1236
1237#define INT3_SIZE sizeof(kprobe_opcode_t)
1238
1239/* Optimized kprobe call back function: called from optinsn */
1240static void __kprobes optimized_callback(struct optimized_kprobe *op,
1241 struct pt_regs *regs)
1242{
1243 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
1244 unsigned long flags;
1245
1246 /* This is possible if op is under delayed unoptimizing */
1247 if (kprobe_disabled(&op->kp))
1248 return;
1249
1250 local_irq_save(flags);
1251 if (kprobe_running()) {
1252 kprobes_inc_nmissed_count(&op->kp);
1253 } else {
1254 /* Save skipped registers */
1255#ifdef CONFIG_X86_64
1256 regs->cs = __KERNEL_CS;
1257#else
1258 regs->cs = __KERNEL_CS | get_kernel_rpl();
1259 regs->gs = 0;
1260#endif
1261 regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
1262 regs->orig_ax = ~0UL;
1263
1264 __this_cpu_write(current_kprobe, &op->kp);
1265 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1266 opt_pre_handler(&op->kp, regs);
1267 __this_cpu_write(current_kprobe, NULL);
1268 }
1269 local_irq_restore(flags);
1270}
1271
1272static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
1273{
1274 int len = 0, ret;
1275
1276 while (len < RELATIVEJUMP_SIZE) {
1277 ret = __copy_instruction(dest + len, src + len);
1278 if (!ret || !can_boost(dest + len))
1279 return -EINVAL;
1280 len += ret;
1281 }
1282 /* Check whether the address range is reserved */
1283 if (ftrace_text_reserved(src, src + len - 1) ||
1284 alternatives_text_reserved(src, src + len - 1) ||
1285 jump_label_text_reserved(src, src + len - 1))
1286 return -EBUSY;
1287
1288 return len;
1289}
1290
1291/* Check whether insn is indirect jump */
1292static int __kprobes insn_is_indirect_jump(struct insn *insn)
1293{
1294 return ((insn->opcode.bytes[0] == 0xff &&
1295 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
1296 insn->opcode.bytes[0] == 0xea); /* Segment based jump */
1297}
1298
1299/* Check whether insn jumps into specified address range */
1300static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
1301{
1302 unsigned long target = 0;
1303
1304 switch (insn->opcode.bytes[0]) {
1305 case 0xe0: /* loopne */
1306 case 0xe1: /* loope */
1307 case 0xe2: /* loop */
1308 case 0xe3: /* jcxz */
1309 case 0xe9: /* near relative jump */
1310 case 0xeb: /* short relative jump */
1311 break;
1312 case 0x0f:
1313 if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
1314 break;
1315 return 0;
1316 default:
1317 if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
1318 break;
1319 return 0;
1320 }
1321 target = (unsigned long)insn->next_byte + insn->immediate.value;
1322
1323 return (start <= target && target <= start + len);
1324}
1325
1326/* Decode whole function to ensure any instructions don't jump into target */
1327static int __kprobes can_optimize(unsigned long paddr)
1328{
1329 unsigned long addr, size = 0, offset = 0;
1330 struct insn insn;
1331 kprobe_opcode_t buf[MAX_INSN_SIZE];
1332
1333 /* Lookup symbol including addr */
1334 if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
1335 return 0;
1336
1337 /*
1338 * Do not optimize in the entry code due to the unstable
1339 * stack handling.
1340 */
1341 if ((paddr >= (unsigned long )__entry_text_start) &&
1342 (paddr < (unsigned long )__entry_text_end))
1343 return 0;
1344
1345 /* Check there is enough space for a relative jump. */
1346 if (size - offset < RELATIVEJUMP_SIZE)
1347 return 0;
1348
1349 /* Decode instructions */
1350 addr = paddr - offset;
1351 while (addr < paddr - offset + size) { /* Decode until function end */
1352 if (search_exception_tables(addr))
1353 /*
1354 * Since some fixup code will jumps into this function,
1355 * we can't optimize kprobe in this function.
1356 */
1357 return 0;
1358 kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr));
1359 insn_get_length(&insn);
1360 /* Another subsystem puts a breakpoint */
1361 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
1362 return 0;
1363 /* Recover address */
1364 insn.kaddr = (void *)addr;
1365 insn.next_byte = (void *)(addr + insn.length);
1366 /* Check any instructions don't jump into target */
1367 if (insn_is_indirect_jump(&insn) ||
1368 insn_jump_into_range(&insn, paddr + INT3_SIZE,
1369 RELATIVE_ADDR_SIZE))
1370 return 0;
1371 addr += insn.length;
1372 }
1373
1374 return 1;
1375}
1376
1377/* Check optimized_kprobe can actually be optimized. */
1378int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
1379{
1380 int i;
1381 struct kprobe *p;
1382
1383 for (i = 1; i < op->optinsn.size; i++) {
1384 p = get_kprobe(op->kp.addr + i);
1385 if (p && !kprobe_disabled(p))
1386 return -EEXIST;
1387 }
1388
1389 return 0;
1390}
1391
1392/* Check the addr is within the optimized instructions. */
1393int __kprobes arch_within_optimized_kprobe(struct optimized_kprobe *op,
1394 unsigned long addr)
1395{
1396 return ((unsigned long)op->kp.addr <= addr &&
1397 (unsigned long)op->kp.addr + op->optinsn.size > addr);
1398}
1399
1400/* Free optimized instruction slot */
1401static __kprobes
1402void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
1403{
1404 if (op->optinsn.insn) {
1405 free_optinsn_slot(op->optinsn.insn, dirty);
1406 op->optinsn.insn = NULL;
1407 op->optinsn.size = 0;
1408 }
1409}
1410
1411void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
1412{
1413 __arch_remove_optimized_kprobe(op, 1);
1414}
1415
1416/*
1417 * Copy replacing target instructions
1418 * Target instructions MUST be relocatable (checked inside)
1419 * This is called when new aggr(opt)probe is allocated or reused.
1420 */
1421int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
1422{
1423 u8 *buf;
1424 int ret;
1425 long rel;
1426
1427 if (!can_optimize((unsigned long)op->kp.addr))
1428 return -EILSEQ;
1429
1430 op->optinsn.insn = get_optinsn_slot();
1431 if (!op->optinsn.insn)
1432 return -ENOMEM;
1433
1434 /*
1435 * Verify if the address gap is in 2GB range, because this uses
1436 * a relative jump.
1437 */
1438 rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
1439 if (abs(rel) > 0x7fffffff)
1440 return -ERANGE;
1441
1442 buf = (u8 *)op->optinsn.insn;
1443
1444 /* Copy instructions into the out-of-line buffer */
1445 ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
1446 if (ret < 0) {
1447 __arch_remove_optimized_kprobe(op, 0);
1448 return ret;
1449 }
1450 op->optinsn.size = ret;
1451
1452 /* Copy arch-dep-instance from template */
1453 memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
1454
1455 /* Set probe information */
1456 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
1457
1458 /* Set probe function call */
1459 synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
1460
1461 /* Set returning jmp instruction at the tail of out-of-line buffer */
1462 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
1463 (u8 *)op->kp.addr + op->optinsn.size);
1464
1465 flush_icache_range((unsigned long) buf,
1466 (unsigned long) buf + TMPL_END_IDX +
1467 op->optinsn.size + RELATIVEJUMP_SIZE);
1468 return 0;
1469}
1470
1471#define MAX_OPTIMIZE_PROBES 256
1472static struct text_poke_param *jump_poke_params;
1473static struct jump_poke_buffer {
1474 u8 buf[RELATIVEJUMP_SIZE];
1475} *jump_poke_bufs;
1476
1477static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
1478 u8 *insn_buf,
1479 struct optimized_kprobe *op)
1480{
1481 s32 rel = (s32)((long)op->optinsn.insn -
1482 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
1483
1484 /* Backup instructions which will be replaced by jump address */
1485 memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
1486 RELATIVE_ADDR_SIZE);
1487
1488 insn_buf[0] = RELATIVEJUMP_OPCODE;
1489 *(s32 *)(&insn_buf[1]) = rel;
1490
1491 tprm->addr = op->kp.addr;
1492 tprm->opcode = insn_buf;
1493 tprm->len = RELATIVEJUMP_SIZE;
1494}
1495
1496/*
1497 * Replace breakpoints (int3) with relative jumps.
1498 * Caller must call with locking kprobe_mutex and text_mutex.
1499 */
1500void __kprobes arch_optimize_kprobes(struct list_head *oplist)
1501{
1502 struct optimized_kprobe *op, *tmp;
1503 int c = 0;
1504
1505 list_for_each_entry_safe(op, tmp, oplist, list) {
1506 WARN_ON(kprobe_disabled(&op->kp));
1507 /* Setup param */
1508 setup_optimize_kprobe(&jump_poke_params[c],
1509 jump_poke_bufs[c].buf, op);
1510 list_del_init(&op->list);
1511 if (++c >= MAX_OPTIMIZE_PROBES)
1512 break;
1513 }
1514
1515 /*
1516 * text_poke_smp doesn't support NMI/MCE code modifying.
1517 * However, since kprobes itself also doesn't support NMI/MCE
1518 * code probing, it's not a problem.
1519 */
1520 text_poke_smp_batch(jump_poke_params, c);
1521}
1522
1523static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
1524 u8 *insn_buf,
1525 struct optimized_kprobe *op)
1526{
1527 /* Set int3 to first byte for kprobes */
1528 insn_buf[0] = BREAKPOINT_INSTRUCTION;
1529 memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
1530
1531 tprm->addr = op->kp.addr;
1532 tprm->opcode = insn_buf;
1533 tprm->len = RELATIVEJUMP_SIZE;
1534}
1535
1536/*
1537 * Recover original instructions and breakpoints from relative jumps.
1538 * Caller must call with locking kprobe_mutex.
1539 */
1540extern void arch_unoptimize_kprobes(struct list_head *oplist,
1541 struct list_head *done_list)
1542{
1543 struct optimized_kprobe *op, *tmp;
1544 int c = 0;
1545
1546 list_for_each_entry_safe(op, tmp, oplist, list) {
1547 /* Setup param */
1548 setup_unoptimize_kprobe(&jump_poke_params[c],
1549 jump_poke_bufs[c].buf, op);
1550 list_move(&op->list, done_list);
1551 if (++c >= MAX_OPTIMIZE_PROBES)
1552 break;
1553 }
1554
1555 /*
1556 * text_poke_smp doesn't support NMI/MCE code modifying.
1557 * However, since kprobes itself also doesn't support NMI/MCE
1558 * code probing, it's not a problem.
1559 */
1560 text_poke_smp_batch(jump_poke_params, c);
1561}
1562
1563/* Replace a relative jump with a breakpoint (int3). */
1564void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
1565{
1566 u8 buf[RELATIVEJUMP_SIZE];
1567
1568 /* Set int3 to first byte for kprobes */
1569 buf[0] = BREAKPOINT_INSTRUCTION;
1570 memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
1571 text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
1572}
1573
1574static int __kprobes setup_detour_execution(struct kprobe *p,
1575 struct pt_regs *regs,
1576 int reenter)
1577{
1578 struct optimized_kprobe *op;
1579
1580 if (p->flags & KPROBE_FLAG_OPTIMIZED) {
1581 /* This kprobe is really able to run optimized path. */
1582 op = container_of(p, struct optimized_kprobe, kp);
1583 /* Detour through copied instructions */
1584 regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
1585 if (!reenter)
1586 reset_current_kprobe();
1587 preempt_enable_no_resched();
1588 return 1;
1589 }
1590 return 0;
1591}
1592
1593static int __kprobes init_poke_params(void)
1594{
1595 /* Allocate code buffer and parameter array */
1596 jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
1597 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
1598 if (!jump_poke_bufs)
1599 return -ENOMEM;
1600
1601 jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
1602 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
1603 if (!jump_poke_params) {
1604 kfree(jump_poke_bufs);
1605 jump_poke_bufs = NULL;
1606 return -ENOMEM;
1607 }
1608
1609 return 0;
1610}
1611#else /* !CONFIG_OPTPROBES */
1612static int __kprobes init_poke_params(void)
1613{
1614 return 0;
1615}
1616#endif
1617
1618int __init arch_init_kprobes(void) 1055int __init arch_init_kprobes(void)
1619{ 1056{
1620 return init_poke_params(); 1057 return arch_init_optprobes();
1621} 1058}
1622 1059
1623int __kprobes arch_trampoline_kprobe(struct kprobe *p) 1060int __kprobes arch_trampoline_kprobe(struct kprobe *p)