aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMasami Hiramatsu <mhiramat@kernel.org>2017-03-29 01:05:06 -0400
committerIngo Molnar <mingo@kernel.org>2017-04-12 03:23:47 -0400
commita8d11cd0714f51877587f5ec891013ca46e163ac (patch)
tree2bfe46b521bef359b00e96ef28d66b05bbd820dd
parentea1e34fc366b84e4449b37d86f2222935e29412d (diff)
kprobes/x86: Consolidate insn decoder users for copying code
Consolidate x86 instruction decoder users on the path of copying original code for kprobes. Kprobes decodes the same instruction a maximum of 3 times when preparing the instruction buffer: - The first time for getting the length of the instruction, - the 2nd for adjusting displacement, - and the 3rd for checking whether the instruction is boostable or not. For each time, the actual decoding target address is slightly different (1st is original address or recovered instruction buffer, 2nd and 3rd are pointing to the copied buffer), but all have the same instruction. Thus, this patch also changes the target address to the copied buffer at first and reuses the decoded "insn" for displacement adjusting and checking boostability. Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org> Cc: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com> Cc: Andrey Ryabinin <aryabinin@virtuozzo.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: David S . Miller <davem@davemloft.net> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ye Xiaolong <xiaolong.ye@intel.com> Link: http://lkml.kernel.org/r/149076389643.22469.13151892839998777373.stgit@devbox Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/kernel/kprobes/common.h4
-rw-r--r--arch/x86/kernel/kprobes/core.c66
-rw-r--r--arch/x86/kernel/kprobes/opt.c5
3 files changed, 36 insertions, 39 deletions
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index d688826e5736..db2182d63ed0 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -67,7 +67,7 @@
67#endif 67#endif
68 68
69/* Ensure if the instruction can be boostable */ 69/* Ensure if the instruction can be boostable */
70extern int can_boost(kprobe_opcode_t *instruction, void *addr); 70extern int can_boost(struct insn *insn, void *orig_addr);
71/* Recover instruction if given address is probed */ 71/* Recover instruction if given address is probed */
72extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf, 72extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
73 unsigned long addr); 73 unsigned long addr);
@@ -75,7 +75,7 @@ extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
75 * Copy an instruction and adjust the displacement if the instruction 75 * Copy an instruction and adjust the displacement if the instruction
76 * uses the %rip-relative addressing mode. 76 * uses the %rip-relative addressing mode.
77 */ 77 */
78extern int __copy_instruction(u8 *dest, u8 *src); 78extern int __copy_instruction(u8 *dest, u8 *src, struct insn *insn);
79 79
80/* Generate a relative-jump/call instruction */ 80/* Generate a relative-jump/call instruction */
81extern void synthesize_reljump(void *from, void *to); 81extern void synthesize_reljump(void *from, void *to);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 722f54440e7e..19e1f2a6d7b0 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -164,33 +164,29 @@ static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
164NOKPROBE_SYMBOL(skip_prefixes); 164NOKPROBE_SYMBOL(skip_prefixes);
165 165
166/* 166/*
167 * Returns non-zero if opcode is boostable. 167 * Returns non-zero if INSN is boostable.
168 * RIP relative instructions are adjusted at copying time in 64 bits mode 168 * RIP relative instructions are adjusted at copying time in 64 bits mode
169 */ 169 */
170int can_boost(kprobe_opcode_t *opcodes, void *addr) 170int can_boost(struct insn *insn, void *addr)
171{ 171{
172 struct insn insn;
173 kprobe_opcode_t opcode; 172 kprobe_opcode_t opcode;
174 173
175 if (search_exception_tables((unsigned long)addr)) 174 if (search_exception_tables((unsigned long)addr))
176 return 0; /* Page fault may occur on this address. */ 175 return 0; /* Page fault may occur on this address. */
177 176
178 kernel_insn_init(&insn, (void *)opcodes, MAX_INSN_SIZE);
179 insn_get_opcode(&insn);
180
181 /* 2nd-byte opcode */ 177 /* 2nd-byte opcode */
182 if (insn.opcode.nbytes == 2) 178 if (insn->opcode.nbytes == 2)
183 return test_bit(insn.opcode.bytes[1], 179 return test_bit(insn->opcode.bytes[1],
184 (unsigned long *)twobyte_is_boostable); 180 (unsigned long *)twobyte_is_boostable);
185 181
186 if (insn.opcode.nbytes != 1) 182 if (insn->opcode.nbytes != 1)
187 return 0; 183 return 0;
188 184
189 /* Can't boost Address-size override prefix */ 185 /* Can't boost Address-size override prefix */
190 if (unlikely(inat_is_address_size_prefix(insn.attr))) 186 if (unlikely(inat_is_address_size_prefix(insn->attr)))
191 return 0; 187 return 0;
192 188
193 opcode = insn.opcode.bytes[0]; 189 opcode = insn->opcode.bytes[0];
194 190
195 switch (opcode & 0xf0) { 191 switch (opcode & 0xf0) {
196 case 0x60: 192 case 0x60:
@@ -351,35 +347,31 @@ static int is_IF_modifier(kprobe_opcode_t *insn)
351 * addressing mode. 347 * addressing mode.
352 * This returns the length of copied instruction, or 0 if it has an error. 348 * This returns the length of copied instruction, or 0 if it has an error.
353 */ 349 */
354int __copy_instruction(u8 *dest, u8 *src) 350int __copy_instruction(u8 *dest, u8 *src, struct insn *insn)
355{ 351{
356 struct insn insn;
357 kprobe_opcode_t buf[MAX_INSN_SIZE]; 352 kprobe_opcode_t buf[MAX_INSN_SIZE];
358 int length;
359 unsigned long recovered_insn = 353 unsigned long recovered_insn =
360 recover_probed_instruction(buf, (unsigned long)src); 354 recover_probed_instruction(buf, (unsigned long)src);
361 355
362 if (!recovered_insn) 356 if (!recovered_insn || !insn)
363 return 0; 357 return 0;
364 kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
365 insn_get_length(&insn);
366 length = insn.length;
367 358
368 /* Another subsystem puts a breakpoint, failed to recover */ 359 /* This can access kernel text if given address is not recovered */
369 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) 360 if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE))
370 return 0; 361 return 0;
371 362
372 /* This can access kernel text if given address is not recovered */ 363 kernel_insn_init(insn, dest, MAX_INSN_SIZE);
373 if (probe_kernel_read(dest, insn.kaddr, length)) 364 insn_get_length(insn);
365
366 /* Another subsystem puts a breakpoint, failed to recover */
367 if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
374 return 0; 368 return 0;
375 369
376#ifdef CONFIG_X86_64 370#ifdef CONFIG_X86_64
377 /* Only x86_64 has RIP relative instructions */ 371 /* Only x86_64 has RIP relative instructions */
378 if (insn_rip_relative(&insn)) { 372 if (insn_rip_relative(insn)) {
379 s64 newdisp; 373 s64 newdisp;
380 u8 *disp; 374 u8 *disp;
381 kernel_insn_init(&insn, dest, length);
382 insn_get_displacement(&insn);
383 /* 375 /*
384 * The copied instruction uses the %rip-relative addressing 376 * The copied instruction uses the %rip-relative addressing
385 * mode. Adjust the displacement for the difference between 377 * mode. Adjust the displacement for the difference between
@@ -392,29 +384,32 @@ int __copy_instruction(u8 *dest, u8 *src)
392 * extension of the original signed 32-bit displacement would 384 * extension of the original signed 32-bit displacement would
393 * have given. 385 * have given.
394 */ 386 */
395 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest; 387 newdisp = (u8 *) src + (s64) insn->displacement.value
388 - (u8 *) dest;
396 if ((s64) (s32) newdisp != newdisp) { 389 if ((s64) (s32) newdisp != newdisp) {
397 pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp); 390 pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
398 pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value); 391 pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
392 src, dest, insn->displacement.value);
399 return 0; 393 return 0;
400 } 394 }
401 disp = (u8 *) dest + insn_offset_displacement(&insn); 395 disp = (u8 *) dest + insn_offset_displacement(insn);
402 *(s32 *) disp = (s32) newdisp; 396 *(s32 *) disp = (s32) newdisp;
403 } 397 }
404#endif 398#endif
405 return length; 399 return insn->length;
406} 400}
407 401
408/* Prepare reljump right after instruction to boost */ 402/* Prepare reljump right after instruction to boost */
409static void prepare_boost(struct kprobe *p, int length) 403static void prepare_boost(struct kprobe *p, struct insn *insn)
410{ 404{
411 if (can_boost(p->ainsn.insn, p->addr) && 405 if (can_boost(insn, p->addr) &&
412 MAX_INSN_SIZE - length >= RELATIVEJUMP_SIZE) { 406 MAX_INSN_SIZE - insn->length >= RELATIVEJUMP_SIZE) {
413 /* 407 /*
414 * These instructions can be executed directly if it 408 * These instructions can be executed directly if it
415 * jumps back to correct address. 409 * jumps back to correct address.
416 */ 410 */
417 synthesize_reljump(p->ainsn.insn + length, p->addr + length); 411 synthesize_reljump(p->ainsn.insn + insn->length,
412 p->addr + insn->length);
418 p->ainsn.boostable = true; 413 p->ainsn.boostable = true;
419 } else { 414 } else {
420 p->ainsn.boostable = false; 415 p->ainsn.boostable = false;
@@ -423,12 +418,13 @@ static void prepare_boost(struct kprobe *p, int length)
423 418
424static int arch_copy_kprobe(struct kprobe *p) 419static int arch_copy_kprobe(struct kprobe *p)
425{ 420{
421 struct insn insn;
426 int len; 422 int len;
427 423
428 set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1); 424 set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
429 425
430 /* Copy an instruction with recovering if other optprobe modifies it.*/ 426 /* Copy an instruction with recovering if other optprobe modifies it.*/
431 len = __copy_instruction(p->ainsn.insn, p->addr); 427 len = __copy_instruction(p->ainsn.insn, p->addr, &insn);
432 if (!len) 428 if (!len)
433 return -EINVAL; 429 return -EINVAL;
434 430
@@ -436,7 +432,7 @@ static int arch_copy_kprobe(struct kprobe *p)
436 * __copy_instruction can modify the displacement of the instruction, 432 * __copy_instruction can modify the displacement of the instruction,
437 * but it doesn't affect boostable check. 433 * but it doesn't affect boostable check.
438 */ 434 */
439 prepare_boost(p, len); 435 prepare_boost(p, &insn);
440 436
441 set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1); 437 set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
442 438
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 5b5233441d30..9aadff3d0902 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -177,11 +177,12 @@ NOKPROBE_SYMBOL(optimized_callback);
177 177
178static int copy_optimized_instructions(u8 *dest, u8 *src) 178static int copy_optimized_instructions(u8 *dest, u8 *src)
179{ 179{
180 struct insn insn;
180 int len = 0, ret; 181 int len = 0, ret;
181 182
182 while (len < RELATIVEJUMP_SIZE) { 183 while (len < RELATIVEJUMP_SIZE) {
183 ret = __copy_instruction(dest + len, src + len); 184 ret = __copy_instruction(dest + len, src + len, &insn);
184 if (!ret || !can_boost(dest + len, src + len)) 185 if (!ret || !can_boost(&insn, src + len))
185 return -EINVAL; 186 return -EINVAL;
186 len += ret; 187 len += ret;
187 } 188 }