author     Srikar Dronamraju <srikar@linux.vnet.ibm.com>    2012-03-13 14:00:11 -0400
committer  Ingo Molnar <mingo@elte.hu>                      2012-03-14 02:41:36 -0400
commit     0326f5a94ddea33fa331b2519f4172f4fb387baa
tree       5485c637754a126c90852e5285842e8462d2826a /arch/x86/kernel/uprobes.c
parent     ef15eda98217f5183f457e7a2de8b79555ef908b
uprobes/core: Handle breakpoint and singlestep exceptions
Uprobes uses exception notifiers to learn when a thread hits a
breakpoint or takes a singlestep exception.
When a thread hits a uprobe, or is single-stepping after a uprobe
hit, the uprobe exception notifier sets its TIF_UPROBE bit, which
is then checked on the return-to-userspace path
(do_notify_resume() -> uprobe_notify_resume()), where the
consumer handlers are run (in task context) based on the
defined filters.
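As a rough illustration, the return-to-userspace hook described above looks like the
following sketch (the actual hunk lives in the companion core/signal patches, not in
this file; the body shown here is illustrative only):

```c
/* Sketch of the do_notify_resume() hook described above; illustrative only,
 * the real change is part of the companion patches, not this commit. */
void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
{
        if (thread_info_flags & _TIF_UPROBE) {
                clear_thread_flag(TIF_UPROBE);
                uprobe_notify_resume(regs);  /* run consumer handlers in task context */
        }

        /* ... the existing signal and notification handling follows ... */
}
```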
Uprobe hits are thread-specific, hence we need to maintain
per-task information about whether a task hit a uprobe, which
uprobe was hit, and the slot where the original instruction was
copied for xol, so that it can be single-stepped with the
appropriate fixups.
In some cases, special care is needed for instructions that are
executed out of line (xol). These are architecture specific
artefacts, such as handling RIP relative instructions on x86_64.
Since the instruction at which the uprobe was inserted is
executed out of line, architecture specific fixups are added so
that the thread continues normal execution in the presence of a
uprobe.
Postpone signals until the probed insn has been executed. The
post_xol() path does a recalc_sigpending() before returning to
user mode, which ensures the signal cannot be lost.
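For context, the ordering described above can be sketched as follows (a minimal
sketch; the helper name and surrounding details are assumed from the companion
core-layer patch and are not part of this x86 commit):

```c
/* Minimal sketch of the core-layer post-xol step; names are assumptions. */
static void post_xol_sketch(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        /* 1. architecture-specific fixups (added by this patch) */
        arch_uprobe_post_xol(auprobe, regs);

        /* 2. re-evaluate pending signals so that a signal which arrived while
         *    the probed insn was being single-stepped is not lost */
        spin_lock_irq(&current->sighand->siglock);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
}
```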
Uprobes relies on the DIE_DEBUG notification to detect when a
singlestep is complete.
Add the x86-specific uprobe exception notifiers and the hooks
needed to detect a uprobe hit and carry out the subsequent
post-processing.
Add the requisite x86 fixups for instructions executed out of line
(xol). Specific cases needing fixups include relative jumps
(x86_64), calls, etc.
Where possible, we check for and skip single-stepping the
breakpointed instruction altogether. For now we skip the
single-byte nop as well as a few multi-byte nop instructions;
this can be extended to other instructions later.
Credits to Oleg Nesterov for suggestions/patches related to
signal, breakpoint, singlestep handling code.
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@linux.vnet.ibm.com>
Cc: Linux-mm <linux-mm@kvack.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20120313180011.29771.89027.sendpatchset@srdronam.in.ibm.com
[ Performed various cleanliness edits ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/uprobes.c')
-rw-r--r--   arch/x86/kernel/uprobes.c | 265
1 file changed, 258 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 851a11b0d38c..dc4e910a7d96 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -24,22 +24,28 @@
 #include <linux/sched.h>
 #include <linux/ptrace.h>
 #include <linux/uprobes.h>
+#include <linux/uaccess.h>
 
 #include <linux/kdebug.h>
+#include <asm/processor.h>
 #include <asm/insn.h>
 
 /* Post-execution fixups. */
 
 /* No fixup needed */
 #define UPROBE_FIX_NONE 0x0
+
 /* Adjust IP back to vicinity of actual insn */
 #define UPROBE_FIX_IP 0x1
+
 /* Adjust the return address of a call insn */
 #define UPROBE_FIX_CALL 0x2
 
 #define UPROBE_FIX_RIP_AX 0x8000
 #define UPROBE_FIX_RIP_CX 0x4000
 
+#define UPROBE_TRAP_NR UINT_MAX
+
 /* Adaptations for mhiramat x86 decoder v14. */
 #define OPCODE1(insn) ((insn)->opcode.bytes[0])
 #define OPCODE2(insn) ((insn)->opcode.bytes[1])
@@ -221,10 +227,9 @@ static int validate_insn_32bits(struct arch_uprobe *auprobe, struct insn *insn)
 }
 
 /*
- * Figure out which fixups post_xol() will need to perform, and annotate
- * arch_uprobe->fixups accordingly. To start with,
- * arch_uprobe->fixups is either zero or it reflects rip-related
- * fixups.
+ * Figure out which fixups arch_uprobe_post_xol() will need to perform, and
+ * annotate arch_uprobe->fixups accordingly. To start with,
+ * arch_uprobe->fixups is either zero or it reflects rip-related fixups.
  */
 static void prepare_fixups(struct arch_uprobe *auprobe, struct insn *insn)
 {
@@ -401,12 +406,12 @@ static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm,
 #endif /* CONFIG_X86_64 */
 
 /**
- * arch_uprobes_analyze_insn - instruction analysis including validity and fixups.
+ * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
  * @mm: the probed address space.
  * @arch_uprobe: the probepoint information.
  * Return 0 on success or a -ve number on error.
  */
-int arch_uprobes_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm)
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm)
 {
        int ret;
        struct insn insn;
@@ -421,3 +426,249 @@ int arch_uprobes_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm)
 
        return 0;
 }
+
+#ifdef CONFIG_X86_64
+/*
+ * If we're emulating a rip-relative instruction, save the contents
+ * of the scratch register and store the target address in that register.
+ */
+static void
+pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
+                               struct arch_uprobe_task *autask)
+{
+       if (auprobe->fixups & UPROBE_FIX_RIP_AX) {
+               autask->saved_scratch_register = regs->ax;
+               regs->ax = current->utask->vaddr;
+               regs->ax += auprobe->rip_rela_target_address;
+       } else if (auprobe->fixups & UPROBE_FIX_RIP_CX) {
+               autask->saved_scratch_register = regs->cx;
+               regs->cx = current->utask->vaddr;
+               regs->cx += auprobe->rip_rela_target_address;
+       }
+}
+#else
+static void
+pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
+                               struct arch_uprobe_task *autask)
+{
+       /* No RIP-relative addressing on 32-bit */
+}
+#endif
+
+/*
+ * arch_uprobe_pre_xol - prepare to execute out of line.
+ * @auprobe: the probepoint information.
+ * @regs: reflects the saved user state of current task.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+       struct arch_uprobe_task *autask;
+
+       autask = &current->utask->autask;
+       autask->saved_trap_nr = current->thread.trap_nr;
+       current->thread.trap_nr = UPROBE_TRAP_NR;
+       regs->ip = current->utask->xol_vaddr;
+       pre_xol_rip_insn(auprobe, regs, autask);
+
+       return 0;
+}
+
+/*
+ * This function is called by arch_uprobe_post_xol() to adjust the return
+ * address pushed by a call instruction executed out of line.
+ */
+static int adjust_ret_addr(unsigned long sp, long correction)
+{
+       int rasize, ncopied;
+       long ra = 0;
+
+       if (is_ia32_task())
+               rasize = 4;
+       else
+               rasize = 8;
+
+       ncopied = copy_from_user(&ra, (void __user *)sp, rasize);
+       if (unlikely(ncopied))
+               return -EFAULT;
+
+       ra += correction;
+       ncopied = copy_to_user((void __user *)sp, &ra, rasize);
+       if (unlikely(ncopied))
+               return -EFAULT;
+
+       return 0;
+}
+
+#ifdef CONFIG_X86_64
+static bool is_riprel_insn(struct arch_uprobe *auprobe)
+{
+       return ((auprobe->fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) != 0);
+}
+
+static void
+handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
+{
+       if (is_riprel_insn(auprobe)) {
+               struct arch_uprobe_task *autask;
+
+               autask = &current->utask->autask;
+               if (auprobe->fixups & UPROBE_FIX_RIP_AX)
+                       regs->ax = autask->saved_scratch_register;
+               else
+                       regs->cx = autask->saved_scratch_register;
+
+               /*
+                * The original instruction includes a displacement, and so
+                * is 4 bytes longer than what we've just single-stepped.
+                * Fall through to handle stuff like "jmpq *...(%rip)" and
+                * "callq *...(%rip)".
+                */
+               if (correction)
+                       *correction += 4;
+       }
+}
+#else
+static void
+handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
+{
+       /* No RIP-relative addressing on 32-bit */
+}
+#endif
+
+/*
+ * If xol insn itself traps and generates a signal(Say,
+ * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
+ * instruction jumps back to its own address. It is assumed that anything
+ * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
+ *
+ * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
+ * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
+ * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+       if (t->thread.trap_nr != UPROBE_TRAP_NR)
+               return true;
+
+       return false;
+}
+
+/*
+ * Called after single-stepping. To avoid the SMP problems that can
+ * occur when we temporarily put back the original opcode to
+ * single-step, we single-stepped a copy of the instruction.
+ *
+ * This function prepares to resume execution after the single-step.
+ * We have to fix things up as follows:
+ *
+ * Typically, the new ip is relative to the copied instruction. We need
+ * to make it relative to the original instruction (FIX_IP). Exceptions
+ * are return instructions and absolute or indirect jump or call instructions.
+ *
+ * If the single-stepped instruction was a call, the return address that
+ * is atop the stack is the address following the copied instruction. We
+ * need to make it the address following the original instruction (FIX_CALL).
+ *
+ * If the original instruction was a rip-relative instruction such as
+ * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
+ * instruction using a scratch register -- e.g., "movl %edx,(%rax)".
+ * We need to restore the contents of the scratch register and adjust
+ * the ip, keeping in mind that the instruction we executed is 4 bytes
+ * shorter than the original instruction (since we squeezed out the offset
+ * field). (FIX_RIP_AX or FIX_RIP_CX)
+ */
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+       struct uprobe_task *utask;
+       long correction;
+       int result = 0;
+
+       WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
+
+       utask = current->utask;
+       current->thread.trap_nr = utask->autask.saved_trap_nr;
+       correction = (long)(utask->vaddr - utask->xol_vaddr);
+       handle_riprel_post_xol(auprobe, regs, &correction);
+       if (auprobe->fixups & UPROBE_FIX_IP)
+               regs->ip += correction;
+
+       if (auprobe->fixups & UPROBE_FIX_CALL)
+               result = adjust_ret_addr(regs->sp, correction);
+
+       return result;
+}
+
+/* callback routine for handling exceptions. */
+int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
+{
+       struct die_args *args = data;
+       struct pt_regs *regs = args->regs;
+       int ret = NOTIFY_DONE;
+
+       /* We are only interested in userspace traps */
+       if (regs && !user_mode_vm(regs))
+               return NOTIFY_DONE;
+
+       switch (val) {
+       case DIE_INT3:
+               if (uprobe_pre_sstep_notifier(regs))
+                       ret = NOTIFY_STOP;
+
+               break;
+
+       case DIE_DEBUG:
+               if (uprobe_post_sstep_notifier(regs))
+                       ret = NOTIFY_STOP;
+
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * This function gets called when XOL instruction either gets trapped or
+ * the thread has a fatal signal, so reset the instruction pointer to its
+ * probed address.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+       struct uprobe_task *utask = current->utask;
+
+       current->thread.trap_nr = utask->autask.saved_trap_nr;
+       handle_riprel_post_xol(auprobe, regs, NULL);
+       instruction_pointer_set(regs, utask->vaddr);
+}
+
+/*
+ * Skip these instructions as per the currently known x86 ISA.
+ * 0x66* { 0x90 | 0x0f 0x1f | 0x0f 0x19 | 0x87 0xc0 }
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+       int i;
+
+       for (i = 0; i < MAX_UINSN_BYTES; i++) {
+               if ((auprobe->insn[i] == 0x66))
+                       continue;
+
+               if (auprobe->insn[i] == 0x90)
+                       return true;
+
+               if (i == (MAX_UINSN_BYTES - 1))
+                       break;
+
+               if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x1f))
+                       return true;
+
+               if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x19))
+                       return true;
+
+               if ((auprobe->insn[i] == 0x87) && (auprobe->insn[i+1] == 0xc0))
+                       return true;
+
+               break;
+       }
+       return false;
+}