Diffstat (limited to 'arch/x86_64/kernel/kprobes.c')
-rw-r--r--  arch/x86_64/kernel/kprobes.c  113
1 file changed, 1 insertion(+), 112 deletions(-)
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 4e680f87a75f..6a1c88376bef 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -38,7 +38,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/preempt.h>
-#include <linux/moduleloader.h>
+
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/kdebug.h>
@@ -51,8 +51,6 @@ static struct kprobe *kprobe_prev;
 static unsigned long kprobe_status_prev, kprobe_old_rflags_prev, kprobe_saved_rflags_prev;
 static struct pt_regs jprobe_saved_regs;
 static long *jprobe_saved_rsp;
-static kprobe_opcode_t *get_insn_slot(void);
-static void free_insn_slot(kprobe_opcode_t *slot);
 void jprobe_return_end(void);
 
 /* copy of the kernel stack at the probe fire time */
@@ -681,112 +679,3 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	}
 	return 0;
 }
-
-/*
- * kprobe->ainsn.insn points to the copy of the instruction to be single-stepped.
- * By default on x86_64, pages we get from kmalloc or vmalloc are not
- * executable. Single-stepping an instruction on such a page yields an
- * oops. So instead of storing the instruction copies in their respective
- * kprobe objects, we allocate a page, map it executable, and store all the
- * instruction copies there. (We can allocate additional pages if somebody
- * inserts a huge number of probes.) Each page can hold up to INSNS_PER_PAGE
- * instruction slots, each of which is MAX_INSN_SIZE*sizeof(kprobe_opcode_t)
- * bytes.
- */
-#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE*sizeof(kprobe_opcode_t)))
-struct kprobe_insn_page {
-	struct hlist_node hlist;
-	kprobe_opcode_t *insns;		/* page of instruction slots */
-	char slot_used[INSNS_PER_PAGE];
-	int nused;
-};
-
-static struct hlist_head kprobe_insn_pages;
-
-/**
- * get_insn_slot() - Find a slot on an executable page for an instruction.
- * We allocate an executable page if there's no room on existing ones.
- */
-static kprobe_opcode_t *get_insn_slot(void)
-{
-	struct kprobe_insn_page *kip;
-	struct hlist_node *pos;
-
-	hlist_for_each(pos, &kprobe_insn_pages) {
-		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
-		if (kip->nused < INSNS_PER_PAGE) {
-			int i;
-			for (i = 0; i < INSNS_PER_PAGE; i++) {
-				if (!kip->slot_used[i]) {
-					kip->slot_used[i] = 1;
-					kip->nused++;
-					return kip->insns + (i*MAX_INSN_SIZE);
-				}
-			}
-			/* Surprise! No unused slots. Fix kip->nused. */
-			kip->nused = INSNS_PER_PAGE;
-		}
-	}
-
-	/* All out of space. Need to allocate a new page. Use slot 0.*/
-	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
-	if (!kip) {
-		return NULL;
-	}
-
-	/*
-	 * For the %rip-relative displacement fixups to be doable, we
-	 * need our instruction copy to be within +/- 2GB of any data it
-	 * might access via %rip. That is, within 2GB of where the
-	 * kernel image and loaded module images reside. So we allocate
-	 * a page in the module loading area.
-	 */
-	kip->insns = module_alloc(PAGE_SIZE);
-	if (!kip->insns) {
-		kfree(kip);
-		return NULL;
-	}
-	INIT_HLIST_NODE(&kip->hlist);
-	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
-	memset(kip->slot_used, 0, INSNS_PER_PAGE);
-	kip->slot_used[0] = 1;
-	kip->nused = 1;
-	return kip->insns;
-}
-
-/**
- * free_insn_slot() - Free instruction slot obtained from get_insn_slot().
- */
-static void free_insn_slot(kprobe_opcode_t *slot)
-{
-	struct kprobe_insn_page *kip;
-	struct hlist_node *pos;
-
-	hlist_for_each(pos, &kprobe_insn_pages) {
-		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
-		if (kip->insns <= slot
-		    && slot < kip->insns+(INSNS_PER_PAGE*MAX_INSN_SIZE)) {
-			int i = (slot - kip->insns) / MAX_INSN_SIZE;
-			kip->slot_used[i] = 0;
-			kip->nused--;
-			if (kip->nused == 0) {
-				/*
-				 * Page is no longer in use. Free it unless
-				 * it's the last one. We keep the last one
-				 * so as not to have to set it up again the
-				 * next time somebody inserts a probe.
-				 */
-				hlist_del(&kip->hlist);
-				if (hlist_empty(&kprobe_insn_pages)) {
-					INIT_HLIST_NODE(&kip->hlist);
-					hlist_add_head(&kip->hlist,
-						       &kprobe_insn_pages);
-				} else {
-					module_free(NULL, kip->insns);
-					kfree(kip);
-				}
-			}
-			return;
-		}
-	}
-}
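
The pool the removed code implements is small: with PAGE_SIZE of 4096 and, assuming the x86_64 definitions of the day (MAX_INSN_SIZE of 15 and a one-byte kprobe_opcode_t), INSNS_PER_PAGE evaluates to 4096 / 15 = 273 slots per executable page. The userspace sketch below re-creates the allocator to show the technique; it is an illustration, not the kernel code: mmap() with PROT_EXEC stands in for module_alloc(), a hand-rolled singly linked list replaces the hlist, and insn_t / struct insn_page are invented names. The kernel additionally unmaps a page once it becomes empty, unless it is the last one; that refinement is omitted here for brevity.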
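/*
 * Userspace sketch of the instruction-slot pool removed above
 * (it moved to kernel/kprobes.c).  Assumed stand-ins: mmap(PROT_EXEC)
 * for module_alloc(), a singly linked list for the kernel hlist.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define PAGE_SIZE	4096
#define MAX_INSN_SIZE	15	/* worst-case x86_64 instruction length */
typedef unsigned char insn_t;	/* stand-in for kprobe_opcode_t (a byte) */
#define INSNS_PER_PAGE	(PAGE_SIZE / (MAX_INSN_SIZE * sizeof(insn_t)))

struct insn_page {
	struct insn_page *next;
	insn_t *insns;			/* one executable page of slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
};

static struct insn_page *pages;

static insn_t *get_insn_slot(void)
{
	struct insn_page *kip;
	int i;

	/* First fit: scan existing pages for an unused slot. */
	for (kip = pages; kip; kip = kip->next) {
		if (kip->nused >= (int)INSNS_PER_PAGE)
			continue;
		for (i = 0; i < (int)INSNS_PER_PAGE; i++) {
			if (!kip->slot_used[i]) {
				kip->slot_used[i] = 1;
				kip->nused++;
				return kip->insns + i * MAX_INSN_SIZE;
			}
		}
	}

	/* All pages full: map a fresh executable page, hand out slot 0. */
	kip = calloc(1, sizeof(*kip));
	if (!kip)
		return NULL;
	kip->insns = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (kip->insns == MAP_FAILED) {
		free(kip);
		return NULL;
	}
	kip->slot_used[0] = 1;
	kip->nused = 1;
	kip->next = pages;
	pages = kip;
	return kip->insns;
}

static void free_insn_slot(insn_t *slot)
{
	struct insn_page *kip;

	/* Find the owning page by address range, then unmark the slot. */
	for (kip = pages; kip; kip = kip->next) {
		if (slot >= kip->insns &&
		    slot < kip->insns + INSNS_PER_PAGE * MAX_INSN_SIZE) {
			kip->slot_used[(slot - kip->insns) / MAX_INSN_SIZE] = 0;
			kip->nused--;
			return;
		}
	}
}

int main(void)
{
	insn_t *a = get_insn_slot();
	insn_t *b = get_insn_slot();

	if (!a || !b)
		return 1;
	printf("%zu slots/page; consecutive slots %td bytes apart\n",
	       INSNS_PER_PAGE, b - a);
	free_insn_slot(a);
	free_insn_slot(b);
	return 0;
}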
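Two design points from the removed comments carry over to the common-code version and explain what the sketch's mmap() call papers over. First, the page must come from the module loading area (module_alloc()) so that every instruction copy stays within +/- 2GB of the kernel and module images, keeping %rip-relative displacement fixups representable. Second, a page that becomes empty is kept if it is the last one, so the next probe insertion does not pay the setup cost again.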