diff options
author | Ananth N Mavinakayanahalli <ananth@in.ibm.com> | 2005-06-27 18:17:01 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-27 18:23:52 -0400 |
commit | 9ec4b1f356b3bad928ae8e2aa9caebfa737d52df (patch) | |
tree | 24d27ffed66595a9d864448ec53200ca1745f62c /kernel/kprobes.c | |
parent | d3b8a1a8496c83bc4a3cc76505c29255af15572c (diff) |
[PATCH] kprobes: fix single-step out of line - take2
Now that PPC64 has no-execute support, here is a second try to fix the
single step out of line during kprobe execution. Kprobes on x86_64 already
solved this problem by allocating an executable page and using it as the
scratch area for stepping out of line. Reuse that.
Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r-- | kernel/kprobes.c | 101 |
1 file changed, 101 insertions, 0 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 334f37472c56..65242529a75f 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/hash.h> | 36 | #include <linux/hash.h> |
37 | #include <linux/init.h> | 37 | #include <linux/init.h> |
38 | #include <linux/module.h> | 38 | #include <linux/module.h> |
39 | #include <linux/moduleloader.h> | ||
39 | #include <asm/cacheflush.h> | 40 | #include <asm/cacheflush.h> |
40 | #include <asm/errno.h> | 41 | #include <asm/errno.h> |
41 | #include <asm/kdebug.h> | 42 | #include <asm/kdebug.h> |
@@ -50,6 +51,106 @@ unsigned int kprobe_cpu = NR_CPUS; | |||
50 | static DEFINE_SPINLOCK(kprobe_lock); | 51 | static DEFINE_SPINLOCK(kprobe_lock); |
51 | static struct kprobe *curr_kprobe; | 52 | static struct kprobe *curr_kprobe; |
52 | 53 | ||
54 | /* | ||
55 | * kprobe->ainsn.insn points to the copy of the instruction to be | ||
56 | * single-stepped. x86_64, POWER4 and above have no-exec support and | ||
57 | * stepping on the instruction on a vmalloced/kmalloced/data page | ||
58 | * is a recipe for disaster | ||
59 | */ | ||
60 | #define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) | ||
61 | |||
62 | struct kprobe_insn_page { | ||
63 | struct hlist_node hlist; | ||
64 | kprobe_opcode_t *insns; /* Page of instruction slots */ | ||
65 | char slot_used[INSNS_PER_PAGE]; | ||
66 | int nused; | ||
67 | }; | ||
68 | |||
69 | static struct hlist_head kprobe_insn_pages; | ||
70 | |||
71 | /** | ||
72 | * get_insn_slot() - Find a slot on an executable page for an instruction. | ||
73 | * We allocate an executable page if there's no room on existing ones. | ||
74 | */ | ||
75 | kprobe_opcode_t *get_insn_slot(void) | ||
76 | { | ||
77 | struct kprobe_insn_page *kip; | ||
78 | struct hlist_node *pos; | ||
79 | |||
80 | hlist_for_each(pos, &kprobe_insn_pages) { | ||
81 | kip = hlist_entry(pos, struct kprobe_insn_page, hlist); | ||
82 | if (kip->nused < INSNS_PER_PAGE) { | ||
83 | int i; | ||
84 | for (i = 0; i < INSNS_PER_PAGE; i++) { | ||
85 | if (!kip->slot_used[i]) { | ||
86 | kip->slot_used[i] = 1; | ||
87 | kip->nused++; | ||
88 | return kip->insns + (i * MAX_INSN_SIZE); | ||
89 | } | ||
90 | } | ||
91 | /* Surprise! No unused slots. Fix kip->nused. */ | ||
92 | kip->nused = INSNS_PER_PAGE; | ||
93 | } | ||
94 | } | ||
95 | |||
96 | /* All out of space. Need to allocate a new page. Use slot 0.*/ | ||
97 | kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL); | ||
98 | if (!kip) { | ||
99 | return NULL; | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * Use module_alloc so this page is within +/- 2GB of where the | ||
104 | * kernel image and loaded module images reside. This is required | ||
105 | * so x86_64 can correctly handle the %rip-relative fixups. | ||
106 | */ | ||
107 | kip->insns = module_alloc(PAGE_SIZE); | ||
108 | if (!kip->insns) { | ||
109 | kfree(kip); | ||
110 | return NULL; | ||
111 | } | ||
112 | INIT_HLIST_NODE(&kip->hlist); | ||
113 | hlist_add_head(&kip->hlist, &kprobe_insn_pages); | ||
114 | memset(kip->slot_used, 0, INSNS_PER_PAGE); | ||
115 | kip->slot_used[0] = 1; | ||
116 | kip->nused = 1; | ||
117 | return kip->insns; | ||
118 | } | ||
119 | |||
120 | void free_insn_slot(kprobe_opcode_t *slot) | ||
121 | { | ||
122 | struct kprobe_insn_page *kip; | ||
123 | struct hlist_node *pos; | ||
124 | |||
125 | hlist_for_each(pos, &kprobe_insn_pages) { | ||
126 | kip = hlist_entry(pos, struct kprobe_insn_page, hlist); | ||
127 | if (kip->insns <= slot && | ||
128 | slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { | ||
129 | int i = (slot - kip->insns) / MAX_INSN_SIZE; | ||
130 | kip->slot_used[i] = 0; | ||
131 | kip->nused--; | ||
132 | if (kip->nused == 0) { | ||
133 | /* | ||
134 | * Page is no longer in use. Free it unless | ||
135 | * it's the last one. We keep the last one | ||
136 | * so as not to have to set it up again the | ||
137 | * next time somebody inserts a probe. | ||
138 | */ | ||
139 | hlist_del(&kip->hlist); | ||
140 | if (hlist_empty(&kprobe_insn_pages)) { | ||
141 | INIT_HLIST_NODE(&kip->hlist); | ||
142 | hlist_add_head(&kip->hlist, | ||
143 | &kprobe_insn_pages); | ||
144 | } else { | ||
145 | module_free(NULL, kip->insns); | ||
146 | kfree(kip); | ||
147 | } | ||
148 | } | ||
149 | return; | ||
150 | } | ||
151 | } | ||
152 | } | ||
153 | |||
53 | /* Locks kprobe: irqs must be disabled */ | 154 | /* Locks kprobe: irqs must be disabled */ |
54 | void lock_kprobes(void) | 155 | void lock_kprobes(void) |
55 | { | 156 | { |