author		Masami Hiramatsu <mhiramat@redhat.com>	2010-02-25 08:33:59 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-02-25 11:49:24 -0500
commit		4610ee1d3638fa05ba8e87ccfa971db8e4033ae7 (patch)
tree		6f03d6787616687e61f1c3ef27b68d96f66cc7d0 /kernel
parent		d498f763950703c724c650db1d34a1c8679f9ca8 (diff)
kprobes: Introduce generic insn_slot framework
Make insn_slot framework support various size slots.

Current insn_slot just supports one-size instruction buffer slot. However,
kprobes jump optimization needs larger size buffers.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: systemtap <systemtap@sources.redhat.com>
Cc: DLE <dle-develop@lists.sourceforge.net>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Anders Kaseorg <andersk@ksplice.com>
Cc: Tim Abbott <tabbott@ksplice.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <compudj@krystal.dyndns.org>
LKML-Reference: <20100225133358.6725.82430.stgit@localhost6.localdomain6>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Anders Kaseorg <andersk@ksplice.com>
Cc: Tim Abbott <tabbott@ksplice.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <compudj@krystal.dyndns.org>
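For context, the point of the change below is that each user of the insn_slot allocator gets its own struct kprobe_insn_cache, carrying its own slot size, page list and garbage counter, while __get_insn_slot()/__free_insn_slot() operate on whichever cache they are handed. A minimal sketch of how a second, larger-slot cache could be layered on the same helpers; the *optinsn* names and MAX_OPTINSN_SIZE are illustrative placeholders, not part of this patch:

/*
 * Illustrative sketch only (not part of this patch): with the generic
 * cache in place, a second cache with a larger slot size can be declared
 * exactly the way kprobe_insn_slots is declared in the diff below.
 * MAX_OPTINSN_SIZE and the *optinsn* names are hypothetical.
 */
static DEFINE_MUTEX(kprobe_optinsn_mutex);	/* Protects kprobe_optinsn_slots */
static struct kprobe_insn_cache kprobe_optinsn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	.insn_size = MAX_OPTINSN_SIZE,		/* larger than MAX_INSN_SIZE */
	.nr_garbage = 0,
};

kprobe_opcode_t *get_optinsn_slot(void)
{
	kprobe_opcode_t *ret;

	mutex_lock(&kprobe_optinsn_mutex);
	ret = __get_insn_slot(&kprobe_optinsn_slots);
	mutex_unlock(&kprobe_optinsn_mutex);
	return ret;
}

void free_optinsn_slot(kprobe_opcode_t *slot, int dirty)
{
	mutex_lock(&kprobe_optinsn_mutex);
	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
	mutex_unlock(&kprobe_optinsn_mutex);
}

Only the cache definition and its lock differ; the allocation, reuse and garbage-collection logic is shared.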
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/kprobes.c	104
1 file changed, 65 insertions(+), 39 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ccec774c716d..78105623d739 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -105,57 +105,74 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
  * stepping on the instruction on a vmalloced/kmalloced/data page
  * is a recipe for disaster
  */
-#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
-
 struct kprobe_insn_page {
 	struct list_head list;
 	kprobe_opcode_t *insns;		/* Page of instruction slots */
-	char slot_used[INSNS_PER_PAGE];
 	int nused;
 	int ngarbage;
+	char slot_used[];
+};
+
+#define KPROBE_INSN_PAGE_SIZE(slots)			\
+	(offsetof(struct kprobe_insn_page, slot_used) +	\
+	(sizeof(char) * (slots)))
+
+struct kprobe_insn_cache {
+	struct list_head pages;	/* list of kprobe_insn_page */
+	size_t insn_size;	/* size of instruction slot */
+	int nr_garbage;
 };
 
+static int slots_per_page(struct kprobe_insn_cache *c)
+{
+	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
+}
+
 enum kprobe_slot_state {
 	SLOT_CLEAN = 0,
 	SLOT_DIRTY = 1,
 	SLOT_USED = 2,
 };
 
-static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
-static LIST_HEAD(kprobe_insn_pages);
-static int kprobe_garbage_slots;
-static int collect_garbage_slots(void);
+static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
+static struct kprobe_insn_cache kprobe_insn_slots = {
+	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
+	.insn_size = MAX_INSN_SIZE,
+	.nr_garbage = 0,
+};
+static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
 
 /**
  * __get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
  */
-static kprobe_opcode_t __kprobes *__get_insn_slot(void)
+static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip;
 
  retry:
-	list_for_each_entry(kip, &kprobe_insn_pages, list) {
-		if (kip->nused < INSNS_PER_PAGE) {
+	list_for_each_entry(kip, &c->pages, list) {
+		if (kip->nused < slots_per_page(c)) {
 			int i;
-			for (i = 0; i < INSNS_PER_PAGE; i++) {
+			for (i = 0; i < slots_per_page(c); i++) {
 				if (kip->slot_used[i] == SLOT_CLEAN) {
 					kip->slot_used[i] = SLOT_USED;
 					kip->nused++;
-					return kip->insns + (i * MAX_INSN_SIZE);
+					return kip->insns + (i * c->insn_size);
 				}
 			}
-			/* Surprise!  No unused slots.  Fix kip->nused. */
-			kip->nused = INSNS_PER_PAGE;
+			/* kip->nused is broken. Fix it. */
+			kip->nused = slots_per_page(c);
+			WARN_ON(1);
 		}
 	}
 
 	/* If there are any garbage slots, collect it and try again. */
-	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
+	if (c->nr_garbage && collect_garbage_slots(c) == 0)
 		goto retry;
-	}
-	/* All out of space.  Need to allocate a new page. Use slot 0. */
-	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
+
+	/* All out of space.  Need to allocate a new page. */
+	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
 	if (!kip)
 		return NULL;
 
@@ -170,20 +187,23 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
 		return NULL;
 	}
 	INIT_LIST_HEAD(&kip->list);
-	list_add(&kip->list, &kprobe_insn_pages);
-	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
+	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
 	kip->slot_used[0] = SLOT_USED;
 	kip->nused = 1;
 	kip->ngarbage = 0;
+	list_add(&kip->list, &c->pages);
 	return kip->insns;
 }
 
+
 kprobe_opcode_t __kprobes *get_insn_slot(void)
 {
-	kprobe_opcode_t *ret;
+	kprobe_opcode_t *ret = NULL;
+
 	mutex_lock(&kprobe_insn_mutex);
-	ret = __get_insn_slot();
+	ret = __get_insn_slot(&kprobe_insn_slots);
 	mutex_unlock(&kprobe_insn_mutex);
+
 	return ret;
 }
 
@@ -199,7 +219,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
 	 * so as not to have to set it up again the
 	 * next time somebody inserts a probe.
 	 */
-	if (!list_is_singular(&kprobe_insn_pages)) {
+	if (!list_is_singular(&kip->list)) {
 		list_del(&kip->list);
 		module_free(NULL, kip->insns);
 		kfree(kip);
@@ -209,49 +229,55 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
 	return 0;
 }
 
-static int __kprobes collect_garbage_slots(void)
+static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip, *next;
 
 	/* Ensure no-one is interrupted on the garbages */
 	synchronize_sched();
 
-	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
+	list_for_each_entry_safe(kip, next, &c->pages, list) {
 		int i;
 		if (kip->ngarbage == 0)
 			continue;
 		kip->ngarbage = 0;	/* we will collect all garbages */
-		for (i = 0; i < INSNS_PER_PAGE; i++) {
+		for (i = 0; i < slots_per_page(c); i++) {
 			if (kip->slot_used[i] == SLOT_DIRTY &&
 			    collect_one_slot(kip, i))
 				break;
 		}
 	}
-	kprobe_garbage_slots = 0;
+	c->nr_garbage = 0;
 	return 0;
 }
 
-void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
+static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
+				       kprobe_opcode_t *slot, int dirty)
 {
 	struct kprobe_insn_page *kip;
 
-	mutex_lock(&kprobe_insn_mutex);
-	list_for_each_entry(kip, &kprobe_insn_pages, list) {
-		if (kip->insns <= slot &&
-		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
-			int i = (slot - kip->insns) / MAX_INSN_SIZE;
+	list_for_each_entry(kip, &c->pages, list) {
+		long idx = ((long)slot - (long)kip->insns) / c->insn_size;
+		if (idx >= 0 && idx < slots_per_page(c)) {
+			WARN_ON(kip->slot_used[idx] != SLOT_USED);
 			if (dirty) {
-				kip->slot_used[i] = SLOT_DIRTY;
+				kip->slot_used[idx] = SLOT_DIRTY;
 				kip->ngarbage++;
+				if (++c->nr_garbage > slots_per_page(c))
+					collect_garbage_slots(c);
 			} else
-				collect_one_slot(kip, i);
-			break;
+				collect_one_slot(kip, idx);
+			return;
 		}
 	}
+	/* Could not free this slot. */
+	WARN_ON(1);
+}
 
-	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
-		collect_garbage_slots();
-
+void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
+{
+	mutex_lock(&kprobe_insn_mutex);
+	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
 	mutex_unlock(&kprobe_insn_mutex);
 }
 #endif
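The other structural trick worth calling out is the flexible array member: slot_used[] moves to the end of struct kprobe_insn_page, and KPROBE_INSN_PAGE_SIZE() computes the kmalloc size with offsetof(), so the per-slot bookkeeping array is sized at runtime from the cache's insn_size instead of by a compile-time INSNS_PER_PAGE. A standalone, userspace-style sketch of that allocation pattern (hypothetical names, for illustration only):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Same shape as kprobe_insn_page: fixed header plus a flexible array. */
struct demo_page {
	int nused;
	int ngarbage;
	char slot_used[];		/* sized at allocation time */
};

/* Mirror of KPROBE_INSN_PAGE_SIZE(): header size + one byte per slot. */
#define DEMO_PAGE_SIZE(slots) \
	(offsetof(struct demo_page, slot_used) + sizeof(char) * (slots))

int main(void)
{
	size_t slots = 4096 / 16;	/* e.g. PAGE_SIZE / slot size in bytes */
	struct demo_page *p = malloc(DEMO_PAGE_SIZE(slots));

	if (!p)
		return 1;
	memset(p->slot_used, 0, slots);	/* all slots start out "clean" */
	p->nused = 0;
	p->ngarbage = 0;
	printf("allocated %zu bytes for %zu slots\n",
	       DEMO_PAGE_SIZE(slots), slots);
	free(p);
	return 0;
}

Because the slot count is now a property of the cache, two caches with different insn_size values can share this one page structure, which is exactly what the jump-optimization work mentioned in the changelog needs.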