author     Abhishek Sagar <sagar.abhishek@gmail.com>    2007-06-11 18:20:10 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2008-01-26 10:25:16 -0500
commit     24ba613c9d6cad315f484e658288db152f1dc447 (patch)
tree       0a94ff96c77ccf7e0415bd2bc76ab400468c5e6e /arch/arm/kernel/kprobes.c
parent     35aa1df4328340f38edc46f00837f08d33d49f63 (diff)
ARM kprobes: core code
This is a full implementation of Kprobes including Jprobes and
Kretprobes support.
This ARM implementation does not follow the usual kprobes double-
exception model. In the traditional model, the initial kprobes
breakpoint calls kprobe_handler(), which returns from the exception to
execute the instruction in its original context, then immediately
re-enters through a second breakpoint (or single-stepping exception)
into post_kprobe_handler(), each time the probe is hit. The ARM
implementation takes only one kprobes exception per hit, so there is
no post_kprobe_handler() phase. All side effects of the kprobe'd
instruction are resolved before returning from the initial exception.
As a result, every instruction is _always_ effectively boosted,
regardless of the instruction type and regardless of whether or not
there is a post-handler for the probe.
Signed-off-by: Abhishek Sagar <sagar.abhishek@gmail.com>
Signed-off-by: Quentin Barnes <qbarnes@gmail.com>
Signed-off-by: Nicolas Pitre <nico@marvell.com>
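
For readers unfamiliar with the generic kprobes API this arch code plugs into, here is a minimal, hypothetical usage sketch (not part of this patch): a module that plants a probe with both a pre- and a post-handler. The target symbol ("do_fork") and the printk messages are arbitrary choices for illustration; on this ARM implementation both handlers run from the single kprobes exception described above.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Pre-handler: runs before the probed instruction is executed. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p, ARM_pc=%08lx\n",
	       p->addr, regs->ARM_pc);
	return 0;	/* 0 = continue with normal probe processing */
}

/* Post-handler: runs after the probed instruction has been carried out. */
static void handler_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	printk(KERN_INFO "post-handler: ARM_cpsr=%08lx\n", regs->ARM_cpsr);
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* arbitrary example target */
	.pre_handler	= handler_pre,
	.post_handler	= handler_post,
};

static int __init kprobe_example_init(void)
{
	int ret = register_kprobe(&kp);

	if (ret < 0)
		return ret;
	printk(KERN_INFO "planted kprobe at %p\n", kp.addr);
	return 0;
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");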
Diffstat (limited to 'arch/arm/kernel/kprobes.c')
-rw-r--r--  arch/arm/kernel/kprobes.c | 453
1 file changed, 453 insertions, 0 deletions
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
new file mode 100644
index 000000000000..a18a8458e99d
--- /dev/null
+++ b/arch/arm/kernel/kprobes.c
@@ -0,0 +1,453 @@
/*
 * arch/arm/kernel/kprobes.c
 *
 * Kprobes on ARM
 *
 * Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2006, 2007 Motorola Inc.
 *
 * Nicolas Pitre <nico@marvell.com>
 * Copyright (C) 2007 Marvell Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <asm/traps.h>
#include <asm/cacheflush.h>

/*
 * This undefined instruction must be unique and
 * reserved solely for kprobes' use.
 */
#define KPROBE_BREAKPOINT_INSTRUCTION	0xe7f001f8

#define MIN_STACK_SIZE(addr) 				\
	min((unsigned long)MAX_STACK_SIZE,		\
	    (unsigned long)current_thread_info() + THREAD_START_SP - (addr))

#define flush_insns(addr, cnt) 				\
	flush_icache_range((unsigned long)(addr),	\
			   (unsigned long)(addr) +	\
			   sizeof(kprobe_opcode_t) * (cnt))

/* Used as a marker in ARM_pc to note when we're in a jprobe. */
#define JPROBE_MAGIC_ADDR		0xffffffff

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);


int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	kprobe_opcode_t insn;
	kprobe_opcode_t tmp_insn[MAX_INSN_SIZE];
	unsigned long addr = (unsigned long)p->addr;
	int is;

	if (addr & 0x3)
		return -EINVAL;

	insn = *p->addr;
	p->opcode = insn;
	p->ainsn.insn = tmp_insn;

	switch (arm_kprobe_decode_insn(insn, &p->ainsn)) {
	case INSN_REJECTED:	/* not supported */
		return -EINVAL;

	case INSN_GOOD:		/* instruction uses slot */
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			return -ENOMEM;
		for (is = 0; is < MAX_INSN_SIZE; ++is)
			p->ainsn.insn[is] = tmp_insn[is];
		flush_insns(&p->ainsn.insn, MAX_INSN_SIZE);
		break;

	case INSN_GOOD_NO_SLOT:	/* instruction doesn't need insn slot */
		p->ainsn.insn = NULL;
		break;
	}

	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = KPROBE_BREAKPOINT_INSTRUCTION;
	flush_insns(p->addr, 1);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_insns(p->addr, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		mutex_lock(&kprobe_mutex);
		free_insn_slot(p->ainsn.insn, 0);
		mutex_unlock(&kprobe_mutex);
		p->ainsn.insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__get_cpu_var(current_kprobe) = p;
}

static void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs,
				 struct kprobe_ctlblk *kcb)
{
	regs->ARM_pc += 4;
	p->ainsn.insn_handler(p, regs);
}

/*
 * Called with IRQs disabled. IRQs must remain disabled from that point
 * all the way until processing this kprobe is complete.  The current
 * kprobes implementation cannot process more than one nested level of
 * kprobe, and that level is reserved for user kprobe handlers, so we can't
 * risk encountering a new kprobe in an interrupt handler.
 */
void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur;
	struct kprobe_ctlblk *kcb;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->ARM_pc;

	kcb = get_kprobe_ctlblk();
	cur = kprobe_running();
	p = get_kprobe(addr);

	if (p) {
		if (cur) {
			/* Kprobe is pending, so we're recursing. */
			switch (kcb->kprobe_status) {
			case KPROBE_HIT_ACTIVE:
			case KPROBE_HIT_SSDONE:
				/* A pre- or post-handler probe got us here. */
				kprobes_inc_nmissed_count(p);
				save_previous_kprobe(kcb);
				set_current_kprobe(p);
				kcb->kprobe_status = KPROBE_REENTER;
				singlestep(p, regs, kcb);
				restore_previous_kprobe(kcb);
				break;
			default:
				/* impossible cases */
				BUG();
			}
		} else {
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry,
			 * so get out doing nothing more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				kcb->kprobe_status = KPROBE_HIT_SS;
				singlestep(p, regs, kcb);
				if (p->post_handler) {
					kcb->kprobe_status = KPROBE_HIT_SSDONE;
					p->post_handler(p, regs, 0);
				}
				reset_current_kprobe();
			}
		}
	} else if (cur) {
		/* We probably hit a jprobe.  Call its break handler. */
		if (cur->break_handler && cur->break_handler(cur, regs)) {
			kcb->kprobe_status = KPROBE_HIT_SS;
			singlestep(cur, regs, kcb);
			if (cur->post_handler) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				cur->post_handler(cur, regs, 0);
			}
		}
		reset_current_kprobe();
	} else {
		/*
		 * The probe was removed and a race is in progress.
		 * There is nothing we can do about it.  Let's restart
		 * the instruction.  By the time we can restart, the
		 * real instruction will be there.
		 */
	}
}

static int kprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
{
	kprobe_handler(regs);
	return 0;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the PC to point back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ARM_pc = (long)cur->addr;
		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		} else {
			reset_current_kprobe();
		}
		break;

	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;
		break;

	default:
		break;
	}

	return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	/*
	 * notify_die() is currently never called on ARM,
	 * so this callback is currently empty.
	 */
	return NOTIFY_DONE;
}

/*
 * When a retprobed function returns, trampoline_handler() is called,
 * calling the kretprobe's handler. We construct a struct pt_regs to
 * give a view of registers r0-r11 to the user return-handler.  This is
 * not a complete pt_regs structure, but that should be plenty sufficient
 * for kretprobe handlers which should normally be interested in r0 only
 * anyway.
 */
static void __attribute__((naked)) __kprobes kretprobe_trampoline(void)
{
	__asm__ __volatile__ (
		"stmdb	sp!, {r0 - r11}		\n\t"
		"mov	r0, sp			\n\t"
		"bl	trampoline_handler	\n\t"
		"mov	lr, r0			\n\t"
		"ldmia	sp!, {r0 - r11}		\n\t"
		"mov	pc, lr			\n\t"
		: : : "memory");
}

/* Called from kretprobe_trampoline */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return (void *)orig_ret_address;
}

/* Called with kretprobe_lock held. */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr;

	/* Replace the return addr with trampoline addr. */
	regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	long sp_addr = regs->ARM_sp;

	kcb->jprobe_saved_regs = *regs;
	memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
	regs->ARM_pc = (long)jp->entry;
	regs->ARM_cpsr |= PSR_I_BIT;
	preempt_disable();
	return 1;
}

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	__asm__ __volatile__ (
		/*
		 * Setup an empty pt_regs. Fill SP and PC fields as
		 * they're needed by longjmp_break_handler.
		 */
		"sub	sp, %0, %1		\n\t"
		"ldr	r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
		"str	%0, [sp, %2]		\n\t"
		"str	r0, [sp, %3]		\n\t"
		"mov	r0, sp			\n\t"
		"bl	kprobe_handler		\n\t"

		/*
		 * Return to the context saved by setjmp_pre_handler
		 * and restored by longjmp_break_handler.
		 */
		"ldr	r0, [sp, %4]		\n\t"
		"msr	cpsr_cxsf, r0		\n\t"
		"ldmia	sp, {r0 - pc}		\n\t"
		:
		: "r" (kcb->jprobe_saved_regs.ARM_sp),
		  "I" (sizeof(struct pt_regs)),
		  "J" (offsetof(struct pt_regs, ARM_sp)),
		  "J" (offsetof(struct pt_regs, ARM_pc)),
		  "J" (offsetof(struct pt_regs, ARM_cpsr))
		: "memory", "cc");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	long stack_addr = kcb->jprobe_saved_regs.ARM_sp;
	long orig_sp = regs->ARM_sp;
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if (regs->ARM_pc == JPROBE_MAGIC_ADDR) {
		if (orig_sp != stack_addr) {
			struct pt_regs *saved_regs =
				(struct pt_regs *)kcb->jprobe_saved_regs.ARM_sp;
			printk("current sp %lx does not match saved sp %lx\n",
			       orig_sp, stack_addr);
			printk("Saved registers for jprobe %p\n", jp);
			show_regs(saved_regs);
			printk("Current registers\n");
			show_regs(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((void *)stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}

static struct undef_hook kprobes_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= KPROBE_BREAKPOINT_INSTRUCTION,
	.cpsr_mask	= MODE_MASK,
	.cpsr_val	= SVC_MODE,
	.fn		= kprobe_trap_handler,
};

int __init arch_init_kprobes()
{
	arm_kprobe_decode_init();
	register_undef_hook(&kprobes_break_hook);
	return 0;
}
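
The kretprobe support above (kretprobe_trampoline() and trampoline_handler()) is driven through the generic register_kretprobe() interface. Below is a minimal, hypothetical usage sketch, again not part of this patch; the target symbol, maxactive value, and message text are arbitrary, and it assumes the probed function's return value is visible in regs->ARM_r0 when the return handler runs via the trampoline.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Return handler: reports the probed function's return value and caller. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk(KERN_INFO "%s returned 0x%lx to %p\n",
	       ri->rp->kp.symbol_name, regs->ARM_r0, ri->ret_addr);
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= ret_handler,
	.maxactive	= 4,	/* how many concurrent instances to track */
	.kp = {
		.symbol_name = "do_fork",	/* arbitrary example target */
	},
};

static int __init kretprobe_example_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit kretprobe_example_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}

module_init(kretprobe_example_init);
module_exit(kretprobe_example_exit);
MODULE_LICENSE("GPL");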