author      Rusty Lynch <rusty.lynch@intel.com>        2005-06-27 18:17:12 -0400
committer   Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-27 18:23:53 -0400
commit      9508dbfe39112813612085c00d55bacd398eddc6 (patch)
tree        5f1cac16508284f54d7402f71bebf085c4bf2e7f /arch
parent      ba8af12f432c4f00ddb0bc1068b57b20aac93ecf (diff)
[PATCH] Return probe redesign: ia64 specific implementation
The following patch implements function return probes for ia64 using the
revised design.  With this new design we no longer need some of the odd
hacks required by the previous ia64 return probe port that I sent out
for comments.

Note that this new implementation still does not resolve the problem noted
by Keith Owens where backtrace data is lost after a return probe is hit.

Changes include:
 * Addition of kretprobe_trampoline to act as a dummy function for
   instrumented functions to return to, and for the return probe
   infrastructure to place a kprobe on, gaining control so that the
   return probe handler can be called, and so that the instruction
   pointer can be moved back to the original return address.
 * Addition of arch_init(), allowing a kprobe to be registered on
   kretprobe_trampoline
 * Addition of trampoline_probe_handler(), which is used as the
   pre_handler for the kprobe inserted on kretprobe_trampoline.  This is
   the function that handles the details of calling the return probe
   handler and then returning control to the original return address
 * Addition of arch_prepare_kretprobe(), which is set up as the
   pre_handler for a kprobe registered at the beginning of the target
   function by kernel/kprobes.c, so that a return probe instance can be
   set up when a caller enters the target function.  (A return probe
   instance contains all the information trampoline_probe_handler needs
   to do its job.)
 * Hooks added to the exit path of a task so that we can clean up any
   left-over return probe instances (i.e. if a task dies while inside a
   targeted function, the return probe instance was reserved at the
   beginning of the function but the function never returns, so we need
   to mark the instance as unused.)

Signed-off-by: Rusty Lynch <rusty.lynch@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
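For context, here is a minimal sketch of what a client module of this
infrastructure could look like once the matching generic kretprobe support in
kernel/kprobes.c is in place.  The probed symbol (do_fork), the
kallsyms_lookup_name() address lookup, and the read of the ia64 return-value
register r8 are illustrative assumptions, not part of this patch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>

/* Called by trampoline_probe_handler() when the probed function returns.
 * Reading the return value from r8 is an ia64-specific assumption. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk(KERN_INFO "do_fork returned %ld\n", (long)regs->r8);
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler   = ret_handler,
	.maxactive = 20,	/* pre-allocate up to 20 concurrent instances */
};

static int __init kretprobe_example_init(void)
{
	/* Hypothetical address lookup; whether kallsyms_lookup_name() is
	 * usable from a module depends on the kernel configuration. */
	my_kretprobe.kp.addr =
		(kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
	if (!my_kretprobe.kp.addr)
		return -EINVAL;
	return register_kretprobe(&my_kretprobe);
}

static void __exit kretprobe_example_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
	printk(KERN_INFO "missed %d probe instances\n", my_kretprobe.nmissed);
}

module_init(kretprobe_example_init);
module_exit(kretprobe_example_exit);
MODULE_LICENSE("GPL");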
Diffstat (limited to 'arch')
-rw-r--r--   arch/ia64/kernel/kprobes.c   103
-rw-r--r--   arch/ia64/kernel/process.c    16
2 files changed, 117 insertions(+), 2 deletions(-)
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 5978823d5c63..c97e18e634ca 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -290,6 +290,94 @@ static inline void set_current_kprobe(struct kprobe *p)
 	current_kprobe = p;
 }
 
+static void kretprobe_trampoline(void)
+{
+}
+
+/*
+ * At this point the target function has been tricked into
+ * returning into our trampoline.  Lookup the associated instance
+ * and then:
+ *    - call the handler function
+ *    - cleanup by marking the instance as unused
+ *    - long jump back to the original return address
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
+	unsigned long orig_ret_address = 0;
+	unsigned long trampoline_address =
+		((struct fnptr *)kretprobe_trampoline)->ip;
+
+	head = kretprobe_inst_table_head(current);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path
+	 * have a return probe installed on them, and/or more than one
+	 * return probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+	regs->cr_iip = orig_ret_address;
+
+	unlock_kprobes();
+	preempt_enable_no_resched();
+
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we have handled unlocking
+	 * and re-enabling preemption.
+	 */
+	return 1;
+}
+
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
+		ri->ret_addr = (kprobe_opcode_t *)regs->b0;
+
+		/* Replace the return addr with trampoline addr */
+		regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
+
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
+}
+
 int arch_prepare_kprobe(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long) p->addr;
@@ -492,8 +580,8 @@ static int pre_kprobes_handler(struct die_args *args)
 	if (p->pre_handler && p->pre_handler(p, regs))
 		/*
 		 * Our pre-handler is specifically requesting that we just
-		 * do a return.  This is handling the case where the
-		 * pre-handler is really our special jprobe pre-handler.
+		 * do a return.  This is used for both the jprobe pre-handler
+		 * and the kretprobe trampoline
 		 */
 		return 1;
 
@@ -599,3 +687,14 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	*regs = jprobe_saved_regs;
 	return 1;
 }
+
+static struct kprobe trampoline_p = {
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init(void)
+{
+	trampoline_p.addr =
+		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
+	return register_kprobe(&trampoline_p);
+}
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index ebb71f3d6d19..6e35bff05d59 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -27,6 +27,7 @@
 #include <linux/efi.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/kprobes.h>
 
 #include <asm/cpu.h>
 #include <asm/delay.h>
@@ -707,6 +708,13 @@ kernel_thread_helper (int (*fn)(void *), void *arg)
 void
 flush_thread (void)
 {
+	/*
+	 * Remove function-return probe instances associated with this task
+	 * and put them back on the free list. Do not insert an exit probe for
+	 * this function, it will be disabled by kprobe_flush_task if you do.
+	 */
+	kprobe_flush_task(current);
+
 	/* drop floating-point and debug-register state if it exists: */
 	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
 	ia64_drop_fpu(current);
@@ -721,6 +729,14 @@ flush_thread (void)
 void
 exit_thread (void)
 {
+
+	/*
+	 * Remove function-return probe instances associated with this task
+	 * and put them back on the free list. Do not insert an exit probe for
+	 * this function, it will be disabled by kprobe_flush_task if you do.
+	 */
+	kprobe_flush_task(current);
+
 	ia64_drop_fpu(current);
 #ifdef CONFIG_PERFMON
 	/* if needed, stop monitoring and flush state to perfmon context */