path: root/arch/sparc64
author     David S. Miller <davem@huronp11.davemloft.net>   2008-02-09 06:40:55 -0500
committer  David S. Miller <davem@davemloft.net>            2008-02-09 06:42:22 -0500
commit     d38f1220666a2bd89c4f62d286723a3417b34b9e (patch)
tree       1bf4182b09bf2496d21c025023e87e4f2478f597 /arch/sparc64
parent     13fa14e185614066d96f90f09da08eebe58cbc8f (diff)
[SPARC64]: Add kretprobe support.

Passes the smoke tests, at least; the powerpc implementation was used as a guide.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/kernel/kprobes.c   113
1 file changed, 111 insertions(+), 2 deletions(-)
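
For context: a kretprobe fires a handler when a probed function returns, and this patch supplies the sparc64 architecture hooks (arch_prepare_kretprobe, the trampoline, and the trampoline's probe handler) that the generic kretprobe machinery relies on. As a rough illustration of what the feature enables, here is a minimal sketch of a client module using the generic kprobes API of this era; it is not part of this commit, and the probed symbol ("do_fork"), the module boilerplate, and the maxactive value are arbitrary choices for the example.

/*
 * Illustrative sketch only -- not part of this commit.  A minimal
 * kretprobe client; the probed symbol and maxactive value are
 * arbitrary example choices.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        /* Runs when the probed function returns; ri->ret_addr holds the
         * real return address saved by arch_prepare_kretprobe(). */
        printk(KERN_INFO "probed function returned to %p\n", ri->ret_addr);
        return 0;
}

static struct kretprobe example_krp = {
        .handler        = ret_handler,
        .kp.symbol_name = "do_fork",    /* example target function */
        .maxactive      = 20,           /* concurrent instances to allow */
};

static int __init example_init(void)
{
        return register_kretprobe(&example_krp);
}

static void __exit example_exit(void)
{
        unregister_kretprobe(&example_krp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
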
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index d94f901d321e..34fc3ddd5002 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -480,8 +480,117 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
         return 0;
 }
 
-/* architecture specific initialization */
-int arch_init_kprobes(void)
+/* Called with kretprobe_lock held.  The value stored in the return
+ * address register is actually 2 instructions before where the
+ * callee will return to.  Sequences usually look something like this
+ *
+ *      call    some_function   <--- return register points here
+ *       nop                    <--- call delay slot
+ *      whatever                <--- where callee returns to
+ *
+ * To keep trampoline_probe_handler logic simpler, we normalize the
+ * value kept in ri->ret_addr so we don't need to keep adjusting it
+ * back and forth.
+ */
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+                                      struct pt_regs *regs)
+{
+        ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);
+
+        /* Replace the return addr with trampoline addr */
+        regs->u_regs[UREG_RETPC] =
+                ((unsigned long)kretprobe_trampoline) - 8;
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+        struct kretprobe_instance *ri = NULL;
+        struct hlist_head *head, empty_rp;
+        struct hlist_node *node, *tmp;
+        unsigned long flags, orig_ret_address = 0;
+        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+        INIT_HLIST_HEAD(&empty_rp);
+        spin_lock_irqsave(&kretprobe_lock, flags);
+        head = kretprobe_inst_table_head(current);
+
+        /*
+         * It is possible to have multiple instances associated with a given
+         * task, either because multiple functions in the call path
+         * have a return probe installed on them, and/or more than one
+         * return probe was registered for a target function.
+         *
+         * We can handle this because:
+         *     - instances are always inserted at the head of the list
+         *     - when multiple return probes are registered for the same
+         *       function, the first instance's ret_addr will point to the
+         *       real return address, and all the rest will point to
+         *       kretprobe_trampoline
+         */
+        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+                if (ri->task != current)
+                        /* another task is sharing our hash bucket */
+                        continue;
+
+                if (ri->rp && ri->rp->handler)
+                        ri->rp->handler(ri, regs);
+
+                orig_ret_address = (unsigned long)ri->ret_addr;
+                recycle_rp_inst(ri, &empty_rp);
+
+                if (orig_ret_address != trampoline_address)
+                        /*
+                         * This is the real return address. Any other
+                         * instances associated with this task are for
+                         * other calls deeper on the call stack
+                         */
+                        break;
+        }
+
+        kretprobe_assert(ri, orig_ret_address, trampoline_address);
+        regs->tpc = orig_ret_address;
+        regs->tnpc = orig_ret_address + 4;
+
+        reset_current_kprobe();
+        spin_unlock_irqrestore(&kretprobe_lock, flags);
+        preempt_enable_no_resched();
+
+        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+                hlist_del(&ri->hlist);
+                kfree(ri);
+        }
+        /*
+         * By returning a non-zero value, we are telling
+         * kprobe_handler() that we don't want the post_handler
+         * to run (and have re-enabled preemption)
+         */
+        return 1;
+}
+
+void kretprobe_trampoline_holder(void)
+{
+        asm volatile(".global kretprobe_trampoline\n"
+                     "kretprobe_trampoline:\n"
+                     "\tnop\n"
+                     "\tnop\n");
+}
+static struct kprobe trampoline_p = {
+        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+        .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
 {
+        return register_kprobe(&trampoline_p);
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+        if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
+                return 1;
+
         return 0;
 }
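
A note on the least obvious part of the patch, the +8/-8 arithmetic in arch_prepare_kretprobe(): on SPARC the return-address register holds the address of the call instruction itself, and the hardware return sequence jumps to that value plus 8, skipping the delay slot. Saving ret_addr as UREG_RETPC + 8 and rewriting UREG_RETPC to kretprobe_trampoline - 8 therefore makes the callee "return" straight into the trampoline, whose registered kprobe (trampoline_p) traps into trampoline_probe_handler(). The following user-space sketch, with made-up addresses, merely restates that arithmetic; it is not kernel code.

/*
 * Illustrative sketch, not kernel code: the return-address
 * normalization performed by arch_prepare_kretprobe(), using
 * made-up addresses.
 */
#include <assert.h>

int main(void)
{
        unsigned long call_insn  = 0x1000;      /* address of the caller's "call" */
        unsigned long trampoline = 0x2000;      /* pretend kretprobe_trampoline address */

        /* UREG_RETPC (%o7/%i7) holds the address of the call itself... */
        unsigned long retpc = call_insn;

        /* ...so the callee really returns to call + 8, past the delay slot.
         * This is the value stashed in ri->ret_addr. */
        unsigned long saved_ret_addr = retpc + 8;

        /* Rewriting the register to trampoline - 8 makes the return
         * sequence (saved value + 8) land exactly on the trampoline. */
        unsigned long patched_retpc = trampoline - 8;

        assert(saved_ret_addr == call_insn + 8);
        assert(patched_retpc + 8 == trampoline);
        return 0;
}

trampoline_probe_handler() then restores regs->tpc and regs->tnpc to the saved real return address and that address plus 4, so execution resumes in the caller as if the detour through the trampoline had never happened.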