path: root/arch/ia64
author	Masami Hiramatsu <mhiramat@redhat.com>	2008-03-28 17:27:02 -0400
committer	Tony Luck <tony.luck@intel.com>	2008-04-09 13:36:43 -0400
commit	34e1ceb1881ec895ad9b1b52d073f414f3aa87a9 (patch)
tree	b29c808286d8f1a476e94bdd7e28f17aec7b6b97 /arch/ia64
parent	96ded9dadde397a9e372a650534a9ffbba97194a (diff)
[IA64] kprobes: kprobe-booster for ia64
Add kprobe-booster support on ia64.

Kprobe-booster improves the performance of kprobes by eliminating the
single-step where possible. Currently, kprobe-booster is implemented on
x86 and x86-64; this is an ia64 port.

On ia64, kprobe-booster executes the copied bundle directly instead of
single-stepping it. Bundles which have a B or X unit, or which may cause
an exception (including break), are not executed directly. In addition,
to avoid hitting break exceptions on the copied bundle, only the hindmost
kprobe is executed directly when several kprobes share a bundle and are
placed in different slots.

Note: set_brl_inst() is used to prepare an instruction buffer (it does not
modify any active code), so it does not need any atomic operation.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: bibo,mao <bibo.mao@intel.com>
Cc: Rusty Lynch <rusty.lynch@intel.com>
Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
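For orientation (illustration only, not part of the patch): the long-branch bundle that set_brl_inst() builds in the diff below encodes an IP-relative displacement counted in 16-byte bundles, split into an i bit, a 39-bit imm39 field, and a 20-bit imm20b field. A minimal standalone C sketch of that split, using arbitrary example addresses, might look like this; the field names follow the IA-64 brl.cond encoding, and the actual slot packing is done by the patch code itself.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical addresses: the copied-bundle buffer and the boost target */
        uint64_t from = 0xa000000100000000ULL;
        uint64_t to   = 0xa000000100000420ULL;

        /* IP-relative displacement in 16-byte bundles (as in set_brl_inst) */
        int64_t rel = ((int64_t)to - (int64_t)from) >> 4;

        /* brl.cond carries a 60-bit immediate split into i : imm39 : imm20b */
        uint64_t imm20b = (uint64_t)rel & 0xfffff;                 /* low 20 bits  */
        uint64_t imm39  = ((uint64_t)rel >> 20) & 0x7fffffffffULL; /* next 39 bits */
        uint64_t i      = ((uint64_t)rel >> 59) & 0x1;             /* sign bit     */

        printf("rel=%lld i=%llu imm39=%#llx imm20b=%#llx\n",
               (long long)rel, (unsigned long long)i,
               (unsigned long long)imm39, (unsigned long long)imm20b);
        return 0;
}

Compiled as ordinary user-space C, this performs the same split that set_brl_inst() does before packing slot1/slot2 of the MLX bundle.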
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/kernel/kprobes.c	133
1 file changed, 107 insertions(+), 26 deletions(-)
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 8d9a446a0d17..233434f4f88f 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -78,6 +78,20 @@ static enum instruction_type bundle_encoding[32][3] = {
   { u, u, u },			/* 1F */
 };
 
+/* Insert a long branch code */
+static void __kprobes set_brl_inst(void *from, void *to)
+{
+	s64 rel = ((s64) to - (s64) from) >> 4;
+	bundle_t *brl;
+	brl = (bundle_t *) ((u64) from & ~0xf);
+	brl->quad0.template = 0x05;	/* [MLX](stop) */
+	brl->quad0.slot0 = NOP_M_INST;	/* nop.m 0x0 */
+	brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
+	brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
+	/* brl.cond.sptk.many.clr rel<<4 (qp=0) */
+	brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
+}
+
 /*
  * In this function we check to see if the instruction
  * is IP relative instruction and update the kprobe
@@ -496,6 +510,77 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 	regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
 }
 
+/* Check the instruction in the slot is break */
+static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
+{
+	unsigned int major_opcode;
+	unsigned int template = bundle->quad0.template;
+	unsigned long kprobe_inst;
+
+	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
+	if (slot == 1 && bundle_encoding[template][1] == L)
+		slot++;
+
+	/* Get Kprobe probe instruction at given slot*/
+	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
+
+	/* For break instruction,
+	 * Bits 37:40 Major opcode to be zero
+	 * Bits 27:32 X6 to be zero
+	 * Bits 32:35 X3 to be zero
+	 */
+	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
+		/* Not a break instruction */
+		return 0;
+	}
+
+	/* Is a break instruction */
+	return 1;
+}
+
+/*
+ * In this function, we check whether the target bundle modifies IP or
+ * it triggers an exception. If so, it cannot be boostable.
+ */
+static int __kprobes can_boost(bundle_t *bundle, uint slot,
+			       unsigned long bundle_addr)
+{
+	unsigned int template = bundle->quad0.template;
+
+	do {
+		if (search_exception_tables(bundle_addr + slot) ||
+		    __is_ia64_break_inst(bundle, slot))
+			return 0;	/* exception may occur in this bundle*/
+	} while ((++slot) < 3);
+	template &= 0x1e;
+	if (template >= 0x10 /* including B unit */ ||
+	    template == 0x04 /* including X unit */ ||
+	    template == 0x06) /* undefined */
+		return 0;
+
+	return 1;
+}
+
+/* Prepare long jump bundle and disables other boosters if need */
+static void __kprobes prepare_booster(struct kprobe *p)
+{
+	unsigned long addr = (unsigned long)p->addr & ~0xFULL;
+	unsigned int slot = (unsigned long)p->addr & 0xf;
+	struct kprobe *other_kp;
+
+	if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) {
+		set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1);
+		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
+	}
+
+	/* disables boosters in previous slots */
+	for (; addr < (unsigned long)p->addr; addr++) {
+		other_kp = get_kprobe((void *)addr);
+		if (other_kp)
+			other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
+	}
+}
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long) p->addr;
@@ -530,6 +615,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 
 	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);
 
+	prepare_booster(p);
+
 	return 0;
 }
 
@@ -543,7 +630,9 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 	src = &p->opcode.bundle;
 
 	flush_icache_range((unsigned long)p->ainsn.insn,
-		(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
+		(unsigned long)p->ainsn.insn +
+		sizeof(kprobe_opcode_t) * MAX_INSN_SIZE);
+
 	switch (p->ainsn.slot) {
 	case 0:
 		dest->quad0.slot0 = src->quad0.slot0;
@@ -584,13 +673,13 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn, 0);
+	free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
 	mutex_unlock(&kprobe_mutex);
 }
 /*
  * We are resuming execution after a single step fault, so the pt_regs
  * structure reflects the register state after we executed the instruction
- * located in the kprobe (p->ainsn.insn.bundle). We still need to adjust
+ * located in the kprobe (p->ainsn.insn->bundle). We still need to adjust
  * the ip to point back to the original stack address. To set the IP address
  * to original stack address, handle the case where we need to fixup the
  * relative IP address and/or fixup branch register.
@@ -607,7 +696,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 	if (slot == 1 && bundle_encoding[template][1] == L)
 		slot = 2;
 
-	if (p->ainsn.inst_flag) {
+	if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {
 
 		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
 			/* Fix relative IP address */
@@ -686,33 +775,12 @@ static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
 static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
 {
 	unsigned int slot = ia64_psr(regs)->ri;
-	unsigned int template, major_opcode;
-	unsigned long kprobe_inst;
 	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
 	bundle_t bundle;
 
 	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
-	template = bundle.quad0.template;
-
-	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
-	if (slot == 1 && bundle_encoding[template][1] == L)
-		slot++;
 
-	/* Get Kprobe probe instruction at given slot*/
-	get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
-
-	/* For break instruction,
-	 * Bits 37:40 Major opcode to be zero
-	 * Bits 27:32 X6 to be zero
-	 * Bits 32:35 X3 to be zero
-	 */
-	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) {
-		/* Not a break instruction */
-		return 0;
-	}
-
-	/* Is a break instruction */
-	return 1;
+	return __is_ia64_break_inst(&bundle, slot);
 }
 
 static int __kprobes pre_kprobes_handler(struct die_args *args)
@@ -802,6 +870,19 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 	return 1;
 
 ss_probe:
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
+		/* Boost up -- we can execute copied instructions directly */
+		ia64_psr(regs)->ri = p->ainsn.slot;
+		regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL;
+		/* turn single stepping off */
+		ia64_psr(regs)->ss = 0;
+
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+		return 1;
+	}
+#endif
 	prepare_ss(p, regs);
 	kcb->kprobe_status = KPROBE_HIT_SS;
 	return 1;