author	Masami Hiramatsu <mhiramat@redhat.com>	2009-04-06 22:00:58 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-07 11:31:07 -0400
commit	b918e5e60d775549478e4268155142156a95aa17 (patch)
tree	92e7d435088772803b24b1d9901ea3e17e6a9189 /kernel
parent	96a6d9aa096aeb758273a8fb6388c279ecef5e7e (diff)
kprobes: cleanup aggr_kprobe related code
Currently, kprobes can disable all probes at once, but can't disable them individually (not unregister, just disable a kprobe, because unregistering needs to wait for scheduler synchronization). These patches introduce APIs for on-the-fly per-probe disabling and re-enabling by disarming/re-arming the probe's breakpoint instruction.

This patch: change old_p to ap in add_new_kprobe() for readability, copy the flags member in add_aggr_kprobe(), and simplify the code flow of register_aggr_kprobe().

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
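The cover text above describes the goal of the series rather than this patch's mechanics, so a caller-side sketch may help. The module below is a hypothetical illustration: it assumes the per-probe disable_kprobe()/enable_kprobe() helpers that later patches in this series add (this patch itself does not name them), together with the existing register_kprobe()/unregister_kprobe() interface, and the probed symbol is only an arbitrary example.

/*
 * Hypothetical illustration: register a probe, then temporarily disarm and
 * re-arm just that probe without unregistering it (so no need to wait for
 * scheduler synchronization).
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %s\n", p->symbol_name);
	return 0;	/* 0: let the probed instruction execute normally */
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* example symbol only */
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	int ret = register_kprobe(&kp);

	if (ret)
		return ret;

	/* Disarm only this probe's breakpoint, then re-arm it on the fly. */
	disable_kprobe(&kp);
	enable_kprobe(&kp);
	return 0;
}

static void __exit example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");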
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/kprobes.c	60
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5016bfb682b9..a55bfadfd766 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -518,20 +518,20 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
 }
 
 /*
- * Add the new probe to old_p->list. Fail if this is the
+ * Add the new probe to ap->list. Fail if this is the
  * second jprobe at the address - two jprobes can't coexist
  */
-static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
+static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
 	if (p->break_handler) {
-		if (old_p->break_handler)
+		if (ap->break_handler)
 			return -EEXIST;
-		list_add_tail_rcu(&p->list, &old_p->list);
-		old_p->break_handler = aggr_break_handler;
+		list_add_tail_rcu(&p->list, &ap->list);
+		ap->break_handler = aggr_break_handler;
 	} else
-		list_add_rcu(&p->list, &old_p->list);
-	if (p->post_handler && !old_p->post_handler)
-		old_p->post_handler = aggr_post_handler;
+		list_add_rcu(&p->list, &ap->list);
+	if (p->post_handler && !ap->post_handler)
+		ap->post_handler = aggr_post_handler;
 	return 0;
 }
 
@@ -544,6 +544,7 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 	copy_kprobe(p, ap);
 	flush_insn_slot(ap);
 	ap->addr = p->addr;
+	ap->flags = p->flags;
 	ap->pre_handler = aggr_pre_handler;
 	ap->fault_handler = aggr_fault_handler;
 	/* We don't care the kprobe which has gone. */
@@ -566,44 +567,43 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 			  struct kprobe *p)
 {
 	int ret = 0;
-	struct kprobe *ap;
+	struct kprobe *ap = old_p;
 
-	if (kprobe_gone(old_p)) {
+	if (old_p->pre_handler != aggr_pre_handler) {
+		/* If old_p is not an aggr_probe, create new aggr_kprobe. */
+		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+		if (!ap)
+			return -ENOMEM;
+		add_aggr_kprobe(ap, old_p);
+	}
+
+	if (kprobe_gone(ap)) {
 		/*
 		 * Attempting to insert new probe at the same location that
 		 * had a probe in the module vaddr area which already
 		 * freed. So, the instruction slot has already been
 		 * released. We need a new slot for the new probe.
 		 */
-		ret = arch_prepare_kprobe(old_p);
+		ret = arch_prepare_kprobe(ap);
 		if (ret)
+			/*
+			 * Even if fail to allocate new slot, don't need to
+			 * free aggr_probe. It will be used next time, or
+			 * freed by unregister_kprobe.
+			 */
 			return ret;
-	}
-	if (old_p->pre_handler == aggr_pre_handler) {
-		copy_kprobe(old_p, p);
-		ret = add_new_kprobe(old_p, p);
-		ap = old_p;
-	} else {
-		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
-		if (!ap) {
-			if (kprobe_gone(old_p))
-				arch_remove_kprobe(old_p);
-			return -ENOMEM;
-		}
-		add_aggr_kprobe(ap, old_p);
-		copy_kprobe(ap, p);
-		ret = add_new_kprobe(ap, p);
-	}
-	if (kprobe_gone(old_p)) {
+		/* Clear gone flag to prevent allocating new slot again. */
+		ap->flags &= ~KPROBE_FLAG_GONE;
 		/*
 		 * If the old_p has gone, its breakpoint has been disarmed.
 		 * We have to arm it again after preparing real kprobes.
 		 */
-		ap->flags &= ~KPROBE_FLAG_GONE;
 		if (kprobe_enabled)
 			arch_arm_kprobe(ap);
 	}
-	return ret;
+
+	copy_kprobe(ap, p);
+	return add_new_kprobe(ap, p);
 }
 
 static int __kprobes in_kprobes_functions(unsigned long addr)
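For readers less familiar with the aggregate probes that add_new_kprobe() and add_aggr_kprobe() manage: when more than one kprobe is registered at the same address, the individual probes are chained onto a single aggr_kprobe and its aggregate handlers dispatch to them in turn. The following is a simplified, hypothetical sketch of that dispatch loop under those assumptions, not the kernel's exact implementation.

#include <linux/kprobes.h>
#include <linux/rculist.h>

/*
 * Simplified sketch (not the kernel's exact code): 'ap' is the aggregate
 * kprobe installed at the address; every user-registered kprobe at that
 * address sits on ap->list, and the aggregate pre-handler walks the list,
 * skipping probes whose owning module has gone away.
 */
static int aggr_pre_handler_sketch(struct kprobe *ap, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &ap->list, list) {
		if (kp->pre_handler && !kprobe_gone(kp)) {
			if (kp->pre_handler(kp, regs))
				return 1;	/* a handler took over */
		}
	}
	return 0;	/* continue with the probed instruction */
}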