diff options
author | Masami Hiramatsu <mhiramat@redhat.com> | 2008-04-28 05:14:28 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-28 11:58:32 -0400 |
commit | 9861668f747895608cea425f8457989d8dd2edf2 (patch) | |
tree | db5dad352826407afb549a8cfa44d4c66823af15 | |
parent | 99602572812442d47403d85f376ad51298dd82a6 (diff) |
kprobes: add (un)register_kprobes for batch registration
Introduce register_kprobes()/unregister_kprobes() for kprobe batch registration. This
can reduce waiting time for synchronize_sched() when a lot of probes have to
be unregistered at once.
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Cc: David Miller <davem@davemloft.net>
Cc: "Frank Ch. Eigler" <fche@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/linux/kprobes.h | 9 | ||||
-rw-r--r-- | kernel/kprobes.c | 124 |
2 files changed, 96 insertions, 37 deletions
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index cd507ab4fed7..2ba7df645a84 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
@@ -234,6 +234,8 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) | |||
234 | 234 | ||
235 | int register_kprobe(struct kprobe *p); | 235 | int register_kprobe(struct kprobe *p); |
236 | void unregister_kprobe(struct kprobe *p); | 236 | void unregister_kprobe(struct kprobe *p); |
237 | int register_kprobes(struct kprobe **kps, int num); | ||
238 | void unregister_kprobes(struct kprobe **kps, int num); | ||
237 | int setjmp_pre_handler(struct kprobe *, struct pt_regs *); | 239 | int setjmp_pre_handler(struct kprobe *, struct pt_regs *); |
238 | int longjmp_break_handler(struct kprobe *, struct pt_regs *); | 240 | int longjmp_break_handler(struct kprobe *, struct pt_regs *); |
239 | int register_jprobe(struct jprobe *p); | 241 | int register_jprobe(struct jprobe *p); |
@@ -261,9 +263,16 @@ static inline int register_kprobe(struct kprobe *p) | |||
261 | { | 263 | { |
262 | return -ENOSYS; | 264 | return -ENOSYS; |
263 | } | 265 | } |
266 | static inline int register_kprobes(struct kprobe **kps, int num) | ||
267 | { | ||
268 | return -ENOSYS; | ||
269 | } | ||
264 | static inline void unregister_kprobe(struct kprobe *p) | 270 | static inline void unregister_kprobe(struct kprobe *p) |
265 | { | 271 | { |
266 | } | 272 | } |
273 | static inline void unregister_kprobes(struct kprobe **kps, int num) | ||
274 | { | ||
275 | } | ||
267 | static inline int register_jprobe(struct jprobe *p) | 276 | static inline int register_jprobe(struct jprobe *p) |
268 | { | 277 | { |
269 | return -ENOSYS; | 278 | return -ENOSYS; |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index f02a4311768b..76275fc025a5 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -580,6 +580,7 @@ static int __kprobes __register_kprobe(struct kprobe *p, | |||
580 | } | 580 | } |
581 | 581 | ||
582 | p->nmissed = 0; | 582 | p->nmissed = 0; |
583 | INIT_LIST_HEAD(&p->list); | ||
583 | mutex_lock(&kprobe_mutex); | 584 | mutex_lock(&kprobe_mutex); |
584 | old_p = get_kprobe(p->addr); | 585 | old_p = get_kprobe(p->addr); |
585 | if (old_p) { | 586 | if (old_p) { |
@@ -606,35 +607,28 @@ out: | |||
606 | return ret; | 607 | return ret; |
607 | } | 608 | } |
608 | 609 | ||
609 | int __kprobes register_kprobe(struct kprobe *p) | 610 | /* |
610 | { | 611 | * Unregister a kprobe without a scheduler synchronization. |
611 | return __register_kprobe(p, (unsigned long)__builtin_return_address(0)); | 612 | */ |
612 | } | 613 | static int __kprobes __unregister_kprobe_top(struct kprobe *p) |
613 | |||
614 | void __kprobes unregister_kprobe(struct kprobe *p) | ||
615 | { | 614 | { |
616 | struct module *mod; | ||
617 | struct kprobe *old_p, *list_p; | 615 | struct kprobe *old_p, *list_p; |
618 | int cleanup_p; | ||
619 | 616 | ||
620 | mutex_lock(&kprobe_mutex); | ||
621 | old_p = get_kprobe(p->addr); | 617 | old_p = get_kprobe(p->addr); |
622 | if (unlikely(!old_p)) { | 618 | if (unlikely(!old_p)) |
623 | mutex_unlock(&kprobe_mutex); | 619 | return -EINVAL; |
624 | return; | 620 | |
625 | } | ||
626 | if (p != old_p) { | 621 | if (p != old_p) { |
627 | list_for_each_entry_rcu(list_p, &old_p->list, list) | 622 | list_for_each_entry_rcu(list_p, &old_p->list, list) |
628 | if (list_p == p) | 623 | if (list_p == p) |
629 | /* kprobe p is a valid probe */ | 624 | /* kprobe p is a valid probe */ |
630 | goto valid_p; | 625 | goto valid_p; |
631 | mutex_unlock(&kprobe_mutex); | 626 | return -EINVAL; |
632 | return; | ||
633 | } | 627 | } |
634 | valid_p: | 628 | valid_p: |
635 | if (old_p == p || | 629 | if (old_p == p || |
636 | (old_p->pre_handler == aggr_pre_handler && | 630 | (old_p->pre_handler == aggr_pre_handler && |
637 | p->list.next == &old_p->list && p->list.prev == &old_p->list)) { | 631 | list_is_singular(&old_p->list))) { |
638 | /* | 632 | /* |
639 | * Only probe on the hash list. Disarm only if kprobes are | 633 | * Only probe on the hash list. Disarm only if kprobes are |
640 | * enabled - otherwise, the breakpoint would already have | 634 | * enabled - otherwise, the breakpoint would already have |
@@ -643,43 +637,97 @@ valid_p: | |||
643 | if (kprobe_enabled) | 637 | if (kprobe_enabled) |
644 | arch_disarm_kprobe(p); | 638 | arch_disarm_kprobe(p); |
645 | hlist_del_rcu(&old_p->hlist); | 639 | hlist_del_rcu(&old_p->hlist); |
646 | cleanup_p = 1; | ||
647 | } else { | 640 | } else { |
641 | if (p->break_handler) | ||
642 | old_p->break_handler = NULL; | ||
643 | if (p->post_handler) { | ||
644 | list_for_each_entry_rcu(list_p, &old_p->list, list) { | ||
645 | if ((list_p != p) && (list_p->post_handler)) | ||
646 | goto noclean; | ||
647 | } | ||
648 | old_p->post_handler = NULL; | ||
649 | } | ||
650 | noclean: | ||
648 | list_del_rcu(&p->list); | 651 | list_del_rcu(&p->list); |
649 | cleanup_p = 0; | ||
650 | } | 652 | } |
653 | return 0; | ||
654 | } | ||
651 | 655 | ||
652 | mutex_unlock(&kprobe_mutex); | 656 | static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) |
657 | { | ||
658 | struct module *mod; | ||
659 | struct kprobe *old_p; | ||
653 | 660 | ||
654 | synchronize_sched(); | ||
655 | if (p->mod_refcounted) { | 661 | if (p->mod_refcounted) { |
656 | mod = module_text_address((unsigned long)p->addr); | 662 | mod = module_text_address((unsigned long)p->addr); |
657 | if (mod) | 663 | if (mod) |
658 | module_put(mod); | 664 | module_put(mod); |
659 | } | 665 | } |
660 | 666 | ||
661 | if (cleanup_p) { | 667 | if (list_empty(&p->list) || list_is_singular(&p->list)) { |
662 | if (p != old_p) { | 668 | if (!list_empty(&p->list)) { |
663 | list_del_rcu(&p->list); | 669 | /* "p" is the last child of an aggr_kprobe */ |
670 | old_p = list_entry(p->list.next, struct kprobe, list); | ||
671 | list_del(&p->list); | ||
664 | kfree(old_p); | 672 | kfree(old_p); |
665 | } | 673 | } |
666 | arch_remove_kprobe(p); | 674 | arch_remove_kprobe(p); |
667 | } else { | 675 | } |
668 | mutex_lock(&kprobe_mutex); | 676 | } |
669 | if (p->break_handler) | 677 | |
670 | old_p->break_handler = NULL; | 678 | static int __register_kprobes(struct kprobe **kps, int num, |
671 | if (p->post_handler){ | 679 | unsigned long called_from) |
672 | list_for_each_entry_rcu(list_p, &old_p->list, list){ | 680 | { |
673 | if (list_p->post_handler){ | 681 | int i, ret = 0; |
674 | cleanup_p = 2; | 682 | |
675 | break; | 683 | if (num <= 0) |
676 | } | 684 | return -EINVAL; |
677 | } | 685 | for (i = 0; i < num; i++) { |
678 | if (cleanup_p == 0) | 686 | ret = __register_kprobe(kps[i], called_from); |
679 | old_p->post_handler = NULL; | 687 | if (ret < 0 && i > 0) { |
688 | unregister_kprobes(kps, i); | ||
689 | break; | ||
680 | } | 690 | } |
681 | mutex_unlock(&kprobe_mutex); | ||
682 | } | 691 | } |
692 | return ret; | ||
693 | } | ||
694 | |||
695 | /* | ||
696 | * Registration and unregistration functions for kprobe. | ||
697 | */ | ||
698 | int __kprobes register_kprobe(struct kprobe *p) | ||
699 | { | ||
700 | return __register_kprobes(&p, 1, | ||
701 | (unsigned long)__builtin_return_address(0)); | ||
702 | } | ||
703 | |||
704 | void __kprobes unregister_kprobe(struct kprobe *p) | ||
705 | { | ||
706 | unregister_kprobes(&p, 1); | ||
707 | } | ||
708 | |||
709 | int __kprobes register_kprobes(struct kprobe **kps, int num) | ||
710 | { | ||
711 | return __register_kprobes(kps, num, | ||
712 | (unsigned long)__builtin_return_address(0)); | ||
713 | } | ||
714 | |||
715 | void __kprobes unregister_kprobes(struct kprobe **kps, int num) | ||
716 | { | ||
717 | int i; | ||
718 | |||
719 | if (num <= 0) | ||
720 | return; | ||
721 | mutex_lock(&kprobe_mutex); | ||
722 | for (i = 0; i < num; i++) | ||
723 | if (__unregister_kprobe_top(kps[i]) < 0) | ||
724 | kps[i]->addr = NULL; | ||
725 | mutex_unlock(&kprobe_mutex); | ||
726 | |||
727 | synchronize_sched(); | ||
728 | for (i = 0; i < num; i++) | ||
729 | if (kps[i]->addr) | ||
730 | __unregister_kprobe_bottom(kps[i]); | ||
683 | } | 731 | } |
684 | 732 | ||
685 | static struct notifier_block kprobe_exceptions_nb = { | 733 | static struct notifier_block kprobe_exceptions_nb = { |
@@ -1118,6 +1166,8 @@ module_init(init_kprobes); | |||
1118 | 1166 | ||
1119 | EXPORT_SYMBOL_GPL(register_kprobe); | 1167 | EXPORT_SYMBOL_GPL(register_kprobe); |
1120 | EXPORT_SYMBOL_GPL(unregister_kprobe); | 1168 | EXPORT_SYMBOL_GPL(unregister_kprobe); |
1169 | EXPORT_SYMBOL_GPL(register_kprobes); | ||
1170 | EXPORT_SYMBOL_GPL(unregister_kprobes); | ||
1121 | EXPORT_SYMBOL_GPL(register_jprobe); | 1171 | EXPORT_SYMBOL_GPL(register_jprobe); |
1122 | EXPORT_SYMBOL_GPL(unregister_jprobe); | 1172 | EXPORT_SYMBOL_GPL(unregister_jprobe); |
1123 | #ifdef CONFIG_KPROBES | 1173 | #ifdef CONFIG_KPROBES |