Diffstat (limited to 'kernel/kprobes.c')
 -rw-r--r--	kernel/kprobes.c | 349
 1 file changed, 283 insertions(+), 66 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index fcfb580c3afc..1e0250cb9486 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -72,6 +72,18 @@ DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
+/*
+ * Normally, functions that we'd want to prohibit kprobes in, are marked
+ * __kprobes. But, there are cases where such functions already belong to
+ * a different section (__sched for preempt_schedule)
+ *
+ * For such cases, we now have a blacklist
+ */
+struct kprobe_blackpoint kprobe_blacklist[] = {
+	{"preempt_schedule",},
+	{NULL}    /* Terminator */
+};
+
 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
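Note: each blacklist entry pairs a symbol name with the address range that init_kprobes() later resolves for it. The struct is declared in include/linux/kprobes.h by the same series; sketched here from that series, so verify field names against the tree:

struct kprobe_blackpoint {
	const char *name;		/* symbol we refuse to probe */
	unsigned long start_addr;	/* resolved at init_kprobes() time */
	unsigned long range;		/* symbol size from kallsyms */
};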
@@ -417,6 +429,21 @@ static inline void free_rp_inst(struct kretprobe *rp)
 	}
 }
 
+static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
+{
+	unsigned long flags;
+	struct kretprobe_instance *ri;
+	struct hlist_node *pos, *next;
+	/* No race here */
+	spin_lock_irqsave(&kretprobe_lock, flags);
+	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
+		ri->rp = NULL;
+		hlist_del(&ri->uflist);
+	}
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
+	free_rp_inst(rp);
+}
+
 /*
  * Keep all fields in the kprobe consistent
  */
@@ -492,9 +519,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 
 static int __kprobes in_kprobes_functions(unsigned long addr)
 {
+	struct kprobe_blackpoint *kb;
+
 	if (addr >= (unsigned long)__kprobes_text_start &&
 	    addr < (unsigned long)__kprobes_text_end)
 		return -EINVAL;
+	/*
+	 * If there exists a kprobe_blacklist, verify and
+	 * fail any probe registration in the prohibited area
+	 */
+	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+		if (kb->start_addr) {
+			if (addr >= kb->start_addr &&
+			    addr < (kb->start_addr + kb->range))
+				return -EINVAL;
+		}
+	}
 	return 0;
 }
 
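With this loop in place, registration anywhere inside a blacklisted function now fails with -EINVAL, not just at its entry. A minimal, hypothetical module snippet showing the expected failure (module name and printk text are illustrative):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

static struct kprobe kp = {
	.symbol_name = "preempt_schedule",	/* on kprobe_blacklist */
};

static int __init blacktest_init(void)
{
	int ret = register_kprobe(&kp);

	/* Expect -EINVAL: the address falls inside a blacklisted range */
	printk(KERN_INFO "register_kprobe: %d\n", ret);
	return ret;
}
module_init(blacktest_init);
MODULE_LICENSE("GPL");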
@@ -555,6 +595,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	}
 
 	p->nmissed = 0;
+	INIT_LIST_HEAD(&p->list);
 	mutex_lock(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
@@ -581,35 +622,28 @@ out:
 	return ret;
 }
 
-int __kprobes register_kprobe(struct kprobe *p)
-{
-	return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
-}
-
-void __kprobes unregister_kprobe(struct kprobe *p)
+/*
+ * Unregister a kprobe without a scheduler synchronization.
+ */
+static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 {
-	struct module *mod;
 	struct kprobe *old_p, *list_p;
-	int cleanup_p;
 
-	mutex_lock(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
-	if (unlikely(!old_p)) {
-		mutex_unlock(&kprobe_mutex);
-		return;
-	}
+	if (unlikely(!old_p))
+		return -EINVAL;
+
 	if (p != old_p) {
 		list_for_each_entry_rcu(list_p, &old_p->list, list)
 			if (list_p == p)
 			/* kprobe p is a valid probe */
 				goto valid_p;
-		mutex_unlock(&kprobe_mutex);
-		return;
+		return -EINVAL;
 	}
 valid_p:
 	if (old_p == p ||
 	    (old_p->pre_handler == aggr_pre_handler &&
-	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
+	     list_is_singular(&old_p->list))) {
 		/*
 		 * Only probe on the hash list. Disarm only if kprobes are
 		 * enabled - otherwise, the breakpoint would already have
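list_is_singular() replaces the open-coded next/prev comparison: it is true when the list carries exactly one entry. The helper lands in include/linux/list.h alongside this series; quoted from memory, so double-check against the tree:

static inline int list_is_singular(const struct list_head *head)
{
	/* non-empty, and the first entry is also the last */
	return !list_empty(head) && (head->next == head->prev);
}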
@@ -618,43 +652,97 @@ valid_p:
 		if (kprobe_enabled)
 			arch_disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
-		cleanup_p = 1;
 	} else {
+		if (p->break_handler)
+			old_p->break_handler = NULL;
+		if (p->post_handler) {
+			list_for_each_entry_rcu(list_p, &old_p->list, list) {
+				if ((list_p != p) && (list_p->post_handler))
+					goto noclean;
+			}
+			old_p->post_handler = NULL;
+		}
+noclean:
 		list_del_rcu(&p->list);
-		cleanup_p = 0;
 	}
+	return 0;
+}
 
-	mutex_unlock(&kprobe_mutex);
+static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
+{
+	struct module *mod;
+	struct kprobe *old_p;
 
-	synchronize_sched();
 	if (p->mod_refcounted) {
 		mod = module_text_address((unsigned long)p->addr);
 		if (mod)
 			module_put(mod);
 	}
 
-	if (cleanup_p) {
-		if (p != old_p) {
-			list_del_rcu(&p->list);
+	if (list_empty(&p->list) || list_is_singular(&p->list)) {
+		if (!list_empty(&p->list)) {
+			/* "p" is the last child of an aggr_kprobe */
+			old_p = list_entry(p->list.next, struct kprobe, list);
+			list_del(&p->list);
 			kfree(old_p);
 		}
 		arch_remove_kprobe(p);
-	} else {
-		mutex_lock(&kprobe_mutex);
-		if (p->break_handler)
-			old_p->break_handler = NULL;
-		if (p->post_handler){
-			list_for_each_entry_rcu(list_p, &old_p->list, list){
-				if (list_p->post_handler){
-					cleanup_p = 2;
-					break;
-				}
-			}
-			if (cleanup_p == 0)
-				old_p->post_handler = NULL;
+	}
+}
+
+static int __register_kprobes(struct kprobe **kps, int num,
+	unsigned long called_from)
+{
+	int i, ret = 0;
+
+	if (num <= 0)
+		return -EINVAL;
+	for (i = 0; i < num; i++) {
+		ret = __register_kprobe(kps[i], called_from);
+		if (ret < 0 && i > 0) {
+			unregister_kprobes(kps, i);
+			break;
 		}
-		mutex_unlock(&kprobe_mutex);
 	}
+	return ret;
+}
+
+/*
+ * Registration and unregistration functions for kprobe.
+ */
+int __kprobes register_kprobe(struct kprobe *p)
+{
+	return __register_kprobes(&p, 1,
+				  (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kprobe(struct kprobe *p)
+{
+	unregister_kprobes(&p, 1);
+}
+
+int __kprobes register_kprobes(struct kprobe **kps, int num)
+{
+	return __register_kprobes(kps, num,
+				  (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kprobes(struct kprobe **kps, int num)
+{
+	int i;
+
+	if (num <= 0)
+		return;
+	mutex_lock(&kprobe_mutex);
+	for (i = 0; i < num; i++)
+		if (__unregister_kprobe_top(kps[i]) < 0)
+			kps[i]->addr = NULL;
+	mutex_unlock(&kprobe_mutex);
+
+	synchronize_sched();
+	for (i = 0; i < num; i++)
+		if (kps[i]->addr)
+			__unregister_kprobe_bottom(kps[i]);
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
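The point of the batch interfaces: unregistering N probes one at a time costs N synchronize_sched() waits, while unregister_kprobes() runs every top half under kprobe_mutex, synchronizes once, then runs every bottom half. A hypothetical caller (probe targets invented for illustration):

#include <linux/module.h>
#include <linux/kprobes.h>

#define NR_PROBES 2

static struct kprobe probe[NR_PROBES] = {
	{ .symbol_name = "do_fork" },
	{ .symbol_name = "kernel_thread" },
};
static struct kprobe *kps[NR_PROBES];

static int __init batch_init(void)
{
	int i;

	for (i = 0; i < NR_PROBES; i++)
		kps[i] = &probe[i];
	/* All-or-nothing: a mid-array failure unregisters the earlier ones */
	return register_kprobes(kps, NR_PROBES);
}

static void __exit batch_exit(void)
{
	/* One synchronize_sched() covers the whole array */
	unregister_kprobes(kps, NR_PROBES);
}
module_init(batch_init);
module_exit(batch_exit);
MODULE_LICENSE("GPL");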
@@ -667,24 +755,69 @@ unsigned long __weak arch_deref_entry_point(void *entry)
 	return (unsigned long)entry;
 }
 
-int __kprobes register_jprobe(struct jprobe *jp)
+static int __register_jprobes(struct jprobe **jps, int num,
+	unsigned long called_from)
 {
-	unsigned long addr = arch_deref_entry_point(jp->entry);
+	struct jprobe *jp;
+	int ret = 0, i;
 
-	if (!kernel_text_address(addr))
+	if (num <= 0)
 		return -EINVAL;
+	for (i = 0; i < num; i++) {
+		unsigned long addr;
+		jp = jps[i];
+		addr = arch_deref_entry_point(jp->entry);
+
+		if (!kernel_text_address(addr))
+			ret = -EINVAL;
+		else {
+			/* Todo: Verify probepoint is a function entry point */
+			jp->kp.pre_handler = setjmp_pre_handler;
+			jp->kp.break_handler = longjmp_break_handler;
+			ret = __register_kprobe(&jp->kp, called_from);
+		}
+		if (ret < 0 && i > 0) {
+			unregister_jprobes(jps, i);
+			break;
+		}
+	}
+	return ret;
+}
 
-	/* Todo: Verify probepoint is a function entry point */
-	jp->kp.pre_handler = setjmp_pre_handler;
-	jp->kp.break_handler = longjmp_break_handler;
-
-	return __register_kprobe(&jp->kp,
+int __kprobes register_jprobe(struct jprobe *jp)
+{
+	return __register_jprobes(&jp, 1,
 		(unsigned long)__builtin_return_address(0));
 }
 
 void __kprobes unregister_jprobe(struct jprobe *jp)
 {
-	unregister_kprobe(&jp->kp);
+	unregister_jprobes(&jp, 1);
+}
+
+int __kprobes register_jprobes(struct jprobe **jps, int num)
+{
+	return __register_jprobes(jps, num,
+		(unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_jprobes(struct jprobe **jps, int num)
+{
+	int i;
+
+	if (num <= 0)
+		return;
+	mutex_lock(&kprobe_mutex);
+	for (i = 0; i < num; i++)
+		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
+			jps[i]->kp.addr = NULL;
+	mutex_unlock(&kprobe_mutex);
+
+	synchronize_sched();
+	for (i = 0; i < num; i++) {
+		if (jps[i]->kp.addr)
+			__unregister_kprobe_bottom(&jps[i]->kp);
+	}
 }
 
 #ifdef CONFIG_KRETPROBES
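A jprobe handler still mirrors the probed function's signature and must finish with jprobe_return(); only the (un)registration plumbing changed, register_jprobe() now being the num == 1 case of the batch path. A hypothetical user (the do_fork prototype below is assumed to match the kernel of this era):

#include <linux/kernel.h>
#include <linux/kprobes.h>

static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
		     struct pt_regs *regs, unsigned long stack_size,
		     int __user *parent_tidptr, int __user *child_tidptr)
{
	printk(KERN_INFO "do_fork: clone_flags=0x%lx\n", clone_flags);
	jprobe_return();	/* mandatory: never falls through */
	return 0;		/* not reached */
}

static struct jprobe jp = {
	.entry	= jdo_fork,
	.kp	= { .symbol_name = "do_fork" },
};

/* register_jprobe(&jp); ... unregister_jprobe(&jp); */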
@@ -725,7 +858,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 	return 0;
 }
 
-int __kprobes register_kretprobe(struct kretprobe *rp)
+static int __kprobes __register_kretprobe(struct kretprobe *rp,
+	unsigned long called_from)
 {
 	int ret = 0;
 	struct kretprobe_instance *inst;
@@ -771,46 +905,101 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 
 	rp->nmissed = 0;
 	/* Establish function entry probe point */
-	if ((ret = __register_kprobe(&rp->kp,
-		(unsigned long)__builtin_return_address(0))) != 0)
+	ret = __register_kprobe(&rp->kp, called_from);
+	if (ret != 0)
 		free_rp_inst(rp);
 	return ret;
 }
 
+static int __register_kretprobes(struct kretprobe **rps, int num,
+	unsigned long called_from)
+{
+	int ret = 0, i;
+
+	if (num <= 0)
+		return -EINVAL;
+	for (i = 0; i < num; i++) {
+		ret = __register_kretprobe(rps[i], called_from);
+		if (ret < 0 && i > 0) {
+			unregister_kretprobes(rps, i);
+			break;
+		}
+	}
+	return ret;
+}
+
+int __kprobes register_kretprobe(struct kretprobe *rp)
+{
+	return __register_kretprobes(&rp, 1,
+			(unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kretprobe(struct kretprobe *rp)
+{
+	unregister_kretprobes(&rp, 1);
+}
+
+int __kprobes register_kretprobes(struct kretprobe **rps, int num)
+{
+	return __register_kretprobes(rps, num,
+			(unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
+{
+	int i;
+
+	if (num <= 0)
+		return;
+	mutex_lock(&kprobe_mutex);
+	for (i = 0; i < num; i++)
+		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
+			rps[i]->kp.addr = NULL;
+	mutex_unlock(&kprobe_mutex);
+
+	synchronize_sched();
+	for (i = 0; i < num; i++) {
+		if (rps[i]->kp.addr) {
+			__unregister_kprobe_bottom(&rps[i]->kp);
+			cleanup_rp_inst(rps[i]);
+		}
+	}
+}
+
 #else /* CONFIG_KRETPROBES */
 int __kprobes register_kretprobe(struct kretprobe *rp)
 {
 	return -ENOSYS;
 }
 
-static int __kprobes pre_handler_kretprobe(struct kprobe *p,
-					   struct pt_regs *regs)
+int __kprobes register_kretprobes(struct kretprobe **rps, int num)
 {
-	return 0;
+	return -ENOSYS;
 }
-#endif /* CONFIG_KRETPROBES */
-
 void __kprobes unregister_kretprobe(struct kretprobe *rp)
 {
-	unsigned long flags;
-	struct kretprobe_instance *ri;
-	struct hlist_node *pos, *next;
+}
 
-	unregister_kprobe(&rp->kp);
+void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
+{
+}
 
-	/* No race here */
-	spin_lock_irqsave(&kretprobe_lock, flags);
-	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
-		ri->rp = NULL;
-		hlist_del(&ri->uflist);
-	}
-	spin_unlock_irqrestore(&kretprobe_lock, flags);
-	free_rp_inst(rp);
+static int __kprobes pre_handler_kretprobe(struct kprobe *p,
+					   struct pt_regs *regs)
+{
+	return 0;
 }
 
+#endif /* CONFIG_KRETPROBES */
+
 static int __init init_kprobes(void)
 {
 	int i, err = 0;
+	unsigned long offset = 0, size = 0;
+	char *modname, namebuf[128];
+	const char *symbol_name;
+	void *addr;
+	struct kprobe_blackpoint *kb;
 
 	/* FIXME allocate the probe table, currently defined statically */
 	/* initialize all list heads */
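kretprobes follow the same two-phase teardown, with cleanup_rp_inst() added after the bottom half to detach any instances still sitting on used_instances. A hypothetical batch caller collecting return values (regs_return_value() is assumed to be provided by the architecture):

#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk(KERN_INFO "%s returned %ld\n", ri->rp->kp.symbol_name,
	       regs_return_value(regs));
	return 0;
}

static struct kretprobe krp[2] = {
	{ .handler = ret_handler, .maxactive = 20,
	  .kp = { .symbol_name = "do_fork" } },
	{ .handler = ret_handler, .maxactive = 20,
	  .kp = { .symbol_name = "kernel_thread" } },
};
static struct kretprobe *rps[2] = { &krp[0], &krp[1] };

/* register_kretprobes(rps, 2); ... unregister_kretprobes(rps, 2); */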
@@ -819,6 +1008,28 @@ static int __init init_kprobes(void)
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
 	}
 
+	/*
+	 * Lookup and populate the kprobe_blacklist.
+	 *
+	 * Unlike the kretprobe blacklist, we'll need to determine
+	 * the range of addresses that belong to the said functions,
+	 * since a kprobe need not necessarily be at the beginning
+	 * of a function.
+	 */
+	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+		kprobe_lookup_name(kb->name, addr);
+		if (!addr)
+			continue;
+
+		kb->start_addr = (unsigned long)addr;
+		symbol_name = kallsyms_lookup(kb->start_addr,
+				&size, &offset, &modname, namebuf);
+		if (!symbol_name)
+			kb->range = 0;
+		else
+			kb->range = size;
+	}
+
 	if (kretprobe_blacklist_size) {
 		/* lookup the function address from its name */
 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
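kprobe_lookup_name() is a macro rather than a function so that architectures with function descriptors (e.g. powerpc64) can override it; the generic form in include/linux/kprobes.h is roughly the following (quoted from memory, verify against the tree):

#define kprobe_lookup_name(name, addr) \
	(addr = (void *)kallsyms_lookup_name(name))

kallsyms_lookup() then reports the symbol's size through its second argument, which becomes kb->range, so the blacklist covers the whole function body rather than just its entry.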
@@ -1066,8 +1277,12 @@ module_init(init_kprobes);
 
 EXPORT_SYMBOL_GPL(register_kprobe);
 EXPORT_SYMBOL_GPL(unregister_kprobe);
+EXPORT_SYMBOL_GPL(register_kprobes);
+EXPORT_SYMBOL_GPL(unregister_kprobes);
 EXPORT_SYMBOL_GPL(register_jprobe);
 EXPORT_SYMBOL_GPL(unregister_jprobe);
+EXPORT_SYMBOL_GPL(register_jprobes);
+EXPORT_SYMBOL_GPL(unregister_jprobes);
 #ifdef CONFIG_KPROBES
 EXPORT_SYMBOL_GPL(jprobe_return);
 #endif
@@ -1075,4 +1290,6 @@ EXPORT_SYMBOL_GPL(jprobe_return);
 #ifdef CONFIG_KPROBES
 EXPORT_SYMBOL_GPL(register_kretprobe);
 EXPORT_SYMBOL_GPL(unregister_kretprobe);
+EXPORT_SYMBOL_GPL(register_kretprobes);
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
 #endif