Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--	kernel/kprobes.c	39
1 file changed, 3 insertions(+), 36 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 4b8a4493c541..f9798ff7899f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -64,7 +64,6 @@
 
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
-static atomic_t kprobe_count;
 
 /* NOTE: change this value only with kprobe_mutex held */
 static bool kprobe_enabled;
@@ -73,11 +72,6 @@ DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
-static struct notifier_block kprobe_page_fault_nb = {
-	.notifier_call = kprobe_exceptions_notify,
-	.priority = 0x7fffffff /* we need to be notified first */
-};
-
 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -556,8 +550,6 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
-		if (!ret)
-			atomic_inc(&kprobe_count);
 		goto out;
 	}
 
@@ -569,13 +561,9 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	if (kprobe_enabled) {
-		if (atomic_add_return(1, &kprobe_count) == \
-			(ARCH_INACTIVE_KPROBE_COUNT + 1))
-			register_page_fault_notifier(&kprobe_page_fault_nb);
-
+	if (kprobe_enabled)
 		arch_arm_kprobe(p);
-	}
+
 out:
 	mutex_unlock(&kprobe_mutex);
 
@@ -658,16 +646,6 @@ valid_p:
 		}
 		mutex_unlock(&kprobe_mutex);
 	}
-
-	/* Call unregister_page_fault_notifier()
-	 * if no probes are active
-	 */
-	mutex_lock(&kprobe_mutex);
-	if (atomic_add_return(-1, &kprobe_count) == \
-			ARCH_INACTIVE_KPROBE_COUNT)
-		unregister_page_fault_notifier(&kprobe_page_fault_nb);
-	mutex_unlock(&kprobe_mutex);
-	return;
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
@@ -815,7 +793,6 @@ static int __init init_kprobes(void)
 		INIT_HLIST_HEAD(&kprobe_table[i]);
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
 	}
-	atomic_set(&kprobe_count, 0);
 
 	/* By default, kprobes are enabled */
 	kprobe_enabled = true;
@@ -921,13 +898,6 @@ static void __kprobes enable_all_kprobes(void)
 	if (kprobe_enabled)
 		goto already_enabled;
 
-	/*
-	 * Re-register the page fault notifier only if there are any
-	 * active probes at the time of enabling kprobes globally
-	 */
-	if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
-		register_page_fault_notifier(&kprobe_page_fault_nb);
-
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
@@ -968,10 +938,7 @@ static void __kprobes disable_all_kprobes(void)
 	mutex_unlock(&kprobe_mutex);
 	/* Allow all currently running kprobes to complete */
 	synchronize_sched();
-
-	mutex_lock(&kprobe_mutex);
-	/* Unconditionally unregister the page_fault notifier */
-	unregister_page_fault_notifier(&kprobe_page_fault_nb);
+	return;
 
 already_disabled:
 	mutex_unlock(&kprobe_mutex);
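Taken together, the hunks above drop the kprobe_count bookkeeping and the dedicated kprobe_page_fault_nb notifier_block, so kprobes no longer registers and unregisters itself on the page-fault notifier chain as probes come and go. The sketch below illustrates the kind of direct hook this change relies on from the architecture side: the fault handler checks for a running kprobe itself and calls kprobe_fault_handler(). This is a rough sketch modelled on the x86 fault path of that era, not a hunk from this commit; the helper name notify_page_fault() and the trap number 14 are illustrative assumptions.

#include <linux/kprobes.h>
#include <linux/ptrace.h>

/*
 * Sketch only: how an arch page-fault path can hand kernel-mode faults
 * straight to kprobes once the notifier chain is gone.  The helper name
 * and the trap number (14 = x86 page fault) are assumptions.
 */
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() uses smp_processor_id(), so keep preemption off */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
	return 0;
}
#endif

With the fault path calling into kprobes directly, there is nothing left to register on demand, which is why the removed code that counted active probes against ARCH_INACTIVE_KPROBE_COUNT is no longer needed.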
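For context on the __register_kprobe() path touched above, a minimal caller looks roughly like the following; the probed symbol and the handler are placeholder examples, not taken from this commit.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Placeholder target symbol; any kernel text symbol could be probed. */
static struct kprobe kp = {
	.symbol_name	= "do_fork",
};

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;
}

static int __init probe_init(void)
{
	kp.pre_handler = handler_pre;
	/* register_kprobe() lands in the __register_kprobe() shown in the diff */
	return register_kprobe(&kp);
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");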