Diffstat (limited to 'kernel/kprobes.c')
 kernel/kprobes.c | 156
 1 file changed, 149 insertions(+), 7 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f58f171bd65f..9e47d8c493f3 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -43,9 +43,11 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/kdebug.h>
+
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
+#include <asm/uaccess.h>
 
 #define KPROBE_HASH_BITS 6
 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
@@ -64,6 +66,9 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 static atomic_t kprobe_count;
 
+/* NOTE: change this value only with kprobe_mutex held */
+static bool kprobe_enabled;
+
 DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
@@ -564,12 +569,13 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	if (atomic_add_return(1, &kprobe_count) == \
+	if (kprobe_enabled) {
+		if (atomic_add_return(1, &kprobe_count) == \
 				(ARCH_INACTIVE_KPROBE_COUNT + 1))
-		register_page_fault_notifier(&kprobe_page_fault_nb);
-
-	arch_arm_kprobe(p);
+			register_page_fault_notifier(&kprobe_page_fault_nb);
 
+		arch_arm_kprobe(p);
+	}
 out:
 	mutex_unlock(&kprobe_mutex);
 
@@ -607,8 +613,13 @@ valid_p:
 	if (old_p == p ||
 	    (old_p->pre_handler == aggr_pre_handler &&
 	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
-		/* Only probe on the hash list */
-		arch_disarm_kprobe(p);
+		/*
+		 * Only probe on the hash list. Disarm only if kprobes are
+		 * enabled - otherwise, the breakpoint would already have
+		 * been removed. We save on flushing icache.
+		 */
+		if (kprobe_enabled)
+			arch_disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
 		cleanup_p = 1;
 	} else {
@@ -797,6 +808,9 @@ static int __init init_kprobes(void)
 	}
 	atomic_set(&kprobe_count, 0);
 
+	/* By default, kprobes are enabled */
+	kprobe_enabled = true;
+
 	err = arch_init_kprobes();
 	if (!err)
 		err = register_die_notifier(&kprobe_exceptions_nb);
@@ -806,7 +820,7 @@ static int __init init_kprobes(void)
 
 #ifdef CONFIG_DEBUG_FS
 static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
-		const char *sym, int offset,char *modname)
+		const char *sym, int offset,char *modname)
 {
 	char *kprobe_type;
 
@@ -885,9 +899,130 @@ static struct file_operations debugfs_kprobes_operations = {
 	.release = seq_release,
 };
 
+static void __kprobes enable_all_kprobes(void)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kprobe *p;
+	unsigned int i;
+
+	mutex_lock(&kprobe_mutex);
+
+	/* If kprobes are already enabled, just return */
+	if (kprobe_enabled)
+		goto already_enabled;
+
+	/*
+	 * Re-register the page fault notifier only if there are any
+	 * active probes at the time of enabling kprobes globally
+	 */
+	if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
+		register_page_fault_notifier(&kprobe_page_fault_nb);
+
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+		head = &kprobe_table[i];
+		hlist_for_each_entry_rcu(p, node, head, hlist)
+			arch_arm_kprobe(p);
+	}
+
+	kprobe_enabled = true;
+	printk(KERN_INFO "Kprobes globally enabled\n");
+
+already_enabled:
+	mutex_unlock(&kprobe_mutex);
+	return;
+}
+
+static void __kprobes disable_all_kprobes(void)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kprobe *p;
+	unsigned int i;
+
+	mutex_lock(&kprobe_mutex);
+
+	/* If kprobes are already disabled, just return */
+	if (!kprobe_enabled)
+		goto already_disabled;
+
+	kprobe_enabled = false;
+	printk(KERN_INFO "Kprobes globally disabled\n");
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+		head = &kprobe_table[i];
+		hlist_for_each_entry_rcu(p, node, head, hlist) {
+			if (!arch_trampoline_kprobe(p))
+				arch_disarm_kprobe(p);
+		}
+	}
+
+	mutex_unlock(&kprobe_mutex);
+	/* Allow all currently running kprobes to complete */
+	synchronize_sched();
+
+	mutex_lock(&kprobe_mutex);
+	/* Unconditionally unregister the page_fault notifier */
+	unregister_page_fault_notifier(&kprobe_page_fault_nb);
+
+already_disabled:
+	mutex_unlock(&kprobe_mutex);
+	return;
+}
+
+/*
+ * XXX: The debugfs bool file interface doesn't allow for callbacks
+ * when the bool state is switched. We can reuse that facility when
+ * available
+ */
+static ssize_t read_enabled_file_bool(struct file *file,
+	       char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[3];
+
+	if (kprobe_enabled)
+		buf[0] = '1';
+	else
+		buf[0] = '0';
+	buf[1] = '\n';
+	buf[2] = 0x00;
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_enabled_file_bool(struct file *file,
+	       const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int buf_size;
+
+	buf_size = min(count, (sizeof(buf)-1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	switch (buf[0]) {
+	case 'y':
+	case 'Y':
+	case '1':
+		enable_all_kprobes();
+		break;
+	case 'n':
+	case 'N':
+	case '0':
+		disable_all_kprobes();
+		break;
+	}
+
+	return count;
+}
+
+static struct file_operations fops_kp = {
+	.read = read_enabled_file_bool,
+	.write = write_enabled_file_bool,
+};
+
 static int __kprobes debugfs_kprobe_init(void)
 {
 	struct dentry *dir, *file;
+	unsigned int value = 1;
 
 	dir = debugfs_create_dir("kprobes", NULL);
 	if (!dir)
@@ -900,6 +1035,13 @@ static int __kprobes debugfs_kprobe_init(void)
 		return -ENOMEM;
 	}
 
+	file = debugfs_create_file("enabled", 0600, dir,
+					&value, &fops_kp);
+	if (!file) {
+		debugfs_remove(dir);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
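
For reference, a minimal user-space sketch of driving the knob this patch adds. The kprobes/enabled file is created by debugfs_kprobe_init() above; the /sys/kernel/debug mount point and the kprobes_switch.c file name are assumptions for the example, not part of the patch. From a shell (as root), writing 0 or 1 to /sys/kernel/debug/kprobes/enabled has the same effect.

/*
 * kprobes_switch.c - query and toggle the global kprobes enable knob.
 * Assumes debugfs is mounted at /sys/kernel/debug (an assumption, not
 * something the patch itself guarantees). Build: cc -o kprobes_switch
 * kprobes_switch.c and run as root.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#define ENABLED_FILE "/sys/kernel/debug/kprobes/enabled"

int main(int argc, char **argv)
{
	char buf[3] = "";
	int fd = open(ENABLED_FILE, O_RDWR);

	if (fd < 0) {
		perror("open " ENABLED_FILE);
		return 1;
	}

	/* read_enabled_file_bool() returns "1\n" or "0\n" */
	if (read(fd, buf, 2) < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	printf("kprobes currently %s\n",
	       buf[0] == '1' ? "enabled" : "disabled");

	/* write_enabled_file_bool() accepts 'y'/'Y'/'1' and 'n'/'N'/'0' */
	if (argc > 1 && write(fd, argv[1], strlen(argv[1])) < 0)
		perror("write");

	close(fd);
	return 0;
}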