Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--	kernel/kprobes.c | 58
1 file changed, 37 insertions, 21 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 282035f3ae96..9737a76e106f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -47,6 +47,7 @@
 #include <linux/memory.h>
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
+#include <linux/jump_label.h>
 
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -73,7 +74,8 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 /* NOTE: change this value only with kprobe_mutex held */
 static bool kprobes_all_disarmed;
 
-static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
+/* This protects kprobe_table and optimizing_list */
+static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
 	spinlock_t lock ____cacheline_aligned_in_smp;
@@ -399,7 +401,7 @@ static inline int kprobe_optready(struct kprobe *p)
  * Return an optimized kprobe whose optimizing code replaces
  * instructions including addr (exclude breakpoint).
  */
-struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
+static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 {
 	int i;
 	struct kprobe *p = NULL;
@@ -594,6 +596,7 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p) | |||
594 | } | 596 | } |
595 | 597 | ||
596 | #ifdef CONFIG_SYSCTL | 598 | #ifdef CONFIG_SYSCTL |
599 | /* This should be called with kprobe_mutex locked */ | ||
597 | static void __kprobes optimize_all_kprobes(void) | 600 | static void __kprobes optimize_all_kprobes(void) |
598 | { | 601 | { |
599 | struct hlist_head *head; | 602 | struct hlist_head *head; |
@@ -606,17 +609,16 @@ static void __kprobes optimize_all_kprobes(void)
 		return;
 
 	kprobes_allow_optimization = true;
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
 			if (!kprobe_disabled(p))
 				optimize_kprobe(p);
 	}
-	mutex_unlock(&text_mutex);
 	printk(KERN_INFO "Kprobes globally optimized\n");
 }
 
+/* This should be called with kprobe_mutex locked */
 static void __kprobes unoptimize_all_kprobes(void)
 {
 	struct hlist_head *head;
@@ -831,6 +833,7 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
 
 void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
 			 struct hlist_head **head, unsigned long *flags)
+__acquires(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
 	spinlock_t *hlist_lock;
@@ -842,6 +845,7 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
 
 static void __kprobes kretprobe_table_lock(unsigned long hash,
 	unsigned long *flags)
+__acquires(hlist_lock)
 {
 	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
 	spin_lock_irqsave(hlist_lock, *flags);
@@ -849,6 +853,7 @@ static void __kprobes kretprobe_table_lock(unsigned long hash,
 
 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
 	unsigned long *flags)
+__releases(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
 	spinlock_t *hlist_lock;
@@ -857,7 +862,9 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
 	spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
-void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
+static void __kprobes kretprobe_table_unlock(unsigned long hash,
+	unsigned long *flags)
+__releases(hlist_lock)
 {
 	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
 	spin_unlock_irqrestore(hlist_lock, *flags);
@@ -1138,13 +1145,13 @@ int __kprobes register_kprobe(struct kprobe *p)
 	if (ret)
 		return ret;
 
+	jump_label_lock();
 	preempt_disable();
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
-	    ftrace_text_reserved(p->addr, p->addr)) {
-		preempt_enable();
-		return -EINVAL;
-	}
+	    ftrace_text_reserved(p->addr, p->addr) ||
+	    jump_label_text_reserved(p->addr, p->addr))
+		goto fail_with_jump_label;
 
 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
 	p->flags &= KPROBE_FLAG_DISABLED;
@@ -1158,10 +1165,9 @@ int __kprobes register_kprobe(struct kprobe *p)
 		 * We must hold a refcount of the probed module while updating
 		 * its code to prohibit unexpected unloading.
 		 */
-		if (unlikely(!try_module_get(probed_mod))) {
-			preempt_enable();
-			return -EINVAL;
-		}
+		if (unlikely(!try_module_get(probed_mod)))
+			goto fail_with_jump_label;
+
 		/*
 		 * If the module freed .init.text, we couldn't insert
 		 * kprobes in there.
@@ -1169,16 +1175,18 @@ int __kprobes register_kprobe(struct kprobe *p)
 		if (within_module_init((unsigned long)p->addr, probed_mod) &&
 		    probed_mod->state != MODULE_STATE_COMING) {
 			module_put(probed_mod);
-			preempt_enable();
-			return -EINVAL;
+			goto fail_with_jump_label;
 		}
 	}
 	preempt_enable();
+	jump_label_unlock();
 
 	p->nmissed = 0;
 	INIT_LIST_HEAD(&p->list);
 	mutex_lock(&kprobe_mutex);
 
+	jump_label_lock(); /* needed to call jump_label_text_reserved() */
+
 	get_online_cpus();	/* For avoiding text_mutex deadlock. */
 	mutex_lock(&text_mutex);
 
@@ -1206,12 +1214,18 @@ int __kprobes register_kprobe(struct kprobe *p)
 out:
 	mutex_unlock(&text_mutex);
 	put_online_cpus();
+	jump_label_unlock();
 	mutex_unlock(&kprobe_mutex);
 
 	if (probed_mod)
 		module_put(probed_mod);
 
 	return ret;
+
+fail_with_jump_label:
+	preempt_enable();
+	jump_label_unlock();
+	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(register_kprobe);
 
@@ -1339,18 +1353,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
 	if (num <= 0)
 		return -EINVAL;
 	for (i = 0; i < num; i++) {
-		unsigned long addr;
+		unsigned long addr, offset;
 		jp = jps[i];
 		addr = arch_deref_entry_point(jp->entry);
 
-		if (!kernel_text_address(addr))
-			ret = -EINVAL;
-		else {
-			/* Todo: Verify probepoint is a function entry point */
+		/* Verify probepoint is a function entry point */
+		if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
+		    offset == 0) {
 			jp->kp.pre_handler = setjmp_pre_handler;
 			jp->kp.break_handler = longjmp_break_handler;
 			ret = register_kprobe(&jp->kp);
-		}
+		} else
+			ret = -EINVAL;
+
 		if (ret < 0) {
 			if (i > 0)
 				unregister_jprobes(jps, i);
@@ -1992,6 +2007,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
 static const struct file_operations fops_kp = {
 	.read =		read_enabled_file_bool,
 	.write =	write_enabled_file_bool,
+	.llseek =	default_llseek,
 };
 
 static int __kprobes debugfs_kprobe_init(void)
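
The __acquires()/__releases() markers this patch adds to the kretprobe hash-lock helpers are annotations for the sparse static checker: they record which lock a function takes or drops, so "make C=1" can flag unbalanced locking across function boundaries, and they compile away to nothing outside sparse. As a rough, hypothetical sketch only (the lock and helper names below are illustrative, not part of this patch), a paired lock/unlock helper is typically annotated like this:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Takes example_lock; __acquires() tells sparse the lock is left held. */
static void example_lock_irqsave(unsigned long *flags)
	__acquires(example_lock)
{
	spin_lock_irqsave(&example_lock, *flags);
}

/* Drops example_lock; __releases() tells sparse the lock is released here. */
static void example_unlock_irqrestore(unsigned long *flags)
	__releases(example_lock)
{
	spin_unlock_irqrestore(&example_lock, *flags);
}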