Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--	kernel/kprobes.c	26
1 file changed, 17 insertions, 9 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 282035f3ae96..ec4210c6501e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -47,6 +47,7 @@
 #include <linux/memory.h>
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
+#include <linux/jump_label.h>
 
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -399,7 +400,7 @@ static inline int kprobe_optready(struct kprobe *p)
  * Return an optimized kprobe whose optimizing code replaces
  * instructions including addr (exclude breakpoint).
  */
-struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
+static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 {
 	int i;
 	struct kprobe *p = NULL;
@@ -831,6 +832,7 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
 
 void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
+__acquires(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
 	spinlock_t *hlist_lock;
@@ -842,6 +844,7 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
 
 static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
+__acquires(hlist_lock)
 {
 	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
 	spin_lock_irqsave(hlist_lock, *flags);
@@ -849,6 +852,7 @@ static void __kprobes kretprobe_table_lock(unsigned long hash,
 
 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
+__releases(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
 	spinlock_t *hlist_lock;
@@ -857,7 +861,9 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
 	spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
-void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
+static void __kprobes kretprobe_table_unlock(unsigned long hash,
+	unsigned long *flags)
+__releases(hlist_lock)
 {
 	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
 	spin_unlock_irqrestore(hlist_lock, *flags);
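
Note: the __acquires()/__releases() markers added in the hunks above are sparse lock-context annotations; they compile away in a normal build and only tell static analysis which functions enter or leave a critical section. A minimal sketch of the same pattern on a hypothetical lock (example_lock and both helper names are illustrative, not part of this patch):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock, for illustration only */

/* Tells sparse that this function returns with example_lock held. */
static void example_lock_acquire(void)
	__acquires(example_lock)
{
	spin_lock(&example_lock);
}

/* Tells sparse that this function is entered with example_lock held and drops it. */
static void example_lock_release(void)
	__releases(example_lock)
{
	spin_unlock(&example_lock);
}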
@@ -1141,7 +1147,8 @@ int __kprobes register_kprobe(struct kprobe *p)
 	preempt_disable();
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
-	    ftrace_text_reserved(p->addr, p->addr)) {
+	    ftrace_text_reserved(p->addr, p->addr) ||
+	    jump_label_text_reserved(p->addr, p->addr)) {
 		preempt_enable();
 		return -EINVAL;
 	}
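
Note: jump_label_text_reserved() is checked alongside ftrace_text_reserved() so that register_kprobe() refuses an address whose instruction bytes are owned by the jump label machinery and may be live-patched. Conceptually this kind of helper is an address-range overlap test; the struct and function below are a hypothetical sketch for illustration, not the kernel's actual jump label code:

/* Hypothetical record of one patched instruction, for illustration only. */
struct jump_entry_example {
	unsigned long code;	/* address of the patched instruction */
	unsigned long size;	/* length of the patched instruction */
};

/* Nonzero if the patched instruction overlaps the text range [start, end]. */
static int example_text_reserved(const struct jump_entry_example *e,
				 void *start, void *end)
{
	unsigned long s = (unsigned long)start;
	unsigned long en = (unsigned long)end;

	return e->code <= en && e->code + e->size > s;
}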
@@ -1339,18 +1346,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
 	if (num <= 0)
 		return -EINVAL;
 	for (i = 0; i < num; i++) {
-		unsigned long addr;
+		unsigned long addr, offset;
 		jp = jps[i];
 		addr = arch_deref_entry_point(jp->entry);
 
-		if (!kernel_text_address(addr))
-			ret = -EINVAL;
-		else {
-			/* Todo: Verify probepoint is a function entry point */
+		/* Verify probepoint is a function entry point */
+		if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
+		    offset == 0) {
 			jp->kp.pre_handler = setjmp_pre_handler;
 			jp->kp.break_handler = longjmp_break_handler;
 			ret = register_kprobe(&jp->kp);
-		}
+		} else
+			ret = -EINVAL;
+
 		if (ret < 0) {
 			if (i > 0)
 				unregister_jprobes(jps, i);
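
Note: the rewritten register_jprobes() loop replaces the old kernel_text_address() test with kallsyms_lookup_size_offset(), accepting an entry point only when it resolves to offset 0 inside a symbol, i.e. an actual function entry, which a jprobe needs because its handler mirrors the probed function's arguments. A minimal sketch of that style of check (the helper name below is hypothetical):

#include <linux/kallsyms.h>
#include <linux/kernel.h>

/* Return true if addr is exactly at the start of a kernel symbol. */
static bool example_is_function_entry(unsigned long addr)
{
	unsigned long size, offset;

	if (!kallsyms_lookup_size_offset(addr, &size, &offset))
		return false;		/* addr is not inside any known symbol */

	return offset == 0;		/* offset 0 => the symbol's first byte */
}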