Diffstat (limited to 'kernel/kprobes.c')

 kernel/kprobes.c | 34 ++++++++++++++++++++++------------
 1 file changed, 22 insertions(+), 12 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 282035f3ae96..99865c33a60d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -47,6 +47,7 @@
 #include <linux/memory.h>
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
+#include <linux/jump_label.h>
 
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -73,7 +74,8 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 /* NOTE: change this value only with kprobe_mutex held */
 static bool kprobes_all_disarmed;
 
-static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
+/* This protects kprobe_table and optimizing_list */
+static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
 	spinlock_t lock ____cacheline_aligned_in_smp;
@@ -399,7 +401,7 @@ static inline int kprobe_optready(struct kprobe *p)
  * Return an optimized kprobe whose optimizing code replaces
  * instructions including addr (exclude breakpoint).
  */
-struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
+static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 {
 	int i;
 	struct kprobe *p = NULL;
@@ -594,6 +596,7 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 }
 
 #ifdef CONFIG_SYSCTL
+/* This should be called with kprobe_mutex locked */
 static void __kprobes optimize_all_kprobes(void)
 {
 	struct hlist_head *head;
@@ -606,17 +609,16 @@ static void __kprobes optimize_all_kprobes(void)
 		return;
 
 	kprobes_allow_optimization = true;
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
 			if (!kprobe_disabled(p))
 				optimize_kprobe(p);
 	}
-	mutex_unlock(&text_mutex);
 	printk(KERN_INFO "Kprobes globally optimized\n");
 }
 
+/* This should be called with kprobe_mutex locked */
 static void __kprobes unoptimize_all_kprobes(void)
 {
 	struct hlist_head *head;
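Note: the two added comments document the locking contract rather than change it. Both functions are only reached from the kprobes-optimization sysctl handler, which already holds kprobe_mutex (the same mutex that, per the earlier hunk, now also guards optimizing_list). The text_mutex lock/unlock pair is dropped because optimize_kprobe() merely queues the probe on optimizing_list; the actual instruction patching happens later in the kprobe_optimizer() workqueue callback, which takes text_mutex itself. For context, a lightly abridged version of the sysctl handler from the same era of this file:

int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer,
				      size_t *length, loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();		/* kprobe_mutex held */
	else
		unoptimize_all_kprobes();	/* kprobe_mutex held */
	mutex_unlock(&kprobe_mutex);

	return ret;
}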
@@ -831,6 +833,7 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
 
 void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
 			 struct hlist_head **head, unsigned long *flags)
+__acquires(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
 	spinlock_t *hlist_lock;
@@ -842,6 +845,7 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
 
 static void __kprobes kretprobe_table_lock(unsigned long hash,
 	unsigned long *flags)
+__acquires(hlist_lock)
 {
 	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
 	spin_lock_irqsave(hlist_lock, *flags);
@@ -849,6 +853,7 @@ static void __kprobes kretprobe_table_lock(unsigned long hash,
 
 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
 			    unsigned long *flags)
+__releases(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
 	spinlock_t *hlist_lock;
@@ -857,7 +862,9 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
 	spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
-void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
+static void __kprobes kretprobe_table_unlock(unsigned long hash,
+	unsigned long *flags)
+__releases(hlist_lock)
 {
 	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
 	spin_unlock_irqrestore(hlist_lock, *flags);
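Note: __acquires() and __releases() are annotations for the Sparse static checker (make C=1), not runtime code; in a normal build they expand to nothing. They tell Sparse that a function deliberately exits with a lock held, or enters with one held and drops it, which silences context-imbalance warnings for lock/unlock pairs split across functions, exactly the shape of the kretprobe hash-lock helpers above. Their definitions in include/linux/compiler.h of this era:

#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
#else
# define __acquires(x)
# define __releases(x)
#endif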
@@ -1141,7 +1148,8 @@ int __kprobes register_kprobe(struct kprobe *p)
 	preempt_disable();
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
-	    ftrace_text_reserved(p->addr, p->addr)) {
+	    ftrace_text_reserved(p->addr, p->addr) ||
+	    jump_label_text_reserved(p->addr, p->addr)) {
 		preempt_enable();
 		return -EINVAL;
 	}
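Note: register_kprobe() already rejected addresses outside kernel text, inside kprobes' own blacklist, and in ftrace-reserved text; the new jump_label_text_reserved() test (hence the linux/jump_label.h include in the first hunk) extends the same protection to jump-label sites, whose instructions are live-patched and must not be overwritten with a breakpoint. From a caller's point of view nothing changes except one more way to get -EINVAL. A hedged sketch against this era's API (the probed symbol is purely illustrative):

#include <linux/module.h>
#include <linux/kprobes.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* illustrative target */
	.pre_handler	= handler_pre,
};

static int __init probe_init(void)
{
	/* Now also fails with -EINVAL if the resolved address lies in
	 * text reserved by ftrace or by a jump label. */
	int ret = register_kprobe(&kp);
	if (ret < 0)
		pr_err("register_kprobe failed: %d\n", ret);
	return ret;
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");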
@@ -1339,18 +1347,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
 	if (num <= 0)
 		return -EINVAL;
 	for (i = 0; i < num; i++) {
-		unsigned long addr;
+		unsigned long addr, offset;
 		jp = jps[i];
 		addr = arch_deref_entry_point(jp->entry);
 
-		if (!kernel_text_address(addr))
-			ret = -EINVAL;
-		else {
-			/* Todo: Verify probepoint is a function entry point */
+		/* Verify probepoint is a function entry point */
+		if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
+		    offset == 0) {
 			jp->kp.pre_handler = setjmp_pre_handler;
 			jp->kp.break_handler = longjmp_break_handler;
 			ret = register_kprobe(&jp->kp);
-		}
+		} else
+			ret = -EINVAL;
+
 		if (ret < 0) {
 			if (i > 0)
 				unregister_jprobes(jps, i);
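Note: the old path accepted any entry address somewhere in kernel text, leaving the long-standing Todo unresolved. The new code takes addr (jp->entry, unwrapped through arch_deref_entry_point() on function-descriptor architectures such as ia64), resolves it with kallsyms_lookup_size_offset(), and requires offset == 0, i.e. the exact first byte of a function, since jprobes depend on function-entry register and stack layout. A hedged usage sketch for this era's jprobe API (do_fork is illustrative; the handler must mirror the probed function's signature):

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/sched.h>

static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
		     struct pt_regs *regs, unsigned long stack_size,
		     int __user *parent_tidptr, int __user *child_tidptr)
{
	pr_info("jprobe: clone_flags=0x%lx\n", clone_flags);
	jprobe_return();	/* mandatory: unwinds to the probed function */
	return 0;		/* never reached */
}

static struct jprobe my_jprobe = {
	.entry	= jdo_fork,	/* must itself be a function entry point */
	.kp	= { .symbol_name = "do_fork" },
};

static int __init jp_init(void)
{
	return register_jprobe(&my_jprobe);
}

static void __exit jp_exit(void)
{
	unregister_jprobe(&my_jprobe);
}

module_init(jp_init);
module_exit(jp_exit);
MODULE_LICENSE("GPL");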
@@ -1992,6 +2001,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
 static const struct file_operations fops_kp = {
 	.read = read_enabled_file_bool,
 	.write = write_enabled_file_bool,
+	.llseek = default_llseek,
 };
 
 static int __kprobes debugfs_kprobe_init(void)
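Note: the explicit .llseek = default_llseek preserves this debugfs file's existing seek behavior; it is part of the tree-wide cleanup that made every file_operations spell out its llseek method so the old implicit VFS fallback could be retired. A hedged userspace sketch exercising the file, assuming debugfs is mounted at the usual /sys/kernel/debug:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4] = "";
	int fd = open("/sys/kernel/debug/kprobes/enabled", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("kprobes enabled: %s", buf);
	lseek(fd, 0, SEEK_SET);		/* rewind via default_llseek */
	if (write(fd, "0", 1) != 1)	/* "0" disarms all kprobes */
		perror("write");
	close(fd);
	return 0;
}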
