Diffstat (limited to 'kernel/kprobes.c'):
 -rw-r--r--  kernel/kprobes.c | 66
 1 file changed, 33 insertions(+), 33 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 098f396aa409..e35be53f6613 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -334,11 +334,10 @@ static inline void reset_kprobe_instance(void)
 struct kprobe __kprobes *get_kprobe(void *addr)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 
 	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
-	hlist_for_each_entry_rcu(p, node, head, hlist) {
+	hlist_for_each_entry_rcu(p, head, hlist) {
 		if (p->addr == addr)
 			return p;
 	}
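This hunk follows the hlist API change in which hlist_for_each_entry_rcu() lost its separate struct hlist_node cursor and now takes only the entry pointer, the list head, and the member name. A minimal sketch of the new-style lookup, using illustrative names (my_item, my_table, my_lookup are not from kprobes.c):

#include <linux/rculist.h>
#include <linux/hash.h>

struct my_item {
	void *key;
	struct hlist_node hlist;
};

#define MY_HASH_BITS	6
static struct hlist_head my_table[1 << MY_HASH_BITS];

/* Caller must be inside an RCU read-side critical section. */
static struct my_item *my_lookup(void *key)
{
	struct my_item *item;
	struct hlist_head *head = &my_table[hash_ptr(key, MY_HASH_BITS)];

	hlist_for_each_entry_rcu(item, head, hlist) {
		if (item->key == key)
			return item;
	}
	return NULL;
}

The same three-argument form is what the remaining hlist_for_each_entry_rcu() hunks below convert to.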
@@ -471,7 +470,6 @@ static LIST_HEAD(unoptimizing_list);
 
 static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
-static DECLARE_COMPLETION(optimizer_comp);
 #define OPTIMIZE_DELAY 5
 
 /*
@@ -552,8 +550,7 @@ static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
 /* Start optimizer after OPTIMIZE_DELAY passed */
 static __kprobes void kick_kprobe_optimizer(void)
 {
-	if (!delayed_work_pending(&optimizing_work))
-		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
+	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
 }
 
 /* Kprobe jump optimizer */
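The delayed_work_pending() test is dropped because schedule_delayed_work() already does nothing and returns false when the work is still pending, so the guard added no protection. A small sketch of that behaviour under illustrative names (example_fn, example_work, example_kick):

#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
	/* deferred processing would run here */
}

static DECLARE_DELAYED_WORK(example_work, example_fn);

static void example_kick(void)
{
	/*
	 * Safe to call unconditionally from several paths: only the first
	 * call while the work is idle actually queues it; later calls
	 * return false without touching the timer.
	 */
	schedule_delayed_work(&example_work, 5 /* jiffies */);
}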
@@ -592,16 +589,25 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	/* Step 5: Kick optimizer again if needed */
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 		kick_kprobe_optimizer();
-	else
-		/* Wake up all waiters */
-		complete_all(&optimizer_comp);
 }
 
 /* Wait for completing optimization and unoptimization */
 static __kprobes void wait_for_kprobe_optimizer(void)
 {
-	if (delayed_work_pending(&optimizing_work))
-		wait_for_completion(&optimizer_comp);
+	mutex_lock(&kprobe_mutex);
+
+	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
+		mutex_unlock(&kprobe_mutex);
+
+		/* this will also make optimizing_work execute immediately */
+		flush_delayed_work(&optimizing_work);
+		/* @optimizing_work might not have been queued yet, relax */
+		cpu_relax();
+
+		mutex_lock(&kprobe_mutex);
+	}
+
+	mutex_unlock(&kprobe_mutex);
 }
 
 /* Optimize kprobe if p is ready to be optimized */
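With this change wait_for_kprobe_optimizer() polls the two work lists under kprobe_mutex instead of sleeping on a completion, relying on flush_delayed_work() to run the self-rearming work item at once and wait for it. A generic sketch of the same drain pattern, with illustrative names (example_mutex, example_pending, example_work) rather than the kprobes symbols:

#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/list.h>

static DEFINE_MUTEX(example_mutex);
static LIST_HEAD(example_pending);		/* consumed by example_work */
static struct delayed_work example_work;	/* assumed initialized elsewhere */

static void example_wait_for_drain(void)
{
	mutex_lock(&example_mutex);

	while (!list_empty(&example_pending)) {
		mutex_unlock(&example_mutex);

		/* Run the work right away (if queued) and wait for it. */
		flush_delayed_work(&example_work);
		/* The work may not have been queued yet; back off briefly. */
		cpu_relax();

		mutex_lock(&example_mutex);
	}

	mutex_unlock(&example_mutex);
}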
@@ -792,7 +798,6 @@ out:
 static void __kprobes optimize_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;
 
@@ -803,7 +808,7 @@ static void __kprobes optimize_all_kprobes(void)
 	kprobes_allow_optimization = true;
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist)
+		hlist_for_each_entry_rcu(p, head, hlist)
 			if (!kprobe_disabled(p))
 				optimize_kprobe(p);
 	}
@@ -814,7 +819,6 @@ static void __kprobes optimize_all_kprobes(void)
 static void __kprobes unoptimize_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;
 
@@ -825,7 +829,7 @@ static void __kprobes unoptimize_all_kprobes(void)
 	kprobes_allow_optimization = false;
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist) {
+		hlist_for_each_entry_rcu(p, head, hlist) {
 			if (!kprobe_disabled(p))
 				unoptimize_kprobe(p, false);
 		}
@@ -919,7 +923,7 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 }
 #endif /* CONFIG_OPTPROBES */
 
-#ifdef KPROBES_CAN_USE_FTRACE
+#ifdef CONFIG_KPROBES_ON_FTRACE
 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
 	.func = kprobe_ftrace_handler,
 	.flags = FTRACE_OPS_FL_SAVE_REGS,
@@ -964,7 +968,7 @@ static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
 			   (unsigned long)p->addr, 1, 0);
 	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
 }
-#else	/* !KPROBES_CAN_USE_FTRACE */
+#else	/* !CONFIG_KPROBES_ON_FTRACE */
 #define prepare_kprobe(p)	arch_prepare_kprobe(p)
 #define arm_kprobe_ftrace(p)	do {} while (0)
 #define disarm_kprobe_ftrace(p)	do {} while (0)
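For context, a kprobe user is unaffected by the KPROBES_CAN_USE_FTRACE -> CONFIG_KPROBES_ON_FTRACE rename: register_kprobe() transparently arms the probe through kprobe_ftrace_ops when the requested address is an ftrace location. A minimal sketch of such a user; the target symbol and all example_* names are illustrative only:

#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe example_kp = {
	.symbol_name	= "do_sys_open",	/* illustrative target */
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");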
@@ -1141,7 +1145,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 {
 	struct kretprobe_instance *ri;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long hash, flags = 0;
 
 	if (unlikely(!kprobes_initialized))
@@ -1152,12 +1156,12 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
 	head = &kretprobe_inst_table[hash];
 	kretprobe_table_lock(hash, &flags);
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task == tk)
 			recycle_rp_inst(ri, &empty_rp);
 	}
 	kretprobe_table_unlock(hash, &flags);
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
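Likewise, hlist_for_each_entry_safe() dropped its extra cursor and now takes the entry, a single struct hlist_node *tmp, the head, and the member, which still allows entries to be unlinked and freed mid-walk. An illustrative sketch (my_inst and my_flush are not kernel names):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>

struct my_inst {
	struct task_struct *task;
	struct hlist_node hlist;
};

static void my_flush(struct hlist_head *head, struct task_struct *tk)
{
	struct my_inst *inst;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(inst, tmp, head, hlist) {
		if (inst->task == tk) {
			hlist_del(&inst->hlist);
			kfree(inst);
		}
	}
}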
@@ -1166,9 +1170,9 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 static inline void free_rp_inst(struct kretprobe *rp)
 {
 	struct kretprobe_instance *ri;
-	struct hlist_node *pos, *next;
+	struct hlist_node *next;
 
-	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
+	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
@@ -1178,14 +1182,14 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
 {
 	unsigned long flags, hash;
 	struct kretprobe_instance *ri;
-	struct hlist_node *pos, *next;
+	struct hlist_node *next;
 	struct hlist_head *head;
 
 	/* No race here */
 	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
 		kretprobe_table_lock(hash, &flags);
 		head = &kretprobe_inst_table[hash];
-		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
+		hlist_for_each_entry_safe(ri, next, head, hlist) {
 			if (ri->rp == rp)
 				ri->rp = NULL;
 		}
@@ -1414,12 +1418,12 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p,
 	 */
 	ftrace_addr = ftrace_location((unsigned long)p->addr);
 	if (ftrace_addr) {
-#ifdef KPROBES_CAN_USE_FTRACE
+#ifdef CONFIG_KPROBES_ON_FTRACE
 		/* Given address is not on the instruction boundary */
 		if ((unsigned long)p->addr != ftrace_addr)
 			return -EILSEQ;
 		p->flags |= KPROBE_FLAG_FTRACE;
-#else	/* !KPROBES_CAN_USE_FTRACE */
+#else	/* !CONFIG_KPROBES_ON_FTRACE */
 		return -EINVAL;
 #endif
 	}
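The check itself is unchanged by the rename: ftrace_location() reports the address of the ftrace call site inside the probed function, or 0 if there is none, and the probe is only handled as a kprobe-on-ftrace when the requested address matches that site exactly. A hedged sketch of the decision with an illustrative wrapper name:

#include <linux/ftrace.h>
#include <linux/errno.h>

/* Illustrative helper, not part of kprobes.c. */
static int example_addr_uses_ftrace(void *addr)
{
	unsigned long ftrace_addr = ftrace_location((unsigned long)addr);

	if (!ftrace_addr)
		return 0;		/* ordinary breakpoint-based probe */
	if ((unsigned long)addr != ftrace_addr)
		return -EILSEQ;		/* points into the ftrace instruction */
	return 1;			/* can be handled via ftrace */
}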
@@ -2021,7 +2025,6 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
 {
 	struct module *mod = data;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;
 	int checkcore = (val == MODULE_STATE_GOING);
@@ -2038,7 +2041,7 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
 	mutex_lock(&kprobe_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist)
+		hlist_for_each_entry_rcu(p, head, hlist)
 			if (within_module_init((unsigned long)p->addr, mod) ||
 			    (checkcore &&
 			     within_module_core((unsigned long)p->addr, mod))) {
@@ -2185,7 +2188,6 @@ static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
 static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p, *kp;
 	const char *sym = NULL;
 	unsigned int i = *(loff_t *) v;
@@ -2194,7 +2196,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
 
 	head = &kprobe_table[i];
 	preempt_disable();
-	hlist_for_each_entry_rcu(p, node, head, hlist) {
+	hlist_for_each_entry_rcu(p, head, hlist) {
 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
 					&offset, &modname, namebuf);
 		if (kprobe_aggrprobe(p)) {
@@ -2229,7 +2231,6 @@ static const struct file_operations debugfs_kprobes_operations = {
 static void __kprobes arm_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;
 
@@ -2242,7 +2243,7 @@ static void __kprobes arm_all_kprobes(void)
 	/* Arming kprobes doesn't optimize kprobe itself */
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist)
+		hlist_for_each_entry_rcu(p, head, hlist)
 			if (!kprobe_disabled(p))
 				arm_kprobe(p);
 	}
@@ -2258,7 +2259,6 @@ already_enabled:
 static void __kprobes disarm_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;
 
@@ -2275,7 +2275,7 @@ static void __kprobes disarm_all_kprobes(void)
 
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist) {
+		hlist_for_each_entry_rcu(p, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
 				disarm_kprobe(p, false);
 		}