Diffstat (limited to 'kernel/kprobes.c')
 -rw-r--r--  kernel/kprobes.c | 159
 1 file changed, 119 insertions(+), 40 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a1e233a19586..cb732a9aa55f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -327,7 +327,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kprobe *kp;
 
 	list_for_each_entry_rcu(kp, &p->list, list) {
-		if (kp->pre_handler) {
+		if (kp->pre_handler && !kprobe_gone(kp)) {
 			set_kprobe_instance(kp);
 			if (kp->pre_handler(kp, regs))
 				return 1;
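
The kprobe_gone() test used in these handlers is not defined in this file; judging from the KPROBE_FLAG_GONE manipulation elsewhere in the patch (see register_aggr_kprobe() and kill_kprobe() below), it is presumably a trivial flag test added to include/linux/kprobes.h, along these lines:

	/* Assumed helper, sketched from the flag usage in this patch. */
	static inline int kprobe_gone(struct kprobe *p)
	{
		/* The probed instruction belonged to now-freed module text. */
		return p->flags & KPROBE_FLAG_GONE;
	}

With that, the aggregate handlers simply skip child probes whose underlying code no longer exists.
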
@@ -343,7 +343,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 	struct kprobe *kp;
 
 	list_for_each_entry_rcu(kp, &p->list, list) {
-		if (kp->post_handler) {
+		if (kp->post_handler && !kprobe_gone(kp)) {
 			set_kprobe_instance(kp);
 			kp->post_handler(kp, regs, flags);
 			reset_kprobe_instance();
@@ -545,9 +545,10 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 	ap->addr = p->addr;
 	ap->pre_handler = aggr_pre_handler;
 	ap->fault_handler = aggr_fault_handler;
-	if (p->post_handler)
+	/* We don't care about a kprobe which has gone. */
+	if (p->post_handler && !kprobe_gone(p))
 		ap->post_handler = aggr_post_handler;
-	if (p->break_handler)
+	if (p->break_handler && !kprobe_gone(p))
 		ap->break_handler = aggr_break_handler;
 
 	INIT_LIST_HEAD(&ap->list);
@@ -566,17 +567,41 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 	int ret = 0;
 	struct kprobe *ap;
 
+	if (kprobe_gone(old_p)) {
+		/*
+		 * Attempting to insert a new probe at the same location
+		 * as a probe that was in a module vaddr area which has
+		 * already been freed. So the instruction slot has already
+		 * been released; we need a new slot for the new probe.
+		 */
+		ret = arch_prepare_kprobe(old_p);
+		if (ret)
+			return ret;
+	}
 	if (old_p->pre_handler == aggr_pre_handler) {
 		copy_kprobe(old_p, p);
 		ret = add_new_kprobe(old_p, p);
+		ap = old_p;
 	} else {
 		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
-		if (!ap)
+		if (!ap) {
+			if (kprobe_gone(old_p))
+				arch_remove_kprobe(old_p);
 			return -ENOMEM;
+		}
 		add_aggr_kprobe(ap, old_p);
 		copy_kprobe(ap, p);
 		ret = add_new_kprobe(ap, p);
 	}
+	if (kprobe_gone(old_p)) {
+		/*
+		 * If the old_p has gone, its breakpoint has been disarmed.
+		 * We have to arm it again after preparing real kprobes.
+		 */
+		ap->flags &= ~KPROBE_FLAG_GONE;
+		if (kprobe_enabled)
+			arch_arm_kprobe(ap);
+	}
 	return ret;
 }
 
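
For context, register_aggr_kprobe() runs when a second probe is registered at an address that already has one; with this patch, a gone probe at that address gets a fresh instruction slot via arch_prepare_kprobe() and is re-armed. A minimal caller that can trigger this path is the ordinary registration sequence (the symbol and handler names here are only illustrative):

	#include <linux/kprobes.h>

	/* Hypothetical handler; returning 0 lets the probed insn run. */
	static int my_pre_handler(struct kprobe *kp, struct pt_regs *regs)
	{
		return 0;
	}

	static struct kprobe my_kp = {
		.symbol_name = "some_module_fn",	/* example target only */
		.pre_handler = my_pre_handler,
	};

	static int __init my_init(void)
	{
		/* If a (possibly gone) probe already sits at the resolved
		 * address, register_kprobe() takes the aggregate path above. */
		return register_kprobe(&my_kp);
	}
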
@@ -639,8 +664,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 		return -EINVAL;
 	}
 
-	p->mod_refcounted = 0;
-
+	p->flags = 0;
 	/*
 	 * Check if are we probing a module.
 	 */
@@ -649,16 +673,14 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 		struct module *calling_mod;
 		calling_mod = __module_text_address(called_from);
 		/*
-		 * We must allow modules to probe themself and in this case
-		 * avoid incrementing the module refcount, so as to allow
-		 * unloading of self probing modules.
+		 * We must hold a refcount of the probed module while updating
+		 * its code to prevent unexpected unloading.
 		 */
 		if (calling_mod != probed_mod) {
 			if (unlikely(!try_module_get(probed_mod))) {
 				preempt_enable();
 				return -EINVAL;
 			}
-			p->mod_refcounted = 1;
 		} else
 			probed_mod = NULL;
 	}
@@ -687,8 +709,9 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 out:
 	mutex_unlock(&kprobe_mutex);
 
-	if (ret && probed_mod)
+	if (probed_mod)
 		module_put(probed_mod);
+
 	return ret;
 }
 
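
With mod_refcounted gone, the refcount rule becomes simpler: the probed module is pinned only while its text is being patched, and released unconditionally once registration finishes (success or failure); unloads after that are caught by the module notifier added below. A sketch of the pin-while-patching pattern, under that assumption:

	if (unlikely(!try_module_get(probed_mod)))
		return -EINVAL;		/* module is already on its way out */
	/* ... safely insert the breakpoint into the module's text ... */
	module_put(probed_mod);		/* unloads are handled by the notifier */
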
@@ -716,16 +739,16 @@ valid_p:
 	    list_is_singular(&old_p->list))) {
 		/*
 		 * Only probe on the hash list. Disarm only if kprobes are
-		 * enabled - otherwise, the breakpoint would already have
-		 * been removed. We save on flushing icache.
+		 * enabled and not gone - otherwise, the breakpoint would
+		 * already have been removed. We save on flushing icache.
 		 */
-		if (kprobe_enabled)
+		if (kprobe_enabled && !kprobe_gone(old_p))
 			arch_disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
 	} else {
-		if (p->break_handler)
+		if (p->break_handler && !kprobe_gone(p))
 			old_p->break_handler = NULL;
-		if (p->post_handler) {
+		if (p->post_handler && !kprobe_gone(p)) {
 			list_for_each_entry_rcu(list_p, &old_p->list, list) {
 				if ((list_p != p) && (list_p->post_handler))
 					goto noclean;
@@ -740,27 +763,16 @@ noclean:
 
 static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
 {
-	struct module *mod;
 	struct kprobe *old_p;
 
-	if (p->mod_refcounted) {
-		/*
-		 * Since we've already incremented refcount,
-		 * we don't need to disable preemption.
-		 */
-		mod = module_text_address((unsigned long)p->addr);
-		if (mod)
-			module_put(mod);
-	}
-
-	if (list_empty(&p->list) || list_is_singular(&p->list)) {
-		if (!list_empty(&p->list)) {
-			/* "p" is the last child of an aggr_kprobe */
-			old_p = list_entry(p->list.next, struct kprobe, list);
-			list_del(&p->list);
-			kfree(old_p);
-		}
+	if (list_empty(&p->list))
 		arch_remove_kprobe(p);
+	else if (list_is_singular(&p->list)) {
+		/* "p" is the last child of an aggr_kprobe */
+		old_p = list_entry(p->list.next, struct kprobe, list);
+		list_del(&p->list);
+		arch_remove_kprobe(old_p);
+		kfree(old_p);
 	}
 }
 
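
The cleanup above now distinguishes the cases by list state: an empty p->list means a standalone probe (free its own slot), a singular list means p is the last child of an aggr_kprobe (free the aggregator's slot, then the aggregator itself), and anything longer leaves the aggregator alive. list_is_singular() is the standard <linux/list.h> predicate, shown here for reference:

	/* True when the list has exactly one entry. */
	static inline int list_is_singular(const struct list_head *head)
	{
		return !list_empty(head) && (head->next == head->prev);
	}
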
@@ -1074,6 +1086,67 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 
 #endif /* CONFIG_KRETPROBES */
 
+/* Mark the kprobe gone and remove its instruction buffer. */
+static void __kprobes kill_kprobe(struct kprobe *p)
+{
+	struct kprobe *kp;
+	p->flags |= KPROBE_FLAG_GONE;
+	if (p->pre_handler == aggr_pre_handler) {
+		/*
+		 * If this is an aggr_kprobe, we have to list all the
+		 * chained probes and mark them GONE.
+		 */
+		list_for_each_entry_rcu(kp, &p->list, list)
+			kp->flags |= KPROBE_FLAG_GONE;
+		p->post_handler = NULL;
+		p->break_handler = NULL;
+	}
+	/*
+	 * Here, we can remove the insn_slot safely, because no thread calls
+	 * the original probed function (which will be freed soon) any more.
+	 */
+	arch_remove_kprobe(p);
+}
+
+/* Module notifier callback, checking kprobes on the module */
+static int __kprobes kprobes_module_callback(struct notifier_block *nb,
+					     unsigned long val, void *data)
+{
+	struct module *mod = data;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kprobe *p;
+	unsigned int i;
+
+	if (val != MODULE_STATE_GOING)
+		return NOTIFY_DONE;
+
+	/*
+	 * The module's .text section will be freed. We need to
+	 * disable the kprobes which have been inserted in that section.
+	 */
+	mutex_lock(&kprobe_mutex);
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+		head = &kprobe_table[i];
+		hlist_for_each_entry_rcu(p, node, head, hlist)
+			if (within_module_core((unsigned long)p->addr, mod)) {
+				/*
+				 * The vaddr this probe is installed at will
+				 * soon be vfreed, but not synced to disk.
+				 * Hence, disarming the breakpoint isn't needed.
+				 */
+				kill_kprobe(p);
+			}
+	}
+	mutex_unlock(&kprobe_mutex);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block kprobe_module_nb = {
+	.notifier_call = kprobes_module_callback,
+	.priority = 0
+};
+
 static int __init init_kprobes(void)
 {
 	int i, err = 0;
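
within_module_core() is assumed here to be the <linux/module.h> helper that tests whether an address falls inside a module's core (non-init) mapping; roughly:

	/* Sketch of the assumed helper from include/linux/module.h. */
	static inline int within_module_core(unsigned long addr, struct module *mod)
	{
		return (unsigned long)mod->module_core <= addr &&
		       addr < (unsigned long)mod->module_core + mod->core_size;
	}

Holding kprobe_mutex across the scan keeps the hash table stable while each matching probe is killed.
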
@@ -1130,6 +1203,9 @@ static int __init init_kprobes(void)
 	err = arch_init_kprobes();
 	if (!err)
 		err = register_die_notifier(&kprobe_exceptions_nb);
+	if (!err)
+		err = register_module_notifier(&kprobe_module_nb);
+
 	kprobes_initialized = (err == 0);
 
 	if (!err)
@@ -1150,10 +1226,12 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
 	else
 		kprobe_type = "k";
 	if (sym)
-		seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type,
-			sym, offset, (modname ? modname : " "));
+		seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type,
+			sym, offset, (modname ? modname : " "),
+			(kprobe_gone(p) ? "[GONE]" : ""));
 	else
-		seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
+		seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr,
+			(kprobe_gone(p) ? "[GONE]" : ""));
 }
 
 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
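
The visible effect is an extra [GONE] tag in the debugfs listing (/sys/kernel/debug/kprobes/list). An illustrative line for a probe whose module has been unloaded might look like this (address and names are made up):

	c04a1b2c k example_fn+0x0 example_mod [GONE]
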
@@ -1234,7 +1312,8 @@ static void __kprobes enable_all_kprobes(void)
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
-			arch_arm_kprobe(p);
+			if (!kprobe_gone(p))
+				arch_arm_kprobe(p);
 	}
 
 	kprobe_enabled = true;
@@ -1263,7 +1342,7 @@ static void __kprobes disable_all_kprobes(void)
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
-			if (!arch_trampoline_kprobe(p))
+			if (!arch_trampoline_kprobe(p) && !kprobe_gone(p))
 				arch_disarm_kprobe(p);
 		}
 	}
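
enable_all_kprobes() and disable_all_kprobes() back the kprobes debugfs switch; both now skip gone probes, since arming or disarming them would write a breakpoint into freed module memory. The toggle is exercised by writing 0 or 1 to /sys/kernel/debug/kprobes/enabled.
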