Diffstat (limited to 'kernel/kprobes.c'):
 kernel/kprobes.c | 283 +++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 176 insertions(+), 107 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9f8a3f25259a..7ba8cd9845cb 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -69,7 +69,7 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 /* NOTE: change this value only with kprobe_mutex held */
 static bool kprobe_enabled;
 
-DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
+static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
         spinlock_t lock ____cacheline_aligned_in_smp;
@@ -115,6 +115,7 @@ enum kprobe_slot_state {
         SLOT_USED = 2,
 };
 
+static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
 static struct hlist_head kprobe_insn_pages;
 static int kprobe_garbage_slots;
 static int collect_garbage_slots(void);
@@ -122,7 +123,7 @@ static int collect_garbage_slots(void);
 static int __kprobes check_safety(void)
 {
         int ret = 0;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
         ret = freeze_processes();
         if (ret == 0) {
                 struct task_struct *p, *q;
@@ -144,10 +145,10 @@ loop_end:
 }
 
 /**
- * get_insn_slot() - Find a slot on an executable page for an instruction.
+ * __get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
  */
-kprobe_opcode_t __kprobes *get_insn_slot(void)
+static kprobe_opcode_t __kprobes *__get_insn_slot(void)
 {
         struct kprobe_insn_page *kip;
         struct hlist_node *pos;
@@ -196,6 +197,15 @@ kprobe_opcode_t __kprobes *get_insn_slot(void)
         return kip->insns;
 }
 
+kprobe_opcode_t __kprobes *get_insn_slot(void)
+{
+        kprobe_opcode_t *ret;
+        mutex_lock(&kprobe_insn_mutex);
+        ret = __get_insn_slot();
+        mutex_unlock(&kprobe_insn_mutex);
+        return ret;
+}
+
 /* Return 1 if all garbages are collected, otherwise 0. */
 static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
 {
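The allocation path is split so the lock-free helper can be used by code that already holds kprobe_insn_mutex, while the exported get_insn_slot() keeps its old signature for architecture code. Architectures call it while preparing a probe; a sketch modeled on the x86 arch_prepare_kprobe() (details vary per architecture):

    int __kprobes arch_prepare_kprobe(struct kprobe *p)
    {
            /* reserve a slot on an executable page for the copied insn */
            p->ainsn.insn = get_insn_slot();
            if (!p->ainsn.insn)
                    return -ENOMEM;
            /* copy the original instruction into the slot (arch helper) */
            arch_copy_kprobe(p);
            return 0;
    }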
@@ -226,9 +236,13 @@ static int __kprobes collect_garbage_slots(void)
 {
         struct kprobe_insn_page *kip;
         struct hlist_node *pos, *next;
+        int safety;
 
         /* Ensure no-one is preepmted on the garbages */
-        if (check_safety() != 0)
+        mutex_unlock(&kprobe_insn_mutex);
+        safety = check_safety();
+        mutex_lock(&kprobe_insn_mutex);
+        if (safety != 0)
                 return -EAGAIN;
 
         hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
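check_safety() can sleep: under CONFIG_PREEMPT plus CONFIG_FREEZER it calls freeze_processes() to ensure no preempted thread is resting on a garbage slot. kprobe_insn_mutex is therefore dropped around the call and retaken afterwards; the list walk only starts once the mutex is held again, so a page list changed in the window is re-read rather than assumed stable. The shape of the pattern (a generic sketch, not a specific kernel API):

    mutex_unlock(&lock);                /* give up the lock... */
    ret = blocking_operation();         /* ...because this may sleep */
    mutex_lock(&lock);                  /* retake it... */
    if (ret != 0)                       /* ...and revalidate before use */
            return -EAGAIN;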
@@ -251,6 +265,7 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
         struct kprobe_insn_page *kip;
         struct hlist_node *pos;
 
+        mutex_lock(&kprobe_insn_mutex);
         hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
                 if (kip->insns <= slot &&
                     slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
@@ -267,6 +282,8 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
 
         if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
                 collect_garbage_slots();
+
+        mutex_unlock(&kprobe_insn_mutex);
 }
 #endif
 
@@ -310,7 +327,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
         struct kprobe *kp;
 
         list_for_each_entry_rcu(kp, &p->list, list) {
-                if (kp->pre_handler) {
+                if (kp->pre_handler && !kprobe_gone(kp)) {
                         set_kprobe_instance(kp);
                         if (kp->pre_handler(kp, regs))
                                 return 1;
@@ -326,7 +343,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
         struct kprobe *kp;
 
         list_for_each_entry_rcu(kp, &p->list, list) {
-                if (kp->post_handler) {
+                if (kp->post_handler && !kprobe_gone(kp)) {
                         set_kprobe_instance(kp);
                         kp->post_handler(kp, regs, flags);
                         reset_kprobe_instance();
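Both aggregate handlers now skip chained probes whose underlying code has vanished. kprobe_gone() is expected to be a one-line flag test added to include/linux/kprobes.h alongside this patch, approximately:

    /* Sketch of the accompanying header change (flag value illustrative) */
    #define KPROBE_FLAG_GONE	1	/* breakpoint has already gone */

    static inline int kprobe_gone(struct kprobe *p)
    {
            return p->flags & KPROBE_FLAG_GONE;
    }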
@@ -393,7 +410,7 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
                 hlist_add_head(&ri->hlist, head);
 }
 
-void kretprobe_hash_lock(struct task_struct *tsk,
+void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
                          struct hlist_head **head, unsigned long *flags)
 {
         unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
@@ -404,13 +421,15 @@ void kretprobe_hash_lock(struct task_struct *tsk,
         spin_lock_irqsave(hlist_lock, *flags);
 }
 
-static void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
+static void __kprobes kretprobe_table_lock(unsigned long hash,
+                                           unsigned long *flags)
 {
         spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
         spin_lock_irqsave(hlist_lock, *flags);
 }
 
-void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags)
+void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
+                                     unsigned long *flags)
 {
         unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
         spinlock_t *hlist_lock;
@@ -419,7 +438,7 @@ void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags)
         spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
-void kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
+void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
 {
         spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
         spin_unlock_irqrestore(hlist_lock, *flags);
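Adding __kprobes to the kretprobe hash-lock helpers places them in the .kprobes.text section, which the registration path blacklists; without it, a probe planted inside these helpers could recurse on the very locks taken from the kretprobe trampoline. The marker itself is just a section attribute (abridged from include/linux/kprobes.h of this era):

    #define __kprobes	__attribute__((__section__(".kprobes.text")))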
@@ -526,9 +545,10 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
         ap->addr = p->addr;
         ap->pre_handler = aggr_pre_handler;
         ap->fault_handler = aggr_fault_handler;
-        if (p->post_handler)
+        /* We don't care the kprobe which has gone. */
+        if (p->post_handler && !kprobe_gone(p))
                 ap->post_handler = aggr_post_handler;
-        if (p->break_handler)
+        if (p->break_handler && !kprobe_gone(p))
                 ap->break_handler = aggr_break_handler;
 
         INIT_LIST_HEAD(&ap->list);
@@ -547,17 +567,41 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
         int ret = 0;
         struct kprobe *ap;
 
+        if (kprobe_gone(old_p)) {
+                /*
+                 * Attempting to insert new probe at the same location that
+                 * had a probe in the module vaddr area which already
+                 * freed. So, the instruction slot has already been
+                 * released. We need a new slot for the new probe.
+                 */
+                ret = arch_prepare_kprobe(old_p);
+                if (ret)
+                        return ret;
+        }
         if (old_p->pre_handler == aggr_pre_handler) {
                 copy_kprobe(old_p, p);
                 ret = add_new_kprobe(old_p, p);
+                ap = old_p;
         } else {
                 ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
-                if (!ap)
+                if (!ap) {
+                        if (kprobe_gone(old_p))
+                                arch_remove_kprobe(old_p);
                         return -ENOMEM;
+                }
                 add_aggr_kprobe(ap, old_p);
                 copy_kprobe(ap, p);
                 ret = add_new_kprobe(ap, p);
         }
+        if (kprobe_gone(old_p)) {
+                /*
+                 * If the old_p has gone, its breakpoint has been disarmed.
+                 * We have to arm it again after preparing real kprobes.
+                 */
+                ap->flags &= ~KPROBE_FLAG_GONE;
+                if (kprobe_enabled)
+                        arch_arm_kprobe(ap);
+        }
         return ret;
 }
 
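These kprobe_gone() branches cover the module reload case: a probe registered on a module symbol outlives the module as a disarmed GONE aggregate, and registering a fresh probe at the same (reused) address must re-prepare an instruction slot and re-arm the breakpoint. The sequence, with hypothetical names:

    register_kprobe(&p1);       /* probes foo() in mymod.ko */
    /* rmmod mymod: the notifier marks p1 GONE and frees its insn slot */
    /* insmod mymod: foo() may land at the same vaddr again */
    register_kprobe(&p2);       /* hits the GONE old_p: arch_prepare_kprobe()
                                 * allocates a new slot, the flag is cleared,
                                 * and arch_arm_kprobe() re-arms the probe */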
@@ -600,8 +644,7 @@ static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
         return (kprobe_opcode_t *)(((char *)addr) + p->offset);
 }
 
-static int __kprobes __register_kprobe(struct kprobe *p,
-        unsigned long called_from)
+int __kprobes register_kprobe(struct kprobe *p)
 {
         int ret = 0;
         struct kprobe *old_p;
@@ -620,28 +663,30 @@ static int __kprobes __register_kprobe(struct kprobe *p,
                 return -EINVAL;
         }
 
-        p->mod_refcounted = 0;
-
+        p->flags = 0;
         /*
          * Check if are we probing a module.
          */
         probed_mod = __module_text_address((unsigned long) p->addr);
         if (probed_mod) {
-                struct module *calling_mod;
-                calling_mod = __module_text_address(called_from);
                 /*
-                 * We must allow modules to probe themself and in this case
-                 * avoid incrementing the module refcount, so as to allow
-                 * unloading of self probing modules.
+                 * We must hold a refcount of the probed module while updating
+                 * its code to prohibit unexpected unloading.
                  */
-                if (calling_mod && calling_mod != probed_mod) {
-                        if (unlikely(!try_module_get(probed_mod))) {
-                                preempt_enable();
-                                return -EINVAL;
-                        }
-                        p->mod_refcounted = 1;
-                } else
-                        probed_mod = NULL;
+                if (unlikely(!try_module_get(probed_mod))) {
+                        preempt_enable();
+                        return -EINVAL;
+                }
+                /*
+                 * If the module freed .init.text, we couldn't insert
+                 * kprobes in there.
+                 */
+                if (within_module_init((unsigned long)p->addr, probed_mod) &&
+                    probed_mod->state != MODULE_STATE_COMING) {
+                        module_put(probed_mod);
+                        preempt_enable();
+                        return -EINVAL;
+                }
         }
         preempt_enable();
 
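With called_from gone, registration no longer pins a probed module for the probe's lifetime; try_module_get() only holds the module across the code patching, and module_put() drops it on the way out (see the next hunk). Unload safety is instead handled by the module notifier added later in this patch. A caller-side sketch (handler and symbol names are examples):

    static struct kprobe kp = {
            .symbol_name = "some_module_func",  /* resolved via kallsyms */
            .pre_handler = my_pre_handler,
    };
    /* register_kprobe() takes and releases the module refcount itself;
     * callers no longer need the old self-probing special case. */
    ret = register_kprobe(&kp);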
@@ -668,8 +713,9 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 out:
         mutex_unlock(&kprobe_mutex);
 
-        if (ret && probed_mod)
+        if (probed_mod)
                 module_put(probed_mod);
+
         return ret;
 }
 
@@ -697,16 +743,16 @@ valid_p:
             list_is_singular(&old_p->list))) {
                 /*
                  * Only probe on the hash list. Disarm only if kprobes are
-                 * enabled - otherwise, the breakpoint would already have
-                 * been removed. We save on flushing icache.
+                 * enabled and not gone - otherwise, the breakpoint would
+                 * already have been removed. We save on flushing icache.
                  */
-                if (kprobe_enabled)
+                if (kprobe_enabled && !kprobe_gone(old_p))
                         arch_disarm_kprobe(p);
                 hlist_del_rcu(&old_p->hlist);
         } else {
-                if (p->break_handler)
+                if (p->break_handler && !kprobe_gone(p))
                         old_p->break_handler = NULL;
-                if (p->post_handler) {
+                if (p->post_handler && !kprobe_gone(p)) {
                         list_for_each_entry_rcu(list_p, &old_p->list, list) {
                                 if ((list_p != p) && (list_p->post_handler))
                                         goto noclean;
@@ -721,39 +767,27 @@ noclean:
 
 static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
 {
-        struct module *mod;
         struct kprobe *old_p;
 
-        if (p->mod_refcounted) {
-                /*
-                 * Since we've already incremented refcount,
-                 * we don't need to disable preemption.
-                 */
-                mod = module_text_address((unsigned long)p->addr);
-                if (mod)
-                        module_put(mod);
-        }
-
-        if (list_empty(&p->list) || list_is_singular(&p->list)) {
-                if (!list_empty(&p->list)) {
-                        /* "p" is the last child of an aggr_kprobe */
-                        old_p = list_entry(p->list.next, struct kprobe, list);
-                        list_del(&p->list);
-                        kfree(old_p);
-                }
+        if (list_empty(&p->list))
                 arch_remove_kprobe(p);
+        else if (list_is_singular(&p->list)) {
+                /* "p" is the last child of an aggr_kprobe */
+                old_p = list_entry(p->list.next, struct kprobe, list);
+                list_del(&p->list);
+                arch_remove_kprobe(old_p);
+                kfree(old_p);
         }
 }
 
-static int __register_kprobes(struct kprobe **kps, int num,
-        unsigned long called_from)
+int __kprobes register_kprobes(struct kprobe **kps, int num)
 {
         int i, ret = 0;
 
         if (num <= 0)
                 return -EINVAL;
         for (i = 0; i < num; i++) {
-                ret = __register_kprobe(kps[i], called_from);
+                ret = register_kprobe(kps[i]);
                 if (ret < 0) {
                         if (i > 0)
                                 unregister_kprobes(kps, i);
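register_kprobes() is now the exported batch API rather than an internal helper threaded with the caller's return address. Its failure semantics are all-or-nothing: if probe i fails, probes 0..i-1 are unregistered before the error is returned. Usage sketch (probe variables hypothetical):

    struct kprobe *kps[] = { &kp1, &kp2, &kp3 };

    ret = register_kprobes(kps, ARRAY_SIZE(kps));
    if (ret < 0)        /* nothing is left registered on failure */
            printk(KERN_ERR "batch registration failed: %d\n", ret);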
@@ -763,26 +797,11 @@ static int __register_kprobes(struct kprobe **kps, int num,
         return ret;
 }
 
-/*
- * Registration and unregistration functions for kprobe.
- */
-int __kprobes register_kprobe(struct kprobe *p)
-{
-        return __register_kprobes(&p, 1,
-                                  (unsigned long)__builtin_return_address(0));
-}
-
 void __kprobes unregister_kprobe(struct kprobe *p)
 {
         unregister_kprobes(&p, 1);
 }
 
-int __kprobes register_kprobes(struct kprobe **kps, int num)
-{
-        return __register_kprobes(kps, num,
-                                  (unsigned long)__builtin_return_address(0));
-}
-
 void __kprobes unregister_kprobes(struct kprobe **kps, int num)
 {
         int i;
@@ -811,8 +830,7 @@ unsigned long __weak arch_deref_entry_point(void *entry)
         return (unsigned long)entry;
 }
 
-static int __register_jprobes(struct jprobe **jps, int num,
-        unsigned long called_from)
+int __kprobes register_jprobes(struct jprobe **jps, int num)
 {
         struct jprobe *jp;
         int ret = 0, i;
@@ -830,7 +848,7 @@ static int __register_jprobes(struct jprobe **jps, int num,
                 /* Todo: Verify probepoint is a function entry point */
                 jp->kp.pre_handler = setjmp_pre_handler;
                 jp->kp.break_handler = longjmp_break_handler;
-                ret = __register_kprobe(&jp->kp, called_from);
+                ret = register_kprobe(&jp->kp);
         }
         if (ret < 0) {
                 if (i > 0)
@@ -843,8 +861,7 @@ static int __register_jprobes(struct jprobe **jps, int num,
 
 int __kprobes register_jprobe(struct jprobe *jp)
 {
-        return __register_jprobes(&jp, 1,
-                (unsigned long)__builtin_return_address(0));
+        return register_jprobes(&jp, 1);
 }
 
 void __kprobes unregister_jprobe(struct jprobe *jp)
850 | void __kprobes unregister_jprobe(struct jprobe *jp) | 867 | void __kprobes unregister_jprobe(struct jprobe *jp) |
@@ -852,12 +869,6 @@ void __kprobes unregister_jprobe(struct jprobe *jp) | |||
852 | unregister_jprobes(&jp, 1); | 869 | unregister_jprobes(&jp, 1); |
853 | } | 870 | } |
854 | 871 | ||
855 | int __kprobes register_jprobes(struct jprobe **jps, int num) | ||
856 | { | ||
857 | return __register_jprobes(jps, num, | ||
858 | (unsigned long)__builtin_return_address(0)); | ||
859 | } | ||
860 | |||
861 | void __kprobes unregister_jprobes(struct jprobe **jps, int num) | 872 | void __kprobes unregister_jprobes(struct jprobe **jps, int num) |
862 | { | 873 | { |
863 | int i; | 874 | int i; |
@@ -920,8 +931,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
         return 0;
 }
 
-static int __kprobes __register_kretprobe(struct kretprobe *rp,
-                                          unsigned long called_from)
+int __kprobes register_kretprobe(struct kretprobe *rp)
 {
         int ret = 0;
         struct kretprobe_instance *inst;
@@ -967,21 +977,20 @@ static int __kprobes __register_kretprobe(struct kretprobe *rp,
 
         rp->nmissed = 0;
         /* Establish function entry probe point */
-        ret = __register_kprobe(&rp->kp, called_from);
+        ret = register_kprobe(&rp->kp);
         if (ret != 0)
                 free_rp_inst(rp);
         return ret;
 }
 
-static int __register_kretprobes(struct kretprobe **rps, int num,
-        unsigned long called_from)
+int __kprobes register_kretprobes(struct kretprobe **rps, int num)
 {
         int ret = 0, i;
 
         if (num <= 0)
                 return -EINVAL;
         for (i = 0; i < num; i++) {
-                ret = __register_kretprobe(rps[i], called_from);
+                ret = register_kretprobe(rps[i]);
                 if (ret < 0) {
                         if (i > 0)
                                 unregister_kretprobes(rps, i);
@@ -991,23 +1000,11 @@ static int __register_kretprobes(struct kretprobe **rps, int num,
         return ret;
 }
 
-int __kprobes register_kretprobe(struct kretprobe *rp)
-{
-        return __register_kretprobes(&rp, 1,
-                        (unsigned long)__builtin_return_address(0));
-}
-
 void __kprobes unregister_kretprobe(struct kretprobe *rp)
 {
         unregister_kretprobes(&rp, 1);
 }
 
-int __kprobes register_kretprobes(struct kretprobe **rps, int num)
-{
-        return __register_kretprobes(rps, num,
-                        (unsigned long)__builtin_return_address(0));
-}
-
 void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
 {
         int i;
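As with kprobes and jprobes, the kretprobe single/batch entry points collapse into plain exported functions. A minimal kretprobe under the reworked API (a sketch; the probed symbol is only an example):

    static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
            /* runs when the probed function returns */
            return 0;
    }

    static struct kretprobe my_rp = {
            .handler        = ret_handler,
            .kp.symbol_name = "do_fork",    /* example target */
            .maxactive      = 20,           /* concurrent instances */
    };

    /* in module init: */
    ret = register_kretprobe(&my_rp);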
@@ -1055,6 +1052,72 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 
 #endif /* CONFIG_KRETPROBES */
 
+/* Set the kprobe gone and remove its instruction buffer. */
+static void __kprobes kill_kprobe(struct kprobe *p)
+{
+        struct kprobe *kp;
+        p->flags |= KPROBE_FLAG_GONE;
+        if (p->pre_handler == aggr_pre_handler) {
+                /*
+                 * If this is an aggr_kprobe, we have to list all the
+                 * chained probes and mark them GONE.
+                 */
+                list_for_each_entry_rcu(kp, &p->list, list)
+                        kp->flags |= KPROBE_FLAG_GONE;
+                p->post_handler = NULL;
+                p->break_handler = NULL;
+        }
+        /*
+         * Here, we can remove insn_slot safely, because no thread calls
+         * the original probed function (which will be freed soon) any more.
+         */
+        arch_remove_kprobe(p);
+}
+
+/* Module notifier call back, checking kprobes on the module */
+static int __kprobes kprobes_module_callback(struct notifier_block *nb,
+                                             unsigned long val, void *data)
+{
+        struct module *mod = data;
+        struct hlist_head *head;
+        struct hlist_node *node;
+        struct kprobe *p;
+        unsigned int i;
+        int checkcore = (val == MODULE_STATE_GOING);
+
+        if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
+                return NOTIFY_DONE;
+
+        /*
+         * When MODULE_STATE_GOING was notified, both of module .text and
+         * .init.text sections would be freed. When MODULE_STATE_LIVE was
+         * notified, only .init.text section would be freed. We need to
+         * disable kprobes which have been inserted in the sections.
+         */
+        mutex_lock(&kprobe_mutex);
+        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+                head = &kprobe_table[i];
+                hlist_for_each_entry_rcu(p, node, head, hlist)
+                        if (within_module_init((unsigned long)p->addr, mod) ||
+                            (checkcore &&
+                             within_module_core((unsigned long)p->addr, mod))) {
+                                /*
+                                 * The vaddr this probe is installed will soon
+                                 * be vfreed buy not synced to disk. Hence,
+                                 * disarming the breakpoint isn't needed.
+                                 */
+                                kill_kprobe(p);
+                        }
+        }
+        mutex_unlock(&kprobe_mutex);
+        return NOTIFY_DONE;
+}
+
+static struct notifier_block kprobe_module_nb = {
+        .notifier_call = kprobes_module_callback,
+        .priority = 0
+};
+
 static int __init init_kprobes(void)
 {
         int i, err = 0;
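kill_kprobe() and this notifier are the heart of the patch: instead of blocking module unload with a long-held refcount, kprobes now reacts to module state transitions. The states the callback distinguishes (a summary of the module loader's semantics, not new code):

    MODULE_STATE_COMING   /* being loaded; .init.text still mapped     */
    MODULE_STATE_LIVE     /* init finished; .init.text has been freed  */
    MODULE_STATE_GOING    /* being unloaded; core .text will be freed  */

So a LIVE notification only invalidates probes placed in the module's init section, while GOING (the checkcore case) also sweeps probes in the module core.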
@@ -1111,6 +1174,9 @@ static int __init init_kprobes(void)
         err = arch_init_kprobes();
         if (!err)
                 err = register_die_notifier(&kprobe_exceptions_nb);
+        if (!err)
+                err = register_module_notifier(&kprobe_module_nb);
+
         kprobes_initialized = (err == 0);
 
         if (!err)
@@ -1131,10 +1197,12 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
         else
                 kprobe_type = "k";
         if (sym)
-                seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type,
-                        sym, offset, (modname ? modname : " "));
+                seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type,
+                        sym, offset, (modname ? modname : " "),
+                        (kprobe_gone(p) ? "[GONE]" : ""));
         else
-                seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
+                seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr,
+                        (kprobe_gone(p) ? "[GONE]" : ""));
 }
 
 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
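With the extra %s, each line of the debugfs listing (conventionally /sys/kernel/debug/kprobes/list) gains a trailing [GONE] marker for dead probes. An illustrative line for a kretprobe on an unloaded module's function (address and names invented):

    c88f4000 r mymod_func+0x0 mymod [GONE]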
@@ -1215,7 +1283,8 @@ static void __kprobes enable_all_kprobes(void)
         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                 head = &kprobe_table[i];
                 hlist_for_each_entry_rcu(p, node, head, hlist)
-                        arch_arm_kprobe(p);
+                        if (!kprobe_gone(p))
+                                arch_arm_kprobe(p);
         }
 
         kprobe_enabled = true;
@@ -1244,7 +1313,7 @@ static void __kprobes disable_all_kprobes(void)
         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                 head = &kprobe_table[i];
                 hlist_for_each_entry_rcu(p, node, head, hlist) {
-                        if (!arch_trampoline_kprobe(p))
+                        if (!arch_trampoline_kprobe(p) && !kprobe_gone(p))
                                 arch_disarm_kprobe(p);
                 }
         }