Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--  kernel/kprobes.c  313
1 file changed, 228 insertions(+), 85 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 7ba8cd9845cb..c0fa54b276d9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -43,6 +43,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/kdebug.h>
+#include <linux/memory.h>
 
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -67,7 +68,7 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
 /* NOTE: change this value only with kprobe_mutex held */
-static bool kprobe_enabled;
+static bool kprobes_all_disarmed;
 
 static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
@@ -318,6 +319,22 @@ struct kprobe __kprobes *get_kprobe(void *addr)
 	return NULL;
 }
 
+/* Arm a kprobe with text_mutex */
+static void __kprobes arm_kprobe(struct kprobe *kp)
+{
+	mutex_lock(&text_mutex);
+	arch_arm_kprobe(kp);
+	mutex_unlock(&text_mutex);
+}
+
+/* Disarm a kprobe with text_mutex */
+static void __kprobes disarm_kprobe(struct kprobe *kp)
+{
+	mutex_lock(&text_mutex);
+	arch_disarm_kprobe(kp);
+	mutex_unlock(&text_mutex);
+}
+
 /*
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
@@ -327,7 +344,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kprobe *kp;
 
 	list_for_each_entry_rcu(kp, &p->list, list) {
-		if (kp->pre_handler && !kprobe_gone(kp)) {
+		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
 			set_kprobe_instance(kp);
 			if (kp->pre_handler(kp, regs))
 				return 1;
@@ -343,7 +360,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 	struct kprobe *kp;
 
 	list_for_each_entry_rcu(kp, &p->list, list) {
-		if (kp->post_handler && !kprobe_gone(kp)) {
+		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
 			set_kprobe_instance(kp);
 			kp->post_handler(kp, regs, flags);
 			reset_kprobe_instance();
@@ -517,20 +534,28 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
 }
 
 /*
- * Add the new probe to old_p->list. Fail if this is the
+ * Add the new probe to ap->list. Fail if this is the
  * second jprobe at the address - two jprobes can't coexist
  */
-static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
+static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
+	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
 	if (p->break_handler) {
-		if (old_p->break_handler)
+		if (ap->break_handler)
 			return -EEXIST;
-		list_add_tail_rcu(&p->list, &old_p->list);
-		old_p->break_handler = aggr_break_handler;
+		list_add_tail_rcu(&p->list, &ap->list);
+		ap->break_handler = aggr_break_handler;
 	} else
-		list_add_rcu(&p->list, &old_p->list);
-	if (p->post_handler && !old_p->post_handler)
-		old_p->post_handler = aggr_post_handler;
+		list_add_rcu(&p->list, &ap->list);
+	if (p->post_handler && !ap->post_handler)
+		ap->post_handler = aggr_post_handler;
+
+	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
+		ap->flags &= ~KPROBE_FLAG_DISABLED;
+		if (!kprobes_all_disarmed)
+			/* Arm the breakpoint again. */
+			arm_kprobe(ap);
+	}
 	return 0;
 }
 
@@ -543,6 +568,7 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 	copy_kprobe(p, ap);
 	flush_insn_slot(ap);
 	ap->addr = p->addr;
+	ap->flags = p->flags;
 	ap->pre_handler = aggr_pre_handler;
 	ap->fault_handler = aggr_fault_handler;
 	/* We don't care the kprobe which has gone. */
@@ -565,44 +591,59 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 					  struct kprobe *p)
 {
 	int ret = 0;
-	struct kprobe *ap;
+	struct kprobe *ap = old_p;
+
+	if (old_p->pre_handler != aggr_pre_handler) {
+		/* If old_p is not an aggr_probe, create new aggr_kprobe. */
+		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+		if (!ap)
+			return -ENOMEM;
+		add_aggr_kprobe(ap, old_p);
+	}
 
-	if (kprobe_gone(old_p)) {
+	if (kprobe_gone(ap)) {
 		/*
 		 * Attempting to insert new probe at the same location that
 		 * had a probe in the module vaddr area which already
 		 * freed. So, the instruction slot has already been
 		 * released. We need a new slot for the new probe.
 		 */
-		ret = arch_prepare_kprobe(old_p);
+		ret = arch_prepare_kprobe(ap);
 		if (ret)
+			/*
+			 * Even if fail to allocate new slot, don't need to
+			 * free aggr_probe. It will be used next time, or
+			 * freed by unregister_kprobe.
+			 */
 			return ret;
-	}
-	if (old_p->pre_handler == aggr_pre_handler) {
-		copy_kprobe(old_p, p);
-		ret = add_new_kprobe(old_p, p);
-		ap = old_p;
-	} else {
-		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
-		if (!ap) {
-			if (kprobe_gone(old_p))
-				arch_remove_kprobe(old_p);
-			return -ENOMEM;
-		}
-		add_aggr_kprobe(ap, old_p);
-		copy_kprobe(ap, p);
-		ret = add_new_kprobe(ap, p);
-	}
-	if (kprobe_gone(old_p)) {
+
 		/*
-		 * If the old_p has gone, its breakpoint has been disarmed.
-		 * We have to arm it again after preparing real kprobes.
+		 * Clear gone flag to prevent allocating new slot again, and
+		 * set disabled flag because it is not armed yet.
 		 */
-		ap->flags &= ~KPROBE_FLAG_GONE;
-		if (kprobe_enabled)
-			arch_arm_kprobe(ap);
+		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
+			    | KPROBE_FLAG_DISABLED;
 	}
-	return ret;
+
+	copy_kprobe(ap, p);
+	return add_new_kprobe(ap, p);
+}
+
+/* Try to disable aggr_kprobe, and return 1 if succeeded.*/
+static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
+{
+	struct kprobe *kp;
+
+	list_for_each_entry_rcu(kp, &p->list, list) {
+		if (!kprobe_disabled(kp))
+			/*
+			 * There is an active probe on the list.
+			 * We can't disable aggr_kprobe.
+			 */
+			return 0;
+	}
+	p->flags |= KPROBE_FLAG_DISABLED;
+	return 1;
 }
 
 static int __kprobes in_kprobes_functions(unsigned long addr)
@@ -663,7 +704,9 @@ int __kprobes register_kprobe(struct kprobe *p)
 		return -EINVAL;
 	}
 
-	p->flags = 0;
+	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
+	p->flags &= KPROBE_FLAG_DISABLED;
+
 	/*
 	 * Check if are we probing a module.
 	 */
@@ -699,17 +742,20 @@ int __kprobes register_kprobe(struct kprobe *p)
 		goto out;
 	}
 
+	mutex_lock(&text_mutex);
 	ret = arch_prepare_kprobe(p);
 	if (ret)
-		goto out;
+		goto out_unlock_text;
 
 	INIT_HLIST_NODE(&p->hlist);
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	if (kprobe_enabled)
+	if (!kprobes_all_disarmed && !kprobe_disabled(p))
 		arch_arm_kprobe(p);
 
+out_unlock_text:
+	mutex_unlock(&text_mutex);
 out:
 	mutex_unlock(&kprobe_mutex);
 
@@ -718,26 +764,39 @@ out:
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(register_kprobe);
 
-/*
- * Unregister a kprobe without a scheduler synchronization.
- */
-static int __kprobes __unregister_kprobe_top(struct kprobe *p)
+/* Check passed kprobe is valid and return kprobe in kprobe_table. */
+static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
 {
 	struct kprobe *old_p, *list_p;
 
 	old_p = get_kprobe(p->addr);
 	if (unlikely(!old_p))
-		return -EINVAL;
+		return NULL;
 
 	if (p != old_p) {
 		list_for_each_entry_rcu(list_p, &old_p->list, list)
 			if (list_p == p)
 			/* kprobe p is a valid probe */
-				goto valid_p;
-		return -EINVAL;
+				goto valid;
+		return NULL;
 	}
-valid_p:
+valid:
+	return old_p;
+}
+
+/*
+ * Unregister a kprobe without a scheduler synchronization.
+ */
+static int __kprobes __unregister_kprobe_top(struct kprobe *p)
+{
+	struct kprobe *old_p, *list_p;
+
+	old_p = __get_valid_kprobe(p);
+	if (old_p == NULL)
+		return -EINVAL;
+
 	if (old_p == p ||
 	    (old_p->pre_handler == aggr_pre_handler &&
 	     list_is_singular(&old_p->list))) {
@@ -746,8 +805,8 @@ valid_p:
 		 * enabled and not gone - otherwise, the breakpoint would
 		 * already have been removed. We save on flushing icache.
 		 */
-		if (kprobe_enabled && !kprobe_gone(old_p))
-			arch_disarm_kprobe(p);
+		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
+			disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
 	} else {
 		if (p->break_handler && !kprobe_gone(p))
@@ -761,6 +820,11 @@ valid_p:
 		}
 noclean:
 		list_del_rcu(&p->list);
+		if (!kprobe_disabled(old_p)) {
+			try_to_disable_aggr_kprobe(old_p);
+			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
+				disarm_kprobe(old_p);
+		}
 	}
 	return 0;
 }
@@ -796,11 +860,13 @@ int __kprobes register_kprobes(struct kprobe **kps, int num)
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(register_kprobes);
 
 void __kprobes unregister_kprobe(struct kprobe *p)
 {
 	unregister_kprobes(&p, 1);
 }
+EXPORT_SYMBOL_GPL(unregister_kprobe);
 
 void __kprobes unregister_kprobes(struct kprobe **kps, int num)
 {
@@ -819,6 +885,7 @@ void __kprobes unregister_kprobes(struct kprobe **kps, int num)
 		if (kps[i]->addr)
 			__unregister_kprobe_bottom(kps[i]);
 }
+EXPORT_SYMBOL_GPL(unregister_kprobes);
 
 static struct notifier_block kprobe_exceptions_nb = {
 	.notifier_call = kprobe_exceptions_notify,
@@ -858,16 +925,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(register_jprobes);
 
 int __kprobes register_jprobe(struct jprobe *jp)
 {
 	return register_jprobes(&jp, 1);
 }
+EXPORT_SYMBOL_GPL(register_jprobe);
 
 void __kprobes unregister_jprobe(struct jprobe *jp)
 {
 	unregister_jprobes(&jp, 1);
 }
+EXPORT_SYMBOL_GPL(unregister_jprobe);
 
 void __kprobes unregister_jprobes(struct jprobe **jps, int num)
 {
@@ -887,6 +957,7 @@ void __kprobes unregister_jprobes(struct jprobe **jps, int num)
 			__unregister_kprobe_bottom(&jps[i]->kp);
 	}
 }
+EXPORT_SYMBOL_GPL(unregister_jprobes);
 
 #ifdef CONFIG_KRETPROBES
 /*
@@ -912,10 +983,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 		ri->rp = rp;
 		ri->task = current;
 
-		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
-			spin_unlock_irqrestore(&rp->lock, flags);
+		if (rp->entry_handler && rp->entry_handler(ri, regs))
 			return 0;
-		}
 
 		arch_prepare_kretprobe(ri, regs);
 
@@ -982,6 +1051,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 		free_rp_inst(rp);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(register_kretprobe);
 
 int __kprobes register_kretprobes(struct kretprobe **rps, int num)
 {
@@ -999,11 +1069,13 @@ int __kprobes register_kretprobes(struct kretprobe **rps, int num)
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(register_kretprobes);
 
 void __kprobes unregister_kretprobe(struct kretprobe *rp)
 {
 	unregister_kretprobes(&rp, 1);
 }
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
 
 void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
 {
@@ -1025,24 +1097,30 @@ void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
 		}
 	}
 }
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
 
 #else /* CONFIG_KRETPROBES */
 int __kprobes register_kretprobe(struct kretprobe *rp)
 {
 	return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(register_kretprobe);
 
 int __kprobes register_kretprobes(struct kretprobe **rps, int num)
 {
 	return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(register_kretprobes);
+
 void __kprobes unregister_kretprobe(struct kretprobe *rp)
 {
 }
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
 
 void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
 {
 }
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
 
 static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 					   struct pt_regs *regs)
@@ -1056,6 +1134,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 static void __kprobes kill_kprobe(struct kprobe *p)
 {
 	struct kprobe *kp;
+
 	p->flags |= KPROBE_FLAG_GONE;
 	if (p->pre_handler == aggr_pre_handler) {
 		/*
@@ -1168,8 +1247,8 @@ static int __init init_kprobes(void)
 		}
 	}
 
-	/* By default, kprobes are enabled */
-	kprobe_enabled = true;
+	/* By default, kprobes are armed */
+	kprobes_all_disarmed = false;
 
 	err = arch_init_kprobes();
 	if (!err)
@@ -1197,12 +1276,18 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
 	else
 		kprobe_type = "k";
 	if (sym)
-		seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type,
-			sym, offset, (modname ? modname : " "),
-			(kprobe_gone(p) ? "[GONE]" : ""));
+		seq_printf(pi, "%p %s %s+0x%x %s %s%s\n",
+			p->addr, kprobe_type, sym, offset,
+			(modname ? modname : " "),
+			(kprobe_gone(p) ? "[GONE]" : ""),
+			((kprobe_disabled(p) && !kprobe_gone(p)) ?
+			 "[DISABLED]" : ""));
 	else
-		seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr,
-			(kprobe_gone(p) ? "[GONE]" : ""));
+		seq_printf(pi, "%p %s %p %s%s\n",
+			p->addr, kprobe_type, p->addr,
+			(kprobe_gone(p) ? "[GONE]" : ""),
+			((kprobe_disabled(p) && !kprobe_gone(p)) ?
+			 "[DISABLED]" : ""));
 }
 
 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -1267,7 +1352,72 @@ static struct file_operations debugfs_kprobes_operations = {
 	.release        = seq_release,
 };
 
-static void __kprobes enable_all_kprobes(void)
+/* Disable one kprobe */
+int __kprobes disable_kprobe(struct kprobe *kp)
+{
+	int ret = 0;
+	struct kprobe *p;
+
+	mutex_lock(&kprobe_mutex);
+
+	/* Check whether specified probe is valid. */
+	p = __get_valid_kprobe(kp);
+	if (unlikely(p == NULL)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* If the probe is already disabled (or gone), just return */
+	if (kprobe_disabled(kp))
+		goto out;
+
+	kp->flags |= KPROBE_FLAG_DISABLED;
+	if (p != kp)
+		/* When kp != p, p is always enabled. */
+		try_to_disable_aggr_kprobe(p);
+
+	if (!kprobes_all_disarmed && kprobe_disabled(p))
+		disarm_kprobe(p);
+out:
+	mutex_unlock(&kprobe_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(disable_kprobe);
+
+/* Enable one kprobe */
+int __kprobes enable_kprobe(struct kprobe *kp)
+{
+	int ret = 0;
+	struct kprobe *p;
+
+	mutex_lock(&kprobe_mutex);
+
+	/* Check whether specified probe is valid. */
+	p = __get_valid_kprobe(kp);
+	if (unlikely(p == NULL)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (kprobe_gone(kp)) {
+		/* This kprobe has gone, we couldn't enable it. */
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!kprobes_all_disarmed && kprobe_disabled(p))
+		arm_kprobe(p);
+
+	p->flags &= ~KPROBE_FLAG_DISABLED;
+	if (p != kp)
+		kp->flags &= ~KPROBE_FLAG_DISABLED;
+out:
+	mutex_unlock(&kprobe_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(enable_kprobe);
+
+static void __kprobes arm_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct hlist_node *node;
@@ -1276,18 +1426,20 @@ static void __kprobes enable_all_kprobes(void)
 
 	mutex_lock(&kprobe_mutex);
 
-	/* If kprobes are already enabled, just return */
-	if (kprobe_enabled)
+	/* If kprobes are armed, just return */
+	if (!kprobes_all_disarmed)
 		goto already_enabled;
 
+	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
-			if (!kprobe_gone(p))
+			if (!kprobe_disabled(p))
 				arch_arm_kprobe(p);
 	}
+	mutex_unlock(&text_mutex);
 
-	kprobe_enabled = true;
+	kprobes_all_disarmed = false;
 	printk(KERN_INFO "Kprobes globally enabled\n");
 
 already_enabled:
@@ -1295,7 +1447,7 @@ already_enabled:
 	return;
 }
 
-static void __kprobes disable_all_kprobes(void)
+static void __kprobes disarm_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct hlist_node *node;
@@ -1304,20 +1456,22 @@ static void __kprobes disable_all_kprobes(void)
 
 	mutex_lock(&kprobe_mutex);
 
-	/* If kprobes are already disabled, just return */
-	if (!kprobe_enabled)
+	/* If kprobes are already disarmed, just return */
+	if (kprobes_all_disarmed)
 		goto already_disabled;
 
-	kprobe_enabled = false;
+	kprobes_all_disarmed = true;
 	printk(KERN_INFO "Kprobes globally disabled\n");
+	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
-			if (!arch_trampoline_kprobe(p) && !kprobe_gone(p))
+			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
 				arch_disarm_kprobe(p);
 		}
 	}
 
+	mutex_unlock(&text_mutex);
 	mutex_unlock(&kprobe_mutex);
 	/* Allow all currently running kprobes to complete */
 	synchronize_sched();
@@ -1338,7 +1492,7 @@ static ssize_t read_enabled_file_bool(struct file *file,
 {
 	char buf[3];
 
-	if (kprobe_enabled)
+	if (!kprobes_all_disarmed)
 		buf[0] = '1';
 	else
 		buf[0] = '0';
@@ -1361,12 +1515,12 @@ static ssize_t write_enabled_file_bool(struct file *file,
 	case 'y':
 	case 'Y':
 	case '1':
-		enable_all_kprobes();
+		arm_all_kprobes();
 		break;
 	case 'n':
 	case 'N':
 	case '0':
-		disable_all_kprobes();
+		disarm_all_kprobes();
 		break;
 	}
 
@@ -1409,16 +1563,5 @@ late_initcall(debugfs_kprobe_init);
 
 module_init(init_kprobes);
 
-EXPORT_SYMBOL_GPL(register_kprobe);
-EXPORT_SYMBOL_GPL(unregister_kprobe);
-EXPORT_SYMBOL_GPL(register_kprobes);
-EXPORT_SYMBOL_GPL(unregister_kprobes);
-EXPORT_SYMBOL_GPL(register_jprobe);
-EXPORT_SYMBOL_GPL(unregister_jprobe);
-EXPORT_SYMBOL_GPL(register_jprobes);
-EXPORT_SYMBOL_GPL(unregister_jprobes);
+/* defined in arch/.../kernel/kprobes.c */
 EXPORT_SYMBOL_GPL(jprobe_return);
-EXPORT_SYMBOL_GPL(register_kretprobe);
-EXPORT_SYMBOL_GPL(unregister_kretprobe);
-EXPORT_SYMBOL_GPL(register_kretprobes);
-EXPORT_SYMBOL_GPL(unregister_kretprobes);
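For context only, not part of the patch: a minimal module sketch of how the interface added above might be used, with register_kprobe() accepting KPROBE_FLAG_DISABLED and the newly exported enable_kprobe()/disable_kprobe() toggling the breakpoint. The probed symbol do_fork and the module/function names are illustrative assumptions.

/* Minimal sketch (not part of this patch): register a probe in the
 * disabled state, then arm and disarm it with the exported API.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("probe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* example target only */
	.pre_handler = handler_pre,
	.flags = KPROBE_FLAG_DISABLED,	/* registered but not armed */
};

static int __init probe_init(void)
{
	int ret = register_kprobe(&kp);	/* breakpoint stays disarmed */
	if (ret < 0)
		return ret;
	return enable_kprobe(&kp);	/* arm it when actually needed */
}

static void __exit probe_exit(void)
{
	disable_kprobe(&kp);		/* disarm without unregistering */
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");

Registering with KPROBE_FLAG_DISABLED keeps the breakpoint unarmed until enable_kprobe() is called; arming and disarming go through the arm_kprobe()/disarm_kprobe() helpers introduced above, which serialize text modification under text_mutex.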