diff options
author | Masami Hiramatsu <mhiramat@redhat.com> | 2009-04-06 22:01:02 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-07 11:31:08 -0400 |
commit | de5bd88d5a5cce3cacea904d3503e5ebdb3852a2 (patch) | |
tree | da24ac8b38d371ee03a21ed0f3647c518689ebd3 | |
parent | e579abeb58eb4b8d7321c6eb44dd9e2d0cbaebaa (diff) |
kprobes: support per-kprobe disabling
Add disable_kprobe() and enable_kprobe() to disable/enable kprobes
temporarily.
disable_kprobe() asynchronously disables probe handlers of the specified
kprobe. So, after calling it, some handlers can still be called for a while.
enable_kprobe() enables specified kprobe.
aggr_pre_handler and aggr_post_handler check disabled probes. On the
other hand aggr_break_handler and aggr_fault_handler don't check it
because these handlers will be called while executing pre or post handlers
and usually those help error handling.
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | Documentation/kprobes.txt | 34 | ||||
-rw-r--r-- | include/linux/kprobes.h | 23 | ||||
-rw-r--r-- | kernel/kprobes.c | 167 |
3 files changed, 191 insertions, 33 deletions
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index 48b3de90eb1e..f609af242d6c 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt | |||
@@ -212,7 +212,9 @@ hit, Kprobes calls kp->pre_handler. After the probed instruction | |||
212 | is single-stepped, Kprobe calls kp->post_handler. If a fault | 212 | is single-stepped, Kprobe calls kp->post_handler. If a fault |
213 | occurs during execution of kp->pre_handler or kp->post_handler, | 213 | occurs during execution of kp->pre_handler or kp->post_handler, |
214 | or during single-stepping of the probed instruction, Kprobes calls | 214 | or during single-stepping of the probed instruction, Kprobes calls |
215 | kp->fault_handler. Any or all handlers can be NULL. | 215 | kp->fault_handler. Any or all handlers can be NULL. If kp->flags |
216 | is set KPROBE_FLAG_DISABLED, that kp will be registered but disabled, | ||
217 | so, its handlers aren't hit until calling enable_kprobe(kp). | ||
216 | 218 | ||
217 | NOTE: | 219 | NOTE: |
218 | 1. With the introduction of the "symbol_name" field to struct kprobe, | 220 | 1. With the introduction of the "symbol_name" field to struct kprobe, |
@@ -363,6 +365,22 @@ probes) in the specified array, they clear the addr field of those | |||
363 | incorrect probes. However, other probes in the array are | 365 | incorrect probes. However, other probes in the array are |
364 | unregistered correctly. | 366 | unregistered correctly. |
365 | 367 | ||
368 | 4.7 disable_kprobe | ||
369 | |||
370 | #include <linux/kprobes.h> | ||
371 | int disable_kprobe(struct kprobe *kp); | ||
372 | |||
373 | Temporarily disables the specified kprobe. You can enable it again by using | ||
374 | enable_kprobe(). You must specify the kprobe which has been registered. | ||
375 | |||
376 | 4.8 enable_kprobe | ||
377 | |||
378 | #include <linux/kprobes.h> | ||
379 | int enable_kprobe(struct kprobe *kp); | ||
380 | |||
381 | Enables kprobe which has been disabled by disable_kprobe(). You must specify | ||
382 | the kprobe which has been registered. | ||
383 | |||
366 | 5. Kprobes Features and Limitations | 384 | 5. Kprobes Features and Limitations |
367 | 385 | ||
368 | Kprobes allows multiple probes at the same address. Currently, | 386 | Kprobes allows multiple probes at the same address. Currently, |
@@ -500,10 +518,14 @@ the probe. If the probed function belongs to a module, the module name | |||
500 | is also specified. Following columns show probe status. If the probe is on | 518 | is also specified. Following columns show probe status. If the probe is on |
501 | a virtual address that is no longer valid (module init sections, module | 519 | a virtual address that is no longer valid (module init sections, module |
502 | virtual addresses that correspond to modules that've been unloaded), | 520 | virtual addresses that correspond to modules that've been unloaded), |
503 | such probes are marked with [GONE]. | 521 | such probes are marked with [GONE]. If the probe is temporarily disabled, |
522 | such probes are marked with [DISABLED]. | ||
504 | 523 | ||
505 | /debug/kprobes/enabled: Turn kprobes ON/OFF | 524 | /debug/kprobes/enabled: Turn kprobes ON/OFF forcibly. |
506 | 525 | ||
507 | Provides a knob to globally turn registered kprobes ON or OFF. By default, | 526 | Provides a knob to globally and forcibly turn registered kprobes ON or OFF. |
508 | all kprobes are enabled. By echoing "0" to this file, all registered probes | 527 | By default, all kprobes are enabled. By echoing "0" to this file, all |
509 | will be disarmed, till such time a "1" is echoed to this file. | 528 | registered probes will be disarmed, till such time a "1" is echoed to this |
529 | file. Note that this knob just disarms and arms all kprobes and doesn't | ||
530 | change each probe's disabling state. This means that disabled kprobes (marked | ||
531 | [DISABLED]) will not be enabled if you turn ON all kprobes by this knob. | ||
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 39826a678364..1071cfddddc9 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
@@ -112,18 +112,28 @@ struct kprobe { | |||
112 | /* copy of the original instruction */ | 112 | /* copy of the original instruction */ |
113 | struct arch_specific_insn ainsn; | 113 | struct arch_specific_insn ainsn; |
114 | 114 | ||
115 | /* Indicates various status flags. Protected by kprobe_mutex. */ | 115 | /* |
116 | * Indicates various status flags. | ||
117 | * Protected by kprobe_mutex after this kprobe is registered. | ||
118 | */ | ||
116 | u32 flags; | 119 | u32 flags; |
117 | }; | 120 | }; |
118 | 121 | ||
119 | /* Kprobe status flags */ | 122 | /* Kprobe status flags */ |
120 | #define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ | 123 | #define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ |
124 | #define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */ | ||
121 | 125 | ||
126 | /* Has this kprobe gone ? */ | ||
122 | static inline int kprobe_gone(struct kprobe *p) | 127 | static inline int kprobe_gone(struct kprobe *p) |
123 | { | 128 | { |
124 | return p->flags & KPROBE_FLAG_GONE; | 129 | return p->flags & KPROBE_FLAG_GONE; |
125 | } | 130 | } |
126 | 131 | ||
132 | /* Is this kprobe disabled ? */ | ||
133 | static inline int kprobe_disabled(struct kprobe *p) | ||
134 | { | ||
135 | return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE); | ||
136 | } | ||
127 | /* | 137 | /* |
128 | * Special probe type that uses setjmp-longjmp type tricks to resume | 138 | * Special probe type that uses setjmp-longjmp type tricks to resume |
129 | * execution at a specified entry with a matching prototype corresponding | 139 | * execution at a specified entry with a matching prototype corresponding |
@@ -283,6 +293,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num); | |||
283 | void kprobe_flush_task(struct task_struct *tk); | 293 | void kprobe_flush_task(struct task_struct *tk); |
284 | void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); | 294 | void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); |
285 | 295 | ||
296 | int disable_kprobe(struct kprobe *kp); | ||
297 | int enable_kprobe(struct kprobe *kp); | ||
298 | |||
286 | #else /* !CONFIG_KPROBES: */ | 299 | #else /* !CONFIG_KPROBES: */ |
287 | 300 | ||
288 | static inline int kprobes_built_in(void) | 301 | static inline int kprobes_built_in(void) |
@@ -349,5 +362,13 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num) | |||
349 | static inline void kprobe_flush_task(struct task_struct *tk) | 362 | static inline void kprobe_flush_task(struct task_struct *tk) |
350 | { | 363 | { |
351 | } | 364 | } |
365 | static inline int disable_kprobe(struct kprobe *kp) | ||
366 | { | ||
367 | return -ENOSYS; | ||
368 | } | ||
369 | static inline int enable_kprobe(struct kprobe *kp) | ||
370 | { | ||
371 | return -ENOSYS; | ||
372 | } | ||
352 | #endif /* CONFIG_KPROBES */ | 373 | #endif /* CONFIG_KPROBES */ |
353 | #endif /* _LINUX_KPROBES_H */ | 374 | #endif /* _LINUX_KPROBES_H */ |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index dae198b68e97..a5e74ddee0e2 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -328,7 +328,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
328 | struct kprobe *kp; | 328 | struct kprobe *kp; |
329 | 329 | ||
330 | list_for_each_entry_rcu(kp, &p->list, list) { | 330 | list_for_each_entry_rcu(kp, &p->list, list) { |
331 | if (kp->pre_handler && !kprobe_gone(kp)) { | 331 | if (kp->pre_handler && likely(!kprobe_disabled(kp))) { |
332 | set_kprobe_instance(kp); | 332 | set_kprobe_instance(kp); |
333 | if (kp->pre_handler(kp, regs)) | 333 | if (kp->pre_handler(kp, regs)) |
334 | return 1; | 334 | return 1; |
@@ -344,7 +344,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs, | |||
344 | struct kprobe *kp; | 344 | struct kprobe *kp; |
345 | 345 | ||
346 | list_for_each_entry_rcu(kp, &p->list, list) { | 346 | list_for_each_entry_rcu(kp, &p->list, list) { |
347 | if (kp->post_handler && !kprobe_gone(kp)) { | 347 | if (kp->post_handler && likely(!kprobe_disabled(kp))) { |
348 | set_kprobe_instance(kp); | 348 | set_kprobe_instance(kp); |
349 | kp->post_handler(kp, regs, flags); | 349 | kp->post_handler(kp, regs, flags); |
350 | reset_kprobe_instance(); | 350 | reset_kprobe_instance(); |
@@ -523,6 +523,7 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p) | |||
523 | */ | 523 | */ |
524 | static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) | 524 | static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) |
525 | { | 525 | { |
526 | BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); | ||
526 | if (p->break_handler) { | 527 | if (p->break_handler) { |
527 | if (ap->break_handler) | 528 | if (ap->break_handler) |
528 | return -EEXIST; | 529 | return -EEXIST; |
@@ -532,6 +533,13 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) | |||
532 | list_add_rcu(&p->list, &ap->list); | 533 | list_add_rcu(&p->list, &ap->list); |
533 | if (p->post_handler && !ap->post_handler) | 534 | if (p->post_handler && !ap->post_handler) |
534 | ap->post_handler = aggr_post_handler; | 535 | ap->post_handler = aggr_post_handler; |
536 | |||
537 | if (kprobe_disabled(ap) && !kprobe_disabled(p)) { | ||
538 | ap->flags &= ~KPROBE_FLAG_DISABLED; | ||
539 | if (!kprobes_all_disarmed) | ||
540 | /* Arm the breakpoint again. */ | ||
541 | arch_arm_kprobe(ap); | ||
542 | } | ||
535 | return 0; | 543 | return 0; |
536 | } | 544 | } |
537 | 545 | ||
@@ -592,20 +600,36 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p, | |||
592 | * freed by unregister_kprobe. | 600 | * freed by unregister_kprobe. |
593 | */ | 601 | */ |
594 | return ret; | 602 | return ret; |
595 | /* Clear gone flag to prevent allocating new slot again. */ | 603 | |
596 | ap->flags &= ~KPROBE_FLAG_GONE; | ||
597 | /* | 604 | /* |
598 | * If the old_p has gone, its breakpoint has been disarmed. | 605 | * Clear gone flag to prevent allocating new slot again, and |
599 | * We have to arm it again after preparing real kprobes. | 606 | * set disabled flag because it is not armed yet. |
600 | */ | 607 | */ |
601 | if (!kprobes_all_disarmed) | 608 | ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) |
602 | arch_arm_kprobe(ap); | 609 | | KPROBE_FLAG_DISABLED; |
603 | } | 610 | } |
604 | 611 | ||
605 | copy_kprobe(ap, p); | 612 | copy_kprobe(ap, p); |
606 | return add_new_kprobe(ap, p); | 613 | return add_new_kprobe(ap, p); |
607 | } | 614 | } |
608 | 615 | ||
616 | /* Try to disable aggr_kprobe, and return 1 if succeeded.*/ | ||
617 | static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p) | ||
618 | { | ||
619 | struct kprobe *kp; | ||
620 | |||
621 | list_for_each_entry_rcu(kp, &p->list, list) { | ||
622 | if (!kprobe_disabled(kp)) | ||
623 | /* | ||
624 | * There is an active probe on the list. | ||
625 | * We can't disable aggr_kprobe. | ||
626 | */ | ||
627 | return 0; | ||
628 | } | ||
629 | p->flags |= KPROBE_FLAG_DISABLED; | ||
630 | return 1; | ||
631 | } | ||
632 | |||
609 | static int __kprobes in_kprobes_functions(unsigned long addr) | 633 | static int __kprobes in_kprobes_functions(unsigned long addr) |
610 | { | 634 | { |
611 | struct kprobe_blackpoint *kb; | 635 | struct kprobe_blackpoint *kb; |
@@ -664,7 +688,9 @@ int __kprobes register_kprobe(struct kprobe *p) | |||
664 | return -EINVAL; | 688 | return -EINVAL; |
665 | } | 689 | } |
666 | 690 | ||
667 | p->flags = 0; | 691 | /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ |
692 | p->flags &= KPROBE_FLAG_DISABLED; | ||
693 | |||
668 | /* | 694 | /* |
669 | * Check if are we probing a module. | 695 | * Check if are we probing a module. |
670 | */ | 696 | */ |
@@ -709,7 +735,7 @@ int __kprobes register_kprobe(struct kprobe *p) | |||
709 | hlist_add_head_rcu(&p->hlist, | 735 | hlist_add_head_rcu(&p->hlist, |
710 | &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); | 736 | &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); |
711 | 737 | ||
712 | if (!kprobes_all_disarmed) | 738 | if (!kprobes_all_disarmed && !kprobe_disabled(p)) |
713 | arch_arm_kprobe(p); | 739 | arch_arm_kprobe(p); |
714 | 740 | ||
715 | out_unlock_text: | 741 | out_unlock_text: |
@@ -724,25 +750,37 @@ out: | |||
724 | } | 750 | } |
725 | EXPORT_SYMBOL_GPL(register_kprobe); | 751 | EXPORT_SYMBOL_GPL(register_kprobe); |
726 | 752 | ||
727 | /* | 753 | /* Check passed kprobe is valid and return kprobe in kprobe_table. */ |
728 | * Unregister a kprobe without a scheduler synchronization. | 754 | static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) |
729 | */ | ||
730 | static int __kprobes __unregister_kprobe_top(struct kprobe *p) | ||
731 | { | 755 | { |
732 | struct kprobe *old_p, *list_p; | 756 | struct kprobe *old_p, *list_p; |
733 | 757 | ||
734 | old_p = get_kprobe(p->addr); | 758 | old_p = get_kprobe(p->addr); |
735 | if (unlikely(!old_p)) | 759 | if (unlikely(!old_p)) |
736 | return -EINVAL; | 760 | return NULL; |
737 | 761 | ||
738 | if (p != old_p) { | 762 | if (p != old_p) { |
739 | list_for_each_entry_rcu(list_p, &old_p->list, list) | 763 | list_for_each_entry_rcu(list_p, &old_p->list, list) |
740 | if (list_p == p) | 764 | if (list_p == p) |
741 | /* kprobe p is a valid probe */ | 765 | /* kprobe p is a valid probe */ |
742 | goto valid_p; | 766 | goto valid; |
743 | return -EINVAL; | 767 | return NULL; |
744 | } | 768 | } |
745 | valid_p: | 769 | valid: |
770 | return old_p; | ||
771 | } | ||
772 | |||
773 | /* | ||
774 | * Unregister a kprobe without a scheduler synchronization. | ||
775 | */ | ||
776 | static int __kprobes __unregister_kprobe_top(struct kprobe *p) | ||
777 | { | ||
778 | struct kprobe *old_p, *list_p; | ||
779 | |||
780 | old_p = __get_valid_kprobe(p); | ||
781 | if (old_p == NULL) | ||
782 | return -EINVAL; | ||
783 | |||
746 | if (old_p == p || | 784 | if (old_p == p || |
747 | (old_p->pre_handler == aggr_pre_handler && | 785 | (old_p->pre_handler == aggr_pre_handler && |
748 | list_is_singular(&old_p->list))) { | 786 | list_is_singular(&old_p->list))) { |
@@ -751,7 +789,7 @@ valid_p: | |||
751 | * enabled and not gone - otherwise, the breakpoint would | 789 | * enabled and not gone - otherwise, the breakpoint would |
752 | * already have been removed. We save on flushing icache. | 790 | * already have been removed. We save on flushing icache. |
753 | */ | 791 | */ |
754 | if (!kprobes_all_disarmed && !kprobe_gone(old_p)) { | 792 | if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) { |
755 | mutex_lock(&text_mutex); | 793 | mutex_lock(&text_mutex); |
756 | arch_disarm_kprobe(p); | 794 | arch_disarm_kprobe(p); |
757 | mutex_unlock(&text_mutex); | 795 | mutex_unlock(&text_mutex); |
@@ -769,6 +807,11 @@ valid_p: | |||
769 | } | 807 | } |
770 | noclean: | 808 | noclean: |
771 | list_del_rcu(&p->list); | 809 | list_del_rcu(&p->list); |
810 | if (!kprobe_disabled(old_p)) { | ||
811 | try_to_disable_aggr_kprobe(old_p); | ||
812 | if (!kprobes_all_disarmed && kprobe_disabled(old_p)) | ||
813 | arch_disarm_kprobe(old_p); | ||
814 | } | ||
772 | } | 815 | } |
773 | return 0; | 816 | return 0; |
774 | } | 817 | } |
@@ -1078,6 +1121,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, | |||
1078 | static void __kprobes kill_kprobe(struct kprobe *p) | 1121 | static void __kprobes kill_kprobe(struct kprobe *p) |
1079 | { | 1122 | { |
1080 | struct kprobe *kp; | 1123 | struct kprobe *kp; |
1124 | |||
1081 | p->flags |= KPROBE_FLAG_GONE; | 1125 | p->flags |= KPROBE_FLAG_GONE; |
1082 | if (p->pre_handler == aggr_pre_handler) { | 1126 | if (p->pre_handler == aggr_pre_handler) { |
1083 | /* | 1127 | /* |
@@ -1219,12 +1263,18 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, | |||
1219 | else | 1263 | else |
1220 | kprobe_type = "k"; | 1264 | kprobe_type = "k"; |
1221 | if (sym) | 1265 | if (sym) |
1222 | seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type, | 1266 | seq_printf(pi, "%p %s %s+0x%x %s %s%s\n", |
1223 | sym, offset, (modname ? modname : " "), | 1267 | p->addr, kprobe_type, sym, offset, |
1224 | (kprobe_gone(p) ? "[GONE]" : "")); | 1268 | (modname ? modname : " "), |
1269 | (kprobe_gone(p) ? "[GONE]" : ""), | ||
1270 | ((kprobe_disabled(p) && !kprobe_gone(p)) ? | ||
1271 | "[DISABLED]" : "")); | ||
1225 | else | 1272 | else |
1226 | seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr, | 1273 | seq_printf(pi, "%p %s %p %s%s\n", |
1227 | (kprobe_gone(p) ? "[GONE]" : "")); | 1274 | p->addr, kprobe_type, p->addr, |
1275 | (kprobe_gone(p) ? "[GONE]" : ""), | ||
1276 | ((kprobe_disabled(p) && !kprobe_gone(p)) ? | ||
1277 | "[DISABLED]" : "")); | ||
1228 | } | 1278 | } |
1229 | 1279 | ||
1230 | static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) | 1280 | static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) |
@@ -1289,6 +1339,71 @@ static struct file_operations debugfs_kprobes_operations = { | |||
1289 | .release = seq_release, | 1339 | .release = seq_release, |
1290 | }; | 1340 | }; |
1291 | 1341 | ||
1342 | /* Disable one kprobe */ | ||
1343 | int __kprobes disable_kprobe(struct kprobe *kp) | ||
1344 | { | ||
1345 | int ret = 0; | ||
1346 | struct kprobe *p; | ||
1347 | |||
1348 | mutex_lock(&kprobe_mutex); | ||
1349 | |||
1350 | /* Check whether specified probe is valid. */ | ||
1351 | p = __get_valid_kprobe(kp); | ||
1352 | if (unlikely(p == NULL)) { | ||
1353 | ret = -EINVAL; | ||
1354 | goto out; | ||
1355 | } | ||
1356 | |||
1357 | /* If the probe is already disabled (or gone), just return */ | ||
1358 | if (kprobe_disabled(kp)) | ||
1359 | goto out; | ||
1360 | |||
1361 | kp->flags |= KPROBE_FLAG_DISABLED; | ||
1362 | if (p != kp) | ||
1363 | /* When kp != p, p is always enabled. */ | ||
1364 | try_to_disable_aggr_kprobe(p); | ||
1365 | |||
1366 | if (!kprobes_all_disarmed && kprobe_disabled(p)) | ||
1367 | arch_disarm_kprobe(p); | ||
1368 | out: | ||
1369 | mutex_unlock(&kprobe_mutex); | ||
1370 | return ret; | ||
1371 | } | ||
1372 | EXPORT_SYMBOL_GPL(disable_kprobe); | ||
1373 | |||
1374 | /* Enable one kprobe */ | ||
1375 | int __kprobes enable_kprobe(struct kprobe *kp) | ||
1376 | { | ||
1377 | int ret = 0; | ||
1378 | struct kprobe *p; | ||
1379 | |||
1380 | mutex_lock(&kprobe_mutex); | ||
1381 | |||
1382 | /* Check whether specified probe is valid. */ | ||
1383 | p = __get_valid_kprobe(kp); | ||
1384 | if (unlikely(p == NULL)) { | ||
1385 | ret = -EINVAL; | ||
1386 | goto out; | ||
1387 | } | ||
1388 | |||
1389 | if (kprobe_gone(kp)) { | ||
1390 | /* This kprobe has gone, we couldn't enable it. */ | ||
1391 | ret = -EINVAL; | ||
1392 | goto out; | ||
1393 | } | ||
1394 | |||
1395 | if (!kprobes_all_disarmed && kprobe_disabled(p)) | ||
1396 | arch_arm_kprobe(p); | ||
1397 | |||
1398 | p->flags &= ~KPROBE_FLAG_DISABLED; | ||
1399 | if (p != kp) | ||
1400 | kp->flags &= ~KPROBE_FLAG_DISABLED; | ||
1401 | out: | ||
1402 | mutex_unlock(&kprobe_mutex); | ||
1403 | return ret; | ||
1404 | } | ||
1405 | EXPORT_SYMBOL_GPL(enable_kprobe); | ||
1406 | |||
1292 | static void __kprobes arm_all_kprobes(void) | 1407 | static void __kprobes arm_all_kprobes(void) |
1293 | { | 1408 | { |
1294 | struct hlist_head *head; | 1409 | struct hlist_head *head; |
@@ -1306,7 +1421,7 @@ static void __kprobes arm_all_kprobes(void) | |||
1306 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 1421 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
1307 | head = &kprobe_table[i]; | 1422 | head = &kprobe_table[i]; |
1308 | hlist_for_each_entry_rcu(p, node, head, hlist) | 1423 | hlist_for_each_entry_rcu(p, node, head, hlist) |
1309 | if (!kprobe_gone(p)) | 1424 | if (!kprobe_disabled(p)) |
1310 | arch_arm_kprobe(p); | 1425 | arch_arm_kprobe(p); |
1311 | } | 1426 | } |
1312 | mutex_unlock(&text_mutex); | 1427 | mutex_unlock(&text_mutex); |
@@ -1338,7 +1453,7 @@ static void __kprobes disarm_all_kprobes(void) | |||
1338 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 1453 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
1339 | head = &kprobe_table[i]; | 1454 | head = &kprobe_table[i]; |
1340 | hlist_for_each_entry_rcu(p, node, head, hlist) { | 1455 | hlist_for_each_entry_rcu(p, node, head, hlist) { |
1341 | if (!arch_trampoline_kprobe(p) && !kprobe_gone(p)) | 1456 | if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) |
1342 | arch_disarm_kprobe(p); | 1457 | arch_disarm_kprobe(p); |
1343 | } | 1458 | } |
1344 | } | 1459 | } |