path: root/kernel/kprobes.c
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--  kernel/kprobes.c  461
1 file changed, 410 insertions, 51 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 78105623d739..612af2d61614 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -45,6 +45,7 @@
 #include <linux/kdebug.h>
 #include <linux/memory.h>
 #include <linux/ftrace.h>
+#include <linux/cpu.h>
 
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -280,6 +281,33 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
 	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
 	mutex_unlock(&kprobe_insn_mutex);
 }
+#ifdef CONFIG_OPTPROBES
+/* For optimized_kprobe buffer */
+static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
+static struct kprobe_insn_cache kprobe_optinsn_slots = {
+	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
+	/* .insn_size is initialized later */
+	.nr_garbage = 0,
+};
+/* Get a slot for optimized_kprobe buffer */
+kprobe_opcode_t __kprobes *get_optinsn_slot(void)
+{
+	kprobe_opcode_t *ret = NULL;
+
+	mutex_lock(&kprobe_optinsn_mutex);
+	ret = __get_insn_slot(&kprobe_optinsn_slots);
+	mutex_unlock(&kprobe_optinsn_mutex);
+
+	return ret;
+}
+
+void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
+{
+	mutex_lock(&kprobe_optinsn_mutex);
+	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
+	mutex_unlock(&kprobe_optinsn_mutex);
+}
+#endif
 #endif
 
 /* We have preemption disabled.. so it is safe to use __ versions */
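The cache above mirrors kprobe_insn_slots: architectures that implement CONFIG_OPTPROBES are expected to carve their jump-detour buffers out of it. Below is a minimal sketch of how arch code might pair the two new calls when building and tearing down a detour buffer; the optinsn.insn field and the copy step are assumptions modeled on the arch side of this series, not code from this file.

#include <linux/kprobes.h>

/* Sketch only: how an arch port might use the new optinsn slot cache.
 * 'op->optinsn.insn' follows the naming used later in this file; the
 * actual layout comes from the arch header, not from kernel/kprobes.c. */
static int example_prepare_detour(struct optimized_kprobe *op)
{
	kprobe_opcode_t *buf;

	buf = get_optinsn_slot();	/* slot sized by .insn_size (MAX_OPTINSN_SIZE) */
	if (!buf)
		return -ENOMEM;

	op->optinsn.insn = buf;		/* remembered so it can be freed later */
	/* ... copy the detour template and displaced instructions into buf ... */
	return 0;
}

static void example_remove_detour(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		/* dirty == 1: mark the slot for later garbage collection */
		free_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}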
@@ -310,23 +338,324 @@ struct kprobe __kprobes *get_kprobe(void *addr)
 		if (p->addr == addr)
 			return p;
 	}
+
 	return NULL;
 }
 
+static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
+
+/* Return true if the kprobe is an aggregator */
+static inline int kprobe_aggrprobe(struct kprobe *p)
+{
+	return p->pre_handler == aggr_pre_handler;
+}
+
+/*
+ * Keep all fields in the kprobe consistent
+ */
+static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
+	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
+}
+
+#ifdef CONFIG_OPTPROBES
+/*
+ * Call all pre_handlers on the list, but ignore their return values.
+ * This must be called from the arch-dependent optimized caller.
+ */
+void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe *kp;
+
+	list_for_each_entry_rcu(kp, &p->list, list) {
+		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
+			set_kprobe_instance(kp);
+			kp->pre_handler(kp, regs);
+		}
+		reset_kprobe_instance();
+	}
+}
+
+/* Return true(!0) if the kprobe is ready for optimization. */
+static inline int kprobe_optready(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	if (kprobe_aggrprobe(p)) {
+		op = container_of(p, struct optimized_kprobe, kp);
+		return arch_prepared_optinsn(&op->optinsn);
+	}
+
+	return 0;
+}
+
+/*
+ * Return an optimized kprobe whose optimizing code replaces
+ * instructions including addr (excluding the breakpoint itself).
+ */
+struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
+{
+	int i;
+	struct kprobe *p = NULL;
+	struct optimized_kprobe *op;
+
+	/* Don't check i == 0, since that is a breakpoint case. */
+	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
+		p = get_kprobe((void *)(addr - i));
+
+	if (p && kprobe_optready(p)) {
+		op = container_of(p, struct optimized_kprobe, kp);
+		if (arch_within_optimized_kprobe(op, addr))
+			return p;
+	}
+
+	return NULL;
+}
+
+/* Optimization staging list, protected by kprobe_mutex */
+static LIST_HEAD(optimizing_list);
+
+static void kprobe_optimizer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
+#define OPTIMIZE_DELAY 5
+
+/* Kprobe jump optimizer */
+static __kprobes void kprobe_optimizer(struct work_struct *work)
+{
+	struct optimized_kprobe *op, *tmp;
+
+	/* Lock modules while optimizing kprobes */
+	mutex_lock(&module_mutex);
+	mutex_lock(&kprobe_mutex);
+	if (kprobes_all_disarmed)
+		goto end;
+
+	/*
+	 * Wait for the quiescence period to ensure all running interrupts
+	 * are done. Because an optprobe may modify multiple instructions,
+	 * there is a chance that the Nth instruction is interrupted. In
+	 * that case, a running interrupt can return into the 2nd-Nth byte
+	 * of the jump instruction. This wait is for avoiding it.
+	 */
+	synchronize_sched();
+
+	/*
+	 * Optimization/unoptimization refers to online_cpus via
+	 * stop_machine(), while cpu-hotplug modifies online_cpus.
+	 * At the same time, text_mutex is held both here and in cpu-hotplug.
+	 * This combination can cause a deadlock (cpu-hotplug tries to lock
+	 * text_mutex but stop_machine() cannot proceed because online_cpus
+	 * has changed).
+	 * To avoid this deadlock, we call get_online_cpus() to prevent
+	 * cpu-hotplug while text_mutex is held.
+	 */
+	get_online_cpus();
+	mutex_lock(&text_mutex);
+	list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
+		WARN_ON(kprobe_disabled(&op->kp));
+		if (arch_optimize_kprobe(op) < 0)
+			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+		list_del_init(&op->list);
+	}
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+end:
+	mutex_unlock(&kprobe_mutex);
+	mutex_unlock(&module_mutex);
+}
+
+/* Optimize kprobe if p is ready to be optimized */
+static __kprobes void optimize_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	/* Check if the kprobe is disabled or not ready for optimization. */
+	if (!kprobe_optready(p) ||
+	    (kprobe_disabled(p) || kprobes_all_disarmed))
+		return;
+
+	/* Neither break_handler nor post_handler is supported. */
+	if (p->break_handler || p->post_handler)
+		return;
+
+	op = container_of(p, struct optimized_kprobe, kp);
+
+	/* Check that there are no other kprobes at the optimized instructions */
+	if (arch_check_optimized_kprobe(op) < 0)
+		return;
+
+	/* Check if it is already optimized. */
+	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+		return;
+
+	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
+	list_add(&op->list, &optimizing_list);
+	if (!delayed_work_pending(&optimizing_work))
+		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
+}
+
+/* Unoptimize a kprobe if p is optimized */
+static __kprobes void unoptimize_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) {
+		op = container_of(p, struct optimized_kprobe, kp);
+		if (!list_empty(&op->list))
+			/* Dequeue from the optimization queue */
+			list_del_init(&op->list);
+		else
+			/* Replace jump with break */
+			arch_unoptimize_kprobe(op);
+		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+	}
+}
+
+/* Remove optimized instructions */
+static void __kprobes kill_optimized_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	op = container_of(p, struct optimized_kprobe, kp);
+	if (!list_empty(&op->list)) {
+		/* Dequeue from the optimization queue */
+		list_del_init(&op->list);
+		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+	}
+	/* Don't unoptimize, because the target code will be freed. */
+	arch_remove_optimized_kprobe(op);
+}
+
+/* Try to prepare optimized instructions */
+static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	op = container_of(p, struct optimized_kprobe, kp);
+	arch_prepare_optimized_kprobe(op);
+}
+
+/* Free optimized instructions and optimized_kprobe */
+static __kprobes void free_aggr_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	op = container_of(p, struct optimized_kprobe, kp);
+	arch_remove_optimized_kprobe(op);
+	kfree(op);
+}
+
+/* Allocate a new optimized_kprobe and try to prepare optimized instructions */
+static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
+	if (!op)
+		return NULL;
+
+	INIT_LIST_HEAD(&op->list);
+	op->kp.addr = p->addr;
+	arch_prepare_optimized_kprobe(op);
+
+	return &op->kp;
+}
+
+static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
+
+/*
+ * Prepare an optimized_kprobe and optimize it
+ * NOTE: p must be a normal registered kprobe
+ */
+static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
+{
+	struct kprobe *ap;
+	struct optimized_kprobe *op;
+
+	ap = alloc_aggr_kprobe(p);
+	if (!ap)
+		return;
+
+	op = container_of(ap, struct optimized_kprobe, kp);
+	if (!arch_prepared_optinsn(&op->optinsn)) {
+		/* If setting up the optimization failed, fall back to kprobe */
+		free_aggr_kprobe(ap);
+		return;
+	}
+
+	init_aggr_kprobe(ap, p);
+	optimize_kprobe(ap);
+}
+
+static void __kprobes __arm_kprobe(struct kprobe *p)
+{
+	struct kprobe *old_p;
+
+	/* Check collision with other optimized kprobes */
+	old_p = get_optimized_kprobe((unsigned long)p->addr);
+	if (unlikely(old_p))
+		unoptimize_kprobe(old_p); /* Fallback to unoptimized kprobe */
+
+	arch_arm_kprobe(p);
+	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
+}
+
+static void __kprobes __disarm_kprobe(struct kprobe *p)
+{
+	struct kprobe *old_p;
+
+	unoptimize_kprobe(p);	/* Try to unoptimize */
+	arch_disarm_kprobe(p);
+
+	/* If another kprobe was blocked, optimize it. */
+	old_p = get_optimized_kprobe((unsigned long)p->addr);
+	if (unlikely(old_p))
+		optimize_kprobe(old_p);
+}
+
+#else /* !CONFIG_OPTPROBES */
+
+#define optimize_kprobe(p)			do {} while (0)
+#define unoptimize_kprobe(p)			do {} while (0)
+#define kill_optimized_kprobe(p)		do {} while (0)
+#define prepare_optimized_kprobe(p)		do {} while (0)
+#define try_to_optimize_kprobe(p)		do {} while (0)
+#define __arm_kprobe(p)				arch_arm_kprobe(p)
+#define __disarm_kprobe(p)			arch_disarm_kprobe(p)
+
+static __kprobes void free_aggr_kprobe(struct kprobe *p)
+{
+	kfree(p);
+}
+
+static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
+{
+	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+}
+#endif /* CONFIG_OPTPROBES */
+
 /* Arm a kprobe with text_mutex */
 static void __kprobes arm_kprobe(struct kprobe *kp)
 {
+	/*
+	 * Here, since __arm_kprobe() doesn't use stop_machine(),
+	 * this doesn't cause deadlock on text_mutex. So, we don't
+	 * need get_online_cpus().
+	 */
 	mutex_lock(&text_mutex);
-	arch_arm_kprobe(kp);
+	__arm_kprobe(kp);
 	mutex_unlock(&text_mutex);
 }
 
 /* Disarm a kprobe with text_mutex */
 static void __kprobes disarm_kprobe(struct kprobe *kp)
 {
+	get_online_cpus();	/* For avoiding text_mutex deadlock */
 	mutex_lock(&text_mutex);
-	arch_disarm_kprobe(kp);
+	__disarm_kprobe(kp);
 	mutex_unlock(&text_mutex);
+	put_online_cpus();
 }
 
 /*
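The core code above leans on a set of arch_* hooks and on struct optimized_kprobe, both of which come from other patches in this series (the header change and the x86 implementation). As a reading aid, here is a sketch of the interface the calls in this hunk imply; the exact prototypes, field order, and the arch_optimized_insn type name are assumptions, not definitions taken from this file.

/* Sketch of the arch-side surface implied by the calls above. */
struct optimized_kprobe {
	struct kprobe kp;			/* embedded; container_of() is used above */
	struct list_head list;			/* entry on optimizing_list */
	struct arch_optimized_insn optinsn;	/* arch detour state (type name assumed) */
};

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn); /* detour buffer ready? */
int arch_check_optimized_kprobe(struct optimized_kprobe *op);   /* no probes inside the copied insns? */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op); /* build the detour buffer */
void arch_remove_optimized_kprobe(struct optimized_kprobe *op); /* free the detour buffer */
int arch_optimize_kprobe(struct optimized_kprobe *op);          /* patch breakpoint -> jump */
void arch_unoptimize_kprobe(struct optimized_kprobe *op);       /* patch jump -> breakpoint */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr);            /* addr inside the jump area? */
/* MAX_OPTIMIZED_LENGTH bounds the search in get_optimized_kprobe(), and the
 * arch detour code is expected to call back into opt_pre_handler() above. */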
@@ -395,7 +724,7 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
 {
 	struct kprobe *kp;
-	if (p->pre_handler != aggr_pre_handler) {
+	if (!kprobe_aggrprobe(p)) {
 		p->nmissed++;
 	} else {
 		list_for_each_entry_rcu(kp, &p->list, list)
@@ -519,21 +848,16 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
 }
 
 /*
- * Keep all fields in the kprobe consistent
- */
-static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
-{
-	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
-	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
-}
-
-/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
 static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
 	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
+
+	if (p->break_handler || p->post_handler)
+		unoptimize_kprobe(ap);	/* Fall back to normal kprobe */
+
 	if (p->break_handler) {
 		if (ap->break_handler)
 			return -EEXIST;
@@ -548,7 +872,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
 		if (!kprobes_all_disarmed)
 			/* Arm the breakpoint again. */
-			arm_kprobe(ap);
+			__arm_kprobe(ap);
 	}
 	return 0;
 }
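The unoptimize_kprobe() call added above is the other half of the check in optimize_kprobe(): a probe that needs break_handler or post_handler semantics forces the whole site back to a plain breakpoint. A hedged, module-style illustration follows; the target symbol and handler bodies are made up for the example, only register_kprobe() and the struct kprobe fields shown are the real API.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static int fast_pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;	/* pre_handler only: the site stays eligible for jump optimization */
}

static void slow_post(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
{
	/* a post_handler needs single-stepping, so the site cannot stay optimized */
}

static struct kprobe kp_fast = {
	.symbol_name = "do_fork",	/* example target; any probe-able symbol works */
	.pre_handler = fast_pre,
};

static struct kprobe kp_slow = {
	.symbol_name = "do_fork",
	.pre_handler = fast_pre,
	.post_handler = slow_post,
};

static int __init demo_init(void)
{
	int ret = register_kprobe(&kp_fast);	/* may be jump-optimized after OPTIMIZE_DELAY */
	if (ret)
		return ret;
	ret = register_kprobe(&kp_slow);	/* makes add_new_kprobe() unoptimize the site */
	if (ret)
		unregister_kprobe(&kp_fast);
	return ret;
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&kp_slow);
	unregister_kprobe(&kp_fast);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");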
@@ -557,12 +881,13 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
  * Fill in the required fields of the "manager kprobe". Replace the
  * earlier kprobe in the hlist with the manager kprobe
  */
-static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
+static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 {
+	/* Copy p's insn slot to ap */
 	copy_kprobe(p, ap);
 	flush_insn_slot(ap);
 	ap->addr = p->addr;
-	ap->flags = p->flags;
+	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
 	ap->pre_handler = aggr_pre_handler;
 	ap->fault_handler = aggr_fault_handler;
 	/* We don't care the kprobe which has gone. */
@@ -572,8 +897,9 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 	ap->break_handler = aggr_break_handler;
 
 	INIT_LIST_HEAD(&ap->list);
-	list_add_rcu(&p->list, &ap->list);
+	INIT_HLIST_NODE(&ap->hlist);
 
+	list_add_rcu(&p->list, &ap->list);
 	hlist_replace_rcu(&p->hlist, &ap->hlist);
 }
 
@@ -587,12 +913,12 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 	int ret = 0;
 	struct kprobe *ap = old_p;
 
-	if (old_p->pre_handler != aggr_pre_handler) {
-		/* If old_p is not an aggr_probe, create new aggr_kprobe. */
-		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+	if (!kprobe_aggrprobe(old_p)) {
+		/* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
+		ap = alloc_aggr_kprobe(old_p);
 		if (!ap)
 			return -ENOMEM;
-		add_aggr_kprobe(ap, old_p);
+		init_aggr_kprobe(ap, old_p);
 	}
 
 	if (kprobe_gone(ap)) {
@@ -611,6 +937,9 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 			 */
 			return ret;
 
+		/* Prepare optimized instructions if possible. */
+		prepare_optimized_kprobe(ap);
+
 		/*
 		 * Clear gone flag to prevent allocating new slot again, and
 		 * set disabled flag because it is not armed yet.
@@ -619,6 +948,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 			    | KPROBE_FLAG_DISABLED;
 	}
 
+	/* Copy ap's insn slot to p */
 	copy_kprobe(ap, p);
 	return add_new_kprobe(ap, p);
 }
@@ -769,27 +1099,34 @@ int __kprobes register_kprobe(struct kprobe *p)
 	p->nmissed = 0;
 	INIT_LIST_HEAD(&p->list);
 	mutex_lock(&kprobe_mutex);
+
+	get_online_cpus();	/* For avoiding text_mutex deadlock. */
+	mutex_lock(&text_mutex);
+
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
+		/* Since this may unoptimize old_p, locking text_mutex. */
 		ret = register_aggr_kprobe(old_p, p);
 		goto out;
 	}
 
-	mutex_lock(&text_mutex);
 	ret = arch_prepare_kprobe(p);
 	if (ret)
-		goto out_unlock_text;
+		goto out;
 
 	INIT_HLIST_NODE(&p->hlist);
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
 	if (!kprobes_all_disarmed && !kprobe_disabled(p))
-		arch_arm_kprobe(p);
+		__arm_kprobe(p);
+
+	/* Try to optimize kprobe */
+	try_to_optimize_kprobe(p);
 
-out_unlock_text:
-	mutex_unlock(&text_mutex);
 out:
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
 	mutex_unlock(&kprobe_mutex);
 
 	if (probed_mod)
@@ -811,7 +1148,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 		return -EINVAL;
 
 	if (old_p == p ||
-	    (old_p->pre_handler == aggr_pre_handler &&
+	    (kprobe_aggrprobe(old_p) &&
 	     list_is_singular(&old_p->list))) {
 		/*
 		 * Only probe on the hash list. Disarm only if kprobes are
@@ -819,7 +1156,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 		 * already have been removed. We save on flushing icache.
 		 */
 		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
-			disarm_kprobe(p);
+			disarm_kprobe(old_p);
 		hlist_del_rcu(&old_p->hlist);
 	} else {
 		if (p->break_handler && !kprobe_gone(p))
@@ -835,8 +1172,13 @@ noclean:
 		list_del_rcu(&p->list);
 		if (!kprobe_disabled(old_p)) {
 			try_to_disable_aggr_kprobe(old_p);
-			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
-				disarm_kprobe(old_p);
+			if (!kprobes_all_disarmed) {
+				if (kprobe_disabled(old_p))
+					disarm_kprobe(old_p);
+				else
+					/* Try to optimize this probe again */
+					optimize_kprobe(old_p);
+			}
 		}
 	}
 	return 0;
@@ -853,7 +1195,7 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
 		old_p = list_entry(p->list.next, struct kprobe, list);
 		list_del(&p->list);
 		arch_remove_kprobe(old_p);
-		kfree(old_p);
+		free_aggr_kprobe(old_p);
 	}
 }
 
@@ -1149,7 +1491,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
 	struct kprobe *kp;
 
 	p->flags |= KPROBE_FLAG_GONE;
-	if (p->pre_handler == aggr_pre_handler) {
+	if (kprobe_aggrprobe(p)) {
 		/*
 		 * If this is an aggr_kprobe, we have to list all the
 		 * chained probes and mark them GONE.
@@ -1158,6 +1500,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
 			kp->flags |= KPROBE_FLAG_GONE;
 		p->post_handler = NULL;
 		p->break_handler = NULL;
+		kill_optimized_kprobe(p);
 	}
 	/*
 	 * Here, we can remove insn_slot safely, because no thread calls
@@ -1267,6 +1610,11 @@ static int __init init_kprobes(void)
 		}
 	}
 
+#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+	/* Init kprobe_optinsn_slots */
+	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
+#endif
+
 	/* By default, kprobes are armed */
 	kprobes_all_disarmed = false;
 
@@ -1285,7 +1633,7 @@ static int __init init_kprobes(void)
 
 #ifdef CONFIG_DEBUG_FS
 static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
-		const char *sym, int offset,char *modname)
+		const char *sym, int offset, char *modname, struct kprobe *pp)
 {
 	char *kprobe_type;
 
@@ -1295,19 +1643,21 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
 		kprobe_type = "j";
 	else
 		kprobe_type = "k";
+
 	if (sym)
-		seq_printf(pi, "%p %s %s+0x%x %s %s%s\n",
+		seq_printf(pi, "%p %s %s+0x%x %s ",
 			p->addr, kprobe_type, sym, offset,
-			(modname ? modname : " "),
-			(kprobe_gone(p) ? "[GONE]" : ""),
-			((kprobe_disabled(p) && !kprobe_gone(p)) ?
-			 "[DISABLED]" : ""));
+			(modname ? modname : " "));
 	else
-		seq_printf(pi, "%p %s %p %s%s\n",
-			p->addr, kprobe_type, p->addr,
-			(kprobe_gone(p) ? "[GONE]" : ""),
-			((kprobe_disabled(p) && !kprobe_gone(p)) ?
-			 "[DISABLED]" : ""));
+		seq_printf(pi, "%p %s %p ",
+			p->addr, kprobe_type, p->addr);
+
+	if (!pp)
+		pp = p;
+	seq_printf(pi, "%s%s%s\n",
+		(kprobe_gone(p) ? "[GONE]" : ""),
+		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
+		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
 }
 
 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
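report_probe() now receives the aggregator (pp) so that /sys/kernel/debug/kprobes/list can flag a whole site as [OPTIMIZED] even when listing its child probes. kprobe_optimized() itself is not defined in this file; presumably the header change in this series provides something along these lines (a sketch based on the KPROBE_FLAG_OPTIMIZED usage above, not the actual header):

/* Sketch of the helper assumed above; the real definition belongs to the
 * include/linux/kprobes.h part of this series. */
static inline int kprobe_optimized(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_OPTIMIZED;
}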
@@ -1343,11 +1693,11 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
 	hlist_for_each_entry_rcu(p, node, head, hlist) {
 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
 					&offset, &modname, namebuf);
-		if (p->pre_handler == aggr_pre_handler) {
+		if (kprobe_aggrprobe(p)) {
 			list_for_each_entry_rcu(kp, &p->list, list)
-				report_probe(pi, kp, sym, offset, modname);
+				report_probe(pi, kp, sym, offset, modname, p);
 		} else
-			report_probe(pi, p, sym, offset, modname);
+			report_probe(pi, p, sym, offset, modname, NULL);
 	}
 	preempt_enable();
 	return 0;
@@ -1425,12 +1775,13 @@ int __kprobes enable_kprobe(struct kprobe *kp)
 		goto out;
 	}
 
-	if (!kprobes_all_disarmed && kprobe_disabled(p))
-		arm_kprobe(p);
-
-	p->flags &= ~KPROBE_FLAG_DISABLED;
 	if (p != kp)
 		kp->flags &= ~KPROBE_FLAG_DISABLED;
+
+	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
+		p->flags &= ~KPROBE_FLAG_DISABLED;
+		arm_kprobe(p);
+	}
 out:
 	mutex_unlock(&kprobe_mutex);
 	return ret;
@@ -1450,12 +1801,13 @@ static void __kprobes arm_all_kprobes(void)
 	if (!kprobes_all_disarmed)
 		goto already_enabled;
 
+	/* Arming kprobes doesn't optimize kprobe itself */
 	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
 			if (!kprobe_disabled(p))
-				arch_arm_kprobe(p);
+				__arm_kprobe(p);
 	}
 	mutex_unlock(&text_mutex);
 
@@ -1482,16 +1834,23 @@ static void __kprobes disarm_all_kprobes(void)
 
 	kprobes_all_disarmed = true;
 	printk(KERN_INFO "Kprobes globally disabled\n");
+
+	/*
+	 * Here we call get_online_cpus() for avoiding text_mutex deadlock,
+	 * because disarming may also unoptimize kprobes.
+	 */
+	get_online_cpus();
 	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-				arch_disarm_kprobe(p);
+				__disarm_kprobe(p);
 		}
 	}
 
 	mutex_unlock(&text_mutex);
+	put_online_cpus();
 	mutex_unlock(&kprobe_mutex);
 	/* Allow all currently running kprobes to complete */
 	synchronize_sched();
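Taken together with register_kprobe() and disarm_kprobe() above, the patch settles on one lock nesting for every path that may patch text or (un)optimize probes. A condensed sketch of that ordering follows; the helper is illustrative only, not a function added by the patch.

/* Illustrative only: the nesting used by register_kprobe(), disarm_kprobe(),
 * disarm_all_kprobes() and (with module_mutex outermost) kprobe_optimizer(). */
static void example_patching_section(void)
{
	mutex_lock(&kprobe_mutex);	/* kprobe bookkeeping */
	get_online_cpus();		/* hold off cpu-hotplug so stop_machine() users can't deadlock on text_mutex */
	mutex_lock(&text_mutex);	/* serialize kernel text modification */

	/* ... __arm_kprobe() / __disarm_kprobe() style text patching ... */

	mutex_unlock(&text_mutex);
	put_online_cpus();
	mutex_unlock(&kprobe_mutex);
}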