Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--  kernel/kprobes.c | 631
1 file changed, 425 insertions(+), 206 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 282035f3ae96..77981813a1e7 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -47,6 +47,7 @@
47#include <linux/memory.h> 47#include <linux/memory.h>
48#include <linux/ftrace.h> 48#include <linux/ftrace.h>
49#include <linux/cpu.h> 49#include <linux/cpu.h>
50#include <linux/jump_label.h>
50 51
51#include <asm-generic/sections.h> 52#include <asm-generic/sections.h>
52#include <asm/cacheflush.h> 53#include <asm/cacheflush.h>
@@ -73,7 +74,8 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
73/* NOTE: change this value only with kprobe_mutex held */ 74/* NOTE: change this value only with kprobe_mutex held */
74static bool kprobes_all_disarmed; 75static bool kprobes_all_disarmed;
75 76
76static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ 77/* This protects kprobe_table and optimizing_list */
78static DEFINE_MUTEX(kprobe_mutex);
77static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; 79static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
78static struct { 80static struct {
79 spinlock_t lock ____cacheline_aligned_in_smp; 81 spinlock_t lock ____cacheline_aligned_in_smp;
@@ -315,12 +317,12 @@ void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
315/* We have preemption disabled.. so it is safe to use __ versions */ 317/* We have preemption disabled.. so it is safe to use __ versions */
316static inline void set_kprobe_instance(struct kprobe *kp) 318static inline void set_kprobe_instance(struct kprobe *kp)
317{ 319{
318 __get_cpu_var(kprobe_instance) = kp; 320 __this_cpu_write(kprobe_instance, kp);
319} 321}
320 322
321static inline void reset_kprobe_instance(void) 323static inline void reset_kprobe_instance(void)
322{ 324{
323 __get_cpu_var(kprobe_instance) = NULL; 325 __this_cpu_write(kprobe_instance, NULL);
324} 326}
325 327
326/* 328/*
@@ -352,13 +354,20 @@ static inline int kprobe_aggrprobe(struct kprobe *p)
352 return p->pre_handler == aggr_pre_handler; 354 return p->pre_handler == aggr_pre_handler;
353} 355}
354 356
357/* Return true(!0) if the kprobe is unused */
358static inline int kprobe_unused(struct kprobe *p)
359{
360 return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
361 list_empty(&p->list);
362}
363
355/* 364/*
356 * Keep all fields in the kprobe consistent 365 * Keep all fields in the kprobe consistent
357 */ 366 */
358static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p) 367static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
359{ 368{
360 memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t)); 369 memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
361 memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn)); 370 memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
362} 371}
363 372
364#ifdef CONFIG_OPTPROBES 373#ifdef CONFIG_OPTPROBES
@@ -382,6 +391,17 @@ void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
382 } 391 }
383} 392}
384 393
394/* Free optimized instructions and optimized_kprobe */
395static __kprobes void free_aggr_kprobe(struct kprobe *p)
396{
397 struct optimized_kprobe *op;
398
399 op = container_of(p, struct optimized_kprobe, kp);
400 arch_remove_optimized_kprobe(op);
401 arch_remove_kprobe(p);
402 kfree(op);
403}
404
385/* Return true(!0) if the kprobe is ready for optimization. */ 405/* Return true(!0) if the kprobe is ready for optimization. */
386static inline int kprobe_optready(struct kprobe *p) 406static inline int kprobe_optready(struct kprobe *p)
387{ 407{
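The new free_aggr_kprobe() above, like the other optprobe helpers in this file, recovers the enclosing struct optimized_kprobe from its embedded struct kprobe with container_of(). A minimal userspace sketch of that idiom, using made-up structure names rather than the real kprobes types:

#include <stddef.h>
#include <stdio.h>

/* Userspace re-implementation of the kernel's container_of(): recover a
 * pointer to the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_kprobe   { unsigned long addr; unsigned int flags; };
struct fake_optprobe { int optinsn; struct fake_kprobe kp; };

int main(void)
{
	struct fake_optprobe op = { .optinsn = 42, .kp = { .addr = 0x1000 } };
	struct fake_kprobe *p = &op.kp;		/* only the member is known here */
	struct fake_optprobe *back =
		container_of(p, struct fake_optprobe, kp); /* recover the container */

	printf("optinsn=%d addr=%#lx\n", back->optinsn, back->kp.addr);
	return 0;
}

The macro is pure pointer arithmetic on the member offset, which is why the kp member may sit anywhere inside optimized_kprobe.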
@@ -395,11 +415,38 @@ static inline int kprobe_optready(struct kprobe *p)
395 return 0; 415 return 0;
396} 416}
397 417
418/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
419static inline int kprobe_disarmed(struct kprobe *p)
420{
421 struct optimized_kprobe *op;
422
423 /* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
424 if (!kprobe_aggrprobe(p))
425 return kprobe_disabled(p);
426
427 op = container_of(p, struct optimized_kprobe, kp);
428
429 return kprobe_disabled(p) && list_empty(&op->list);
430}
431
432/* Return true(!0) if the probe is queued on (un)optimizing lists */
433static int __kprobes kprobe_queued(struct kprobe *p)
434{
435 struct optimized_kprobe *op;
436
437 if (kprobe_aggrprobe(p)) {
438 op = container_of(p, struct optimized_kprobe, kp);
439 if (!list_empty(&op->list))
440 return 1;
441 }
442 return 0;
443}
444
398/* 445/*
399 * Return an optimized kprobe whose optimizing code replaces 446 * Return an optimized kprobe whose optimizing code replaces
400 * instructions including addr (exclude breakpoint). 447 * instructions including addr (exclude breakpoint).
401 */ 448 */
402struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) 449static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
403{ 450{
404 int i; 451 int i;
405 struct kprobe *p = NULL; 452 struct kprobe *p = NULL;
@@ -420,30 +467,23 @@ struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
420 467
421/* Optimization staging list, protected by kprobe_mutex */ 468/* Optimization staging list, protected by kprobe_mutex */
422static LIST_HEAD(optimizing_list); 469static LIST_HEAD(optimizing_list);
470static LIST_HEAD(unoptimizing_list);
423 471
424static void kprobe_optimizer(struct work_struct *work); 472static void kprobe_optimizer(struct work_struct *work);
425static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); 473static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
474static DECLARE_COMPLETION(optimizer_comp);
426#define OPTIMIZE_DELAY 5 475#define OPTIMIZE_DELAY 5
427 476
428/* Kprobe jump optimizer */ 477/*
429static __kprobes void kprobe_optimizer(struct work_struct *work) 478 * Optimize (replace a breakpoint with a jump) kprobes listed on
479 * optimizing_list.
480 */
481static __kprobes void do_optimize_kprobes(void)
430{ 482{
431 struct optimized_kprobe *op, *tmp; 483 /* Optimization is never done while kprobes are disarmed */
432 484 if (kprobes_all_disarmed || !kprobes_allow_optimization ||
433 /* Lock modules while optimizing kprobes */ 485 list_empty(&optimizing_list))
434 mutex_lock(&module_mutex); 486 return;
435 mutex_lock(&kprobe_mutex);
436 if (kprobes_all_disarmed || !kprobes_allow_optimization)
437 goto end;
438
439 /*
440 * Wait for quiesence period to ensure all running interrupts
441 * are done. Because optprobe may modify multiple instructions
442 * there is a chance that Nth instruction is interrupted. In that
443 * case, running interrupt can return to 2nd-Nth byte of jump
444 * instruction. This wait is for avoiding it.
445 */
446 synchronize_sched();
447 487
448 /* 488 /*
449 * The optimization/unoptimization refers online_cpus via 489 * The optimization/unoptimization refers online_cpus via
@@ -457,17 +497,111 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
457 */ 497 */
458 get_online_cpus(); 498 get_online_cpus();
459 mutex_lock(&text_mutex); 499 mutex_lock(&text_mutex);
460 list_for_each_entry_safe(op, tmp, &optimizing_list, list) { 500 arch_optimize_kprobes(&optimizing_list);
461 WARN_ON(kprobe_disabled(&op->kp)); 501 mutex_unlock(&text_mutex);
462 if (arch_optimize_kprobe(op) < 0) 502 put_online_cpus();
463 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 503}
464 list_del_init(&op->list); 504
505/*
506 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
507 * if needed) kprobes listed on unoptimizing_list.
508 */
509static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
510{
511 struct optimized_kprobe *op, *tmp;
512
513 /* Unoptimization must be done at any time (even while kprobes are disarmed) */
514 if (list_empty(&unoptimizing_list))
515 return;
516
517 /* Ditto to do_optimize_kprobes */
518 get_online_cpus();
519 mutex_lock(&text_mutex);
520 arch_unoptimize_kprobes(&unoptimizing_list, free_list);
521 /* Loop free_list for disarming */
522 list_for_each_entry_safe(op, tmp, free_list, list) {
523 /* Disarm probes if marked disabled */
524 if (kprobe_disabled(&op->kp))
525 arch_disarm_kprobe(&op->kp);
526 if (kprobe_unused(&op->kp)) {
527 /*
528 * Remove unused probes from hash list. After waiting
529 * for synchronization, these probes are reclaimed.
530 * (reclaiming is done by do_free_cleaned_kprobes.)
531 */
532 hlist_del_rcu(&op->kp.hlist);
533 } else
534 list_del_init(&op->list);
465 } 535 }
466 mutex_unlock(&text_mutex); 536 mutex_unlock(&text_mutex);
467 put_online_cpus(); 537 put_online_cpus();
468end: 538}
539
540/* Reclaim all kprobes on the free_list */
541static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
542{
543 struct optimized_kprobe *op, *tmp;
544
545 list_for_each_entry_safe(op, tmp, free_list, list) {
546 BUG_ON(!kprobe_unused(&op->kp));
547 list_del_init(&op->list);
548 free_aggr_kprobe(&op->kp);
549 }
550}
551
552/* Start the optimizer after OPTIMIZE_DELAY has passed */
553static __kprobes void kick_kprobe_optimizer(void)
554{
555 if (!delayed_work_pending(&optimizing_work))
556 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
557}
558
559/* Kprobe jump optimizer */
560static __kprobes void kprobe_optimizer(struct work_struct *work)
561{
562 LIST_HEAD(free_list);
563
564 /* Lock modules while optimizing kprobes */
565 mutex_lock(&module_mutex);
566 mutex_lock(&kprobe_mutex);
567
568 /*
569 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
570 * kprobes before waiting for the quiescence period.
571 */
572 do_unoptimize_kprobes(&free_list);
573
574 /*
575 * Step 2: Wait for the quiescence period to ensure all running interrupts
576 * are done. Because an optprobe may modify multiple instructions,
577 * there is a chance that the Nth instruction is interrupted. In that
578 * case, the interrupted context could return into the 2nd-Nth byte of
579 * the jump instruction. This wait avoids that.
580 */
581 synchronize_sched();
582
583 /* Step 3: Optimize kprobes after the quiescence period */
584 do_optimize_kprobes();
585
586 /* Step 4: Free cleaned kprobes after the quiescence period */
587 do_free_cleaned_kprobes(&free_list);
588
469 mutex_unlock(&kprobe_mutex); 589 mutex_unlock(&kprobe_mutex);
470 mutex_unlock(&module_mutex); 590 mutex_unlock(&module_mutex);
591
592 /* Step 5: Kick optimizer again if needed */
593 if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
594 kick_kprobe_optimizer();
595 else
596 /* Wake up all waiters */
597 complete_all(&optimizer_comp);
598}
599
600/* Wait for optimization and unoptimization to complete */
601static __kprobes void wait_for_kprobe_optimizer(void)
602{
603 if (delayed_work_pending(&optimizing_work))
604 wait_for_completion(&optimizer_comp);
471} 605}
472 606
473/* Optimize kprobe if p is ready to be optimized */ 607/* Optimize kprobe if p is ready to be optimized */
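The reworked optimizer above batches both optimization and delayed unoptimization behind one delayed work item: callers queue a probe and kick the work only if it is not already pending, and wait_for_kprobe_optimizer() blocks on a completion until the batch drains. A rough userspace sketch of that kick-if-not-pending / wait-for-completion shape, using pthreads and purely illustrative names (none of this is kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int queued;		/* queued (un)optimization requests */
static bool worker_pending;	/* analogous to delayed_work_pending() */

static void *worker(void *arg)
{
	(void)arg;
	usleep(100 * 1000);		/* stand-in for the OPTIMIZE_DELAY batch window */
	pthread_mutex_lock(&lock);
	printf("processing %d queued request(s)\n", queued);
	queued = 0;			/* drain the whole batch in one pass */
	worker_pending = false;
	pthread_cond_broadcast(&done);	/* wake all waiters, like complete_all() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void kick_worker(void)		/* caller holds lock */
{
	pthread_t tid;
	if (!worker_pending) {		/* schedule only if not already pending */
		worker_pending = true;
		if (pthread_create(&tid, NULL, worker, NULL) == 0)
			pthread_detach(tid);
		else
			worker_pending = false;
	}
}

static void queue_request(void)
{
	pthread_mutex_lock(&lock);
	queued++;
	kick_worker();
	pthread_mutex_unlock(&lock);
}

static void wait_for_worker(void)	/* like wait_for_kprobe_optimizer() */
{
	pthread_mutex_lock(&lock);
	while (worker_pending)
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	queue_request();
	queue_request();		/* coalesced into the same batch */
	wait_for_worker();
	return 0;
}

The real code additionally re-kicks itself when new work arrives during the batch; the sketch folds everything into a single pass for brevity.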
@@ -493,42 +627,99 @@ static __kprobes void optimize_kprobe(struct kprobe *p)
493 /* Check if it is already optimized. */ 627 /* Check if it is already optimized. */
494 if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) 628 if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
495 return; 629 return;
496
497 op->kp.flags |= KPROBE_FLAG_OPTIMIZED; 630 op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
498 list_add(&op->list, &optimizing_list); 631
499 if (!delayed_work_pending(&optimizing_work)) 632 if (!list_empty(&op->list))
500 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); 633 /* This is under unoptimizing. Just dequeue the probe */
634 list_del_init(&op->list);
635 else {
636 list_add(&op->list, &optimizing_list);
637 kick_kprobe_optimizer();
638 }
639}
640
641/* Short cut to direct unoptimizing */
642static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
643{
644 get_online_cpus();
645 arch_unoptimize_kprobe(op);
646 put_online_cpus();
647 if (kprobe_disabled(&op->kp))
648 arch_disarm_kprobe(&op->kp);
501} 649}
502 650
503/* Unoptimize a kprobe if p is optimized */ 651/* Unoptimize a kprobe if p is optimized */
504static __kprobes void unoptimize_kprobe(struct kprobe *p) 652static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
505{ 653{
506 struct optimized_kprobe *op; 654 struct optimized_kprobe *op;
507 655
508 if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) { 656 if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
509 op = container_of(p, struct optimized_kprobe, kp); 657 return; /* This is not an optprobe nor optimized */
510 if (!list_empty(&op->list)) 658
511 /* Dequeue from the optimization queue */ 659 op = container_of(p, struct optimized_kprobe, kp);
660 if (!kprobe_optimized(p)) {
661 /* Unoptimized or unoptimizing case */
662 if (force && !list_empty(&op->list)) {
663 /*
664 * Only if this is unoptimizing kprobe and forced,
665 * forcibly unoptimize it. (No need to unoptimize
666 * unoptimized kprobe again :)
667 */
512 list_del_init(&op->list); 668 list_del_init(&op->list);
513 else 669 force_unoptimize_kprobe(op);
514 /* Replace jump with break */ 670 }
515 arch_unoptimize_kprobe(op); 671 return;
516 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 672 }
673
674 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
675 if (!list_empty(&op->list)) {
676 /* Dequeue from the optimization queue */
677 list_del_init(&op->list);
678 return;
679 }
680 /* Optimized kprobe case */
681 if (force)
682 /* Forcibly update the code: this is a special case */
683 force_unoptimize_kprobe(op);
684 else {
685 list_add(&op->list, &unoptimizing_list);
686 kick_kprobe_optimizer();
517 } 687 }
518} 688}
519 689
690/* Cancel unoptimizing for reusing */
691static void reuse_unused_kprobe(struct kprobe *ap)
692{
693 struct optimized_kprobe *op;
694
695 BUG_ON(!kprobe_unused(ap));
696 /*
697 * An unused kprobe MUST be in the middle of delayed unoptimizing (which
698 * means there is still a relative jump in place) and disabled.
699 */
700 op = container_of(ap, struct optimized_kprobe, kp);
701 if (unlikely(list_empty(&op->list)))
702 printk(KERN_WARNING "Warning: found a stray unused "
703 "aggrprobe@%p\n", ap->addr);
704 /* Enable the probe again */
705 ap->flags &= ~KPROBE_FLAG_DISABLED;
706 /* Optimize it again (remove from op->list) */
707 BUG_ON(!kprobe_optready(ap));
708 optimize_kprobe(ap);
709}
710
520/* Remove optimized instructions */ 711/* Remove optimized instructions */
521static void __kprobes kill_optimized_kprobe(struct kprobe *p) 712static void __kprobes kill_optimized_kprobe(struct kprobe *p)
522{ 713{
523 struct optimized_kprobe *op; 714 struct optimized_kprobe *op;
524 715
525 op = container_of(p, struct optimized_kprobe, kp); 716 op = container_of(p, struct optimized_kprobe, kp);
526 if (!list_empty(&op->list)) { 717 if (!list_empty(&op->list))
527 /* Dequeue from the optimization queue */ 718 /* Dequeue from the (un)optimization queue */
528 list_del_init(&op->list); 719 list_del_init(&op->list);
529 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 720
530 } 721 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
531 /* Don't unoptimize, because the target code will be freed. */ 722 /* Don't touch the code, because it is already freed. */
532 arch_remove_optimized_kprobe(op); 723 arch_remove_optimized_kprobe(op);
533} 724}
534 725
@@ -541,16 +732,6 @@ static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
541 arch_prepare_optimized_kprobe(op); 732 arch_prepare_optimized_kprobe(op);
542} 733}
543 734
544/* Free optimized instructions and optimized_kprobe */
545static __kprobes void free_aggr_kprobe(struct kprobe *p)
546{
547 struct optimized_kprobe *op;
548
549 op = container_of(p, struct optimized_kprobe, kp);
550 arch_remove_optimized_kprobe(op);
551 kfree(op);
552}
553
554/* Allocate new optimized_kprobe and try to prepare optimized instructions */ 735/* Allocate new optimized_kprobe and try to prepare optimized instructions */
555static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 736static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
556{ 737{
@@ -585,7 +766,8 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
585 op = container_of(ap, struct optimized_kprobe, kp); 766 op = container_of(ap, struct optimized_kprobe, kp);
586 if (!arch_prepared_optinsn(&op->optinsn)) { 767 if (!arch_prepared_optinsn(&op->optinsn)) {
587 /* If failed to setup optimizing, fallback to kprobe */ 768 /* If failed to setup optimizing, fallback to kprobe */
588 free_aggr_kprobe(ap); 769 arch_remove_optimized_kprobe(op);
770 kfree(op);
589 return; 771 return;
590 } 772 }
591 773
@@ -594,6 +776,7 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
594} 776}
595 777
596#ifdef CONFIG_SYSCTL 778#ifdef CONFIG_SYSCTL
779/* This should be called with kprobe_mutex locked */
597static void __kprobes optimize_all_kprobes(void) 780static void __kprobes optimize_all_kprobes(void)
598{ 781{
599 struct hlist_head *head; 782 struct hlist_head *head;
@@ -606,17 +789,16 @@ static void __kprobes optimize_all_kprobes(void)
606 return; 789 return;
607 790
608 kprobes_allow_optimization = true; 791 kprobes_allow_optimization = true;
609 mutex_lock(&text_mutex);
610 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 792 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
611 head = &kprobe_table[i]; 793 head = &kprobe_table[i];
612 hlist_for_each_entry_rcu(p, node, head, hlist) 794 hlist_for_each_entry_rcu(p, node, head, hlist)
613 if (!kprobe_disabled(p)) 795 if (!kprobe_disabled(p))
614 optimize_kprobe(p); 796 optimize_kprobe(p);
615 } 797 }
616 mutex_unlock(&text_mutex);
617 printk(KERN_INFO "Kprobes globally optimized\n"); 798 printk(KERN_INFO "Kprobes globally optimized\n");
618} 799}
619 800
801/* This should be called with kprobe_mutex locked */
620static void __kprobes unoptimize_all_kprobes(void) 802static void __kprobes unoptimize_all_kprobes(void)
621{ 803{
622 struct hlist_head *head; 804 struct hlist_head *head;
@@ -629,21 +811,16 @@ static void __kprobes unoptimize_all_kprobes(void)
629 return; 811 return;
630 812
631 kprobes_allow_optimization = false; 813 kprobes_allow_optimization = false;
632 printk(KERN_INFO "Kprobes globally unoptimized\n");
633 get_online_cpus(); /* For avoiding text_mutex deadlock */
634 mutex_lock(&text_mutex);
635 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 814 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
636 head = &kprobe_table[i]; 815 head = &kprobe_table[i];
637 hlist_for_each_entry_rcu(p, node, head, hlist) { 816 hlist_for_each_entry_rcu(p, node, head, hlist) {
638 if (!kprobe_disabled(p)) 817 if (!kprobe_disabled(p))
639 unoptimize_kprobe(p); 818 unoptimize_kprobe(p, false);
640 } 819 }
641 } 820 }
642 821 /* Wait for unoptimizing completion */
643 mutex_unlock(&text_mutex); 822 wait_for_kprobe_optimizer();
644 put_online_cpus(); 823 printk(KERN_INFO "Kprobes globally unoptimized\n");
645 /* Allow all currently running kprobes to complete */
646 synchronize_sched();
647} 824}
648 825
649int sysctl_kprobes_optimization; 826int sysctl_kprobes_optimization;
@@ -667,44 +844,60 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
667} 844}
668#endif /* CONFIG_SYSCTL */ 845#endif /* CONFIG_SYSCTL */
669 846
847/* Put a breakpoint for a probe. Must be called with text_mutex locked */
670static void __kprobes __arm_kprobe(struct kprobe *p) 848static void __kprobes __arm_kprobe(struct kprobe *p)
671{ 849{
672 struct kprobe *old_p; 850 struct kprobe *_p;
673 851
674 /* Check collision with other optimized kprobes */ 852 /* Check collision with other optimized kprobes */
675 old_p = get_optimized_kprobe((unsigned long)p->addr); 853 _p = get_optimized_kprobe((unsigned long)p->addr);
676 if (unlikely(old_p)) 854 if (unlikely(_p))
677 unoptimize_kprobe(old_p); /* Fallback to unoptimized kprobe */ 855 /* Fallback to unoptimized kprobe */
856 unoptimize_kprobe(_p, true);
678 857
679 arch_arm_kprobe(p); 858 arch_arm_kprobe(p);
680 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ 859 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */
681} 860}
682 861
683static void __kprobes __disarm_kprobe(struct kprobe *p) 862/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
863static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
684{ 864{
685 struct kprobe *old_p; 865 struct kprobe *_p;
686 866
687 unoptimize_kprobe(p); /* Try to unoptimize */ 867 unoptimize_kprobe(p, false); /* Try to unoptimize */
688 arch_disarm_kprobe(p);
689 868
690 /* If another kprobe was blocked, optimize it. */ 869 if (!kprobe_queued(p)) {
691 old_p = get_optimized_kprobe((unsigned long)p->addr); 870 arch_disarm_kprobe(p);
692 if (unlikely(old_p)) 871 /* If another kprobe was blocked, optimize it. */
693 optimize_kprobe(old_p); 872 _p = get_optimized_kprobe((unsigned long)p->addr);
873 if (unlikely(_p) && reopt)
874 optimize_kprobe(_p);
875 }
876 /* TODO: reoptimize others after unoptimizing this probe */
694} 877}
695 878
696#else /* !CONFIG_OPTPROBES */ 879#else /* !CONFIG_OPTPROBES */
697 880
698#define optimize_kprobe(p) do {} while (0) 881#define optimize_kprobe(p) do {} while (0)
699#define unoptimize_kprobe(p) do {} while (0) 882#define unoptimize_kprobe(p, f) do {} while (0)
700#define kill_optimized_kprobe(p) do {} while (0) 883#define kill_optimized_kprobe(p) do {} while (0)
701#define prepare_optimized_kprobe(p) do {} while (0) 884#define prepare_optimized_kprobe(p) do {} while (0)
702#define try_to_optimize_kprobe(p) do {} while (0) 885#define try_to_optimize_kprobe(p) do {} while (0)
703#define __arm_kprobe(p) arch_arm_kprobe(p) 886#define __arm_kprobe(p) arch_arm_kprobe(p)
704#define __disarm_kprobe(p) arch_disarm_kprobe(p) 887#define __disarm_kprobe(p, o) arch_disarm_kprobe(p)
888#define kprobe_disarmed(p) kprobe_disabled(p)
889#define wait_for_kprobe_optimizer() do {} while (0)
890
891/* There should be no unused kprobes that can be reused without optimization */
892static void reuse_unused_kprobe(struct kprobe *ap)
893{
894 printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
895 BUG_ON(kprobe_unused(ap));
896}
705 897
706static __kprobes void free_aggr_kprobe(struct kprobe *p) 898static __kprobes void free_aggr_kprobe(struct kprobe *p)
707{ 899{
900 arch_remove_kprobe(p);
708 kfree(p); 901 kfree(p);
709} 902}
710 903
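When CONFIG_OPTPROBES is off, the optimization entry points above collapse to empty do-while macros, so common code such as arm_kprobe()/disarm_kprobe() needs no #ifdef. A small standalone illustration of that stubbing pattern (the feature flag and function names are invented for the example):

#include <stdio.h>

/* #define HAVE_OPTIMIZER 1 */		/* flip to compile in the real implementation */

#ifdef HAVE_OPTIMIZER
static void optimize_thing(int id)   { printf("optimizing %d\n", id); }
static void unoptimize_thing(int id, int force)
{
	printf("unoptimizing %d (force=%d)\n", id, force);
}
#else
/* Feature compiled out: calls vanish, callers stay #ifdef-free. */
#define optimize_thing(id)		do {} while (0)
#define unoptimize_thing(id, force)	do {} while (0)
#endif

int main(void)
{
	optimize_thing(1);		/* compiles away entirely when the feature is off */
	unoptimize_thing(1, 0);
	printf("done\n");
	return 0;
}

The usual caveat of the macro form applies: the arguments are not evaluated at all when the feature is compiled out, which is acceptable here because the callers pass plain pointers with no side effects.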
@@ -730,11 +923,10 @@ static void __kprobes arm_kprobe(struct kprobe *kp)
730/* Disarm a kprobe with text_mutex */ 923/* Disarm a kprobe with text_mutex */
731static void __kprobes disarm_kprobe(struct kprobe *kp) 924static void __kprobes disarm_kprobe(struct kprobe *kp)
732{ 925{
733 get_online_cpus(); /* For avoiding text_mutex deadlock */ 926 /* Ditto */
734 mutex_lock(&text_mutex); 927 mutex_lock(&text_mutex);
735 __disarm_kprobe(kp); 928 __disarm_kprobe(kp, true);
736 mutex_unlock(&text_mutex); 929 mutex_unlock(&text_mutex);
737 put_online_cpus();
738} 930}
739 931
740/* 932/*
@@ -773,7 +965,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
773static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, 965static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
774 int trapnr) 966 int trapnr)
775{ 967{
776 struct kprobe *cur = __get_cpu_var(kprobe_instance); 968 struct kprobe *cur = __this_cpu_read(kprobe_instance);
777 969
778 /* 970 /*
779 * if we faulted "during" the execution of a user specified 971 * if we faulted "during" the execution of a user specified
@@ -788,7 +980,7 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
788 980
789static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs) 981static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
790{ 982{
791 struct kprobe *cur = __get_cpu_var(kprobe_instance); 983 struct kprobe *cur = __this_cpu_read(kprobe_instance);
792 int ret = 0; 984 int ret = 0;
793 985
794 if (cur && cur->break_handler) { 986 if (cur && cur->break_handler) {
@@ -831,6 +1023,7 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
831 1023
832void __kprobes kretprobe_hash_lock(struct task_struct *tsk, 1024void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
833 struct hlist_head **head, unsigned long *flags) 1025 struct hlist_head **head, unsigned long *flags)
1026__acquires(hlist_lock)
834{ 1027{
835 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); 1028 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
836 spinlock_t *hlist_lock; 1029 spinlock_t *hlist_lock;
@@ -842,6 +1035,7 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
842 1035
843static void __kprobes kretprobe_table_lock(unsigned long hash, 1036static void __kprobes kretprobe_table_lock(unsigned long hash,
844 unsigned long *flags) 1037 unsigned long *flags)
1038__acquires(hlist_lock)
845{ 1039{
846 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 1040 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
847 spin_lock_irqsave(hlist_lock, *flags); 1041 spin_lock_irqsave(hlist_lock, *flags);
@@ -849,6 +1043,7 @@ static void __kprobes kretprobe_table_lock(unsigned long hash,
849 1043
850void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, 1044void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
851 unsigned long *flags) 1045 unsigned long *flags)
1046__releases(hlist_lock)
852{ 1047{
853 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); 1048 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
854 spinlock_t *hlist_lock; 1049 spinlock_t *hlist_lock;
@@ -857,7 +1052,9 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
857 spin_unlock_irqrestore(hlist_lock, *flags); 1052 spin_unlock_irqrestore(hlist_lock, *flags);
858} 1053}
859 1054
860void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags) 1055static void __kprobes kretprobe_table_unlock(unsigned long hash,
1056 unsigned long *flags)
1057__releases(hlist_lock)
861{ 1058{
862 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 1059 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
863 spin_unlock_irqrestore(hlist_lock, *flags); 1060 spin_unlock_irqrestore(hlist_lock, *flags);
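The __acquires()/__releases() annotations added to the kretprobe hash-lock helpers are sparse lock-context markers: they expand to nothing in a normal build and are only checked when sparse defines __CHECKER__. A standalone sketch of such annotated lock/unlock helpers, with an invented lock name, pthread mutexes standing in for the kernel spinlocks, and expansions mirroring what the kernel uses:

#include <pthread.h>

#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)	/* nothing for a regular compile */
# define __releases(x)
#endif

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* sparse can now verify that callers keep acquire/release balanced */
static void table_lock_acquire(void) __acquires(table_lock)
{
	pthread_mutex_lock(&table_lock);
}

static void table_lock_release(void) __releases(table_lock)
{
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	table_lock_acquire();
	/* ... access the shared table ... */
	table_lock_release();
	return 0;
}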
@@ -935,7 +1132,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
935 BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); 1132 BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
936 1133
937 if (p->break_handler || p->post_handler) 1134 if (p->break_handler || p->post_handler)
938 unoptimize_kprobe(ap); /* Fall back to normal kprobe */ 1135 unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */
939 1136
940 if (p->break_handler) { 1137 if (p->break_handler) {
941 if (ap->break_handler) 1138 if (ap->break_handler)
@@ -986,19 +1183,21 @@ static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
986 * This is the second or subsequent kprobe at the address - handle 1183 * This is the second or subsequent kprobe at the address - handle
987 * the intricacies 1184 * the intricacies
988 */ 1185 */
989static int __kprobes register_aggr_kprobe(struct kprobe *old_p, 1186static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
990 struct kprobe *p) 1187 struct kprobe *p)
991{ 1188{
992 int ret = 0; 1189 int ret = 0;
993 struct kprobe *ap = old_p; 1190 struct kprobe *ap = orig_p;
994 1191
995 if (!kprobe_aggrprobe(old_p)) { 1192 if (!kprobe_aggrprobe(orig_p)) {
996 /* If old_p is not an aggr_kprobe, create new aggr_kprobe. */ 1193 /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
997 ap = alloc_aggr_kprobe(old_p); 1194 ap = alloc_aggr_kprobe(orig_p);
998 if (!ap) 1195 if (!ap)
999 return -ENOMEM; 1196 return -ENOMEM;
1000 init_aggr_kprobe(ap, old_p); 1197 init_aggr_kprobe(ap, orig_p);
1001 } 1198 } else if (kprobe_unused(ap))
1199 /* This probe is going to die. Rescue it */
1200 reuse_unused_kprobe(ap);
1002 1201
1003 if (kprobe_gone(ap)) { 1202 if (kprobe_gone(ap)) {
1004 /* 1203 /*
@@ -1032,23 +1231,6 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
1032 return add_new_kprobe(ap, p); 1231 return add_new_kprobe(ap, p);
1033} 1232}
1034 1233
1035/* Try to disable aggr_kprobe, and return 1 if succeeded.*/
1036static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
1037{
1038 struct kprobe *kp;
1039
1040 list_for_each_entry_rcu(kp, &p->list, list) {
1041 if (!kprobe_disabled(kp))
1042 /*
1043 * There is an active probe on the list.
1044 * We can't disable aggr_kprobe.
1045 */
1046 return 0;
1047 }
1048 p->flags |= KPROBE_FLAG_DISABLED;
1049 return 1;
1050}
1051
1052static int __kprobes in_kprobes_functions(unsigned long addr) 1234static int __kprobes in_kprobes_functions(unsigned long addr)
1053{ 1235{
1054 struct kprobe_blackpoint *kb; 1236 struct kprobe_blackpoint *kb;
@@ -1091,34 +1273,33 @@ static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
1091/* Check passed kprobe is valid and return kprobe in kprobe_table. */ 1273/* Check passed kprobe is valid and return kprobe in kprobe_table. */
1092static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) 1274static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
1093{ 1275{
1094 struct kprobe *old_p, *list_p; 1276 struct kprobe *ap, *list_p;
1095 1277
1096 old_p = get_kprobe(p->addr); 1278 ap = get_kprobe(p->addr);
1097 if (unlikely(!old_p)) 1279 if (unlikely(!ap))
1098 return NULL; 1280 return NULL;
1099 1281
1100 if (p != old_p) { 1282 if (p != ap) {
1101 list_for_each_entry_rcu(list_p, &old_p->list, list) 1283 list_for_each_entry_rcu(list_p, &ap->list, list)
1102 if (list_p == p) 1284 if (list_p == p)
1103 /* kprobe p is a valid probe */ 1285 /* kprobe p is a valid probe */
1104 goto valid; 1286 goto valid;
1105 return NULL; 1287 return NULL;
1106 } 1288 }
1107valid: 1289valid:
1108 return old_p; 1290 return ap;
1109} 1291}
1110 1292
1111/* Return error if the kprobe is being re-registered */ 1293/* Return error if the kprobe is being re-registered */
1112static inline int check_kprobe_rereg(struct kprobe *p) 1294static inline int check_kprobe_rereg(struct kprobe *p)
1113{ 1295{
1114 int ret = 0; 1296 int ret = 0;
1115 struct kprobe *old_p;
1116 1297
1117 mutex_lock(&kprobe_mutex); 1298 mutex_lock(&kprobe_mutex);
1118 old_p = __get_valid_kprobe(p); 1299 if (__get_valid_kprobe(p))
1119 if (old_p)
1120 ret = -EINVAL; 1300 ret = -EINVAL;
1121 mutex_unlock(&kprobe_mutex); 1301 mutex_unlock(&kprobe_mutex);
1302
1122 return ret; 1303 return ret;
1123} 1304}
1124 1305
@@ -1138,13 +1319,13 @@ int __kprobes register_kprobe(struct kprobe *p)
1138 if (ret) 1319 if (ret)
1139 return ret; 1320 return ret;
1140 1321
1322 jump_label_lock();
1141 preempt_disable(); 1323 preempt_disable();
1142 if (!kernel_text_address((unsigned long) p->addr) || 1324 if (!kernel_text_address((unsigned long) p->addr) ||
1143 in_kprobes_functions((unsigned long) p->addr) || 1325 in_kprobes_functions((unsigned long) p->addr) ||
1144 ftrace_text_reserved(p->addr, p->addr)) { 1326 ftrace_text_reserved(p->addr, p->addr) ||
1145 preempt_enable(); 1327 jump_label_text_reserved(p->addr, p->addr))
1146 return -EINVAL; 1328 goto fail_with_jump_label;
1147 }
1148 1329
1149 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ 1330 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1150 p->flags &= KPROBE_FLAG_DISABLED; 1331 p->flags &= KPROBE_FLAG_DISABLED;
@@ -1158,10 +1339,9 @@ int __kprobes register_kprobe(struct kprobe *p)
1158 * We must hold a refcount of the probed module while updating 1339 * We must hold a refcount of the probed module while updating
1159 * its code to prohibit unexpected unloading. 1340 * its code to prohibit unexpected unloading.
1160 */ 1341 */
1161 if (unlikely(!try_module_get(probed_mod))) { 1342 if (unlikely(!try_module_get(probed_mod)))
1162 preempt_enable(); 1343 goto fail_with_jump_label;
1163 return -EINVAL; 1344
1164 }
1165 /* 1345 /*
1166 * If the module freed .init.text, we couldn't insert 1346 * If the module freed .init.text, we couldn't insert
1167 * kprobes in there. 1347 * kprobes in there.
@@ -1169,16 +1349,18 @@ int __kprobes register_kprobe(struct kprobe *p)
1169 if (within_module_init((unsigned long)p->addr, probed_mod) && 1349 if (within_module_init((unsigned long)p->addr, probed_mod) &&
1170 probed_mod->state != MODULE_STATE_COMING) { 1350 probed_mod->state != MODULE_STATE_COMING) {
1171 module_put(probed_mod); 1351 module_put(probed_mod);
1172 preempt_enable(); 1352 goto fail_with_jump_label;
1173 return -EINVAL;
1174 } 1353 }
1175 } 1354 }
1176 preempt_enable(); 1355 preempt_enable();
1356 jump_label_unlock();
1177 1357
1178 p->nmissed = 0; 1358 p->nmissed = 0;
1179 INIT_LIST_HEAD(&p->list); 1359 INIT_LIST_HEAD(&p->list);
1180 mutex_lock(&kprobe_mutex); 1360 mutex_lock(&kprobe_mutex);
1181 1361
1362 jump_label_lock(); /* needed to call jump_label_text_reserved() */
1363
1182 get_online_cpus(); /* For avoiding text_mutex deadlock. */ 1364 get_online_cpus(); /* For avoiding text_mutex deadlock. */
1183 mutex_lock(&text_mutex); 1365 mutex_lock(&text_mutex);
1184 1366
@@ -1206,76 +1388,136 @@ int __kprobes register_kprobe(struct kprobe *p)
1206out: 1388out:
1207 mutex_unlock(&text_mutex); 1389 mutex_unlock(&text_mutex);
1208 put_online_cpus(); 1390 put_online_cpus();
1391 jump_label_unlock();
1209 mutex_unlock(&kprobe_mutex); 1392 mutex_unlock(&kprobe_mutex);
1210 1393
1211 if (probed_mod) 1394 if (probed_mod)
1212 module_put(probed_mod); 1395 module_put(probed_mod);
1213 1396
1214 return ret; 1397 return ret;
1398
1399fail_with_jump_label:
1400 preempt_enable();
1401 jump_label_unlock();
1402 return -EINVAL;
1215} 1403}
1216EXPORT_SYMBOL_GPL(register_kprobe); 1404EXPORT_SYMBOL_GPL(register_kprobe);
1217 1405
1406/* Check if all probes on the aggrprobe are disabled */
1407static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
1408{
1409 struct kprobe *kp;
1410
1411 list_for_each_entry_rcu(kp, &ap->list, list)
1412 if (!kprobe_disabled(kp))
1413 /*
1414 * There is an active probe on the list.
1415 * We can't disable this ap.
1416 */
1417 return 0;
1418
1419 return 1;
1420}
1421
1422/* Disable one kprobe: Must be called with kprobe_mutex held */
1423static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
1424{
1425 struct kprobe *orig_p;
1426
1427 /* Get an original kprobe for return */
1428 orig_p = __get_valid_kprobe(p);
1429 if (unlikely(orig_p == NULL))
1430 return NULL;
1431
1432 if (!kprobe_disabled(p)) {
1433 /* Disable probe if it is a child probe */
1434 if (p != orig_p)
1435 p->flags |= KPROBE_FLAG_DISABLED;
1436
1437 /* Try to disarm and disable this/parent probe */
1438 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1439 disarm_kprobe(orig_p);
1440 orig_p->flags |= KPROBE_FLAG_DISABLED;
1441 }
1442 }
1443
1444 return orig_p;
1445}
1446
1218/* 1447/*
1219 * Unregister a kprobe without a scheduler synchronization. 1448 * Unregister a kprobe without a scheduler synchronization.
1220 */ 1449 */
1221static int __kprobes __unregister_kprobe_top(struct kprobe *p) 1450static int __kprobes __unregister_kprobe_top(struct kprobe *p)
1222{ 1451{
1223 struct kprobe *old_p, *list_p; 1452 struct kprobe *ap, *list_p;
1224 1453
1225 old_p = __get_valid_kprobe(p); 1454 /* Disable kprobe. This will disarm it if needed. */
1226 if (old_p == NULL) 1455 ap = __disable_kprobe(p);
1456 if (ap == NULL)
1227 return -EINVAL; 1457 return -EINVAL;
1228 1458
1229 if (old_p == p || 1459 if (ap == p)
1230 (kprobe_aggrprobe(old_p) &&
1231 list_is_singular(&old_p->list))) {
1232 /* 1460 /*
1233 * Only probe on the hash list. Disarm only if kprobes are 1461 * This probe is an independent(and non-optimized) kprobe
1234 * enabled and not gone - otherwise, the breakpoint would 1462 * (not an aggrprobe). Remove from the hash list.
1235 * already have been removed. We save on flushing icache.
1236 */ 1463 */
1237 if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) 1464 goto disarmed;
1238 disarm_kprobe(old_p); 1465
1239 hlist_del_rcu(&old_p->hlist); 1466 /* Following process expects this probe is an aggrprobe */
1240 } else { 1467 WARN_ON(!kprobe_aggrprobe(ap));
1468
1469 if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1470 /*
1471 * !disarmed can happen if the probe is under delayed
1472 * unoptimizing.
1473 */
1474 goto disarmed;
1475 else {
1476 /* If disabling probe has special handlers, update aggrprobe */
1241 if (p->break_handler && !kprobe_gone(p)) 1477 if (p->break_handler && !kprobe_gone(p))
1242 old_p->break_handler = NULL; 1478 ap->break_handler = NULL;
1243 if (p->post_handler && !kprobe_gone(p)) { 1479 if (p->post_handler && !kprobe_gone(p)) {
1244 list_for_each_entry_rcu(list_p, &old_p->list, list) { 1480 list_for_each_entry_rcu(list_p, &ap->list, list) {
1245 if ((list_p != p) && (list_p->post_handler)) 1481 if ((list_p != p) && (list_p->post_handler))
1246 goto noclean; 1482 goto noclean;
1247 } 1483 }
1248 old_p->post_handler = NULL; 1484 ap->post_handler = NULL;
1249 } 1485 }
1250noclean: 1486noclean:
1487 /*
1488 * Remove from the aggrprobe: this path will do nothing in
1489 * __unregister_kprobe_bottom().
1490 */
1251 list_del_rcu(&p->list); 1491 list_del_rcu(&p->list);
1252 if (!kprobe_disabled(old_p)) { 1492 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1253 try_to_disable_aggr_kprobe(old_p); 1493 /*
1254 if (!kprobes_all_disarmed) { 1494 * Try to optimize this probe again, because post
1255 if (kprobe_disabled(old_p)) 1495 * handler may have been changed.
1256 disarm_kprobe(old_p); 1496 */
1257 else 1497 optimize_kprobe(ap);
1258 /* Try to optimize this probe again */
1259 optimize_kprobe(old_p);
1260 }
1261 }
1262 } 1498 }
1263 return 0; 1499 return 0;
1500
1501disarmed:
1502 BUG_ON(!kprobe_disarmed(ap));
1503 hlist_del_rcu(&ap->hlist);
1504 return 0;
1264} 1505}
1265 1506
1266static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) 1507static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
1267{ 1508{
1268 struct kprobe *old_p; 1509 struct kprobe *ap;
1269 1510
1270 if (list_empty(&p->list)) 1511 if (list_empty(&p->list))
1512 /* This is an independent kprobe */
1271 arch_remove_kprobe(p); 1513 arch_remove_kprobe(p);
1272 else if (list_is_singular(&p->list)) { 1514 else if (list_is_singular(&p->list)) {
1273 /* "p" is the last child of an aggr_kprobe */ 1515 /* This is the last child of an aggrprobe */
1274 old_p = list_entry(p->list.next, struct kprobe, list); 1516 ap = list_entry(p->list.next, struct kprobe, list);
1275 list_del(&p->list); 1517 list_del(&p->list);
1276 arch_remove_kprobe(old_p); 1518 free_aggr_kprobe(ap);
1277 free_aggr_kprobe(old_p);
1278 } 1519 }
1520 /* Otherwise, do nothing. */
1279} 1521}
1280 1522
1281int __kprobes register_kprobes(struct kprobe **kps, int num) 1523int __kprobes register_kprobes(struct kprobe **kps, int num)
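The register_kprobe() changes above replace the scattered preempt_enable(); return -EINVAL; exits with a single fail_with_jump_label unwind label, since every early failure now has to release both preemption and jump_label_lock(). A compact standalone sketch of that centralized goto-unwind idiom, with invented resources in place of the kernel locks:

#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_step)
{
	char *buf_a = NULL, *buf_b = NULL;

	buf_a = malloc(64);
	if (!buf_a || fail_step == 1)
		goto fail_a;

	buf_b = malloc(64);
	if (!buf_b || fail_step == 2)
		goto fail_b;

	printf("setup succeeded\n");
	free(buf_b);
	free(buf_a);
	return 0;

fail_b:				/* undo in reverse order of acquisition */
	free(buf_b);
fail_a:
	free(buf_a);
	return -1;
}

int main(void)
{
	return setup(2) == -1 ? 0 : 1;	/* expect the error path to trigger */
}

Every exit path releases exactly what it has taken so far, which is the property the fail_with_jump_label label restores in the patched function.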
@@ -1339,18 +1581,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
1339 if (num <= 0) 1581 if (num <= 0)
1340 return -EINVAL; 1582 return -EINVAL;
1341 for (i = 0; i < num; i++) { 1583 for (i = 0; i < num; i++) {
1342 unsigned long addr; 1584 unsigned long addr, offset;
1343 jp = jps[i]; 1585 jp = jps[i];
1344 addr = arch_deref_entry_point(jp->entry); 1586 addr = arch_deref_entry_point(jp->entry);
1345 1587
1346 if (!kernel_text_address(addr)) 1588 /* Verify probepoint is a function entry point */
1347 ret = -EINVAL; 1589 if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
1348 else { 1590 offset == 0) {
1349 /* Todo: Verify probepoint is a function entry point */
1350 jp->kp.pre_handler = setjmp_pre_handler; 1591 jp->kp.pre_handler = setjmp_pre_handler;
1351 jp->kp.break_handler = longjmp_break_handler; 1592 jp->kp.break_handler = longjmp_break_handler;
1352 ret = register_kprobe(&jp->kp); 1593 ret = register_kprobe(&jp->kp);
1353 } 1594 } else
1595 ret = -EINVAL;
1596
1354 if (ret < 0) { 1597 if (ret < 0) {
1355 if (i > 0) 1598 if (i > 0)
1356 unregister_jprobes(jps, i); 1599 unregister_jprobes(jps, i);
@@ -1592,29 +1835,13 @@ static void __kprobes kill_kprobe(struct kprobe *p)
1592int __kprobes disable_kprobe(struct kprobe *kp) 1835int __kprobes disable_kprobe(struct kprobe *kp)
1593{ 1836{
1594 int ret = 0; 1837 int ret = 0;
1595 struct kprobe *p;
1596 1838
1597 mutex_lock(&kprobe_mutex); 1839 mutex_lock(&kprobe_mutex);
1598 1840
1599 /* Check whether specified probe is valid. */ 1841 /* Disable this kprobe */
1600 p = __get_valid_kprobe(kp); 1842 if (__disable_kprobe(kp) == NULL)
1601 if (unlikely(p == NULL)) {
1602 ret = -EINVAL; 1843 ret = -EINVAL;
1603 goto out;
1604 }
1605 1844
1606 /* If the probe is already disabled (or gone), just return */
1607 if (kprobe_disabled(kp))
1608 goto out;
1609
1610 kp->flags |= KPROBE_FLAG_DISABLED;
1611 if (p != kp)
1612 /* When kp != p, p is always enabled. */
1613 try_to_disable_aggr_kprobe(p);
1614
1615 if (!kprobes_all_disarmed && kprobe_disabled(p))
1616 disarm_kprobe(p);
1617out:
1618 mutex_unlock(&kprobe_mutex); 1845 mutex_unlock(&kprobe_mutex);
1619 return ret; 1846 return ret;
1620} 1847}
@@ -1912,36 +2139,27 @@ static void __kprobes disarm_all_kprobes(void)
1912 mutex_lock(&kprobe_mutex); 2139 mutex_lock(&kprobe_mutex);
1913 2140
1914 /* If kprobes are already disarmed, just return */ 2141 /* If kprobes are already disarmed, just return */
1915 if (kprobes_all_disarmed) 2142 if (kprobes_all_disarmed) {
1916 goto already_disabled; 2143 mutex_unlock(&kprobe_mutex);
2144 return;
2145 }
1917 2146
1918 kprobes_all_disarmed = true; 2147 kprobes_all_disarmed = true;
1919 printk(KERN_INFO "Kprobes globally disabled\n"); 2148 printk(KERN_INFO "Kprobes globally disabled\n");
1920 2149
1921 /*
1922 * Here we call get_online_cpus() for avoiding text_mutex deadlock,
1923 * because disarming may also unoptimize kprobes.
1924 */
1925 get_online_cpus();
1926 mutex_lock(&text_mutex); 2150 mutex_lock(&text_mutex);
1927 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2151 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1928 head = &kprobe_table[i]; 2152 head = &kprobe_table[i];
1929 hlist_for_each_entry_rcu(p, node, head, hlist) { 2153 hlist_for_each_entry_rcu(p, node, head, hlist) {
1930 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) 2154 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
1931 __disarm_kprobe(p); 2155 __disarm_kprobe(p, false);
1932 } 2156 }
1933 } 2157 }
1934
1935 mutex_unlock(&text_mutex); 2158 mutex_unlock(&text_mutex);
1936 put_online_cpus();
1937 mutex_unlock(&kprobe_mutex); 2159 mutex_unlock(&kprobe_mutex);
1938 /* Allow all currently running kprobes to complete */
1939 synchronize_sched();
1940 return;
1941 2160
1942already_disabled: 2161 /* Wait for disarming all kprobes by optimizer */
1943 mutex_unlock(&kprobe_mutex); 2162 wait_for_kprobe_optimizer();
1944 return;
1945} 2163}
1946 2164
1947/* 2165/*
@@ -1992,6 +2210,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
1992static const struct file_operations fops_kp = { 2210static const struct file_operations fops_kp = {
1993 .read = read_enabled_file_bool, 2211 .read = read_enabled_file_bool,
1994 .write = write_enabled_file_bool, 2212 .write = write_enabled_file_bool,
2213 .llseek = default_llseek,
1995}; 2214};
1996 2215
1997static int __kprobes debugfs_kprobe_init(void) 2216static int __kprobes debugfs_kprobe_init(void)