author     Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>	2010-12-03 04:54:09 -0500
committer  Ingo Molnar <mingo@elte.hu>	2010-12-06 11:59:30 -0500
commit     6274de4984a630b45c6934b3ee62e5692c745328
tree       dfe66f06596b0165c87a75d800fa83acb6201d1b /kernel/kprobes.c
parent     61f4e13ffd85c037a942c5b7fd13f2b0c3162862
kprobes: Support delayed unoptimizing
Unoptimization occurs when a probe is unregistered or disabled,
and it is a heavy operation because it restores the original
instructions by using stop_machine(). This patch delays
unoptimization operations and unoptimizes several probes at once
by using text_poke_smp_batch(), which avoids the unexpected
system slowdown caused by stop_machine().
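To show the batching idea at a glance, here is a minimal userspace sketch (an illustration only, not kernel code; fake_probe, queue_unoptimize() and flush_unoptimize_queue() are hypothetical names standing in for unoptimizing_list, unoptimize_kprobe() and do_unoptimize_kprobes()): requests are queued cheaply, and the heavy synchronization is paid once for the whole batch.

/*
 * Minimal sketch of the delayed-batch pattern (illustration only, not
 * kernel code). fake_probe, queue_unoptimize() and flush_unoptimize_queue()
 * are hypothetical; they mirror unoptimizing_list, unoptimize_kprobe() and
 * do_unoptimize_kprobes() in this patch.
 */
#include <stdio.h>

struct fake_probe {
	const char *name;
	struct fake_probe *next;	/* stands in for op->list */
};

static struct fake_probe *unoptimizing_queue;	/* stands in for unoptimizing_list */

/* Queue a probe for later unoptimization; cheap, no synchronization here */
static void queue_unoptimize(struct fake_probe *p)
{
	p->next = unoptimizing_queue;
	unoptimizing_queue = p;
	/* the kernel would now kick_kprobe_optimizer() (delayed work) */
}

/* Pay the heavy synchronization once for the whole batch */
static void flush_unoptimize_queue(void)
{
	struct fake_probe *p;

	printf("one heavy sync (stop_machine() in the kernel) for the batch\n");
	for (p = unoptimizing_queue; p; p = p->next)
		printf("  unoptimized %s\n", p->name);
	unoptimizing_queue = NULL;
}

int main(void)
{
	struct fake_probe a = { "probe_a", NULL }, b = { "probe_b", NULL };

	queue_unoptimize(&a);
	queue_unoptimize(&b);
	flush_unoptimize_queue();	/* the kernel does this in kprobe_optimizer() */
	return 0;
}

In the patch itself, unoptimize_kprobe() queues the probe on unoptimizing_list, kick_kprobe_optimizer() schedules the optimizer work after OPTIMIZE_DELAY, and do_unoptimize_kprobes() unoptimizes the whole list in one pass under text_mutex.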
Changes in v5:
- Split the previous version into several cleanup patches and this patch.
- Fix some missed text_mutex locking.
- Use bool instead of int for behavior flags.
- Add additional comments on the (un)optimizing path.
Changes in v2:
- Use dynamically allocated buffers and params.
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: 2nddept-manager@sdl.hitachi.co.jp
LKML-Reference: <20101203095409.2961.82733.stgit@ltc236.sdl.hitachi.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/kprobes.c')

 kernel/kprobes.c | 310
 1 file changed, 233 insertions(+), 77 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1f4f9b9d5c89..ba4d4c0740cf 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -354,6 +354,13 @@ static inline int kprobe_aggrprobe(struct kprobe *p)
 	return p->pre_handler == aggr_pre_handler;
 }
 
+/* Return true(!0) if the kprobe is unused */
+static inline int kprobe_unused(struct kprobe *p)
+{
+	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
+	       list_empty(&p->list);
+}
+
 /*
  * Keep all fields in the kprobe consistent
  */
@@ -384,6 +391,17 @@ void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	}
 }
 
+/* Free optimized instructions and optimized_kprobe */
+static __kprobes void free_aggr_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	op = container_of(p, struct optimized_kprobe, kp);
+	arch_remove_optimized_kprobe(op);
+	arch_remove_kprobe(p);
+	kfree(op);
+}
+
 /* Return true(!0) if the kprobe is ready for optimization. */
 static inline int kprobe_optready(struct kprobe *p)
 {
@@ -397,6 +415,33 @@ static inline int kprobe_optready(struct kprobe *p)
 	return 0;
 }
 
+/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
+static inline int kprobe_disarmed(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
+	if (!kprobe_aggrprobe(p))
+		return kprobe_disabled(p);
+
+	op = container_of(p, struct optimized_kprobe, kp);
+
+	return kprobe_disabled(p) && list_empty(&op->list);
+}
+
+/* Return true(!0) if the probe is queued on (un)optimizing lists */
+static int __kprobes kprobe_queued(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	if (kprobe_aggrprobe(p)) {
+		op = container_of(p, struct optimized_kprobe, kp);
+		if (!list_empty(&op->list))
+			return 1;
+	}
+	return 0;
+}
+
 /*
  * Return an optimized kprobe whose optimizing code replaces
  * instructions including addr (exclude breakpoint).
@@ -422,9 +467,11 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 
 /* Optimization staging list, protected by kprobe_mutex */
 static LIST_HEAD(optimizing_list);
+static LIST_HEAD(unoptimizing_list);
 
 static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
+static DECLARE_COMPLETION(optimizer_comp);
 #define OPTIMIZE_DELAY 5
 
 /*
@@ -435,6 +482,11 @@ static __kprobes void do_optimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
+	/* Optimization never be done when disarmed */
+	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
+	    list_empty(&optimizing_list))
+		return;
+
 	/*
 	 * The optimization/unoptimization refers online_cpus via
 	 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -457,17 +509,79 @@ static __kprobes void do_optimize_kprobes(void)
 	put_online_cpus();
 }
 
+/*
+ * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
+ * if need) kprobes listed on unoptimizing_list.
+ */
+static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
+{
+	struct optimized_kprobe *op, *tmp;
+
+	/* Unoptimization must be done anytime */
+	if (list_empty(&unoptimizing_list))
+		return;
+
+	/* Ditto to do_optimize_kprobes */
+	get_online_cpus();
+	mutex_lock(&text_mutex);
+	list_for_each_entry_safe(op, tmp, &unoptimizing_list, list) {
+		/* Unoptimize kprobes */
+		arch_unoptimize_kprobe(op);
+		/* Disarm probes if marked disabled */
+		if (kprobe_disabled(&op->kp))
+			arch_disarm_kprobe(&op->kp);
+		if (kprobe_unused(&op->kp)) {
+			/*
+			 * Remove unused probes from hash list. After waiting
+			 * for synchronization, these probes are reclaimed.
+			 * (reclaiming is done by do_free_cleaned_kprobes.)
+			 */
+			hlist_del_rcu(&op->kp.hlist);
+			/* Move only unused probes on free_list */
+			list_move(&op->list, free_list);
+		} else
+			list_del_init(&op->list);
+	}
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+}
+
+/* Reclaim all kprobes on the free_list */
+static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
+{
+	struct optimized_kprobe *op, *tmp;
+
+	list_for_each_entry_safe(op, tmp, free_list, list) {
+		BUG_ON(!kprobe_unused(&op->kp));
+		list_del_init(&op->list);
+		free_aggr_kprobe(&op->kp);
+	}
+}
+
+/* Start optimizer after OPTIMIZE_DELAY passed */
+static __kprobes void kick_kprobe_optimizer(void)
+{
+	if (!delayed_work_pending(&optimizing_work))
+		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
+}
+
 /* Kprobe jump optimizer */
 static __kprobes void kprobe_optimizer(struct work_struct *work)
 {
+	LIST_HEAD(free_list);
+
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
 	mutex_lock(&kprobe_mutex);
-	if (kprobes_all_disarmed || !kprobes_allow_optimization)
-		goto end;
 
 	/*
-	 * Wait for quiesence period to ensure all running interrupts
+	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
+	 * kprobes before waiting for quiesence period.
+	 */
+	do_unoptimize_kprobes(&free_list);
+
+	/*
+	 * Step 2: Wait for quiesence period to ensure all running interrupts
 	 * are done. Because optprobe may modify multiple instructions
 	 * there is a chance that Nth instruction is interrupted. In that
 	 * case, running interrupt can return to 2nd-Nth byte of jump
@@ -475,10 +589,24 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	 */
 	synchronize_sched();
 
+	/* Step 3: Optimize kprobes after quiesence period */
 	do_optimize_kprobes();
-end:
+
+	/* Step 4: Free cleaned kprobes after quiesence period */
+	do_free_cleaned_kprobes(&free_list);
+
 	mutex_unlock(&kprobe_mutex);
 	mutex_unlock(&module_mutex);
+
+	/* Wake up all waiters */
+	complete_all(&optimizer_comp);
+}
+
+/* Wait for completing optimization and unoptimization */
+static __kprobes void wait_for_kprobe_optimizer(void)
+{
+	if (delayed_work_pending(&optimizing_work))
+		wait_for_completion(&optimizer_comp);
 }
 
 /* Optimize kprobe if p is ready to be optimized */
@@ -504,27 +632,63 @@ static __kprobes void optimize_kprobe(struct kprobe *p)
 	/* Check if it is already optimized. */
 	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
 		return;
-
 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
-	list_add(&op->list, &optimizing_list);
-	if (!delayed_work_pending(&optimizing_work))
-		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
+
+	if (!list_empty(&op->list))
+		/* This is under unoptimizing. Just dequeue the probe */
+		list_del_init(&op->list);
+	else {
+		list_add(&op->list, &optimizing_list);
+		kick_kprobe_optimizer();
+	}
+}
+
+/* Short cut to direct unoptimizing */
+static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
+{
+	get_online_cpus();
+	arch_unoptimize_kprobe(op);
+	put_online_cpus();
+	if (kprobe_disabled(&op->kp))
+		arch_disarm_kprobe(&op->kp);
 }
 
 /* Unoptimize a kprobe if p is optimized */
-static __kprobes void unoptimize_kprobe(struct kprobe *p)
+static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
 {
 	struct optimized_kprobe *op;
 
-	if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) {
-		op = container_of(p, struct optimized_kprobe, kp);
-		if (!list_empty(&op->list))
-			/* Dequeue from the optimization queue */
+	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
+		return; /* This is not an optprobe nor optimized */
+
+	op = container_of(p, struct optimized_kprobe, kp);
+	if (!kprobe_optimized(p)) {
+		/* Unoptimized or unoptimizing case */
+		if (force && !list_empty(&op->list)) {
+			/*
+			 * Only if this is unoptimizing kprobe and forced,
+			 * forcibly unoptimize it. (No need to unoptimize
+			 * unoptimized kprobe again :)
+			 */
 			list_del_init(&op->list);
-		else
-			/* Replace jump with break */
-			arch_unoptimize_kprobe(op);
-		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+			force_unoptimize_kprobe(op);
+		}
+		return;
+	}
+
+	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+	if (!list_empty(&op->list)) {
+		/* Dequeue from the optimization queue */
+		list_del_init(&op->list);
+		return;
+	}
+	/* Optimized kprobe case */
+	if (force)
+		/* Forcibly update the code: this is a special case */
+		force_unoptimize_kprobe(op);
+	else {
+		list_add(&op->list, &unoptimizing_list);
+		kick_kprobe_optimizer();
 	}
 }
 
@@ -534,12 +698,12 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p)
 	struct optimized_kprobe *op;
 
 	op = container_of(p, struct optimized_kprobe, kp);
-	if (!list_empty(&op->list)) {
-		/* Dequeue from the optimization queue */
+	if (!list_empty(&op->list))
+		/* Dequeue from the (un)optimization queue */
 		list_del_init(&op->list);
-		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
-	}
-	/* Don't unoptimize, because the target code will be freed. */
+
+	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+	/* Don't touch the code, because it is already freed. */
 	arch_remove_optimized_kprobe(op);
 }
 
@@ -552,16 +716,6 @@ static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
 	arch_prepare_optimized_kprobe(op);
 }
 
-/* Free optimized instructions and optimized_kprobe */
-static __kprobes void free_aggr_kprobe(struct kprobe *p)
-{
-	struct optimized_kprobe *op;
-
-	op = container_of(p, struct optimized_kprobe, kp);
-	arch_remove_optimized_kprobe(op);
-	kfree(op);
-}
-
 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
 static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 {
@@ -596,7 +750,8 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 	op = container_of(ap, struct optimized_kprobe, kp);
 	if (!arch_prepared_optinsn(&op->optinsn)) {
 		/* If failed to setup optimizing, fallback to kprobe */
-		free_aggr_kprobe(ap);
+		arch_remove_optimized_kprobe(op);
+		kfree(op);
 		return;
 	}
 
@@ -640,21 +795,16 @@ static void __kprobes unoptimize_all_kprobes(void)
 		return;
 
 	kprobes_allow_optimization = false;
-	printk(KERN_INFO "Kprobes globally unoptimized\n");
-	get_online_cpus();	/* For avoiding text_mutex deadlock */
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
 			if (!kprobe_disabled(p))
-				unoptimize_kprobe(p);
+				unoptimize_kprobe(p, false);
 		}
 	}
-
-	mutex_unlock(&text_mutex);
-	put_online_cpus();
-	/* Allow all currently running kprobes to complete */
-	synchronize_sched();
+	/* Wait for unoptimizing completion */
+	wait_for_kprobe_optimizer();
+	printk(KERN_INFO "Kprobes globally unoptimized\n");
 }
 
 int sysctl_kprobes_optimization;
@@ -678,6 +828,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
 }
 #endif /* CONFIG_SYSCTL */
 
+/* Put a breakpoint for a probe. Must be called with text_mutex locked */
 static void __kprobes __arm_kprobe(struct kprobe *p)
 {
 	struct kprobe *_p;
@@ -685,37 +836,45 @@ static void __kprobes __arm_kprobe(struct kprobe *p)
 	/* Check collision with other optimized kprobes */
 	_p = get_optimized_kprobe((unsigned long)p->addr);
 	if (unlikely(_p))
-		unoptimize_kprobe(_p);	/* Fallback to unoptimized kprobe */
+		/* Fallback to unoptimized kprobe */
+		unoptimize_kprobe(_p, true);
 
 	arch_arm_kprobe(p);
 	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
 }
 
-static void __kprobes __disarm_kprobe(struct kprobe *p)
+/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
+static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
 {
 	struct kprobe *_p;
 
-	unoptimize_kprobe(p);	/* Try to unoptimize */
-	arch_disarm_kprobe(p);
+	unoptimize_kprobe(p, false);	/* Try to unoptimize */
 
-	/* If another kprobe was blocked, optimize it. */
-	_p = get_optimized_kprobe((unsigned long)p->addr);
-	if (unlikely(_p))
-		optimize_kprobe(_p);
+	if (!kprobe_queued(p)) {
+		arch_disarm_kprobe(p);
+		/* If another kprobe was blocked, optimize it. */
+		_p = get_optimized_kprobe((unsigned long)p->addr);
+		if (unlikely(_p) && reopt)
+			optimize_kprobe(_p);
+	}
+	/* TODO: reoptimize others after unoptimized this probe */
 }
 
 #else /* !CONFIG_OPTPROBES */
 
 #define optimize_kprobe(p)			do {} while (0)
-#define unoptimize_kprobe(p)			do {} while (0)
+#define unoptimize_kprobe(p, f)			do {} while (0)
 #define kill_optimized_kprobe(p)		do {} while (0)
 #define prepare_optimized_kprobe(p)		do {} while (0)
 #define try_to_optimize_kprobe(p)		do {} while (0)
 #define __arm_kprobe(p)				arch_arm_kprobe(p)
-#define __disarm_kprobe(p)			arch_disarm_kprobe(p)
+#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
+#define kprobe_disarmed(p)			kprobe_disabled(p)
+#define wait_for_kprobe_optimizer()		do {} while (0)
 
 static __kprobes void free_aggr_kprobe(struct kprobe *p)
 {
+	arch_remove_kprobe(p);
 	kfree(p);
 }
 
@@ -741,11 +900,10 @@ static void __kprobes arm_kprobe(struct kprobe *kp)
 /* Disarm a kprobe with text_mutex */
 static void __kprobes disarm_kprobe(struct kprobe *kp)
 {
-	get_online_cpus();	/* For avoiding text_mutex deadlock */
+	/* Ditto */
 	mutex_lock(&text_mutex);
-	__disarm_kprobe(kp);
+	__disarm_kprobe(kp, true);
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 /*
@@ -951,7 +1109,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
 
 	if (p->break_handler || p->post_handler)
-		unoptimize_kprobe(ap);	/* Fall back to normal kprobe */
+		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */
 
 	if (p->break_handler) {
 		if (ap->break_handler)
@@ -1014,7 +1172,9 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 		if (!ap)
 			return -ENOMEM;
 		init_aggr_kprobe(ap, orig_p);
-	}
+	} else if (kprobe_unused(ap))
+		/* Busy to die */
+		return -EBUSY;
 
 	if (kprobe_gone(ap)) {
 		/*
@@ -1283,8 +1443,11 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 	/* Following process expects this probe is an aggrprobe */
 	WARN_ON(!kprobe_aggrprobe(ap));
 
-	if (list_is_singular(&ap->list))
-		/* This probe is the last child of aggrprobe */
+	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
+		/*
+		 * !disarmed could be happen if the probe is under delayed
+		 * unoptimizing.
+		 */
 		goto disarmed;
 	else {
 		/* If disabling probe has special handlers, update aggrprobe */
@@ -1313,6 +1476,7 @@ noclean:
 	return 0;
 
 disarmed:
+	BUG_ON(!kprobe_disarmed(ap));
 	hlist_del_rcu(&ap->hlist);
 	return 0;
 }
@@ -1322,14 +1486,15 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
 	struct kprobe *ap;
 
 	if (list_empty(&p->list))
+		/* This is an independent kprobe */
 		arch_remove_kprobe(p);
 	else if (list_is_singular(&p->list)) {
-		/* "p" is the last child of an aggr_kprobe */
+		/* This is the last child of an aggrprobe */
 		ap = list_entry(p->list.next, struct kprobe, list);
 		list_del(&p->list);
-		arch_remove_kprobe(ap);
 		free_aggr_kprobe(ap);
 	}
+	/* Otherwise, do nothing. */
 }
 
 int __kprobes register_kprobes(struct kprobe **kps, int num)
@@ -1951,36 +2116,27 @@ static void __kprobes disarm_all_kprobes(void)
 	mutex_lock(&kprobe_mutex);
 
 	/* If kprobes are already disarmed, just return */
-	if (kprobes_all_disarmed)
-		goto already_disabled;
+	if (kprobes_all_disarmed) {
+		mutex_unlock(&kprobe_mutex);
+		return;
+	}
 
 	kprobes_all_disarmed = true;
 	printk(KERN_INFO "Kprobes globally disabled\n");
 
-	/*
-	 * Here we call get_online_cpus() for avoiding text_mutex deadlock,
-	 * because disarming may also unoptimize kprobes.
-	 */
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-				__disarm_kprobe(p);
+				__disarm_kprobe(p, false);
 		}
 	}
-
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 	mutex_unlock(&kprobe_mutex);
-	/* Allow all currently running kprobes to complete */
-	synchronize_sched();
-	return;
 
-already_disabled:
-	mutex_unlock(&kprobe_mutex);
-	return;
+	/* Wait for disarming all kprobes by optimizer */
+	wait_for_kprobe_optimizer();
 }
 
 /*