Diffstat (limited to 'arch/x86/kernel/ptrace.c')
 -rw-r--r--  arch/x86/kernel/ptrace.c | 284
 1 file changed, 182 insertions(+), 102 deletions(-)

diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 23b7c8f017e2..09ecbde91c13 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -21,6 +21,7 @@
 #include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/signal.h>
+#include <linux/workqueue.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -578,17 +579,130 @@ static int ioperm_get(struct task_struct *target,
 }
 
 #ifdef CONFIG_X86_PTRACE_BTS
+/*
+ * A branch trace store context.
+ *
+ * Contexts may only be installed by ptrace_bts_config() and only for
+ * ptraced tasks.
+ *
+ * Contexts are destroyed when the tracee is detached from the tracer.
+ * The actual destruction work requires interrupts enabled, so the
+ * work is deferred and will be scheduled during __ptrace_unlink().
+ *
+ * Contexts hold an additional task_struct reference on the traced
+ * task, as well as a reference on the tracer's mm.
+ *
+ * Ptrace already holds a task_struct for the duration of ptrace operations,
+ * but since destruction is deferred, it may be executed after both
+ * tracer and tracee exited.
+ */
+struct bts_context {
+	/* The branch trace handle. */
+	struct bts_tracer	*tracer;
+
+	/* The buffer used to store the branch trace and its size. */
+	void			*buffer;
+	unsigned int		size;
+
+	/* The mm that paid for the above buffer. */
+	struct mm_struct	*mm;
+
+	/* The task this context belongs to. */
+	struct task_struct	*task;
+
+	/* The signal to send on a bts buffer overflow. */
+	unsigned int		bts_ovfl_signal;
+
+	/* The work struct to destroy a context. */
+	struct work_struct	work;
+};
+
+static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
+{
+	void *buffer = NULL;
+	int err = -ENOMEM;
+
+	err = account_locked_memory(current->mm, current->signal->rlim, size);
+	if (err < 0)
+		return err;
+
+	buffer = kzalloc(size, GFP_KERNEL);
+	if (!buffer)
+		goto out_refund;
+
+	context->buffer = buffer;
+	context->size = size;
+	context->mm = get_task_mm(current);
+
+	return 0;
+
+out_refund:
+	refund_locked_memory(current->mm, size);
+	return err;
+}
+
+static inline void free_bts_buffer(struct bts_context *context)
+{
+	if (!context->buffer)
+		return;
+
+	kfree(context->buffer);
+	context->buffer = NULL;
+
+	refund_locked_memory(context->mm, context->size);
+	context->size = 0;
+
+	mmput(context->mm);
+	context->mm = NULL;
+}
+
+static void free_bts_context_work(struct work_struct *w)
+{
+	struct bts_context *context;
+
+	context = container_of(w, struct bts_context, work);
+
+	ds_release_bts(context->tracer);
+	put_task_struct(context->task);
+	free_bts_buffer(context);
+	kfree(context);
+}
+
+static inline void free_bts_context(struct bts_context *context)
+{
+	INIT_WORK(&context->work, free_bts_context_work);
+	schedule_work(&context->work);
+}
+
+static inline struct bts_context *alloc_bts_context(struct task_struct *task)
+{
+	struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (context) {
+		context->task = task;
+		task->bts = context;
+
+		get_task_struct(task);
+	}
+
+	return context;
+}
+
 static int ptrace_bts_read_record(struct task_struct *child, size_t index,
 				  struct bts_struct __user *out)
 {
+	struct bts_context *context;
 	const struct bts_trace *trace;
 	struct bts_struct bts;
 	const unsigned char *at;
 	int error;
 
-	trace = ds_read_bts(child->bts);
+	context = child->bts;
+	if (!context)
+		return -ESRCH;
+
+	trace = ds_read_bts(context->tracer);
 	if (!trace)
-		return -EPERM;
+		return -ESRCH;
 
 	at = trace->ds.top - ((index + 1) * trace->ds.size);
 	if ((void *)at < trace->ds.begin)
@@ -597,7 +711,7 @@ static int ptrace_bts_read_record(struct task_struct *child, size_t index,
 	if (!trace->read)
 		return -EOPNOTSUPP;
 
-	error = trace->read(child->bts, at, &bts);
+	error = trace->read(context->tracer, at, &bts);
 	if (error < 0)
 		return error;
 
@@ -611,13 +725,18 @@ static int ptrace_bts_drain(struct task_struct *child,
 			    long size,
 			    struct bts_struct __user *out)
 {
+	struct bts_context *context;
 	const struct bts_trace *trace;
 	const unsigned char *at;
 	int error, drained = 0;
 
-	trace = ds_read_bts(child->bts);
+	context = child->bts;
+	if (!context)
+		return -ESRCH;
+
+	trace = ds_read_bts(context->tracer);
 	if (!trace)
-		return -EPERM;
+		return -ESRCH;
 
 	if (!trace->read)
 		return -EOPNOTSUPP;
@@ -628,9 +747,8 @@ static int ptrace_bts_drain(struct task_struct *child,
 	for (at = trace->ds.begin; (void *)at < trace->ds.top;
 	     out++, drained++, at += trace->ds.size) {
 		struct bts_struct bts;
-		int error;
 
-		error = trace->read(child->bts, at, &bts);
+		error = trace->read(context->tracer, at, &bts);
 		if (error < 0)
 			return error;
 
@@ -640,35 +758,18 @@ static int ptrace_bts_drain(struct task_struct *child,
 
 	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
 
-	error = ds_reset_bts(child->bts);
+	error = ds_reset_bts(context->tracer);
 	if (error < 0)
 		return error;
 
 	return drained;
 }
 
-static int ptrace_bts_allocate_buffer(struct task_struct *child, size_t size)
-{
-	child->bts_buffer = alloc_locked_buffer(size);
-	if (!child->bts_buffer)
-		return -ENOMEM;
-
-	child->bts_size = size;
-
-	return 0;
-}
-
-static void ptrace_bts_free_buffer(struct task_struct *child)
-{
-	free_locked_buffer(child->bts_buffer, child->bts_size);
-	child->bts_buffer = NULL;
-	child->bts_size = 0;
-}
-
 static int ptrace_bts_config(struct task_struct *child,
 			     long cfg_size,
 			     const struct ptrace_bts_config __user *ucfg)
 {
+	struct bts_context *context;
 	struct ptrace_bts_config cfg;
 	unsigned int flags = 0;
 
@@ -678,28 +779,33 @@ static int ptrace_bts_config(struct task_struct *child,
 	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
 		return -EFAULT;
 
-	if (child->bts) {
-		ds_release_bts(child->bts);
-		child->bts = NULL;
-	}
+	context = child->bts;
+	if (!context)
+		context = alloc_bts_context(child);
+	if (!context)
+		return -ENOMEM;
 
 	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
 		if (!cfg.signal)
 			return -EINVAL;
 
-		child->thread.bts_ovfl_signal = cfg.signal;
 		return -EOPNOTSUPP;
+		context->bts_ovfl_signal = cfg.signal;
 	}
 
-	if ((cfg.flags & PTRACE_BTS_O_ALLOC) &&
-	    (cfg.size != child->bts_size)) {
-		int error;
+	ds_release_bts(context->tracer);
+	context->tracer = NULL;
 
-		ptrace_bts_free_buffer(child);
+	if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
+		int err;
 
-		error = ptrace_bts_allocate_buffer(child, cfg.size);
-		if (error < 0)
-			return error;
+		free_bts_buffer(context);
+		if (!cfg.size)
+			return 0;
+
+		err = alloc_bts_buffer(context, cfg.size);
+		if (err < 0)
+			return err;
 	}
 
 	if (cfg.flags & PTRACE_BTS_O_TRACE)
@@ -708,15 +814,14 @@ static int ptrace_bts_config(struct task_struct *child,
 	if (cfg.flags & PTRACE_BTS_O_SCHED)
 		flags |= BTS_TIMESTAMPS;
 
-	child->bts = ds_request_bts(child, child->bts_buffer, child->bts_size,
-				    /* ovfl = */ NULL, /* th = */ (size_t)-1,
-				    flags);
-	if (IS_ERR(child->bts)) {
-		int error = PTR_ERR(child->bts);
-
-		ptrace_bts_free_buffer(child);
-		child->bts = NULL;
+	context->tracer =
+		ds_request_bts_task(child, context->buffer, context->size,
+				    NULL, (size_t)-1, flags);
+	if (unlikely(IS_ERR(context->tracer))) {
+		int error = PTR_ERR(context->tracer);
 
+		free_bts_buffer(context);
+		context->tracer = NULL;
 		return error;
 	}
 
@@ -727,20 +832,25 @@ static int ptrace_bts_status(struct task_struct *child,
 			     long cfg_size,
 			     struct ptrace_bts_config __user *ucfg)
 {
+	struct bts_context *context;
 	const struct bts_trace *trace;
 	struct ptrace_bts_config cfg;
 
+	context = child->bts;
+	if (!context)
+		return -ESRCH;
+
 	if (cfg_size < sizeof(cfg))
 		return -EIO;
 
-	trace = ds_read_bts(child->bts);
+	trace = ds_read_bts(context->tracer);
 	if (!trace)
-		return -EPERM;
+		return -ESRCH;
 
 	memset(&cfg, 0, sizeof(cfg));
 	cfg.size = trace->ds.end - trace->ds.begin;
-	cfg.signal = child->thread.bts_ovfl_signal;
+	cfg.signal = context->bts_ovfl_signal;
 	cfg.bts_size = sizeof(struct bts_struct);
 
 	if (cfg.signal)
 		cfg.flags |= PTRACE_BTS_O_SIGNAL;
@@ -759,80 +869,51 @@ static int ptrace_bts_status(struct task_struct *child,
 
 static int ptrace_bts_clear(struct task_struct *child)
 {
+	struct bts_context *context;
 	const struct bts_trace *trace;
 
-	trace = ds_read_bts(child->bts);
+	context = child->bts;
+	if (!context)
+		return -ESRCH;
+
+	trace = ds_read_bts(context->tracer);
 	if (!trace)
-		return -EPERM;
+		return -ESRCH;
 
 	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
 
-	return ds_reset_bts(child->bts);
+	return ds_reset_bts(context->tracer);
 }
 
 static int ptrace_bts_size(struct task_struct *child)
 {
+	struct bts_context *context;
 	const struct bts_trace *trace;
 
-	trace = ds_read_bts(child->bts);
+	context = child->bts;
+	if (!context)
+		return -ESRCH;
+
+	trace = ds_read_bts(context->tracer);
 	if (!trace)
-		return -EPERM;
+		return -ESRCH;
 
 	return (trace->ds.top - trace->ds.begin) / trace->ds.size;
 }
 
-static void ptrace_bts_fork(struct task_struct *tsk)
-{
-	tsk->bts = NULL;
-	tsk->bts_buffer = NULL;
-	tsk->bts_size = 0;
-	tsk->thread.bts_ovfl_signal = 0;
-}
-
-static void ptrace_bts_untrace(struct task_struct *child)
+/*
+ * Called from __ptrace_unlink() after the child has been moved back
+ * to its original parent.
+ */
+void ptrace_bts_untrace(struct task_struct *child)
 {
 	if (unlikely(child->bts)) {
-		ds_release_bts(child->bts);
+		free_bts_context(child->bts);
 		child->bts = NULL;
-
-		/* We cannot update total_vm and locked_vm since
-		   child's mm is already gone. But we can reclaim the
-		   memory. */
-		kfree(child->bts_buffer);
-		child->bts_buffer = NULL;
-		child->bts_size = 0;
 	}
 }
-
-static void ptrace_bts_detach(struct task_struct *child)
-{
-	/*
-	 * Ptrace_detach() races with ptrace_untrace() in case
-	 * the child dies and is reaped by another thread.
-	 *
-	 * We only do the memory accounting at this point and
-	 * leave the buffer deallocation and the bts tracer
-	 * release to ptrace_bts_untrace() which will be called
-	 * later on with tasklist_lock held.
-	 */
-	release_locked_buffer(child->bts_buffer, child->bts_size);
-}
-#else
-static inline void ptrace_bts_fork(struct task_struct *tsk) {}
-static inline void ptrace_bts_detach(struct task_struct *child) {}
-static inline void ptrace_bts_untrace(struct task_struct *child) {}
 #endif /* CONFIG_X86_PTRACE_BTS */
 
-void x86_ptrace_fork(struct task_struct *child, unsigned long clone_flags)
-{
-	ptrace_bts_fork(child);
-}
-
-void x86_ptrace_untrace(struct task_struct *child)
-{
-	ptrace_bts_untrace(child);
-}
-
 /*
  * Called by kernel/ptrace.c when detaching..
  *
@@ -844,7 +925,6 @@ void ptrace_disable(struct task_struct *child)
 #ifdef TIF_SYSCALL_EMU
 	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 #endif
-	ptrace_bts_detach(child);
 }
 
 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
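
Note: the deferred-destruction idiom the patch introduces (free_bts_context() / free_bts_context_work()) is a standard workqueue pattern. As the new comment block says, the actual teardown requires interrupts enabled, but ptrace_bts_untrace() is called from __ptrace_unlink(), which runs under tasklist_lock with interrupts disabled; so the unlink path only queues the work, and a kworker performs the real teardown later in process context. A minimal sketch of the same pattern, independent of the BTS code — demo_context, demo_free_work() and demo_free() are illustrative names, not part of this patch:

	#include <linux/slab.h>
	#include <linux/workqueue.h>

	/* Hypothetical object whose teardown must run in process context. */
	struct demo_context {
		void			*payload;
		struct work_struct	work;	/* embedded: no extra allocation needed */
	};

	/* Runs later in a kworker thread, with interrupts enabled. */
	static void demo_free_work(struct work_struct *w)
	{
		/* Recover the containing object from the embedded work item. */
		struct demo_context *ctx = container_of(w, struct demo_context, work);

		kfree(ctx->payload);
		kfree(ctx);
	}

	/* Safe to call where sleeping is not allowed: it only queues the work. */
	static void demo_free(struct demo_context *ctx)
	{
		INIT_WORK(&ctx->work, demo_free_work);
		schedule_work(&ctx->work);
	}

Embedding the work_struct in the object itself is what makes this safe on an atomic teardown path: nothing is allocated there, and container_of() recovers the object in the worker — exactly how struct bts_context carries its own work member above.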