Diffstat (limited to 'kernel/ptrace.c')
 kernel/ptrace.c | 207
 1 file changed, 165 insertions(+), 42 deletions(-)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 2df115790cd..67d1fdd3c55 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -23,8 +23,15 @@
 #include <linux/uaccess.h>
 #include <linux/regset.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/cn_proc.h>
 
 
+static int ptrace_trapping_sleep_fn(void *flags)
+{
+	schedule();
+	return 0;
+}
+
 /*
  * ptrace a task: make the debugger its new parent and
  * move it to the ptrace list.
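The new helper above is the sleep callback required by the wait_on_bit() API of this kernel generation, which takes an action function and a sleep mode and calls the action to actually sleep; returning 0 tells wait_on_bit() to keep waiting until the bit clears. Its only call site is added later in this diff, in ptrace_attach():

	/* Sleep (uninterruptibly) until the tracee drops JOBCTL_TRAPPING,
	 * i.e. until it has finished re-trapping into TASK_TRACED. */
	wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
		    ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);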
@@ -77,13 +84,31 @@ void __ptrace_unlink(struct task_struct *child)
 	spin_lock(&child->sighand->siglock);
 
 	/*
-	 * Reinstate GROUP_STOP_PENDING if group stop is in effect and
+	 * Clear all pending traps and TRAPPING. TRAPPING should be
+	 * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
+	 */
+	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
+	task_clear_jobctl_trapping(child);
+
+	/*
+	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
 	 * @child isn't dead.
 	 */
 	if (!(child->flags & PF_EXITING) &&
 	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
-	     child->signal->group_stop_count))
-		child->group_stop |= GROUP_STOP_PENDING;
+	     child->signal->group_stop_count)) {
+		child->jobctl |= JOBCTL_STOP_PENDING;
+
+		/*
+		 * This is only possible if this thread was cloned by the
+		 * traced task running in the stopped group, set the signal
+		 * for the future reports.
+		 * FIXME: we should change ptrace_init_task() to handle this
+		 * case.
+		 */
+		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
+			child->jobctl |= SIGSTOP;
+	}
 
 	/*
 	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
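The jobctl word manipulated above packs the number of the pending stop signal into its low 16 bits, which is why the code can OR SIGSTOP into it directly. For reference, the bit layout assumed here is the one from include/linux/sched.h of this kernel generation (reproduced from memory as an aid; worth double-checking against the tree):

/* include/linux/sched.h (this series), approximate excerpt */
#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the pending group stop */

#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */

#define JOBCTL_TRAP_STOP	(1UL << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY	(1UL << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)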
@@ -91,16 +116,30 @@ void __ptrace_unlink(struct task_struct *child)
 	 * is in TASK_TRACED; otherwise, we might unduly disrupt
 	 * TASK_KILLABLE sleeps.
 	 */
-	if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child))
+	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
 		signal_wake_up(child, task_is_traced(child));
 
 	spin_unlock(&child->sighand->siglock);
 }
 
-/*
- * Check that we have indeed attached to the thing..
+/**
+ * ptrace_check_attach - check whether ptracee is ready for ptrace operation
+ * @child: ptracee to check for
+ * @ignore_state: don't check whether @child is currently %TASK_TRACED
+ *
+ * Check whether @child is being ptraced by %current and ready for further
+ * ptrace operations. If @ignore_state is %false, @child also should be in
+ * %TASK_TRACED state and on return the child is guaranteed to be traced
+ * and not executing. If @ignore_state is %true, @child can be in any
+ * state.
+ *
+ * CONTEXT:
+ * Grabs and releases tasklist_lock and @child->sighand->siglock.
+ *
+ * RETURNS:
+ * 0 on success, -ESRCH if %child is not ready.
  */
-int ptrace_check_attach(struct task_struct *child, int kill)
+int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 {
 	int ret = -ESRCH;
 
@@ -119,13 +158,14 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	 */
 		spin_lock_irq(&child->sighand->siglock);
 		WARN_ON_ONCE(task_is_stopped(child));
-		if (task_is_traced(child) || kill)
+		if (ignore_state || (task_is_traced(child) &&
+				     !(child->jobctl & JOBCTL_LISTENING)))
 			ret = 0;
 		spin_unlock_irq(&child->sighand->siglock);
 	}
 	read_unlock(&tasklist_lock);
 
-	if (!ret && !kill)
+	if (!ret && !ignore_state)
 		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
 
 	/* All systems go.. */
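As a concrete caller pattern, the ptrace() syscall paths later in this diff derive @ignore_state from the request, so operations that must reach a tracee outside %TASK_TRACED skip the state check:

	/* PTRACE_KILL historically ignored the traced state, and the new
	 * PTRACE_INTERRUPT must work on a running SEIZE'd tracee. */
	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				   request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;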
@@ -182,11 +222,28 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 	return !err;
 }
 
-static int ptrace_attach(struct task_struct *task)
+static int ptrace_attach(struct task_struct *task, long request,
+			 unsigned long flags)
 {
-	bool wait_trap = false;
+	bool seize = (request == PTRACE_SEIZE);
 	int retval;
 
+	/*
+	 * SEIZE will enable new ptrace behaviors which will be implemented
+	 * gradually. SEIZE_DEVEL is used to prevent applications
+	 * expecting full SEIZE behaviors trapping on kernel commits which
+	 * are still in the process of implementing them.
+	 *
+	 * Only test programs for new ptrace behaviors being implemented
+	 * should set SEIZE_DEVEL. If unset, SEIZE will fail with -EIO.
+	 *
+	 * Once SEIZE behaviors are completely implemented, this flag and
+	 * the following test will be removed.
+	 */
+	retval = -EIO;
+	if (seize && !(flags & PTRACE_SEIZE_DEVEL))
+		goto out;
+
 	audit_ptrace(task);
 
 	retval = -EPERM;
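For orientation, a hypothetical userspace sketch of attaching under this interim ABI (not part of this commit): the flags word travels in ptrace()'s data argument, and without PTRACE_SEIZE_DEVEL the kernel rejects the request with -EIO. The request and flag values below are assumptions based on the uapi headers of this development series and should be checked; PTRACE_SEIZE_DEVEL is explicitly temporary.

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_SEIZE			/* assumed values from this series */
#define PTRACE_SEIZE		0x4206
#define PTRACE_SEIZE_DEVEL	0x80000000
#endif

static int seize_task(pid_t pid)
{
	/* addr is unused; the SEIZE flags go in data. Omitting
	 * PTRACE_SEIZE_DEVEL makes the kernel return -EIO (see above). */
	if (ptrace(PTRACE_SEIZE, pid, NULL,
		   (void *)(unsigned long)PTRACE_SEIZE_DEVEL) < 0) {
		perror("PTRACE_SEIZE");
		return -1;
	}
	/* Unlike PTRACE_ATTACH, no SIGSTOP is sent: the tracee keeps
	 * running until a trap is requested or an event occurs. */
	return 0;
}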
@@ -218,16 +275,21 @@ static int ptrace_attach(struct task_struct *task)
 		goto unlock_tasklist;
 
 	task->ptrace = PT_PTRACED;
+	if (seize)
+		task->ptrace |= PT_SEIZED;
 	if (task_ns_capable(task, CAP_SYS_PTRACE))
 		task->ptrace |= PT_PTRACE_CAP;
 
 	__ptrace_link(task, current);
-	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
+
+	/* SEIZE doesn't trap tracee on attach */
+	if (!seize)
+		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
 
 	spin_lock(&task->sighand->siglock);
 
 	/*
-	 * If the task is already STOPPED, set GROUP_STOP_PENDING and
+	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
 	 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
 	 * will be cleared if the child completes the transition or any
 	 * event which clears the group stop states happens. We'll wait
@@ -243,11 +305,9 @@ static int ptrace_attach(struct task_struct *task)
 	 * The following task_is_stopped() test is safe as both transitions
 	 * in and out of STOPPED are protected by siglock.
 	 */
-	if (task_is_stopped(task)) {
-		task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
+	if (task_is_stopped(task) &&
+	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
 		signal_wake_up(task, 1);
-		wait_trap = true;
-	}
 
 	spin_unlock(&task->sighand->siglock);
 
@@ -257,9 +317,12 @@ unlock_tasklist:
 unlock_creds:
 	mutex_unlock(&task->signal->cred_guard_mutex);
 out:
-	if (wait_trap)
-		wait_event(current->signal->wait_chldexit,
-			   !(task->group_stop & GROUP_STOP_TRAPPING));
+	if (!retval) {
+		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
+			    ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
+		proc_ptrace_connector(task, PTRACE_ATTACH);
+	}
+
 	return retval;
 }
 
@@ -322,25 +385,27 @@ static int ignoring_children(struct sighand_struct *sigh)
  */
 static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
 {
+	bool dead;
+
 	__ptrace_unlink(p);
 
-	if (p->exit_state == EXIT_ZOMBIE) {
-		if (!task_detached(p) && thread_group_empty(p)) {
-			if (!same_thread_group(p->real_parent, tracer))
-				do_notify_parent(p, p->exit_signal);
-			else if (ignoring_children(tracer->sighand)) {
-				__wake_up_parent(p, tracer);
-				p->exit_signal = -1;
-			}
-		}
-		if (task_detached(p)) {
-			/* Mark it as in the process of being reaped. */
-			p->exit_state = EXIT_DEAD;
-			return true;
+	if (p->exit_state != EXIT_ZOMBIE)
+		return false;
+
+	dead = !thread_group_leader(p);
+
+	if (!dead && thread_group_empty(p)) {
+		if (!same_thread_group(p->real_parent, tracer))
+			dead = do_notify_parent(p, p->exit_signal);
+		else if (ignoring_children(tracer->sighand)) {
+			__wake_up_parent(p, tracer);
+			dead = true;
 		}
 	}
-
-	return false;
+	/* Mark it as in the process of being reaped. */
+	if (dead)
+		p->exit_state = EXIT_DEAD;
+	return dead;
 }
 
 static int ptrace_detach(struct task_struct *child, unsigned int data)
@@ -365,6 +430,7 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
 	}
 	write_unlock_irq(&tasklist_lock);
 
+	proc_ptrace_connector(child, PTRACE_DETACH);
 	if (unlikely(dead))
 		release_task(child);
 
@@ -611,10 +677,12 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
 int ptrace_request(struct task_struct *child, long request,
 		   unsigned long addr, unsigned long data)
 {
+	bool seized = child->ptrace & PT_SEIZED;
 	int ret = -EIO;
-	siginfo_t siginfo;
+	siginfo_t siginfo, *si;
 	void __user *datavp = (void __user *) data;
 	unsigned long __user *datalp = datavp;
+	unsigned long flags;
 
 	switch (request) {
 	case PTRACE_PEEKTEXT:
@@ -647,6 +715,59 @@ int ptrace_request(struct task_struct *child, long request,
 		ret = ptrace_setsiginfo(child, &siginfo);
 		break;
 
+	case PTRACE_INTERRUPT:
+		/*
+		 * Stop tracee without any side-effect on signal or job
+		 * control. At least one trap is guaranteed to happen
+		 * after this request. If @child is already trapped, the
+		 * current trap is not disturbed and another trap will
+		 * happen after the current trap is ended with PTRACE_CONT.
+		 *
+		 * The actual trap might not be PTRACE_EVENT_STOP trap but
+		 * the pending condition is cleared regardless.
+		 */
+		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
+			break;
+
+		/*
+		 * INTERRUPT doesn't disturb existing trap sans one
+		 * exception. If ptracer issued LISTEN for the current
+		 * STOP, this INTERRUPT should clear LISTEN and re-trap
+		 * tracee into STOP.
+		 */
+		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
+			signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+
+		unlock_task_sighand(child, &flags);
+		ret = 0;
+		break;
+
+	case PTRACE_LISTEN:
+		/*
+		 * Listen for events. Tracee must be in STOP. It's not
+		 * resumed per-se but is not considered to be in TRACED by
+		 * wait(2) or ptrace(2). If an async event (e.g. group
+		 * stop state change) happens, tracee will enter STOP trap
+		 * again. Alternatively, ptracer can issue INTERRUPT to
+		 * finish listening and re-trap tracee into STOP.
+		 */
+		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
+			break;
+
+		si = child->last_siginfo;
+		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
+			child->jobctl |= JOBCTL_LISTENING;
+			/*
+			 * If NOTIFY is set, it means event happened between
+			 * start of this trap and now. Trigger re-trap.
+			 */
+			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+				signal_wake_up(child, true);
+			ret = 0;
+		}
+		unlock_task_sighand(child, &flags);
+		break;
+
 	case PTRACE_DETACH:	 /* detach a process that was attached. */
 		ret = ptrace_detach(child, data);
 		break;
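To make the intended flow concrete, a hypothetical userspace sequence against a tracee attached with PTRACE_SEIZE (not part of this commit; the request numbers are assumptions based on this series' headers, and the comments paraphrase the semantics described in the code above):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_INTERRUPT		/* assumed values from this series */
#define PTRACE_INTERRUPT	0x4207
#define PTRACE_LISTEN		0x4208
#endif

static int trap_and_listen(pid_t pid)
{
	int status;

	/* Guarantee at least one trap without injecting a signal or
	 * disturbing job control; a running tracee stops, an already
	 * trapped one re-traps after the current trap is continued. */
	if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL) < 0)
		return -1;
	if (waitpid(pid, &status, 0) != pid || !WIFSTOPPED(status))
		return -1;

	/* While the tracee sits in a PTRACE_EVENT_STOP trap (e.g. a group
	 * stop), LISTEN keeps it stopped but lets wait(2) report the next
	 * asynchronous event; another INTERRUPT ends listening and
	 * re-traps it into STOP. */
	if (ptrace(PTRACE_LISTEN, pid, NULL, NULL) < 0) {
		perror("PTRACE_LISTEN");
		return -1;
	}
	return 0;
}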
@@ -761,8 +882,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
 		goto out;
 	}
 
-	if (request == PTRACE_ATTACH) {
-		ret = ptrace_attach(child);
+	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+		ret = ptrace_attach(child, request, data);
 		/*
 		 * Some architectures need to do book-keeping after
 		 * a ptrace attach.
@@ -772,7 +893,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
 		goto out_put_task_struct;
 	}
 
-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
+	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
+				   request == PTRACE_INTERRUPT);
 	if (ret < 0)
 		goto out_put_task_struct;
 
@@ -903,8 +1025,8 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 		goto out;
 	}
 
-	if (request == PTRACE_ATTACH) {
-		ret = ptrace_attach(child);
+	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+		ret = ptrace_attach(child, request, data);
 		/*
 		 * Some architectures need to do book-keeping after
 		 * a ptrace attach.
@@ -914,7 +1036,8 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 		goto out_put_task_struct;
 	}
 
-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
+	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
+				   request == PTRACE_INTERRUPT);
 	if (!ret)
 		ret = compat_arch_ptrace(child, request, addr, data);
 