path: root/kernel/ptrace.c
Diffstat (limited to 'kernel/ptrace.c')
-rw-r--r--	kernel/ptrace.c	118
1 file changed, 81 insertions(+), 37 deletions(-)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index dc7ab65f3b36..7a81fc071344 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -38,35 +38,33 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 	child->parent = new_parent;
 }
 
-/*
- * Turn a tracing stop into a normal stop now, since with no tracer there
- * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
- * signal sent that would resume the child, but didn't because it was in
- * TASK_TRACED, resume it now.
- * Requires that irqs be disabled.
- */
-static void ptrace_untrace(struct task_struct *child)
-{
-	spin_lock(&child->sighand->siglock);
-	if (task_is_traced(child)) {
-		/*
-		 * If the group stop is completed or in progress,
-		 * this thread was already counted as stopped.
-		 */
-		if (child->signal->flags & SIGNAL_STOP_STOPPED ||
-		    child->signal->group_stop_count)
-			__set_task_state(child, TASK_STOPPED);
-		else
-			signal_wake_up(child, 1);
-	}
-	spin_unlock(&child->sighand->siglock);
-}
-
-/*
- * unptrace a task: move it back to its original parent and
- * remove it from the ptrace list.
+/**
+ * __ptrace_unlink - unlink ptracee and restore its execution state
+ * @child: ptracee to be unlinked
  *
- * Must be called with the tasklist lock write-held.
+ * Remove @child from the ptrace list, move it back to the original parent,
+ * and restore the execution state so that it conforms to the group stop
+ * state.
+ *
+ * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
+ * exiting. For PTRACE_DETACH, unless the ptracee has been killed between
+ * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
+ * If the ptracer is exiting, the ptracee can be in any state.
+ *
+ * After detach, the ptracee should be in a state which conforms to the
+ * group stop. If the group is stopped or in the process of stopping, the
+ * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
+ * up from TASK_TRACED.
+ *
+ * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
+ * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
+ * to but in the opposite direction of what happens while attaching to a
+ * stopped task. However, in this direction, the intermediate RUNNING
+ * state is not hidden even from the current ptracer and if it immediately
+ * re-attaches and performs a WNOHANG wait(2), it may fail.
+ *
+ * CONTEXT:
+ * write_lock_irq(tasklist_lock)
  */
 void __ptrace_unlink(struct task_struct *child)
 {
@@ -76,8 +74,27 @@ void __ptrace_unlink(struct task_struct *child)
 	child->parent = child->real_parent;
 	list_del_init(&child->ptrace_entry);
 
-	if (task_is_traced(child))
-		ptrace_untrace(child);
+	spin_lock(&child->sighand->siglock);
+
+	/*
+	 * Reinstate GROUP_STOP_PENDING if group stop is in effect and
+	 * @child isn't dead.
+	 */
+	if (!(child->flags & PF_EXITING) &&
+	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
+	     child->signal->group_stop_count))
+		child->group_stop |= GROUP_STOP_PENDING;
+
+	/*
+	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
+	 * @child in the butt. Note that @resume should be used iff @child
+	 * is in TASK_TRACED; otherwise, we might unduly disrupt
+	 * TASK_KILLABLE sleeps.
+	 */
+	if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child))
+		signal_wake_up(child, task_is_traced(child));
+
+	spin_unlock(&child->sighand->siglock);
 }
 
 /*
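
The TRACED -> RUNNING -> STOPPED transition described in the new kernel-doc is observable from userspace. Below is a minimal tracer-side sketch of the detach-then-reattach sequence the comment warns about; the helper name is made up, error handling is omitted, and it assumes the tracee's thread group is in group stop when the detach happens.

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <stdio.h>

/* Illustrative only: detach from a group-stopped tracee and re-attach
 * immediately. The tracee may pass through a transient RUNNING state on
 * its way from TASK_TRACED back to TASK_STOPPED, so a WNOHANG wait right
 * after the re-attach can legitimately report nothing yet. */
static void detach_reattach(pid_t pid)
{
	int status;

	ptrace(PTRACE_DETACH, pid, NULL, NULL);	/* tracee heads back to group stop */
	ptrace(PTRACE_ATTACH, pid, NULL, NULL);	/* re-attach right away */

	if (waitpid(pid, &status, WNOHANG) == 0)
		printf("tracee not reported stopped yet (transient RUNNING)\n");
}

A tracer that needs a deterministic answer can simply block in waitpid() instead of polling with WNOHANG.
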
@@ -96,16 +113,14 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	 */
 	read_lock(&tasklist_lock);
 	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
-		ret = 0;
 		/*
 		 * child->sighand can't be NULL, release_task()
 		 * does ptrace_unlink() before __exit_signal().
 		 */
 		spin_lock_irq(&child->sighand->siglock);
-		if (task_is_stopped(child))
-			child->state = TASK_TRACED;
-		else if (!task_is_traced(child) && !kill)
-			ret = -ESRCH;
+		WARN_ON_ONCE(task_is_stopped(child));
+		if (task_is_traced(child) || kill)
+			ret = 0;
 		spin_unlock_irq(&child->sighand->siglock);
 	}
 	read_unlock(&tasklist_lock);
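
The userspace contract enforced here is unchanged by the rewrite: requests other than attach (and PTRACE_KILL, which passes kill=1) are accepted only while the tracee sits in ptrace-stop under the calling tracer; otherwise the request fails with ESRCH. A hedged sketch of the usual tracer pattern follows; the function name is illustrative and error handling is minimal.

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <errno.h>
#include <stdio.h>

/* Illustrative only: read one word from the tracee's memory. The request
 * is valid only after waitpid() has reported the tracee stopped; issued
 * against a running tracee it fails with ESRCH. */
static long peek_word(pid_t pid, void *addr)
{
	long word;
	int status;

	if (waitpid(pid, &status, 0) < 0 || !WIFSTOPPED(status))
		return -1;		/* tracee is not in ptrace-stop */

	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
	if (word == -1 && errno == ESRCH)
		fprintf(stderr, "tracee not stopped under this tracer\n");
	return word;
}
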
@@ -169,6 +184,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 
 static int ptrace_attach(struct task_struct *task)
 {
+	bool wait_trap = false;
 	int retval;
 
 	audit_ptrace(task);
@@ -208,12 +224,42 @@ static int ptrace_attach(struct task_struct *task)
 	__ptrace_link(task, current);
 	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
 
+	spin_lock(&task->sighand->siglock);
+
+	/*
+	 * If the task is already STOPPED, set GROUP_STOP_PENDING and
+	 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
+	 * will be cleared if the child completes the transition or any
+	 * event which clears the group stop states happens. We'll wait
+	 * for the transition to complete before returning from this
+	 * function.
+	 *
+	 * This hides STOPPED -> RUNNING -> TRACED transition from the
+	 * attaching thread but a different thread in the same group can
+	 * still observe the transient RUNNING state. IOW, if another
+	 * thread's WNOHANG wait(2) on the stopped tracee races against
+	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
+	 *
+	 * The following task_is_stopped() test is safe as both transitions
+	 * in and out of STOPPED are protected by siglock.
+	 */
+	if (task_is_stopped(task)) {
+		task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
+		signal_wake_up(task, 1);
+		wait_trap = true;
+	}
+
+	spin_unlock(&task->sighand->siglock);
+
 	retval = 0;
 unlock_tasklist:
 	write_unlock_irq(&tasklist_lock);
 unlock_creds:
 	mutex_unlock(&task->signal->cred_guard_mutex);
 out:
+	if (wait_trap)
+		wait_event(current->signal->wait_chldexit,
+			   !(task->group_stop & GROUP_STOP_TRAPPING));
 	return retval;
 }
 
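
For an attach to a task that is already group-stopped, the wait_event() added above means PTRACE_ATTACH does not return to the attaching thread until the tracee has completed the STOPPED -> RUNNING -> TRACED transition (or the group stop has been cleared in the meantime), so that thread never observes the transient RUNNING state. A hedged userspace sketch of that case; the helper name is assumed and error handling is omitted.

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

/* Illustrative only: attach to a process that is already in group stop
 * (e.g. stopped earlier with SIGSTOP or ^Z). After this change the
 * attach itself waits for the tracee to reach TASK_TRACED, so the
 * following waitpid() just picks up the stop notification. */
static void attach_to_stopped(pid_t pid)
{
	int status;

	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
	waitpid(pid, &status, 0);
}

Other threads in the tracer's thread group can still race with the transition, as the comment in the hunk above spells out.
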
@@ -316,8 +362,6 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
 	if (child->ptrace) {
 		child->exit_code = data;
 		dead = __ptrace_detach(current, child);
-		if (!child->exit_state)
-			wake_up_state(child, TASK_TRACED | TASK_STOPPED);
 	}
 	write_unlock_irq(&tasklist_lock);
 