Diffstat (limited to 'kernel')
-rw-r--r--	kernel/exit.c	 84
-rw-r--r--	kernel/ptrace.c	118
-rw-r--r--	kernel/signal.c	375
3 files changed, 427 insertions(+), 150 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 6a488ad2dce5..5cbc83e83a5d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1538,33 +1538,83 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
 		return 0;
 	}
 
-	if (likely(!ptrace) && unlikely(task_ptrace(p))) {
+	/* dead body doesn't have much to contribute */
+	if (p->exit_state == EXIT_DEAD)
+		return 0;
+
+	/* slay zombie? */
+	if (p->exit_state == EXIT_ZOMBIE) {
 		/*
-		 * This child is hidden by ptrace.
-		 * We aren't allowed to see it now, but eventually we will.
+		 * A zombie ptracee is only visible to its ptracer.
+		 * Notification and reaping will be cascaded to the real
+		 * parent when the ptracer detaches.
 		 */
-		wo->notask_error = 0;
-		return 0;
-	}
+		if (likely(!ptrace) && unlikely(task_ptrace(p))) {
+			/* it will become visible, clear notask_error */
+			wo->notask_error = 0;
+			return 0;
+		}
 
-	if (p->exit_state == EXIT_DEAD)
-		return 0;
+		/* we don't reap group leaders with subthreads */
+		if (!delay_group_leader(p))
+			return wait_task_zombie(wo, p);
 
-	/*
-	 * We don't reap group leaders with subthreads.
-	 */
-	if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
-		return wait_task_zombie(wo, p);
+		/*
+		 * Allow access to stopped/continued state via zombie by
+		 * falling through.  Clearing of notask_error is complex.
+		 *
+		 * When !@ptrace:
+		 *
+		 * If WEXITED is set, notask_error should naturally be
+		 * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
+		 * so, if there are live subthreads, there are events to
+		 * wait for.  If all subthreads are dead, it's still safe
+		 * to clear - this function will be called again in a finite
+		 * amount of time once all the subthreads are released and
+		 * will then return without clearing.
+		 *
+		 * When @ptrace:
+		 *
+		 * Stopped state is per-task and thus can't change once the
+		 * target task dies.  Only continued and exited can happen.
+		 * Clear notask_error if WCONTINUED | WEXITED.
+		 */
+		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
+			wo->notask_error = 0;
+	} else {
+		/*
+		 * If @p is ptraced by a task in its real parent's group,
+		 * hide group stop/continued state when looking at @p as
+		 * the real parent; otherwise, a single stop can be
+		 * reported twice as group and ptrace stops.
+		 *
+		 * If a ptracer wants to distinguish the two events for its
+		 * own children, it should create a separate process which
+		 * takes the role of real parent.
+		 */
+		if (likely(!ptrace) && task_ptrace(p) &&
+		    same_thread_group(p->parent, p->real_parent))
+			return 0;
+
+		/*
+		 * @p is alive and it's gonna stop, continue or exit, so
+		 * there always is something to wait for.
+		 */
+		wo->notask_error = 0;
+	}
 
 	/*
-	 * It's stopped or running now, so it might
-	 * later continue, exit, or stop again.
+	 * Wait for stopped.  Depending on @ptrace, different stopped state
+	 * is used and the two don't interact with each other.
 	 */
-	wo->notask_error = 0;
-
 	if (task_stopped_code(p, ptrace))
 		return wait_task_stopped(wo, ptrace, p);
 
+	/*
+	 * Wait for continued.  There's only one continued state and the
+	 * ptracer can consume it which can confuse the real parent.  Don't
+	 * use WCONTINUED from ptracer.  You don't need or want it.
+	 */
 	return wait_task_continued(wo, p);
 }
 
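
The notask_error logic above determines which wait(2) states a parent can
observe. As an illustration (not part of the patch), a minimal userspace
sketch exercising the three states the comment enumerates - stopped,
continued and exited - via waitid(2):

	#include <signal.h>
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		siginfo_t si;
		pid_t pid = fork();

		if (pid < 0)
			return 1;
		if (pid == 0) {		/* child: stop, then exit when continued */
			raise(SIGSTOP);
			_exit(42);
		}

		if (waitid(P_PID, pid, &si, WSTOPPED) == 0)	/* observe the stop */
			printf("child stopped by signal %d\n", si.si_status);

		kill(pid, SIGCONT);

		if (waitid(P_PID, pid, &si, WCONTINUED) == 0)	/* observe the continue */
			printf("child continued\n");

		if (waitid(P_PID, pid, &si, WEXITED) == 0)	/* observe the exit */
			printf("child exited with status %d\n", si.si_status);
		return 0;
	}
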
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0fc1eed28d27..512bd017218d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -37,35 +37,33 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 	child->parent = new_parent;
 }
 
-/*
- * Turn a tracing stop into a normal stop now, since with no tracer there
- * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
- * signal sent that would resume the child, but didn't because it was in
- * TASK_TRACED, resume it now.
- * Requires that irqs be disabled.
- */
-static void ptrace_untrace(struct task_struct *child)
-{
-	spin_lock(&child->sighand->siglock);
-	if (task_is_traced(child)) {
-		/*
-		 * If the group stop is completed or in progress,
-		 * this thread was already counted as stopped.
-		 */
-		if (child->signal->flags & SIGNAL_STOP_STOPPED ||
-		    child->signal->group_stop_count)
-			__set_task_state(child, TASK_STOPPED);
-		else
-			signal_wake_up(child, 1);
-	}
-	spin_unlock(&child->sighand->siglock);
-}
-
-/*
- * unptrace a task: move it back to its original parent and
- * remove it from the ptrace list.
- *
- * Must be called with the tasklist lock write-held.
+/**
+ * __ptrace_unlink - unlink ptracee and restore its execution state
+ * @child: ptracee to be unlinked
+ *
+ * Remove @child from the ptrace list, move it back to the original parent,
+ * and restore the execution state so that it conforms to the group stop
+ * state.
+ *
+ * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
+ * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
+ * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
+ * If the ptracer is exiting, the ptracee can be in any state.
+ *
+ * After detach, the ptracee should be in a state which conforms to the
+ * group stop.  If the group is stopped or in the process of stopping, the
+ * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
+ * up from TASK_TRACED.
+ *
+ * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
+ * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
+ * to but in the opposite direction of what happens while attaching to a
+ * stopped task.  However, in this direction, the intermediate RUNNING
+ * state is not hidden even from the current ptracer and if it immediately
+ * re-attaches and performs a WNOHANG wait(2), it may fail.
+ *
+ * CONTEXT:
+ * write_lock_irq(tasklist_lock)
  */
 void __ptrace_unlink(struct task_struct *child)
 {
@@ -75,8 +73,27 @@ void __ptrace_unlink(struct task_struct *child)
 	child->parent = child->real_parent;
 	list_del_init(&child->ptrace_entry);
 
-	if (task_is_traced(child))
-		ptrace_untrace(child);
+	spin_lock(&child->sighand->siglock);
+
+	/*
+	 * Reinstate GROUP_STOP_PENDING if group stop is in effect and
+	 * @child isn't dead.
+	 */
+	if (!(child->flags & PF_EXITING) &&
+	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
+	     child->signal->group_stop_count))
+		child->group_stop |= GROUP_STOP_PENDING;
+
+	/*
+	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
+	 * @child in the butt.  Note that @resume should be used iff @child
+	 * is in TASK_TRACED; otherwise, we might unduly disrupt
+	 * TASK_KILLABLE sleeps.
+	 */
+	if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child))
+		signal_wake_up(child, task_is_traced(child));
+
+	spin_unlock(&child->sighand->siglock);
 }
 
 /*
@@ -95,16 +112,14 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	 */
 	read_lock(&tasklist_lock);
 	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
-		ret = 0;
 		/*
 		 * child->sighand can't be NULL, release_task()
 		 * does ptrace_unlink() before __exit_signal().
 		 */
 		spin_lock_irq(&child->sighand->siglock);
-		if (task_is_stopped(child))
-			child->state = TASK_TRACED;
-		else if (!task_is_traced(child) && !kill)
-			ret = -ESRCH;
+		WARN_ON_ONCE(task_is_stopped(child));
+		if (task_is_traced(child) || kill)
+			ret = 0;
 		spin_unlock_irq(&child->sighand->siglock);
 	}
 	read_unlock(&tasklist_lock);
@@ -168,6 +183,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 
 static int ptrace_attach(struct task_struct *task)
 {
+	bool wait_trap = false;
 	int retval;
 
 	audit_ptrace(task);
@@ -207,12 +223,42 @@ static int ptrace_attach(struct task_struct *task)
 	__ptrace_link(task, current);
 	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
 
+	spin_lock(&task->sighand->siglock);
+
+	/*
+	 * If the task is already STOPPED, set GROUP_STOP_PENDING and
+	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
+	 * will be cleared if the child completes the transition or any
+	 * event which clears the group stop states happens.  We'll wait
+	 * for the transition to complete before returning from this
+	 * function.
+	 *
+	 * This hides STOPPED -> RUNNING -> TRACED transition from the
+	 * attaching thread but a different thread in the same group can
+	 * still observe the transient RUNNING state.  IOW, if another
+	 * thread's WNOHANG wait(2) on the stopped tracee races against
+	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
+	 *
+	 * The following task_is_stopped() test is safe as both transitions
+	 * in and out of STOPPED are protected by siglock.
+	 */
+	if (task_is_stopped(task)) {
+		task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
+		signal_wake_up(task, 1);
+		wait_trap = true;
+	}
+
+	spin_unlock(&task->sighand->siglock);
+
 	retval = 0;
 unlock_tasklist:
 	write_unlock_irq(&tasklist_lock);
 unlock_creds:
 	mutex_unlock(&task->signal->cred_guard_mutex);
 out:
+	if (wait_trap)
+		wait_event(current->signal->wait_chldexit,
+			   !(task->group_stop & GROUP_STOP_TRAPPING));
 	return retval;
 }
 
@@ -315,8 +361,6 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
 	if (child->ptrace) {
 		child->exit_code = data;
 		dead = __ptrace_detach(current, child);
-		if (!child->exit_state)
-			wake_up_state(child, TASK_TRACED | TASK_STOPPED);
 	}
 	write_unlock_irq(&tasklist_lock);
 
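
The attach-side handshake above can be seen from userspace. The following
sketch (illustrative, not part of the patch) attaches to a child that is
already group-stopped; with this change, PTRACE_ATTACH waits for the
STOPPED -> TRACED transition to finish, so the subsequent waitpid() sees
the trap immediately, and after PTRACE_DETACH the group stop is
reinstated, so the child needs a SIGCONT to run again:

	#include <signal.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		int status;
		pid_t pid = fork();

		if (pid < 0)
			return 1;
		if (pid == 0) {			/* child: enter group stop */
			raise(SIGSTOP);
			_exit(0);
		}

		waitpid(pid, &status, WUNTRACED);	/* child is now group-stopped */

		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0) {
			perror("PTRACE_ATTACH");
			return 1;
		}

		/* ATTACH waited for GROUP_STOP_TRAPPING, the trap is reportable */
		waitpid(pid, &status, 0);
		printf("tracee trapped, stop signal %d\n", WSTOPSIG(status));

		ptrace(PTRACE_DETACH, pid, NULL, NULL);

		/* detach reinstated the group stop; release it and reap the exit */
		kill(pid, SIGCONT);
		waitpid(pid, &status, 0);
		return 0;
	}
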
diff --git a/kernel/signal.c b/kernel/signal.c
index dc17929ab78a..c15e9792b088 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -124,7 +124,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 
 static int recalc_sigpending_tsk(struct task_struct *t)
 {
-	if (t->signal->group_stop_count > 0 ||
+	if ((t->group_stop & GROUP_STOP_PENDING) ||
 	    PENDING(&t->pending, &t->blocked) ||
 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
 		set_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -223,6 +223,83 @@ static inline void print_dropped_signal(int sig)
 				current->comm, current->pid, sig);
 }
 
+/**
+ * task_clear_group_stop_trapping - clear group stop trapping bit
+ * @task: target task
+ *
+ * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us.  Clear it
+ * and wake up the ptracer.  Note that we don't need any further locking.
+ * @task->siglock guarantees that @task->parent points to the ptracer.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ */
+static void task_clear_group_stop_trapping(struct task_struct *task)
+{
+	if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
+		task->group_stop &= ~GROUP_STOP_TRAPPING;
+		__wake_up_sync(&task->parent->signal->wait_chldexit,
+			       TASK_UNINTERRUPTIBLE, 1);
+	}
+}
+
+/**
+ * task_clear_group_stop_pending - clear pending group stop
+ * @task: target task
+ *
+ * Clear group stop states for @task.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ */
+void task_clear_group_stop_pending(struct task_struct *task)
+{
+	task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME |
+			      GROUP_STOP_DEQUEUED);
+}
+
+/**
+ * task_participate_group_stop - participate in a group stop
+ * @task: task participating in a group stop
+ *
+ * @task has GROUP_STOP_PENDING set and is participating in a group stop.
+ * Group stop states are cleared and the group stop count is consumed if
+ * %GROUP_STOP_CONSUME was set.  If the consumption completes the group
+ * stop, the appropriate %SIGNAL_* flags are set.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ *
+ * RETURNS:
+ * %true if group stop completion should be notified to the parent, %false
+ * otherwise.
+ */
+static bool task_participate_group_stop(struct task_struct *task)
+{
+	struct signal_struct *sig = task->signal;
+	bool consume = task->group_stop & GROUP_STOP_CONSUME;
+
+	WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
+
+	task_clear_group_stop_pending(task);
+
+	if (!consume)
+		return false;
+
+	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
+		sig->group_stop_count--;
+
+	/*
+	 * Tell the caller to notify completion iff we are entering into a
+	 * fresh group stop.  Read comment in do_signal_stop() for details.
+	 */
+	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
+		sig->flags = SIGNAL_STOP_STOPPED;
+		return true;
+	}
+	return false;
+}
+
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
@@ -527,7 +604,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		 * is to alert stop-signal processing code when another
 		 * processor has come along and cleared the flag.
 		 */
-		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+		current->group_stop |= GROUP_STOP_DEQUEUED;
 	}
 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
 		/*
@@ -727,34 +804,14 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
 	} else if (sig == SIGCONT) {
 		unsigned int why;
 		/*
-		 * Remove all stop signals from all queues,
-		 * and wake all threads.
+		 * Remove all stop signals from all queues, wake all threads.
 		 */
 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
 		t = p;
 		do {
-			unsigned int state;
+			task_clear_group_stop_pending(t);
 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-			/*
-			 * If there is a handler for SIGCONT, we must make
-			 * sure that no thread returns to user mode before
-			 * we post the signal, in case it was the only
-			 * thread eligible to run the signal handler--then
-			 * it must not do anything between resuming and
-			 * running the handler.  With the TIF_SIGPENDING
-			 * flag set, the thread will pause and acquire the
-			 * siglock that we hold now and until we've queued
-			 * the pending signal.
-			 *
-			 * Wake up the stopped thread _after_ setting
-			 * TIF_SIGPENDING
-			 */
-			state = __TASK_STOPPED;
-			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
-				set_tsk_thread_flag(t, TIF_SIGPENDING);
-				state |= TASK_INTERRUPTIBLE;
-			}
-			wake_up_state(t, state);
+			wake_up_state(t, __TASK_STOPPED);
 		} while_each_thread(p, t);
 
 		/*
@@ -780,13 +837,6 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
 			signal->flags = why | SIGNAL_STOP_CONTINUED;
 			signal->group_stop_count = 0;
 			signal->group_exit_code = 0;
-		} else {
-			/*
-			 * We are not stopped, but there could be a stop
-			 * signal in the middle of being processed after
-			 * being removed from the queue.  Clear that too.
-			 */
-			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
 		}
 	}
 
@@ -875,6 +925,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
 			signal->group_stop_count = 0;
 			t = p;
 			do {
+				task_clear_group_stop_pending(t);
 				sigaddset(&t->pending.signal, SIGKILL);
 				signal_wake_up(t, 1);
 			} while_each_thread(p, t);
@@ -1109,6 +1160,7 @@ int zap_other_threads(struct task_struct *p)
 	p->signal->group_stop_count = 0;
 
 	while_each_thread(p, t) {
+		task_clear_group_stop_pending(t);
 		count++;
 
 		/* Don't bother with already dead threads */
@@ -1536,16 +1588,30 @@ int do_notify_parent(struct task_struct *tsk, int sig)
 	return ret;
 }
 
-static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
+/**
+ * do_notify_parent_cldstop - notify parent of stopped/continued state change
+ * @tsk: task reporting the state change
+ * @for_ptracer: the notification is for ptracer
+ * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
+ *
+ * Notify @tsk's parent that the stopped/continued state has changed.  If
+ * @for_ptracer is %false, @tsk's group leader notifies its real parent.
+ * If %true, @tsk reports to @tsk->parent which should be the ptracer.
+ *
+ * CONTEXT:
+ * Must be called with tasklist_lock at least read locked.
+ */
+static void do_notify_parent_cldstop(struct task_struct *tsk,
+				     bool for_ptracer, int why)
 {
 	struct siginfo info;
 	unsigned long flags;
 	struct task_struct *parent;
 	struct sighand_struct *sighand;
 
-	if (task_ptrace(tsk))
+	if (for_ptracer) {
 		parent = tsk->parent;
-	else {
+	} else {
 		tsk = tsk->group_leader;
 		parent = tsk->real_parent;
 	}
@@ -1621,6 +1687,15 @@ static int sigkill_pending(struct task_struct *tsk)
 }
 
 /*
+ * Test whether the target task of the usual cldstop notification - the
+ * real_parent of @child - is in the same group as the ptracer.
+ */
+static bool real_parent_is_ptracer(struct task_struct *child)
+{
+	return same_thread_group(child->parent, child->real_parent);
+}
+
+/*
  * This must be called with current->sighand->siglock held.
  *
  * This should be the path for all ptrace stops.
@@ -1631,10 +1706,12 @@ static int sigkill_pending(struct task_struct *tsk)
  * If we actually decide not to stop at all because the tracer
  * is gone, we keep current->exit_code unless clear_code.
  */
-static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
+static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 	__releases(&current->sighand->siglock)
 	__acquires(&current->sighand->siglock)
 {
+	bool gstop_done = false;
+
 	if (arch_ptrace_stop_needed(exit_code, info)) {
 		/*
 		 * The arch code has something special to do before a
@@ -1655,21 +1732,49 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 	}
 
 	/*
-	 * If there is a group stop in progress,
-	 * we must participate in the bookkeeping.
+	 * If @why is CLD_STOPPED, we're trapping to participate in a group
+	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
+	 * while siglock was released for the arch hook, PENDING could be
+	 * clear now.  We act as if SIGCONT is received after TASK_TRACED
+	 * is entered - ignore it.
 	 */
-	if (current->signal->group_stop_count > 0)
-		--current->signal->group_stop_count;
+	if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
+		gstop_done = task_participate_group_stop(current);
 
 	current->last_siginfo = info;
 	current->exit_code = exit_code;
 
-	/* Let the debugger run.  */
-	__set_current_state(TASK_TRACED);
+	/*
+	 * TRACED should be visible before TRAPPING is cleared; otherwise,
+	 * the tracer might fail do_wait().
+	 */
+	set_current_state(TASK_TRACED);
+
+	/*
+	 * We're committing to trapping.  Clearing GROUP_STOP_TRAPPING and
+	 * transition to TASK_TRACED should be atomic with respect to
+	 * siglock.  This should be done after the arch hook as siglock is
+	 * released and regrabbed across it.
+	 */
+	task_clear_group_stop_trapping(current);
+
 	spin_unlock_irq(&current->sighand->siglock);
 	read_lock(&tasklist_lock);
 	if (may_ptrace_stop()) {
-		do_notify_parent_cldstop(current, CLD_TRAPPED);
+		/*
+		 * Notify parents of the stop.
+		 *
+		 * While ptraced, there are two parents - the ptracer and
+		 * the real_parent of the group_leader.  The ptracer should
+		 * know about every stop while the real parent is only
+		 * interested in the completion of group stop.  The states
+		 * for the two don't interact with each other.  Notify
+		 * separately unless they're gonna be duplicates.
+		 */
+		do_notify_parent_cldstop(current, true, why);
+		if (gstop_done && !real_parent_is_ptracer(current))
+			do_notify_parent_cldstop(current, false, why);
+
 		/*
 		 * Don't want to allow preemption here, because
 		 * sys_ptrace() needs this task to be inactive.
@@ -1684,7 +1789,16 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 		/*
 		 * By the time we got the lock, our tracer went away.
 		 * Don't drop the lock yet, another tracer may come.
+		 *
+		 * If @gstop_done, the ptracer went away between group stop
+		 * completion and here.  During detach, it would have set
+		 * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED
+		 * in do_signal_stop() on return, so notifying the real
+		 * parent of the group stop completion is enough.
 		 */
+		if (gstop_done)
+			do_notify_parent_cldstop(current, false, why);
+
 		__set_current_state(TASK_RUNNING);
 		if (clear_code)
 			current->exit_code = 0;
@@ -1728,7 +1842,7 @@ void ptrace_notify(int exit_code)
 
 	/* Let the debugger run.  */
 	spin_lock_irq(&current->sighand->siglock);
-	ptrace_stop(exit_code, 1, &info);
+	ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
 	spin_unlock_irq(&current->sighand->siglock);
 }
 
@@ -1741,66 +1855,115 @@ void ptrace_notify(int exit_code)
 static int do_signal_stop(int signr)
 {
 	struct signal_struct *sig = current->signal;
-	int notify;
 
-	if (!sig->group_stop_count) {
+	if (!(current->group_stop & GROUP_STOP_PENDING)) {
+		unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
 		struct task_struct *t;
 
-		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
+		/* signr will be recorded in task->group_stop for retries */
+		WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
+
+		if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) ||
 		    unlikely(signal_group_exit(sig)))
 			return 0;
 		/*
-		 * There is no group stop already in progress.
-		 * We must initiate one now.
+		 * There is no group stop already in progress.  We must
+		 * initiate one now.
+		 *
+		 * While ptraced, a task may be resumed while group stop is
+		 * still in effect and then receive a stop signal and
+		 * initiate another group stop.  This deviates from the
+		 * usual behavior as two consecutive stop signals can't
+		 * cause two group stops when !ptraced.  That is why we
+		 * also check !task_is_stopped(t) below.
+		 *
+		 * The condition can be distinguished by testing whether
+		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
+		 * group_exit_code in such case.
+		 *
+		 * This is not necessary for SIGNAL_STOP_CONTINUED because
+		 * an intervening stop signal is required to cause two
+		 * continued events regardless of ptrace.
 		 */
-		sig->group_exit_code = signr;
+		if (!(sig->flags & SIGNAL_STOP_STOPPED))
+			sig->group_exit_code = signr;
+		else
+			WARN_ON_ONCE(!task_ptrace(current));
 
+		current->group_stop &= ~GROUP_STOP_SIGMASK;
+		current->group_stop |= signr | gstop;
 		sig->group_stop_count = 1;
-		for (t = next_thread(current); t != current; t = next_thread(t))
+		for (t = next_thread(current); t != current;
+		     t = next_thread(t)) {
+			t->group_stop &= ~GROUP_STOP_SIGMASK;
 			/*
 			 * Setting state to TASK_STOPPED for a group
 			 * stop is always done with the siglock held,
 			 * so this check has no races.
 			 */
-			if (!(t->flags & PF_EXITING) &&
-			    !task_is_stopped_or_traced(t)) {
+			if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
+				t->group_stop |= signr | gstop;
 				sig->group_stop_count++;
 				signal_wake_up(t, 0);
 			}
+		}
 	}
-	/*
-	 * If there are no other threads in the group, or if there is
-	 * a group stop in progress and we are the last to stop, report
-	 * to the parent.  When ptraced, every thread reports itself.
-	 */
-	notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
-	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
-	/*
-	 * tracehook_notify_jctl() can drop and reacquire siglock, so
-	 * we keep ->group_stop_count != 0 before the call. If SIGCONT
-	 * or SIGKILL comes in between ->group_stop_count == 0.
-	 */
-	if (sig->group_stop_count) {
-		if (!--sig->group_stop_count)
-			sig->flags = SIGNAL_STOP_STOPPED;
-		current->exit_code = sig->group_exit_code;
+retry:
+	if (likely(!task_ptrace(current))) {
+		int notify = 0;
+
+		/*
+		 * If there are no other threads in the group, or if there
+		 * is a group stop in progress and we are the last to stop,
+		 * report to the parent.
+		 */
+		if (task_participate_group_stop(current))
+			notify = CLD_STOPPED;
+
 		__set_current_state(TASK_STOPPED);
+		spin_unlock_irq(&current->sighand->siglock);
+
+		/*
+		 * Notify the parent of the group stop completion.  Because
+		 * we're not holding either the siglock or tasklist_lock
+		 * here, ptracer may attach in between; however, this is for
+		 * group stop and should always be delivered to the real
+		 * parent of the group leader.  The new ptracer will get
+		 * its notification when this task transitions into
+		 * TASK_TRACED.
+		 */
+		if (notify) {
+			read_lock(&tasklist_lock);
+			do_notify_parent_cldstop(current, false, notify);
+			read_unlock(&tasklist_lock);
+		}
+
+		/* Now we don't run again until woken by SIGCONT or SIGKILL */
+		schedule();
+
+		spin_lock_irq(&current->sighand->siglock);
+	} else {
+		ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
+			    CLD_STOPPED, 0, NULL);
+		current->exit_code = 0;
 	}
-	spin_unlock_irq(&current->sighand->siglock);
 
-	if (notify) {
-		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(current, notify);
-		read_unlock(&tasklist_lock);
+	/*
+	 * GROUP_STOP_PENDING could be set if another group stop has
+	 * started since being woken up or ptrace wants us to transit
+	 * between TASK_STOPPED and TRACED.  Retry group stop.
+	 */
+	if (current->group_stop & GROUP_STOP_PENDING) {
+		WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
+		goto retry;
 	}
 
-	/* Now we don't run again until woken by SIGCONT or SIGKILL */
-	do {
-		schedule();
-	} while (try_to_freeze());
+	/* PTRACE_ATTACH might have raced with task killing, clear trapping */
+	task_clear_group_stop_trapping(current);
+
+	spin_unlock_irq(&current->sighand->siglock);
 
 	tracehook_finish_jctl();
-	current->exit_code = 0;
 
 	return 1;
 }
@@ -1814,7 +1977,7 @@ static int ptrace_signal(int signr, siginfo_t *info,
 	ptrace_signal_deliver(regs, cookie);
 
 	/* Let the debugger run.  */
-	ptrace_stop(signr, 0, info);
+	ptrace_stop(signr, CLD_TRAPPED, 0, info);
 
 	/* We're back.  Did the debugger cancel the sig? */
 	signr = current->exit_code;
@@ -1869,18 +2032,36 @@ relock:
 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
 	 */
 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
-		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
-			? CLD_CONTINUED : CLD_STOPPED;
+		struct task_struct *leader;
+		int why;
+
+		if (signal->flags & SIGNAL_CLD_CONTINUED)
+			why = CLD_CONTINUED;
+		else
+			why = CLD_STOPPED;
+
 		signal->flags &= ~SIGNAL_CLD_MASK;
 
-		why = tracehook_notify_jctl(why, CLD_CONTINUED);
 		spin_unlock_irq(&sighand->siglock);
 
-		if (why) {
-			read_lock(&tasklist_lock);
-			do_notify_parent_cldstop(current->group_leader, why);
-			read_unlock(&tasklist_lock);
-		}
+		/*
+		 * Notify the parent that we're continuing.  This event is
+		 * always per-process and doesn't make a whole lot of sense
+		 * for ptracers, who shouldn't consume the state via
+		 * wait(2) either, but, for backward compatibility, notify
+		 * the ptracer of the group leader too unless it's gonna be
+		 * a duplicate.
+		 */
+		read_lock(&tasklist_lock);
+
+		do_notify_parent_cldstop(current, false, why);
+
+		leader = current->group_leader;
+		if (task_ptrace(leader) && !real_parent_is_ptracer(leader))
+			do_notify_parent_cldstop(leader, true, why);
+
+		read_unlock(&tasklist_lock);
+
 		goto relock;
 	}
 
@@ -1897,8 +2078,8 @@ relock:
 		if (unlikely(signr != 0))
 			ka = return_ka;
 		else {
-			if (unlikely(signal->group_stop_count > 0) &&
-			    do_signal_stop(0))
+			if (unlikely(current->group_stop &
+				     GROUP_STOP_PENDING) && do_signal_stop(0))
 				goto relock;
 
 			signr = dequeue_signal(current, &current->blocked,
@@ -2045,17 +2226,19 @@ void exit_signals(struct task_struct *tsk)
 		if (!signal_pending(t) && !(t->flags & PF_EXITING))
 			recalc_sigpending_and_wake(t);
 
-	if (unlikely(tsk->signal->group_stop_count) &&
-	    !--tsk->signal->group_stop_count) {
-		tsk->signal->flags = SIGNAL_STOP_STOPPED;
-		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
-	}
+	if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
+	    task_participate_group_stop(tsk))
+		group_stop = CLD_STOPPED;
 out:
 	spin_unlock_irq(&tsk->sighand->siglock);
 
+	/*
+	 * If group stop has completed, deliver the notification.  This
+	 * should always go to the real parent of the group leader.
+	 */
 	if (unlikely(group_stop)) {
 		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(tsk, group_stop);
+		do_notify_parent_cldstop(tsk, false, group_stop);
 		read_unlock(&tasklist_lock);
 	}
 }
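
For reference, the task->group_stop flag word manipulated throughout these
hunks is declared in include/linux/sched.h, whose hunk is not shown on
this page. The layout below is reconstructed from the uses above and may
differ in detail from the actual header:

	/* reconstructed sketch of the include/linux/sched.h side of this change */
	#define GROUP_STOP_SIGMASK	0xffff    /* signr of the last group stop */
	#define GROUP_STOP_PENDING	(1 << 16) /* task should stop for group stop */
	#define GROUP_STOP_CONSUME	(1 << 17) /* consume group stop count */
	#define GROUP_STOP_TRAPPING	(1 << 18) /* switching from STOPPED to TRACED */
	#define GROUP_STOP_DEQUEUED	(1 << 19) /* stop signal dequeued */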