Diffstat (limited to 'kernel/exit.c')

 kernel/exit.c | 451 +++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 277 insertions(+), 174 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index ceb258782835..93d2711b9381 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -71,7 +71,7 @@ static void __unhash_process(struct task_struct *p)
 		__get_cpu_var(process_counts)--;
 	}
 	list_del_rcu(&p->thread_group);
-	remove_parent(p);
+	list_del_init(&p->sibling);
 }
 
 /*
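The remove_parent()/add_parent() helpers disappear throughout this patch in favor of open-coded list operations on ->sibling. For reference, the dropped wrappers were (if memory serves) thin macros in include/linux/sched.h, roughly:

	#define remove_parent(p)	list_del_init(&(p)->sibling)
	#define add_parent(p)		list_add_tail(&(p)->sibling, &(p)->parent->children)

so the replacement above is behavior-preserving; it just says what it does at the call site.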
@@ -152,6 +152,18 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 	put_task_struct(container_of(rhp, struct task_struct, rcu));
 }
 
+/*
+ * Do final ptrace-related cleanup of a zombie being reaped.
+ *
+ * Called with write_lock(&tasklist_lock) held.
+ */
+static void ptrace_release_task(struct task_struct *p)
+{
+	BUG_ON(!list_empty(&p->ptraced));
+	ptrace_unlink(p);
+	BUG_ON(!list_empty(&p->ptrace_entry));
+}
+
 void release_task(struct task_struct * p)
 {
 	struct task_struct *leader;
@@ -160,8 +172,7 @@ repeat:
 	atomic_dec(&p->user->processes);
 	proc_flush_task(p);
 	write_lock_irq(&tasklist_lock);
-	ptrace_unlink(p);
-	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
+	ptrace_release_task(p);
 	__exit_signal(p);
 
 	/*
@@ -315,9 +326,8 @@ static void reparent_to_kthreadd(void)
 
 	ptrace_unlink(current);
 	/* Reparent to init */
-	remove_parent(current);
 	current->real_parent = current->parent = kthreadd_task;
-	add_parent(current);
+	list_move_tail(&current->sibling, &current->real_parent->children);
 
 	/* Set the exit signal to SIGCHLD so we signal init on exit */
 	current->exit_signal = SIGCHLD;
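list_move_tail() folds the old unlink/re-link pair into a single call: it deletes the entry from whatever list it is on and appends it to the new parent's ->children. A sketch of the helper, per include/linux/list.h of this era:

	static inline void list_move_tail(struct list_head *list,
					  struct list_head *head)
	{
		__list_del(list->prev, list->next);	/* unlink in place */
		list_add_tail(list, head);		/* append to new list */
	}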
@@ -692,37 +702,97 @@ static void exit_mm(struct task_struct * tsk)
 	mmput(mm);
 }
 
-static void
-reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
+/*
+ * Return nonzero if @parent's children should reap themselves.
+ *
+ * Called with write_lock_irq(&tasklist_lock) held.
+ */
+static int ignoring_children(struct task_struct *parent)
 {
-	if (p->pdeath_signal)
-		/* We already hold the tasklist_lock here.  */
-		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
+	int ret;
+	struct sighand_struct *psig = parent->sighand;
+	unsigned long flags;
+	spin_lock_irqsave(&psig->siglock, flags);
+	ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
+	       (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
+	spin_unlock_irqrestore(&psig->siglock, flags);
+	return ret;
+}
 
-	/* Move the child from its dying parent to the new one.  */
-	if (unlikely(traced)) {
-		/* Preserve ptrace links if someone else is tracing this child.  */
-		list_del_init(&p->ptrace_list);
-		if (ptrace_reparented(p))
-			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
-	} else {
-		/* If this child is being traced, then we're the one tracing it
-		 * anyway, so let go of it.
+/*
+ * Detach all tasks we were using ptrace on.
+ * Any that need to be release_task'd are put on the @dead list.
+ *
+ * Called with write_lock(&tasklist_lock) held.
+ */
+static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
+{
+	struct task_struct *p, *n;
+	int ign = -1;
+
+	list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
+		__ptrace_unlink(p);
+
+		if (p->exit_state != EXIT_ZOMBIE)
+			continue;
+
+		/*
+		 * If it's a zombie, our attachedness prevented normal
+		 * parent notification or self-reaping.  Do notification
+		 * now if it would have happened earlier.  If it should
+		 * reap itself, add it to the @dead list.  We can't call
+		 * release_task() here because we already hold tasklist_lock.
+		 *
+		 * If it's our own child, there is no notification to do.
+		 * But if our normal children self-reap, then this child
+		 * was prevented by ptrace and we must reap it now.
 		 */
-		p->ptrace = 0;
-		remove_parent(p);
-		p->parent = p->real_parent;
-		add_parent(p);
+		if (!task_detached(p) && thread_group_empty(p)) {
+			if (!same_thread_group(p->real_parent, parent))
+				do_notify_parent(p, p->exit_signal);
+			else {
+				if (ign < 0)
+					ign = ignoring_children(parent);
+				if (ign)
+					p->exit_signal = -1;
+			}
+		}
 
-		if (task_is_traced(p)) {
+		if (task_detached(p)) {
 			/*
-			 * If it was at a trace stop, turn it into
-			 * a normal stop since it's no longer being
-			 * traced.
+			 * Mark it as in the process of being reaped.
 			 */
-			ptrace_untrace(p);
+			p->exit_state = EXIT_DEAD;
+			list_add(&p->ptrace_entry, dead);
 		}
 	}
+}
+
+/*
+ * Finish up exit-time ptrace cleanup.
+ *
+ * Called without locks.
+ */
+static void ptrace_exit_finish(struct task_struct *parent,
+			       struct list_head *dead)
+{
+	struct task_struct *p, *n;
+
+	BUG_ON(!list_empty(&parent->ptraced));
+
+	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
+		list_del_init(&p->ptrace_entry);
+		release_task(p);
+	}
+}
+
+static void reparent_thread(struct task_struct *p, struct task_struct *father)
+{
+	if (p->pdeath_signal)
+		/* We already hold the tasklist_lock here.  */
+		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
+
+	list_move_tail(&p->sibling, &p->real_parent->children);
 
 	/* If this is a threaded reparent there is no need to
 	 * notify anyone anything has happened.
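ignoring_children() encodes the POSIX rule that children of a parent with SIGCHLD set to SIG_IGN (or SA_NOCLDWAIT) are reaped automatically rather than left as waitable zombies; @ign caches the answer so the siglock is taken at most once per pass over ->ptraced. The userspace-visible contract, as a minimal hypothetical demo:

	#include <errno.h>
	#include <signal.h>
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		signal(SIGCHLD, SIG_IGN);	/* children should self-reap */
		if (fork() == 0)
			_exit(0);
		/* wait() blocks until the child is gone, then fails:
		 * there is no zombie left to collect. */
		pid_t w = wait(NULL);
		printf("wait() = %d, ECHILD = %d\n", (int)w, errno == ECHILD);
		return 0;
	}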
@@ -737,7 +807,8 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 	/* If we'd notified the old parent about this child's death,
 	 * also notify the new parent.
 	 */
-	if (!traced && p->exit_state == EXIT_ZOMBIE &&
+	if (!ptrace_reparented(p) &&
+	    p->exit_state == EXIT_ZOMBIE &&
 	    !task_detached(p) && thread_group_empty(p))
 		do_notify_parent(p, p->exit_signal);
 
@@ -754,12 +825,15 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 static void forget_original_parent(struct task_struct *father)
 {
 	struct task_struct *p, *n, *reaper = father;
-	struct list_head ptrace_dead;
-
-	INIT_LIST_HEAD(&ptrace_dead);
+	LIST_HEAD(ptrace_dead);
 
 	write_lock_irq(&tasklist_lock);
 
+	/*
+	 * First clean up ptrace if we were using it.
+	 */
+	ptrace_exit(father, &ptrace_dead);
+
 	do {
 		reaper = next_thread(reaper);
 		if (reaper == father) {
@@ -768,58 +842,19 @@ static void forget_original_parent(struct task_struct *father)
 		}
 	} while (reaper->flags & PF_EXITING);
 
-	/*
-	 * There are only two places where our children can be:
-	 *
-	 * - in our child list
-	 * - in our ptraced child list
-	 *
-	 * Search them and reparent children.
-	 */
 	list_for_each_entry_safe(p, n, &father->children, sibling) {
-		int ptrace;
-
-		ptrace = p->ptrace;
-
-		/* if father isn't the real parent, then ptrace must be enabled */
-		BUG_ON(father != p->real_parent && !ptrace);
-
-		if (father == p->real_parent) {
-			/* reparent with a reaper, real father it's us */
-			p->real_parent = reaper;
-			reparent_thread(p, father, 0);
-		} else {
-			/* reparent ptraced task to its real parent */
-			__ptrace_unlink (p);
-			if (p->exit_state == EXIT_ZOMBIE && !task_detached(p) &&
-						thread_group_empty(p))
-				do_notify_parent(p, p->exit_signal);
-		}
-
-		/*
-		 * if the ptraced child is a detached zombie we must collect
-		 * it before we exit, or it will remain zombie forever since
-		 * we prevented it from self-reap itself while it was being
-		 * traced by us, to be able to see it in wait4.
-		 */
-		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && task_detached(p)))
-			list_add(&p->ptrace_list, &ptrace_dead);
-	}
-
-	list_for_each_entry_safe(p, n, &father->ptrace_children, ptrace_list) {
 		p->real_parent = reaper;
-		reparent_thread(p, father, 1);
+		if (p->parent == father) {
+			BUG_ON(p->ptrace);
+			p->parent = p->real_parent;
+		}
+		reparent_thread(p, father);
 	}
 
 	write_unlock_irq(&tasklist_lock);
 	BUG_ON(!list_empty(&father->children));
-	BUG_ON(!list_empty(&father->ptrace_children));
-
-	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_list) {
-		list_del_init(&p->ptrace_list);
-		release_task(p);
-	}
 
+	ptrace_exit_finish(father, &ptrace_dead);
 }
 
 /*
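The resulting forget_original_parent() is a classic two-phase teardown: all list surgery happens under tasklist_lock, and the release_task() calls are deferred until after the unlock, because release_task() write-locks tasklist_lock itself. In outline (a sketch, not the literal code):

	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	ptrace_exit(father, &dead);		/* only moves zombies onto @dead */
	/* ... reparent the remaining children ... */
	write_unlock_irq(&tasklist_lock);

	ptrace_exit_finish(father, &dead);	/* release_task() takes the lock again */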
@@ -1180,13 +1215,6 @@ static int eligible_child(enum pid_type type, struct pid *pid, int options,
 		return 0;
 	}
 
-	/*
-	 * Do not consider detached threads that are
-	 * not ptraced:
-	 */
-	if (task_detached(p) && !p->ptrace)
-		return 0;
-
 	/* Wait for all children (clone and not) if __WALL is set;
 	 * otherwise, wait for clone children *only* if __WCLONE is
 	 * set; otherwise, wait for non-clone children *only*. (Note:
@@ -1197,14 +1225,10 @@ static int eligible_child(enum pid_type type, struct pid *pid, int options,
 		return 0;
 
 	err = security_task_wait(p);
-	if (likely(!err))
-		return 1;
+	if (err)
+		return err;
 
-	if (type != PIDTYPE_PID)
-		return 0;
-	/* This child was explicitly requested, abort */
-	read_unlock(&tasklist_lock);
-	return err;
+	return 1;
 }
 
 static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
@@ -1238,7 +1262,7 @@ static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
  * the lock and this task is uninteresting.  If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_zombie(struct task_struct *p, int noreap,
+static int wait_task_zombie(struct task_struct *p, int options,
 			    struct siginfo __user *infop,
 			    int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1246,7 +1270,10 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
 	int retval, status, traced;
 	pid_t pid = task_pid_vnr(p);
 
-	if (unlikely(noreap)) {
+	if (!likely(options & WEXITED))
+		return 0;
+
+	if (unlikely(options & WNOWAIT)) {
 		uid_t uid = p->uid;
 		int exit_code = p->exit_code;
 		int why, status;
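wait_task_zombie() now receives the whole options word and filters on WEXITED and WNOWAIT itself, instead of the caller pre-digesting a noreap flag. WNOWAIT's semantics are userspace-visible: the exit is reported but the zombie is left in place, so a later wait still succeeds. A minimal hypothetical demo using waitid(2):

	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();
		if (pid == 0)
			_exit(7);

		siginfo_t info;
		waitid(P_PID, pid, &info, WEXITED | WNOWAIT);	/* peek, no reap */
		printf("peek: pid=%d status=%d\n", info.si_pid, info.si_status);

		waitid(P_PID, pid, &info, WEXITED);		/* now reap it */
		printf("reap: pid=%d status=%d\n", info.si_pid, info.si_status);
		return 0;
	}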
@@ -1396,21 +1423,24 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
  * the lock and this task is uninteresting.  If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_stopped(struct task_struct *p,
-			     int noreap, struct siginfo __user *infop,
+static int wait_task_stopped(int ptrace, struct task_struct *p,
+			     int options, struct siginfo __user *infop,
 			     int __user *stat_addr, struct rusage __user *ru)
 {
 	int retval, exit_code, why;
 	uid_t uid = 0; /* unneeded, required by compiler */
 	pid_t pid;
 
+	if (!(options & WUNTRACED))
+		return 0;
+
 	exit_code = 0;
 	spin_lock_irq(&p->sighand->siglock);
 
 	if (unlikely(!task_is_stopped_or_traced(p)))
 		goto unlock_sig;
 
-	if (!(p->ptrace & PT_PTRACED) && p->signal->group_stop_count > 0)
+	if (!ptrace && p->signal->group_stop_count > 0)
 		/*
 		 * A group stop is in progress and this is the group leader.
 		 * We won't report until all threads have stopped.
@@ -1421,7 +1451,7 @@ static int wait_task_stopped(struct task_struct *p,
 	if (!exit_code)
 		goto unlock_sig;
 
-	if (!noreap)
+	if (!unlikely(options & WNOWAIT))
 		p->exit_code = 0;
 
 	uid = p->uid;
@@ -1439,10 +1469,10 @@ unlock_sig:
 	 */
 	get_task_struct(p);
 	pid = task_pid_vnr(p);
-	why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
+	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
 	read_unlock(&tasklist_lock);
 
-	if (unlikely(noreap))
+	if (unlikely(options & WNOWAIT))
 		return wait_noreap_copyout(p, pid, uid,
 					   why, exit_code,
 					   infop, ru);
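`why` now keys off which child list the task was found on (the new ptrace argument) rather than testing p->ptrace & PT_PTRACED. The distinction surfaces in userspace as siginfo.si_code; for the untraced case, a hypothetical check:

	#include <signal.h>
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();
		if (pid == 0) {
			raise(SIGSTOP);
			_exit(0);
		}
		siginfo_t info;
		waitid(P_PID, pid, &info, WSTOPPED);	/* report the group stop */
		printf("si_code == CLD_STOPPED: %d\n", info.si_code == CLD_STOPPED);
		kill(pid, SIGCONT);
		waitid(P_PID, pid, &info, WEXITED);	/* reap */
		return 0;
	}

A ptraced child stopping for its tracer reports CLD_TRAPPED instead.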
@@ -1476,7 +1506,7 @@ unlock_sig:
  * the lock and this task is uninteresting.  If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_continued(struct task_struct *p, int noreap,
+static int wait_task_continued(struct task_struct *p, int options,
 			       struct siginfo __user *infop,
 			       int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1484,6 +1514,9 @@ static int wait_task_continued(struct task_struct *p, int noreap,
 	pid_t pid;
 	uid_t uid;
 
+	if (!unlikely(options & WCONTINUED))
+		return 0;
+
 	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
 		return 0;
 
@@ -1493,7 +1526,7 @@ static int wait_task_continued(struct task_struct *p, int noreap,
 		spin_unlock_irq(&p->sighand->siglock);
 		return 0;
 	}
-	if (!noreap)
+	if (!unlikely(options & WNOWAIT))
 		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
 	spin_unlock_irq(&p->sighand->siglock);
 
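wait_task_continued() gets the same treatment: it bails out early unless WCONTINUED was requested, and SIGNAL_STOP_CONTINUED is consumed only when WNOWAIT is absent, so a resumption is reported exactly once. A hypothetical demo:

	#include <signal.h>
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		siginfo_t info;
		pid_t pid = fork();
		if (pid == 0) {
			for (;;)
				pause();
		}
		kill(pid, SIGSTOP);
		waitid(P_PID, pid, &info, WSTOPPED);	/* be sure it stopped */
		kill(pid, SIGCONT);
		waitid(P_PID, pid, &info, WCONTINUED);
		printf("si_code == CLD_CONTINUED: %d\n", info.si_code == CLD_CONTINUED);
		kill(pid, SIGKILL);
		waitid(P_PID, pid, &info, WEXITED);	/* reap */
		return 0;
	}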
@@ -1519,89 +1552,161 @@ static int wait_task_continued(struct task_struct *p, int noreap,
 	return retval;
 }
 
+/*
+ * Consider @p for a wait by @parent.
+ *
+ * -ECHILD should be in *@notask_error before the first call.
+ * Returns nonzero for a final return, when we have unlocked tasklist_lock.
+ * Returns zero if the search for a child should continue;
+ * then *@notask_error is 0 if @p is an eligible child,
+ * or another error from security_task_wait(), or still -ECHILD.
+ */
+static int wait_consider_task(struct task_struct *parent, int ptrace,
+			      struct task_struct *p, int *notask_error,
+			      enum pid_type type, struct pid *pid, int options,
+			      struct siginfo __user *infop,
+			      int __user *stat_addr, struct rusage __user *ru)
+{
+	int ret = eligible_child(type, pid, options, p);
+	if (!ret)
+		return ret;
+
+	if (unlikely(ret < 0)) {
+		/*
+		 * If we have not yet seen any eligible child,
+		 * then let this error code replace -ECHILD.
+		 * A permission error will give the user a clue
+		 * to look for security policy problems, rather
+		 * than for mysterious wait bugs.
+		 */
+		if (*notask_error)
+			*notask_error = ret;
+	}
+
+	if (likely(!ptrace) && unlikely(p->ptrace)) {
+		/*
+		 * This child is hidden by ptrace.
+		 * We aren't allowed to see it now, but eventually we will.
+		 */
+		*notask_error = 0;
+		return 0;
+	}
+
+	if (p->exit_state == EXIT_DEAD)
+		return 0;
+
+	/*
+	 * We don't reap group leaders with subthreads.
+	 */
+	if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
+		return wait_task_zombie(p, options, infop, stat_addr, ru);
+
+	/*
+	 * It's stopped or running now, so it might
+	 * later continue, exit, or stop again.
+	 */
+	*notask_error = 0;
+
+	if (task_is_stopped_or_traced(p))
+		return wait_task_stopped(ptrace, p, options,
+					 infop, stat_addr, ru);
+
+	return wait_task_continued(p, options, infop, stat_addr, ru);
+}
+
+/*
+ * Do the work of do_wait() for one thread in the group, @tsk.
+ *
+ * -ECHILD should be in *@notask_error before the first call.
+ * Returns nonzero for a final return, when we have unlocked tasklist_lock.
+ * Returns zero if the search for a child should continue; then
+ * *@notask_error is 0 if there were any eligible children,
+ * or another error from security_task_wait(), or still -ECHILD.
+ */
+static int do_wait_thread(struct task_struct *tsk, int *notask_error,
+			  enum pid_type type, struct pid *pid, int options,
+			  struct siginfo __user *infop, int __user *stat_addr,
+			  struct rusage __user *ru)
+{
+	struct task_struct *p;
+
+	list_for_each_entry(p, &tsk->children, sibling) {
+		/*
+		 * Do not consider detached threads.
+		 */
+		if (!task_detached(p)) {
+			int ret = wait_consider_task(tsk, 0, p, notask_error,
+						     type, pid, options,
+						     infop, stat_addr, ru);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ptrace_do_wait(struct task_struct *tsk, int *notask_error,
+			  enum pid_type type, struct pid *pid, int options,
+			  struct siginfo __user *infop, int __user *stat_addr,
+			  struct rusage __user *ru)
+{
+	struct task_struct *p;
+
+	/*
+	 * Traditionally we see ptrace'd stopped tasks regardless of options.
+	 */
+	options |= WUNTRACED;
+
+	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
+		int ret = wait_consider_task(tsk, 1, p, notask_error,
+					     type, pid, options,
+					     infop, stat_addr, ru);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static long do_wait(enum pid_type type, struct pid *pid, int options,
 		    struct siginfo __user *infop, int __user *stat_addr,
 		    struct rusage __user *ru)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	struct task_struct *tsk;
-	int flag, retval;
+	int retval;
 
 	add_wait_queue(&current->signal->wait_chldexit,&wait);
 repeat:
-	/* If there is nothing that can match our critier just get out */
+	/*
+	 * If there is nothing that can match our criteria just get out.
+	 * We will clear @retval to zero if we see any child that might later
+	 * match our criteria, even if we are not able to reap it yet.
+	 */
 	retval = -ECHILD;
 	if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type])))
 		goto end;
 
-	/*
-	 * We will set this flag if we see any child that might later
-	 * match our criteria, even if we are not able to reap it yet.
-	 */
-	flag = retval = 0;
 	current->state = TASK_INTERRUPTIBLE;
 	read_lock(&tasklist_lock);
 	tsk = current;
 	do {
-		struct task_struct *p;
-
-		list_for_each_entry(p, &tsk->children, sibling) {
-			int ret = eligible_child(type, pid, options, p);
-			if (!ret)
-				continue;
-
-			if (unlikely(ret < 0)) {
-				retval = ret;
-			} else if (task_is_stopped_or_traced(p)) {
-				/*
-				 * It's stopped now, so it might later
-				 * continue, exit, or stop again.
-				 */
-				flag = 1;
-				if (!(p->ptrace & PT_PTRACED) &&
-				    !(options & WUNTRACED))
-					continue;
-
-				retval = wait_task_stopped(p,
-						(options & WNOWAIT), infop,
-						stat_addr, ru);
-			} else if (p->exit_state == EXIT_ZOMBIE &&
-					!delay_group_leader(p)) {
-				/*
-				 * We don't reap group leaders with subthreads.
-				 */
-				if (!likely(options & WEXITED))
-					continue;
-				retval = wait_task_zombie(p,
-						(options & WNOWAIT), infop,
-						stat_addr, ru);
-			} else if (p->exit_state != EXIT_DEAD) {
-				/*
-				 * It's running now, so it might later
-				 * exit, stop, or stop and then continue.
-				 */
-				flag = 1;
-				if (!unlikely(options & WCONTINUED))
-					continue;
-				retval = wait_task_continued(p,
-						(options & WNOWAIT), infop,
-						stat_addr, ru);
-			}
-			if (retval != 0) /* tasklist_lock released */
-				goto end;
-		}
-		if (!flag) {
-			list_for_each_entry(p, &tsk->ptrace_children,
-								ptrace_list) {
-				flag = eligible_child(type, pid, options, p);
-				if (!flag)
-					continue;
-				if (likely(flag > 0))
-					break;
-				retval = flag;
-				goto end;
-			}
-		}
+		int tsk_result = do_wait_thread(tsk, &retval,
+						type, pid, options,
+						infop, stat_addr, ru);
+		if (!tsk_result)
+			tsk_result = ptrace_do_wait(tsk, &retval,
+						    type, pid, options,
+						    infop, stat_addr, ru);
+		if (tsk_result) {
+			/*
+			 * tasklist_lock is unlocked and we have a final result.
+			 */
+			retval = tsk_result;
+			goto end;
+		}
+
 		if (options & __WNOTHREAD)
 			break;
 		tsk = next_thread(tsk);
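The restructured search makes the two sources of waitable tasks explicit: do_wait_thread() walks ->children, and ptrace_do_wait() walks ->ptraced, which is how a tracer waits on tasks that are not its natural children. That is the kernel side of the familiar attach-then-wait idiom; a hypothetical sketch:

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ptrace.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		if (argc < 2)
			return 1;
		pid_t pid = (pid_t)atoi(argv[1]);	/* some unrelated process */

		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) != 0) {
			perror("PTRACE_ATTACH");
			return 1;
		}
		int status;
		waitpid(pid, &status, 0);	/* works: pid is on our ->ptraced list */
		printf("attach-stop seen: %d\n", WIFSTOPPED(status));
		ptrace(PTRACE_DETACH, pid, NULL, NULL);
		return 0;
	}

Note also that ptrace_do_wait() forces WUNTRACED on, preserving the tradition that ptrace stops are always reported.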
@@ -1609,16 +1714,14 @@ repeat:
 	} while (tsk != current);
 	read_unlock(&tasklist_lock);
 
-	if (flag) {
-		if (options & WNOHANG)
-			goto end;
+	if (!retval && !(options & WNOHANG)) {
 		retval = -ERESTARTSYS;
-		if (signal_pending(current))
-			goto end;
-		schedule();
-		goto repeat;
+		if (!signal_pending(current)) {
+			schedule();
+			goto repeat;
+		}
 	}
-	retval = -ECHILD;
+
 end:
 	current->state = TASK_RUNNING;
 	remove_wait_queue(&current->signal->wait_chldexit,&wait);
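With flag folded into retval, the tail of do_wait() reads cleanly: retval stays -ECHILD only if no eligible child was ever seen; if a child exists but is not yet reapable, WNOHANG returns 0 and the blocking case sleeps and retries. A hypothetical check of the WNOHANG contract:

	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();
		if (pid == 0) {
			sleep(10);	/* stay alive, change no state */
			_exit(0);
		}
		pid_t w = waitpid(-1, NULL, WNOHANG);
		printf("WNOHANG with a running child: %d (expect 0, not -1)\n", (int)w);
		return 0;
	}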