 kernel/exit.c | 215 +++++++++++++++++++++++++++++++++----------------
 1 file changed, 135 insertions(+), 80 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index ceb258782835..7453356a961f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1238,7 +1238,7 @@ static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
  * the lock and this task is uninteresting. If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_zombie(struct task_struct *p, int noreap,
+static int wait_task_zombie(struct task_struct *p, int options,
 			    struct siginfo __user *infop,
 			    int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1246,7 +1246,10 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
 	int retval, status, traced;
 	pid_t pid = task_pid_vnr(p);
 
-	if (unlikely(noreap)) {
+	if (!likely(options & WEXITED))
+		return 0;
+
+	if (unlikely(options & WNOWAIT)) {
 		uid_t uid = p->uid;
 		int exit_code = p->exit_code;
 		int why, status;
@@ -1397,13 +1400,16 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
  * released the lock and the system call should return.
  */
 static int wait_task_stopped(struct task_struct *p,
-			     int noreap, struct siginfo __user *infop,
+			     int options, struct siginfo __user *infop,
 			     int __user *stat_addr, struct rusage __user *ru)
 {
 	int retval, exit_code, why;
 	uid_t uid = 0; /* unneeded, required by compiler */
 	pid_t pid;
 
+	if (!(p->ptrace & PT_PTRACED) && !(options & WUNTRACED))
+		return 0;
+
 	exit_code = 0;
 	spin_lock_irq(&p->sighand->siglock);
 
@@ -1421,7 +1427,7 @@ static int wait_task_stopped(struct task_struct *p,
 	if (!exit_code)
 		goto unlock_sig;
 
-	if (!noreap)
+	if (!unlikely(options & WNOWAIT))
 		p->exit_code = 0;
 
 	uid = p->uid;
@@ -1442,7 +1448,7 @@ unlock_sig:
 	why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
 	read_unlock(&tasklist_lock);
 
-	if (unlikely(noreap))
+	if (unlikely(options & WNOWAIT))
 		return wait_noreap_copyout(p, pid, uid,
 					   why, exit_code,
 					   infop, ru);
@@ -1476,7 +1482,7 @@ unlock_sig:
  * the lock and this task is uninteresting. If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_continued(struct task_struct *p, int noreap,
+static int wait_task_continued(struct task_struct *p, int options,
 			       struct siginfo __user *infop,
 			       int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1484,6 +1490,9 @@ static int wait_task_continued(struct task_struct *p, int noreap,
 	pid_t pid;
 	uid_t uid;
 
+	if (!unlikely(options & WCONTINUED))
+		return 0;
+
 	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
 		return 0;
 
@@ -1493,7 +1502,7 @@ static int wait_task_continued(struct task_struct *p, int noreap,
 		spin_unlock_irq(&p->sighand->siglock);
 		return 0;
 	}
-	if (!noreap)
+	if (!unlikely(options & WNOWAIT))
 		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
 	spin_unlock_irq(&p->sighand->siglock);
 
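
Taken together, the hunks above replace the single noreap argument with the full options word and let each helper test the flag it cares about (WEXITED, WUNTRACED, WCONTINUED, WNOWAIT) itself. For reference, a minimal userspace sketch, not part of the patch and with error handling trimmed, of what those flags select through waitid(2); WNOWAIT is the old "noreap" behaviour:

/* Userspace sketch of the flags whose checks the patch pushes into the
 * wait_task_*() helpers.  Illustrative only; not from the patch. */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	if (pid < 0)
		return 1;
	if (pid == 0)
		_exit(42);	/* child: exit at once so the parent sees a zombie */

	siginfo_t info;

	/* WNOWAIT: report the child's state but leave it reapable. */
	if (waitid(P_PID, pid, &info, WEXITED | WNOWAIT) == 0)
		printf("peeked: pid=%d status=%d\n", info.si_pid, info.si_status);

	/* Without WNOWAIT the zombie is actually reaped this time.
	 * WSTOPPED/WCONTINUED would additionally report stop/continue events. */
	if (waitid(P_PID, pid, &info, WEXITED) == 0)
		printf("reaped: pid=%d status=%d\n", info.si_pid, info.si_status);

	return 0;
}
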
@@ -1519,89 +1528,137 @@ static int wait_task_continued(struct task_struct *p, int noreap,
 	return retval;
 }
 
+/*
+ * Consider @p for a wait by @parent.
+ *
+ * -ECHILD should be in *@notask_error before the first call.
+ * Returns nonzero for a final return, when we have unlocked tasklist_lock.
+ * Returns zero if the search for a child should continue;
+ * then *@notask_error is 0 if @p is an eligible child, or still -ECHILD.
+ */
+static int wait_consider_task(struct task_struct *parent,
+			      struct task_struct *p, int *notask_error,
+			      enum pid_type type, struct pid *pid, int options,
+			      struct siginfo __user *infop,
+			      int __user *stat_addr, struct rusage __user *ru)
+{
+	int ret = eligible_child(type, pid, options, p);
+	if (ret <= 0)
+		return ret;
+
+	if (p->exit_state == EXIT_DEAD)
+		return 0;
+
+	/*
+	 * We don't reap group leaders with subthreads.
+	 */
+	if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
+		return wait_task_zombie(p, options, infop, stat_addr, ru);
+
+	/*
+	 * It's stopped or running now, so it might
+	 * later continue, exit, or stop again.
+	 */
+	*notask_error = 0;
+
+	if (task_is_stopped_or_traced(p))
+		return wait_task_stopped(p, options, infop, stat_addr, ru);
+
+	return wait_task_continued(p, options, infop, stat_addr, ru);
+}
+
+/*
+ * Do the work of do_wait() for one thread in the group, @tsk.
+ *
+ * -ECHILD should be in *@notask_error before the first call.
+ * Returns nonzero for a final return, when we have unlocked tasklist_lock.
+ * Returns zero if the search for a child should continue; then
+ * *@notask_error is 0 if there were any eligible children, or still -ECHILD.
+ */
+static int do_wait_thread(struct task_struct *tsk, int *notask_error,
+			  enum pid_type type, struct pid *pid, int options,
+			  struct siginfo __user *infop, int __user *stat_addr,
+			  struct rusage __user *ru)
+{
+	struct task_struct *p;
+
+	list_for_each_entry(p, &tsk->children, sibling) {
+		int ret = wait_consider_task(tsk, p, notask_error,
+					     type, pid, options,
+					     infop, stat_addr, ru);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int ptrace_do_wait(struct task_struct *tsk, int *notask_error,
+			  enum pid_type type, struct pid *pid, int options,
+			  struct siginfo __user *infop, int __user *stat_addr,
+			  struct rusage __user *ru)
+{
+	struct task_struct *p;
+
+	/*
+	 * If we never saw an eligile child, check for children stolen by
+	 * ptrace. We don't leave -ECHILD in *@notask_error if there are any,
+	 * because we will eventually be allowed to wait for them again.
+	 */
+	if (!*notask_error)
+		return 0;
+
+	list_for_each_entry(p, &tsk->ptrace_children, ptrace_list) {
+		int ret = eligible_child(type, pid, options, p);
+		if (unlikely(ret < 0))
+			return ret;
+		if (ret) {
+			*notask_error = 0;
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
 static long do_wait(enum pid_type type, struct pid *pid, int options,
 		    struct siginfo __user *infop, int __user *stat_addr,
 		    struct rusage __user *ru)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	struct task_struct *tsk;
-	int flag, retval;
+	int retval;
 
 	add_wait_queue(&current->signal->wait_chldexit,&wait);
 repeat:
-	/* If there is nothing that can match our critier just get out */
+	/*
+	 * If there is nothing that can match our critiera just get out.
+	 * We will clear @retval to zero if we see any child that might later
+	 * match our criteria, even if we are not able to reap it yet.
+	 */
 	retval = -ECHILD;
 	if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type])))
 		goto end;
 
-	/*
-	 * We will set this flag if we see any child that might later
-	 * match our criteria, even if we are not able to reap it yet.
-	 */
-	flag = retval = 0;
 	current->state = TASK_INTERRUPTIBLE;
 	read_lock(&tasklist_lock);
 	tsk = current;
 	do {
-		struct task_struct *p;
-
-		list_for_each_entry(p, &tsk->children, sibling) {
-			int ret = eligible_child(type, pid, options, p);
-			if (!ret)
-				continue;
-
-			if (unlikely(ret < 0)) {
-				retval = ret;
-			} else if (task_is_stopped_or_traced(p)) {
-				/*
-				 * It's stopped now, so it might later
-				 * continue, exit, or stop again.
-				 */
-				flag = 1;
-				if (!(p->ptrace & PT_PTRACED) &&
-				    !(options & WUNTRACED))
-					continue;
-
-				retval = wait_task_stopped(p,
-						(options & WNOWAIT), infop,
-						stat_addr, ru);
-			} else if (p->exit_state == EXIT_ZOMBIE &&
-				   !delay_group_leader(p)) {
-				/*
-				 * We don't reap group leaders with subthreads.
-				 */
-				if (!likely(options & WEXITED))
-					continue;
-				retval = wait_task_zombie(p,
-						(options & WNOWAIT), infop,
-						stat_addr, ru);
-			} else if (p->exit_state != EXIT_DEAD) {
-				/*
-				 * It's running now, so it might later
-				 * exit, stop, or stop and then continue.
-				 */
-				flag = 1;
-				if (!unlikely(options & WCONTINUED))
-					continue;
-				retval = wait_task_continued(p,
-						(options & WNOWAIT), infop,
-						stat_addr, ru);
-			}
-			if (retval != 0) /* tasklist_lock released */
-				goto end;
-		}
-		if (!flag) {
-			list_for_each_entry(p, &tsk->ptrace_children,
-					    ptrace_list) {
-				flag = eligible_child(type, pid, options, p);
-				if (!flag)
-					continue;
-				if (likely(flag > 0))
-					break;
-				retval = flag;
-				goto end;
-			}
-		}
+		int tsk_result = do_wait_thread(tsk, &retval,
+						type, pid, options,
+						infop, stat_addr, ru);
+		if (!tsk_result)
+			tsk_result = ptrace_do_wait(tsk, &retval,
+						    type, pid, options,
+						    infop, stat_addr, ru);
+		if (tsk_result) {
+			/*
+			 * tasklist_lock is unlocked and we have a final result.
+			 */
+			retval = tsk_result;
+			goto end;
 		}
+
 		if (options & __WNOTHREAD)
 			break;
 		tsk = next_thread(tsk);
@@ -1609,16 +1666,14 @@ repeat:
 	} while (tsk != current);
 	read_unlock(&tasklist_lock);
 
-	if (flag) {
-		if (options & WNOHANG)
-			goto end;
+	if (!retval && !(options & WNOHANG)) {
 		retval = -ERESTARTSYS;
-		if (signal_pending(current))
-			goto end;
-		schedule();
-		goto repeat;
+		if (!signal_pending(current)) {
+			schedule();
+			goto repeat;
+		}
 	}
-	retval = -ECHILD;
+
 end:
 	current->state = TASK_RUNNING;
 	remove_wait_queue(&current->signal->wait_chldexit,&wait);
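
The new helpers introduce the *notask_error protocol documented in their comments: the caller seeds it with -ECHILD, an eligible-but-unreapable child clears it to 0, and a nonzero return ends the walk with tasklist_lock already dropped. Below is a self-contained sketch of that accumulator pattern, using hypothetical names (consider_item, search_all) rather than the kernel code:

/* Standalone sketch of the *notask_error pattern used by
 * wait_consider_task()/do_wait_thread(); hypothetical names, not kernel code. */
#include <stdio.h>

#define ECHILD_ERR (-10)	/* stand-in for -ECHILD */

/* Returns nonzero for a final result; returns zero to keep searching,
 * clearing *notask_error if the item was at least eligible. */
static int consider_item(int item, int *notask_error)
{
	if (item < 0)
		return 0;	/* not eligible: leave *notask_error alone */
	*notask_error = 0;	/* eligible, might match later */
	if (item > 100)
		return item;	/* a final, reportable result */
	return 0;
}

static int search_all(const int *items, int n)
{
	int retval = ECHILD_ERR;	/* stays -ECHILD until something is eligible */

	for (int i = 0; i < n; i++) {
		int ret = consider_item(items[i], &retval);
		if (ret)
			return ret;	/* final result short-circuits the walk */
	}
	return retval;	/* 0 = keep waiting, ECHILD_ERR = nothing to wait for */
}

int main(void)
{
	int none[] = { -1, -2 };
	int some[] = { -1, 7 };
	int done[] = { 7, 200 };

	printf("%d %d %d\n",
	       search_all(none, 2),	/* -10: no eligible items     */
	       search_all(some, 2),	/*   0: eligible, none final  */
	       search_all(done, 2));	/* 200: final result          */
	return 0;
}

Running the sketch prints "-10 0 200", mirroring the three outcomes do_wait() distinguishes after this patch: nothing to wait for, keep waiting, and a final result.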