Diffstat (limited to 'kernel/signal.c')
 -rw-r--r--  kernel/signal.c | 83
 1 file changed, 38 insertions(+), 45 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index d282fea81138..4980a073237f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -678,7 +678,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
 
 /* forward decl */
 static void do_notify_parent_cldstop(struct task_struct *tsk,
-				     struct task_struct *parent,
+				     int to_self,
 				     int why);
 
 /*
@@ -729,14 +729,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			p->signal->group_stop_count = 0;
 			p->signal->flags = SIGNAL_STOP_CONTINUED;
 			spin_unlock(&p->sighand->siglock);
-			if (p->ptrace & PT_PTRACED)
-				do_notify_parent_cldstop(p, p->parent,
-							 CLD_STOPPED);
-			else
-				do_notify_parent_cldstop(
-					p->group_leader,
-					p->group_leader->real_parent,
-					CLD_STOPPED);
+			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
 			spin_lock(&p->sighand->siglock);
 		}
 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
@@ -777,14 +770,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			p->signal->flags = SIGNAL_STOP_CONTINUED;
 			p->signal->group_exit_code = 0;
 			spin_unlock(&p->sighand->siglock);
-			if (p->ptrace & PT_PTRACED)
-				do_notify_parent_cldstop(p, p->parent,
-							 CLD_CONTINUED);
-			else
-				do_notify_parent_cldstop(
-					p->group_leader,
-					p->group_leader->real_parent,
-					CLD_CONTINUED);
+			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
 			spin_lock(&p->sighand->siglock);
 		} else {
 			/*
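
The two hunks above collapse the ptraced/non-ptraced branches into a single call, passing p->ptrace & PT_PTRACED as the new to_self flag. Either way, what the parent ultimately receives is a SIGCHLD whose si_code is CLD_STOPPED or CLD_CONTINUED. A minimal userspace sketch (illustration only, not part of this patch) that makes those notifications visible:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

static void chld_handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;
	/* si_code carries the "why" value chosen in the kernel code above. */
	const char *why = si->si_code == CLD_STOPPED   ? "CLD_STOPPED" :
			  si->si_code == CLD_CONTINUED ? "CLD_CONTINUED" : "other";
	printf("SIGCHLD from pid %d: %s\n", (int)si->si_pid, why); /* demo only: printf is not async-signal-safe */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = chld_handler;
	sa.sa_flags = SA_SIGINFO;	/* request the full siginfo payload */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGCHLD, &sa, NULL);

	pid_t child = fork();
	if (child == 0) {		/* child: just idle until killed */
		for (;;)
			pause();
	}

	sleep(1);
	kill(child, SIGSTOP);		/* expect CLD_STOPPED in the parent */
	sleep(1);
	kill(child, SIGCONT);		/* expect CLD_CONTINUED in the parent */
	sleep(1);
	kill(child, SIGKILL);
	waitpid(child, NULL, 0);
	return 0;
}
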
@@ -1380,16 +1366,16 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	unsigned long flags;
 	int ret = 0;
 
-	/*
-	 * We need the tasklist lock even for the specific
-	 * thread case (when we don't need to follow the group
-	 * lists) in order to avoid races with "p->sighand"
-	 * going away or changing from under us.
-	 */
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 	read_lock(&tasklist_lock);
+
+	if (unlikely(p->flags & PF_EXITING)) {
+		ret = -1;
+		goto out_err;
+	}
+
 	spin_lock_irqsave(&p->sighand->siglock, flags);
 
 	if (unlikely(!list_empty(&q->list))) {
 		/*
 		 * If an SI_TIMER entry is already queue just increment
@@ -1399,7 +1385,7 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 			BUG();
 		q->info.si_overrun++;
 		goto out;
-	} 
+	}
 	/* Short-circuit ignored signals. */
 	if (sig_ignored(p, sig)) {
 		ret = 1;
@@ -1414,8 +1400,10 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 
 out:
 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
+out_err:
 	read_unlock(&tasklist_lock);
-	return(ret);
+
+	return ret;
 }
 
 int
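
send_sigqueue() is the delivery path for preallocated sigqueue entries, as used by POSIX timers; the new PF_EXITING check bails out with -1 before touching the exiting task's siglock, and the existing SI_TIMER branch only bumps si_overrun when the entry is already queued. A rough userspace counterpart (illustration only, not part of this patch) where that overrun accounting becomes observable through timer_getoverrun():

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

/* Link with -lrt on older glibc for the timer_* calls. */
int main(void)
{
	timer_t timerid;
	struct sigevent sev;
	struct itimerspec its;
	sigset_t set;
	siginfo_t si;

	/* Block SIGRTMIN so we can pick the queued entry up synchronously. */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL;	/* queued signal delivery */
	sev.sigev_signo = SIGRTMIN;
	timer_create(CLOCK_MONOTONIC, &sev, &timerid);

	/* Fire every 1 ms but dequeue only after ~100 ms: expirations that
	 * happen while the entry is already queued show up as an overrun
	 * count rather than as extra queued signals. */
	memset(&its, 0, sizeof(its));
	its.it_value.tv_nsec = 1000000;
	its.it_interval.tv_nsec = 1000000;
	timer_settime(timerid, 0, &its, NULL);

	usleep(100 * 1000);
	sigwaitinfo(&set, &si);
	printf("overruns for this delivery: %d\n", timer_getoverrun(timerid));

	timer_delete(timerid);
	return 0;
}
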
@@ -1542,14 +1530,20 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 	spin_unlock_irqrestore(&psig->siglock, flags);
 }
 
-static void
-do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
-			 int why)
+static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
 {
 	struct siginfo info;
 	unsigned long flags;
+	struct task_struct *parent;
 	struct sighand_struct *sighand;
 
+	if (to_self)
+		parent = tsk->parent;
+	else {
+		tsk = tsk->group_leader;
+		parent = tsk->real_parent;
+	}
+
 	info.si_signo = SIGCHLD;
 	info.si_errno = 0;
 	info.si_pid = tsk->pid;
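
With the hunk above, the callee itself picks the recipient: tsk->parent when to_self is set, otherwise the group leader's real_parent. The siginfo it goes on to fill in (si_pid, si_status, and si_code set from why) is what a parent reads back with waitid(). A small sketch of that parent side (illustration only, not part of this patch):

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();
	if (child == 0) {		/* child: wait around to be stopped */
		for (;;)
			pause();
	}

	sleep(1);
	kill(child, SIGSTOP);

	siginfo_t si;
	/* WSTOPPED reports CLD_STOPPED events; adding WCONTINUED would also
	 * report the CLD_CONTINUED case handled above. */
	if (waitid(P_PID, child, &si, WSTOPPED) == 0)
		printf("si_pid=%d si_code=%s si_status=%d\n",
		       (int)si.si_pid,
		       si.si_code == CLD_STOPPED ? "CLD_STOPPED" : "other",
		       si.si_status);	/* the signal that stopped the child */

	kill(child, SIGKILL);
	waitid(P_PID, child, &si, WEXITED);
	return 0;
}
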
@@ -1618,8 +1612,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 		    !(current->ptrace & PT_ATTACHED)) &&
 	     (likely(current->parent->signal != current->signal) ||
 	      !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
-		do_notify_parent_cldstop(current, current->parent,
-					 CLD_TRAPPED);
+		do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
 		read_unlock(&tasklist_lock);
 		schedule();
 	} else {
@@ -1668,25 +1661,25 @@ void ptrace_notify(int exit_code)
 static void
 finish_stop(int stop_count)
 {
+	int to_self;
+
 	/*
 	 * If there are no other threads in the group, or if there is
 	 * a group stop in progress and we are the last to stop,
 	 * report to the parent. When ptraced, every thread reports itself.
 	 */
-	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
-		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(current, current->parent,
-					 CLD_STOPPED);
-		read_unlock(&tasklist_lock);
-	}
-	else if (stop_count == 0) {
-		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(current->group_leader,
-					 current->group_leader->real_parent,
-					 CLD_STOPPED);
-		read_unlock(&tasklist_lock);
-	}
+	if (stop_count < 0 || (current->ptrace & PT_PTRACED))
+		to_self = 1;
+	else if (stop_count == 0)
+		to_self = 0;
+	else
+		goto out;
 
+	read_lock(&tasklist_lock);
+	do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
+	read_unlock(&tasklist_lock);
+
+out:
 	schedule();
 	/*
 	 * Now we don't run again until continued.
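
finish_stop() now just computes to_self: a ptraced thread (or a lone stopper, stop_count < 0) reports itself, the last thread of a group stop (stop_count == 0) reports for the group leader, and anything else skips the notification entirely via the new out label. The per-thread reporting under ptrace can be seen from a tracer with plain waitpid(); a hypothetical demo (not part of this patch):

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t child = fork();

	if (child == 0) {		/* child: idle until killed */
		for (;;)
			sleep(1);
	}

	/* Become the child's ptrace parent; the resulting stop is reported
	 * to us directly (the to_self path), not to our own parent. */
	if (ptrace(PTRACE_ATTACH, child, NULL, NULL) == -1) {
		perror("PTRACE_ATTACH");
		return 1;
	}

	waitpid(child, &status, 0);
	if (WIFSTOPPED(status))
		printf("tracer saw the stop, signal %d\n", WSTOPSIG(status));

	ptrace(PTRACE_DETACH, child, NULL, NULL);
	kill(child, SIGKILL);
	waitpid(child, &status, 0);
	return 0;
}
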
