author     Roland McGrath <roland@redhat.com>              2009-09-23 18:56:53 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-24 10:21:00 -0400
commit     ae6d2ed7bb3877ff35b9569402025f40ea2e1803 (patch)
tree       80527061ab7615cd890236b777b2be6e909a1573 /kernel
parent     b6fe2d117e98805ee76352e6468f87d494a97292 (diff)
signals: tracehook_notify_jctl change
This changes tracehook_notify_jctl() so it's called with the siglock held,
and changes its argument and return value definition. These clean-ups
make it a better fit for what new tracing hooks need to check.
Tracing needs the siglock here, held from the time TASK_STOPPED was set,
to avoid potential SIGCONT races if it wants to allow any blocking in its
tracing hooks.
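
As a rough sketch of the new convention, inferred only from the kernel/signal.c
call sites in the hunks below (the include/linux/tracehook.h side of the change
is outside this kernel-only diffstat): the caller now passes the CLD_* code it
would report on its own, or 0, and reports whatever non-zero CLD_* code the
hook hands back, with ->siglock held across the call:

	/* Old convention: boolean "notify?", si_code chosen by the caller,
	 * called after ->siglock has already been dropped. */
	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED))
		do_notify_parent_cldstop(current, CLD_STOPPED);

	/* New convention: called with ->siglock held (the hook may drop and
	 * reacquire it).  Pass the CLD_* code this thread would report on
	 * its own, or 0; report whatever non-zero code comes back.
	 * (tasklist_lock handling omitted here.) */
	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
	...
	if (notify)
		do_notify_parent_cldstop(current, notify);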
This also folds the finish_stop() function into its caller
do_signal_stop(). The function is short and has a single, unconditional
call site, so folding it in aids readability.
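
For orientation, the resulting tail of do_signal_stop(), condensed from the
hunk below (group_stop_count bookkeeping and comments trimmed), now runs in
this order:

	/* Under ->siglock: ask the tracing hook, mark ourselves stopped. */
	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
	__set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	/* Lock dropped: report to the parent, then sleep until woken. */
	if (notify) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, notify);
		read_unlock(&tasklist_lock);
	}
	do {
		schedule();
	} while (try_to_freeze());

	/* Back from the stop (SIGCONT or SIGKILL): let tracing clean up. */
	tracehook_finish_jctl();
	current->exit_code = 0;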
[oleg@redhat.com: do not call tracehook_notify_jctl() in TASK_STOPPED state]
[oleg@redhat.com: introduce tracehook_finish_jctl() helper]
Signed-off-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/signal.c  97
1 file changed, 47 insertions(+), 50 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 534ea81cde47..5d3b3f8f219b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -705,7 +705,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
 
 	if (why) {
 		/*
-		 * The first thread which returns from finish_stop()
+		 * The first thread which returns from do_signal_stop()
 		 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 		 * notify its parent. See get_signal_to_deliver().
 		 */
@@ -1664,29 +1664,6 @@ void ptrace_notify(int exit_code)
 	spin_unlock_irq(&current->sighand->siglock);
 }
 
-static void
-finish_stop(int stop_count)
-{
-	/*
-	 * If there are no other threads in the group, or if there is
-	 * a group stop in progress and we are the last to stop,
-	 * report to the parent. When ptraced, every thread reports itself.
-	 */
-	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
-		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(current, CLD_STOPPED);
-		read_unlock(&tasklist_lock);
-	}
-
-	do {
-		schedule();
-	} while (try_to_freeze());
-	/*
-	 * Now we don't run again until continued.
-	 */
-	current->exit_code = 0;
-}
-
 /*
  * This performs the stopping for SIGSTOP and other stop signals.
  * We have to stop all threads in the thread group.
@@ -1696,15 +1673,9 @@ finish_stop(int stop_count)
 static int do_signal_stop(int signr)
 {
 	struct signal_struct *sig = current->signal;
-	int stop_count;
+	int notify;
 
-	if (sig->group_stop_count > 0) {
-		/*
-		 * There is a group stop in progress. We don't need to
-		 * start another one.
-		 */
-		stop_count = --sig->group_stop_count;
-	} else {
+	if (!sig->group_stop_count) {
 		struct task_struct *t;
 
 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
@@ -1716,7 +1687,7 @@ static int do_signal_stop(int signr)
 		 */
 		sig->group_exit_code = signr;
 
-		stop_count = 0;
+		sig->group_stop_count = 1;
 		for (t = next_thread(current); t != current; t = next_thread(t))
 			/*
 			 * Setting state to TASK_STOPPED for a group
@@ -1725,19 +1696,44 @@ static int do_signal_stop(int signr)
 			 */
 			if (!(t->flags & PF_EXITING) &&
 			    !task_is_stopped_or_traced(t)) {
-				stop_count++;
+				sig->group_stop_count++;
 				signal_wake_up(t, 0);
 			}
-		sig->group_stop_count = stop_count;
 	}
+	/*
+	 * If there are no other threads in the group, or if there is
+	 * a group stop in progress and we are the last to stop, report
+	 * to the parent. When ptraced, every thread reports itself.
+	 */
+	notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
+	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
+	/*
+	 * tracehook_notify_jctl() can drop and reacquire siglock, so
+	 * we keep ->group_stop_count != 0 before the call. If SIGCONT
+	 * or SIGKILL comes in between ->group_stop_count == 0.
+	 */
+	if (sig->group_stop_count) {
+		if (!--sig->group_stop_count)
+			sig->flags = SIGNAL_STOP_STOPPED;
+		current->exit_code = sig->group_exit_code;
+		__set_current_state(TASK_STOPPED);
+	}
+	spin_unlock_irq(&current->sighand->siglock);
 
-	if (stop_count == 0)
-		sig->flags = SIGNAL_STOP_STOPPED;
-	current->exit_code = sig->group_exit_code;
-	__set_current_state(TASK_STOPPED);
+	if (notify) {
+		read_lock(&tasklist_lock);
+		do_notify_parent_cldstop(current, notify);
+		read_unlock(&tasklist_lock);
+	}
+
+	/* Now we don't run again until woken by SIGCONT or SIGKILL */
+	do {
+		schedule();
+	} while (try_to_freeze());
+
+	tracehook_finish_jctl();
+	current->exit_code = 0;
 
-	spin_unlock_irq(&current->sighand->siglock);
-	finish_stop(stop_count);
 	return 1;
 }
 
@@ -1806,14 +1802,15 @@ relock:
 		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
 				? CLD_CONTINUED : CLD_STOPPED;
 		signal->flags &= ~SIGNAL_CLD_MASK;
-		spin_unlock_irq(&sighand->siglock);
 
-		if (unlikely(!tracehook_notify_jctl(1, why)))
-			goto relock;
+		why = tracehook_notify_jctl(why, CLD_CONTINUED);
+		spin_unlock_irq(&sighand->siglock);
 
-		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(current->group_leader, why);
-		read_unlock(&tasklist_lock);
+		if (why) {
+			read_lock(&tasklist_lock);
+			do_notify_parent_cldstop(current->group_leader, why);
+			read_unlock(&tasklist_lock);
+		}
 		goto relock;
 	}
 
@@ -1978,14 +1975,14 @@ void exit_signals(struct task_struct *tsk)
 	if (unlikely(tsk->signal->group_stop_count) &&
 			!--tsk->signal->group_stop_count) {
 		tsk->signal->flags = SIGNAL_STOP_STOPPED;
-		group_stop = 1;
+		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
 	}
 out:
 	spin_unlock_irq(&tsk->sighand->siglock);
 
-	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
+	if (unlikely(group_stop)) {
 		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(tsk, CLD_STOPPED);
+		do_notify_parent_cldstop(tsk, group_stop);
 		read_unlock(&tasklist_lock);
 	}
 }