author      Tejun Heo <tj@kernel.org>    2011-03-23 05:37:00 -0400
committer   Tejun Heo <tj@kernel.org>    2011-03-23 05:37:00 -0400
commit      39efa3ef3a376a4e53de2f82fc91182459d34200
tree        f75a44fa77396edcf74e41686912554f96681340 /kernel/signal.c
parent      e5c1902e9260a0075ea52cb5ef627a8d9aaede89
signal: Use GROUP_STOP_PENDING to stop once for a single group stop
Currently, task->signal->group_stop_count is used to decide whether to
stop for a group stop. However, if there is a task in the group which
is taking a long time to stop, other tasks which are continued by
ptrace would repeatedly stop for the same group stop until the group
stop is complete.
Conversely, if a ptraced task is in TASK_TRACED state, the debugger
won't get notified of group stops, which is inconsistent with how
ptraced tasks in any other state are handled.
This patch introduces GROUP_STOP_PENDING, which tracks whether a task
has yet to stop for the group stop in progress. The flag is set when a
group stop starts, cleared when the task stops for the first time for
that group stop, and consulted whenever it needs to be determined
whether the task should participate in a group stop. Note that tasks
in TASK_TRACED now also participate in group stops.
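To make the flag's lifecycle concrete, here is a minimal userspace model
of the mechanism described above. It is a sketch only: struct task,
struct signal, the helper names and the bit values are simplified
stand-ins chosen for illustration, not the kernel's definitions (the
real changes are in the diff below).

/* Userspace model of the GROUP_STOP_PENDING lifecycle -- illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define GROUP_STOP_PENDING	(1U << 16)	/* placeholder bit: task still has to stop */
#define GROUP_STOP_CONSUME	(1U << 17)	/* placeholder bit: task consumes one count */

struct signal { int group_stop_count; };		/* stand-in for signal_struct */
struct task   { unsigned int group_stop; struct signal *signal; };

/* A group stop starts: mark every thread PENDING|CONSUME and count it. */
static void start_group_stop(struct task *threads, int n)
{
	for (int i = 0; i < n; i++) {
		threads[i].group_stop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
		threads[i].signal->group_stop_count++;
	}
}

/*
 * A thread stops: clear its own PENDING bit and consume one count.
 * A later stop attempt sees PENDING clear and does not participate again,
 * which is what prevents repeated stops for the same group stop.
 */
static bool participate(struct task *t)
{
	bool consume = t->group_stop & GROUP_STOP_CONSUME;

	t->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME);
	return consume && --t->signal->group_stop_count == 0;
}

int main(void)
{
	struct signal sig = { 0 };
	struct task threads[3] = { { 0, &sig }, { 0, &sig }, { 0, &sig } };

	start_group_stop(threads, 3);
	for (int i = 0; i < 3; i++)
		if (threads[i].group_stop & GROUP_STOP_PENDING)	/* the new check */
			printf("thread %d stops, group stop complete: %d\n",
			       i, participate(&threads[i]));

	/* Thread 0 stopping again is a no-op: its PENDING bit is already clear. */
	printf("thread 0 pending again: %d\n",
	       !!(threads[0].group_stop & GROUP_STOP_PENDING));
	return 0;
}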
This results in the following behavior changes.
* For a single group stop, a ptracer would see at most one stop
reported.
* A ptracee in TASK_TRACED now also participates in group stop and the
tracer would get the notification. However, as a ptraced task could
be in TASK_STOPPED state or any ptrace trap could consume the group
stop, the notification may still be missing. These will be
addressed with further patches.
* A ptracee may start a group stop while one is still in progress if
the tracer lets it continue with stop signal delivery. Group stop
code handles this correctly.
Oleg:
* Spotted that a task might skip signal check even when its
GROUP_STOP_PENDING is set. Fixed by updating
recalc_sigpending_tsk() to check GROUP_STOP_PENDING instead of
group_stop_count.
* Pointed out that task->group_stop should be cleared whenever
task->signal->group_stop_count is cleared. Fixed accordingly.
* Pointed out the behavior inconsistency between TASK_TRACED and
RUNNING, and the last behavior change.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Roland McGrath <roland@redhat.com>
Diffstat (limited to 'kernel/signal.c')
-rw-r--r--   kernel/signal.c | 36
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index ecb20089eaff..a2e7a6527d24 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -124,7 +124,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 
 static int recalc_sigpending_tsk(struct task_struct *t)
 {
-	if (t->signal->group_stop_count > 0 ||
+	if ((t->group_stop & GROUP_STOP_PENDING) ||
 	    PENDING(&t->pending, &t->blocked) ||
 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
 		set_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -232,19 +232,19 @@ static inline void print_dropped_signal(int sig)
  * CONTEXT:
  * Must be called with @task->sighand->siglock held.
  */
-static void task_clear_group_stop_pending(struct task_struct *task)
+void task_clear_group_stop_pending(struct task_struct *task)
 {
-	task->group_stop &= ~GROUP_STOP_CONSUME;
+	task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME);
 }
 
 /**
  * task_participate_group_stop - participate in a group stop
  * @task: task participating in a group stop
  *
- * @task is participating in a group stop. Group stop states are cleared
- * and the group stop count is consumed if %GROUP_STOP_CONSUME was set. If
- * the consumption completes the group stop, the appropriate %SIGNAL_*
- * flags are set.
+ * @task has GROUP_STOP_PENDING set and is participating in a group stop.
+ * Group stop states are cleared and the group stop count is consumed if
+ * %GROUP_STOP_CONSUME was set. If the consumption completes the group
+ * stop, the appropriate %SIGNAL_* flags are set.
  *
  * CONTEXT:
  * Must be called with @task->sighand->siglock held.
@@ -254,6 +254,8 @@ static bool task_participate_group_stop(struct task_struct *task)
 	struct signal_struct *sig = task->signal;
 	bool consume = task->group_stop & GROUP_STOP_CONSUME;
 
+	WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
+
 	task_clear_group_stop_pending(task);
 
 	if (!consume)
@@ -765,6 +767,9 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
 		t = p;
 		do {
 			unsigned int state;
+
+			task_clear_group_stop_pending(t);
+
 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
 			/*
 			 * If there is a handler for SIGCONT, we must make
@@ -906,6 +911,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
 			signal->group_stop_count = 0;
 			t = p;
 			do {
+				task_clear_group_stop_pending(t);
 				sigaddset(&t->pending.signal, SIGKILL);
 				signal_wake_up(t, 1);
 			} while_each_thread(p, t);
@@ -1139,6 +1145,7 @@ int zap_other_threads(struct task_struct *p)
 	p->signal->group_stop_count = 0;
 
 	while_each_thread(p, t) {
+		task_clear_group_stop_pending(t);
 		count++;
 
 		/* Don't bother with already dead threads */
@@ -1690,7 +1697,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 	 * If there is a group stop in progress,
 	 * we must participate in the bookkeeping.
 	 */
-	if (current->signal->group_stop_count > 0)
+	if (current->group_stop & GROUP_STOP_PENDING)
 		task_participate_group_stop(current);
 
 	current->last_siginfo = info;
@@ -1775,8 +1782,8 @@ static int do_signal_stop(int signr)
 	struct signal_struct *sig = current->signal;
 	int notify = 0;
 
-	if (!sig->group_stop_count) {
-		unsigned int gstop = GROUP_STOP_CONSUME;
+	if (!(current->group_stop & GROUP_STOP_PENDING)) {
+		unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
 		struct task_struct *t;
 
 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
@@ -1796,8 +1803,7 @@ static int do_signal_stop(int signr)
 			 * stop is always done with the siglock held,
 			 * so this check has no races.
 			 */
-			if (!(t->flags & PF_EXITING) &&
-			    !task_is_stopped_or_traced(t)) {
+			if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
 				t->group_stop = gstop;
 				sig->group_stop_count++;
 				signal_wake_up(t, 0);
@@ -1926,8 +1932,8 @@ relock:
 		if (unlikely(signr != 0))
 			ka = return_ka;
 		else {
-			if (unlikely(signal->group_stop_count > 0) &&
-			    do_signal_stop(0))
+			if (unlikely(current->group_stop &
+				     GROUP_STOP_PENDING) && do_signal_stop(0))
 				goto relock;
 
 			signr = dequeue_signal(current, &current->blocked,
@@ -2073,7 +2079,7 @@ void exit_signals(struct task_struct *tsk)
 		if (!signal_pending(t) && !(t->flags & PF_EXITING))
 			recalc_sigpending_and_wake(t);
 
-	if (unlikely(tsk->signal->group_stop_count) &&
+	if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
 	    task_participate_group_stop(tsk))
 		group_stop = CLD_STOPPED;
 out: