author     Tejun Heo <tj@kernel.org>         2011-06-02 05:13:59 -0400
committer  Oleg Nesterov <oleg@redhat.com>   2011-06-04 12:17:09 -0400
commit     a8f072c1d624a627b67f2ace2f0c25d856ef4e54
tree       9ba3e96aa874b08c7156a3584f27187bcdbdd9cd  /kernel/signal.c
parent     0b1007c3578569469a6fab6ae5cca918ccdc3ee1
job control: rename signal->group_stop and flags to jobctl and update them
signal->group_stop currently hosts mostly group stop related flags;
however, it's gonna be used for wider purposes and the GROUP_STOP_
flag prefix becomes confusing. Rename signal->group_stop to
signal->jobctl and rename all GROUP_STOP_* flags to JOBCTL_*.
Bit position macros JOBCTL_*_BIT are defined and JOBCTL_* flags are
defined in terms of them to allow using bitops later.
While at it, reassign JOBCTL_TRAPPING to bit 22 to better accommodate
future additions.
This doesn't cause any functional change.
-v2: JOBCTL_*_BIT macros added as suggested by Linus.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c  91
1 file changed, 46 insertions, 45 deletions
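The diffstat above is limited to kernel/signal.c, so the flag definitions themselves (which the same patch changes in include/linux/sched.h) are not shown here. As a rough sketch of the pattern the commit message describes — JOBCTL_*_BIT position macros with the JOBCTL_* flags defined in terms of them so bitops can be used later — the layout looks roughly like this. Only JOBCTL_TRAPPING's bit 22 is taken from the message; the other bit positions and the 0xffff stop-signal mask are illustrative assumptions, not the verbatim header contents:

```c
/*
 * Sketch of the task->jobctl flag layout described in the commit message.
 * Bit positions other than JOBCTL_TRAPPING (bit 22, per the message) are
 * assumed; the real definitions live in include/linux/sched.h.
 */
#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the pending group stop (assumed width) */

#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued (assumed position) */
#define JOBCTL_STOP_PENDING_BIT	 17	/* task should participate in group stop (assumed) */
#define JOBCTL_STOP_CONSUME_BIT	 18	/* consume group stop count (assumed) */
#define JOBCTL_TRAPPING_BIT	 22	/* switching to TRACED, bit 22 per the message */

/*
 * The flags are defined in terms of the _BIT macros so that later patches
 * can use set_bit()/clear_bit()-style bitops on the same positions while
 * plain mask tests such as (t->jobctl & JOBCTL_STOP_PENDING) keep working.
 */
#define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
```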
diff --git a/kernel/signal.c b/kernel/signal.c
index 86c32b884f8e..ab6851c06461 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -124,7 +124,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 
 static int recalc_sigpending_tsk(struct task_struct *t)
 {
-	if ((t->group_stop & GROUP_STOP_PENDING) ||
+	if ((t->jobctl & JOBCTL_STOP_PENDING) ||
 	    PENDING(&t->pending, &t->blocked) ||
 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
 		set_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -224,27 +224,28 @@ static inline void print_dropped_signal(int sig)
 }
 
 /**
- * task_clear_group_stop_trapping - clear group stop trapping bit
+ * task_clear_jobctl_trapping - clear jobctl trapping bit
  * @task: target task
  *
- * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us.  Clear it
- * and wake up the ptracer.  Note that we don't need any further locking.
- * @task->siglock guarantees that @task->parent points to the ptracer.
+ * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
+ * Clear it and wake up the ptracer.  Note that we don't need any further
+ * locking.  @task->siglock guarantees that @task->parent points to the
+ * ptracer.
  *
  * CONTEXT:
  * Must be called with @task->sighand->siglock held.
  */
-static void task_clear_group_stop_trapping(struct task_struct *task)
+static void task_clear_jobctl_trapping(struct task_struct *task)
 {
-	if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
-		task->group_stop &= ~GROUP_STOP_TRAPPING;
+	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
+		task->jobctl &= ~JOBCTL_TRAPPING;
 		__wake_up_sync_key(&task->parent->signal->wait_chldexit,
 				   TASK_UNINTERRUPTIBLE, 1, task);
 	}
 }
 
 /**
- * task_clear_group_stop_pending - clear pending group stop
+ * task_clear_jobctl_stop_pending - clear pending group stop
  * @task: target task
  *
  * Clear group stop states for @task.
@@ -252,19 +253,19 @@ static void task_clear_group_stop_trapping(struct task_struct *task)
  * CONTEXT:
  * Must be called with @task->sighand->siglock held.
  */
-void task_clear_group_stop_pending(struct task_struct *task)
+void task_clear_jobctl_stop_pending(struct task_struct *task)
 {
-	task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME |
-			      GROUP_STOP_DEQUEUED);
+	task->jobctl &= ~(JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME |
+			  JOBCTL_STOP_DEQUEUED);
 }
 
 /**
  * task_participate_group_stop - participate in a group stop
  * @task: task participating in a group stop
  *
- * @task has GROUP_STOP_PENDING set and is participating in a group stop.
+ * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
  * Group stop states are cleared and the group stop count is consumed if
- * %GROUP_STOP_CONSUME was set.  If the consumption completes the group
+ * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
  * stop, the appropriate %SIGNAL_* flags are set.
  *
  * CONTEXT:
@@ -277,11 +278,11 @@ void task_clear_group_stop_pending(struct task_struct *task)
 static bool task_participate_group_stop(struct task_struct *task)
 {
 	struct signal_struct *sig = task->signal;
-	bool consume = task->group_stop & GROUP_STOP_CONSUME;
+	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
 
-	WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
+	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
 
-	task_clear_group_stop_pending(task);
+	task_clear_jobctl_stop_pending(task);
 
 	if (!consume)
 		return false;
@@ -604,7 +605,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		 * is to alert stop-signal processing code when another
 		 * processor has come along and cleared the flag.
 		 */
-		current->group_stop |= GROUP_STOP_DEQUEUED;
+		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 	}
 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
 		/*
@@ -809,7 +810,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
 		t = p;
 		do {
-			task_clear_group_stop_pending(t);
+			task_clear_jobctl_stop_pending(t);
 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
 			wake_up_state(t, __TASK_STOPPED);
 		} while_each_thread(p, t);
@@ -925,7 +926,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
 			signal->group_stop_count = 0;
 			t = p;
 			do {
-				task_clear_group_stop_pending(t);
+				task_clear_jobctl_stop_pending(t);
 				sigaddset(&t->pending.signal, SIGKILL);
 				signal_wake_up(t, 1);
 			} while_each_thread(p, t);
@@ -1160,7 +1161,7 @@ int zap_other_threads(struct task_struct *p)
 	p->signal->group_stop_count = 0;
 
 	while_each_thread(p, t) {
-		task_clear_group_stop_pending(t);
+		task_clear_jobctl_stop_pending(t);
 		count++;
 
 		/* Don't bother with already dead threads */
@@ -1738,7 +1739,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 	 * clear now.  We act as if SIGCONT is received after TASK_TRACED
 	 * is entered - ignore it.
 	 */
-	if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
+	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
 		gstop_done = task_participate_group_stop(current);
 
 	current->last_siginfo = info;
@@ -1751,12 +1752,12 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 	set_current_state(TASK_TRACED);
 
 	/*
-	 * We're committing to trapping.  Clearing GROUP_STOP_TRAPPING and
+	 * We're committing to trapping.  Clearing JOBCTL_TRAPPING and
 	 * transition to TASK_TRACED should be atomic with respect to
-	 * siglock.  This hsould be done after the arch hook as siglock is
+	 * siglock.  This should be done after the arch hook as siglock is
 	 * released and regrabbed across it.
 	 */
-	task_clear_group_stop_trapping(current);
+	task_clear_jobctl_trapping(current);
 
 	spin_unlock_irq(&current->sighand->siglock);
 	read_lock(&tasklist_lock);
@@ -1792,9 +1793,9 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 		 *
 		 * If @gstop_done, the ptracer went away between group stop
 		 * completion and here.  During detach, it would have set
-		 * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED
-		 * in do_signal_stop() on return, so notifying the real
-		 * parent of the group stop completion is enough.
+		 * JOBCTL_STOP_PENDING on us and we'll re-enter
+		 * TASK_STOPPED in do_signal_stop() on return, so notifying
+		 * the real parent of the group stop completion is enough.
 		 */
 		if (gstop_done)
 			do_notify_parent_cldstop(current, false, why);
@@ -1856,14 +1857,14 @@ static int do_signal_stop(int signr)
 {
 	struct signal_struct *sig = current->signal;
 
-	if (!(current->group_stop & GROUP_STOP_PENDING)) {
-		unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
+	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
+		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
 		struct task_struct *t;
 
-		/* signr will be recorded in task->group_stop for retries */
-		WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
+		/* signr will be recorded in task->jobctl for retries */
+		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
 
-		if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) ||
+		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
 		    unlikely(signal_group_exit(sig)))
 			return 0;
 		/*
@@ -1890,19 +1891,19 @@ static int do_signal_stop(int signr)
 		else
 			WARN_ON_ONCE(!task_ptrace(current));
 
-		current->group_stop &= ~GROUP_STOP_SIGMASK;
-		current->group_stop |= signr | gstop;
+		current->jobctl &= ~JOBCTL_STOP_SIGMASK;
+		current->jobctl |= signr | gstop;
 		sig->group_stop_count = 1;
 		for (t = next_thread(current); t != current;
 		     t = next_thread(t)) {
-			t->group_stop &= ~GROUP_STOP_SIGMASK;
+			t->jobctl &= ~JOBCTL_STOP_SIGMASK;
 			/*
 			 * Setting state to TASK_STOPPED for a group
 			 * stop is always done with the siglock held,
 			 * so this check has no races.
 			 */
 			if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
-				t->group_stop |= signr | gstop;
+				t->jobctl |= signr | gstop;
 				sig->group_stop_count++;
 				signal_wake_up(t, 0);
 			}
@@ -1943,23 +1944,23 @@ retry:
 
 		spin_lock_irq(&current->sighand->siglock);
 	} else {
-		ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
+		ptrace_stop(current->jobctl & JOBCTL_STOP_SIGMASK,
 			    CLD_STOPPED, 0, NULL);
 		current->exit_code = 0;
 	}
 
 	/*
-	 * GROUP_STOP_PENDING could be set if another group stop has
+	 * JOBCTL_STOP_PENDING could be set if another group stop has
 	 * started since being woken up or ptrace wants us to transit
 	 * between TASK_STOPPED and TRACED.  Retry group stop.
 	 */
-	if (current->group_stop & GROUP_STOP_PENDING) {
-		WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
+	if (current->jobctl & JOBCTL_STOP_PENDING) {
+		WARN_ON_ONCE(!(current->jobctl & JOBCTL_STOP_SIGMASK));
 		goto retry;
 	}
 
 	/* PTRACE_ATTACH might have raced with task killing, clear trapping */
-	task_clear_group_stop_trapping(current);
+	task_clear_jobctl_trapping(current);
 
 	spin_unlock_irq(&current->sighand->siglock);
 
@@ -2078,8 +2079,8 @@ relock:
 		if (unlikely(signr != 0))
 			ka = return_ka;
 		else {
-			if (unlikely(current->group_stop &
-				     GROUP_STOP_PENDING) && do_signal_stop(0))
+			if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
+			    do_signal_stop(0))
 				goto relock;
 
 			signr = dequeue_signal(current, &current->blocked,
@@ -2253,7 +2254,7 @@ void exit_signals(struct task_struct *tsk)
 	signotset(&unblocked);
 	retarget_shared_pending(tsk, &unblocked);
 
-	if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
+	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
 	    task_participate_group_stop(tsk))
 		group_stop = CLD_STOPPED;
 out: