author     Linus Torvalds <torvalds@linux-foundation.org>   2011-05-20 16:33:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-05-20 16:33:21 -0400
commit     3ed4c0583daa34dedb568b26ff99e5a7b58db612 (patch)
tree       a531d4cc94acaa58fe0600cf83da9fb8b77f6e50
parent     ad9471752ebae25daa133b4e5d9299809c35e155 (diff)
parent     bd715d9a4f13f87bad5526c2cd41370949473b16 (diff)
Merge branch 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc
* 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc: (41 commits)
signal: trivial, fix the "timespec declared inside parameter list" warning
job control: reorganize wait_task_stopped()
ptrace: fix signal->wait_chldexit usage in task_clear_group_stop_trapping()
signal: sys_sigprocmask() needs retarget_shared_pending()
signal: cleanup sys_sigprocmask()
signal: rename signandsets() to sigandnsets()
signal: do_sigtimedwait() needs retarget_shared_pending()
signal: introduce do_sigtimedwait() to factor out compat/native code
signal: sys_rt_sigtimedwait: simplify the timeout logic
signal: cleanup sys_rt_sigprocmask()
x86: signal: sys_rt_sigreturn() should use set_current_blocked()
x86: signal: handle_signal() should use set_current_blocked()
signal: sigprocmask() should do retarget_shared_pending()
signal: sigprocmask: narrow the scope of ->siglock
signal: retarget_shared_pending: optimize while_each_thread() loop
signal: retarget_shared_pending: consider shared/unblocked signals only
signal: introduce retarget_shared_pending()
ptrace: ptrace_check_attach() should not do s/STOPPED/TRACED/
signal: Turn SIGNAL_STOP_DEQUEUED into GROUP_STOP_DEQUEUED
signal: do_signal_stop: Remove the unneeded task_clear_group_stop_pending()
...
-rw-r--r--  arch/x86/kernel/signal.c   |  14
-rw-r--r--  fs/exec.c                  |   1
-rw-r--r--  include/linux/sched.h      |  17
-rw-r--r--  include/linux/signal.h     |  13
-rw-r--r--  include/linux/tracehook.h  |  27
-rw-r--r--  kernel/compat.c            |  47
-rw-r--r--  kernel/exit.c              | 110
-rw-r--r--  kernel/ptrace.c            | 118
-rw-r--r--  kernel/signal.c            | 678
9 files changed, 659 insertions, 366 deletions
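Several of the patches merged here (the x86 sys_rt_sigreturn()/handle_signal() changes below and the sigprocmask rework) replace the open-coded "take siglock, assign ->blocked, recalc_sigpending(), drop siglock" sequence with the new set_current_blocked() helper. The snippet below is only a rough sketch of the helper's shape as implied by the callers in this diff, not the actual kernel/signal.c implementation; the real helper additionally retargets shared pending signals that the new mask leaves blocked.

    /* Sketch only: mirrors the open-coded sequence being removed below.
     * The real helper also invokes retarget_shared_pending() so that
     * shared signals newly blocked here are picked up by another thread. */
    void set_current_blocked(const sigset_t *newset)
    {
            struct task_struct *tsk = current;

            spin_lock_irq(&tsk->sighand->siglock);
            tsk->blocked = *newset;
            recalc_sigpending();
            spin_unlock_irq(&tsk->sighand->siglock);
    }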
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 4fd173cd8e57..40a24932a8a1 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -601,10 +601,7 @@ long sys_rt_sigreturn(struct pt_regs *regs) | |||
601 | goto badframe; | 601 | goto badframe; |
602 | 602 | ||
603 | sigdelsetmask(&set, ~_BLOCKABLE); | 603 | sigdelsetmask(&set, ~_BLOCKABLE); |
604 | spin_lock_irq(¤t->sighand->siglock); | 604 | set_current_blocked(&set); |
605 | current->blocked = set; | ||
606 | recalc_sigpending(); | ||
607 | spin_unlock_irq(¤t->sighand->siglock); | ||
608 | 605 | ||
609 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) | 606 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) |
610 | goto badframe; | 607 | goto badframe; |
@@ -682,6 +679,7 @@ static int | |||
682 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | 679 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, |
683 | sigset_t *oldset, struct pt_regs *regs) | 680 | sigset_t *oldset, struct pt_regs *regs) |
684 | { | 681 | { |
682 | sigset_t blocked; | ||
685 | int ret; | 683 | int ret; |
686 | 684 | ||
687 | /* Are we from a system call? */ | 685 | /* Are we from a system call? */ |
@@ -741,12 +739,10 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
741 | */ | 739 | */ |
742 | regs->flags &= ~X86_EFLAGS_TF; | 740 | regs->flags &= ~X86_EFLAGS_TF; |
743 | 741 | ||
744 | spin_lock_irq(¤t->sighand->siglock); | 742 | sigorsets(&blocked, ¤t->blocked, &ka->sa.sa_mask); |
745 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); | ||
746 | if (!(ka->sa.sa_flags & SA_NODEFER)) | 743 | if (!(ka->sa.sa_flags & SA_NODEFER)) |
747 | sigaddset(¤t->blocked, sig); | 744 | sigaddset(&blocked, sig); |
748 | recalc_sigpending(); | 745 | set_current_blocked(&blocked); |
749 | spin_unlock_irq(¤t->sighand->siglock); | ||
750 | 746 | ||
751 | tracehook_signal_handler(sig, info, ka, regs, | 747 | tracehook_signal_handler(sig, info, ka, regs, |
752 | test_thread_flag(TIF_SINGLESTEP)); | 748 | test_thread_flag(TIF_SINGLESTEP)); |
diff --git a/fs/exec.c b/fs/exec.c
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1659,6 +1659,7 @@ static int zap_process(struct task_struct *start, int exit_code) | |||
1659 | 1659 | ||
1660 | t = start; | 1660 | t = start; |
1661 | do { | 1661 | do { |
1662 | task_clear_group_stop_pending(t); | ||
1662 | if (t != current && t->mm) { | 1663 | if (t != current && t->mm) { |
1663 | sigaddset(&t->pending.signal, SIGKILL); | 1664 | sigaddset(&t->pending.signal, SIGKILL); |
1664 | signal_wake_up(t, 1); | 1665 | signal_wake_up(t, 1); |
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 12211e1666e2..885c4f242ad7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -653,9 +653,8 @@ struct signal_struct { | |||
653 | * Bits in flags field of signal_struct. | 653 | * Bits in flags field of signal_struct. |
654 | */ | 654 | */ |
655 | #define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ | 655 | #define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ |
656 | #define SIGNAL_STOP_DEQUEUED 0x00000002 /* stop signal dequeued */ | 656 | #define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ |
657 | #define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */ | 657 | #define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ |
658 | #define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */ | ||
659 | /* | 658 | /* |
660 | * Pending notifications to parent. | 659 | * Pending notifications to parent. |
661 | */ | 660 | */ |
@@ -1251,6 +1250,7 @@ struct task_struct { | |||
1251 | int exit_state; | 1250 | int exit_state; |
1252 | int exit_code, exit_signal; | 1251 | int exit_code, exit_signal; |
1253 | int pdeath_signal; /* The signal sent when the parent dies */ | 1252 | int pdeath_signal; /* The signal sent when the parent dies */ |
1253 | unsigned int group_stop; /* GROUP_STOP_*, siglock protected */ | ||
1254 | /* ??? */ | 1254 | /* ??? */ |
1255 | unsigned int personality; | 1255 | unsigned int personality; |
1256 | unsigned did_exec:1; | 1256 | unsigned did_exec:1; |
@@ -1771,6 +1771,17 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1771 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) | 1771 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) |
1772 | #define used_math() tsk_used_math(current) | 1772 | #define used_math() tsk_used_math(current) |
1773 | 1773 | ||
1774 | /* | ||
1775 | * task->group_stop flags | ||
1776 | */ | ||
1777 | #define GROUP_STOP_SIGMASK 0xffff /* signr of the last group stop */ | ||
1778 | #define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */ | ||
1779 | #define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */ | ||
1780 | #define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */ | ||
1781 | #define GROUP_STOP_DEQUEUED (1 << 19) /* stop signal dequeued */ | ||
1782 | |||
1783 | extern void task_clear_group_stop_pending(struct task_struct *task); | ||
1784 | |||
1774 | #ifdef CONFIG_PREEMPT_RCU | 1785 | #ifdef CONFIG_PREEMPT_RCU |
1775 | 1786 | ||
1776 | #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ | 1787 | #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ |
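For orientation, the new task->group_stop word packs two things, per the definitions added above: the low 16 bits (GROUP_STOP_SIGMASK) record the signal number of the current group stop, and the bits above them are state flags. The fragment below is an illustration of that packing only, mirroring how do_signal_stop() manipulates the field later in this diff.

    /* Illustration only, using the GROUP_STOP_* definitions above. */
    unsigned int group_stop = 0;

    /* start a group stop on SIGTSTP: record signr plus state bits */
    group_stop &= ~GROUP_STOP_SIGMASK;
    group_stop |= SIGTSTP | GROUP_STOP_PENDING | GROUP_STOP_CONSUME;

    /* later, recover the signal number that caused the stop */
    int signr = group_stop & GROUP_STOP_SIGMASK;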
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 29a68ac7af83..a822300a253b 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -125,13 +125,13 @@ _SIG_SET_BINOP(sigorsets, _sig_or) | |||
125 | #define _sig_and(x,y) ((x) & (y)) | 125 | #define _sig_and(x,y) ((x) & (y)) |
126 | _SIG_SET_BINOP(sigandsets, _sig_and) | 126 | _SIG_SET_BINOP(sigandsets, _sig_and) |
127 | 127 | ||
128 | #define _sig_nand(x,y) ((x) & ~(y)) | 128 | #define _sig_andn(x,y) ((x) & ~(y)) |
129 | _SIG_SET_BINOP(signandsets, _sig_nand) | 129 | _SIG_SET_BINOP(sigandnsets, _sig_andn) |
130 | 130 | ||
131 | #undef _SIG_SET_BINOP | 131 | #undef _SIG_SET_BINOP |
132 | #undef _sig_or | 132 | #undef _sig_or |
133 | #undef _sig_and | 133 | #undef _sig_and |
134 | #undef _sig_nand | 134 | #undef _sig_andn |
135 | 135 | ||
136 | #define _SIG_SET_OP(name, op) \ | 136 | #define _SIG_SET_OP(name, op) \ |
137 | static inline void name(sigset_t *set) \ | 137 | static inline void name(sigset_t *set) \ |
@@ -236,6 +236,9 @@ static inline int valid_signal(unsigned long sig) | |||
236 | return sig <= _NSIG ? 1 : 0; | 236 | return sig <= _NSIG ? 1 : 0; |
237 | } | 237 | } |
238 | 238 | ||
239 | struct timespec; | ||
240 | struct pt_regs; | ||
241 | |||
239 | extern int next_signal(struct sigpending *pending, sigset_t *mask); | 242 | extern int next_signal(struct sigpending *pending, sigset_t *mask); |
240 | extern int do_send_sig_info(int sig, struct siginfo *info, | 243 | extern int do_send_sig_info(int sig, struct siginfo *info, |
241 | struct task_struct *p, bool group); | 244 | struct task_struct *p, bool group); |
@@ -244,10 +247,12 @@ extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); | |||
244 | extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, | 247 | extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, |
245 | siginfo_t *info); | 248 | siginfo_t *info); |
246 | extern long do_sigpending(void __user *, unsigned long); | 249 | extern long do_sigpending(void __user *, unsigned long); |
250 | extern int do_sigtimedwait(const sigset_t *, siginfo_t *, | ||
251 | const struct timespec *); | ||
247 | extern int sigprocmask(int, sigset_t *, sigset_t *); | 252 | extern int sigprocmask(int, sigset_t *, sigset_t *); |
253 | extern void set_current_blocked(const sigset_t *); | ||
248 | extern int show_unhandled_signals; | 254 | extern int show_unhandled_signals; |
249 | 255 | ||
250 | struct pt_regs; | ||
251 | extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie); | 256 | extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie); |
252 | extern void exit_signals(struct task_struct *tsk); | 257 | extern void exit_signals(struct task_struct *tsk); |
253 | 258 | ||
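The signandsets() -> sigandnsets() rename above is purely about naming: the operation has always been x & ~y ("and-not"), while the old name read like a NAND, ~(x & y). Assuming the usual _SIG_SET_BINOP() expansion, the generated helper looks roughly like this for the single-word sigset case (the real macro also handles multi-word sigsets).

    /* Rough single-word expansion of _SIG_SET_BINOP(sigandnsets, _sig_andn). */
    static inline void sigandnsets(sigset_t *r,
                                   const sigset_t *a, const sigset_t *b)
    {
            r->sig[0] = a->sig[0] & ~b->sig[0];   /* and-not, not nand */
    }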
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index ebcfa4ebdbf8..e95f5236611f 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -469,33 +469,6 @@ static inline int tracehook_get_signal(struct task_struct *task, | |||
469 | } | 469 | } |
470 | 470 | ||
471 | /** | 471 | /** |
472 | * tracehook_notify_jctl - report about job control stop/continue | ||
473 | * @notify: zero, %CLD_STOPPED or %CLD_CONTINUED | ||
474 | * @why: %CLD_STOPPED or %CLD_CONTINUED | ||
475 | * | ||
476 | * This is called when we might call do_notify_parent_cldstop(). | ||
477 | * | ||
478 | * @notify is zero if we would not ordinarily send a %SIGCHLD, | ||
479 | * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD. | ||
480 | * | ||
481 | * @why is %CLD_STOPPED when about to stop for job control; | ||
482 | * we are already in %TASK_STOPPED state, about to call schedule(). | ||
483 | * It might also be that we have just exited (check %PF_EXITING), | ||
484 | * but need to report that a group-wide stop is complete. | ||
485 | * | ||
486 | * @why is %CLD_CONTINUED when waking up after job control stop and | ||
487 | * ready to make a delayed @notify report. | ||
488 | * | ||
489 | * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal. | ||
490 | * | ||
491 | * Called with the siglock held. | ||
492 | */ | ||
493 | static inline int tracehook_notify_jctl(int notify, int why) | ||
494 | { | ||
495 | return notify ?: (current->ptrace & PT_PTRACED) ? why : 0; | ||
496 | } | ||
497 | |||
498 | /** | ||
499 | * tracehook_finish_jctl - report about return from job control stop | 472 | * tracehook_finish_jctl - report about return from job control stop |
500 | * | 473 | * |
501 | * This is called by do_signal_stop() after wakeup. | 474 | * This is called by do_signal_stop() after wakeup. |
diff --git a/kernel/compat.c b/kernel/compat.c
index 38b1d2c1cbe8..9214dcd087b7 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -890,10 +890,9 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese, | |||
890 | { | 890 | { |
891 | compat_sigset_t s32; | 891 | compat_sigset_t s32; |
892 | sigset_t s; | 892 | sigset_t s; |
893 | int sig; | ||
894 | struct timespec t; | 893 | struct timespec t; |
895 | siginfo_t info; | 894 | siginfo_t info; |
896 | long ret, timeout = 0; | 895 | long ret; |
897 | 896 | ||
898 | if (sigsetsize != sizeof(sigset_t)) | 897 | if (sigsetsize != sizeof(sigset_t)) |
899 | return -EINVAL; | 898 | return -EINVAL; |
@@ -901,51 +900,19 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese, | |||
901 | if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t))) | 900 | if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t))) |
902 | return -EFAULT; | 901 | return -EFAULT; |
903 | sigset_from_compat(&s, &s32); | 902 | sigset_from_compat(&s, &s32); |
904 | sigdelsetmask(&s,sigmask(SIGKILL)|sigmask(SIGSTOP)); | ||
905 | signotset(&s); | ||
906 | 903 | ||
907 | if (uts) { | 904 | if (uts) { |
908 | if (get_compat_timespec (&t, uts)) | 905 | if (get_compat_timespec(&t, uts)) |
909 | return -EFAULT; | 906 | return -EFAULT; |
910 | if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 | ||
911 | || t.tv_sec < 0) | ||
912 | return -EINVAL; | ||
913 | } | 907 | } |
914 | 908 | ||
915 | spin_lock_irq(¤t->sighand->siglock); | 909 | ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); |
916 | sig = dequeue_signal(current, &s, &info); | ||
917 | if (!sig) { | ||
918 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
919 | if (uts) | ||
920 | timeout = timespec_to_jiffies(&t) | ||
921 | +(t.tv_sec || t.tv_nsec); | ||
922 | if (timeout) { | ||
923 | current->real_blocked = current->blocked; | ||
924 | sigandsets(¤t->blocked, ¤t->blocked, &s); | ||
925 | |||
926 | recalc_sigpending(); | ||
927 | spin_unlock_irq(¤t->sighand->siglock); | ||
928 | |||
929 | timeout = schedule_timeout_interruptible(timeout); | ||
930 | |||
931 | spin_lock_irq(¤t->sighand->siglock); | ||
932 | sig = dequeue_signal(current, &s, &info); | ||
933 | current->blocked = current->real_blocked; | ||
934 | siginitset(¤t->real_blocked, 0); | ||
935 | recalc_sigpending(); | ||
936 | } | ||
937 | } | ||
938 | spin_unlock_irq(¤t->sighand->siglock); | ||
939 | 910 | ||
940 | if (sig) { | 911 | if (ret > 0 && uinfo) { |
941 | ret = sig; | 912 | if (copy_siginfo_to_user32(uinfo, &info)) |
942 | if (uinfo) { | 913 | ret = -EFAULT; |
943 | if (copy_siginfo_to_user32(uinfo, &info)) | ||
944 | ret = -EFAULT; | ||
945 | } | ||
946 | }else { | ||
947 | ret = timeout?-EINTR:-EAGAIN; | ||
948 | } | 914 | } |
915 | |||
949 | return ret; | 916 | return ret; |
950 | 917 | ||
951 | } | 918 | } |
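With the change above, compat_sys_rt_sigtimedwait() no longer open-codes the dequeue/temporarily-unblock/sleep sequence; it validates the compat arguments and hands off to the shared do_sigtimedwait(), as the native sys_rt_sigtimedwait() now does. Userspace behaviour is unchanged; for reference, a minimal exerciser of the syscall (illustrative, not part of this patch set):

    /* Block SIGUSR1, then wait for it with a 2 second timeout.
     * sigtimedwait() returns the signal number, or -1 with errno
     * set to EAGAIN on timeout. */
    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            sigset_t set;
            siginfo_t info;
            struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

            sigemptyset(&set);
            sigaddset(&set, SIGUSR1);
            sigprocmask(SIG_BLOCK, &set, NULL);   /* must be blocked first */

            int sig = sigtimedwait(&set, &info, &ts);
            if (sig < 0)
                    perror("sigtimedwait");
            else
                    printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
            return 0;
    }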
diff --git a/kernel/exit.c b/kernel/exit.c
index 8dd874181542..20a406471525 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1377,11 +1377,23 @@ static int *task_stopped_code(struct task_struct *p, bool ptrace) | |||
1377 | return NULL; | 1377 | return NULL; |
1378 | } | 1378 | } |
1379 | 1379 | ||
1380 | /* | 1380 | /** |
1381 | * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold | 1381 | * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED |
1382 | * read_lock(&tasklist_lock) on entry. If we return zero, we still hold | 1382 | * @wo: wait options |
1383 | * the lock and this task is uninteresting. If we return nonzero, we have | 1383 | * @ptrace: is the wait for ptrace |
1384 | * released the lock and the system call should return. | 1384 | * @p: task to wait for |
1385 | * | ||
1386 | * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED. | ||
1387 | * | ||
1388 | * CONTEXT: | ||
1389 | * read_lock(&tasklist_lock), which is released if return value is | ||
1390 | * non-zero. Also, grabs and releases @p->sighand->siglock. | ||
1391 | * | ||
1392 | * RETURNS: | ||
1393 | * 0 if wait condition didn't exist and search for other wait conditions | ||
1394 | * should continue. Non-zero return, -errno on failure and @p's pid on | ||
1395 | * success, implies that tasklist_lock is released and wait condition | ||
1396 | * search should terminate. | ||
1385 | */ | 1397 | */ |
1386 | static int wait_task_stopped(struct wait_opts *wo, | 1398 | static int wait_task_stopped(struct wait_opts *wo, |
1387 | int ptrace, struct task_struct *p) | 1399 | int ptrace, struct task_struct *p) |
@@ -1397,6 +1409,9 @@ static int wait_task_stopped(struct wait_opts *wo, | |||
1397 | if (!ptrace && !(wo->wo_flags & WUNTRACED)) | 1409 | if (!ptrace && !(wo->wo_flags & WUNTRACED)) |
1398 | return 0; | 1410 | return 0; |
1399 | 1411 | ||
1412 | if (!task_stopped_code(p, ptrace)) | ||
1413 | return 0; | ||
1414 | |||
1400 | exit_code = 0; | 1415 | exit_code = 0; |
1401 | spin_lock_irq(&p->sighand->siglock); | 1416 | spin_lock_irq(&p->sighand->siglock); |
1402 | 1417 | ||
@@ -1538,33 +1553,84 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, | |||
1538 | return 0; | 1553 | return 0; |
1539 | } | 1554 | } |
1540 | 1555 | ||
1541 | if (likely(!ptrace) && unlikely(task_ptrace(p))) { | 1556 | /* dead body doesn't have much to contribute */ |
1557 | if (p->exit_state == EXIT_DEAD) | ||
1558 | return 0; | ||
1559 | |||
1560 | /* slay zombie? */ | ||
1561 | if (p->exit_state == EXIT_ZOMBIE) { | ||
1562 | /* | ||
1563 | * A zombie ptracee is only visible to its ptracer. | ||
1564 | * Notification and reaping will be cascaded to the real | ||
1565 | * parent when the ptracer detaches. | ||
1566 | */ | ||
1567 | if (likely(!ptrace) && unlikely(task_ptrace(p))) { | ||
1568 | /* it will become visible, clear notask_error */ | ||
1569 | wo->notask_error = 0; | ||
1570 | return 0; | ||
1571 | } | ||
1572 | |||
1573 | /* we don't reap group leaders with subthreads */ | ||
1574 | if (!delay_group_leader(p)) | ||
1575 | return wait_task_zombie(wo, p); | ||
1576 | |||
1542 | /* | 1577 | /* |
1543 | * This child is hidden by ptrace. | 1578 | * Allow access to stopped/continued state via zombie by |
1544 | * We aren't allowed to see it now, but eventually we will. | 1579 | * falling through. Clearing of notask_error is complex. |
1580 | * | ||
1581 | * When !@ptrace: | ||
1582 | * | ||
1583 | * If WEXITED is set, notask_error should naturally be | ||
1584 | * cleared. If not, subset of WSTOPPED|WCONTINUED is set, | ||
1585 | * so, if there are live subthreads, there are events to | ||
1586 | * wait for. If all subthreads are dead, it's still safe | ||
1587 | * to clear - this function will be called again in a finite | ||
1588 | * amount of time once all the subthreads are released and | ||
1589 | * will then return without clearing. | ||
1590 | * | ||
1591 | * When @ptrace: | ||
1592 | * | ||
1593 | * Stopped state is per-task and thus can't change once the | ||
1594 | * target task dies. Only continued and exited can happen. | ||
1595 | * Clear notask_error if WCONTINUED | WEXITED. | ||
1596 | */ | ||
1597 | if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED))) | ||
1598 | wo->notask_error = 0; | ||
1599 | } else { | ||
1600 | /* | ||
1601 | * If @p is ptraced by a task in its real parent's group, | ||
1602 | * hide group stop/continued state when looking at @p as | ||
1603 | * the real parent; otherwise, a single stop can be | ||
1604 | * reported twice as group and ptrace stops. | ||
1605 | * | ||
1606 | * If a ptracer wants to distinguish the two events for its | ||
1607 | * own children, it should create a separate process which | ||
1608 | * takes the role of real parent. | ||
1609 | */ | ||
1610 | if (likely(!ptrace) && task_ptrace(p) && | ||
1611 | same_thread_group(p->parent, p->real_parent)) | ||
1612 | return 0; | ||
1613 | |||
1614 | /* | ||
1615 | * @p is alive and it's gonna stop, continue or exit, so | ||
1616 | * there always is something to wait for. | ||
1545 | */ | 1617 | */ |
1546 | wo->notask_error = 0; | 1618 | wo->notask_error = 0; |
1547 | return 0; | ||
1548 | } | 1619 | } |
1549 | 1620 | ||
1550 | if (p->exit_state == EXIT_DEAD) | ||
1551 | return 0; | ||
1552 | |||
1553 | /* | 1621 | /* |
1554 | * We don't reap group leaders with subthreads. | 1622 | * Wait for stopped. Depending on @ptrace, different stopped state |
1623 | * is used and the two don't interact with each other. | ||
1555 | */ | 1624 | */ |
1556 | if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p)) | 1625 | ret = wait_task_stopped(wo, ptrace, p); |
1557 | return wait_task_zombie(wo, p); | 1626 | if (ret) |
1627 | return ret; | ||
1558 | 1628 | ||
1559 | /* | 1629 | /* |
1560 | * It's stopped or running now, so it might | 1630 | * Wait for continued. There's only one continued state and the |
1561 | * later continue, exit, or stop again. | 1631 | * ptracer can consume it which can confuse the real parent. Don't |
1632 | * use WCONTINUED from ptracer. You don't need or want it. | ||
1562 | */ | 1633 | */ |
1563 | wo->notask_error = 0; | ||
1564 | |||
1565 | if (task_stopped_code(p, ptrace)) | ||
1566 | return wait_task_stopped(wo, ptrace, p); | ||
1567 | |||
1568 | return wait_task_continued(wo, p); | 1634 | return wait_task_continued(wo, p); |
1569 | } | 1635 | } |
1570 | 1636 | ||
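The wait_consider_task()/wait_task_stopped() reorganization above changes how the kernel walks children looking for something to report, but the userspace contract is still plain waitpid(2) with WUNTRACED/WCONTINUED. A small, self-contained reminder of that contract (standard POSIX usage, not tied to this patch set):

    #include <stdio.h>
    #include <signal.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
            pid_t pid = fork();
            if (pid == 0) {                       /* child just idles */
                    pause();
                    return 0;
            }

            int status;
            kill(pid, SIGSTOP);
            waitpid(pid, &status, WUNTRACED);     /* reports the group stop */
            if (WIFSTOPPED(status))
                    printf("child stopped by signal %d\n", WSTOPSIG(status));

            kill(pid, SIGCONT);
            waitpid(pid, &status, WCONTINUED);    /* reports the continue */
            if (WIFCONTINUED(status))
                    printf("child continued\n");

            kill(pid, SIGKILL);
            waitpid(pid, &status, 0);             /* reap the child */
            return 0;
    }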
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index dc7ab65f3b36..7a81fc071344 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -38,35 +38,33 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) | |||
38 | child->parent = new_parent; | 38 | child->parent = new_parent; |
39 | } | 39 | } |
40 | 40 | ||
41 | /* | 41 | /** |
42 | * Turn a tracing stop into a normal stop now, since with no tracer there | 42 | * __ptrace_unlink - unlink ptracee and restore its execution state |
43 | * would be no way to wake it up with SIGCONT or SIGKILL. If there was a | 43 | * @child: ptracee to be unlinked |
44 | * signal sent that would resume the child, but didn't because it was in | ||
45 | * TASK_TRACED, resume it now. | ||
46 | * Requires that irqs be disabled. | ||
47 | */ | ||
48 | static void ptrace_untrace(struct task_struct *child) | ||
49 | { | ||
50 | spin_lock(&child->sighand->siglock); | ||
51 | if (task_is_traced(child)) { | ||
52 | /* | ||
53 | * If the group stop is completed or in progress, | ||
54 | * this thread was already counted as stopped. | ||
55 | */ | ||
56 | if (child->signal->flags & SIGNAL_STOP_STOPPED || | ||
57 | child->signal->group_stop_count) | ||
58 | __set_task_state(child, TASK_STOPPED); | ||
59 | else | ||
60 | signal_wake_up(child, 1); | ||
61 | } | ||
62 | spin_unlock(&child->sighand->siglock); | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * unptrace a task: move it back to its original parent and | ||
67 | * remove it from the ptrace list. | ||
68 | * | 44 | * |
69 | * Must be called with the tasklist lock write-held. | 45 | * Remove @child from the ptrace list, move it back to the original parent, |
46 | * and restore the execution state so that it conforms to the group stop | ||
47 | * state. | ||
48 | * | ||
49 | * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer | ||
50 | * exiting. For PTRACE_DETACH, unless the ptracee has been killed between | ||
51 | * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED. | ||
52 | * If the ptracer is exiting, the ptracee can be in any state. | ||
53 | * | ||
54 | * After detach, the ptracee should be in a state which conforms to the | ||
55 | * group stop. If the group is stopped or in the process of stopping, the | ||
56 | * ptracee should be put into TASK_STOPPED; otherwise, it should be woken | ||
57 | * up from TASK_TRACED. | ||
58 | * | ||
59 | * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED, | ||
60 | * it goes through TRACED -> RUNNING -> STOPPED transition which is similar | ||
61 | * to but in the opposite direction of what happens while attaching to a | ||
62 | * stopped task. However, in this direction, the intermediate RUNNING | ||
63 | * state is not hidden even from the current ptracer and if it immediately | ||
64 | * re-attaches and performs a WNOHANG wait(2), it may fail. | ||
65 | * | ||
66 | * CONTEXT: | ||
67 | * write_lock_irq(tasklist_lock) | ||
70 | */ | 68 | */ |
71 | void __ptrace_unlink(struct task_struct *child) | 69 | void __ptrace_unlink(struct task_struct *child) |
72 | { | 70 | { |
@@ -76,8 +74,27 @@ void __ptrace_unlink(struct task_struct *child) | |||
76 | child->parent = child->real_parent; | 74 | child->parent = child->real_parent; |
77 | list_del_init(&child->ptrace_entry); | 75 | list_del_init(&child->ptrace_entry); |
78 | 76 | ||
79 | if (task_is_traced(child)) | 77 | spin_lock(&child->sighand->siglock); |
80 | ptrace_untrace(child); | 78 | |
79 | /* | ||
80 | * Reinstate GROUP_STOP_PENDING if group stop is in effect and | ||
81 | * @child isn't dead. | ||
82 | */ | ||
83 | if (!(child->flags & PF_EXITING) && | ||
84 | (child->signal->flags & SIGNAL_STOP_STOPPED || | ||
85 | child->signal->group_stop_count)) | ||
86 | child->group_stop |= GROUP_STOP_PENDING; | ||
87 | |||
88 | /* | ||
89 | * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick | ||
90 | * @child in the butt. Note that @resume should be used iff @child | ||
91 | * is in TASK_TRACED; otherwise, we might unduly disrupt | ||
92 | * TASK_KILLABLE sleeps. | ||
93 | */ | ||
94 | if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child)) | ||
95 | signal_wake_up(child, task_is_traced(child)); | ||
96 | |||
97 | spin_unlock(&child->sighand->siglock); | ||
81 | } | 98 | } |
82 | 99 | ||
83 | /* | 100 | /* |
@@ -96,16 +113,14 @@ int ptrace_check_attach(struct task_struct *child, int kill) | |||
96 | */ | 113 | */ |
97 | read_lock(&tasklist_lock); | 114 | read_lock(&tasklist_lock); |
98 | if ((child->ptrace & PT_PTRACED) && child->parent == current) { | 115 | if ((child->ptrace & PT_PTRACED) && child->parent == current) { |
99 | ret = 0; | ||
100 | /* | 116 | /* |
101 | * child->sighand can't be NULL, release_task() | 117 | * child->sighand can't be NULL, release_task() |
102 | * does ptrace_unlink() before __exit_signal(). | 118 | * does ptrace_unlink() before __exit_signal(). |
103 | */ | 119 | */ |
104 | spin_lock_irq(&child->sighand->siglock); | 120 | spin_lock_irq(&child->sighand->siglock); |
105 | if (task_is_stopped(child)) | 121 | WARN_ON_ONCE(task_is_stopped(child)); |
106 | child->state = TASK_TRACED; | 122 | if (task_is_traced(child) || kill) |
107 | else if (!task_is_traced(child) && !kill) | 123 | ret = 0; |
108 | ret = -ESRCH; | ||
109 | spin_unlock_irq(&child->sighand->siglock); | 124 | spin_unlock_irq(&child->sighand->siglock); |
110 | } | 125 | } |
111 | read_unlock(&tasklist_lock); | 126 | read_unlock(&tasklist_lock); |
@@ -169,6 +184,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) | |||
169 | 184 | ||
170 | static int ptrace_attach(struct task_struct *task) | 185 | static int ptrace_attach(struct task_struct *task) |
171 | { | 186 | { |
187 | bool wait_trap = false; | ||
172 | int retval; | 188 | int retval; |
173 | 189 | ||
174 | audit_ptrace(task); | 190 | audit_ptrace(task); |
@@ -208,12 +224,42 @@ static int ptrace_attach(struct task_struct *task) | |||
208 | __ptrace_link(task, current); | 224 | __ptrace_link(task, current); |
209 | send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); | 225 | send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); |
210 | 226 | ||
227 | spin_lock(&task->sighand->siglock); | ||
228 | |||
229 | /* | ||
230 | * If the task is already STOPPED, set GROUP_STOP_PENDING and | ||
231 | * TRAPPING, and kick it so that it transits to TRACED. TRAPPING | ||
232 | * will be cleared if the child completes the transition or any | ||
233 | * event which clears the group stop states happens. We'll wait | ||
234 | * for the transition to complete before returning from this | ||
235 | * function. | ||
236 | * | ||
237 | * This hides STOPPED -> RUNNING -> TRACED transition from the | ||
238 | * attaching thread but a different thread in the same group can | ||
239 | * still observe the transient RUNNING state. IOW, if another | ||
240 | * thread's WNOHANG wait(2) on the stopped tracee races against | ||
241 | * ATTACH, the wait(2) may fail due to the transient RUNNING. | ||
242 | * | ||
243 | * The following task_is_stopped() test is safe as both transitions | ||
244 | * in and out of STOPPED are protected by siglock. | ||
245 | */ | ||
246 | if (task_is_stopped(task)) { | ||
247 | task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING; | ||
248 | signal_wake_up(task, 1); | ||
249 | wait_trap = true; | ||
250 | } | ||
251 | |||
252 | spin_unlock(&task->sighand->siglock); | ||
253 | |||
211 | retval = 0; | 254 | retval = 0; |
212 | unlock_tasklist: | 255 | unlock_tasklist: |
213 | write_unlock_irq(&tasklist_lock); | 256 | write_unlock_irq(&tasklist_lock); |
214 | unlock_creds: | 257 | unlock_creds: |
215 | mutex_unlock(&task->signal->cred_guard_mutex); | 258 | mutex_unlock(&task->signal->cred_guard_mutex); |
216 | out: | 259 | out: |
260 | if (wait_trap) | ||
261 | wait_event(current->signal->wait_chldexit, | ||
262 | !(task->group_stop & GROUP_STOP_TRAPPING)); | ||
217 | return retval; | 263 | return retval; |
218 | } | 264 | } |
219 | 265 | ||
@@ -316,8 +362,6 @@ static int ptrace_detach(struct task_struct *child, unsigned int data) | |||
316 | if (child->ptrace) { | 362 | if (child->ptrace) { |
317 | child->exit_code = data; | 363 | child->exit_code = data; |
318 | dead = __ptrace_detach(current, child); | 364 | dead = __ptrace_detach(current, child); |
319 | if (!child->exit_state) | ||
320 | wake_up_state(child, TASK_TRACED | TASK_STOPPED); | ||
321 | } | 365 | } |
322 | write_unlock_irq(&tasklist_lock); | 366 | write_unlock_irq(&tasklist_lock); |
323 | 367 | ||
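A practical consequence of the ptrace_attach() change above: when attaching to a task that is already group-stopped, the attaching thread now waits for GROUP_STOP_TRAPPING to clear before returning, so the usual attach sequence below does not observe a transient not-stopped tracee (a sibling thread doing a WNOHANG wait still can, as the comment in the diff notes). The example is ordinary ptrace(2) usage, not code from this series:

    /* Conventional attach: PTRACE_ATTACH sends SIGSTOP and the tracer
     * collects the resulting stop with waitpid() before detaching. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    int main(int argc, char **argv)
    {
            if (argc < 2)
                    return 1;
            pid_t pid = (pid_t)atoi(argv[1]);
            int status;

            if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0) {
                    perror("PTRACE_ATTACH");
                    return 1;
            }
            waitpid(pid, &status, 0);             /* wait for the tracee to stop */
            if (WIFSTOPPED(status))
                    printf("attached, tracee stopped by signal %d\n",
                           WSTOPSIG(status));

            ptrace(PTRACE_DETACH, pid, NULL, NULL);
            return 0;
    }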
diff --git a/kernel/signal.c b/kernel/signal.c
index 7165af5f1b11..ad5e818baacc 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -124,7 +124,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) | |||
124 | 124 | ||
125 | static int recalc_sigpending_tsk(struct task_struct *t) | 125 | static int recalc_sigpending_tsk(struct task_struct *t) |
126 | { | 126 | { |
127 | if (t->signal->group_stop_count > 0 || | 127 | if ((t->group_stop & GROUP_STOP_PENDING) || |
128 | PENDING(&t->pending, &t->blocked) || | 128 | PENDING(&t->pending, &t->blocked) || |
129 | PENDING(&t->signal->shared_pending, &t->blocked)) { | 129 | PENDING(&t->signal->shared_pending, &t->blocked)) { |
130 | set_tsk_thread_flag(t, TIF_SIGPENDING); | 130 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
@@ -223,6 +223,83 @@ static inline void print_dropped_signal(int sig) | |||
223 | current->comm, current->pid, sig); | 223 | current->comm, current->pid, sig); |
224 | } | 224 | } |
225 | 225 | ||
226 | /** | ||
227 | * task_clear_group_stop_trapping - clear group stop trapping bit | ||
228 | * @task: target task | ||
229 | * | ||
230 | * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it | ||
231 | * and wake up the ptracer. Note that we don't need any further locking. | ||
232 | * @task->siglock guarantees that @task->parent points to the ptracer. | ||
233 | * | ||
234 | * CONTEXT: | ||
235 | * Must be called with @task->sighand->siglock held. | ||
236 | */ | ||
237 | static void task_clear_group_stop_trapping(struct task_struct *task) | ||
238 | { | ||
239 | if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) { | ||
240 | task->group_stop &= ~GROUP_STOP_TRAPPING; | ||
241 | __wake_up_sync_key(&task->parent->signal->wait_chldexit, | ||
242 | TASK_UNINTERRUPTIBLE, 1, task); | ||
243 | } | ||
244 | } | ||
245 | |||
246 | /** | ||
247 | * task_clear_group_stop_pending - clear pending group stop | ||
248 | * @task: target task | ||
249 | * | ||
250 | * Clear group stop states for @task. | ||
251 | * | ||
252 | * CONTEXT: | ||
253 | * Must be called with @task->sighand->siglock held. | ||
254 | */ | ||
255 | void task_clear_group_stop_pending(struct task_struct *task) | ||
256 | { | ||
257 | task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME | | ||
258 | GROUP_STOP_DEQUEUED); | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * task_participate_group_stop - participate in a group stop | ||
263 | * @task: task participating in a group stop | ||
264 | * | ||
265 | * @task has GROUP_STOP_PENDING set and is participating in a group stop. | ||
266 | * Group stop states are cleared and the group stop count is consumed if | ||
267 | * %GROUP_STOP_CONSUME was set. If the consumption completes the group | ||
268 | * stop, the appropriate %SIGNAL_* flags are set. | ||
269 | * | ||
270 | * CONTEXT: | ||
271 | * Must be called with @task->sighand->siglock held. | ||
272 | * | ||
273 | * RETURNS: | ||
274 | * %true if group stop completion should be notified to the parent, %false | ||
275 | * otherwise. | ||
276 | */ | ||
277 | static bool task_participate_group_stop(struct task_struct *task) | ||
278 | { | ||
279 | struct signal_struct *sig = task->signal; | ||
280 | bool consume = task->group_stop & GROUP_STOP_CONSUME; | ||
281 | |||
282 | WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING)); | ||
283 | |||
284 | task_clear_group_stop_pending(task); | ||
285 | |||
286 | if (!consume) | ||
287 | return false; | ||
288 | |||
289 | if (!WARN_ON_ONCE(sig->group_stop_count == 0)) | ||
290 | sig->group_stop_count--; | ||
291 | |||
292 | /* | ||
293 | * Tell the caller to notify completion iff we are entering into a | ||
294 | * fresh group stop. Read comment in do_signal_stop() for details. | ||
295 | */ | ||
296 | if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { | ||
297 | sig->flags = SIGNAL_STOP_STOPPED; | ||
298 | return true; | ||
299 | } | ||
300 | return false; | ||
301 | } | ||
302 | |||
226 | /* | 303 | /* |
227 | * allocate a new signal queue record | 304 | * allocate a new signal queue record |
228 | * - this may be called without locks if and only if t == current, otherwise an | 305 | * - this may be called without locks if and only if t == current, otherwise an |
@@ -527,7 +604,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |||
527 | * is to alert stop-signal processing code when another | 604 | * is to alert stop-signal processing code when another |
528 | * processor has come along and cleared the flag. | 605 | * processor has come along and cleared the flag. |
529 | */ | 606 | */ |
530 | tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; | 607 | current->group_stop |= GROUP_STOP_DEQUEUED; |
531 | } | 608 | } |
532 | if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { | 609 | if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { |
533 | /* | 610 | /* |
@@ -592,7 +669,7 @@ static int rm_from_queue_full(sigset_t *mask, struct sigpending *s) | |||
592 | if (sigisemptyset(&m)) | 669 | if (sigisemptyset(&m)) |
593 | return 0; | 670 | return 0; |
594 | 671 | ||
595 | signandsets(&s->signal, &s->signal, mask); | 672 | sigandnsets(&s->signal, &s->signal, mask); |
596 | list_for_each_entry_safe(q, n, &s->list, list) { | 673 | list_for_each_entry_safe(q, n, &s->list, list) { |
597 | if (sigismember(mask, q->info.si_signo)) { | 674 | if (sigismember(mask, q->info.si_signo)) { |
598 | list_del_init(&q->list); | 675 | list_del_init(&q->list); |
@@ -727,34 +804,14 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) | |||
727 | } else if (sig == SIGCONT) { | 804 | } else if (sig == SIGCONT) { |
728 | unsigned int why; | 805 | unsigned int why; |
729 | /* | 806 | /* |
730 | * Remove all stop signals from all queues, | 807 | * Remove all stop signals from all queues, wake all threads. |
731 | * and wake all threads. | ||
732 | */ | 808 | */ |
733 | rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); | 809 | rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); |
734 | t = p; | 810 | t = p; |
735 | do { | 811 | do { |
736 | unsigned int state; | 812 | task_clear_group_stop_pending(t); |
737 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); | 813 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); |
738 | /* | 814 | wake_up_state(t, __TASK_STOPPED); |
739 | * If there is a handler for SIGCONT, we must make | ||
740 | * sure that no thread returns to user mode before | ||
741 | * we post the signal, in case it was the only | ||
742 | * thread eligible to run the signal handler--then | ||
743 | * it must not do anything between resuming and | ||
744 | * running the handler. With the TIF_SIGPENDING | ||
745 | * flag set, the thread will pause and acquire the | ||
746 | * siglock that we hold now and until we've queued | ||
747 | * the pending signal. | ||
748 | * | ||
749 | * Wake up the stopped thread _after_ setting | ||
750 | * TIF_SIGPENDING | ||
751 | */ | ||
752 | state = __TASK_STOPPED; | ||
753 | if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) { | ||
754 | set_tsk_thread_flag(t, TIF_SIGPENDING); | ||
755 | state |= TASK_INTERRUPTIBLE; | ||
756 | } | ||
757 | wake_up_state(t, state); | ||
758 | } while_each_thread(p, t); | 815 | } while_each_thread(p, t); |
759 | 816 | ||
760 | /* | 817 | /* |
@@ -780,13 +837,6 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) | |||
780 | signal->flags = why | SIGNAL_STOP_CONTINUED; | 837 | signal->flags = why | SIGNAL_STOP_CONTINUED; |
781 | signal->group_stop_count = 0; | 838 | signal->group_stop_count = 0; |
782 | signal->group_exit_code = 0; | 839 | signal->group_exit_code = 0; |
783 | } else { | ||
784 | /* | ||
785 | * We are not stopped, but there could be a stop | ||
786 | * signal in the middle of being processed after | ||
787 | * being removed from the queue. Clear that too. | ||
788 | */ | ||
789 | signal->flags &= ~SIGNAL_STOP_DEQUEUED; | ||
790 | } | 840 | } |
791 | } | 841 | } |
792 | 842 | ||
@@ -875,6 +925,7 @@ static void complete_signal(int sig, struct task_struct *p, int group) | |||
875 | signal->group_stop_count = 0; | 925 | signal->group_stop_count = 0; |
876 | t = p; | 926 | t = p; |
877 | do { | 927 | do { |
928 | task_clear_group_stop_pending(t); | ||
878 | sigaddset(&t->pending.signal, SIGKILL); | 929 | sigaddset(&t->pending.signal, SIGKILL); |
879 | signal_wake_up(t, 1); | 930 | signal_wake_up(t, 1); |
880 | } while_each_thread(p, t); | 931 | } while_each_thread(p, t); |
@@ -1109,6 +1160,7 @@ int zap_other_threads(struct task_struct *p) | |||
1109 | p->signal->group_stop_count = 0; | 1160 | p->signal->group_stop_count = 0; |
1110 | 1161 | ||
1111 | while_each_thread(p, t) { | 1162 | while_each_thread(p, t) { |
1163 | task_clear_group_stop_pending(t); | ||
1112 | count++; | 1164 | count++; |
1113 | 1165 | ||
1114 | /* Don't bother with already dead threads */ | 1166 | /* Don't bother with already dead threads */ |
@@ -1536,16 +1588,30 @@ int do_notify_parent(struct task_struct *tsk, int sig) | |||
1536 | return ret; | 1588 | return ret; |
1537 | } | 1589 | } |
1538 | 1590 | ||
1539 | static void do_notify_parent_cldstop(struct task_struct *tsk, int why) | 1591 | /** |
1592 | * do_notify_parent_cldstop - notify parent of stopped/continued state change | ||
1593 | * @tsk: task reporting the state change | ||
1594 | * @for_ptracer: the notification is for ptracer | ||
1595 | * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report | ||
1596 | * | ||
1597 | * Notify @tsk's parent that the stopped/continued state has changed. If | ||
1598 | * @for_ptracer is %false, @tsk's group leader notifies to its real parent. | ||
1599 | * If %true, @tsk reports to @tsk->parent which should be the ptracer. | ||
1600 | * | ||
1601 | * CONTEXT: | ||
1602 | * Must be called with tasklist_lock at least read locked. | ||
1603 | */ | ||
1604 | static void do_notify_parent_cldstop(struct task_struct *tsk, | ||
1605 | bool for_ptracer, int why) | ||
1540 | { | 1606 | { |
1541 | struct siginfo info; | 1607 | struct siginfo info; |
1542 | unsigned long flags; | 1608 | unsigned long flags; |
1543 | struct task_struct *parent; | 1609 | struct task_struct *parent; |
1544 | struct sighand_struct *sighand; | 1610 | struct sighand_struct *sighand; |
1545 | 1611 | ||
1546 | if (task_ptrace(tsk)) | 1612 | if (for_ptracer) { |
1547 | parent = tsk->parent; | 1613 | parent = tsk->parent; |
1548 | else { | 1614 | } else { |
1549 | tsk = tsk->group_leader; | 1615 | tsk = tsk->group_leader; |
1550 | parent = tsk->real_parent; | 1616 | parent = tsk->real_parent; |
1551 | } | 1617 | } |
@@ -1621,6 +1687,15 @@ static int sigkill_pending(struct task_struct *tsk) | |||
1621 | } | 1687 | } |
1622 | 1688 | ||
1623 | /* | 1689 | /* |
1690 | * Test whether the target task of the usual cldstop notification - the | ||
1691 | * real_parent of @child - is in the same group as the ptracer. | ||
1692 | */ | ||
1693 | static bool real_parent_is_ptracer(struct task_struct *child) | ||
1694 | { | ||
1695 | return same_thread_group(child->parent, child->real_parent); | ||
1696 | } | ||
1697 | |||
1698 | /* | ||
1624 | * This must be called with current->sighand->siglock held. | 1699 | * This must be called with current->sighand->siglock held. |
1625 | * | 1700 | * |
1626 | * This should be the path for all ptrace stops. | 1701 | * This should be the path for all ptrace stops. |
@@ -1631,10 +1706,12 @@ static int sigkill_pending(struct task_struct *tsk) | |||
1631 | * If we actually decide not to stop at all because the tracer | 1706 | * If we actually decide not to stop at all because the tracer |
1632 | * is gone, we keep current->exit_code unless clear_code. | 1707 | * is gone, we keep current->exit_code unless clear_code. |
1633 | */ | 1708 | */ |
1634 | static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | 1709 | static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) |
1635 | __releases(¤t->sighand->siglock) | 1710 | __releases(¤t->sighand->siglock) |
1636 | __acquires(¤t->sighand->siglock) | 1711 | __acquires(¤t->sighand->siglock) |
1637 | { | 1712 | { |
1713 | bool gstop_done = false; | ||
1714 | |||
1638 | if (arch_ptrace_stop_needed(exit_code, info)) { | 1715 | if (arch_ptrace_stop_needed(exit_code, info)) { |
1639 | /* | 1716 | /* |
1640 | * The arch code has something special to do before a | 1717 | * The arch code has something special to do before a |
@@ -1655,21 +1732,49 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | |||
1655 | } | 1732 | } |
1656 | 1733 | ||
1657 | /* | 1734 | /* |
1658 | * If there is a group stop in progress, | 1735 | * If @why is CLD_STOPPED, we're trapping to participate in a group |
1659 | * we must participate in the bookkeeping. | 1736 | * stop. Do the bookkeeping. Note that if SIGCONT was delivered |
1737 | * while siglock was released for the arch hook, PENDING could be | ||
1738 | * clear now. We act as if SIGCONT is received after TASK_TRACED | ||
1739 | * is entered - ignore it. | ||
1660 | */ | 1740 | */ |
1661 | if (current->signal->group_stop_count > 0) | 1741 | if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING)) |
1662 | --current->signal->group_stop_count; | 1742 | gstop_done = task_participate_group_stop(current); |
1663 | 1743 | ||
1664 | current->last_siginfo = info; | 1744 | current->last_siginfo = info; |
1665 | current->exit_code = exit_code; | 1745 | current->exit_code = exit_code; |
1666 | 1746 | ||
1667 | /* Let the debugger run. */ | 1747 | /* |
1668 | __set_current_state(TASK_TRACED); | 1748 | * TRACED should be visible before TRAPPING is cleared; otherwise, |
1749 | * the tracer might fail do_wait(). | ||
1750 | */ | ||
1751 | set_current_state(TASK_TRACED); | ||
1752 | |||
1753 | /* | ||
1754 | * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and | ||
1755 | * transition to TASK_TRACED should be atomic with respect to | ||
1756 | * siglock. This should be done after the arch hook as siglock is | ||
1757 | * released and regrabbed across it. | ||
1758 | */ | ||
1759 | task_clear_group_stop_trapping(current); | ||
1760 | |||
1669 | spin_unlock_irq(¤t->sighand->siglock); | 1761 | spin_unlock_irq(¤t->sighand->siglock); |
1670 | read_lock(&tasklist_lock); | 1762 | read_lock(&tasklist_lock); |
1671 | if (may_ptrace_stop()) { | 1763 | if (may_ptrace_stop()) { |
1672 | do_notify_parent_cldstop(current, CLD_TRAPPED); | 1764 | /* |
1765 | * Notify parents of the stop. | ||
1766 | * | ||
1767 | * While ptraced, there are two parents - the ptracer and | ||
1768 | * the real_parent of the group_leader. The ptracer should | ||
1769 | * know about every stop while the real parent is only | ||
1770 | * interested in the completion of group stop. The states | ||
1771 | * for the two don't interact with each other. Notify | ||
1772 | * separately unless they're gonna be duplicates. | ||
1773 | */ | ||
1774 | do_notify_parent_cldstop(current, true, why); | ||
1775 | if (gstop_done && !real_parent_is_ptracer(current)) | ||
1776 | do_notify_parent_cldstop(current, false, why); | ||
1777 | |||
1673 | /* | 1778 | /* |
1674 | * Don't want to allow preemption here, because | 1779 | * Don't want to allow preemption here, because |
1675 | * sys_ptrace() needs this task to be inactive. | 1780 | * sys_ptrace() needs this task to be inactive. |
@@ -1684,7 +1789,16 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | |||
1684 | /* | 1789 | /* |
1685 | * By the time we got the lock, our tracer went away. | 1790 | * By the time we got the lock, our tracer went away. |
1686 | * Don't drop the lock yet, another tracer may come. | 1791 | * Don't drop the lock yet, another tracer may come. |
1792 | * | ||
1793 | * If @gstop_done, the ptracer went away between group stop | ||
1794 | * completion and here. During detach, it would have set | ||
1795 | * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED | ||
1796 | * in do_signal_stop() on return, so notifying the real | ||
1797 | * parent of the group stop completion is enough. | ||
1687 | */ | 1798 | */ |
1799 | if (gstop_done) | ||
1800 | do_notify_parent_cldstop(current, false, why); | ||
1801 | |||
1688 | __set_current_state(TASK_RUNNING); | 1802 | __set_current_state(TASK_RUNNING); |
1689 | if (clear_code) | 1803 | if (clear_code) |
1690 | current->exit_code = 0; | 1804 | current->exit_code = 0; |
@@ -1728,7 +1842,7 @@ void ptrace_notify(int exit_code) | |||
1728 | 1842 | ||
1729 | /* Let the debugger run. */ | 1843 | /* Let the debugger run. */ |
1730 | spin_lock_irq(¤t->sighand->siglock); | 1844 | spin_lock_irq(¤t->sighand->siglock); |
1731 | ptrace_stop(exit_code, 1, &info); | 1845 | ptrace_stop(exit_code, CLD_TRAPPED, 1, &info); |
1732 | spin_unlock_irq(¤t->sighand->siglock); | 1846 | spin_unlock_irq(¤t->sighand->siglock); |
1733 | } | 1847 | } |
1734 | 1848 | ||
@@ -1741,66 +1855,115 @@ void ptrace_notify(int exit_code) | |||
1741 | static int do_signal_stop(int signr) | 1855 | static int do_signal_stop(int signr) |
1742 | { | 1856 | { |
1743 | struct signal_struct *sig = current->signal; | 1857 | struct signal_struct *sig = current->signal; |
1744 | int notify; | ||
1745 | 1858 | ||
1746 | if (!sig->group_stop_count) { | 1859 | if (!(current->group_stop & GROUP_STOP_PENDING)) { |
1860 | unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME; | ||
1747 | struct task_struct *t; | 1861 | struct task_struct *t; |
1748 | 1862 | ||
1749 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || | 1863 | /* signr will be recorded in task->group_stop for retries */ |
1864 | WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK); | ||
1865 | |||
1866 | if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) || | ||
1750 | unlikely(signal_group_exit(sig))) | 1867 | unlikely(signal_group_exit(sig))) |
1751 | return 0; | 1868 | return 0; |
1752 | /* | 1869 | /* |
1753 | * There is no group stop already in progress. | 1870 | * There is no group stop already in progress. We must |
1754 | * We must initiate one now. | 1871 | * initiate one now. |
1872 | * | ||
1873 | * While ptraced, a task may be resumed while group stop is | ||
1874 | * still in effect and then receive a stop signal and | ||
1875 | * initiate another group stop. This deviates from the | ||
1876 | * usual behavior as two consecutive stop signals can't | ||
1877 | * cause two group stops when !ptraced. That is why we | ||
1878 | * also check !task_is_stopped(t) below. | ||
1879 | * | ||
1880 | * The condition can be distinguished by testing whether | ||
1881 | * SIGNAL_STOP_STOPPED is already set. Don't generate | ||
1882 | * group_exit_code in such case. | ||
1883 | * | ||
1884 | * This is not necessary for SIGNAL_STOP_CONTINUED because | ||
1885 | * an intervening stop signal is required to cause two | ||
1886 | * continued events regardless of ptrace. | ||
1755 | */ | 1887 | */ |
1756 | sig->group_exit_code = signr; | 1888 | if (!(sig->flags & SIGNAL_STOP_STOPPED)) |
1889 | sig->group_exit_code = signr; | ||
1890 | else | ||
1891 | WARN_ON_ONCE(!task_ptrace(current)); | ||
1757 | 1892 | ||
1893 | current->group_stop &= ~GROUP_STOP_SIGMASK; | ||
1894 | current->group_stop |= signr | gstop; | ||
1758 | sig->group_stop_count = 1; | 1895 | sig->group_stop_count = 1; |
1759 | for (t = next_thread(current); t != current; t = next_thread(t)) | 1896 | for (t = next_thread(current); t != current; |
1897 | t = next_thread(t)) { | ||
1898 | t->group_stop &= ~GROUP_STOP_SIGMASK; | ||
1760 | /* | 1899 | /* |
1761 | * Setting state to TASK_STOPPED for a group | 1900 | * Setting state to TASK_STOPPED for a group |
1762 | * stop is always done with the siglock held, | 1901 | * stop is always done with the siglock held, |
1763 | * so this check has no races. | 1902 | * so this check has no races. |
1764 | */ | 1903 | */ |
1765 | if (!(t->flags & PF_EXITING) && | 1904 | if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) { |
1766 | !task_is_stopped_or_traced(t)) { | 1905 | t->group_stop |= signr | gstop; |
1767 | sig->group_stop_count++; | 1906 | sig->group_stop_count++; |
1768 | signal_wake_up(t, 0); | 1907 | signal_wake_up(t, 0); |
1769 | } | 1908 | } |
1909 | } | ||
1770 | } | 1910 | } |
1771 | /* | 1911 | retry: |
1772 | * If there are no other threads in the group, or if there is | 1912 | if (likely(!task_ptrace(current))) { |
1773 | * a group stop in progress and we are the last to stop, report | 1913 | int notify = 0; |
1774 | * to the parent. When ptraced, every thread reports itself. | 1914 | |
1775 | */ | 1915 | /* |
1776 | notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0; | 1916 | * If there are no other threads in the group, or if there |
1777 | notify = tracehook_notify_jctl(notify, CLD_STOPPED); | 1917 | * is a group stop in progress and we are the last to stop, |
1778 | /* | 1918 | * report to the parent. |
1779 | * tracehook_notify_jctl() can drop and reacquire siglock, so | 1919 | */ |
1780 | * we keep ->group_stop_count != 0 before the call. If SIGCONT | 1920 | if (task_participate_group_stop(current)) |
1781 | * or SIGKILL comes in between ->group_stop_count == 0. | 1921 | notify = CLD_STOPPED; |
1782 | */ | 1922 | |
1783 | if (sig->group_stop_count) { | ||
1784 | if (!--sig->group_stop_count) | ||
1785 | sig->flags = SIGNAL_STOP_STOPPED; | ||
1786 | current->exit_code = sig->group_exit_code; | ||
1787 | __set_current_state(TASK_STOPPED); | 1923 | __set_current_state(TASK_STOPPED); |
1924 | spin_unlock_irq(¤t->sighand->siglock); | ||
1925 | |||
1926 | /* | ||
1927 | * Notify the parent of the group stop completion. Because | ||
1928 | * we're not holding either the siglock or tasklist_lock | ||
1929 | * here, ptracer may attach in between; however, this is for | ||
1930 | * group stop and should always be delivered to the real | ||
1931 | * parent of the group leader. The new ptracer will get | ||
1932 | * its notification when this task transitions into | ||
1933 | * TASK_TRACED. | ||
1934 | */ | ||
1935 | if (notify) { | ||
1936 | read_lock(&tasklist_lock); | ||
1937 | do_notify_parent_cldstop(current, false, notify); | ||
1938 | read_unlock(&tasklist_lock); | ||
1939 | } | ||
1940 | |||
1941 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ | ||
1942 | schedule(); | ||
1943 | |||
1944 | spin_lock_irq(¤t->sighand->siglock); | ||
1945 | } else { | ||
1946 | ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK, | ||
1947 | CLD_STOPPED, 0, NULL); | ||
1948 | current->exit_code = 0; | ||
1788 | } | 1949 | } |
1789 | spin_unlock_irq(¤t->sighand->siglock); | ||
1790 | 1950 | ||
1791 | if (notify) { | 1951 | /* |
1792 | read_lock(&tasklist_lock); | 1952 | * GROUP_STOP_PENDING could be set if another group stop has |
1793 | do_notify_parent_cldstop(current, notify); | 1953 | * started since being woken up or ptrace wants us to transit |
1794 | read_unlock(&tasklist_lock); | 1954 | * between TASK_STOPPED and TRACED. Retry group stop. |
1955 | */ | ||
1956 | if (current->group_stop & GROUP_STOP_PENDING) { | ||
1957 | WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK)); | ||
1958 | goto retry; | ||
1795 | } | 1959 | } |
1796 | 1960 | ||
1797 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ | 1961 | /* PTRACE_ATTACH might have raced with task killing, clear trapping */ |
1798 | do { | 1962 | task_clear_group_stop_trapping(current); |
1799 | schedule(); | 1963 | |
1800 | } while (try_to_freeze()); | 1964 | spin_unlock_irq(¤t->sighand->siglock); |
1801 | 1965 | ||
1802 | tracehook_finish_jctl(); | 1966 | tracehook_finish_jctl(); |
1803 | current->exit_code = 0; | ||
1804 | 1967 | ||
1805 | return 1; | 1968 | return 1; |
1806 | } | 1969 | } |
@@ -1814,7 +1977,7 @@ static int ptrace_signal(int signr, siginfo_t *info, | |||
1814 | ptrace_signal_deliver(regs, cookie); | 1977 | ptrace_signal_deliver(regs, cookie); |
1815 | 1978 | ||
1816 | /* Let the debugger run. */ | 1979 | /* Let the debugger run. */ |
1817 | ptrace_stop(signr, 0, info); | 1980 | ptrace_stop(signr, CLD_TRAPPED, 0, info); |
1818 | 1981 | ||
1819 | /* We're back. Did the debugger cancel the sig? */ | 1982 | /* We're back. Did the debugger cancel the sig? */ |
1820 | signr = current->exit_code; | 1983 | signr = current->exit_code; |
@@ -1869,18 +2032,36 @@ relock: | |||
1869 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. | 2032 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. |
1870 | */ | 2033 | */ |
1871 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { | 2034 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { |
1872 | int why = (signal->flags & SIGNAL_STOP_CONTINUED) | 2035 | struct task_struct *leader; |
1873 | ? CLD_CONTINUED : CLD_STOPPED; | 2036 | int why; |
2037 | |||
2038 | if (signal->flags & SIGNAL_CLD_CONTINUED) | ||
2039 | why = CLD_CONTINUED; | ||
2040 | else | ||
2041 | why = CLD_STOPPED; | ||
2042 | |||
1874 | signal->flags &= ~SIGNAL_CLD_MASK; | 2043 | signal->flags &= ~SIGNAL_CLD_MASK; |
1875 | 2044 | ||
1876 | why = tracehook_notify_jctl(why, CLD_CONTINUED); | ||
1877 | spin_unlock_irq(&sighand->siglock); | 2045 | spin_unlock_irq(&sighand->siglock); |
1878 | 2046 | ||
1879 | if (why) { | 2047 | /* |
1880 | read_lock(&tasklist_lock); | 2048 | * Notify the parent that we're continuing. This event is |
1881 | do_notify_parent_cldstop(current->group_leader, why); | 2049 | * always per-process and doesn't make whole lot of sense |
1882 | read_unlock(&tasklist_lock); | 2050 | * for ptracers, who shouldn't consume the state via |
1883 | } | 2051 | * wait(2) either, but, for backward compatibility, notify |
2052 | * the ptracer of the group leader too unless it's gonna be | ||
2053 | * a duplicate. | ||
2054 | */ | ||
2055 | read_lock(&tasklist_lock); | ||
2056 | |||
2057 | do_notify_parent_cldstop(current, false, why); | ||
2058 | |||
2059 | leader = current->group_leader; | ||
2060 | if (task_ptrace(leader) && !real_parent_is_ptracer(leader)) | ||
2061 | do_notify_parent_cldstop(leader, true, why); | ||
2062 | |||
2063 | read_unlock(&tasklist_lock); | ||
2064 | |||
1884 | goto relock; | 2065 | goto relock; |
1885 | } | 2066 | } |
1886 | 2067 | ||
@@ -1897,8 +2078,8 @@ relock: | |||
1897 | if (unlikely(signr != 0)) | 2078 | if (unlikely(signr != 0)) |
1898 | ka = return_ka; | 2079 | ka = return_ka; |
1899 | else { | 2080 | else { |
1900 | if (unlikely(signal->group_stop_count > 0) && | 2081 | if (unlikely(current->group_stop & |
1901 | do_signal_stop(0)) | 2082 | GROUP_STOP_PENDING) && do_signal_stop(0)) |
1902 | goto relock; | 2083 | goto relock; |
1903 | 2084 | ||
1904 | signr = dequeue_signal(current, ¤t->blocked, | 2085 | signr = dequeue_signal(current, ¤t->blocked, |
@@ -2017,10 +2198,42 @@ relock: | |||
2017 | return signr; | 2198 | return signr; |
2018 | } | 2199 | } |
2019 | 2200 | ||
2201 | /* | ||
2202 | * It could be that complete_signal() picked us to notify about the | ||
2203 | * group-wide signal. Other threads should be notified now to take | ||
2204 | * the shared signals in @which since we will not. | ||
2205 | */ | ||
2206 | static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) | ||
2207 | { | ||
2208 | sigset_t retarget; | ||
2209 | struct task_struct *t; | ||
2210 | |||
2211 | sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); | ||
2212 | if (sigisemptyset(&retarget)) | ||
2213 | return; | ||
2214 | |||
2215 | t = tsk; | ||
2216 | while_each_thread(tsk, t) { | ||
2217 | if (t->flags & PF_EXITING) | ||
2218 | continue; | ||
2219 | |||
2220 | if (!has_pending_signals(&retarget, &t->blocked)) | ||
2221 | continue; | ||
2222 | /* Remove the signals this thread can handle. */ | ||
2223 | sigandsets(&retarget, &retarget, &t->blocked); | ||
2224 | |||
2225 | if (!signal_pending(t)) | ||
2226 | signal_wake_up(t, 0); | ||
2227 | |||
2228 | if (sigisemptyset(&retarget)) | ||
2229 | break; | ||
2230 | } | ||
2231 | } | ||
2232 | |||
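retarget_shared_pending() exists to preserve the user-visible rule that a process-directed signal is eventually taken by some thread that has it unblocked. An illustrative pthreads sketch of that rule (not part of this patch; compile with -pthread):

    /* illustrative sketch: a shared (process-directed) signal is consumed by
     * the thread that keeps it unblocked/waited-for, not by the sender thread */
    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *worker(void *arg)
    {
        sigset_t set;
        int sig;

        (void)arg;
        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigwait(&set, &sig);            /* dequeues the shared pending signal */
        printf("worker dequeued signal %d\n", sig);
        return NULL;
    }

    int main(void)
    {
        sigset_t set;
        pthread_t tid;

        /* block SIGUSR1 process-wide before creating threads so it stays
         * in the shared pending set until somebody sigwait()s for it */
        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        pthread_sigmask(SIG_BLOCK, &set, NULL);

        pthread_create(&tid, NULL, worker, NULL);

        kill(getpid(), SIGUSR1);        /* process-directed: lands in shared_pending */
        pthread_join(tid, NULL);        /* the worker, not main, consumed it */
        return 0;
    }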
2020 | void exit_signals(struct task_struct *tsk) | 2233 | void exit_signals(struct task_struct *tsk) |
2021 | { | 2234 | { |
2022 | int group_stop = 0; | 2235 | int group_stop = 0; |
2023 | struct task_struct *t; | 2236 | sigset_t unblocked; |
2024 | 2237 | ||
2025 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { | 2238 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { |
2026 | tsk->flags |= PF_EXITING; | 2239 | tsk->flags |= PF_EXITING; |
@@ -2036,26 +2249,23 @@ void exit_signals(struct task_struct *tsk) | |||
2036 | if (!signal_pending(tsk)) | 2249 | if (!signal_pending(tsk)) |
2037 | goto out; | 2250 | goto out; |
2038 | 2251 | ||
2039 | /* | 2252 | unblocked = tsk->blocked; |
2040 | * It could be that __group_complete_signal() choose us to | 2253 | signotset(&unblocked); |
2041 | * notify about group-wide signal. Another thread should be | 2254 | retarget_shared_pending(tsk, &unblocked); |
2042 | * woken now to take the signal since we will not. | ||
2043 | */ | ||
2044 | for (t = tsk; (t = next_thread(t)) != tsk; ) | ||
2045 | if (!signal_pending(t) && !(t->flags & PF_EXITING)) | ||
2046 | recalc_sigpending_and_wake(t); | ||
2047 | 2255 | ||
2048 | if (unlikely(tsk->signal->group_stop_count) && | 2256 | if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) && |
2049 | !--tsk->signal->group_stop_count) { | 2257 | task_participate_group_stop(tsk)) |
2050 | tsk->signal->flags = SIGNAL_STOP_STOPPED; | 2258 | group_stop = CLD_STOPPED; |
2051 | group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED); | ||
2052 | } | ||
2053 | out: | 2259 | out: |
2054 | spin_unlock_irq(&tsk->sighand->siglock); | 2260 | spin_unlock_irq(&tsk->sighand->siglock); |
2055 | 2261 | ||
2262 | /* | ||
2263 | * If group stop has completed, deliver the notification. This | ||
2264 | * should always go to the real parent of the group leader. | ||
2265 | */ | ||
2056 | if (unlikely(group_stop)) { | 2266 | if (unlikely(group_stop)) { |
2057 | read_lock(&tasklist_lock); | 2267 | read_lock(&tasklist_lock); |
2058 | do_notify_parent_cldstop(tsk, group_stop); | 2268 | do_notify_parent_cldstop(tsk, false, group_stop); |
2059 | read_unlock(&tasklist_lock); | 2269 | read_unlock(&tasklist_lock); |
2060 | } | 2270 | } |
2061 | } | 2271 | } |
@@ -2089,11 +2299,33 @@ long do_no_restart_syscall(struct restart_block *param) | |||
2089 | return -EINTR; | 2299 | return -EINTR; |
2090 | } | 2300 | } |
2091 | 2301 | ||
2092 | /* | 2302 | static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) |
2093 | * We don't need to get the kernel lock - this is all local to this | 2303 | { |
2094 | * particular thread.. (and that's good, because this is _heavily_ | 2304 | if (signal_pending(tsk) && !thread_group_empty(tsk)) { |
2095 | * used by various programs) | 2305 | sigset_t newblocked; |
2306 | /* A set of now blocked but previously unblocked signals. */ | ||
2307 | sigandnsets(&newblocked, newset, &current->blocked); | ||
2308 | retarget_shared_pending(tsk, &newblocked); | ||
2309 | } | ||
2310 | tsk->blocked = *newset; | ||
2311 | recalc_sigpending(); | ||
2312 | } | ||
2313 | |||
2314 | /** | ||
2315 | * set_current_blocked - change current->blocked mask | ||
2316 | * @newset: new mask | ||
2317 | * | ||
2318 | * It is wrong to change ->blocked directly, this helper should be used | ||
2319 | * to ensure the process can't miss a shared signal we are going to block. | ||
2096 | */ | 2320 | */ |
2321 | void set_current_blocked(const sigset_t *newset) | ||
2322 | { | ||
2323 | struct task_struct *tsk = current; | ||
2324 | |||
2325 | spin_lock_irq(&tsk->sighand->siglock); | ||
2326 | __set_task_blocked(tsk, newset); | ||
2327 | spin_unlock_irq(&tsk->sighand->siglock); | ||
2328 | } | ||
2097 | 2329 | ||
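As a worked illustration of the sigandnsets() step in __set_task_blocked() above (an editorial sketch, using a plain 64-bit word in place of sigset_t): the result is exactly the set of signals that were deliverable before the mask change and are blocked after it, i.e. the ones that may have to be retargeted to another thread.

    /* illustrative sketch: "newly blocked" = newset & ~old_blocked */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t blocked = 0x05;    /* old mask: bits for signals 1 and 3 */
        uint64_t newset  = 0x15;    /* new mask: bits for signals 1, 3 and 5 */

        uint64_t newly_blocked = newset & ~blocked;   /* what sigandnsets() computes */

        printf("newly blocked: %#llx\n", (unsigned long long)newly_blocked);
        /* prints 0x10: only signal 5 needs retarget_shared_pending() */
        return 0;
    }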
2098 | /* | 2330 | /* |
2099 | * This is also useful for kernel threads that want to temporarily | 2331 | * This is also useful for kernel threads that want to temporarily |
@@ -2105,30 +2337,29 @@ long do_no_restart_syscall(struct restart_block *param) | |||
2105 | */ | 2337 | */ |
2106 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) | 2338 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) |
2107 | { | 2339 | { |
2108 | int error; | 2340 | struct task_struct *tsk = current; |
2341 | sigset_t newset; | ||
2109 | 2342 | ||
2110 | spin_lock_irq(&current->sighand->siglock); | 2343 | /* Lockless, only current can change ->blocked, never from irq */ |
2111 | if (oldset) | 2344 | if (oldset) |
2112 | *oldset = current->blocked; | 2345 | *oldset = tsk->blocked; |
2113 | 2346 | ||
2114 | error = 0; | ||
2115 | switch (how) { | 2347 | switch (how) { |
2116 | case SIG_BLOCK: | 2348 | case SIG_BLOCK: |
2117 | sigorsets(&current->blocked, &current->blocked, set); | 2349 | sigorsets(&newset, &tsk->blocked, set); |
2118 | break; | 2350 | break; |
2119 | case SIG_UNBLOCK: | 2351 | case SIG_UNBLOCK: |
2120 | signandsets(&current->blocked, &current->blocked, set); | 2352 | sigandnsets(&newset, &tsk->blocked, set); |
2121 | break; | 2353 | break; |
2122 | case SIG_SETMASK: | 2354 | case SIG_SETMASK: |
2123 | current->blocked = *set; | 2355 | newset = *set; |
2124 | break; | 2356 | break; |
2125 | default: | 2357 | default: |
2126 | error = -EINVAL; | 2358 | return -EINVAL; |
2127 | } | 2359 | } |
2128 | recalc_sigpending(); | ||
2129 | spin_unlock_irq(&current->sighand->siglock); | ||
2130 | 2360 | ||
2131 | return error; | 2361 | set_current_blocked(&newset); |
2362 | return 0; | ||
2132 | } | 2363 | } |
2133 | 2364 | ||
2134 | /** | 2365 | /** |
@@ -2138,40 +2369,34 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset) | |||
2138 | * @oset: previous value of signal mask if non-null | 2369 | * @oset: previous value of signal mask if non-null |
2139 | * @sigsetsize: size of sigset_t type | 2370 | * @sigsetsize: size of sigset_t type |
2140 | */ | 2371 | */ |
2141 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set, | 2372 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, |
2142 | sigset_t __user *, oset, size_t, sigsetsize) | 2373 | sigset_t __user *, oset, size_t, sigsetsize) |
2143 | { | 2374 | { |
2144 | int error = -EINVAL; | ||
2145 | sigset_t old_set, new_set; | 2375 | sigset_t old_set, new_set; |
2376 | int error; | ||
2146 | 2377 | ||
2147 | /* XXX: Don't preclude handling different sized sigset_t's. */ | 2378 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2148 | if (sigsetsize != sizeof(sigset_t)) | 2379 | if (sigsetsize != sizeof(sigset_t)) |
2149 | goto out; | 2380 | return -EINVAL; |
2150 | 2381 | ||
2151 | if (set) { | 2382 | old_set = current->blocked; |
2152 | error = -EFAULT; | 2383 | |
2153 | if (copy_from_user(&new_set, set, sizeof(*set))) | 2384 | if (nset) { |
2154 | goto out; | 2385 | if (copy_from_user(&new_set, nset, sizeof(sigset_t))) |
2386 | return -EFAULT; | ||
2155 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); | 2387 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2156 | 2388 | ||
2157 | error = sigprocmask(how, &new_set, &old_set); | 2389 | error = sigprocmask(how, &new_set, NULL); |
2158 | if (error) | 2390 | if (error) |
2159 | goto out; | 2391 | return error; |
2160 | if (oset) | 2392 | } |
2161 | goto set_old; | ||
2162 | } else if (oset) { | ||
2163 | spin_lock_irq(&current->sighand->siglock); | ||
2164 | old_set = current->blocked; | ||
2165 | spin_unlock_irq(&current->sighand->siglock); | ||
2166 | 2393 | ||
2167 | set_old: | 2394 | if (oset) { |
2168 | error = -EFAULT; | 2395 | if (copy_to_user(oset, &old_set, sizeof(sigset_t))) |
2169 | if (copy_to_user(oset, &old_set, sizeof(*oset))) | 2396 | return -EFAULT; |
2170 | goto out; | ||
2171 | } | 2397 | } |
2172 | error = 0; | 2398 | |
2173 | out: | 2399 | return 0; |
2174 | return error; | ||
2175 | } | 2400 | } |
2176 | 2401 | ||
2177 | long do_sigpending(void __user *set, unsigned long sigsetsize) | 2402 | long do_sigpending(void __user *set, unsigned long sigsetsize) |
@@ -2284,6 +2509,66 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) | |||
2284 | #endif | 2509 | #endif |
2285 | 2510 | ||
2286 | /** | 2511 | /** |
2512 | * do_sigtimedwait - wait for queued signals specified in @which | ||
2513 | * @which: queued signals to wait for | ||
2514 | * @info: if non-null, the signal's siginfo is returned here | ||
2515 | * @ts: upper bound on process time suspension | ||
2516 | */ | ||
2517 | int do_sigtimedwait(const sigset_t *which, siginfo_t *info, | ||
2518 | const struct timespec *ts) | ||
2519 | { | ||
2520 | struct task_struct *tsk = current; | ||
2521 | long timeout = MAX_SCHEDULE_TIMEOUT; | ||
2522 | sigset_t mask = *which; | ||
2523 | int sig; | ||
2524 | |||
2525 | if (ts) { | ||
2526 | if (!timespec_valid(ts)) | ||
2527 | return -EINVAL; | ||
2528 | timeout = timespec_to_jiffies(ts); | ||
2529 | /* | ||
2530 | * We can be close to the next tick, add another one | ||
2531 | * to ensure we will wait at least the time asked for. | ||
2532 | */ | ||
2533 | if (ts->tv_sec || ts->tv_nsec) | ||
2534 | timeout++; | ||
2535 | } | ||
2536 | |||
2537 | /* | ||
2538 | * Invert the set of allowed signals to get those we want to block. | ||
2539 | */ | ||
2540 | sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); | ||
2541 | signotset(&mask); | ||
2542 | |||
2543 | spin_lock_irq(&tsk->sighand->siglock); | ||
2544 | sig = dequeue_signal(tsk, &mask, info); | ||
2545 | if (!sig && timeout) { | ||
2546 | /* | ||
2547 | * None ready, temporarily unblock those we're interested | ||
2548 | * while we are sleeping in so that we'll be awakened when | ||
2549 | * they arrive. Unblocking is always fine, we can avoid | ||
2550 | * set_current_blocked(). | ||
2551 | */ | ||
2552 | tsk->real_blocked = tsk->blocked; | ||
2553 | sigandsets(&tsk->blocked, &tsk->blocked, &mask); | ||
2554 | recalc_sigpending(); | ||
2555 | spin_unlock_irq(&tsk->sighand->siglock); | ||
2556 | |||
2557 | timeout = schedule_timeout_interruptible(timeout); | ||
2558 | |||
2559 | spin_lock_irq(&tsk->sighand->siglock); | ||
2560 | __set_task_blocked(tsk, &tsk->real_blocked); | ||
2561 | siginitset(&tsk->real_blocked, 0); | ||
2562 | sig = dequeue_signal(tsk, &mask, info); | ||
2563 | } | ||
2564 | spin_unlock_irq(&tsk->sighand->siglock); | ||
2565 | |||
2566 | if (sig) | ||
2567 | return sig; | ||
2568 | return timeout ? -EINTR : -EAGAIN; | ||
2569 | } | ||
2570 | |||
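The new do_sigtimedwait() helper factors out the sigtimedwait(2) semantics shared by the native and compat syscalls. An illustrative user-space sketch (not part of this patch; signal choice arbitrary): with a zero timeout the call polls and fails with EAGAIN, while a pending blocked signal is dequeued immediately together with its siginfo.

    /* illustrative sketch: sigtimedwait(2) polling and dequeueing a pending signal */
    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
        sigset_t set;
        siginfo_t si;
        struct timespec timeout = { 0, 0 };     /* zero timeout: just poll */
        int ret;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* must be blocked to stay queued */

        ret = sigtimedwait(&set, &si, &timeout);
        printf("first wait: %d (%s)\n", ret, ret < 0 ? strerror(errno) : "-");
        /* nothing pending yet, so -1 / EAGAIN */

        kill(getpid(), SIGUSR1);                /* queue one, then poll again */
        ret = sigtimedwait(&set, &si, &timeout);
        printf("second wait: signal %d from pid %d\n", ret, (int)si.si_pid);
        return 0;
    }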
2571 | /** | ||
2287 | * sys_rt_sigtimedwait - synchronously wait for queued signals specified | 2572 | * sys_rt_sigtimedwait - synchronously wait for queued signals specified |
2288 | * in @uthese | 2573 | * in @uthese |
2289 | * @uthese: queued signals to wait for | 2574 | * @uthese: queued signals to wait for |
@@ -2295,11 +2580,10 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, | |||
2295 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, | 2580 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, |
2296 | size_t, sigsetsize) | 2581 | size_t, sigsetsize) |
2297 | { | 2582 | { |
2298 | int ret, sig; | ||
2299 | sigset_t these; | 2583 | sigset_t these; |
2300 | struct timespec ts; | 2584 | struct timespec ts; |
2301 | siginfo_t info; | 2585 | siginfo_t info; |
2302 | long timeout = 0; | 2586 | int ret; |
2303 | 2587 | ||
2304 | /* XXX: Don't preclude handling different sized sigset_t's. */ | 2588 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2305 | if (sigsetsize != sizeof(sigset_t)) | 2589 | if (sigsetsize != sizeof(sigset_t)) |
@@ -2308,61 +2592,16 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, | |||
2308 | if (copy_from_user(&these, uthese, sizeof(these))) | 2592 | if (copy_from_user(&these, uthese, sizeof(these))) |
2309 | return -EFAULT; | 2593 | return -EFAULT; |
2310 | 2594 | ||
2311 | /* | ||
2312 | * Invert the set of allowed signals to get those we | ||
2313 | * want to block. | ||
2314 | */ | ||
2315 | sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP)); | ||
2316 | signotset(&these); | ||
2317 | |||
2318 | if (uts) { | 2595 | if (uts) { |
2319 | if (copy_from_user(&ts, uts, sizeof(ts))) | 2596 | if (copy_from_user(&ts, uts, sizeof(ts))) |
2320 | return -EFAULT; | 2597 | return -EFAULT; |
2321 | if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0 | ||
2322 | || ts.tv_sec < 0) | ||
2323 | return -EINVAL; | ||
2324 | } | 2598 | } |
2325 | 2599 | ||
2326 | spin_lock_irq(&current->sighand->siglock); | 2600 | ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); |
2327 | sig = dequeue_signal(current, &these, &info); | ||
2328 | if (!sig) { | ||
2329 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
2330 | if (uts) | ||
2331 | timeout = (timespec_to_jiffies(&ts) | ||
2332 | + (ts.tv_sec || ts.tv_nsec)); | ||
2333 | |||
2334 | if (timeout) { | ||
2335 | /* | ||
2336 | * None ready -- temporarily unblock those we're | ||
2337 | * interested while we are sleeping in so that we'll | ||
2338 | * be awakened when they arrive. | ||
2339 | */ | ||
2340 | current->real_blocked = current->blocked; | ||
2341 | sigandsets(&current->blocked, &current->blocked, &these); | ||
2342 | recalc_sigpending(); | ||
2343 | spin_unlock_irq(&current->sighand->siglock); | ||
2344 | |||
2345 | timeout = schedule_timeout_interruptible(timeout); | ||
2346 | |||
2347 | spin_lock_irq(&current->sighand->siglock); | ||
2348 | sig = dequeue_signal(current, &these, &info); | ||
2349 | current->blocked = current->real_blocked; | ||
2350 | siginitset(¤t->real_blocked, 0); | ||
2351 | recalc_sigpending(); | ||
2352 | } | ||
2353 | } | ||
2354 | spin_unlock_irq(&current->sighand->siglock); | ||
2355 | 2601 | ||
2356 | if (sig) { | 2602 | if (ret > 0 && uinfo) { |
2357 | ret = sig; | 2603 | if (copy_siginfo_to_user(uinfo, &info)) |
2358 | if (uinfo) { | 2604 | ret = -EFAULT; |
2359 | if (copy_siginfo_to_user(uinfo, &info)) | ||
2360 | ret = -EFAULT; | ||
2361 | } | ||
2362 | } else { | ||
2363 | ret = -EAGAIN; | ||
2364 | if (timeout) | ||
2365 | ret = -EINTR; | ||
2366 | } | 2605 | } |
2367 | 2606 | ||
2368 | return ret; | 2607 | return ret; |
@@ -2650,60 +2889,51 @@ SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) | |||
2650 | /** | 2889 | /** |
2651 | * sys_sigprocmask - examine and change blocked signals | 2890 | * sys_sigprocmask - examine and change blocked signals |
2652 | * @how: whether to add, remove, or set signals | 2891 | * @how: whether to add, remove, or set signals |
2653 | * @set: signals to add or remove (if non-null) | 2892 | * @nset: signals to add or remove (if non-null) |
2654 | * @oset: previous value of signal mask if non-null | 2893 | * @oset: previous value of signal mask if non-null |
2655 | * | 2894 | * |
2656 | * Some platforms have their own version with special arguments; | 2895 | * Some platforms have their own version with special arguments; |
2657 | * others support only sys_rt_sigprocmask. | 2896 | * others support only sys_rt_sigprocmask. |
2658 | */ | 2897 | */ |
2659 | 2898 | ||
2660 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set, | 2899 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, |
2661 | old_sigset_t __user *, oset) | 2900 | old_sigset_t __user *, oset) |
2662 | { | 2901 | { |
2663 | int error; | ||
2664 | old_sigset_t old_set, new_set; | 2902 | old_sigset_t old_set, new_set; |
2903 | sigset_t new_blocked; | ||
2665 | 2904 | ||
2666 | if (set) { | 2905 | old_set = current->blocked.sig[0]; |
2667 | error = -EFAULT; | 2906 | |
2668 | if (copy_from_user(&new_set, set, sizeof(*set))) | 2907 | if (nset) { |
2669 | goto out; | 2908 | if (copy_from_user(&new_set, nset, sizeof(*nset))) |
2909 | return -EFAULT; | ||
2670 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); | 2910 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2671 | 2911 | ||
2672 | spin_lock_irq(&current->sighand->siglock); | 2912 | new_blocked = current->blocked; |
2673 | old_set = current->blocked.sig[0]; | ||
2674 | 2913 | ||
2675 | error = 0; | ||
2676 | switch (how) { | 2914 | switch (how) { |
2677 | default: | ||
2678 | error = -EINVAL; | ||
2679 | break; | ||
2680 | case SIG_BLOCK: | 2915 | case SIG_BLOCK: |
2681 | sigaddsetmask(&current->blocked, new_set); | 2916 | sigaddsetmask(&new_blocked, new_set); |
2682 | break; | 2917 | break; |
2683 | case SIG_UNBLOCK: | 2918 | case SIG_UNBLOCK: |
2684 | sigdelsetmask(&current->blocked, new_set); | 2919 | sigdelsetmask(&new_blocked, new_set); |
2685 | break; | 2920 | break; |
2686 | case SIG_SETMASK: | 2921 | case SIG_SETMASK: |
2687 | current->blocked.sig[0] = new_set; | 2922 | new_blocked.sig[0] = new_set; |
2688 | break; | 2923 | break; |
2924 | default: | ||
2925 | return -EINVAL; | ||
2689 | } | 2926 | } |
2690 | 2927 | ||
2691 | recalc_sigpending(); | 2928 | set_current_blocked(&new_blocked); |
2692 | spin_unlock_irq(&current->sighand->siglock); | 2929 | } |
2693 | if (error) | 2930 | |
2694 | goto out; | 2931 | if (oset) { |
2695 | if (oset) | ||
2696 | goto set_old; | ||
2697 | } else if (oset) { | ||
2698 | old_set = current->blocked.sig[0]; | ||
2699 | set_old: | ||
2700 | error = -EFAULT; | ||
2701 | if (copy_to_user(oset, &old_set, sizeof(*oset))) | 2932 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
2702 | goto out; | 2933 | return -EFAULT; |
2703 | } | 2934 | } |
2704 | error = 0; | 2935 | |
2705 | out: | 2936 | return 0; |
2706 | return error; | ||
2707 | } | 2937 | } |
2708 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ | 2938 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ |
2709 | 2939 | ||
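Both sigprocmask variants now funnel through set_current_blocked(), which recalculates pending state after the mask change. A small illustrative sketch (not part of this patch) of the user-visible guarantee this upholds: a signal that went pending while blocked is delivered as soon as it is unblocked, before sigprocmask() returns to the caller.

    /* illustrative sketch: unblocking a pending signal delivers it immediately */
    #include <signal.h>
    #include <stdio.h>

    static volatile sig_atomic_t got;

    static void handler(int sig) { got = sig; }

    int main(void)
    {
        sigset_t set;

        signal(SIGUSR1, handler);

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);

        raise(SIGUSR1);                         /* stays pending: it is blocked */
        printf("after raise: got=%d\n", (int)got);      /* still 0 */

        sigprocmask(SIG_UNBLOCK, &set, NULL);   /* handler runs before this returns */
        printf("after unblock: got=%d\n", (int)got);    /* now SIGUSR1 */
        return 0;
    }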