author    | Oleg Nesterov <oleg@tv-sign.ru>                      | 2008-02-05 01:27:24 -0500
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-02-05 12:44:07 -0500
commit    | ed5d2cac114202fe2978a9cbcab8f5032796d538 (patch)
tree      | aa9aaea1aa0945bd9159685d1b04897d105a90c9
parent    | f558b7e408026eb3c6afcd0e8fc1f7fe31195a6a (diff)
exec: rework the group exit and fix the race with kill
As Roland pointed out, we have a very old problem with exec: de_thread()
sets SIGNAL_GROUP_EXIT, kills the other threads, changes ->group_leader, and
only then clears signal->flags. All signals (even fatal ones) sent in this
window (which is not too small) are lost.

With this patch exec no longer abuses SIGNAL_GROUP_EXIT. The new helper,
signal_group_exit(), should be used to detect an exit_group() or exec() in
progress. It could have more users, but this patch makes only the strictly
necessary changes.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Davide Libenzi <davidel@xmailserver.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Robin Holt <holt@sgi.com>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
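
For context, below is a minimal user-space sketch of the race described in the
message above: a multi-threaded task group calls execve() while another process
sends it SIGKILL. On a kernel with the old de_thread() behaviour, a kill that
lands inside the exec window can be dropped and the child survives the exec.
This is only an illustration of the scenario, not a reliable reproducer; the
fork/exec structure, the helper thread, and the sleep intervals are arbitrary
choices made here and are not part of the patch.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void *spin(void *arg)
{
        /* Extra thread, so that execve() has to go through de_thread(). */
        (void)arg;
        for (;;)
                pause();
        return NULL;
}

int main(int argc, char **argv)
{
        int status;
        pid_t pid;

        if (argc > 1) {         /* the re-exec'ed instance just lingers */
                sleep(5);
                return 0;
        }

        pid = fork();
        if (pid == 0) {
                char *args[] = { argv[0], "child", NULL };
                pthread_t t;

                pthread_create(&t, NULL, spin, NULL);
                usleep(1000);           /* let the helper thread start */
                execv(argv[0], args);   /* de_thread() runs in here */
                _exit(1);
        }

        usleep(1500);                   /* aim (very roughly) at the exec window */
        kill(pid, SIGKILL);

        waitpid(pid, &status, 0);
        if (WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
                printf("child was killed, as expected\n");
        else
                printf("child survived SIGKILL (status 0x%x), the window was hit\n",
                       status);
        return 0;
}

Compile with -pthread. On a patched kernel the child should always be reported
as killed by SIGKILL; actually hitting the window on an unpatched kernel would
typically require running this many times, since the window is tiny.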
-rw-r--r-- | fs/exec.c             | 13
-rw-r--r-- | include/linux/sched.h |  7
-rw-r--r-- | kernel/exit.c         |  3
-rw-r--r-- | kernel/signal.c       |  4
4 files changed, 15 insertions, 12 deletions
diff --git a/fs/exec.c b/fs/exec.c
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -760,7 +760,7 @@ static int de_thread(struct task_struct *tsk)
         */
        read_lock(&tasklist_lock);
        spin_lock_irq(lock);
-       if (sig->flags & SIGNAL_GROUP_EXIT) {
+       if (signal_group_exit(sig)) {
                /*
                 * Another group action in progress, just
                 * return so that the signal is processed.
@@ -778,6 +778,7 @@ static int de_thread(struct task_struct *tsk)
        if (unlikely(tsk->group_leader == task_child_reaper(tsk)))
                task_active_pid_ns(tsk)->child_reaper = tsk;

+       sig->group_exit_task = tsk;
        zap_other_threads(tsk);
        read_unlock(&tasklist_lock);

@@ -802,7 +803,6 @@ static int de_thread(struct task_struct *tsk)
        }

        sig->notify_count = count;
-       sig->group_exit_task = tsk;
        while (atomic_read(&sig->count) > count) {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(lock);
@@ -871,15 +871,10 @@ static int de_thread(struct task_struct *tsk)
                leader->exit_state = EXIT_DEAD;

                write_unlock_irq(&tasklist_lock);
        }

        sig->group_exit_task = NULL;
        sig->notify_count = 0;
-       /*
-        * There may be one thread left which is just exiting,
-        * but it's safe to stop telling the group to kill themselves.
-        */
-       sig->flags = 0;

 no_thread_group:
        exit_itimers(sig);
@@ -1549,7 +1544,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
        int err = -EAGAIN;

        spin_lock_irq(&tsk->sighand->siglock);
-       if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
+       if (!signal_group_exit(tsk->signal)) {
                tsk->signal->group_exit_code = exit_code;
                zap_process(tsk);
                err = 0;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 680bb03a4b90..483ea4e1accf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -555,6 +555,13 @@ struct signal_struct {
 #define SIGNAL_STOP_CONTINUED  0x00000004 /* SIGCONT since WCONTINUED reap */
 #define SIGNAL_GROUP_EXIT      0x00000008 /* group exit in progress */

+/* If true, all threads except ->group_exit_task have pending SIGKILL */
+static inline int signal_group_exit(const struct signal_struct *sig)
+{
+       return  (sig->flags & SIGNAL_GROUP_EXIT) ||
+               (sig->group_exit_task != NULL);
+}
+
 /*
  * Some day this will be a full-fledged user tracking system..
  */
diff --git a/kernel/exit.c b/kernel/exit.c
index 9e459fefda77..9d3d0f0b27d9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1083,11 +1083,12 @@ do_group_exit(int exit_code)
                struct signal_struct *const sig = current->signal;
                struct sighand_struct *const sighand = current->sighand;
                spin_lock_irq(&sighand->siglock);
-               if (sig->flags & SIGNAL_GROUP_EXIT)
+               if (signal_group_exit(sig))
                        /* Another thread got here before we took the lock. */
                        exit_code = sig->group_exit_code;
                else {
                        sig->group_exit_code = exit_code;
+                       sig->flags = SIGNAL_GROUP_EXIT;
                        zap_other_threads(current);
                }
                spin_unlock_irq(&sighand->siglock);
diff --git a/kernel/signal.c b/kernel/signal.c
index 1117b28488c2..6a5f97cd337a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -957,7 +957,6 @@ void zap_other_threads(struct task_struct *p)
 {
        struct task_struct *t;

-       p->signal->flags = SIGNAL_GROUP_EXIT;
        p->signal->group_stop_count = 0;

        for (t = next_thread(p); t != p; t = next_thread(t)) {
@@ -1697,7 +1696,8 @@ static int do_signal_stop(int signr)
        } else {
                struct task_struct *t;

-               if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
+               if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
+                   unlikely(sig->group_exit_task))
                        return 0;
                /*
                 * There is no group stop already in progress.