| field | value | date |
|---|---|---|
| author | Oleg Nesterov <oleg@redhat.com> | 2012-10-08 13:13:01 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-08 17:53:20 -0400 |
| commit | d5bbd43d5f450c3fca058f5b85f3dfb4e8cc88c9 (patch) | |
| tree | 9ee712ea3c2768dc9934e1e6003680793f303a00 | |
| parent | b5356a19ced273ef8a941be226f4dfdb95c23073 (diff) | |
exec: make de_thread() killable
Change de_thread() to use TASK_KILLABLE rather than TASK_UNINTERRUPTIBLE
while waiting for the other threads. The only complication is that we
must clear ->group_exit_task and ->notify_count before we return, and we
must do this under tasklist_lock. -EAGAIN is used to match the initial
signal_group_exit() check/return; the exact error code doesn't really matter.
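
Switching the sleep from TASK_UNINTERRUPTIBLE to TASK_KILLABLE is what creates the complication above: the sleeper now has to notice a fatal signal itself and undo any state it has published before bailing out. As a generic illustration of that idiom (a hypothetical helper, not code from this patch; de_thread() open-codes the same loop in the hunks below):

```c
#include <linux/sched.h>	/* set_current_state(), schedule(), fatal_signal_pending() */
#include <linux/types.h>	/* bool */

/*
 * Hypothetical sketch of a killable wait: sleep until @done(arg) is
 * true, but let SIGKILL (or group exit) end the wait.  With
 * TASK_UNINTERRUPTIBLE there is no such escape hatch, which is why the
 * old de_thread() could block forever.
 */
static int wait_done_killable(bool (*done)(void *), void *arg)
{
	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (done(arg))
			break;
		schedule();
		if (fatal_signal_pending(current))
			return -EAGAIN;	/* caller must undo its own bookkeeping */
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
```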
This fixes the (unlikely) race with coredump. de_thread() checks
signal_group_exit() before it starts to kill the subthreads, but this
can't help if another CLONE_VM (but non-CLONE_THREAD) task starts the
coredump after de_thread() unlocks ->siglock. In this case the killed
sub-thread can block in exit_mm() waiting for coredump_finish(), the
execing thread waits for that sub-thread, and the coredumping thread
waits for the execing thread. Deadlock.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
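
The three parties in the deadlock described above can be pictured with a small userspace sketch. This is illustrative only, not a reliable reproducer: every name here is made up for the example, the race window is tiny, and hitting it depends on timing and on core dumps being enabled (ulimit -c). Build with `gcc -pthread`.

```c
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>
#include <unistd.h>

/* Stack for the clone()d task; it lives in the shared address space. */
static char crasher_stack[64 * 1024];

/* The "CLONE_VM but non-CLONE_THREAD" task from the commit message:
 * it shares our mm but is its own thread group.  abort() makes it
 * start a coredump, which has to wait for every user of that mm. */
static int crasher(void *arg)
{
	(void)arg;
	abort();
	return 0;
}

/* The execing thread: execve() runs de_thread(), which first has to
 * kill and wait for its sibling threads (here: the main thread). */
static void *execer(void *arg)
{
	(void)arg;
	execl("/bin/true", "true", (char *)NULL);
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* Separate thread group sharing our address space. */
	clone(crasher, crasher_stack + sizeof(crasher_stack),
	      CLONE_VM | SIGCHLD, NULL);

	pthread_create(&t, NULL, execer, NULL);

	/* main() is the sub-thread de_thread() waits for.  If the
	 * crasher's coredump catches it in exit_mm(), the old
	 * TASK_UNINTERRUPTIBLE wait in de_thread() never ends. */
	pause();
	return 0;
}
```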
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | fs/exec.c | 16 |

1 file changed, 14 insertions(+), 2 deletions(-)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -878,9 +878,11 @@ static int de_thread(struct task_struct *tsk)
 	sig->notify_count--;
 
 	while (sig->notify_count) {
-		__set_current_state(TASK_UNINTERRUPTIBLE);
+		__set_current_state(TASK_KILLABLE);
 		spin_unlock_irq(lock);
 		schedule();
+		if (unlikely(__fatal_signal_pending(tsk)))
+			goto killed;
 		spin_lock_irq(lock);
 	}
 	spin_unlock_irq(lock);
@@ -898,9 +900,11 @@ static int de_thread(struct task_struct *tsk)
 		write_lock_irq(&tasklist_lock);
 		if (likely(leader->exit_state))
 			break;
-		__set_current_state(TASK_UNINTERRUPTIBLE);
+		__set_current_state(TASK_KILLABLE);
 		write_unlock_irq(&tasklist_lock);
 		schedule();
+		if (unlikely(__fatal_signal_pending(tsk)))
+			goto killed;
 	}
 
 	/*
@@ -994,6 +998,14 @@ no_thread_group:
 
 	BUG_ON(!thread_group_leader(tsk));
 	return 0;
+
+killed:
+	/* protects against exit_notify() and __exit_signal() */
+	read_lock(&tasklist_lock);
+	sig->group_exit_task = NULL;
+	sig->notify_count = 0;
+	read_unlock(&tasklist_lock);
+	return -EAGAIN;
 }
 
 char *get_task_comm(char *buf, struct task_struct *tsk)
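
For context on where the new -EAGAIN goes (not part of this patch, and paraphrased rather than quoted): in kernels of this vintage the exec path invokes de_thread() from flush_old_exec() in fs/exec.c and simply propagates any error, so a fatally-signalled execing thread fails the execve() and unwinds instead of sleeping forever. Roughly:

```c
#include <linux/binfmts.h>	/* struct linux_binprm */

/* Paraphrased sketch of the caller inside fs/exec.c, where de_thread()
 * is a local static function: flush_old_exec() bails out as soon as
 * de_thread() fails, so the -EAGAIN (whether from the initial
 * signal_group_exit() check or from the new 'killed' path) just fails
 * the execve() for the already-doomed task. */
int flush_old_exec(struct linux_binprm *bprm)
{
	int retval;

	retval = de_thread(current);
	if (retval)
		goto out;

	/* ... tear down the old image and continue the exec ... */
	return 0;
out:
	return retval;
}
```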
