| author | Oleg Nesterov &lt;oleg@tv-sign.ru&gt; | 2007-10-17 02:27:23 -0400 |
| --- | --- | --- |
| committer | Linus Torvalds &lt;torvalds@woody.linux-foundation.org&gt; | 2007-10-17 11:42:54 -0400 |
| commit | 6db840fa7887980ef68a649640d506fe069eef0c (patch) | |
| tree | 6248c1e65b572f1c2b14c46848e5a18df003f60e /fs/exec.c | |
| parent | 356d6d5058c8082b9e811838ab2fa27825c947e4 (diff) | |
exec: RT sub-thread can livelock and monopolize CPU on exec
de_thread() calls yield() in a loop while waiting for ->group_leader to become
a zombie. This livelocks if an rt-prio exec'ing thread shares its CPU with
->group_leader: yield() never cedes the CPU to the lower-priority leader, so
the leader can never exit. Change the code to use the
->group_exit_task/notify_count mechanics instead, sleeping in
TASK_UNINTERRUPTIBLE until the exiting threads wake us.

This patch certainly uglifies the code; perhaps someone can suggest something
better.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
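
Why yield() livelocks here: under SCHED_FIFO, yield() only cedes the CPU to runnable tasks of equal or higher priority, so an rt-prio thread spinning in `while (...) yield();` can starve a SCHED_OTHER group leader on the same CPU indefinitely, and the condition it waits for never becomes true. The userspace sketch below is a hypothetical illustration, not part of the patch: the thread names `execer` and `leader` are invented, it assumes CPU 0 is online and CAP_SYS_NICE (or root) for SCHED_FIFO, and it builds with `gcc -pthread`. The `buggy` path mirrors the old yield() loop; the other path mirrors the patch's sleep-and-wake scheme.

```c
/*
 * Hypothetical userspace model of the bug (illustration only).  With
 * buggy = true, a SCHED_FIFO waiter busy-yields on CPU 0 waiting for a
 * flag that only a SCHED_OTHER thread on the same CPU can set --
 * sched_yield() never cedes the CPU to a lower-priority task, so the
 * waiter can spin forever.  With buggy = false it blocks on a condition
 * variable instead, mirroring the patch's switch from yield() to
 * __set_current_state(TASK_UNINTERRUPTIBLE) + schedule().
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done;

static void pin_to_cpu0(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
}

/* Plays the old group leader: low priority, same CPU as the waiter. */
static void *leader(void *arg)
{
	pin_to_cpu0();
	pthread_mutex_lock(&lock);
	__atomic_store_n(&done, true, __ATOMIC_RELEASE);	/* "exited" */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Plays the rt-prio exec'ing thread in de_thread(). */
static void *execer(void *arg)
{
	bool buggy = *(bool *)arg;
	struct sched_param sp = { .sched_priority = 10 };
	int err;

	pin_to_cpu0();
	err = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sp);
	if (err)	/* without CAP_SYS_NICE the demo degrades gracefully */
		fprintf(stderr, "SCHED_FIFO failed: %s\n", strerror(err));

	if (buggy) {
		/* Old code: yield() never runs the lower-prio "leader". */
		while (!__atomic_load_n(&done, __ATOMIC_ACQUIRE))
			sched_yield();
	} else {
		/* New code: sleep; the exiting "leader" wakes us. */
		pthread_mutex_lock(&lock);
		while (!done)
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	bool buggy = false;	/* true reproduces the livelock (hangs) */
	pthread_t e, l;

	pthread_create(&e, NULL, execer, &buggy);
	pthread_create(&l, NULL, leader, NULL);
	pthread_join(e, NULL);
	pthread_join(l, NULL);
	puts("exec completed");
	return 0;
}
```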
Diffstat (limited to 'fs/exec.c')
-rw-r--r--  fs/exec.c  28

1 file changed, 15 insertions(+), 13 deletions(-)
```diff
@@ -801,16 +801,15 @@ static int de_thread(struct task_struct *tsk)
 			hrtimer_restart(&sig->real_timer);
 			spin_lock_irq(lock);
 		}
+
+	sig->notify_count = count;
+	sig->group_exit_task = tsk;
 	while (atomic_read(&sig->count) > count) {
-		sig->group_exit_task = tsk;
-		sig->notify_count = count;
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(lock);
 		schedule();
 		spin_lock_irq(lock);
 	}
-	sig->group_exit_task = NULL;
-	sig->notify_count = 0;
 	spin_unlock_irq(lock);
 
 	/*
@@ -819,14 +818,17 @@ static int de_thread(struct task_struct *tsk)
 	 * and to assume its PID:
 	 */
 	if (!thread_group_leader(tsk)) {
-		/*
-		 * Wait for the thread group leader to be a zombie.
-		 * It should already be zombie at this point, most
-		 * of the time.
-		 */
 		leader = tsk->group_leader;
-		while (leader->exit_state != EXIT_ZOMBIE)
-			yield();
+
+		sig->notify_count = -1;
+		for (;;) {
+			write_lock_irq(&tasklist_lock);
+			if (likely(leader->exit_state))
+				break;
+			__set_current_state(TASK_UNINTERRUPTIBLE);
+			write_unlock_irq(&tasklist_lock);
+			schedule();
+		}
 
 		/*
 		 * The only record we have of the real-time age of a
@@ -840,8 +842,6 @@ static int de_thread(struct task_struct *tsk)
 		 */
 		tsk->start_time = leader->start_time;
 
-		write_lock_irq(&tasklist_lock);
-
 		BUG_ON(leader->tgid != tsk->tgid);
 		BUG_ON(tsk->pid == tsk->tgid);
 		/*
@@ -874,6 +874,8 @@ static int de_thread(struct task_struct *tsk)
 		write_unlock_irq(&tasklist_lock);
 	}
 
+	sig->group_exit_task = NULL;
+	sig->notify_count = 0;
 	/*
 	 * There may be one thread left which is just exiting,
 	 * but it's safe to stop telling the group to kill themselves.
```
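
Note that the diffstat above is limited to fs/exec.c, so the wake-up half of the handshake is filtered out of this view; it lives on the exit path in kernel/exit.c. The protocol the new code relies on: a positive sig->notify_count means "wake group_exit_task once only that many threads remain" (the first wait), and notify_count == -1 means "wake it once the old leader has exited" (the second wait). The following is a pared-down, paraphrased sketch of that exit-side logic with stubbed types; it is not verbatim kernel code, and the function names are invented.

```c
/*
 * Paraphrased sketch of the notify_count handshake (illustration only;
 * the real exit-side counterpart is in kernel/exit.c, which this
 * fs/exec.c-only diffstat filters out).
 */
struct task_struct;	/* opaque here */

struct signal_struct {
	int count;		/* live threads (an atomic_t in the kernel) */
	int notify_count;	/* exec side's wait target; -1 = second wait */
	struct task_struct *group_exit_task;	/* the exec'ing thread */
};

/* Stub standing in for the kernel's wake_up_process(). */
static void wake_up_process(struct task_struct *t) { (void)t; }

/* Exit path of each dying sub-thread (first wait). */
static void sub_thread_exits(struct signal_struct *sig)
{
	/* Once the group has shrunk to notify_count threads, the
	 * execer's "while (atomic_read(&sig->count) > count)" condition
	 * is false, so wake it up. */
	if (sig->group_exit_task && sig->count == sig->notify_count)
		wake_up_process(sig->group_exit_task);
}

/* Exit path of the old group leader (second wait). */
static void leader_exits(struct signal_struct *sig)
{
	/* notify_count == -1 flags an execer sleeping in the new
	 * "for (;;) { ... schedule(); }" loop above; wake it so it can
	 * observe leader->exit_state under tasklist_lock. */
	if (sig->group_exit_task && sig->notify_count < 0)
		wake_up_process(sig->group_exit_task);
}
```

Deferring the `sig->group_exit_task = NULL; sig->notify_count = 0;` reset to the end (the last two added lines in the diff) is what lets the exit path use those fields across both waits; the old code cleared them between the waits, which is why the leader wait had to poll with yield() in the first place.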