diff options
author | Oleg Nesterov <oleg@tv-sign.ru> | 2007-10-17 02:27:22 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-17 11:42:53 -0400 |
commit | b2c903b8790467ae400f6992ac01bc8913b49e19 (patch) | |
tree | 656e6523b76856742cb38e36acf90ec5641bc7be /fs/exec.c | |
parent | 0840a90d943bcde2fbfeabd3c256236eed2273cd (diff) |
exec: simplify the new ->sighand allocation
de_thread() pre-allocates newsighand to make sure that exec() can't fail after
killing all sub-threads. IMHO, this buys nothing, but complicates the code:
- this is (mostly) needed to handle CLONE_SIGHAND without CLONE_THREAD
tasks, a very unlikely (if ever used) case
- unless we already have some serious problems, GFP_KERNEL allocation
should not fail
- ENOMEM can still happen after de_thread(); ->sighand is not the last
object we have to allocate
Change the code to allocate the new ->sighand on demand.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/exec.c')
-rw-r--r-- | fs/exec.c | 24 |
1 file changed, 9 insertions, 15 deletions
@@ -747,7 +747,7 @@ static int exec_mmap(struct mm_struct *mm) | |||
747 | static int de_thread(struct task_struct *tsk) | 747 | static int de_thread(struct task_struct *tsk) |
748 | { | 748 | { |
749 | struct signal_struct *sig = tsk->signal; | 749 | struct signal_struct *sig = tsk->signal; |
750 | struct sighand_struct *newsighand, *oldsighand = tsk->sighand; | 750 | struct sighand_struct *oldsighand = tsk->sighand; |
751 | spinlock_t *lock = &oldsighand->siglock; | 751 | spinlock_t *lock = &oldsighand->siglock; |
752 | struct task_struct *leader = NULL; | 752 | struct task_struct *leader = NULL; |
753 | int count; | 753 | int count; |
@@ -761,10 +761,6 @@ static int de_thread(struct task_struct *tsk) | |||
761 | return 0; | 761 | return 0; |
762 | } | 762 | } |
763 | 763 | ||
764 | newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); | ||
765 | if (!newsighand) | ||
766 | return -ENOMEM; | ||
767 | |||
768 | if (thread_group_empty(tsk)) | 764 | if (thread_group_empty(tsk)) |
769 | goto no_thread_group; | 765 | goto no_thread_group; |
770 | 766 | ||
@@ -781,7 +777,6 @@ static int de_thread(struct task_struct *tsk) | |||
781 | */ | 777 | */ |
782 | spin_unlock_irq(lock); | 778 | spin_unlock_irq(lock); |
783 | read_unlock(&tasklist_lock); | 779 | read_unlock(&tasklist_lock); |
784 | kmem_cache_free(sighand_cachep, newsighand); | ||
785 | return -EAGAIN; | 780 | return -EAGAIN; |
786 | } | 781 | } |
787 | 782 | ||
@@ -899,17 +894,16 @@ no_thread_group: | |||
899 | if (leader) | 894 | if (leader) |
900 | release_task(leader); | 895 | release_task(leader); |
901 | 896 | ||
902 | if (atomic_read(&oldsighand->count) == 1) { | 897 | if (atomic_read(&oldsighand->count) != 1) { |
903 | /* | 898 | struct sighand_struct *newsighand; |
904 | * Now that we nuked the rest of the thread group, | ||
905 | * it turns out we are not sharing sighand any more either. | ||
906 | * So we can just keep it. | ||
907 | */ | ||
908 | kmem_cache_free(sighand_cachep, newsighand); | ||
909 | } else { | ||
910 | /* | 899 | /* |
911 | * Move our state over to newsighand and switch it in. | 900 | * This ->sighand is shared with the CLONE_SIGHAND |
901 | * but not CLONE_THREAD task, switch to the new one. | ||
912 | */ | 902 | */ |
903 | newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); | ||
904 | if (!newsighand) | ||
905 | return -ENOMEM; | ||
906 | |||
913 | atomic_set(&newsighand->count, 1); | 907 | atomic_set(&newsighand->count, 1); |
914 | memcpy(newsighand->action, oldsighand->action, | 908 | memcpy(newsighand->action, oldsighand->action, |
915 | sizeof(newsighand->action)); | 909 | sizeof(newsighand->action)); |