Diffstat (limited to 'kernel/exit.c')
-rw-r--r--	kernel/exit.c	92
1 file changed, 46 insertions(+), 46 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 93d2711b9381..eb4d6470d1d0 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -46,6 +46,7 @@
 #include <linux/resource.h>
 #include <linux/blkdev.h>
 #include <linux/task_io_accounting_ops.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -85,7 +86,6 @@ static void __exit_signal(struct task_struct *tsk)
 	BUG_ON(!sig);
 	BUG_ON(!atomic_read(&sig->count));
 
-	rcu_read_lock();
 	sighand = rcu_dereference(tsk->sighand);
 	spin_lock(&sighand->siglock);
 
@@ -121,6 +121,7 @@ static void __exit_signal(struct task_struct *tsk)
 		sig->nivcsw += tsk->nivcsw;
 		sig->inblock += task_io_get_inblock(tsk);
 		sig->oublock += task_io_get_oublock(tsk);
+		task_io_accounting_add(&sig->ioac, &tsk->ioac);
 		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
 		sig = NULL; /* Marker for below. */
 	}
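The new task_io_accounting_add() call rolls a dying thread's I/O counters into the shared signal struct so the group totals survive the thread. A minimal sketch of what such a helper does, with field names recalled from struct task_io_accounting (illustrative, not a verbatim kernel definition; the CONFIG_TASK_XACCT fields rchar/wchar/syscr/syscw would be summed the same way):

	/* Illustrative only: field-wise accumulation of I/O counters. */
	static inline void task_io_accounting_add(struct task_io_accounting *dst,
						  struct task_io_accounting *src)
	{
		dst->read_bytes += src->read_bytes;	/* bytes actually read from storage */
		dst->write_bytes += src->write_bytes;	/* bytes submitted for writeback */
		dst->cancelled_write_bytes += src->cancelled_write_bytes;
	}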
@@ -136,7 +137,6 @@ static void __exit_signal(struct task_struct *tsk)
 	tsk->signal = NULL;
 	tsk->sighand = NULL;
 	spin_unlock(&sighand->siglock);
-	rcu_read_unlock();
 
 	__cleanup_sighand(sighand);
 	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
@@ -152,27 +152,17 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 	put_task_struct(container_of(rhp, struct task_struct, rcu));
 }
 
-/*
- * Do final ptrace-related cleanup of a zombie being reaped.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-static void ptrace_release_task(struct task_struct *p)
-{
-	BUG_ON(!list_empty(&p->ptraced));
-	ptrace_unlink(p);
-	BUG_ON(!list_empty(&p->ptrace_entry));
-}
 
 void release_task(struct task_struct * p)
 {
 	struct task_struct *leader;
 	int zap_leader;
 repeat:
+	tracehook_prepare_release_task(p);
 	atomic_dec(&p->user->processes);
 	proc_flush_task(p);
 	write_lock_irq(&tasklist_lock);
-	ptrace_release_task(p);
+	tracehook_finish_release_task(p);
 	__exit_signal(p);
 
 	/*
@@ -194,6 +184,13 @@ repeat:
 		 * that case.
 		 */
 		zap_leader = task_detached(leader);
+
+		/*
+		 * This maintains the invariant that release_task()
+		 * only runs on a task in EXIT_DEAD, just for sanity.
+		 */
+		if (zap_leader)
+			leader->exit_state = EXIT_DEAD;
 	}
 
 	write_unlock_irq(&tasklist_lock);
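For context, this hunk sits in release_task()'s tail, which reaps a detached zombie leader once its last live thread is gone; the `repeat:` label in the previous hunk is the target of the loop shown here. The sketch below is paraphrased from memory of that surrounding code, not part of this diff:

	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) &&
	    leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores
		 * SIGCHLD, then we are the one to release the leader.
		 */
		zap_leader = task_detached(leader);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;	/* the new invariant */
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;	/* release the leader through the same path */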
@@ -432,7 +429,7 @@ void daemonize(const char *name, ...)
 	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
 	 * or suspend transition begins right now.
 	 */
-	current->flags |= PF_NOFREEZE;
+	current->flags |= (PF_NOFREEZE | PF_KTHREAD);
 
 	if (current->nsproxy != &init_nsproxy) {
 		get_nsproxy(&init_nsproxy);
@@ -557,8 +554,6 @@ void put_fs_struct(struct fs_struct *fs)
 	if (atomic_dec_and_test(&fs->count)) {
 		path_put(&fs->root);
 		path_put(&fs->pwd);
-		if (fs->altroot.dentry)
-			path_put(&fs->altroot);
 		kmem_cache_free(fs_cachep, fs);
 	}
 }
@@ -666,26 +661,40 @@ assign_new_owner:
 static void exit_mm(struct task_struct * tsk)
 {
 	struct mm_struct *mm = tsk->mm;
+	struct core_state *core_state;
 
 	mm_release(tsk, mm);
 	if (!mm)
 		return;
 	/*
 	 * Serialize with any possible pending coredump.
-	 * We must hold mmap_sem around checking core_waiters
+	 * We must hold mmap_sem around checking core_state
 	 * and clearing tsk->mm.  The core-inducing thread
-	 * will increment core_waiters for each thread in the
+	 * will increment ->nr_threads for each thread in the
 	 * group with ->mm != NULL.
 	 */
 	down_read(&mm->mmap_sem);
-	if (mm->core_waiters) {
+	core_state = mm->core_state;
+	if (core_state) {
+		struct core_thread self;
 		up_read(&mm->mmap_sem);
-		down_write(&mm->mmap_sem);
-		if (!--mm->core_waiters)
-			complete(mm->core_startup_done);
-		up_write(&mm->mmap_sem);
 
-		wait_for_completion(&mm->core_done);
+		self.task = tsk;
+		self.next = xchg(&core_state->dumper.next, &self);
+		/*
+		 * Implies mb(), the result of xchg() must be visible
+		 * to core_state->dumper.
+		 */
+		if (atomic_dec_and_test(&core_state->nr_threads))
+			complete(&core_state->startup);
+
+		for (;;) {
+			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+			if (!self.task) /* see coredump_finish() */
+				break;
+			schedule();
+		}
+		__set_task_state(tsk, TASK_RUNNING);
 		down_read(&mm->mmap_sem);
 	}
 	atomic_inc(&mm->mm_count);
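The new exit path pushes each thread onto core_state->dumper via xchg() and parks until the dumper clears self.task; the "see coredump_finish()" comment points at the wake-up side in fs/exec.c. A sketch of that counterpart, reconstructed from memory of the matching change in this series and shown only to make the handshake legible:

	static void coredump_finish(struct mm_struct *mm)
	{
		struct core_thread *curr, *next;
		struct task_struct *task;

		next = mm->core_state->dumper.next;
		while ((curr = next) != NULL) {
			next = curr->next;
			task = curr->task;
			/*
			 * The exiting thread re-checks self.task after each
			 * wakeup; it must not see ->task == NULL before we
			 * have read ->next, hence the barrier.
			 */
			smp_mb();
			curr->task = NULL;
			wake_up_process(task);
		}

		mm->core_state = NULL;
	}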
@@ -863,7 +872,8 @@ static void forget_original_parent(struct task_struct *father)
  */
 static void exit_notify(struct task_struct *tsk, int group_dead)
 {
-	int state;
+	int signal;
+	void *cookie;
 
 	/*
 	 * This does two things:
@@ -900,22 +910,11 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 	    !capable(CAP_KILL))
 		tsk->exit_signal = SIGCHLD;
 
-	/* If something other than our normal parent is ptracing us, then
-	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
-	 * only has special meaning to our real parent.
-	 */
-	if (!task_detached(tsk) && thread_group_empty(tsk)) {
-		int signal = ptrace_reparented(tsk) ?
-				SIGCHLD : tsk->exit_signal;
-		do_notify_parent(tsk, signal);
-	} else if (tsk->ptrace) {
-		do_notify_parent(tsk, SIGCHLD);
-	}
+	signal = tracehook_notify_death(tsk, &cookie, group_dead);
+	if (signal > 0)
+		signal = do_notify_parent(tsk, signal);
 
-	state = EXIT_ZOMBIE;
-	if (task_detached(tsk) && likely(!tsk->ptrace))
-		state = EXIT_DEAD;
-	tsk->exit_state = state;
+	tsk->exit_state = signal < 0 ? EXIT_DEAD : EXIT_ZOMBIE;
 
 	/* mt-exec, de_thread() is waiting for us */
 	if (thread_group_leader(tsk) &&
@@ -925,8 +924,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 
 	write_unlock_irq(&tasklist_lock);
 
+	tracehook_report_death(tsk, signal, cookie, group_dead);
+
 	/* If the process is dead, release it - nobody will wait for it */
-	if (state == EXIT_DEAD)
+	if (signal < 0)
 		release_task(tsk);
 }
 
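Taken together, the two hunks above fold the open-coded ptrace/detached special cases into one return value from tracehook_notify_death(). A hypothetical paraphrase of the decision the hook must make to preserve the removed logic (notify_decision() is an illustrative name; DEATH_REAP is the negative sentinel from <linux/tracehook.h>):

	static int notify_decision(struct task_struct *tsk)
	{
		if (task_detached(tsk))		/* exit_signal == -1 */
			return tsk->ptrace ? SIGCHLD : DEATH_REAP;
		if (thread_group_empty(tsk))	/* last thread in the group */
			return ptrace_reparented(tsk) ? SIGCHLD : tsk->exit_signal;
		return tsk->ptrace ? SIGCHLD : 0;	/* 0: no signal, stay a zombie */
	}

A negative result (possibly returned by do_notify_parent() itself when the parent ignores SIGCHLD) is what drives exit_state to EXIT_DEAD and the immediate release_task() above.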
@@ -1005,10 +1006,7 @@ NORET_TYPE void do_exit(long code)
 	if (unlikely(!tsk->pid))
 		panic("Attempted to kill the idle task!");
 
-	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
-		current->ptrace_message = code;
-		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
-	}
+	tracehook_report_exit(&code);
 
 	/*
 	 * We're taking recursive faults here in do_exit.  Safest is to just
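This replacement is meant to be behavior-preserving: tracehook_report_exit() wraps exactly the PT_TRACE_EXIT check and PTRACE_EVENT_EXIT notification deleted here. From memory of <linux/tracehook.h> and <linux/ptrace.h> in this series (treat as illustrative rather than authoritative):

	static inline void tracehook_report_exit(long *exit_code)
	{
		ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
	}

	/* ptrace_event() performs the old open-coded dance: */
	static inline void ptrace_event(int mask, int event, unsigned long message)
	{
		if (mask && likely(!(current->ptrace & mask)))
			return;
		current->ptrace_message = message;
		ptrace_notify((event << 8) | SIGTRAP);
	}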
@@ -1354,6 +1352,8 @@ static int wait_task_zombie(struct task_struct *p, int options,
 		psig->coublock +=
 			task_io_get_oublock(p) +
 			sig->oublock + sig->coublock;
+		task_io_accounting_add(&psig->ioac, &p->ioac);
+		task_io_accounting_add(&psig->ioac, &sig->ioac);
 		spin_unlock_irq(&p->parent->sighand->siglock);
 	}
 