Diffstat (limited to 'kernel/exit.c')
-rw-r--r--  kernel/exit.c  63
1 file changed, 32 insertions(+), 31 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 546774a31a66..ceffc67b564a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -55,15 +55,14 @@
 #include <asm/unistd.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
-#include "cred-internals.h"
 
 static void exit_mm(struct task_struct * tsk);
 
-static void __unhash_process(struct task_struct *p)
+static void __unhash_process(struct task_struct *p, bool group_dead)
 {
 	nr_threads--;
 	detach_pid(p, PIDTYPE_PID);
-	if (thread_group_leader(p)) {
+	if (group_dead) {
 		detach_pid(p, PIDTYPE_PGID);
 		detach_pid(p, PIDTYPE_SID);
 
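The point of the new parameter: the caller now decides once whether this exit is the death of the whole thread group, instead of __unhash_process() re-deriving it via thread_group_leader(). A minimal sketch of the pattern, with hypothetical function names standing in for the real callers:

/* Sketch only: sample the predicate once and pass the boolean down,
 * so caller and callee can never disagree about group-wide teardown. */
static void tear_down(struct task_struct *p, bool group_dead)
{
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {			/* caller's snapshot */
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);
	}
}

static void on_exit(struct task_struct *tsk)
{
	bool group_dead = thread_group_leader(tsk);

	tear_down(tsk, group_dead);
}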
@@ -80,23 +79,26 @@ static void __unhash_process(struct task_struct *p)
 static void __exit_signal(struct task_struct *tsk)
 {
 	struct signal_struct *sig = tsk->signal;
+	bool group_dead = thread_group_leader(tsk);
 	struct sighand_struct *sighand;
+	struct tty_struct *uninitialized_var(tty);
 
-	BUG_ON(!sig);
-	BUG_ON(!atomic_read(&sig->count));
-
-	sighand = rcu_dereference(tsk->sighand);
+	sighand = rcu_dereference_check(tsk->sighand,
+					rcu_read_lock_held() ||
+					lockdep_tasklist_lock_is_held());
 	spin_lock(&sighand->siglock);
 
 	posix_cpu_timers_exit(tsk);
-	if (atomic_dec_and_test(&sig->count))
+	if (group_dead) {
 		posix_cpu_timers_exit_group(tsk);
-	else {
+		tty = sig->tty;
+		sig->tty = NULL;
+	} else {
 		/*
 		 * If there is any task waiting for the group exit
 		 * then notify it:
 		 */
-		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
+		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);
 
 		if (tsk == sig->curr_target)
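The wake-up test changes shape here: instead of comparing the old sig->count against notify_count, every non-leader exit decrements notify_count itself, and whichever thread brings it to zero wakes the waiter. The counterpart lives in de_thread() and the coredump path; the waiter side below is a simplified sketch, not the exact kernel code:

/* exiting thread, under ->siglock (this diff): the decrement that
 * reaches zero performs the wake-up. */
if (sig->notify_count > 0 && !--sig->notify_count)
	wake_up_process(sig->group_exit_task);

/* waiter side, simplified: publish how many exits to wait for,
 * then sleep until the countdown drains. */
sig->group_exit_task = current;
sig->notify_count = nr_exits_expected;		/* hypothetical name */
while (sig->notify_count) {
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&sighand->siglock);
	schedule();
	spin_lock_irq(&sighand->siglock);
}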
@@ -122,32 +124,24 @@ static void __exit_signal(struct task_struct *tsk)
 		sig->oublock += task_io_get_oublock(tsk);
 		task_io_accounting_add(&sig->ioac, &tsk->ioac);
 		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
-		sig = NULL; /* Marker for below. */
 	}
 
-	__unhash_process(tsk);
+	sig->nr_threads--;
+	__unhash_process(tsk, group_dead);
 
 	/*
 	 * Do this under ->siglock, we can race with another thread
 	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
 	 */
 	flush_sigqueue(&tsk->pending);
-
-	tsk->signal = NULL;
 	tsk->sighand = NULL;
 	spin_unlock(&sighand->siglock);
 
 	__cleanup_sighand(sighand);
 	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-	if (sig) {
+	if (group_dead) {
 		flush_sigqueue(&sig->shared_pending);
-		taskstats_tgid_free(sig);
-		/*
-		 * Make sure ->signal can't go away under rq->lock,
-		 * see account_group_exec_runtime().
-		 */
-		task_rq_unlock_wait(tsk);
-		__cleanup_signal(sig);
+		tty_kref_put(tty);
 	}
 }
 
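Group-dead teardown shrinks because signal_struct is no longer destroyed here: tsk->signal is left in place and sig is no longer NULLed as a marker, so the struct outlives this function and the task_rq_unlock_wait() workaround (keeping ->signal alive for account_group_exec_runtime()) becomes unnecessary. What must still happen late is the tty release: the reference is taken over under ->siglock and dropped only once the lock is gone. The uninitialized_var(tty) in the declarations merely silences a false "may be used uninitialized" warning, since tty is written and read only on the group_dead path. The pattern in isolation:

/* Transfer the reference under the lock, drop it after unlocking. */
struct tty_struct *tty = NULL;

spin_lock(&sighand->siglock);
if (group_dead) {
	tty = sig->tty;		/* steal signal_struct's tty reference */
	sig->tty = NULL;
}
spin_unlock(&sighand->siglock);

tty_kref_put(tty);		/* NULL-safe; runs without siglock held */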
@@ -170,8 +164,10 @@ void release_task(struct task_struct * p)
 repeat:
 	tracehook_prepare_release_task(p);
 	/* don't need to get the RCU readlock here - the process is dead and
-	 * can't be modifying its own credentials */
+	 * can't be modifying its own credentials. But shut RCU-lockdep up */
+	rcu_read_lock();
 	atomic_dec(&__task_cred(p)->user->processes);
+	rcu_read_unlock();
 
 	proc_flush_task(p);
 
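This hunk and the two below (close_files(), put_files_struct()) are functionally no-ops: the dereferences were already safe because nothing can change the structures under the caller. But with CONFIG_PROVE_RCU, the rcu_dereference() buried inside helpers such as __task_cred() and files_fdtable() complains unless it sees a read-side critical section or a lock it has been told about. The diff shows both idiomatic fixes:

/* 1) Wrap a known-safe access in a (cheap) read-side section: */
rcu_read_lock();
atomic_dec(&__task_cred(p)->user->processes);
rcu_read_unlock();

/* 2) Or state the legality conditions at the access itself, as
 * __exit_signal() now does: */
sighand = rcu_dereference_check(tsk->sighand,
				rcu_read_lock_held() ||
				lockdep_tasklist_lock_is_held());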
@@ -473,9 +469,11 @@ static void close_files(struct files_struct * files)
 	/*
 	 * It is safe to dereference the fd table without RCU or
 	 * ->file_lock because this is the last reference to the
-	 * files structure.
+	 * files structure. But use RCU to shut RCU-lockdep up.
 	 */
+	rcu_read_lock();
 	fdt = files_fdtable(files);
+	rcu_read_unlock();
 	for (;;) {
 		unsigned long set;
 		i = j * __NFDBITS;
@@ -521,10 +519,12 @@ void put_files_struct(struct files_struct *files)
 		 * at the end of the RCU grace period. Otherwise,
 		 * you can free files immediately.
 		 */
+		rcu_read_lock();
 		fdt = files_fdtable(files);
 		if (fdt != &files->fdtab)
 			kmem_cache_free(files_cachep, files);
 		free_fdtable(fdt);
+		rcu_read_unlock();
 	}
 }
 
@@ -849,12 +849,9 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 
 	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
 
-	/* mt-exec, de_thread() is waiting for us */
-	if (thread_group_leader(tsk) &&
-	    tsk->signal->group_exit_task &&
-	    tsk->signal->notify_count < 0)
+	/* mt-exec, de_thread() is waiting for group leader */
+	if (unlikely(tsk->signal->notify_count < 0))
 		wake_up_process(tsk->signal->group_exit_task);
-
 	write_unlock_irq(&tasklist_lock);
 
 	tracehook_report_death(tsk, signal, cookie, group_dead);
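The simplified test relies on a sign convention: de_thread() keeps notify_count positive while draining the other threads (the countdown in __exit_signal() above) and makes it negative once it is waiting only for the zombie group leader. Because only that path produces a negative value, and it sets group_exit_task beforehand, the dropped thread_group_leader() and group_exit_task checks were redundant. Schematically:

/* notify_count > 0: counting down exiting sub-threads
 * notify_count < 0: only the group leader's exit is awaited;
 *                   implies group_exit_task != NULL, hence: */
if (unlikely(tsk->signal->notify_count < 0))
	wake_up_process(tsk->signal->group_exit_task);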
@@ -944,7 +941,9 @@ NORET_TYPE void do_exit(long code)
 			preempt_count());
 
 	acct_update_integrals(tsk);
-
+	/* sync mm's RSS info before statistics gathering */
+	if (tsk->mm)
+		sync_mm_rss(tsk, tsk->mm);
 	group_dead = atomic_dec_and_test(&tsk->signal->live);
 	if (group_dead) {
 		hrtimer_cancel(&tsk->signal->real_timer);
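sync_mm_rss() is needed because RSS counters are now batched per task and folded into the shared per-mm counters only periodically; flushing before the accounting below (and before exit_mm() drops the mm) makes the statistics see the final values. The flush is roughly this shape, simplified from mm/memory.c:

/* Fold the task's cached RSS deltas into the mm-wide counters and
 * reset the per-task cache (sketch; the real code has config guards). */
void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (task->rss_stat.count[i]) {
			add_mm_counter(mm, i, task->rss_stat.count[i]);
			task->rss_stat.count[i] = 0;
		}
	}
	task->rss_stat.events = 0;
}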
@@ -993,8 +992,10 @@ NORET_TYPE void do_exit(long code)
 
 	exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
+	task_lock(tsk);
 	mpol_put(tsk->mempolicy);
 	tsk->mempolicy = NULL;
+	task_unlock(tsk);
 #endif
 #ifdef CONFIG_FUTEX
 	if (unlikely(current->pi_state_cache))
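The task_lock() pair closes a race against readers that inspect another task's mempolicy under task_lock(tsk): without it, such a reader could fetch the pointer just as the exiting task frees it. An illustrative reader following that protocol (hypothetical code, not from this diff):

/* task_lock() keeps tsk->mempolicy stable long enough to take our
 * own reference, which stays valid after the unlock. */
struct mempolicy *pol;

task_lock(tsk);
pol = tsk->mempolicy;
if (pol)
	mpol_get(pol);
task_unlock(tsk);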
@@ -1180,7 +1181,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 
 	if (unlikely(wo->wo_flags & WNOWAIT)) {
 		int exit_code = p->exit_code;
-		int why, status;
+		int why;
 
 		get_task_struct(p);
 		read_unlock(&tasklist_lock);