diff options
author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-10-23 01:01:49 -0400 |
---|---|---|
committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-10-23 01:01:49 -0400 |
commit | 3dd41424090a0ca3a660218d06afe6ff4441bad3 (patch) | |
tree | 511ef1bb1799027fc5aad574adce49120ecadd87 /kernel/exit.c | |
parent | 5c5456402d467969b217d7fdd6670f8c8600f5a8 (diff) | |
parent | f6f94e2ab1b33f0082ac22d71f66385a60d8157f (diff) |
Merge commit 'v2.6.36' into wip-merge-2.6.36
Conflicts:
Makefile
arch/x86/include/asm/unistd_32.h
arch/x86/kernel/syscall_table_32.S
kernel/sched.c
kernel/time/tick-sched.c
Relevant API and function changes (resolved in this commit):
- (API) .enqueue_task() (enqueue_task_litmus),
dequeue_task() (dequeue_task_litmus),
[litmus/sched_litmus.c]
- (API) .select_task_rq() (select_task_rq_litmus)
[litmus/sched_litmus.c]
- (API) sysrq_dump_trace_buffer() and sysrq_handle_kill_rt_tasks()
[litmus/sched_trace.c]
- struct kfifo internal buffer name changed (buffer -> buf)
[litmus/sched_trace.c]
- add_wait_queue_exclusive_locked -> __add_wait_queue_tail_exclusive
[litmus/fmlp.c]
- syscall numbers for both x86_32 and x86_64
Diffstat (limited to 'kernel/exit.c')
-rw-r--r-- | kernel/exit.c | 55 |
1 files changed, 24 insertions, 31 deletions
diff --git a/kernel/exit.c b/kernel/exit.c index 256ce8c2ebc8..b9d3bc6c21ec 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -55,17 +55,16 @@ | |||
55 | #include <asm/unistd.h> | 55 | #include <asm/unistd.h> |
56 | #include <asm/pgtable.h> | 56 | #include <asm/pgtable.h> |
57 | #include <asm/mmu_context.h> | 57 | #include <asm/mmu_context.h> |
58 | #include "cred-internals.h" | ||
59 | 58 | ||
60 | extern void exit_od_table(struct task_struct *t); | 59 | extern void exit_od_table(struct task_struct *t); |
61 | 60 | ||
62 | static void exit_mm(struct task_struct * tsk); | 61 | static void exit_mm(struct task_struct * tsk); |
63 | 62 | ||
64 | static void __unhash_process(struct task_struct *p) | 63 | static void __unhash_process(struct task_struct *p, bool group_dead) |
65 | { | 64 | { |
66 | nr_threads--; | 65 | nr_threads--; |
67 | detach_pid(p, PIDTYPE_PID); | 66 | detach_pid(p, PIDTYPE_PID); |
68 | if (thread_group_leader(p)) { | 67 | if (group_dead) { |
69 | detach_pid(p, PIDTYPE_PGID); | 68 | detach_pid(p, PIDTYPE_PGID); |
70 | detach_pid(p, PIDTYPE_SID); | 69 | detach_pid(p, PIDTYPE_SID); |
71 | 70 | ||
@@ -82,10 +81,9 @@ static void __unhash_process(struct task_struct *p) | |||
82 | static void __exit_signal(struct task_struct *tsk) | 81 | static void __exit_signal(struct task_struct *tsk) |
83 | { | 82 | { |
84 | struct signal_struct *sig = tsk->signal; | 83 | struct signal_struct *sig = tsk->signal; |
84 | bool group_dead = thread_group_leader(tsk); | ||
85 | struct sighand_struct *sighand; | 85 | struct sighand_struct *sighand; |
86 | 86 | struct tty_struct *uninitialized_var(tty); | |
87 | BUG_ON(!sig); | ||
88 | BUG_ON(!atomic_read(&sig->count)); | ||
89 | 87 | ||
90 | sighand = rcu_dereference_check(tsk->sighand, | 88 | sighand = rcu_dereference_check(tsk->sighand, |
91 | rcu_read_lock_held() || | 89 | rcu_read_lock_held() || |
@@ -93,14 +91,16 @@ static void __exit_signal(struct task_struct *tsk) | |||
93 | spin_lock(&sighand->siglock); | 91 | spin_lock(&sighand->siglock); |
94 | 92 | ||
95 | posix_cpu_timers_exit(tsk); | 93 | posix_cpu_timers_exit(tsk); |
96 | if (atomic_dec_and_test(&sig->count)) | 94 | if (group_dead) { |
97 | posix_cpu_timers_exit_group(tsk); | 95 | posix_cpu_timers_exit_group(tsk); |
98 | else { | 96 | tty = sig->tty; |
97 | sig->tty = NULL; | ||
98 | } else { | ||
99 | /* | 99 | /* |
100 | * If there is any task waiting for the group exit | 100 | * If there is any task waiting for the group exit |
101 | * then notify it: | 101 | * then notify it: |
102 | */ | 102 | */ |
103 | if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) | 103 | if (sig->notify_count > 0 && !--sig->notify_count) |
104 | wake_up_process(sig->group_exit_task); | 104 | wake_up_process(sig->group_exit_task); |
105 | 105 | ||
106 | if (tsk == sig->curr_target) | 106 | if (tsk == sig->curr_target) |
@@ -126,32 +126,24 @@ static void __exit_signal(struct task_struct *tsk) | |||
126 | sig->oublock += task_io_get_oublock(tsk); | 126 | sig->oublock += task_io_get_oublock(tsk); |
127 | task_io_accounting_add(&sig->ioac, &tsk->ioac); | 127 | task_io_accounting_add(&sig->ioac, &tsk->ioac); |
128 | sig->sum_sched_runtime += tsk->se.sum_exec_runtime; | 128 | sig->sum_sched_runtime += tsk->se.sum_exec_runtime; |
129 | sig = NULL; /* Marker for below. */ | ||
130 | } | 129 | } |
131 | 130 | ||
132 | __unhash_process(tsk); | 131 | sig->nr_threads--; |
132 | __unhash_process(tsk, group_dead); | ||
133 | 133 | ||
134 | /* | 134 | /* |
135 | * Do this under ->siglock, we can race with another thread | 135 | * Do this under ->siglock, we can race with another thread |
136 | * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. | 136 | * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. |
137 | */ | 137 | */ |
138 | flush_sigqueue(&tsk->pending); | 138 | flush_sigqueue(&tsk->pending); |
139 | |||
140 | tsk->signal = NULL; | ||
141 | tsk->sighand = NULL; | 139 | tsk->sighand = NULL; |
142 | spin_unlock(&sighand->siglock); | 140 | spin_unlock(&sighand->siglock); |
143 | 141 | ||
144 | __cleanup_sighand(sighand); | 142 | __cleanup_sighand(sighand); |
145 | clear_tsk_thread_flag(tsk,TIF_SIGPENDING); | 143 | clear_tsk_thread_flag(tsk,TIF_SIGPENDING); |
146 | if (sig) { | 144 | if (group_dead) { |
147 | flush_sigqueue(&sig->shared_pending); | 145 | flush_sigqueue(&sig->shared_pending); |
148 | taskstats_tgid_free(sig); | 146 | tty_kref_put(tty); |
149 | /* | ||
150 | * Make sure ->signal can't go away under rq->lock, | ||
151 | * see account_group_exec_runtime(). | ||
152 | */ | ||
153 | task_rq_unlock_wait(tsk); | ||
154 | __cleanup_signal(sig); | ||
155 | } | 147 | } |
156 | } | 148 | } |
157 | 149 | ||
@@ -781,9 +773,12 @@ static void forget_original_parent(struct task_struct *father) | |||
781 | struct task_struct *p, *n, *reaper; | 773 | struct task_struct *p, *n, *reaper; |
782 | LIST_HEAD(dead_children); | 774 | LIST_HEAD(dead_children); |
783 | 775 | ||
784 | exit_ptrace(father); | ||
785 | |||
786 | write_lock_irq(&tasklist_lock); | 776 | write_lock_irq(&tasklist_lock); |
777 | /* | ||
778 | * Note that exit_ptrace() and find_new_reaper() might | ||
779 | * drop tasklist_lock and reacquire it. | ||
780 | */ | ||
781 | exit_ptrace(father); | ||
787 | reaper = find_new_reaper(father); | 782 | reaper = find_new_reaper(father); |
788 | 783 | ||
789 | list_for_each_entry_safe(p, n, &father->children, sibling) { | 784 | list_for_each_entry_safe(p, n, &father->children, sibling) { |
@@ -859,12 +854,9 @@ static void exit_notify(struct task_struct *tsk, int group_dead) | |||
859 | 854 | ||
860 | tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE; | 855 | tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE; |
861 | 856 | ||
862 | /* mt-exec, de_thread() is waiting for us */ | 857 | /* mt-exec, de_thread() is waiting for group leader */ |
863 | if (thread_group_leader(tsk) && | 858 | if (unlikely(tsk->signal->notify_count < 0)) |
864 | tsk->signal->group_exit_task && | ||
865 | tsk->signal->notify_count < 0) | ||
866 | wake_up_process(tsk->signal->group_exit_task); | 859 | wake_up_process(tsk->signal->group_exit_task); |
867 | |||
868 | write_unlock_irq(&tasklist_lock); | 860 | write_unlock_irq(&tasklist_lock); |
869 | 861 | ||
870 | tracehook_report_death(tsk, signal, cookie, group_dead); | 862 | tracehook_report_death(tsk, signal, cookie, group_dead); |
@@ -1007,8 +999,10 @@ NORET_TYPE void do_exit(long code) | |||
1007 | 999 | ||
1008 | exit_notify(tsk, group_dead); | 1000 | exit_notify(tsk, group_dead); |
1009 | #ifdef CONFIG_NUMA | 1001 | #ifdef CONFIG_NUMA |
1002 | task_lock(tsk); | ||
1010 | mpol_put(tsk->mempolicy); | 1003 | mpol_put(tsk->mempolicy); |
1011 | tsk->mempolicy = NULL; | 1004 | tsk->mempolicy = NULL; |
1005 | task_unlock(tsk); | ||
1012 | #endif | 1006 | #endif |
1013 | #ifdef CONFIG_FUTEX | 1007 | #ifdef CONFIG_FUTEX |
1014 | if (unlikely(current->pi_state_cache)) | 1008 | if (unlikely(current->pi_state_cache)) |
@@ -1396,8 +1390,7 @@ static int wait_task_stopped(struct wait_opts *wo, | |||
1396 | if (!unlikely(wo->wo_flags & WNOWAIT)) | 1390 | if (!unlikely(wo->wo_flags & WNOWAIT)) |
1397 | *p_code = 0; | 1391 | *p_code = 0; |
1398 | 1392 | ||
1399 | /* don't need the RCU readlock here as we're holding a spinlock */ | 1393 | uid = task_uid(p); |
1400 | uid = __task_cred(p)->uid; | ||
1401 | unlock_sig: | 1394 | unlock_sig: |
1402 | spin_unlock_irq(&p->sighand->siglock); | 1395 | spin_unlock_irq(&p->sighand->siglock); |
1403 | if (!exit_code) | 1396 | if (!exit_code) |
@@ -1470,7 +1463,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) | |||
1470 | } | 1463 | } |
1471 | if (!unlikely(wo->wo_flags & WNOWAIT)) | 1464 | if (!unlikely(wo->wo_flags & WNOWAIT)) |
1472 | p->signal->flags &= ~SIGNAL_STOP_CONTINUED; | 1465 | p->signal->flags &= ~SIGNAL_STOP_CONTINUED; |
1473 | uid = __task_cred(p)->uid; | 1466 | uid = task_uid(p); |
1474 | spin_unlock_irq(&p->sighand->siglock); | 1467 | spin_unlock_irq(&p->sighand->siglock); |
1475 | 1468 | ||
1476 | pid = task_pid_vnr(p); | 1469 | pid = task_pid_vnr(p); |