 -rw-r--r--  arch/mips/kernel/irixsig.c |   4
 -rw-r--r--  arch/um/kernel/smp.c       |   1
 -rw-r--r--  drivers/char/tty_io.c      |  20
 -rw-r--r--  fs/exec.c                  |  30
 -rw-r--r--  include/linux/init_task.h  |   2
 -rw-r--r--  include/linux/pid.h        |   2
 -rw-r--r--  include/linux/sched.h      |  51
 -rw-r--r--  include/linux/signal.h     |   2
 -rw-r--r--  include/linux/slab.h       |   1
 -rw-r--r--  kernel/exit.c              | 129
 -rw-r--r--  kernel/fork.c              | 121
 -rw-r--r--  kernel/kmod.c              |   2
 -rw-r--r--  kernel/pid.c               |  40
 -rw-r--r--  kernel/ptrace.c            |   8
 -rw-r--r--  kernel/signal.c            | 344
 -rw-r--r--  kernel/sys.c               |  73
16 files changed, 309 insertions(+), 521 deletions(-)
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 08273a2a501d..8150f071f80a 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -603,7 +603,7 @@ repeat:
 		/* move to end of parent's list to avoid starvation */
 		write_lock_irq(&tasklist_lock);
 		remove_parent(p);
-		add_parent(p, p->parent);
+		add_parent(p);
 		write_unlock_irq(&tasklist_lock);
 		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
 		if (retval)
@@ -643,7 +643,7 @@ repeat:
 			write_lock_irq(&tasklist_lock);
 			remove_parent(p);
 			p->parent = p->real_parent;
-			add_parent(p, p->parent);
+			add_parent(p);
 			do_notify_parent(p, SIGCHLD);
 			write_unlock_irq(&tasklist_lock);
 		} else
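The add_parent() change above is mechanical: the macro now takes the parent implicitly from p->parent (see the include/linux/sched.h hunk below), so callers must set p->parent before relinking. A minimal sketch of the new contract, under tasklist_lock (illustration only):

    #define remove_parent(p)  list_del_init(&(p)->sibling)
    #define add_parent(p)     list_add_tail(&(p)->sibling, &(p)->parent->children)

    write_lock_irq(&tasklist_lock);
    remove_parent(p);                   /* unlink from the current parent */
    p->parent = p->real_parent;         /* pick the new parent first...   */
    add_parent(p);                      /* ...then link under p->parent   */
    write_unlock_irq(&tasklist_lock);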
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index c8d8d0ac1a7f..511116aebaf7 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -143,7 +143,6 @@ void smp_prepare_cpus(unsigned int maxcpus)
 		idle = idle_thread(cpu);
 
 		init_idle(idle, cpu);
-		unhash_process(idle);
 
 		waittime = 200000000;
 		while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 811dadb9ce3e..0bfd1b63662e 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1094,8 +1094,8 @@ static void do_tty_hangup(void *data)
 		p->signal->tty = NULL;
 		if (!p->signal->leader)
 			continue;
-		send_group_sig_info(SIGHUP, SEND_SIG_PRIV, p);
-		send_group_sig_info(SIGCONT, SEND_SIG_PRIV, p);
+		group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
+		group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
 		if (tty->pgrp > 0)
 			p->signal->tty_old_pgrp = tty->pgrp;
 	} while_each_task_pid(tty->session, PIDTYPE_SID, p);
@@ -2672,7 +2672,7 @@ static void __do_SAK(void *arg)
 	tty_hangup(tty);
 #else
 	struct tty_struct *tty = arg;
-	struct task_struct *p;
+	struct task_struct *g, *p;
 	int session;
 	int i;
 	struct file *filp;
@@ -2693,8 +2693,18 @@ static void __do_SAK(void *arg)
 		tty->driver->flush_buffer(tty);
 
 	read_lock(&tasklist_lock);
+	/* Kill the entire session */
 	do_each_task_pid(session, PIDTYPE_SID, p) {
-		if (p->signal->tty == tty || session > 0) {
+		printk(KERN_NOTICE "SAK: killed process %d"
+			" (%s): p->signal->session==tty->session\n",
+			p->pid, p->comm);
+		send_sig(SIGKILL, p, 1);
+	} while_each_task_pid(session, PIDTYPE_SID, p);
+	/* Now kill any processes that happen to have the
+	 * tty open.
+	 */
+	do_each_thread(g, p) {
+		if (p->signal->tty == tty) {
 			printk(KERN_NOTICE "SAK: killed process %d"
 				" (%s): p->signal->session==tty->session\n",
 				p->pid, p->comm);
@@ -2721,7 +2731,7 @@ static void __do_SAK(void *arg)
 			rcu_read_unlock();
 		}
 		task_unlock(p);
-	} while_each_task_pid(session, PIDTYPE_SID, p);
+	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
 #endif
 }
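__do_SAK() now makes two passes instead of one: it first kills every process in the tty's session, then sweeps all threads for any that merely have the tty open. A sketch of the two iterator idioms it relies on, assuming tasklist_lock is read-held (illustration only; the real loop also checks each task's open files):

    read_lock(&tasklist_lock);
    do_each_task_pid(session, PIDTYPE_SID, p) {
            send_sig(SIGKILL, p, 1);            /* every task in the session */
    } while_each_task_pid(session, PIDTYPE_SID, p);

    do_each_thread(g, p) {
            if (p->signal->tty == tty)          /* any thread holding the tty */
                    send_sig(SIGKILL, p, 1);
    } while_each_thread(g, p);
    read_unlock(&tasklist_lock);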
diff --git a/fs/exec.c b/fs/exec.c
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -616,6 +616,15 @@ static int de_thread(struct task_struct *tsk)
 		kmem_cache_free(sighand_cachep, newsighand);
 		return -EAGAIN;
 	}
+
+	/*
+	 * child_reaper ignores SIGKILL, change it now.
+	 * Reparenting needs write_lock on tasklist_lock,
+	 * so it is safe to do it under read_lock.
+	 */
+	if (unlikely(current->group_leader == child_reaper))
+		child_reaper = current;
+
 	zap_other_threads(current);
 	read_unlock(&tasklist_lock);
 
@@ -699,22 +708,30 @@ static int de_thread(struct task_struct *tsk)
 		remove_parent(current);
 		remove_parent(leader);
 
-		switch_exec_pids(leader, current);
+
+		/* Become a process group leader with the old leader's pid.
+		 * Note: The old leader also uses this pid until release_task
+		 * is called.  Odd but simple and correct.
+		 */
+		detach_pid(current, PIDTYPE_PID);
+		current->pid = leader->pid;
+		attach_pid(current, PIDTYPE_PID, current->pid);
+		attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
+		attach_pid(current, PIDTYPE_SID, current->signal->session);
+		list_add_tail(&current->tasks, &init_task.tasks);
 
 		current->parent = current->real_parent = leader->real_parent;
 		leader->parent = leader->real_parent = child_reaper;
 		current->group_leader = current;
 		leader->group_leader = leader;
 
-		add_parent(current, current->parent);
-		add_parent(leader, leader->parent);
+		add_parent(current);
+		add_parent(leader);
 		if (ptrace) {
 			current->ptrace = ptrace;
 			__ptrace_link(current, parent);
 		}
 
-		list_del(&current->tasks);
-		list_add_tail(&current->tasks, &init_task.tasks);
 		current->exit_signal = SIGCHLD;
 
 		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
@@ -751,7 +768,6 @@ no_thread_group:
 		/*
 		 * Move our state over to newsighand and switch it in.
 		 */
-		spin_lock_init(&newsighand->siglock);
 		atomic_set(&newsighand->count, 1);
 		memcpy(newsighand->action, oldsighand->action,
 		       sizeof(newsighand->action));
@@ -768,7 +784,7 @@ no_thread_group:
 		write_unlock_irq(&tasklist_lock);
 
 		if (atomic_dec_and_test(&oldsighand->count))
-			sighand_free(oldsighand);
+			kmem_cache_free(sighand_cachep, oldsighand);
 	}
 
 	BUG_ON(!thread_group_leader(current));
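The open-coded pid switch in de_thread() replaces switch_exec_pids(). A worked example with hypothetical pids (illustration only): leader L has pid 100, and non-leader thread T with pid 101 calls exec. T drops its own pid, takes over the leader's, and re-hashes itself; L keeps answering to pid 100 as a zombie until release_task() reaps it:

    /* current == T (pid 101), leader == L (pid 100) */
    detach_pid(current, PIDTYPE_PID);               /* T drops pid 101      */
    current->pid = leader->pid;                     /* T takes over pid 100 */
    attach_pid(current, PIDTYPE_PID, current->pid); /* both tasks now hash
                                                     * to pid 100 until L is
                                                     * released             */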
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 92146f3b7423..41ecbb847f32 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -62,6 +62,8 @@
 	.posix_timers	= LIST_HEAD_INIT(sig.posix_timers),		\
 	.cpu_timers	= INIT_CPU_TIMERS(sig.cpu_timers),		\
 	.rlim		= INIT_RLIMITS,					\
+	.pgrp		= 1,						\
+	.session	= 1,						\
 }
 
 #define INIT_SIGHAND(sighand) {						\
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 5b2fcb19d2da..5b9082cc600f 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -4,7 +4,6 @@
 enum pid_type
 {
 	PIDTYPE_PID,
-	PIDTYPE_TGID,
 	PIDTYPE_PGID,
 	PIDTYPE_SID,
 	PIDTYPE_MAX
@@ -38,7 +37,6 @@ extern struct pid *FASTCALL(find_pid(enum pid_type, int));
 
 extern int alloc_pidmap(void);
 extern void FASTCALL(free_pidmap(int));
-extern void switch_exec_pids(struct task_struct *leader, struct task_struct *thread);
 
 #define do_each_task_pid(who, type, task)				\
 	if ((task = find_task_by_pid_type(type, who))) {		\
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 20b4f0372e44..d04186d8cc68 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -355,16 +355,8 @@ struct sighand_struct {
 	atomic_t		count;
 	struct k_sigaction	action[_NSIG];
 	spinlock_t		siglock;
-	struct rcu_head		rcu;
 };
 
-extern void sighand_free_cb(struct rcu_head *rhp);
-
-static inline void sighand_free(struct sighand_struct *sp)
-{
-	call_rcu(&sp->rcu, sighand_free_cb);
-}
-
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
@@ -760,6 +752,7 @@ struct task_struct {
 
 	/* PID/PID hash table linkage. */
 	struct pid pids[PIDTYPE_MAX];
+	struct list_head thread_group;
 
 	struct completion *vfork_done;		/* for vfork() */
 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
@@ -1101,7 +1094,6 @@ extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
 extern void zap_other_threads(struct task_struct *p);
 extern int kill_pg(pid_t, int, int);
-extern int kill_sl(pid_t, int, int);
 extern int kill_proc(pid_t, int, int);
 extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
@@ -1158,10 +1150,8 @@ extern void flush_thread(void);
 extern void exit_thread(void);
 
 extern void exit_files(struct task_struct *);
-extern void exit_signal(struct task_struct *);
-extern void __exit_signal(struct task_struct *);
-extern void exit_sighand(struct task_struct *);
-extern void __exit_sighand(struct task_struct *);
+extern void __cleanup_signal(struct signal_struct *);
+extern void __cleanup_sighand(struct sighand_struct *);
 extern void exit_itimers(struct signal_struct *);
 
 extern NORET_TYPE void do_group_exit(int);
@@ -1185,19 +1175,7 @@ extern void wait_task_inactive(task_t * p);
 #endif
 
 #define remove_parent(p)	list_del_init(&(p)->sibling)
-#define add_parent(p, parent)	list_add_tail(&(p)->sibling,&(parent)->children)
-
-#define REMOVE_LINKS(p) do {					\
-	if (thread_group_leader(p))				\
-		list_del_init(&(p)->tasks);			\
-	remove_parent(p);					\
-	} while (0)
-
-#define SET_LINKS(p) do {					\
-	if (thread_group_leader(p))				\
-		list_add_tail(&(p)->tasks,&init_task.tasks);	\
-	add_parent(p, (p)->parent);				\
-	} while (0)
+#define add_parent(p)	list_add_tail(&(p)->sibling,&(p)->parent->children)
 
 #define next_task(p)	list_entry((p)->tasks.next, struct task_struct, tasks)
 #define prev_task(p)	list_entry((p)->tasks.prev, struct task_struct, tasks)
@@ -1215,20 +1193,22 @@ extern void wait_task_inactive(task_t * p);
 #define while_each_thread(g, t) \
 	while ((t = next_thread(t)) != g)
 
-extern task_t * FASTCALL(next_thread(const task_t *p));
-
 #define thread_group_leader(p)	(p->pid == p->tgid)
 
+static inline task_t *next_thread(task_t *p)
+{
+	return list_entry(rcu_dereference(p->thread_group.next),
+			  task_t, thread_group);
+}
+
 static inline int thread_group_empty(task_t *p)
 {
-	return list_empty(&p->pids[PIDTYPE_TGID].pid_list);
+	return list_empty(&p->thread_group);
 }
 
 #define delay_group_leader(p) \
 	(thread_group_leader(p) && !thread_group_empty(p))
 
-extern void unhash_process(struct task_struct *p);
-
 /*
  * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
@@ -1248,6 +1228,15 @@ static inline void task_unlock(struct task_struct *p)
 	spin_unlock(&p->alloc_lock);
 }
 
+extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+							unsigned long *flags);
+
+static inline void unlock_task_sighand(struct task_struct *tsk,
+						unsigned long *flags)
+{
+	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
+}
+
 #ifndef __HAVE_THREAD_FUNCTIONS
 
 #define task_thread_info(task) (task)->thread_info
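With PIDTYPE_TGID gone, the threads of a process are chained on the new task_struct.thread_group list, and next_thread() follows it through rcu_dereference(). A thread group can therefore be walked under rcu_read_lock() as well as under tasklist_lock; a sketch (illustration only):

    task_t *t, *g = task;       /* g may be any thread of the group */

    rcu_read_lock();
    t = g;
    do {
            /* visit thread t */
    } while_each_thread(g, t);
    rcu_read_unlock();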
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b7d093520bb6..162a8fd10b29 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -249,6 +249,8 @@ static inline void init_sigpending(struct sigpending *sig)
 	INIT_LIST_HEAD(&sig->list);
 }
 
+extern void flush_sigqueue(struct sigpending *queue);
+
 /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
 static inline int valid_signal(unsigned long sig)
 {
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 15e1d9736b1b..3af03b19c983 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -210,7 +210,6 @@ extern kmem_cache_t *names_cachep;
 extern kmem_cache_t	*files_cachep;
 extern kmem_cache_t	*filp_cachep;
 extern kmem_cache_t	*fs_cachep;
-extern kmem_cache_t	*signal_cachep;
 extern kmem_cache_t	*sighand_cachep;
 extern kmem_cache_t	*bio_cachep;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index a8c7efc7a681..bc0ec674d3f4 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -29,6 +29,7 @@
 #include <linux/cpuset.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
+#include <linux/posix-timers.h>
 #include <linux/cn_proc.h>
 #include <linux/mutex.h>
 #include <linux/futex.h>
@@ -50,15 +51,80 @@ static void __unhash_process(struct task_struct *p)
 {
 	nr_threads--;
 	detach_pid(p, PIDTYPE_PID);
-	detach_pid(p, PIDTYPE_TGID);
 	if (thread_group_leader(p)) {
 		detach_pid(p, PIDTYPE_PGID);
 		detach_pid(p, PIDTYPE_SID);
-		if (p->pid)
-			__get_cpu_var(process_counts)--;
+
+		list_del_init(&p->tasks);
+		__get_cpu_var(process_counts)--;
+	}
+	list_del_rcu(&p->thread_group);
+	remove_parent(p);
+}
+
+/*
+ * This function expects the tasklist_lock write-locked.
+ */
+static void __exit_signal(struct task_struct *tsk)
+{
+	struct signal_struct *sig = tsk->signal;
+	struct sighand_struct *sighand;
+
+	BUG_ON(!sig);
+	BUG_ON(!atomic_read(&sig->count));
+
+	rcu_read_lock();
+	sighand = rcu_dereference(tsk->sighand);
+	spin_lock(&sighand->siglock);
+
+	posix_cpu_timers_exit(tsk);
+	if (atomic_dec_and_test(&sig->count))
+		posix_cpu_timers_exit_group(tsk);
+	else {
+		/*
+		 * If there is any task waiting for the group exit
+		 * then notify it:
+		 */
+		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
+			wake_up_process(sig->group_exit_task);
+			sig->group_exit_task = NULL;
+		}
+		if (tsk == sig->curr_target)
+			sig->curr_target = next_thread(tsk);
+		/*
+		 * Accumulate here the counters for all threads but the
+		 * group leader as they die, so they can be added into
+		 * the process-wide totals when those are taken.
+		 * The group leader stays around as a zombie as long
+		 * as there are other threads.  When it gets reaped,
+		 * the exit.c code will add its counts into these totals.
+		 * We won't ever get here for the group leader, since it
+		 * will have been the last reference on the signal_struct.
+		 */
+		sig->utime = cputime_add(sig->utime, tsk->utime);
+		sig->stime = cputime_add(sig->stime, tsk->stime);
+		sig->min_flt += tsk->min_flt;
+		sig->maj_flt += tsk->maj_flt;
+		sig->nvcsw += tsk->nvcsw;
+		sig->nivcsw += tsk->nivcsw;
+		sig->sched_time += tsk->sched_time;
+		sig = NULL; /* Marker for below. */
 	}
 
-	REMOVE_LINKS(p);
+	__unhash_process(tsk);
+
+	tsk->signal = NULL;
+	tsk->sighand = NULL;
+	spin_unlock(&sighand->siglock);
+	rcu_read_unlock();
+
+	__cleanup_sighand(sighand);
+	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
+	flush_sigqueue(&tsk->pending);
+	if (sig) {
+		flush_sigqueue(&sig->shared_pending);
+		__cleanup_signal(sig);
+	}
 }
 
 void release_task(struct task_struct * p)
@@ -67,21 +133,14 @@ void release_task(struct task_struct * p)
 	task_t *leader;
 	struct dentry *proc_dentry;
 
 repeat:
 	atomic_dec(&p->user->processes);
 	spin_lock(&p->proc_lock);
 	proc_dentry = proc_pid_unhash(p);
 	write_lock_irq(&tasklist_lock);
-	if (unlikely(p->ptrace))
-		__ptrace_unlink(p);
+	ptrace_unlink(p);
 	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
 	__exit_signal(p);
-	/*
-	 * Note that the fastpath in sys_times depends on __exit_signal having
-	 * updated the counters before a task is removed from the tasklist of
-	 * the process by __unhash_process.
-	 */
-	__unhash_process(p);
 
 	/*
 	 * If we are the last non-leader member of the thread
@@ -116,21 +175,6 @@
 	goto repeat;
 }
 
-/* we are using it only for SMP init */
-
-void unhash_process(struct task_struct *p)
-{
-	struct dentry *proc_dentry;
-
-	spin_lock(&p->proc_lock);
-	proc_dentry = proc_pid_unhash(p);
-	write_lock_irq(&tasklist_lock);
-	__unhash_process(p);
-	write_unlock_irq(&tasklist_lock);
-	spin_unlock(&p->proc_lock);
-	proc_pid_flush(proc_dentry);
-}
-
 /*
  * This checks not only the pgrp, but falls back on the pid if no
  * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
@@ -238,10 +282,10 @@ static void reparent_to_init(void)
 
 	ptrace_unlink(current);
 	/* Reparent to init */
-	REMOVE_LINKS(current);
+	remove_parent(current);
 	current->parent = child_reaper;
 	current->real_parent = child_reaper;
-	SET_LINKS(current);
+	add_parent(current);
 
 	/* Set the exit signal to SIGCHLD so we signal init on exit */
 	current->exit_signal = SIGCHLD;
@@ -538,13 +582,13 @@ static void exit_mm(struct task_struct * tsk)
 	mmput(mm);
 }
 
-static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper)
+static inline void choose_new_parent(task_t *p, task_t *reaper)
 {
 	/*
 	 * Make sure we're not reparenting to ourselves and that
 	 * the parent is not a zombie.
 	 */
-	BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
+	BUG_ON(p == reaper || reaper->exit_state);
 	p->real_parent = reaper;
 }
 
@@ -569,9 +613,9 @@ static void reparent_thread(task_t *p, task_t *father, int traced)
 	 * anyway, so let go of it.
 	 */
 	p->ptrace = 0;
-	list_del_init(&p->sibling);
+	remove_parent(p);
 	p->parent = p->real_parent;
-	list_add_tail(&p->sibling, &p->parent->children);
+	add_parent(p);
 
 	/* If we'd notified the old parent about this child's death,
 	 * also notify the new parent.
@@ -645,7 +689,7 @@ static void forget_original_parent(struct task_struct * father,
 
 		if (father == p->real_parent) {
 			/* reparent with a reaper, real father it's us */
-			choose_new_parent(p, reaper, child_reaper);
+			choose_new_parent(p, reaper);
 			reparent_thread(p, father, 0);
 		} else {
 			/* reparent ptraced task to its real parent */
@@ -666,7 +710,7 @@ static void forget_original_parent(struct task_struct * father,
 	}
 	list_for_each_safe(_p, _n, &father->ptrace_children) {
 		p = list_entry(_p,struct task_struct,ptrace_list);
-		choose_new_parent(p, reaper, child_reaper);
+		choose_new_parent(p, reaper);
 		reparent_thread(p, father, 1);
 	}
 }
@@ -807,7 +851,7 @@ fastcall NORET_TYPE void do_exit(long code)
 		panic("Aiee, killing interrupt handler!");
 	if (unlikely(!tsk->pid))
 		panic("Attempted to kill the idle task!");
-	if (unlikely(tsk->pid == 1))
+	if (unlikely(tsk == child_reaper))
 		panic("Attempted to kill init!");
 
 	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
@@ -920,13 +964,6 @@ asmlinkage long sys_exit(int error_code)
 	do_exit((error_code&0xff)<<8);
 }
 
-task_t fastcall *next_thread(const task_t *p)
-{
-	return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
-}
-
-EXPORT_SYMBOL(next_thread);
-
 /*
  * Take down every thread in the group.  This is called by fatal signals
  * as well as by sys_exit_group (below).
@@ -941,7 +978,6 @@ do_group_exit(int exit_code)
 	else if (!thread_group_empty(current)) {
 		struct signal_struct *const sig = current->signal;
 		struct sighand_struct *const sighand = current->sighand;
-		read_lock(&tasklist_lock);
 		spin_lock_irq(&sighand->siglock);
 		if (sig->flags & SIGNAL_GROUP_EXIT)
 			/* Another thread got here before we took the lock.  */
@@ -951,7 +987,6 @@ do_group_exit(int exit_code)
 			zap_other_threads(current);
 		}
 		spin_unlock_irq(&sighand->siglock);
-		read_unlock(&tasklist_lock);
 	}
 
 	do_exit(exit_code);
@@ -1281,7 +1316,7 @@ bail_ref:
 
 	/* move to end of parent's list to avoid starvation */
 	remove_parent(p);
-	add_parent(p, p->parent);
+	add_parent(p);
 
 	write_unlock_irq(&tasklist_lock);
 
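__exit_signal() now lives in exit.c and performs the whole teardown under ->siglock: it folds the dying thread's counters into the shared signal_struct, unhashes the task, and only afterwards frees the sighand (and, for the last thread, the signal_struct). The resulting release_task() ordering, roughly (illustration only, details elided):

    write_lock_irq(&tasklist_lock);
    ptrace_unlink(p);
    __exit_signal(p);   /* under ->siglock: fold utime/stime/fault counts,
                         * __unhash_process(p), drop sighand/signal refs  */
    /* ... possibly notify the parent or re-reap a zombie leader ... */
    write_unlock_irq(&tasklist_lock);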
diff --git a/kernel/fork.c b/kernel/fork.c
index c49bd193b058..b3f7a1bb5e55 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -84,7 +84,7 @@ static kmem_cache_t *task_struct_cachep;
 #endif
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
-kmem_cache_t *signal_cachep;
+static kmem_cache_t *signal_cachep;
 
 /* SLAB cache for sighand_struct structures (tsk->sighand) */
 kmem_cache_t *sighand_cachep;
@@ -786,14 +786,6 @@ int unshare_files(void)
 
 EXPORT_SYMBOL(unshare_files);
 
-void sighand_free_cb(struct rcu_head *rhp)
-{
-	struct sighand_struct *sp;
-
-	sp = container_of(rhp, struct sighand_struct, rcu);
-	kmem_cache_free(sighand_cachep, sp);
-}
-
 static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
 {
 	struct sighand_struct *sig;
@@ -806,12 +798,17 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
 	rcu_assign_pointer(tsk->sighand, sig);
 	if (!sig)
 		return -ENOMEM;
-	spin_lock_init(&sig->siglock);
 	atomic_set(&sig->count, 1);
 	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
 	return 0;
 }
 
+void __cleanup_sighand(struct sighand_struct *sighand)
+{
+	if (atomic_dec_and_test(&sighand->count))
+		kmem_cache_free(sighand_cachep, sighand);
+}
+
 static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
 {
 	struct signal_struct *sig;
@@ -881,6 +878,22 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
 	return 0;
 }
 
+void __cleanup_signal(struct signal_struct *sig)
+{
+	exit_thread_group_keys(sig);
+	kmem_cache_free(signal_cachep, sig);
+}
+
+static inline void cleanup_signal(struct task_struct *tsk)
+{
+	struct signal_struct *sig = tsk->signal;
+
+	atomic_dec(&sig->live);
+
+	if (atomic_dec_and_test(&sig->count))
+		__cleanup_signal(sig);
+}
+
 static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
@@ -1095,6 +1108,7 @@ static task_t *copy_process(unsigned long clone_flags,
 	 * We dont wake it up yet.
 	 */
 	p->group_leader = p;
+	INIT_LIST_HEAD(&p->thread_group);
 	INIT_LIST_HEAD(&p->ptrace_children);
 	INIT_LIST_HEAD(&p->ptrace_list);
 
@@ -1118,16 +1132,6 @@ static task_t *copy_process(unsigned long clone_flags,
 			!cpu_online(task_cpu(p))))
 		set_task_cpu(p, smp_processor_id());
 
-	/*
-	 * Check for pending SIGKILL! The new thread should not be allowed
-	 * to slip out of an OOM kill. (or normal SIGKILL.)
-	 */
-	if (sigismember(&current->pending.signal, SIGKILL)) {
-		write_unlock_irq(&tasklist_lock);
-		retval = -EINTR;
-		goto bad_fork_cleanup_namespace;
-	}
-
 	/* CLONE_PARENT re-uses the old parent */
 	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
 		p->real_parent = current->real_parent;
@@ -1136,6 +1140,23 @@ static task_t *copy_process(unsigned long clone_flags,
 		p->parent = p->real_parent;
 
 	spin_lock(&current->sighand->siglock);
+
+	/*
+	 * Process group and session signals need to be delivered to just the
+	 * parent before the fork or both the parent and the child after the
+	 * fork. Restart if a signal comes in before we add the new process to
+	 * it's process group.
+	 * A fatal signal pending means that current will exit, so the new
+	 * thread can't slip out of an OOM kill (or normal SIGKILL).
+	 */
+	recalc_sigpending();
+	if (signal_pending(current)) {
+		spin_unlock(&current->sighand->siglock);
+		write_unlock_irq(&tasklist_lock);
+		retval = -ERESTARTNOINTR;
+		goto bad_fork_cleanup_namespace;
+	}
+
 	if (clone_flags & CLONE_THREAD) {
 		/*
 		 * Important: if an exit-all has been started then
@@ -1148,17 +1169,9 @@ static task_t *copy_process(unsigned long clone_flags,
 			retval = -EAGAIN;
 			goto bad_fork_cleanup_namespace;
 		}
-		p->group_leader = current->group_leader;
 
-		if (current->signal->group_stop_count > 0) {
-			/*
-			 * There is an all-stop in progress for the group.
-			 * We ourselves will stop as soon as we check signals.
-			 * Make the new thread part of that group stop too.
-			 */
-			current->signal->group_stop_count++;
-			set_tsk_thread_flag(p, TIF_SIGPENDING);
-		}
+		p->group_leader = current->group_leader;
+		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
 
 		if (!cputime_eq(current->signal->it_virt_expires,
 				cputime_zero) ||
@@ -1181,23 +1194,25 @@
 	 */
 	p->ioprio = current->ioprio;
 
-	SET_LINKS(p);
-	if (unlikely(p->ptrace & PT_PTRACED))
-		__ptrace_link(p, current->parent);
-
-	if (thread_group_leader(p)) {
-		p->signal->tty = current->signal->tty;
-		p->signal->pgrp = process_group(current);
-		p->signal->session = current->signal->session;
-		attach_pid(p, PIDTYPE_PGID, process_group(p));
-		attach_pid(p, PIDTYPE_SID, p->signal->session);
-		if (p->pid)
+	if (likely(p->pid)) {
+		add_parent(p);
+		if (unlikely(p->ptrace & PT_PTRACED))
+			__ptrace_link(p, current->parent);
+
+		if (thread_group_leader(p)) {
+			p->signal->tty = current->signal->tty;
+			p->signal->pgrp = process_group(current);
+			p->signal->session = current->signal->session;
+			attach_pid(p, PIDTYPE_PGID, process_group(p));
+			attach_pid(p, PIDTYPE_SID, p->signal->session);
+
+			list_add_tail(&p->tasks, &init_task.tasks);
 			__get_cpu_var(process_counts)++;
+		}
+		attach_pid(p, PIDTYPE_PID, p->pid);
+		nr_threads++;
 	}
-	attach_pid(p, PIDTYPE_TGID, p->tgid);
-	attach_pid(p, PIDTYPE_PID, p->pid);
-
-	nr_threads++;
 	total_forks++;
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
@@ -1212,9 +1227,9 @@ bad_fork_cleanup_mm:
 	if (p->mm)
 		mmput(p->mm);
 bad_fork_cleanup_signal:
-	exit_signal(p);
+	cleanup_signal(p);
 bad_fork_cleanup_sighand:
-	exit_sighand(p);
+	__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
 	exit_fs(p); /* blocking */
 bad_fork_cleanup_files:
@@ -1261,7 +1276,7 @@ task_t * __devinit fork_idle(int cpu)
 	if (!task)
 		return ERR_PTR(-ENOMEM);
 	init_idle(task, cpu);
-	unhash_process(task);
+
 	return task;
 }
 
@@ -1353,11 +1368,21 @@ long do_fork(unsigned long clone_flags,
 #define ARCH_MIN_MMSTRUCT_ALIGN	0
 #endif
 
+static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
+{
+	struct sighand_struct *sighand = data;
+
+	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
+					SLAB_CTOR_CONSTRUCTOR)
+		spin_lock_init(&sighand->siglock);
+}
+
 void __init proc_caches_init(void)
 {
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
+			sighand_ctor, NULL);
 	signal_cachep = kmem_cache_create("signal_cache",
 			sizeof(struct signal_struct), 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
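sighand_cachep is now created with SLAB_DESTROY_BY_RCU plus a constructor that initializes ->siglock once per slab object. Under SLAB_DESTROY_BY_RCU the memory of a freed sighand_struct cannot go back to the page allocator during an RCU grace period, but the object itself may be recycled for another task immediately; a lockless reader must therefore take the lock and re-check ownership afterwards. That is the loop lock_task_sighand() in the kernel/signal.c hunk implements; its core pattern (illustration only):

    for (;;) {
            sighand = rcu_dereference(tsk->sighand);
            if (sighand == NULL)
                    break;          /* tsk already passed __exit_signal() */

            spin_lock_irqsave(&sighand->siglock, *flags);
            if (sighand == tsk->sighand)
                    break;          /* still ours: locked and valid */
            /* the slab recycled this object for another task; retry */
            spin_unlock_irqrestore(&sighand->siglock, *flags);
    }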
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 51a892063aaa..20a997c73c3d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -170,7 +170,7 @@ static int wait_for_helper(void *data)
 	sa.sa.sa_handler = SIG_IGN;
 	sa.sa.sa_flags = 0;
 	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
-	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
+	do_sigaction(SIGCHLD, &sa, NULL);
 	allow_signal(SIGCHLD);
 
 	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
diff --git a/kernel/pid.c b/kernel/pid.c
index 1acc07246991..a9f2dfd006d2 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -218,36 +218,6 @@ task_t *find_task_by_pid_type(int type, int nr)
 EXPORT_SYMBOL(find_task_by_pid_type);
 
 /*
- * This function switches the PIDs if a non-leader thread calls
- * sys_execve() - this must be done without releasing the PID.
- * (which a detach_pid() would eventually do.)
- */
-void switch_exec_pids(task_t *leader, task_t *thread)
-{
-	__detach_pid(leader, PIDTYPE_PID);
-	__detach_pid(leader, PIDTYPE_TGID);
-	__detach_pid(leader, PIDTYPE_PGID);
-	__detach_pid(leader, PIDTYPE_SID);
-
-	__detach_pid(thread, PIDTYPE_PID);
-	__detach_pid(thread, PIDTYPE_TGID);
-
-	leader->pid = leader->tgid = thread->pid;
-	thread->pid = thread->tgid;
-
-	attach_pid(thread, PIDTYPE_PID, thread->pid);
-	attach_pid(thread, PIDTYPE_TGID, thread->tgid);
-	attach_pid(thread, PIDTYPE_PGID, thread->signal->pgrp);
-	attach_pid(thread, PIDTYPE_SID, thread->signal->session);
-	list_add_tail(&thread->tasks, &init_task.tasks);
-
-	attach_pid(leader, PIDTYPE_PID, leader->pid);
-	attach_pid(leader, PIDTYPE_TGID, leader->tgid);
-	attach_pid(leader, PIDTYPE_PGID, leader->signal->pgrp);
-	attach_pid(leader, PIDTYPE_SID, leader->signal->session);
-}
-
-/*
  * The pid hash table is scaled according to the amount of memory in the
  * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
  * more.
@@ -277,16 +247,8 @@
 
 void __init pidmap_init(void)
 {
-	int i;
-
 	pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL);
+	/* Reserve PID 0. We never call free_pidmap(0) */
 	set_bit(0, pidmap_array->page);
 	atomic_dec(&pidmap_array->nr_free);
-
-	/*
-	 * Allocate PID 0, and hash it via all PID types:
-	 */
-
-	for (i = 0; i < PIDTYPE_MAX; i++)
-		attach_pid(current, i, 0);
 }
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index d95a72c9279d..86a7f6c60cb2 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -35,9 +35,9 @@ void __ptrace_link(task_t *child, task_t *new_parent)
 	if (child->parent == new_parent)
 		return;
 	list_add(&child->ptrace_list, &child->parent->ptrace_children);
-	REMOVE_LINKS(child);
+	remove_parent(child);
 	child->parent = new_parent;
-	SET_LINKS(child);
+	add_parent(child);
 }
 
 /*
@@ -77,9 +77,9 @@ void __ptrace_unlink(task_t *child)
 	child->ptrace = 0;
 	if (!list_empty(&child->ptrace_list)) {
 		list_del_init(&child->ptrace_list);
-		REMOVE_LINKS(child);
+		remove_parent(child);
 		child->parent = child->real_parent;
-		SET_LINKS(child);
+		add_parent(child);
 	}
 
 	ptrace_untrace(child);
diff --git a/kernel/signal.c b/kernel/signal.c index 75f7341b0c39..4922928d91f6 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | #include <linux/security.h> | 22 | #include <linux/security.h> |
| 23 | #include <linux/syscalls.h> | 23 | #include <linux/syscalls.h> |
| 24 | #include <linux/ptrace.h> | 24 | #include <linux/ptrace.h> |
| 25 | #include <linux/posix-timers.h> | ||
| 26 | #include <linux/signal.h> | 25 | #include <linux/signal.h> |
| 27 | #include <linux/audit.h> | 26 | #include <linux/audit.h> |
| 28 | #include <linux/capability.h> | 27 | #include <linux/capability.h> |
| @@ -147,6 +146,8 @@ static kmem_cache_t *sigqueue_cachep; | |||
| 147 | #define sig_kernel_stop(sig) \ | 146 | #define sig_kernel_stop(sig) \ |
| 148 | (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK)) | 147 | (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK)) |
| 149 | 148 | ||
| 149 | #define sig_needs_tasklist(sig) ((sig) == SIGCONT) | ||
| 150 | |||
| 150 | #define sig_user_defined(t, signr) \ | 151 | #define sig_user_defined(t, signr) \ |
| 151 | (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ | 152 | (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ |
| 152 | ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN)) | 153 | ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN)) |
| @@ -292,7 +293,7 @@ static void __sigqueue_free(struct sigqueue *q) | |||
| 292 | kmem_cache_free(sigqueue_cachep, q); | 293 | kmem_cache_free(sigqueue_cachep, q); |
| 293 | } | 294 | } |
| 294 | 295 | ||
| 295 | static void flush_sigqueue(struct sigpending *queue) | 296 | void flush_sigqueue(struct sigpending *queue) |
| 296 | { | 297 | { |
| 297 | struct sigqueue *q; | 298 | struct sigqueue *q; |
| 298 | 299 | ||
| @@ -307,9 +308,7 @@ static void flush_sigqueue(struct sigpending *queue) | |||
| 307 | /* | 308 | /* |
| 308 | * Flush all pending signals for a task. | 309 | * Flush all pending signals for a task. |
| 309 | */ | 310 | */ |
| 310 | 311 | void flush_signals(struct task_struct *t) | |
| 311 | void | ||
| 312 | flush_signals(struct task_struct *t) | ||
| 313 | { | 312 | { |
| 314 | unsigned long flags; | 313 | unsigned long flags; |
| 315 | 314 | ||
| @@ -321,109 +320,6 @@ flush_signals(struct task_struct *t) | |||
| 321 | } | 320 | } |
| 322 | 321 | ||
| 323 | /* | 322 | /* |
| 324 | * This function expects the tasklist_lock write-locked. | ||
| 325 | */ | ||
| 326 | void __exit_sighand(struct task_struct *tsk) | ||
| 327 | { | ||
| 328 | struct sighand_struct * sighand = tsk->sighand; | ||
| 329 | |||
| 330 | /* Ok, we're done with the signal handlers */ | ||
| 331 | tsk->sighand = NULL; | ||
| 332 | if (atomic_dec_and_test(&sighand->count)) | ||
| 333 | sighand_free(sighand); | ||
| 334 | } | ||
| 335 | |||
| 336 | void exit_sighand(struct task_struct *tsk) | ||
| 337 | { | ||
| 338 | write_lock_irq(&tasklist_lock); | ||
| 339 | rcu_read_lock(); | ||
| 340 | if (tsk->sighand != NULL) { | ||
| 341 | struct sighand_struct *sighand = rcu_dereference(tsk->sighand); | ||
| 342 | spin_lock(&sighand->siglock); | ||
| 343 | __exit_sighand(tsk); | ||
| 344 | spin_unlock(&sighand->siglock); | ||
| 345 | } | ||
| 346 | rcu_read_unlock(); | ||
| 347 | write_unlock_irq(&tasklist_lock); | ||
| 348 | } | ||
| 349 | |||
| 350 | /* | ||
| 351 | * This function expects the tasklist_lock write-locked. | ||
| 352 | */ | ||
| 353 | void __exit_signal(struct task_struct *tsk) | ||
| 354 | { | ||
| 355 | struct signal_struct * sig = tsk->signal; | ||
| 356 | struct sighand_struct * sighand; | ||
| 357 | |||
| 358 | if (!sig) | ||
| 359 | BUG(); | ||
| 360 | if (!atomic_read(&sig->count)) | ||
| 361 | BUG(); | ||
| 362 | rcu_read_lock(); | ||
| 363 | sighand = rcu_dereference(tsk->sighand); | ||
| 364 | spin_lock(&sighand->siglock); | ||
| 365 | posix_cpu_timers_exit(tsk); | ||
| 366 | if (atomic_dec_and_test(&sig->count)) { | ||
| 367 | posix_cpu_timers_exit_group(tsk); | ||
| 368 | tsk->signal = NULL; | ||
| 369 | __exit_sighand(tsk); | ||
| 370 | spin_unlock(&sighand->siglock); | ||
| 371 | flush_sigqueue(&sig->shared_pending); | ||
| 372 | } else { | ||
| 373 | /* | ||
| 374 | * If there is any task waiting for the group exit | ||
| 375 | * then notify it: | ||
| 376 | */ | ||
| 377 | if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) { | ||
| 378 | wake_up_process(sig->group_exit_task); | ||
| 379 | sig->group_exit_task = NULL; | ||
| 380 | } | ||
| 381 | if (tsk == sig->curr_target) | ||
| 382 | sig->curr_target = next_thread(tsk); | ||
| 383 | tsk->signal = NULL; | ||
| 384 | /* | ||
| 385 | * Accumulate here the counters for all threads but the | ||
| 386 | * group leader as they die, so they can be added into | ||
| 387 | * the process-wide totals when those are taken. | ||
| 388 | * The group leader stays around as a zombie as long | ||
| 389 | * as there are other threads. When it gets reaped, | ||
| 390 | * the exit.c code will add its counts into these totals. | ||
| 391 | * We won't ever get here for the group leader, since it | ||
| 392 | * will have been the last reference on the signal_struct. | ||
| 393 | */ | ||
| 394 | sig->utime = cputime_add(sig->utime, tsk->utime); | ||
| 395 | sig->stime = cputime_add(sig->stime, tsk->stime); | ||
| 396 | sig->min_flt += tsk->min_flt; | ||
| 397 | sig->maj_flt += tsk->maj_flt; | ||
| 398 | sig->nvcsw += tsk->nvcsw; | ||
| 399 | sig->nivcsw += tsk->nivcsw; | ||
| 400 | sig->sched_time += tsk->sched_time; | ||
| 401 | __exit_sighand(tsk); | ||
| 402 | spin_unlock(&sighand->siglock); | ||
| 403 | sig = NULL; /* Marker for below. */ | ||
| 404 | } | ||
| 405 | rcu_read_unlock(); | ||
| 406 | clear_tsk_thread_flag(tsk,TIF_SIGPENDING); | ||
| 407 | flush_sigqueue(&tsk->pending); | ||
| 408 | if (sig) { | ||
| 409 | /* | ||
| 410 | * We are cleaning up the signal_struct here. | ||
| 411 | */ | ||
| 412 | exit_thread_group_keys(sig); | ||
| 413 | kmem_cache_free(signal_cachep, sig); | ||
| 414 | } | ||
| 415 | } | ||
| 416 | |||
| 417 | void exit_signal(struct task_struct *tsk) | ||
| 418 | { | ||
| 419 | atomic_dec(&tsk->signal->live); | ||
| 420 | |||
| 421 | write_lock_irq(&tasklist_lock); | ||
| 422 | __exit_signal(tsk); | ||
| 423 | write_unlock_irq(&tasklist_lock); | ||
| 424 | } | ||
| 425 | |||
| 426 | /* | ||
| 427 | * Flush all handlers for a task. | 323 | * Flush all handlers for a task. |
| 428 | */ | 324 | */ |
| 429 | 325 | ||
| @@ -695,9 +591,7 @@ static int check_kill_permission(int sig, struct siginfo *info, | |||
| 695 | } | 591 | } |
| 696 | 592 | ||
| 697 | /* forward decl */ | 593 | /* forward decl */ |
| 698 | static void do_notify_parent_cldstop(struct task_struct *tsk, | 594 | static void do_notify_parent_cldstop(struct task_struct *tsk, int why); |
| 699 | int to_self, | ||
| 700 | int why); | ||
| 701 | 595 | ||
| 702 | /* | 596 | /* |
| 703 | * Handle magic process-wide effects of stop/continue signals. | 597 | * Handle magic process-wide effects of stop/continue signals. |
| @@ -747,7 +641,7 @@ static void handle_stop_signal(int sig, struct task_struct *p) | |||
| 747 | p->signal->group_stop_count = 0; | 641 | p->signal->group_stop_count = 0; |
| 748 | p->signal->flags = SIGNAL_STOP_CONTINUED; | 642 | p->signal->flags = SIGNAL_STOP_CONTINUED; |
| 749 | spin_unlock(&p->sighand->siglock); | 643 | spin_unlock(&p->sighand->siglock); |
| 750 | do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED); | 644 | do_notify_parent_cldstop(p, CLD_STOPPED); |
| 751 | spin_lock(&p->sighand->siglock); | 645 | spin_lock(&p->sighand->siglock); |
| 752 | } | 646 | } |
| 753 | rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); | 647 | rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); |
| @@ -788,7 +682,7 @@ static void handle_stop_signal(int sig, struct task_struct *p) | |||
| 788 | p->signal->flags = SIGNAL_STOP_CONTINUED; | 682 | p->signal->flags = SIGNAL_STOP_CONTINUED; |
| 789 | p->signal->group_exit_code = 0; | 683 | p->signal->group_exit_code = 0; |
| 790 | spin_unlock(&p->sighand->siglock); | 684 | spin_unlock(&p->sighand->siglock); |
| 791 | do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED); | 685 | do_notify_parent_cldstop(p, CLD_CONTINUED); |
| 792 | spin_lock(&p->sighand->siglock); | 686 | spin_lock(&p->sighand->siglock); |
| 793 | } else { | 687 | } else { |
| 794 | /* | 688 | /* |
| @@ -1120,27 +1014,37 @@ void zap_other_threads(struct task_struct *p) | |||
| 1120 | /* | 1014 | /* |
| 1121 | * Must be called under rcu_read_lock() or with tasklist_lock read-held. | 1015 | * Must be called under rcu_read_lock() or with tasklist_lock read-held. |
| 1122 | */ | 1016 | */ |
| 1017 | struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags) | ||
| 1018 | { | ||
| 1019 | struct sighand_struct *sighand; | ||
| 1020 | |||
| 1021 | for (;;) { | ||
| 1022 | sighand = rcu_dereference(tsk->sighand); | ||
| 1023 | if (unlikely(sighand == NULL)) | ||
| 1024 | break; | ||
| 1025 | |||
| 1026 | spin_lock_irqsave(&sighand->siglock, *flags); | ||
| 1027 | if (likely(sighand == tsk->sighand)) | ||
| 1028 | break; | ||
| 1029 | spin_unlock_irqrestore(&sighand->siglock, *flags); | ||
| 1030 | } | ||
| 1031 | |||
| 1032 | return sighand; | ||
| 1033 | } | ||
| 1034 | |||
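lock_task_sighand() pairs with an unlock_task_sighand() helper added elsewhere in this series (presumably a thin wrapper around spin_unlock_irqrestore() on tsk->sighand->siglock). The loop is the validate-after-lock idiom: exec() in a multithreaded process can swap tsk->sighand between the rcu_dereference() and the spin_lock_irqsave(), so the pointer must be re-checked once the lock is held. A user-space analogue of the same idiom, with hypothetical names and a plain mutex standing in for RCU plus siglock:

#include <pthread.h>
#include <stddef.h>

struct obj {
	pthread_mutex_t lock;
	/* ... payload ... */
};

struct task {
	struct obj *objp;	/* may be replaced or NULLed concurrently */
};

/* Analogue of lock_task_sighand(): lock the object we found, then
 * confirm the task still points at it; if not, another thread swapped
 * it while we were acquiring the lock, so drop the lock and retry. */
static struct obj *lock_task_obj(struct task *t)
{
	struct obj *o;

	for (;;) {
		o = __atomic_load_n(&t->objp, __ATOMIC_ACQUIRE);
		if (o == NULL)
			break;		/* object already torn down */
		pthread_mutex_lock(&o->lock);
		if (o == __atomic_load_n(&t->objp, __ATOMIC_ACQUIRE))
			break;		/* still current: return it locked */
		pthread_mutex_unlock(&o->lock);
	}
	return o;
}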
| 1123 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | 1035 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
| 1124 | { | 1036 | { |
| 1125 | unsigned long flags; | 1037 | unsigned long flags; |
| 1126 | struct sighand_struct *sp; | ||
| 1127 | int ret; | 1038 | int ret; |
| 1128 | 1039 | ||
| 1129 | retry: | ||
| 1130 | ret = check_kill_permission(sig, info, p); | 1040 | ret = check_kill_permission(sig, info, p); |
| 1131 | if (!ret && sig && (sp = rcu_dereference(p->sighand))) { | 1041 | |
| 1132 | spin_lock_irqsave(&sp->siglock, flags); | 1042 | if (!ret && sig) { |
| 1133 | if (p->sighand != sp) { | 1043 | ret = -ESRCH; |
| 1134 | spin_unlock_irqrestore(&sp->siglock, flags); | 1044 | if (lock_task_sighand(p, &flags)) { |
| 1135 | goto retry; | 1045 | ret = __group_send_sig_info(sig, info, p); |
| 1136 | } | 1046 | unlock_task_sighand(p, &flags); |
| 1137 | if ((atomic_read(&sp->count) == 0) || | ||
| 1138 | (atomic_read(&p->usage) == 0)) { | ||
| 1139 | spin_unlock_irqrestore(&sp->siglock, flags); | ||
| 1140 | return -ESRCH; | ||
| 1141 | } | 1047 | } |
| 1142 | ret = __group_send_sig_info(sig, info, p); | ||
| 1143 | spin_unlock_irqrestore(&sp->siglock, flags); | ||
| 1144 | } | 1048 | } |
| 1145 | 1049 | ||
| 1146 | return ret; | 1050 | return ret; |
| @@ -1189,7 +1093,7 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid) | |||
| 1189 | struct task_struct *p; | 1093 | struct task_struct *p; |
| 1190 | 1094 | ||
| 1191 | rcu_read_lock(); | 1095 | rcu_read_lock(); |
| 1192 | if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) { | 1096 | if (unlikely(sig_needs_tasklist(sig))) { |
| 1193 | read_lock(&tasklist_lock); | 1097 | read_lock(&tasklist_lock); |
| 1194 | acquired_tasklist_lock = 1; | 1098 | acquired_tasklist_lock = 1; |
| 1195 | } | 1099 | } |
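sig_needs_tasklist() itself is not shown in this hunk. Judging from the open-coded condition it replaces, the definition (presumably among the lines changed in include/linux/signal.h) should be equivalent to:

/* Assumed shape of the new predicate: stop signals and SIGCONT have
 * process-wide side effects in handle_stop_signal() and so still need
 * tasklist_lock; every other signal can be sent under RCU alone. */
#define sig_needs_tasklist(sig) \
	(sig_kernel_stop(sig) || (sig) == SIGCONT)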
| @@ -1405,12 +1309,10 @@ void sigqueue_free(struct sigqueue *q) | |||
| 1405 | __sigqueue_free(q); | 1309 | __sigqueue_free(q); |
| 1406 | } | 1310 | } |
| 1407 | 1311 | ||
| 1408 | int | 1312 | int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) |
| 1409 | send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) | ||
| 1410 | { | 1313 | { |
| 1411 | unsigned long flags; | 1314 | unsigned long flags; |
| 1412 | int ret = 0; | 1315 | int ret = 0; |
| 1413 | struct sighand_struct *sh; | ||
| 1414 | 1316 | ||
| 1415 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); | 1317 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
| 1416 | 1318 | ||
| @@ -1424,48 +1326,17 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) | |||
| 1424 | */ | 1326 | */ |
| 1425 | rcu_read_lock(); | 1327 | rcu_read_lock(); |
| 1426 | 1328 | ||
| 1427 | if (unlikely(p->flags & PF_EXITING)) { | 1329 | if (!likely(lock_task_sighand(p, &flags))) { |
| 1428 | ret = -1; | 1330 | ret = -1; |
| 1429 | goto out_err; | 1331 | goto out_err; |
| 1430 | } | 1332 | } |
| 1431 | 1333 | ||
| 1432 | retry: | ||
| 1433 | sh = rcu_dereference(p->sighand); | ||
| 1434 | |||
| 1435 | spin_lock_irqsave(&sh->siglock, flags); | ||
| 1436 | if (p->sighand != sh) { | ||
| 1437 | /* We raced with exec() in a multithreaded process... */ | ||
| 1438 | spin_unlock_irqrestore(&sh->siglock, flags); | ||
| 1439 | goto retry; | ||
| 1440 | } | ||
| 1441 | |||
| 1442 | /* | ||
| 1443 | * We do the check here again to handle the following scenario: | ||
| 1444 | * | ||
| 1445 | * CPU 0 CPU 1 | ||
| 1446 | * send_sigqueue | ||
| 1447 | * check PF_EXITING | ||
| 1448 | * interrupt exit code running | ||
| 1449 | * __exit_signal | ||
| 1450 | * lock sighand->siglock | ||
| 1451 | * unlock sighand->siglock | ||
| 1452 | * lock sh->siglock | ||
| 1453 | * add(tsk->pending) flush_sigqueue(tsk->pending) | ||
| 1454 | * | ||
| 1455 | */ | ||
| 1456 | |||
| 1457 | if (unlikely(p->flags & PF_EXITING)) { | ||
| 1458 | ret = -1; | ||
| 1459 | goto out; | ||
| 1460 | } | ||
| 1461 | |||
| 1462 | if (unlikely(!list_empty(&q->list))) { | 1334 | if (unlikely(!list_empty(&q->list))) { |
| 1463 | /* | 1335 | /* |
| 1464 | * If an SI_TIMER entry is already queued, just increment | 1336 |
| 1465 | * the overrun count. | 1337 | * the overrun count. |
| 1466 | */ | 1338 | */ |
| 1467 | if (q->info.si_code != SI_TIMER) | 1339 | BUG_ON(q->info.si_code != SI_TIMER); |
| 1468 | BUG(); | ||
| 1469 | q->info.si_overrun++; | 1340 | q->info.si_overrun++; |
| 1470 | goto out; | 1341 | goto out; |
| 1471 | } | 1342 | } |
| @@ -1481,7 +1352,7 @@ retry: | |||
| 1481 | signal_wake_up(p, sig == SIGKILL); | 1352 | signal_wake_up(p, sig == SIGKILL); |
| 1482 | 1353 | ||
| 1483 | out: | 1354 | out: |
| 1484 | spin_unlock_irqrestore(&sh->siglock, flags); | 1355 | unlock_task_sighand(p, &flags); |
| 1485 | out_err: | 1356 | out_err: |
| 1486 | rcu_read_unlock(); | 1357 | rcu_read_unlock(); |
| 1487 | 1358 | ||
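The si_overrun counter maintained above is what user space eventually reads back through timer_getoverrun(). A small demonstration of that contract (standard POSIX timer API, nothing specific to this patch; link with -lrt on older glibc):

/* Block SIGUSR1 so a 1 ms interval timer can expire many times while
 * only one siginfo stays queued; the extra expirations surface as the
 * overrun count instead of additional queued signals. */
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
				.sigev_signo  = SIGUSR1 };
	struct itimerspec its = { .it_interval = { 0, 1000000 },
				  .it_value    = { 0, 1000000 } };
	timer_t tid;
	sigset_t set;
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	timer_create(CLOCK_REALTIME, &sev, &tid);
	timer_settime(tid, 0, &its, NULL);
	sleep(1);			/* roughly 1000 expirations pile up */

	sigwait(&set, &sig);
	printf("overruns: %d\n", timer_getoverrun(tid));
	return 0;
}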
| @@ -1613,14 +1484,14 @@ void do_notify_parent(struct task_struct *tsk, int sig) | |||
| 1613 | spin_unlock_irqrestore(&psig->siglock, flags); | 1484 | spin_unlock_irqrestore(&psig->siglock, flags); |
| 1614 | } | 1485 | } |
| 1615 | 1486 | ||
| 1616 | static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why) | 1487 | static void do_notify_parent_cldstop(struct task_struct *tsk, int why) |
| 1617 | { | 1488 | { |
| 1618 | struct siginfo info; | 1489 | struct siginfo info; |
| 1619 | unsigned long flags; | 1490 | unsigned long flags; |
| 1620 | struct task_struct *parent; | 1491 | struct task_struct *parent; |
| 1621 | struct sighand_struct *sighand; | 1492 | struct sighand_struct *sighand; |
| 1622 | 1493 | ||
| 1623 | if (to_self) | 1494 | if (tsk->ptrace & PT_PTRACED) |
| 1624 | parent = tsk->parent; | 1495 | parent = tsk->parent; |
| 1625 | else { | 1496 | else { |
| 1626 | tsk = tsk->group_leader; | 1497 | tsk = tsk->group_leader; |
| @@ -1695,7 +1566,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info) | |||
| 1695 | !(current->ptrace & PT_ATTACHED)) && | 1566 | !(current->ptrace & PT_ATTACHED)) && |
| 1696 | (likely(current->parent->signal != current->signal) || | 1567 | (likely(current->parent->signal != current->signal) || |
| 1697 | !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) { | 1568 | !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) { |
| 1698 | do_notify_parent_cldstop(current, 1, CLD_TRAPPED); | 1569 | do_notify_parent_cldstop(current, CLD_TRAPPED); |
| 1699 | read_unlock(&tasklist_lock); | 1570 | read_unlock(&tasklist_lock); |
| 1700 | schedule(); | 1571 | schedule(); |
| 1701 | } else { | 1572 | } else { |
| @@ -1744,25 +1615,17 @@ void ptrace_notify(int exit_code) | |||
| 1744 | static void | 1615 | static void |
| 1745 | finish_stop(int stop_count) | 1616 | finish_stop(int stop_count) |
| 1746 | { | 1617 | { |
| 1747 | int to_self; | ||
| 1748 | |||
| 1749 | /* | 1618 | /* |
| 1750 | * If there are no other threads in the group, or if there is | 1619 | * If there are no other threads in the group, or if there is |
| 1751 | * a group stop in progress and we are the last to stop, | 1620 | * a group stop in progress and we are the last to stop, |
| 1752 | * report to the parent. When ptraced, every thread reports itself. | 1621 | * report to the parent. When ptraced, every thread reports itself. |
| 1753 | */ | 1622 | */ |
| 1754 | if (stop_count < 0 || (current->ptrace & PT_PTRACED)) | 1623 | if (stop_count == 0 || (current->ptrace & PT_PTRACED)) { |
| 1755 | to_self = 1; | 1624 | read_lock(&tasklist_lock); |
| 1756 | else if (stop_count == 0) | 1625 | do_notify_parent_cldstop(current, CLD_STOPPED); |
| 1757 | to_self = 0; | 1626 | read_unlock(&tasklist_lock); |
| 1758 | else | 1627 | } |
| 1759 | goto out; | ||
| 1760 | |||
| 1761 | read_lock(&tasklist_lock); | ||
| 1762 | do_notify_parent_cldstop(current, to_self, CLD_STOPPED); | ||
| 1763 | read_unlock(&tasklist_lock); | ||
| 1764 | 1628 | ||
| 1765 | out: | ||
| 1766 | schedule(); | 1629 | schedule(); |
| 1767 | /* | 1630 | /* |
| 1768 | * Now we don't run again until continued. | 1631 | * Now we don't run again until continued. |
| @@ -1776,12 +1639,10 @@ out: | |||
| 1776 | * Returns nonzero if we've actually stopped and released the siglock. | 1639 | * Returns nonzero if we've actually stopped and released the siglock. |
| 1777 | * Returns zero if we didn't stop and still hold the siglock. | 1640 | * Returns zero if we didn't stop and still hold the siglock. |
| 1778 | */ | 1641 | */ |
| 1779 | static int | 1642 | static int do_signal_stop(int signr) |
| 1780 | do_signal_stop(int signr) | ||
| 1781 | { | 1643 | { |
| 1782 | struct signal_struct *sig = current->signal; | 1644 | struct signal_struct *sig = current->signal; |
| 1783 | struct sighand_struct *sighand = current->sighand; | 1645 | int stop_count; |
| 1784 | int stop_count = -1; | ||
| 1785 | 1646 | ||
| 1786 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) | 1647 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) |
| 1787 | return 0; | 1648 | return 0; |
| @@ -1791,86 +1652,37 @@ do_signal_stop(int signr) | |||
| 1791 | * There is a group stop in progress. We don't need to | 1652 | * There is a group stop in progress. We don't need to |
| 1792 | * start another one. | 1653 | * start another one. |
| 1793 | */ | 1654 | */ |
| 1794 | signr = sig->group_exit_code; | ||
| 1795 | stop_count = --sig->group_stop_count; | 1655 | stop_count = --sig->group_stop_count; |
| 1796 | current->exit_code = signr; | 1656 | } else { |
| 1797 | set_current_state(TASK_STOPPED); | ||
| 1798 | if (stop_count == 0) | ||
| 1799 | sig->flags = SIGNAL_STOP_STOPPED; | ||
| 1800 | spin_unlock_irq(&sighand->siglock); | ||
| 1801 | } | ||
| 1802 | else if (thread_group_empty(current)) { | ||
| 1803 | /* | ||
| 1804 | * Lock must be held through transition to stopped state. | ||
| 1805 | */ | ||
| 1806 | current->exit_code = current->signal->group_exit_code = signr; | ||
| 1807 | set_current_state(TASK_STOPPED); | ||
| 1808 | sig->flags = SIGNAL_STOP_STOPPED; | ||
| 1809 | spin_unlock_irq(&sighand->siglock); | ||
| 1810 | } | ||
| 1811 | else { | ||
| 1812 | /* | 1657 | /* |
| 1813 | * There is no group stop already in progress. | 1658 | * There is no group stop already in progress. |
| 1814 | * We must initiate one now, but that requires | 1659 | * We must initiate one now. |
| 1815 | * dropping siglock to get both the tasklist lock | ||
| 1816 | * and siglock again in the proper order. Note that | ||
| 1817 | * this allows an intervening SIGCONT to be posted. | ||
| 1818 | * We need to check for that and bail out if necessary. | ||
| 1819 | */ | 1660 | */ |
| 1820 | struct task_struct *t; | 1661 | struct task_struct *t; |
| 1821 | 1662 | ||
| 1822 | spin_unlock_irq(&sighand->siglock); | 1663 | sig->group_exit_code = signr; |
| 1823 | |||
| 1824 | /* signals can be posted during this window */ | ||
| 1825 | 1664 | ||
| 1826 | read_lock(&tasklist_lock); | 1665 | stop_count = 0; |
| 1827 | spin_lock_irq(&sighand->siglock); | 1666 | for (t = next_thread(current); t != current; t = next_thread(t)) |
| 1828 | |||
| 1829 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) { | ||
| 1830 | /* | 1667 | /* |
| 1831 | * Another stop or continue happened while we | 1668 | * Setting state to TASK_STOPPED for a group |
| 1832 | * didn't have the lock. We can just swallow this | 1669 | * stop is always done with the siglock held, |
| 1833 | * signal now. If we raced with a SIGCONT, that | 1670 | * so this check has no races. |
| 1834 | * should have just cleared it now. If we raced | ||
| 1835 | * with another processor delivering a stop signal, | ||
| 1836 | * then the SIGCONT that wakes us up should clear it. | ||
| 1837 | */ | 1671 | */ |
| 1838 | read_unlock(&tasklist_lock); | 1672 | if (!t->exit_state && |
| 1839 | return 0; | 1673 | !(t->state & (TASK_STOPPED|TASK_TRACED))) { |
| 1840 | } | 1674 | stop_count++; |
| 1841 | 1675 | signal_wake_up(t, 0); | |
| 1842 | if (sig->group_stop_count == 0) { | 1676 | } |
| 1843 | sig->group_exit_code = signr; | 1677 | sig->group_stop_count = stop_count; |
| 1844 | stop_count = 0; | ||
| 1845 | for (t = next_thread(current); t != current; | ||
| 1846 | t = next_thread(t)) | ||
| 1847 | /* | ||
| 1848 | * Setting state to TASK_STOPPED for a group | ||
| 1849 | * stop is always done with the siglock held, | ||
| 1850 | * so this check has no races. | ||
| 1851 | */ | ||
| 1852 | if (!t->exit_state && | ||
| 1853 | !(t->state & (TASK_STOPPED|TASK_TRACED))) { | ||
| 1854 | stop_count++; | ||
| 1855 | signal_wake_up(t, 0); | ||
| 1856 | } | ||
| 1857 | sig->group_stop_count = stop_count; | ||
| 1858 | } | ||
| 1859 | else { | ||
| 1860 | /* A race with another thread while unlocked. */ | ||
| 1861 | signr = sig->group_exit_code; | ||
| 1862 | stop_count = --sig->group_stop_count; | ||
| 1863 | } | ||
| 1864 | |||
| 1865 | current->exit_code = signr; | ||
| 1866 | set_current_state(TASK_STOPPED); | ||
| 1867 | if (stop_count == 0) | ||
| 1868 | sig->flags = SIGNAL_STOP_STOPPED; | ||
| 1869 | |||
| 1870 | spin_unlock_irq(&sighand->siglock); | ||
| 1871 | read_unlock(&tasklist_lock); | ||
| 1872 | } | 1678 | } |
| 1873 | 1679 | ||
| 1680 | if (stop_count == 0) | ||
| 1681 | sig->flags = SIGNAL_STOP_STOPPED; | ||
| 1682 | current->exit_code = sig->group_exit_code; | ||
| 1683 | __set_current_state(TASK_STOPPED); | ||
| 1684 | |||
| 1685 | spin_unlock_irq(&current->sighand->siglock); | ||
| 1874 | finish_stop(stop_count); | 1686 | finish_stop(stop_count); |
| 1875 | return 1; | 1687 | return 1; |
| 1876 | } | 1688 | } |
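The rewritten do_signal_stop() preserves the visible contract: one stop signal halts every thread in the group, and the parent is told exactly once. A user-space sketch of that contract (plain fork() for brevity; with a multithreaded child every thread stops the same way, and the sleep() is only crude synchronization for the demo):

/* Stop a child with SIGSTOP and collect the single CLD_STOPPED-style
 * notification via waitpid(WUNTRACED). */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	int status;

	if (pid == 0) {
		for (;;)
			pause();	/* child: wait to be stopped */
	}
	sleep(1);
	kill(pid, SIGSTOP);
	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));
	kill(pid, SIGKILL);		/* clean up */
	waitpid(pid, &status, 0);
	return 0;
}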
| @@ -1990,7 +1802,7 @@ relock: | |||
| 1990 | continue; | 1802 | continue; |
| 1991 | 1803 | ||
| 1992 | /* Init gets no signals it doesn't want. */ | 1804 | /* Init gets no signals it doesn't want. */ |
| 1993 | if (current->pid == 1) | 1805 | if (current == child_reaper) |
| 1994 | continue; | 1806 | continue; |
| 1995 | 1807 | ||
| 1996 | if (sig_kernel_stop(signr)) { | 1808 | if (sig_kernel_stop(signr)) { |
| @@ -2430,8 +2242,7 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo) | |||
| 2430 | return kill_proc_info(sig, &info, pid); | 2242 | return kill_proc_info(sig, &info, pid); |
| 2431 | } | 2243 | } |
| 2432 | 2244 | ||
| 2433 | int | 2245 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
| 2434 | do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) | ||
| 2435 | { | 2246 | { |
| 2436 | struct k_sigaction *k; | 2247 | struct k_sigaction *k; |
| 2437 | sigset_t mask; | 2248 | sigset_t mask; |
| @@ -2457,6 +2268,7 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) | |||
| 2457 | if (act) { | 2268 | if (act) { |
| 2458 | sigdelsetmask(&act->sa.sa_mask, | 2269 | sigdelsetmask(&act->sa.sa_mask, |
| 2459 | sigmask(SIGKILL) | sigmask(SIGSTOP)); | 2270 | sigmask(SIGKILL) | sigmask(SIGSTOP)); |
| 2271 | *k = *act; | ||
| 2460 | /* | 2272 | /* |
| 2461 | * POSIX 3.3.1.3: | 2273 | * POSIX 3.3.1.3: |
| 2462 | * "Setting a signal action to SIG_IGN for a signal that is | 2274 | * "Setting a signal action to SIG_IGN for a signal that is |
| @@ -2469,19 +2281,8 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) | |||
| 2469 | * be discarded, whether or not it is blocked" | 2281 | * be discarded, whether or not it is blocked" |
| 2470 | */ | 2282 | */ |
| 2471 | if (act->sa.sa_handler == SIG_IGN || | 2283 | if (act->sa.sa_handler == SIG_IGN || |
| 2472 | (act->sa.sa_handler == SIG_DFL && | 2284 | (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) { |
| 2473 | sig_kernel_ignore(sig))) { | ||
| 2474 | /* | ||
| 2475 | * This is a fairly rare case, so we only take the | ||
| 2476 | * tasklist_lock once we're sure we'll need it. | ||
| 2477 | * Now we must do this little unlock and relock | ||
| 2478 | * dance to maintain the lock hierarchy. | ||
| 2479 | */ | ||
| 2480 | struct task_struct *t = current; | 2285 | struct task_struct *t = current; |
| 2481 | spin_unlock_irq(&t->sighand->siglock); | ||
| 2482 | read_lock(&tasklist_lock); | ||
| 2483 | spin_lock_irq(&t->sighand->siglock); | ||
| 2484 | *k = *act; | ||
| 2485 | sigemptyset(&mask); | 2286 | sigemptyset(&mask); |
| 2486 | sigaddset(&mask, sig); | 2287 | sigaddset(&mask, sig); |
| 2487 | rm_from_queue_full(&mask, &t->signal->shared_pending); | 2288 | rm_from_queue_full(&mask, &t->signal->shared_pending); |
| @@ -2490,12 +2291,7 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) | |||
| 2490 | recalc_sigpending_tsk(t); | 2291 | recalc_sigpending_tsk(t); |
| 2491 | t = next_thread(t); | 2292 | t = next_thread(t); |
| 2492 | } while (t != current); | 2293 | } while (t != current); |
| 2493 | spin_unlock_irq(&current->sighand->siglock); | ||
| 2494 | read_unlock(&tasklist_lock); | ||
| 2495 | return 0; | ||
| 2496 | } | 2294 | } |
| 2497 | |||
| 2498 | *k = *act; | ||
| 2499 | } | 2295 | } |
| 2500 | 2296 | ||
| 2501 | spin_unlock_irq(&current->sighand->siglock); | 2297 |
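The POSIX 3.3.1.3 rule quoted in the hunk above is directly observable: a pending signal, even a blocked one, is discarded the moment its disposition becomes SIG_IGN. A minimal check of exactly that behaviour (standard API only):

/* Make SIGUSR1 pending while blocked, then install SIG_IGN and confirm
 * the kernel dropped it from the pending set, which is what the
 * rm_from_queue_full() calls above implement. Prints 1, then 0. */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	struct sigaction sa = { .sa_handler = SIG_IGN };
	sigset_t set, pend;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);			/* now pending and blocked */
	sigpending(&pend);
	printf("pending before SIG_IGN: %d\n", sigismember(&pend, SIGUSR1));

	sigaction(SIGUSR1, &sa, NULL);	/* must discard the pending signal */
	sigpending(&pend);
	printf("pending after  SIG_IGN: %d\n", sigismember(&pend, SIGUSR1));
	return 0;
}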
diff --git a/kernel/sys.c b/kernel/sys.c index c93d37f71aef..7ef7f6054c28 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -1202,69 +1202,24 @@ asmlinkage long sys_times(struct tms __user * tbuf) | |||
| 1202 | */ | 1202 | */ |
| 1203 | if (tbuf) { | 1203 | if (tbuf) { |
| 1204 | struct tms tmp; | 1204 | struct tms tmp; |
| 1205 | struct task_struct *tsk = current; | ||
| 1206 | struct task_struct *t; | ||
| 1205 | cputime_t utime, stime, cutime, cstime; | 1207 | cputime_t utime, stime, cutime, cstime; |
| 1206 | 1208 | ||
| 1207 | #ifdef CONFIG_SMP | 1209 | spin_lock_irq(&tsk->sighand->siglock); |
| 1208 | if (thread_group_empty(current)) { | 1210 | utime = tsk->signal->utime; |
| 1209 | /* | 1211 | stime = tsk->signal->stime; |
| 1210 | * Single thread case without the use of any locks. | 1212 | t = tsk; |
| 1211 | * | 1213 | do { |
| 1212 | * We may race with release_task if two threads are | 1214 | utime = cputime_add(utime, t->utime); |
| 1213 | * executing. However, release task first adds up the | 1215 | stime = cputime_add(stime, t->stime); |
| 1214 | * counters (__exit_signal) before removing the task | 1216 | t = next_thread(t); |
| 1215 | * from the process tasklist (__unhash_process). | 1217 | } while (t != tsk); |
| 1216 | * __exit_signal also acquires and releases the | ||
| 1217 | * siglock which results in the proper memory ordering | ||
| 1218 | * so that the list modifications are always visible | ||
| 1219 | * after the counters have been updated. | ||
| 1220 | * | ||
| 1221 | * If the counters have been updated by the second thread | ||
| 1222 | * but the thread has not yet been removed from the list | ||
| 1223 | * then the other branch will be executing which will | ||
| 1224 | * block on tasklist_lock until the exit handling of the | ||
| 1225 | * other task is finished. | ||
| 1226 | * | ||
| 1227 | * This also implies that the sighand->siglock cannot | ||
| 1228 | * be held by another processor. So we can also | ||
| 1229 | * skip acquiring that lock. | ||
| 1230 | */ | ||
| 1231 | utime = cputime_add(current->signal->utime, current->utime); | ||
| 1232 | stime = cputime_add(current->signal->utime, current->stime); | ||
| 1233 | cutime = current->signal->cutime; | ||
| 1234 | cstime = current->signal->cstime; | ||
| 1235 | } else | ||
| 1236 | #endif | ||
| 1237 | { | ||
| 1238 | 1218 | ||
| 1239 | /* Process with multiple threads */ | 1219 | cutime = tsk->signal->cutime; |
| 1240 | struct task_struct *tsk = current; | 1220 | cstime = tsk->signal->cstime; |
| 1241 | struct task_struct *t; | 1221 | spin_unlock_irq(&tsk->sighand->siglock); |
| 1242 | 1222 | ||
| 1243 | read_lock(&tasklist_lock); | ||
| 1244 | utime = tsk->signal->utime; | ||
| 1245 | stime = tsk->signal->stime; | ||
| 1246 | t = tsk; | ||
| 1247 | do { | ||
| 1248 | utime = cputime_add(utime, t->utime); | ||
| 1249 | stime = cputime_add(stime, t->stime); | ||
| 1250 | t = next_thread(t); | ||
| 1251 | } while (t != tsk); | ||
| 1252 | |||
| 1253 | /* | ||
| 1254 | * While we have tasklist_lock read-locked, no dying thread | ||
| 1255 | * can be updating current->signal->[us]time. Instead, | ||
| 1256 | * we got their counts included in the live thread loop. | ||
| 1257 | * However, another thread can come in right now and | ||
| 1258 | * do a wait call that updates current->signal->c[us]time. | ||
| 1259 | * To make sure we always see that pair updated atomically, | ||
| 1260 | * we take the siglock around fetching them. | ||
| 1261 | */ | ||
| 1262 | spin_lock_irq(&tsk->sighand->siglock); | ||
| 1263 | cutime = tsk->signal->cutime; | ||
| 1264 | cstime = tsk->signal->cstime; | ||
| 1265 | spin_unlock_irq(&tsk->sighand->siglock); | ||
| 1266 | read_unlock(&tasklist_lock); | ||
| 1267 | } | ||
| 1268 | tmp.tms_utime = cputime_to_clock_t(utime); | 1223 | tmp.tms_utime = cputime_to_clock_t(utime); |
| 1269 | tmp.tms_stime = cputime_to_clock_t(stime); | 1224 | tmp.tms_stime = cputime_to_clock_t(stime); |
| 1270 | tmp.tms_cutime = cputime_to_clock_t(cutime); | 1225 | tmp.tms_cutime = cputime_to_clock_t(cutime); |
