Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c  121
1 file changed, 73 insertions, 48 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index c49bd193b058..b3f7a1bb5e55 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -84,7 +84,7 @@ static kmem_cache_t *task_struct_cachep;
 #endif
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
-kmem_cache_t *signal_cachep;
+static kmem_cache_t *signal_cachep;
 
 /* SLAB cache for sighand_struct structures (tsk->sighand) */
 kmem_cache_t *sighand_cachep;
@@ -786,14 +786,6 @@ int unshare_files(void)
 
 EXPORT_SYMBOL(unshare_files);
 
-void sighand_free_cb(struct rcu_head *rhp)
-{
-	struct sighand_struct *sp;
-
-	sp = container_of(rhp, struct sighand_struct, rcu);
-	kmem_cache_free(sighand_cachep, sp);
-}
-
 static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
 {
 	struct sighand_struct *sig;
@@ -806,12 +798,17 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
 	rcu_assign_pointer(tsk->sighand, sig);
 	if (!sig)
 		return -ENOMEM;
-	spin_lock_init(&sig->siglock);
 	atomic_set(&sig->count, 1);
 	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
 	return 0;
 }
 
+void __cleanup_sighand(struct sighand_struct *sighand)
+{
+	if (atomic_dec_and_test(&sighand->count))
+		kmem_cache_free(sighand_cachep, sighand);
+}
+
 static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
 {
 	struct signal_struct *sig;
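
For context, the sharing half of this contract sits in the part of copy_sighand() that the hunk elides: a CLONE_SIGHAND or CLONE_THREAD child just takes another reference on the parent's handler table, and the new __cleanup_sighand() becomes the single place that reference is dropped, both on the fork error path (bad_fork_cleanup_sighand, further down) and on exit. A rough sketch of that elided branch, simplified rather than quoted verbatim:

	/*
	 * Sketch of the sharing branch at the top of copy_sighand(): the
	 * child only bumps the reference count; the matching drop is the
	 * __cleanup_sighand() added above.
	 */
	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
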
@@ -881,6 +878,22 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 	return 0;
 }
 
+void __cleanup_signal(struct signal_struct *sig)
+{
+	exit_thread_group_keys(sig);
+	kmem_cache_free(signal_cachep, sig);
+}
+
+static inline void cleanup_signal(struct task_struct *tsk)
+{
+	struct signal_struct *sig = tsk->signal;
+
+	atomic_dec(&sig->live);
+
+	if (atomic_dec_and_test(&sig->count))
+		__cleanup_signal(sig);
+}
+
 static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
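
The two counters have different jobs: sig->count is the number of references to the signal_struct (one per thread in the group), while sig->live counts the member threads that have not yet exited. cleanup_signal() simply reverses the setup done earlier in copy_signal(); that setup is outside this hunk, but reads roughly like the sketch below (simplified, not the verbatim source):

	/*
	 * Sketch of the counter setup in copy_signal() that cleanup_signal()
	 * undoes: a CLONE_THREAD child bumps both counters on the shared
	 * signal_struct, while a new thread group starts both at 1.
	 */
	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		return 0;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
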
@@ -1095,6 +1108,7 @@ static task_t *copy_process(unsigned long clone_flags,
 	 * We dont wake it up yet.
 	 */
 	p->group_leader = p;
+	INIT_LIST_HEAD(&p->thread_group);
 	INIT_LIST_HEAD(&p->ptrace_children);
 	INIT_LIST_HEAD(&p->ptrace_list);
 
@@ -1118,16 +1132,6 @@ static task_t *copy_process(unsigned long clone_flags,
 			!cpu_online(task_cpu(p))))
 		set_task_cpu(p, smp_processor_id());
 
-	/*
-	 * Check for pending SIGKILL! The new thread should not be allowed
-	 * to slip out of an OOM kill. (or normal SIGKILL.)
-	 */
-	if (sigismember(&current->pending.signal, SIGKILL)) {
-		write_unlock_irq(&tasklist_lock);
-		retval = -EINTR;
-		goto bad_fork_cleanup_namespace;
-	}
-
 	/* CLONE_PARENT re-uses the old parent */
 	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
 		p->real_parent = current->real_parent;
@@ -1136,6 +1140,23 @@ static task_t *copy_process(unsigned long clone_flags,
 		p->parent = p->real_parent;
 
 	spin_lock(&current->sighand->siglock);
+
+	/*
+	 * Process group and session signals need to be delivered to just the
+	 * parent before the fork or both the parent and the child after the
+	 * fork. Restart if a signal comes in before we add the new process to
+	 * it's process group.
+	 * A fatal signal pending means that current will exit, so the new
+	 * thread can't slip out of an OOM kill (or normal SIGKILL).
+	 */
+	recalc_sigpending();
+	if (signal_pending(current)) {
+		spin_unlock(&current->sighand->siglock);
+		write_unlock_irq(&tasklist_lock);
+		retval = -ERESTARTNOINTR;
+		goto bad_fork_cleanup_namespace;
+	}
+
 	if (clone_flags & CLONE_THREAD) {
 		/*
 		 * Important: if an exit-all has been started then
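
Two things changed relative to the SIGKILL-only test removed above. The window is now closed for any pending signal, so a process-group or session signal that arrives mid-fork is delivered either to the parent alone (before the child is linked in) or to both parent and child (after), never to neither. And the error is -ERESTARTNOINTR rather than -EINTR: that value never reaches userspace, because the signal-delivery path re-issues the system call once the signal has been dealt with, so fork() is retried transparently. A minimal sketch of that restart step, with register and field names invented purely for illustration (this is not any real architecture's code):

	/*
	 * Sketch: how -ERESTARTNOINTR is consumed at signal-delivery time.
	 * "regs" members and SYSCALL_INSN_SIZE are placeholders; each
	 * architecture does this in its own signal code.
	 */
	if (regs->ret == -ERESTARTNOINTR) {
		regs->ret = regs->orig_ret;	/* put the original syscall number back */
		regs->pc -= SYSCALL_INSN_SIZE;	/* back up so the syscall instruction runs again */
	}
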
@@ -1148,17 +1169,9 @@ static task_t *copy_process(unsigned long clone_flags,
 			retval = -EAGAIN;
 			goto bad_fork_cleanup_namespace;
 		}
-		p->group_leader = current->group_leader;
 
-		if (current->signal->group_stop_count > 0) {
-			/*
-			 * There is an all-stop in progress for the group.
-			 * We ourselves will stop as soon as we check signals.
-			 * Make the new thread part of that group stop too.
-			 */
-			current->signal->group_stop_count++;
-			set_tsk_thread_flag(p, TIF_SIGPENDING);
-		}
+		p->group_leader = current->group_leader;
+		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
 
 		if (!cputime_eq(current->signal->it_virt_expires,
 				cputime_zero) ||
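
With every new thread now linked on its leader's ->thread_group list (initialised earlier in this patch and spliced in here under the siglock), walking a thread group becomes an ordinary circular-list traversal. Roughly, the next-thread step under RCU looks like the sketch below; the helper name and locking requirements are assumptions for illustration, not part of this diff:

/* Sketch: advance to the next thread in the group via ->thread_group. */
static inline struct task_struct *next_thread_sketch(const struct task_struct *p)
{
	return list_entry(rcu_dereference(p->thread_group.next),
			  struct task_struct, thread_group);
}
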
@@ -1181,23 +1194,25 @@ static task_t *copy_process(unsigned long clone_flags,
 	 */
 	p->ioprio = current->ioprio;
 
-	SET_LINKS(p);
-	if (unlikely(p->ptrace & PT_PTRACED))
-		__ptrace_link(p, current->parent);
-
-	if (thread_group_leader(p)) {
-		p->signal->tty = current->signal->tty;
-		p->signal->pgrp = process_group(current);
-		p->signal->session = current->signal->session;
-		attach_pid(p, PIDTYPE_PGID, process_group(p));
-		attach_pid(p, PIDTYPE_SID, p->signal->session);
-		if (p->pid)
+	if (likely(p->pid)) {
+		add_parent(p);
+		if (unlikely(p->ptrace & PT_PTRACED))
+			__ptrace_link(p, current->parent);
+
+		if (thread_group_leader(p)) {
+			p->signal->tty = current->signal->tty;
+			p->signal->pgrp = process_group(current);
+			p->signal->session = current->signal->session;
+			attach_pid(p, PIDTYPE_PGID, process_group(p));
+			attach_pid(p, PIDTYPE_SID, p->signal->session);
+
+			list_add_tail(&p->tasks, &init_task.tasks);
 			__get_cpu_var(process_counts)++;
+		}
+		attach_pid(p, PIDTYPE_PID, p->pid);
+		nr_threads++;
 	}
-	attach_pid(p, PIDTYPE_TGID, p->tgid);
-	attach_pid(p, PIDTYPE_PID, p->pid);
 
-	nr_threads++;
 	total_forks++;
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
@@ -1212,9 +1227,9 @@ bad_fork_cleanup_mm:
 	if (p->mm)
 		mmput(p->mm);
 bad_fork_cleanup_signal:
-	exit_signal(p);
+	cleanup_signal(p);
 bad_fork_cleanup_sighand:
-	exit_sighand(p);
+	__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
 	exit_fs(p); /* blocking */
 bad_fork_cleanup_files:
@@ -1261,7 +1276,7 @@ task_t * __devinit fork_idle(int cpu)
 	if (!task)
 		return ERR_PTR(-ENOMEM);
 	init_idle(task, cpu);
-	unhash_process(task);
+
 	return task;
 }
 
@@ -1353,11 +1368,21 @@ long do_fork(unsigned long clone_flags,
 #define ARCH_MIN_MMSTRUCT_ALIGN 0
 #endif
 
+static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
+{
+	struct sighand_struct *sighand = data;
+
+	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
+					SLAB_CTOR_CONSTRUCTOR)
+		spin_lock_init(&sighand->siglock);
+}
+
 void __init proc_caches_init(void)
 {
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
+			sighand_ctor, NULL);
 	signal_cachep = kmem_cache_create("signal_cache",
 			sizeof(struct signal_struct), 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
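
This cache-level change is what lets the per-object RCU callback (the removed sighand_free_cb()) disappear. With SLAB_DESTROY_BY_RCU it is the slab pages, not individual objects, whose freeing waits for a grace period: a sighand_struct may be reused for another task immediately, but under rcu_read_lock() a stale tsk->sighand pointer still points at memory that is a valid sighand_struct whose siglock was initialised exactly once by sighand_ctor(). A lock-free reader therefore takes the lock it finds and re-checks that the structure still belongs to the task, retrying otherwise. A minimal sketch of that reader pattern follows; the helper's name and exact shape are assumptions, not something introduced by this diff:

/*
 * Sketch of the reader pattern SLAB_DESTROY_BY_RCU enables for ->sighand.
 * The spinlock is always initialised (constructor), so it is safe to take
 * even if the object was recycled; the re-check catches that case.
 */
struct sighand_struct *lock_task_sighand_sketch(struct task_struct *tsk,
						unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			break;				/* task is past exit */
		}
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;				/* still ours: return it locked */
		}
		spin_unlock_irqrestore(&sighand->siglock, *flags);
		rcu_read_unlock();			/* recycled under us: retry */
	}
	return sighand;
}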