Diffstat (limited to 'kernel/exit.c')
-rw-r--r--  kernel/exit.c | 122
 1 file changed, 10 insertions(+), 112 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 346616c0092c..60bc027c61c3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -20,6 +20,7 @@
 #include <linux/tsacct_kern.h>
 #include <linux/file.h>
 #include <linux/fdtable.h>
+#include <linux/freezer.h>
 #include <linux/binfmts.h>
 #include <linux/nsproxy.h>
 #include <linux/pid_namespace.h>
@@ -31,7 +32,6 @@
 #include <linux/mempolicy.h>
 #include <linux/taskstats_kern.h>
 #include <linux/delayacct.h>
-#include <linux/freezer.h>
 #include <linux/cgroup.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
@@ -72,18 +72,6 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
 		list_del_rcu(&p->tasks);
 		list_del_init(&p->sibling);
 		__this_cpu_dec(process_counts);
-		/*
-		 * If we are the last child process in a pid namespace to be
-		 * reaped, notify the reaper sleeping zap_pid_ns_processes().
-		 */
-		if (IS_ENABLED(CONFIG_PID_NS)) {
-			struct task_struct *parent = p->real_parent;
-
-			if ((task_active_pid_ns(parent)->child_reaper == parent) &&
-			    list_empty(&parent->children) &&
-			    (parent->flags & PF_EXITING))
-				wake_up_process(parent);
-		}
 	}
 	list_del_rcu(&p->thread_group);
 }
@@ -97,6 +85,7 @@ static void __exit_signal(struct task_struct *tsk)
 	bool group_dead = thread_group_leader(tsk);
 	struct sighand_struct *sighand;
 	struct tty_struct *uninitialized_var(tty);
+	cputime_t utime, stime;
 
 	sighand = rcu_dereference_check(tsk->sighand,
 					lockdep_tasklist_lock_is_held());
@@ -135,9 +124,10 @@ static void __exit_signal(struct task_struct *tsk)
 		 * We won't ever get here for the group leader, since it
 		 * will have been the last reference on the signal_struct.
 		 */
-		sig->utime += tsk->utime;
-		sig->stime += tsk->stime;
-		sig->gtime += tsk->gtime;
+		task_cputime(tsk, &utime, &stime);
+		sig->utime += utime;
+		sig->stime += stime;
+		sig->gtime += task_gtime(tsk);
 		sig->min_flt += tsk->min_flt;
 		sig->maj_flt += tsk->maj_flt;
 		sig->nvcsw += tsk->nvcsw;
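
For context on the hunk above: task_cputime() and task_gtime() are the accessors that replace direct reads of tsk->utime/stime/gtime, so that builds with full dynticks cputime accounting can do a synchronized read instead. A minimal sketch of the plain fallback variants, assuming the non-CONFIG_VIRT_CPU_ACCOUNTING_GEN definitions from include/linux/sched.h of this era (the exact guards and bodies are an assumption, not taken from this patch):

/*
 * Sketch of the trivial accessors; the full-dynticks build replaces
 * these with variants that sample the task's vtime state under a lock.
 */
static inline void task_cputime(struct task_struct *t,
				cputime_t *utime, cputime_t *stime)
{
	if (utime)
		*utime = t->utime;	/* raw user time, as before */
	if (stime)
		*stime = t->stime;	/* raw system time, as before */
}

static inline cputime_t task_gtime(struct task_struct *t)
{
	return t->gtime;		/* guest time */
}

With those in place, __exit_signal() no longer depends on which accounting model is compiled in.
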
@@ -322,43 +312,6 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
 	}
 }
 
-/**
- * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
- *
- * If a kernel thread is launched as a result of a system call, or if
- * it ever exits, it should generally reparent itself to kthreadd so it
- * isn't in the way of other processes and is correctly cleaned up on exit.
- *
- * The various task state such as scheduling policy and priority may have
- * been inherited from a user process, so we reset them to sane values here.
- *
- * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
- */
-static void reparent_to_kthreadd(void)
-{
-	write_lock_irq(&tasklist_lock);
-
-	ptrace_unlink(current);
-	/* Reparent to init */
-	current->real_parent = current->parent = kthreadd_task;
-	list_move_tail(&current->sibling, &current->real_parent->children);
-
-	/* Set the exit signal to SIGCHLD so we signal init on exit */
-	current->exit_signal = SIGCHLD;
-
-	if (task_nice(current) < 0)
-		set_user_nice(current, 0);
-	/* cpus_allowed? */
-	/* rt_priority? */
-	/* signals? */
-	memcpy(current->signal->rlim, init_task.signal->rlim,
-	       sizeof(current->signal->rlim));
-
-	atomic_inc(&init_cred.usage);
-	commit_creds(&init_cred);
-	write_unlock_irq(&tasklist_lock);
-}
-
 void __set_special_pids(struct pid *pid)
 {
 	struct task_struct *curr = current->group_leader;
@@ -370,13 +323,6 @@ void __set_special_pids(struct pid *pid)
 	change_pid(curr, PIDTYPE_PGID, pid);
 }
 
-static void set_special_pids(struct pid *pid)
-{
-	write_lock_irq(&tasklist_lock);
-	__set_special_pids(pid);
-	write_unlock_irq(&tasklist_lock);
-}
-
 /*
  * Let kernel threads use this to say that they allow a certain signal.
  * Must not be used if kthread was cloned with CLONE_SIGHAND.
@@ -416,54 +362,6 @@ int disallow_signal(int sig)
 
 EXPORT_SYMBOL(disallow_signal);
 
-/*
- * Put all the gunge required to become a kernel thread without
- * attached user resources in one place where it belongs.
- */
-
-void daemonize(const char *name, ...)
-{
-	va_list args;
-	sigset_t blocked;
-
-	va_start(args, name);
-	vsnprintf(current->comm, sizeof(current->comm), name, args);
-	va_end(args);
-
-	/*
-	 * If we were started as result of loading a module, close all of the
-	 * user space pages.  We don't need them, and if we didn't close them
-	 * they would be locked into memory.
-	 */
-	exit_mm(current);
-	/*
-	 * We don't want to get frozen, in case system-wide hibernation
-	 * or suspend transition begins right now.
-	 */
-	current->flags |= (PF_NOFREEZE | PF_KTHREAD);
-
-	if (current->nsproxy != &init_nsproxy) {
-		get_nsproxy(&init_nsproxy);
-		switch_task_namespaces(current, &init_nsproxy);
-	}
-	set_special_pids(&init_struct_pid);
-	proc_clear_tty(current);
-
-	/* Block and flush all signals */
-	sigfillset(&blocked);
-	sigprocmask(SIG_BLOCK, &blocked, NULL);
-	flush_signals(current);
-
-	/* Become as one with the init task */
-
-	daemonize_fs_struct();
-	daemonize_descriptors();
-
-	reparent_to_kthreadd();
-}
-
-EXPORT_SYMBOL(daemonize);
-
 #ifdef CONFIG_MM_OWNER
 /*
  * A task is exiting.  If it owned this mm, find a new owner for the mm.
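
With daemonize() removed above (which also lets reparent_to_kthreadd() and set_special_pids() go away), the way to get a properly detached kernel thread is the kthread API, which parents threads to kthreadd from the start. The snippet below is a hypothetical usage sketch, not part of this patch; the thread function and names are invented for illustration:

/* Hypothetical replacement pattern for a former daemonize() user.
 * Needs <linux/kthread.h> and <linux/err.h>. */
static struct task_struct *my_worker;

static int my_worker_fn(void *data)
{
	/* Already a kthreadd child: no reparenting or signal games needed. */
	while (!kthread_should_stop()) {
		/* ... periodic housekeeping would go here ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

static int __init my_module_init(void)
{
	my_worker = kthread_run(my_worker_fn, NULL, "my_worker");
	if (IS_ERR(my_worker))
		return PTR_ERR(my_worker);
	return 0;
}
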
@@ -587,7 +485,7 @@ static void exit_mm(struct task_struct * tsk)
 			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 			if (!self.task)	/* see coredump_finish() */
 				break;
-			schedule();
+			freezable_schedule();
 		}
 		__set_task_state(tsk, TASK_RUNNING);
 		down_read(&mm->mmap_sem);
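
The schedule() to freezable_schedule() switch here (together with moving the freezer.h include to the top of the file) means a thread parked in this loop while a coredump is in flight no longer blocks a suspend or hibernation transition. For reference, my recollection of the helper from include/linux/freezer.h; treat the exact body as an assumption rather than a quote from this tree:

/*
 * Sketch: tell the freezer this sleeping task does not need to be
 * counted, so freezing can proceed while we wait for coredump_finish().
 */
static inline void freezable_schedule(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count();
}
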
@@ -1186,17 +1084,17 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 		 * as other threads in the parent group can be right
 		 * here reaping other children at the same time.
 		 *
-		 * We use thread_group_times() to get times for the thread
+		 * We use thread_group_cputime_adjusted() to get times for the thread
 		 * group, which consolidates times for all threads in the
 		 * group including the group leader.
 		 */
-		thread_group_times(p, &tgutime, &tgstime);
+		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
 		spin_lock_irq(&p->real_parent->sighand->siglock);
 		psig = p->real_parent->signal;
 		sig = p->signal;
 		psig->cutime += tgutime + sig->cutime;
 		psig->cstime += tgstime + sig->cstime;
-		psig->cgtime += p->gtime + sig->gtime + sig->cgtime;
+		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
 		psig->cmin_flt +=
 			p->min_flt + sig->min_flt + sig->cmin_flt;
 		psig->cmaj_flt +=
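
The rename from thread_group_times() to thread_group_cputime_adjusted() tracks the cputime rework; the caller's contract is unchanged, it still yields whole-group user/system time including the group leader. For orientation, a rough sketch of how the adjusted helper is typically composed, assuming the kernel/sched/cputime.c layout of this period (the exact body is an assumption, not taken from this diff):

/*
 * Sketch: sum the raw per-thread cputimes for the whole thread group,
 * then let cputime_adjust() scale them against sum_exec_runtime so the
 * values reported to the parent stay monotonic across accounting models.
 */
void thread_group_cputime_adjusted(struct task_struct *p,
				   cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}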