| author | Ingo Molnar <mingo@elte.hu> | 2009-04-06 03:02:57 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-04-06 03:02:57 -0400 |
| commit | f541ae326fa120fa5c57433e4d9a133df212ce41 (patch) | |
| tree | bdbd94ec72cfc601118051cb35e8617d55510177 /kernel/exit.c | |
| parent | e255357764f92afcafafbd4879b222b8c752065a (diff) | |
| parent | 0221c81b1b8eb0cbb6b30a0ced52ead32d2b4e4c (diff) | |
Merge branch 'linus' into perfcounters/core-v2
Merge reason: we have gathered quite a few conflicts, need to merge upstream
Conflicts:
arch/powerpc/kernel/Makefile
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/hardirq.h
arch/x86/include/asm/unistd_32.h
arch/x86/include/asm/unistd_64.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/irq.c
arch/x86/kernel/syscall_table_32.S
arch/x86/mm/iomap_32.c
include/linux/sched.h
kernel/Makefile
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/exit.c')
| -rw-r--r-- | kernel/exit.c | 245 |
1 file changed, 69 insertions(+), 176 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index f52c24eb8a8f..7a14a2b504f5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -46,6 +46,7 @@
 #include <linux/blkdev.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/tracehook.h>
+#include <linux/fs_struct.h>
 #include <linux/init_task.h>
 #include <trace/sched.h>
 
@@ -61,11 +62,6 @@ DEFINE_TRACE(sched_process_wait);
 
 static void exit_mm(struct task_struct * tsk);
 
-static inline int task_detached(struct task_struct *p)
-{
-	return p->exit_signal == -1;
-}
-
 static void __unhash_process(struct task_struct *p)
 {
 	nr_threads--;
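Note: task_detached() is removed from exit.c here, yet the reworked reparent_thread() further down still calls it, so the helper has evidently moved to a shared header rather than disappeared. A minimal sketch of it as it appears above (the destination header, presumably include/linux/sched.h, is an assumption; it is not shown in this diff):

```c
/* Presumed new home: include/linux/sched.h (assumption). */
static inline int task_detached(struct task_struct *p)
{
	/* exit_signal == -1 means the task self-reaps: no parent notification. */
	return p->exit_signal == -1;
}
```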
@@ -365,16 +361,12 @@ static void reparent_to_kthreadd(void)
 void __set_special_pids(struct pid *pid)
 {
 	struct task_struct *curr = current->group_leader;
-	pid_t nr = pid_nr(pid);
 
-	if (task_session(curr) != pid) {
+	if (task_session(curr) != pid)
 		change_pid(curr, PIDTYPE_SID, pid);
-		set_task_session(curr, nr);
-	}
-	if (task_pgrp(curr) != pid) {
+
+	if (task_pgrp(curr) != pid)
 		change_pid(curr, PIDTYPE_PGID, pid);
-		set_task_pgrp(curr, nr);
-	}
 }
 
 static void set_special_pids(struct pid *pid)
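Note: the dropped set_task_session()/set_task_pgrp() calls mirrored the numeric id into per-process caches; with those caches gone, change_pid() alone suffices, since it rehashes the task under the new struct pid. Roughly, from memory of the kernel/pid.c helper (illustrative sketch, not verbatim source):

```c
/* Sketch: detach the task from its old struct pid of this type and
 * attach it to the new one, keeping the pid hash consistent. */
void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);	/* unhook from the old pid */
	attach_pid(task, type, pid);	/* hook onto the new pid */
}
```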
@@ -432,7 +424,6 @@ EXPORT_SYMBOL(disallow_signal);
 void daemonize(const char *name, ...)
 {
 	va_list args;
-	struct fs_struct *fs;
 	sigset_t blocked;
 
 	va_start(args, name);
@@ -465,11 +456,7 @@ void daemonize(const char *name, ...)
 
 	/* Become as one with the init task */
 
-	exit_fs(current);	/* current->fs->count--; */
-	fs = init_task.fs;
-	current->fs = fs;
-	atomic_inc(&fs->count);
-
+	daemonize_fs_struct();
 	exit_files(current);
 	current->files = init_task.files;
 	atomic_inc(&current->files->count);
@@ -568,30 +555,6 @@ void exit_files(struct task_struct *tsk)
 	}
 }
 
-void put_fs_struct(struct fs_struct *fs)
-{
-	/* No need to hold fs->lock if we are killing it */
-	if (atomic_dec_and_test(&fs->count)) {
-		path_put(&fs->root);
-		path_put(&fs->pwd);
-		kmem_cache_free(fs_cachep, fs);
-	}
-}
-
-void exit_fs(struct task_struct *tsk)
-{
-	struct fs_struct * fs = tsk->fs;
-
-	if (fs) {
-		task_lock(tsk);
-		tsk->fs = NULL;
-		task_unlock(tsk);
-		put_fs_struct(fs);
-	}
-}
-
-EXPORT_SYMBOL_GPL(exit_fs);
-
 #ifdef CONFIG_MM_OWNER
 /*
  * Task p is exiting and it owned mm, lets find a new owner for it
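Note: put_fs_struct() and exit_fs() do not vanish; together with the new daemonize_fs_struct() call above they have evidently been consolidated into fs/fs_struct.c, which is why the first hunk adds #include <linux/fs_struct.h>. Judging from the open-coded sequence daemonize() used to carry, daemonize_fs_struct() should be roughly equivalent to this sketch (the body is an assumption reconstructed from the removed lines; any locking in the real helper is omitted):

```c
/* Sketch of the presumed fs/fs_struct.c helper: drop our fs_struct
 * and share init's, exactly what daemonize() open-coded before. */
void daemonize_fs_struct(void)
{
	struct fs_struct *fs;

	exit_fs(current);		/* current->fs->count-- */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);		/* pin init's fs_struct */
}
```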
@@ -735,119 +698,6 @@ static void exit_mm(struct task_struct * tsk)
 }
 
 /*
- * Return nonzero if @parent's children should reap themselves.
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static int ignoring_children(struct task_struct *parent)
-{
-	int ret;
-	struct sighand_struct *psig = parent->sighand;
-	unsigned long flags;
-	spin_lock_irqsave(&psig->siglock, flags);
-	ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
-	       (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
-	spin_unlock_irqrestore(&psig->siglock, flags);
-	return ret;
-}
-
-/*
- * Detach all tasks we were using ptrace on.
- * Any that need to be release_task'd are put on the @dead list.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
-{
-	struct task_struct *p, *n;
-	int ign = -1;
-
-	list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
-		__ptrace_unlink(p);
-
-		if (p->exit_state != EXIT_ZOMBIE)
-			continue;
-
-		/*
-		 * If it's a zombie, our attachedness prevented normal
-		 * parent notification or self-reaping.  Do notification
-		 * now if it would have happened earlier.  If it should
-		 * reap itself, add it to the @dead list.  We can't call
-		 * release_task() here because we already hold tasklist_lock.
-		 *
-		 * If it's our own child, there is no notification to do.
-		 * But if our normal children self-reap, then this child
-		 * was prevented by ptrace and we must reap it now.
-		 */
-		if (!task_detached(p) && thread_group_empty(p)) {
-			if (!same_thread_group(p->real_parent, parent))
-				do_notify_parent(p, p->exit_signal);
-			else {
-				if (ign < 0)
-					ign = ignoring_children(parent);
-				if (ign)
-					p->exit_signal = -1;
-			}
-		}
-
-		if (task_detached(p)) {
-			/*
-			 * Mark it as in the process of being reaped.
-			 */
-			p->exit_state = EXIT_DEAD;
-			list_add(&p->ptrace_entry, dead);
-		}
-	}
-}
-
-/*
- * Finish up exit-time ptrace cleanup.
- *
- * Called without locks.
- */
-static void ptrace_exit_finish(struct task_struct *parent,
-			       struct list_head *dead)
-{
-	struct task_struct *p, *n;
-
-	BUG_ON(!list_empty(&parent->ptraced));
-
-	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
-		list_del_init(&p->ptrace_entry);
-		release_task(p);
-	}
-}
-
-static void reparent_thread(struct task_struct *p, struct task_struct *father)
-{
-	if (p->pdeath_signal)
-		/* We already hold the tasklist_lock here.  */
-		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
-
-	list_move_tail(&p->sibling, &p->real_parent->children);
-
-	/* If this is a threaded reparent there is no need to
-	 * notify anyone anything has happened.
-	 */
-	if (same_thread_group(p->real_parent, father))
-		return;
-
-	/* We don't want people slaying init.  */
-	if (!task_detached(p))
-		p->exit_signal = SIGCHLD;
-
-	/* If we'd notified the old parent about this child's death,
-	 * also notify the new parent.
-	 */
-	if (!ptrace_reparented(p) &&
-	    p->exit_state == EXIT_ZOMBIE &&
-	    !task_detached(p) && thread_group_empty(p))
-		do_notify_parent(p, p->exit_signal);
-
-	kill_orphaned_pgrp(p, father);
-}
-
-/*
  * When we die, we re-parent all our children.
  * Try to give them to another thread in our thread
  * group, and if no such member exists, give it to
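Note: ignoring_children() encoded the POSIX rule that a parent which sets SIGCHLD to SIG_IGN (or uses SA_NOCLDWAIT) waives notification, so its children self-reap; that decision now has to live with the relocated ptrace-detach logic (only the exit_ptrace() call is visible below). The userspace-visible behavior, in a self-contained example using only the standard POSIX API:

```c
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* Telling the kernel we ignore SIGCHLD means our children are
	 * auto-reaped: they never linger as zombies for us to wait on. */
	signal(SIGCHLD, SIG_IGN);

	pid_t child = fork();
	if (child == 0)
		_exit(0);

	/* With SIG_IGN in effect, wait() blocks until all children are
	 * gone and then fails: there is nothing waitable left. */
	if (wait(NULL) == -1)
		perror("wait");	/* expected: ECHILD */
	return 0;
}
```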
@@ -886,17 +736,51 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
 	return pid_ns->child_reaper;
 }
 
+/*
+ * Any that need to be release_task'd are put on the @dead list.
+ */
+static void reparent_thread(struct task_struct *father, struct task_struct *p,
+				struct list_head *dead)
+{
+	if (p->pdeath_signal)
+		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
+
+	list_move_tail(&p->sibling, &p->real_parent->children);
+
+	if (task_detached(p))
+		return;
+	/*
+	 * If this is a threaded reparent there is no need to
+	 * notify anyone anything has happened.
+	 */
+	if (same_thread_group(p->real_parent, father))
+		return;
+
+	/* We don't want people slaying init. */
+	p->exit_signal = SIGCHLD;
+
+	/* If it has exited notify the new parent about this child's death. */
+	if (!p->ptrace &&
+	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
+		do_notify_parent(p, p->exit_signal);
+		if (task_detached(p)) {
+			p->exit_state = EXIT_DEAD;
+			list_move_tail(&p->sibling, dead);
+		}
+	}
+
+	kill_orphaned_pgrp(p, father);
+}
+
 static void forget_original_parent(struct task_struct *father)
 {
 	struct task_struct *p, *n, *reaper;
-	LIST_HEAD(ptrace_dead);
+	LIST_HEAD(dead_children);
+
+	exit_ptrace(father);
 
 	write_lock_irq(&tasklist_lock);
 	reaper = find_new_reaper(father);
-	/*
-	 * First clean up ptrace if we were using it.
-	 */
-	ptrace_exit(father, &ptrace_dead);
 
 	list_for_each_entry_safe(p, n, &father->children, sibling) {
 		p->real_parent = reaper;
@@ -904,13 +788,16 @@ static void forget_original_parent(struct task_struct *father)
 			BUG_ON(p->ptrace);
 			p->parent = p->real_parent;
 		}
-		reparent_thread(p, father);
+		reparent_thread(father, p, &dead_children);
 	}
-
 	write_unlock_irq(&tasklist_lock);
+
 	BUG_ON(!list_empty(&father->children));
 
-	ptrace_exit_finish(father, &ptrace_dead);
+	list_for_each_entry_safe(p, n, &dead_children, sibling) {
+		list_del_init(&p->sibling);
+		release_task(p);
+	}
 }
 
 /*
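Note: the ptrace_exit()/ptrace_exit_finish() pair is folded into a single exit_ptrace() call made before taking tasklist_lock; only the call site is visible in this diff, and the implementation presumably now lives in kernel/ptrace.c. Based on the removed pair, its contract should look roughly like this sketch (the __ptrace_detach() helper name and its exact signature are assumptions):

```c
/* Sketch only: detach every tracee of the dying tracer, then reap the
 * ones nobody else will wait for.  Mirrors the removed ptrace_exit() +
 * ptrace_exit_finish() split: list manipulation under tasklist_lock,
 * release_task() after dropping it. */
void exit_ptrace(struct task_struct *tracer)
{
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (__ptrace_detach(tracer, p))	/* assumed helper */
			list_add(&p->ptrace_entry, &dead);
	}
	write_unlock_irq(&tasklist_lock);

	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}
```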
@@ -1422,6 +1309,18 @@ static int wait_task_zombie(struct task_struct *p, int options,
 	return retval;
 }
 
+static int *task_stopped_code(struct task_struct *p, bool ptrace)
+{
+	if (ptrace) {
+		if (task_is_stopped_or_traced(p))
+			return &p->exit_code;
+	} else {
+		if (p->signal->flags & SIGNAL_STOP_STOPPED)
+			return &p->signal->group_exit_code;
+	}
+	return NULL;
+}
+
 /*
  * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold
  * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
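Note: task_stopped_code() centralizes the "is this stop reportable, and where does its code live" decision: a ptracer may collect any stopped or traced tracee via the per-task exit_code, while a plain waiter only sees a completed group stop via the shared group_exit_code (replacing the open-coded group_stop_count check removed below). For readers less familiar with the userspace semantics this serves, a small self-contained demonstration of the non-ptrace case, using only the standard POSIX API:

```c
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t child = fork();

	if (child == 0) {		/* child: stop ourselves */
		raise(SIGSTOP);
		_exit(0);
	}
	/* WUNTRACED asks for stopped children, not only exited ones;
	 * the kernel-side check is what wait_task_stopped() performs. */
	waitpid(child, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));
	kill(child, SIGCONT);		/* resume and reap the exit */
	waitpid(child, &status, 0);
	return 0;
}
```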
@@ -1432,7 +1331,7 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
 			     int options, struct siginfo __user *infop,
 			     int __user *stat_addr, struct rusage __user *ru)
 {
-	int retval, exit_code, why;
+	int retval, exit_code, *p_code, why;
 	uid_t uid = 0; /* unneeded, required by compiler */
 	pid_t pid;
 
@@ -1442,22 +1341,16 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
 	exit_code = 0;
 	spin_lock_irq(&p->sighand->siglock);
 
-	if (unlikely(!task_is_stopped_or_traced(p)))
-		goto unlock_sig;
-
-	if (!ptrace && p->signal->group_stop_count > 0)
-		/*
-		 * A group stop is in progress and this is the group leader.
-		 * We won't report until all threads have stopped.
-		 */
+	p_code = task_stopped_code(p, ptrace);
+	if (unlikely(!p_code))
 		goto unlock_sig;
 
-	exit_code = p->exit_code;
+	exit_code = *p_code;
 	if (!exit_code)
 		goto unlock_sig;
 
 	if (!unlikely(options & WNOWAIT))
-		p->exit_code = 0;
+		*p_code = 0;
 
 	/* don't need the RCU readlock here as we're holding a spinlock */
 	uid = __task_cred(p)->uid;
@@ -1613,7 +1506,7 @@ static int wait_consider_task(struct task_struct *parent, int ptrace,
 	 */
 	*notask_error = 0;
 
-	if (task_is_stopped_or_traced(p))
+	if (task_stopped_code(p, ptrace))
 		return wait_task_stopped(ptrace, p, options,
 					 infop, stat_addr, ru);
 
@@ -1817,7 +1710,7 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
 		pid = find_get_pid(-upid);
 	} else if (upid == 0) {
 		type = PIDTYPE_PGID;
-		pid = get_pid(task_pgrp(current));
+		pid = get_task_pid(current, PIDTYPE_PGID);
 	} else /* upid > 0 */ {
 		type = PIDTYPE_PID;
 		pid = find_get_pid(upid);
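Note: switching from get_pid(task_pgrp(current)) to get_task_pid(current, PIDTYPE_PGID) takes the reference under proper RCU protection instead of dereferencing the pgrp pointer bare, which could race with a concurrent setpgid(). A sketch of what that helper does, from memory of kernel/pid.c (illustrative, not verbatim):

```c
/* Sketch: grab a counted reference to one of the task's pids inside an
 * RCU read-side section, so the struct pid cannot be freed or swapped
 * out from under us while we take the reference. */
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();

	return pid;
}
```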
