| author | Paul Mackerras <paulus@samba.org> | 2008-05-09 06:12:06 -0400 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2008-05-09 06:12:06 -0400 |
| commit | 2a5f2e3e6cd1ce9fb3f8b186b6bc9aa1f1497a92 (patch) | |
| tree | b2306840f227972a7c9d4a2b75e516fe81358ce8 /kernel/exit.c | |
| parent | 02539d71fa98d5737bb668b02286c76241e4bac9 (diff) | |
| parent | 78be76476a34a77f0ea9db2f78ba46a2b0fd5ab5 (diff) | |
Merge branch 'for-2.6.26' of master.kernel.org:/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx into merge
Diffstat (limited to 'kernel/exit.c')
| -rw-r--r-- | kernel/exit.c | 150 |
1 file changed, 119 insertions(+), 31 deletions(-)
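Most of the churn below is a readability cleanup: the open-coded `p->exit_signal == -1` test for a detached task is replaced throughout by a new `task_detached()` helper, and back-to-back `detach_pid()`/`attach_pid()` calls collapse into `change_pid()`. A minimal userspace sketch of the before/after pattern follows; the two-field `task_struct` is invented for illustration, and only the `task_detached()` predicate itself matches the helper added in this commit:

```c
/*
 * Before/after sketch of the task_detached() cleanup. The stripped-down
 * task_struct is invented for illustration; only the predicate matches
 * the kernel helper.
 */
#include <stdio.h>

struct task_struct {
	int exit_signal;	/* -1 marks a detached task: no one wait()s on it */
};

static inline int task_detached(struct task_struct *p)
{
	return p->exit_signal == -1;
}

int main(void)
{
	struct task_struct leader = { .exit_signal = -1 };

	/* Before: every call site open-codes the magic value. */
	if (leader.exit_signal == -1)
		printf("detached (open-coded)\n");

	/* After: the same test reads as intent. */
	if (task_detached(&leader))
		printf("detached (named predicate)\n");
	return 0;
}
```

The payoff shows up at call sites such as `BUG_ON(task_detached(leader))`, which now read as a statement of intent rather than a magic-value comparison.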
```diff
diff --git a/kernel/exit.c b/kernel/exit.c
index 2a9d98c641ac..1510f78a0ffa 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -19,6 +19,7 @@
 #include <linux/acct.h>
 #include <linux/tsacct_kern.h>
 #include <linux/file.h>
+#include <linux/fdtable.h>
 #include <linux/binfmts.h>
 #include <linux/nsproxy.h>
 #include <linux/pid_namespace.h>
@@ -52,6 +53,11 @@
 
 static void exit_mm(struct task_struct * tsk);
 
+static inline int task_detached(struct task_struct *p)
+{
+        return p->exit_signal == -1;
+}
+
 static void __unhash_process(struct task_struct *p)
 {
         nr_threads--;
@@ -160,7 +166,7 @@ repeat:
         zap_leader = 0;
         leader = p->group_leader;
         if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
-                BUG_ON(leader->exit_signal == -1);
+                BUG_ON(task_detached(leader));
                 do_notify_parent(leader, leader->exit_signal);
                 /*
                  * If we were the last child thread and the leader has
@@ -170,7 +176,7 @@ repeat:
                  * do_notify_parent() will have marked it self-reaping in
                  * that case.
                  */
-                zap_leader = (leader->exit_signal == -1);
+                zap_leader = task_detached(leader);
         }
 
         write_unlock_irq(&tasklist_lock);
@@ -329,13 +335,11 @@ void __set_special_pids(struct pid *pid)
         pid_t nr = pid_nr(pid);
 
         if (task_session(curr) != pid) {
-                detach_pid(curr, PIDTYPE_SID);
-                attach_pid(curr, PIDTYPE_SID, pid);
+                change_pid(curr, PIDTYPE_SID, pid);
                 set_task_session(curr, nr);
         }
         if (task_pgrp(curr) != pid) {
-                detach_pid(curr, PIDTYPE_PGID);
-                attach_pid(curr, PIDTYPE_PGID, pid);
+                change_pid(curr, PIDTYPE_PGID, pid);
                 set_task_pgrp(curr, nr);
         }
 }
@@ -557,6 +561,88 @@ void exit_fs(struct task_struct *tsk)
 
 EXPORT_SYMBOL_GPL(exit_fs);
 
+#ifdef CONFIG_MM_OWNER
+/*
+ * Task p is exiting and it owned mm, lets find a new owner for it
+ */
+static inline int
+mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
+{
+        /*
+         * If there are other users of the mm and the owner (us) is exiting
+         * we need to find a new owner to take on the responsibility.
+         */
+        if (!mm)
+                return 0;
+        if (atomic_read(&mm->mm_users) <= 1)
+                return 0;
+        if (mm->owner != p)
+                return 0;
+        return 1;
+}
+
+void mm_update_next_owner(struct mm_struct *mm)
+{
+        struct task_struct *c, *g, *p = current;
+
+retry:
+        if (!mm_need_new_owner(mm, p))
+                return;
+
+        read_lock(&tasklist_lock);
+        /*
+         * Search in the children
+         */
+        list_for_each_entry(c, &p->children, sibling) {
+                if (c->mm == mm)
+                        goto assign_new_owner;
+        }
+
+        /*
+         * Search in the siblings
+         */
+        list_for_each_entry(c, &p->parent->children, sibling) {
+                if (c->mm == mm)
+                        goto assign_new_owner;
+        }
+
+        /*
+         * Search through everything else. We should not get
+         * here often
+         */
+        do_each_thread(g, c) {
+                if (c->mm == mm)
+                        goto assign_new_owner;
+        } while_each_thread(g, c);
+
+        read_unlock(&tasklist_lock);
+        return;
+
+assign_new_owner:
+        BUG_ON(c == p);
+        get_task_struct(c);
+        /*
+         * The task_lock protects c->mm from changing.
+         * We always want mm->owner->mm == mm
+         */
+        task_lock(c);
+        /*
+         * Delay read_unlock() till we have the task_lock()
+         * to ensure that c does not slip away underneath us
+         */
+        read_unlock(&tasklist_lock);
+        if (c->mm != mm) {
+                task_unlock(c);
+                put_task_struct(c);
+                goto retry;
+        }
+        cgroup_mm_owner_callbacks(mm->owner, c);
+        mm->owner = c;
+        task_unlock(c);
+        put_task_struct(c);
+}
+#endif /* CONFIG_MM_OWNER */
+
 /*
  * Turn us into a lazy TLB process if we
  * aren't already..
@@ -596,6 +682,7 @@ static void exit_mm(struct task_struct * tsk)
         /* We don't want this task to be frozen prematurely */
         clear_freeze_flag(tsk);
         task_unlock(tsk);
+        mm_update_next_owner(mm);
         mmput(mm);
 }
 
@@ -610,7 +697,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
         if (unlikely(traced)) {
                 /* Preserve ptrace links if someone else is tracing this child. */
                 list_del_init(&p->ptrace_list);
-                if (p->parent != p->real_parent)
+                if (ptrace_reparented(p))
                         list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
         } else {
                 /* If this child is being traced, then we're the one tracing it
@@ -634,18 +721,18 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
         /* If this is a threaded reparent there is no need to
          * notify anyone anything has happened.
          */
-        if (p->real_parent->group_leader == father->group_leader)
+        if (same_thread_group(p->real_parent, father))
                 return;
 
         /* We don't want people slaying init. */
-        if (p->exit_signal != -1)
+        if (!task_detached(p))
                 p->exit_signal = SIGCHLD;
 
         /* If we'd notified the old parent about this child's death,
          * also notify the new parent.
          */
         if (!traced && p->exit_state == EXIT_ZOMBIE &&
-            p->exit_signal != -1 && thread_group_empty(p))
+            !task_detached(p) && thread_group_empty(p))
                 do_notify_parent(p, p->exit_signal);
 
         kill_orphaned_pgrp(p, father);
@@ -698,18 +785,18 @@ static void forget_original_parent(struct task_struct *father)
                 } else {
                         /* reparent ptraced task to its real parent */
                         __ptrace_unlink (p);
-                        if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
+                        if (p->exit_state == EXIT_ZOMBIE && !task_detached(p) &&
                             thread_group_empty(p))
                                 do_notify_parent(p, p->exit_signal);
                 }
 
                 /*
-                 * if the ptraced child is a zombie with exit_signal == -1
-                 * we must collect it before we exit, or it will remain
-                 * zombie forever since we prevented it from self-reap itself
-                 * while it was being traced by us, to be able to see it in wait4.
+                 * if the ptraced child is a detached zombie we must collect
+                 * it before we exit, or it will remain zombie forever since
+                 * we prevented it from self-reap itself while it was being
+                 * traced by us, to be able to see it in wait4.
                  */
-                if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
+                if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && task_detached(p)))
                         list_add(&p->ptrace_list, &ptrace_dead);
         }
 
@@ -766,29 +853,30 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
          * we have changed execution domain as these two values started
          * the same after a fork.
          */
-        if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
+        if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
             (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
-             tsk->self_exec_id != tsk->parent_exec_id)
-            && !capable(CAP_KILL))
+             tsk->self_exec_id != tsk->parent_exec_id) &&
+            !capable(CAP_KILL))
                 tsk->exit_signal = SIGCHLD;
 
-
         /* If something other than our normal parent is ptracing us, then
          * send it a SIGCHLD instead of honoring exit_signal. exit_signal
          * only has special meaning to our real parent.
          */
-        if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
-                int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
+        if (!task_detached(tsk) && thread_group_empty(tsk)) {
+                int signal = ptrace_reparented(tsk) ?
+                                SIGCHLD : tsk->exit_signal;
                 do_notify_parent(tsk, signal);
         } else if (tsk->ptrace) {
                 do_notify_parent(tsk, SIGCHLD);
         }
 
         state = EXIT_ZOMBIE;
-        if (tsk->exit_signal == -1 && likely(!tsk->ptrace))
+        if (task_detached(tsk) && likely(!tsk->ptrace))
                 state = EXIT_DEAD;
         tsk->exit_state = state;
 
+        /* mt-exec, de_thread() is waiting for us */
         if (thread_group_leader(tsk) &&
             tsk->signal->notify_count < 0 &&
             tsk->signal->group_exit_task)
@@ -1032,12 +1120,13 @@ asmlinkage long sys_exit(int error_code)
 NORET_TYPE void
 do_group_exit(int exit_code)
 {
+        struct signal_struct *sig = current->signal;
+
         BUG_ON(exit_code & 0x80); /* core dumps don't get here */
 
-        if (current->signal->flags & SIGNAL_GROUP_EXIT)
-                exit_code = current->signal->group_exit_code;
+        if (signal_group_exit(sig))
+                exit_code = sig->group_exit_code;
         else if (!thread_group_empty(current)) {
-                struct signal_struct *const sig = current->signal;
                 struct sighand_struct *const sighand = current->sighand;
                 spin_lock_irq(&sighand->siglock);
                 if (signal_group_exit(sig))
@@ -1089,7 +1178,7 @@ static int eligible_child(enum pid_type type, struct pid *pid, int options,
          * Do not consider detached threads that are
          * not ptraced:
          */
-        if (p->exit_signal == -1 && !p->ptrace)
+        if (task_detached(p) && !p->ptrace)
                 return 0;
 
         /* Wait for all children (clone and not) if __WALL is set;
@@ -1179,8 +1268,7 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
                 return 0;
         }
 
-        /* traced means p->ptrace, but not vice versa */
-        traced = (p->real_parent != p->parent);
+        traced = ptrace_reparented(p);
 
         if (likely(!traced)) {
                 struct signal_struct *psig;
@@ -1281,9 +1369,9 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
                  * If it's still not detached after that, don't release
                  * it now.
                  */
-                if (p->exit_signal != -1) {
+                if (!task_detached(p)) {
                         do_notify_parent(p, p->exit_signal);
-                        if (p->exit_signal != -1) {
+                        if (!task_detached(p)) {
                                 p->exit_state = EXIT_ZOMBIE;
                                 p = NULL;
                         }
```
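The one substantive addition above is `mm_update_next_owner()` (under `CONFIG_MM_OWNER`), which hands `mm->owner` off to another user of the mm when the current owner exits. Its locking order is the interesting part: a candidate is found under the coarse `tasklist_lock`, pinned with `get_task_struct()` and locked with `task_lock()` *before* the read lock is dropped, and `c->mm` is then revalidated, retrying the whole search if the candidate changed underneath us. Below is a compressed pthread sketch of that find/pin/drop/revalidate pattern; every name in it is invented for illustration, and only the lock ordering and retry structure mirror the kernel function:

```c
/*
 * Userspace analogue of the locking in mm_update_next_owner(): take the
 * fine-grained lock before dropping the coarse one, then revalidate and
 * retry. All names are invented; only the pattern mirrors the kernel.
 */
#include <pthread.h>
#include <stdio.h>

/* Coarse lock guarding the "task list" (stands in for tasklist_lock). */
static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

struct object {
	pthread_mutex_t lock;	/* per-object lock, like task_lock() */
	int state;
};

static struct object obj = { PTHREAD_MUTEX_INITIALIZER, 42 };

/* Stands in for mm_need_new_owner(): is there anything left to do? */
static int need_reassign(int expected)
{
	return obj.state == expected;
}

static void reassign(int expected)
{
retry:
	if (!need_reassign(expected))
		return;		/* like the kernel's early return */

	pthread_rwlock_rdlock(&list_lock);
	struct object *c = &obj;	/* "found" a candidate under the coarse lock */

	/* Lock the candidate BEFORE dropping the coarse lock so it cannot
	 * slip away underneath us (the kernel also pins it with a
	 * get_task_struct() refcount at this point). */
	pthread_mutex_lock(&c->lock);
	pthread_rwlock_unlock(&list_lock);

	/* Revalidate: the field we care about is only stable under the
	 * per-object lock, so it may have changed since the scan. */
	if (c->state != expected) {
		pthread_mutex_unlock(&c->lock);
		goto retry;	/* candidate no longer fits; search again */
	}
	c->state = expected + 1;	/* safe: still the object we found */
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	reassign(42);
	printf("state=%d\n", obj.state);	/* prints state=43 */
	return 0;
}
```

Dropping `tasklist_lock` before taking `task_lock()` would open a window in which the candidate could exit or switch mms unnoticed; taking the fine lock first and revalidating afterwards is what closes it, at the cost of an occasional full retry.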
