author     Ingo Molnar <mingo@elte.hu>   2008-07-17 17:57:20 -0400
committer  Ingo Molnar <mingo@elte.hu>   2008-07-17 17:57:20 -0400
commit     393d81aa026e19b6ede6f5f11955c97ee62e5df5
tree       a1d9511e488e19d41089ff0a736f6ce52a81c6e5 /kernel
parent     93a0886e2368eafb9df5e2021fb185195cee88b2
parent     5b664cb235e97afbf34db9c4d77f08ebd725335e
Merge branch 'linus' into xen-64bit
Diffstat (limited to 'kernel')
 kernel/Makefile        |   3
 kernel/cpu.c           |   1
 kernel/exit.c          | 451
 kernel/fork.c          |   6
 kernel/kthread.c       |   2
 kernel/power/disk.c    |  50
 kernel/power/main.c    |  16
 kernel/power/process.c |  97
 kernel/power/user.c    |  71
 kernel/ptrace.c        |  37
10 files changed, 442 insertions, 292 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 0a7ed838984b..985ddb7da4d0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -11,8 +11,6 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ | |||
11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ | 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ |
12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o | 12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o |
13 | 13 | ||
14 | CFLAGS_REMOVE_sched.o = -mno-spe | ||
15 | |||
16 | ifdef CONFIG_FTRACE | 14 | ifdef CONFIG_FTRACE |
17 | # Do not trace debug files and internal ftrace files | 15 | # Do not trace debug files and internal ftrace files |
18 | CFLAGS_REMOVE_lockdep.o = -pg | 16 | CFLAGS_REMOVE_lockdep.o = -pg |
@@ -21,6 +19,7 @@ CFLAGS_REMOVE_mutex-debug.o = -pg | |||
21 | CFLAGS_REMOVE_rtmutex-debug.o = -pg | 19 | CFLAGS_REMOVE_rtmutex-debug.o = -pg |
22 | CFLAGS_REMOVE_cgroup-debug.o = -pg | 20 | CFLAGS_REMOVE_cgroup-debug.o = -pg |
23 | CFLAGS_REMOVE_sched_clock.o = -pg | 21 | CFLAGS_REMOVE_sched_clock.o = -pg |
22 | CFLAGS_REMOVE_sched.o = -mno-spe -pg | ||
24 | endif | 23 | endif |
25 | 24 | ||
26 | obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o | 25 | obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o |
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b11f06dc149a..cfb1d43ab801 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -299,6 +299,7 @@ int __ref cpu_down(unsigned int cpu) | |||
299 | cpu_maps_update_done(); | 299 | cpu_maps_update_done(); |
300 | return err; | 300 | return err; |
301 | } | 301 | } |
302 | EXPORT_SYMBOL(cpu_down); | ||
302 | #endif /*CONFIG_HOTPLUG_CPU*/ | 303 | #endif /*CONFIG_HOTPLUG_CPU*/ |
303 | 304 | ||
304 | /* Requires cpu_add_remove_lock to be held */ | 305 | /* Requires cpu_add_remove_lock to be held */ |
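The new EXPORT_SYMBOL(cpu_down) makes CPU offlining callable from modular code. A minimal, hypothetical module sketch (names are illustrative; it assumes CONFIG_HOTPLUG_CPU and a kernel of this vintage):

```c
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cpu.h>

static int __init offline_one_init(void)
{
	int err = cpu_down(1);	/* offline CPU 1; made visible to modules above */

	if (err)
		printk(KERN_ERR "cpu_down(1) failed: %d\n", err);
	return err;
}

static void __exit offline_one_exit(void)
{
	/* cpu_up() is not necessarily exported here; re-online via
	 * /sys/devices/system/cpu/cpu1/online if needed. */
}

module_init(offline_one_init);
module_exit(offline_one_exit);
MODULE_LICENSE("GPL");
```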
diff --git a/kernel/exit.c b/kernel/exit.c
index ceb258782835..93d2711b9381 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -71,7 +71,7 @@ static void __unhash_process(struct task_struct *p) | |||
71 | __get_cpu_var(process_counts)--; | 71 | __get_cpu_var(process_counts)--; |
72 | } | 72 | } |
73 | list_del_rcu(&p->thread_group); | 73 | list_del_rcu(&p->thread_group); |
74 | remove_parent(p); | 74 | list_del_init(&p->sibling); |
75 | } | 75 | } |
76 | 76 | ||
77 | /* | 77 | /* |
@@ -152,6 +152,18 @@ static void delayed_put_task_struct(struct rcu_head *rhp) | |||
152 | put_task_struct(container_of(rhp, struct task_struct, rcu)); | 152 | put_task_struct(container_of(rhp, struct task_struct, rcu)); |
153 | } | 153 | } |
154 | 154 | ||
155 | /* | ||
156 | * Do final ptrace-related cleanup of a zombie being reaped. | ||
157 | * | ||
158 | * Called with write_lock(&tasklist_lock) held. | ||
159 | */ | ||
160 | static void ptrace_release_task(struct task_struct *p) | ||
161 | { | ||
162 | BUG_ON(!list_empty(&p->ptraced)); | ||
163 | ptrace_unlink(p); | ||
164 | BUG_ON(!list_empty(&p->ptrace_entry)); | ||
165 | } | ||
166 | |||
155 | void release_task(struct task_struct * p) | 167 | void release_task(struct task_struct * p) |
156 | { | 168 | { |
157 | struct task_struct *leader; | 169 | struct task_struct *leader; |
@@ -160,8 +172,7 @@ repeat: | |||
160 | atomic_dec(&p->user->processes); | 172 | atomic_dec(&p->user->processes); |
161 | proc_flush_task(p); | 173 | proc_flush_task(p); |
162 | write_lock_irq(&tasklist_lock); | 174 | write_lock_irq(&tasklist_lock); |
163 | ptrace_unlink(p); | 175 | ptrace_release_task(p); |
164 | BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children)); | ||
165 | __exit_signal(p); | 176 | __exit_signal(p); |
166 | 177 | ||
167 | /* | 178 | /* |
@@ -315,9 +326,8 @@ static void reparent_to_kthreadd(void) | |||
315 | 326 | ||
316 | ptrace_unlink(current); | 327 | ptrace_unlink(current); |
317 | /* Reparent to init */ | 328 | /* Reparent to init */ |
318 | remove_parent(current); | ||
319 | current->real_parent = current->parent = kthreadd_task; | 329 | current->real_parent = current->parent = kthreadd_task; |
320 | add_parent(current); | 330 | list_move_tail(¤t->sibling, ¤t->real_parent->children); |
321 | 331 | ||
322 | /* Set the exit signal to SIGCHLD so we signal init on exit */ | 332 | /* Set the exit signal to SIGCHLD so we signal init on exit */ |
323 | current->exit_signal = SIGCHLD; | 333 | current->exit_signal = SIGCHLD; |
@@ -692,37 +702,97 @@ static void exit_mm(struct task_struct * tsk) | |||
692 | mmput(mm); | 702 | mmput(mm); |
693 | } | 703 | } |
694 | 704 | ||
695 | static void | 705 | /* |
696 | reparent_thread(struct task_struct *p, struct task_struct *father, int traced) | 706 | * Return nonzero if @parent's children should reap themselves. |
707 | * | ||
708 | * Called with write_lock_irq(&tasklist_lock) held. | ||
709 | */ | ||
710 | static int ignoring_children(struct task_struct *parent) | ||
697 | { | 711 | { |
698 | if (p->pdeath_signal) | 712 | int ret; |
699 | /* We already hold the tasklist_lock here. */ | 713 | struct sighand_struct *psig = parent->sighand; |
700 | group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p); | 714 | unsigned long flags; |
715 | spin_lock_irqsave(&psig->siglock, flags); | ||
716 | ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || | ||
717 | (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT)); | ||
718 | spin_unlock_irqrestore(&psig->siglock, flags); | ||
719 | return ret; | ||
720 | } | ||
701 | 721 | ||
702 | /* Move the child from its dying parent to the new one. */ | 722 | /* |
703 | if (unlikely(traced)) { | 723 | * Detach all tasks we were using ptrace on. |
704 | /* Preserve ptrace links if someone else is tracing this child. */ | 724 | * Any that need to be release_task'd are put on the @dead list. |
705 | list_del_init(&p->ptrace_list); | 725 | * |
706 | if (ptrace_reparented(p)) | 726 | * Called with write_lock(&tasklist_lock) held. |
707 | list_add(&p->ptrace_list, &p->real_parent->ptrace_children); | 727 | */ |
708 | } else { | 728 | static void ptrace_exit(struct task_struct *parent, struct list_head *dead) |
709 | /* If this child is being traced, then we're the one tracing it | 729 | { |
710 | * anyway, so let go of it. | 730 | struct task_struct *p, *n; |
731 | int ign = -1; | ||
732 | |||
733 | list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) { | ||
734 | __ptrace_unlink(p); | ||
735 | |||
736 | if (p->exit_state != EXIT_ZOMBIE) | ||
737 | continue; | ||
738 | |||
739 | /* | ||
740 | * If it's a zombie, our attachedness prevented normal | ||
741 | * parent notification or self-reaping. Do notification | ||
742 | * now if it would have happened earlier. If it should | ||
743 | * reap itself, add it to the @dead list. We can't call | ||
744 | * release_task() here because we already hold tasklist_lock. | ||
745 | * | ||
746 | * If it's our own child, there is no notification to do. | ||
747 | * But if our normal children self-reap, then this child | ||
748 | * was prevented by ptrace and we must reap it now. | ||
711 | */ | 749 | */ |
712 | p->ptrace = 0; | 750 | if (!task_detached(p) && thread_group_empty(p)) { |
713 | remove_parent(p); | 751 | if (!same_thread_group(p->real_parent, parent)) |
714 | p->parent = p->real_parent; | 752 | do_notify_parent(p, p->exit_signal); |
715 | add_parent(p); | 753 | else { |
754 | if (ign < 0) | ||
755 | ign = ignoring_children(parent); | ||
756 | if (ign) | ||
757 | p->exit_signal = -1; | ||
758 | } | ||
759 | } | ||
716 | 760 | ||
717 | if (task_is_traced(p)) { | 761 | if (task_detached(p)) { |
718 | /* | 762 | /* |
719 | * If it was at a trace stop, turn it into | 763 | * Mark it as in the process of being reaped. |
720 | * a normal stop since it's no longer being | ||
721 | * traced. | ||
722 | */ | 764 | */ |
723 | ptrace_untrace(p); | 765 | p->exit_state = EXIT_DEAD; |
766 | list_add(&p->ptrace_entry, dead); | ||
724 | } | 767 | } |
725 | } | 768 | } |
769 | } | ||
770 | |||
771 | /* | ||
772 | * Finish up exit-time ptrace cleanup. | ||
773 | * | ||
774 | * Called without locks. | ||
775 | */ | ||
776 | static void ptrace_exit_finish(struct task_struct *parent, | ||
777 | struct list_head *dead) | ||
778 | { | ||
779 | struct task_struct *p, *n; | ||
780 | |||
781 | BUG_ON(!list_empty(&parent->ptraced)); | ||
782 | |||
783 | list_for_each_entry_safe(p, n, dead, ptrace_entry) { | ||
784 | list_del_init(&p->ptrace_entry); | ||
785 | release_task(p); | ||
786 | } | ||
787 | } | ||
788 | |||
789 | static void reparent_thread(struct task_struct *p, struct task_struct *father) | ||
790 | { | ||
791 | if (p->pdeath_signal) | ||
792 | /* We already hold the tasklist_lock here. */ | ||
793 | group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p); | ||
794 | |||
795 | list_move_tail(&p->sibling, &p->real_parent->children); | ||
726 | 796 | ||
727 | /* If this is a threaded reparent there is no need to | 797 | /* If this is a threaded reparent there is no need to |
728 | * notify anyone anything has happened. | 798 | * notify anyone anything has happened. |
@@ -737,7 +807,8 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced) | |||
737 | /* If we'd notified the old parent about this child's death, | 807 | /* If we'd notified the old parent about this child's death, |
738 | * also notify the new parent. | 808 | * also notify the new parent. |
739 | */ | 809 | */ |
740 | if (!traced && p->exit_state == EXIT_ZOMBIE && | 810 | if (!ptrace_reparented(p) && |
811 | p->exit_state == EXIT_ZOMBIE && | ||
741 | !task_detached(p) && thread_group_empty(p)) | 812 | !task_detached(p) && thread_group_empty(p)) |
742 | do_notify_parent(p, p->exit_signal); | 813 | do_notify_parent(p, p->exit_signal); |
743 | 814 | ||
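For reference, the userspace-visible behaviour that ignoring_children() tests for can be sketched as a small stand-alone program: with SIGCHLD set to SIG_IGN (or SA_NOCLDWAIT), Linux auto-reaps exiting children, so a later wait reports ECHILD instead of returning a status.

```c
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* ask the kernel to auto-reap children */

	pid_t pid = fork();
	if (pid == 0)
		_exit(0);		/* child exits immediately */

	sleep(1);			/* give the child time to be reaped */

	if (waitpid(pid, NULL, 0) < 0 && errno == ECHILD)
		puts("child was auto-reaped, nothing to wait for");
	return 0;
}
```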
@@ -754,12 +825,15 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced) | |||
754 | static void forget_original_parent(struct task_struct *father) | 825 | static void forget_original_parent(struct task_struct *father) |
755 | { | 826 | { |
756 | struct task_struct *p, *n, *reaper = father; | 827 | struct task_struct *p, *n, *reaper = father; |
757 | struct list_head ptrace_dead; | 828 | LIST_HEAD(ptrace_dead); |
758 | |||
759 | INIT_LIST_HEAD(&ptrace_dead); | ||
760 | 829 | ||
761 | write_lock_irq(&tasklist_lock); | 830 | write_lock_irq(&tasklist_lock); |
762 | 831 | ||
832 | /* | ||
833 | * First clean up ptrace if we were using it. | ||
834 | */ | ||
835 | ptrace_exit(father, &ptrace_dead); | ||
836 | |||
763 | do { | 837 | do { |
764 | reaper = next_thread(reaper); | 838 | reaper = next_thread(reaper); |
765 | if (reaper == father) { | 839 | if (reaper == father) { |
@@ -768,58 +842,19 @@ static void forget_original_parent(struct task_struct *father) | |||
768 | } | 842 | } |
769 | } while (reaper->flags & PF_EXITING); | 843 | } while (reaper->flags & PF_EXITING); |
770 | 844 | ||
771 | /* | ||
772 | * There are only two places where our children can be: | ||
773 | * | ||
774 | * - in our child list | ||
775 | * - in our ptraced child list | ||
776 | * | ||
777 | * Search them and reparent children. | ||
778 | */ | ||
779 | list_for_each_entry_safe(p, n, &father->children, sibling) { | 845 | list_for_each_entry_safe(p, n, &father->children, sibling) { |
780 | int ptrace; | ||
781 | |||
782 | ptrace = p->ptrace; | ||
783 | |||
784 | /* if father isn't the real parent, then ptrace must be enabled */ | ||
785 | BUG_ON(father != p->real_parent && !ptrace); | ||
786 | |||
787 | if (father == p->real_parent) { | ||
788 | /* reparent with a reaper, real father it's us */ | ||
789 | p->real_parent = reaper; | ||
790 | reparent_thread(p, father, 0); | ||
791 | } else { | ||
792 | /* reparent ptraced task to its real parent */ | ||
793 | __ptrace_unlink (p); | ||
794 | if (p->exit_state == EXIT_ZOMBIE && !task_detached(p) && | ||
795 | thread_group_empty(p)) | ||
796 | do_notify_parent(p, p->exit_signal); | ||
797 | } | ||
798 | |||
799 | /* | ||
800 | * if the ptraced child is a detached zombie we must collect | ||
801 | * it before we exit, or it will remain zombie forever since | ||
802 | * we prevented it from self-reap itself while it was being | ||
803 | * traced by us, to be able to see it in wait4. | ||
804 | */ | ||
805 | if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && task_detached(p))) | ||
806 | list_add(&p->ptrace_list, &ptrace_dead); | ||
807 | } | ||
808 | |||
809 | list_for_each_entry_safe(p, n, &father->ptrace_children, ptrace_list) { | ||
810 | p->real_parent = reaper; | 846 | p->real_parent = reaper; |
811 | reparent_thread(p, father, 1); | 847 | if (p->parent == father) { |
848 | BUG_ON(p->ptrace); | ||
849 | p->parent = p->real_parent; | ||
850 | } | ||
851 | reparent_thread(p, father); | ||
812 | } | 852 | } |
813 | 853 | ||
814 | write_unlock_irq(&tasklist_lock); | 854 | write_unlock_irq(&tasklist_lock); |
815 | BUG_ON(!list_empty(&father->children)); | 855 | BUG_ON(!list_empty(&father->children)); |
816 | BUG_ON(!list_empty(&father->ptrace_children)); | ||
817 | |||
818 | list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_list) { | ||
819 | list_del_init(&p->ptrace_list); | ||
820 | release_task(p); | ||
821 | } | ||
822 | 856 | ||
857 | ptrace_exit_finish(father, &ptrace_dead); | ||
823 | } | 858 | } |
824 | 859 | ||
825 | /* | 860 | /* |
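The reworked reparenting leans on two <linux/list.h> idioms visible above: LIST_HEAD() declares and initialises the on-stack ptrace_dead list in one step, and list_for_each_entry_safe() caches the next node so the current entry may be unlinked or moved mid-walk. A hedged sketch of the pattern, with an illustrative struct and made-up function name:

```c
#include <linux/list.h>

struct child {
	int pid;
	struct list_head sibling;
};

/* Move every child onto a local list, then drain it. Illustrative only. */
static void reap_children(struct list_head *children)
{
	struct child *c, *n;
	LIST_HEAD(dead);	/* declares and initialises the list head in one go */

	/* _safe: 'n' caches the next node, so 'c' may be moved off the list */
	list_for_each_entry_safe(c, n, children, sibling)
		list_move_tail(&c->sibling, &dead);

	list_for_each_entry_safe(c, n, &dead, sibling) {
		list_del_init(&c->sibling);
		/* release 'c' here */
	}
}
```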
@@ -1180,13 +1215,6 @@ static int eligible_child(enum pid_type type, struct pid *pid, int options, | |||
1180 | return 0; | 1215 | return 0; |
1181 | } | 1216 | } |
1182 | 1217 | ||
1183 | /* | ||
1184 | * Do not consider detached threads that are | ||
1185 | * not ptraced: | ||
1186 | */ | ||
1187 | if (task_detached(p) && !p->ptrace) | ||
1188 | return 0; | ||
1189 | |||
1190 | /* Wait for all children (clone and not) if __WALL is set; | 1218 | /* Wait for all children (clone and not) if __WALL is set; |
1191 | * otherwise, wait for clone children *only* if __WCLONE is | 1219 | * otherwise, wait for clone children *only* if __WCLONE is |
1192 | * set; otherwise, wait for non-clone children *only*. (Note: | 1220 | * set; otherwise, wait for non-clone children *only*. (Note: |
@@ -1197,14 +1225,10 @@ static int eligible_child(enum pid_type type, struct pid *pid, int options, | |||
1197 | return 0; | 1225 | return 0; |
1198 | 1226 | ||
1199 | err = security_task_wait(p); | 1227 | err = security_task_wait(p); |
1200 | if (likely(!err)) | 1228 | if (err) |
1201 | return 1; | 1229 | return err; |
1202 | 1230 | ||
1203 | if (type != PIDTYPE_PID) | 1231 | return 1; |
1204 | return 0; | ||
1205 | /* This child was explicitly requested, abort */ | ||
1206 | read_unlock(&tasklist_lock); | ||
1207 | return err; | ||
1208 | } | 1232 | } |
1209 | 1233 | ||
1210 | static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid, | 1234 | static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid, |
@@ -1238,7 +1262,7 @@ static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid, | |||
1238 | * the lock and this task is uninteresting. If we return nonzero, we have | 1262 | * the lock and this task is uninteresting. If we return nonzero, we have |
1239 | * released the lock and the system call should return. | 1263 | * released the lock and the system call should return. |
1240 | */ | 1264 | */ |
1241 | static int wait_task_zombie(struct task_struct *p, int noreap, | 1265 | static int wait_task_zombie(struct task_struct *p, int options, |
1242 | struct siginfo __user *infop, | 1266 | struct siginfo __user *infop, |
1243 | int __user *stat_addr, struct rusage __user *ru) | 1267 | int __user *stat_addr, struct rusage __user *ru) |
1244 | { | 1268 | { |
@@ -1246,7 +1270,10 @@ static int wait_task_zombie(struct task_struct *p, int noreap, | |||
1246 | int retval, status, traced; | 1270 | int retval, status, traced; |
1247 | pid_t pid = task_pid_vnr(p); | 1271 | pid_t pid = task_pid_vnr(p); |
1248 | 1272 | ||
1249 | if (unlikely(noreap)) { | 1273 | if (!likely(options & WEXITED)) |
1274 | return 0; | ||
1275 | |||
1276 | if (unlikely(options & WNOWAIT)) { | ||
1250 | uid_t uid = p->uid; | 1277 | uid_t uid = p->uid; |
1251 | int exit_code = p->exit_code; | 1278 | int exit_code = p->exit_code; |
1252 | int why, status; | 1279 | int why, status; |
@@ -1396,21 +1423,24 @@ static int wait_task_zombie(struct task_struct *p, int noreap, | |||
1396 | * the lock and this task is uninteresting. If we return nonzero, we have | 1423 | * the lock and this task is uninteresting. If we return nonzero, we have |
1397 | * released the lock and the system call should return. | 1424 | * released the lock and the system call should return. |
1398 | */ | 1425 | */ |
1399 | static int wait_task_stopped(struct task_struct *p, | 1426 | static int wait_task_stopped(int ptrace, struct task_struct *p, |
1400 | int noreap, struct siginfo __user *infop, | 1427 | int options, struct siginfo __user *infop, |
1401 | int __user *stat_addr, struct rusage __user *ru) | 1428 | int __user *stat_addr, struct rusage __user *ru) |
1402 | { | 1429 | { |
1403 | int retval, exit_code, why; | 1430 | int retval, exit_code, why; |
1404 | uid_t uid = 0; /* unneeded, required by compiler */ | 1431 | uid_t uid = 0; /* unneeded, required by compiler */ |
1405 | pid_t pid; | 1432 | pid_t pid; |
1406 | 1433 | ||
1434 | if (!(options & WUNTRACED)) | ||
1435 | return 0; | ||
1436 | |||
1407 | exit_code = 0; | 1437 | exit_code = 0; |
1408 | spin_lock_irq(&p->sighand->siglock); | 1438 | spin_lock_irq(&p->sighand->siglock); |
1409 | 1439 | ||
1410 | if (unlikely(!task_is_stopped_or_traced(p))) | 1440 | if (unlikely(!task_is_stopped_or_traced(p))) |
1411 | goto unlock_sig; | 1441 | goto unlock_sig; |
1412 | 1442 | ||
1413 | if (!(p->ptrace & PT_PTRACED) && p->signal->group_stop_count > 0) | 1443 | if (!ptrace && p->signal->group_stop_count > 0) |
1414 | /* | 1444 | /* |
1415 | * A group stop is in progress and this is the group leader. | 1445 | * A group stop is in progress and this is the group leader. |
1416 | * We won't report until all threads have stopped. | 1446 | * We won't report until all threads have stopped. |
@@ -1421,7 +1451,7 @@ static int wait_task_stopped(struct task_struct *p, | |||
1421 | if (!exit_code) | 1451 | if (!exit_code) |
1422 | goto unlock_sig; | 1452 | goto unlock_sig; |
1423 | 1453 | ||
1424 | if (!noreap) | 1454 | if (!unlikely(options & WNOWAIT)) |
1425 | p->exit_code = 0; | 1455 | p->exit_code = 0; |
1426 | 1456 | ||
1427 | uid = p->uid; | 1457 | uid = p->uid; |
@@ -1439,10 +1469,10 @@ unlock_sig: | |||
1439 | */ | 1469 | */ |
1440 | get_task_struct(p); | 1470 | get_task_struct(p); |
1441 | pid = task_pid_vnr(p); | 1471 | pid = task_pid_vnr(p); |
1442 | why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED; | 1472 | why = ptrace ? CLD_TRAPPED : CLD_STOPPED; |
1443 | read_unlock(&tasklist_lock); | 1473 | read_unlock(&tasklist_lock); |
1444 | 1474 | ||
1445 | if (unlikely(noreap)) | 1475 | if (unlikely(options & WNOWAIT)) |
1446 | return wait_noreap_copyout(p, pid, uid, | 1476 | return wait_noreap_copyout(p, pid, uid, |
1447 | why, exit_code, | 1477 | why, exit_code, |
1448 | infop, ru); | 1478 | infop, ru); |
@@ -1476,7 +1506,7 @@ unlock_sig: | |||
1476 | * the lock and this task is uninteresting. If we return nonzero, we have | 1506 | * the lock and this task is uninteresting. If we return nonzero, we have |
1477 | * released the lock and the system call should return. | 1507 | * released the lock and the system call should return. |
1478 | */ | 1508 | */ |
1479 | static int wait_task_continued(struct task_struct *p, int noreap, | 1509 | static int wait_task_continued(struct task_struct *p, int options, |
1480 | struct siginfo __user *infop, | 1510 | struct siginfo __user *infop, |
1481 | int __user *stat_addr, struct rusage __user *ru) | 1511 | int __user *stat_addr, struct rusage __user *ru) |
1482 | { | 1512 | { |
@@ -1484,6 +1514,9 @@ static int wait_task_continued(struct task_struct *p, int noreap, | |||
1484 | pid_t pid; | 1514 | pid_t pid; |
1485 | uid_t uid; | 1515 | uid_t uid; |
1486 | 1516 | ||
1517 | if (!unlikely(options & WCONTINUED)) | ||
1518 | return 0; | ||
1519 | |||
1487 | if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) | 1520 | if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) |
1488 | return 0; | 1521 | return 0; |
1489 | 1522 | ||
@@ -1493,7 +1526,7 @@ static int wait_task_continued(struct task_struct *p, int noreap, | |||
1493 | spin_unlock_irq(&p->sighand->siglock); | 1526 | spin_unlock_irq(&p->sighand->siglock); |
1494 | return 0; | 1527 | return 0; |
1495 | } | 1528 | } |
1496 | if (!noreap) | 1529 | if (!unlikely(options & WNOWAIT)) |
1497 | p->signal->flags &= ~SIGNAL_STOP_CONTINUED; | 1530 | p->signal->flags &= ~SIGNAL_STOP_CONTINUED; |
1498 | spin_unlock_irq(&p->sighand->siglock); | 1531 | spin_unlock_irq(&p->sighand->siglock); |
1499 | 1532 | ||
@@ -1519,89 +1552,161 @@ static int wait_task_continued(struct task_struct *p, int noreap, | |||
1519 | return retval; | 1552 | return retval; |
1520 | } | 1553 | } |
1521 | 1554 | ||
1555 | /* | ||
1556 | * Consider @p for a wait by @parent. | ||
1557 | * | ||
1558 | * -ECHILD should be in *@notask_error before the first call. | ||
1559 | * Returns nonzero for a final return, when we have unlocked tasklist_lock. | ||
1560 | * Returns zero if the search for a child should continue; | ||
1561 | * then *@notask_error is 0 if @p is an eligible child, | ||
1562 | * or another error from security_task_wait(), or still -ECHILD. | ||
1563 | */ | ||
1564 | static int wait_consider_task(struct task_struct *parent, int ptrace, | ||
1565 | struct task_struct *p, int *notask_error, | ||
1566 | enum pid_type type, struct pid *pid, int options, | ||
1567 | struct siginfo __user *infop, | ||
1568 | int __user *stat_addr, struct rusage __user *ru) | ||
1569 | { | ||
1570 | int ret = eligible_child(type, pid, options, p); | ||
1571 | if (!ret) | ||
1572 | return ret; | ||
1573 | |||
1574 | if (unlikely(ret < 0)) { | ||
1575 | /* | ||
1576 | * If we have not yet seen any eligible child, | ||
1577 | * then let this error code replace -ECHILD. | ||
1578 | * A permission error will give the user a clue | ||
1579 | * to look for security policy problems, rather | ||
1580 | * than for mysterious wait bugs. | ||
1581 | */ | ||
1582 | if (*notask_error) | ||
1583 | *notask_error = ret; | ||
1584 | } | ||
1585 | |||
1586 | if (likely(!ptrace) && unlikely(p->ptrace)) { | ||
1587 | /* | ||
1588 | * This child is hidden by ptrace. | ||
1589 | * We aren't allowed to see it now, but eventually we will. | ||
1590 | */ | ||
1591 | *notask_error = 0; | ||
1592 | return 0; | ||
1593 | } | ||
1594 | |||
1595 | if (p->exit_state == EXIT_DEAD) | ||
1596 | return 0; | ||
1597 | |||
1598 | /* | ||
1599 | * We don't reap group leaders with subthreads. | ||
1600 | */ | ||
1601 | if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p)) | ||
1602 | return wait_task_zombie(p, options, infop, stat_addr, ru); | ||
1603 | |||
1604 | /* | ||
1605 | * It's stopped or running now, so it might | ||
1606 | * later continue, exit, or stop again. | ||
1607 | */ | ||
1608 | *notask_error = 0; | ||
1609 | |||
1610 | if (task_is_stopped_or_traced(p)) | ||
1611 | return wait_task_stopped(ptrace, p, options, | ||
1612 | infop, stat_addr, ru); | ||
1613 | |||
1614 | return wait_task_continued(p, options, infop, stat_addr, ru); | ||
1615 | } | ||
1616 | |||
1617 | /* | ||
1618 | * Do the work of do_wait() for one thread in the group, @tsk. | ||
1619 | * | ||
1620 | * -ECHILD should be in *@notask_error before the first call. | ||
1621 | * Returns nonzero for a final return, when we have unlocked tasklist_lock. | ||
1622 | * Returns zero if the search for a child should continue; then | ||
1623 | * *@notask_error is 0 if there were any eligible children, | ||
1624 | * or another error from security_task_wait(), or still -ECHILD. | ||
1625 | */ | ||
1626 | static int do_wait_thread(struct task_struct *tsk, int *notask_error, | ||
1627 | enum pid_type type, struct pid *pid, int options, | ||
1628 | struct siginfo __user *infop, int __user *stat_addr, | ||
1629 | struct rusage __user *ru) | ||
1630 | { | ||
1631 | struct task_struct *p; | ||
1632 | |||
1633 | list_for_each_entry(p, &tsk->children, sibling) { | ||
1634 | /* | ||
1635 | * Do not consider detached threads. | ||
1636 | */ | ||
1637 | if (!task_detached(p)) { | ||
1638 | int ret = wait_consider_task(tsk, 0, p, notask_error, | ||
1639 | type, pid, options, | ||
1640 | infop, stat_addr, ru); | ||
1641 | if (ret) | ||
1642 | return ret; | ||
1643 | } | ||
1644 | } | ||
1645 | |||
1646 | return 0; | ||
1647 | } | ||
1648 | |||
1649 | static int ptrace_do_wait(struct task_struct *tsk, int *notask_error, | ||
1650 | enum pid_type type, struct pid *pid, int options, | ||
1651 | struct siginfo __user *infop, int __user *stat_addr, | ||
1652 | struct rusage __user *ru) | ||
1653 | { | ||
1654 | struct task_struct *p; | ||
1655 | |||
1656 | /* | ||
1657 | * Traditionally we see ptrace'd stopped tasks regardless of options. | ||
1658 | */ | ||
1659 | options |= WUNTRACED; | ||
1660 | |||
1661 | list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { | ||
1662 | int ret = wait_consider_task(tsk, 1, p, notask_error, | ||
1663 | type, pid, options, | ||
1664 | infop, stat_addr, ru); | ||
1665 | if (ret) | ||
1666 | return ret; | ||
1667 | } | ||
1668 | |||
1669 | return 0; | ||
1670 | } | ||
1671 | |||
1522 | static long do_wait(enum pid_type type, struct pid *pid, int options, | 1672 | static long do_wait(enum pid_type type, struct pid *pid, int options, |
1523 | struct siginfo __user *infop, int __user *stat_addr, | 1673 | struct siginfo __user *infop, int __user *stat_addr, |
1524 | struct rusage __user *ru) | 1674 | struct rusage __user *ru) |
1525 | { | 1675 | { |
1526 | DECLARE_WAITQUEUE(wait, current); | 1676 | DECLARE_WAITQUEUE(wait, current); |
1527 | struct task_struct *tsk; | 1677 | struct task_struct *tsk; |
1528 | int flag, retval; | 1678 | int retval; |
1529 | 1679 | ||
1530 | add_wait_queue(¤t->signal->wait_chldexit,&wait); | 1680 | add_wait_queue(¤t->signal->wait_chldexit,&wait); |
1531 | repeat: | 1681 | repeat: |
1532 | /* If there is nothing that can match our critier just get out */ | 1682 | /* |
1683 | * If there is nothing that can match our critiera just get out. | ||
1684 | * We will clear @retval to zero if we see any child that might later | ||
1685 | * match our criteria, even if we are not able to reap it yet. | ||
1686 | */ | ||
1533 | retval = -ECHILD; | 1687 | retval = -ECHILD; |
1534 | if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type]))) | 1688 | if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type]))) |
1535 | goto end; | 1689 | goto end; |
1536 | 1690 | ||
1537 | /* | ||
1538 | * We will set this flag if we see any child that might later | ||
1539 | * match our criteria, even if we are not able to reap it yet. | ||
1540 | */ | ||
1541 | flag = retval = 0; | ||
1542 | current->state = TASK_INTERRUPTIBLE; | 1691 | current->state = TASK_INTERRUPTIBLE; |
1543 | read_lock(&tasklist_lock); | 1692 | read_lock(&tasklist_lock); |
1544 | tsk = current; | 1693 | tsk = current; |
1545 | do { | 1694 | do { |
1546 | struct task_struct *p; | 1695 | int tsk_result = do_wait_thread(tsk, &retval, |
1547 | 1696 | type, pid, options, | |
1548 | list_for_each_entry(p, &tsk->children, sibling) { | 1697 | infop, stat_addr, ru); |
1549 | int ret = eligible_child(type, pid, options, p); | 1698 | if (!tsk_result) |
1550 | if (!ret) | 1699 | tsk_result = ptrace_do_wait(tsk, &retval, |
1551 | continue; | 1700 | type, pid, options, |
1552 | 1701 | infop, stat_addr, ru); | |
1553 | if (unlikely(ret < 0)) { | 1702 | if (tsk_result) { |
1554 | retval = ret; | 1703 | /* |
1555 | } else if (task_is_stopped_or_traced(p)) { | 1704 | * tasklist_lock is unlocked and we have a final result. |
1556 | /* | 1705 | */ |
1557 | * It's stopped now, so it might later | 1706 | retval = tsk_result; |
1558 | * continue, exit, or stop again. | 1707 | goto end; |
1559 | */ | ||
1560 | flag = 1; | ||
1561 | if (!(p->ptrace & PT_PTRACED) && | ||
1562 | !(options & WUNTRACED)) | ||
1563 | continue; | ||
1564 | |||
1565 | retval = wait_task_stopped(p, | ||
1566 | (options & WNOWAIT), infop, | ||
1567 | stat_addr, ru); | ||
1568 | } else if (p->exit_state == EXIT_ZOMBIE && | ||
1569 | !delay_group_leader(p)) { | ||
1570 | /* | ||
1571 | * We don't reap group leaders with subthreads. | ||
1572 | */ | ||
1573 | if (!likely(options & WEXITED)) | ||
1574 | continue; | ||
1575 | retval = wait_task_zombie(p, | ||
1576 | (options & WNOWAIT), infop, | ||
1577 | stat_addr, ru); | ||
1578 | } else if (p->exit_state != EXIT_DEAD) { | ||
1579 | /* | ||
1580 | * It's running now, so it might later | ||
1581 | * exit, stop, or stop and then continue. | ||
1582 | */ | ||
1583 | flag = 1; | ||
1584 | if (!unlikely(options & WCONTINUED)) | ||
1585 | continue; | ||
1586 | retval = wait_task_continued(p, | ||
1587 | (options & WNOWAIT), infop, | ||
1588 | stat_addr, ru); | ||
1589 | } | ||
1590 | if (retval != 0) /* tasklist_lock released */ | ||
1591 | goto end; | ||
1592 | } | ||
1593 | if (!flag) { | ||
1594 | list_for_each_entry(p, &tsk->ptrace_children, | ||
1595 | ptrace_list) { | ||
1596 | flag = eligible_child(type, pid, options, p); | ||
1597 | if (!flag) | ||
1598 | continue; | ||
1599 | if (likely(flag > 0)) | ||
1600 | break; | ||
1601 | retval = flag; | ||
1602 | goto end; | ||
1603 | } | ||
1604 | } | 1708 | } |
1709 | |||
1605 | if (options & __WNOTHREAD) | 1710 | if (options & __WNOTHREAD) |
1606 | break; | 1711 | break; |
1607 | tsk = next_thread(tsk); | 1712 | tsk = next_thread(tsk); |
@@ -1609,16 +1714,14 @@ repeat: | |||
1609 | } while (tsk != current); | 1714 | } while (tsk != current); |
1610 | read_unlock(&tasklist_lock); | 1715 | read_unlock(&tasklist_lock); |
1611 | 1716 | ||
1612 | if (flag) { | 1717 | if (!retval && !(options & WNOHANG)) { |
1613 | if (options & WNOHANG) | ||
1614 | goto end; | ||
1615 | retval = -ERESTARTSYS; | 1718 | retval = -ERESTARTSYS; |
1616 | if (signal_pending(current)) | 1719 | if (!signal_pending(current)) { |
1617 | goto end; | 1720 | schedule(); |
1618 | schedule(); | 1721 | goto repeat; |
1619 | goto repeat; | 1722 | } |
1620 | } | 1723 | } |
1621 | retval = -ECHILD; | 1724 | |
1622 | end: | 1725 | end: |
1623 | current->state = TASK_RUNNING; | 1726 | current->state = TASK_RUNNING; |
1624 | remove_wait_queue(¤t->signal->wait_chldexit,&wait); | 1727 | remove_wait_queue(¤t->signal->wait_chldexit,&wait); |
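The rewritten wait path dispatches purely on the option bits (WEXITED, WUNTRACED, WCONTINUED, WNOWAIT) instead of the old noreap flag. Its userspace-visible contract can be sketched with a small stand-alone program: WNOWAIT peeks at a zombie without reaping it, so a second WEXITED wait for the same child still succeeds.

```c
#define _XOPEN_SOURCE 700
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	siginfo_t si;
	pid_t pid = fork();

	if (pid == 0)
		_exit(42);

	/* Peek: the child stays a zombie because of WNOWAIT. */
	waitid(P_PID, pid, &si, WEXITED | WNOWAIT);
	printf("peek: pid=%d status=%d\n", (int)si.si_pid, si.si_status);

	/* Reap: this call consumes the exit status for real. */
	waitid(P_PID, pid, &si, WEXITED);
	printf("reap: pid=%d status=%d\n", (int)si.si_pid, si.si_status);
	return 0;
}
```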
diff --git a/kernel/fork.c b/kernel/fork.c
index 4bd2f516401f..adefc1131f27 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1125,8 +1125,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1125 | */ | 1125 | */ |
1126 | p->group_leader = p; | 1126 | p->group_leader = p; |
1127 | INIT_LIST_HEAD(&p->thread_group); | 1127 | INIT_LIST_HEAD(&p->thread_group); |
1128 | INIT_LIST_HEAD(&p->ptrace_children); | 1128 | INIT_LIST_HEAD(&p->ptrace_entry); |
1129 | INIT_LIST_HEAD(&p->ptrace_list); | 1129 | INIT_LIST_HEAD(&p->ptraced); |
1130 | 1130 | ||
1131 | /* Now that the task is set up, run cgroup callbacks if | 1131 | /* Now that the task is set up, run cgroup callbacks if |
1132 | * necessary. We need to run them before the task is visible | 1132 | * necessary. We need to run them before the task is visible |
@@ -1198,7 +1198,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1198 | } | 1198 | } |
1199 | 1199 | ||
1200 | if (likely(p->pid)) { | 1200 | if (likely(p->pid)) { |
1201 | add_parent(p); | 1201 | list_add_tail(&p->sibling, &p->real_parent->children); |
1202 | if (unlikely(p->ptrace & PT_PTRACED)) | 1202 | if (unlikely(p->ptrace & PT_PTRACED)) |
1203 | __ptrace_link(p, current->parent); | 1203 | __ptrace_link(p, current->parent); |
1204 | 1204 | ||
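After this change a tracer's tracees hang off the two fields initialised above: tsk->ptraced is the head of the tracer's list and each tracee's ->ptrace_entry is its link on it. A hedged sketch of walking that list (the helper name is made up; task_pid_nr() is the usual accessor):

```c
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/list.h>

/* Illustrative only: print every task the given tracer is ptracing.
 * The caller must hold tasklist_lock (read) to keep the list stable. */
static void dump_ptraced(struct task_struct *tracer)
{
	struct task_struct *child;

	list_for_each_entry(child, &tracer->ptraced, ptrace_entry)
		printk(KERN_DEBUG "ptracing pid %d\n", task_pid_nr(child));
}
```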
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 97747cdd37c9..ac3fb7326641 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -235,7 +235,7 @@ int kthreadd(void *unused) | |||
235 | set_user_nice(tsk, KTHREAD_NICE_LEVEL); | 235 | set_user_nice(tsk, KTHREAD_NICE_LEVEL); |
236 | set_cpus_allowed(tsk, CPU_MASK_ALL); | 236 | set_cpus_allowed(tsk, CPU_MASK_ALL); |
237 | 237 | ||
238 | current->flags |= PF_NOFREEZE; | 238 | current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; |
239 | 239 | ||
240 | for (;;) { | 240 | for (;;) { |
241 | set_current_state(TASK_INTERRUPTIBLE); | 241 | set_current_state(TASK_INTERRUPTIBLE); |
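kthreadd now runs with PF_NOFREEZE | PF_FREEZER_NOSIG, and kernel threads it spawns inherit those flags. A thread that wants to participate in the freezer (without being sent the fake signal) typically re-enables freezing and polls try_to_freeze(); a hedged sketch:

```c
#include <linux/kthread.h>
#include <linux/freezer.h>

static int my_worker(void *unused)
{
	set_freezable();	/* clear the PF_NOFREEZE inherited from kthreadd */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* park here while the freezer is active */
		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
```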
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 14a656cdc652..f011e0870b52 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -180,6 +180,17 @@ static void platform_restore_cleanup(int platform_mode) | |||
180 | } | 180 | } |
181 | 181 | ||
182 | /** | 182 | /** |
183 | * platform_recover - recover the platform from a failure to suspend | ||
184 | * devices. | ||
185 | */ | ||
186 | |||
187 | static void platform_recover(int platform_mode) | ||
188 | { | ||
189 | if (platform_mode && hibernation_ops && hibernation_ops->recover) | ||
190 | hibernation_ops->recover(); | ||
191 | } | ||
192 | |||
193 | /** | ||
183 | * create_image - freeze devices that need to be frozen with interrupts | 194 | * create_image - freeze devices that need to be frozen with interrupts |
184 | * off, create the hibernation image and thaw those devices. Control | 195 | * off, create the hibernation image and thaw those devices. Control |
185 | * reappears in this routine after a restore. | 196 | * reappears in this routine after a restore. |
@@ -193,6 +204,7 @@ static int create_image(int platform_mode) | |||
193 | if (error) | 204 | if (error) |
194 | return error; | 205 | return error; |
195 | 206 | ||
207 | device_pm_lock(); | ||
196 | local_irq_disable(); | 208 | local_irq_disable(); |
197 | /* At this point, device_suspend() has been called, but *not* | 209 | /* At this point, device_suspend() has been called, but *not* |
198 | * device_power_down(). We *must* call device_power_down() now. | 210 | * device_power_down(). We *must* call device_power_down() now. |
@@ -224,9 +236,11 @@ static int create_image(int platform_mode) | |||
224 | /* NOTE: device_power_up() is just a resume() for devices | 236 | /* NOTE: device_power_up() is just a resume() for devices |
225 | * that suspended with irqs off ... no overall powerup. | 237 | * that suspended with irqs off ... no overall powerup. |
226 | */ | 238 | */ |
227 | device_power_up(); | 239 | device_power_up(in_suspend ? |
240 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); | ||
228 | Enable_irqs: | 241 | Enable_irqs: |
229 | local_irq_enable(); | 242 | local_irq_enable(); |
243 | device_pm_unlock(); | ||
230 | return error; | 244 | return error; |
231 | } | 245 | } |
232 | 246 | ||
@@ -255,10 +269,10 @@ int hibernation_snapshot(int platform_mode) | |||
255 | suspend_console(); | 269 | suspend_console(); |
256 | error = device_suspend(PMSG_FREEZE); | 270 | error = device_suspend(PMSG_FREEZE); |
257 | if (error) | 271 | if (error) |
258 | goto Resume_console; | 272 | goto Recover_platform; |
259 | 273 | ||
260 | if (hibernation_test(TEST_DEVICES)) | 274 | if (hibernation_test(TEST_DEVICES)) |
261 | goto Resume_devices; | 275 | goto Recover_platform; |
262 | 276 | ||
263 | error = platform_pre_snapshot(platform_mode); | 277 | error = platform_pre_snapshot(platform_mode); |
264 | if (error || hibernation_test(TEST_PLATFORM)) | 278 | if (error || hibernation_test(TEST_PLATFORM)) |
@@ -280,12 +294,16 @@ int hibernation_snapshot(int platform_mode) | |||
280 | Finish: | 294 | Finish: |
281 | platform_finish(platform_mode); | 295 | platform_finish(platform_mode); |
282 | Resume_devices: | 296 | Resume_devices: |
283 | device_resume(); | 297 | device_resume(in_suspend ? |
284 | Resume_console: | 298 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); |
285 | resume_console(); | 299 | resume_console(); |
286 | Close: | 300 | Close: |
287 | platform_end(platform_mode); | 301 | platform_end(platform_mode); |
288 | return error; | 302 | return error; |
303 | |||
304 | Recover_platform: | ||
305 | platform_recover(platform_mode); | ||
306 | goto Resume_devices; | ||
289 | } | 307 | } |
290 | 308 | ||
291 | /** | 309 | /** |
@@ -300,8 +318,9 @@ static int resume_target_kernel(void) | |||
300 | { | 318 | { |
301 | int error; | 319 | int error; |
302 | 320 | ||
321 | device_pm_lock(); | ||
303 | local_irq_disable(); | 322 | local_irq_disable(); |
304 | error = device_power_down(PMSG_PRETHAW); | 323 | error = device_power_down(PMSG_QUIESCE); |
305 | if (error) { | 324 | if (error) { |
306 | printk(KERN_ERR "PM: Some devices failed to power down, " | 325 | printk(KERN_ERR "PM: Some devices failed to power down, " |
307 | "aborting resume\n"); | 326 | "aborting resume\n"); |
@@ -329,9 +348,10 @@ static int resume_target_kernel(void) | |||
329 | swsusp_free(); | 348 | swsusp_free(); |
330 | restore_processor_state(); | 349 | restore_processor_state(); |
331 | touch_softlockup_watchdog(); | 350 | touch_softlockup_watchdog(); |
332 | device_power_up(); | 351 | device_power_up(PMSG_RECOVER); |
333 | Enable_irqs: | 352 | Enable_irqs: |
334 | local_irq_enable(); | 353 | local_irq_enable(); |
354 | device_pm_unlock(); | ||
335 | return error; | 355 | return error; |
336 | } | 356 | } |
337 | 357 | ||
@@ -350,7 +370,7 @@ int hibernation_restore(int platform_mode) | |||
350 | 370 | ||
351 | pm_prepare_console(); | 371 | pm_prepare_console(); |
352 | suspend_console(); | 372 | suspend_console(); |
353 | error = device_suspend(PMSG_PRETHAW); | 373 | error = device_suspend(PMSG_QUIESCE); |
354 | if (error) | 374 | if (error) |
355 | goto Finish; | 375 | goto Finish; |
356 | 376 | ||
@@ -362,7 +382,7 @@ int hibernation_restore(int platform_mode) | |||
362 | enable_nonboot_cpus(); | 382 | enable_nonboot_cpus(); |
363 | } | 383 | } |
364 | platform_restore_cleanup(platform_mode); | 384 | platform_restore_cleanup(platform_mode); |
365 | device_resume(); | 385 | device_resume(PMSG_RECOVER); |
366 | Finish: | 386 | Finish: |
367 | resume_console(); | 387 | resume_console(); |
368 | pm_restore_console(); | 388 | pm_restore_console(); |
@@ -392,8 +412,11 @@ int hibernation_platform_enter(void) | |||
392 | 412 | ||
393 | suspend_console(); | 413 | suspend_console(); |
394 | error = device_suspend(PMSG_HIBERNATE); | 414 | error = device_suspend(PMSG_HIBERNATE); |
395 | if (error) | 415 | if (error) { |
396 | goto Resume_console; | 416 | if (hibernation_ops->recover) |
417 | hibernation_ops->recover(); | ||
418 | goto Resume_devices; | ||
419 | } | ||
397 | 420 | ||
398 | error = hibernation_ops->prepare(); | 421 | error = hibernation_ops->prepare(); |
399 | if (error) | 422 | if (error) |
@@ -403,6 +426,7 @@ int hibernation_platform_enter(void) | |||
403 | if (error) | 426 | if (error) |
404 | goto Finish; | 427 | goto Finish; |
405 | 428 | ||
429 | device_pm_lock(); | ||
406 | local_irq_disable(); | 430 | local_irq_disable(); |
407 | error = device_power_down(PMSG_HIBERNATE); | 431 | error = device_power_down(PMSG_HIBERNATE); |
408 | if (!error) { | 432 | if (!error) { |
@@ -411,6 +435,7 @@ int hibernation_platform_enter(void) | |||
411 | while (1); | 435 | while (1); |
412 | } | 436 | } |
413 | local_irq_enable(); | 437 | local_irq_enable(); |
438 | device_pm_unlock(); | ||
414 | 439 | ||
415 | /* | 440 | /* |
416 | * We don't need to reenable the nonboot CPUs or resume consoles, since | 441 | * We don't need to reenable the nonboot CPUs or resume consoles, since |
@@ -419,8 +444,7 @@ int hibernation_platform_enter(void) | |||
419 | Finish: | 444 | Finish: |
420 | hibernation_ops->finish(); | 445 | hibernation_ops->finish(); |
421 | Resume_devices: | 446 | Resume_devices: |
422 | device_resume(); | 447 | device_resume(PMSG_RESTORE); |
423 | Resume_console: | ||
424 | resume_console(); | 448 | resume_console(); |
425 | Close: | 449 | Close: |
426 | hibernation_ops->end(); | 450 | hibernation_ops->end(); |
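The new platform_recover() call assumes the hibernation ops structure has grown an optional .recover callback, invoked when device_suspend() fails so the platform can undo whatever its earlier callbacks armed. A hedged registration sketch (struct layout, member names, and the acme_* identifiers are assumptions based on this tree):

```c
#include <linux/suspend.h>

static void acme_recover(void)
{
	/* roll back firmware state set up earlier in the hibernation sequence */
}

static struct platform_hibernation_ops acme_hibernation_ops = {
	/* .begin/.end/.prepare/.enter/.finish/... as before */
	.recover = acme_recover,
};

/* somewhere in platform init: hibernation_set_ops(&acme_hibernation_ops); */
```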
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 6a6d5eb3524e..3398f4651aa1 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -228,6 +228,7 @@ static int suspend_enter(suspend_state_t state) | |||
228 | { | 228 | { |
229 | int error = 0; | 229 | int error = 0; |
230 | 230 | ||
231 | device_pm_lock(); | ||
231 | arch_suspend_disable_irqs(); | 232 | arch_suspend_disable_irqs(); |
232 | BUG_ON(!irqs_disabled()); | 233 | BUG_ON(!irqs_disabled()); |
233 | 234 | ||
@@ -239,10 +240,11 @@ static int suspend_enter(suspend_state_t state) | |||
239 | if (!suspend_test(TEST_CORE)) | 240 | if (!suspend_test(TEST_CORE)) |
240 | error = suspend_ops->enter(state); | 241 | error = suspend_ops->enter(state); |
241 | 242 | ||
242 | device_power_up(); | 243 | device_power_up(PMSG_RESUME); |
243 | Done: | 244 | Done: |
244 | arch_suspend_enable_irqs(); | 245 | arch_suspend_enable_irqs(); |
245 | BUG_ON(irqs_disabled()); | 246 | BUG_ON(irqs_disabled()); |
247 | device_pm_unlock(); | ||
246 | return error; | 248 | return error; |
247 | } | 249 | } |
248 | 250 | ||
@@ -267,11 +269,11 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
267 | error = device_suspend(PMSG_SUSPEND); | 269 | error = device_suspend(PMSG_SUSPEND); |
268 | if (error) { | 270 | if (error) { |
269 | printk(KERN_ERR "PM: Some devices failed to suspend\n"); | 271 | printk(KERN_ERR "PM: Some devices failed to suspend\n"); |
270 | goto Resume_console; | 272 | goto Recover_platform; |
271 | } | 273 | } |
272 | 274 | ||
273 | if (suspend_test(TEST_DEVICES)) | 275 | if (suspend_test(TEST_DEVICES)) |
274 | goto Resume_devices; | 276 | goto Recover_platform; |
275 | 277 | ||
276 | if (suspend_ops->prepare) { | 278 | if (suspend_ops->prepare) { |
277 | error = suspend_ops->prepare(); | 279 | error = suspend_ops->prepare(); |
@@ -291,13 +293,17 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
291 | if (suspend_ops->finish) | 293 | if (suspend_ops->finish) |
292 | suspend_ops->finish(); | 294 | suspend_ops->finish(); |
293 | Resume_devices: | 295 | Resume_devices: |
294 | device_resume(); | 296 | device_resume(PMSG_RESUME); |
295 | Resume_console: | ||
296 | resume_console(); | 297 | resume_console(); |
297 | Close: | 298 | Close: |
298 | if (suspend_ops->end) | 299 | if (suspend_ops->end) |
299 | suspend_ops->end(); | 300 | suspend_ops->end(); |
300 | return error; | 301 | return error; |
302 | |||
303 | Recover_platform: | ||
304 | if (suspend_ops->recover) | ||
305 | suspend_ops->recover(); | ||
306 | goto Resume_devices; | ||
301 | } | 307 | } |
302 | 308 | ||
303 | /** | 309 | /** |
diff --git a/kernel/power/process.c b/kernel/power/process.c
index f1d0b345c9ba..5fb87652f214 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -19,9 +19,6 @@ | |||
19 | */ | 19 | */ |
20 | #define TIMEOUT (20 * HZ) | 20 | #define TIMEOUT (20 * HZ) |
21 | 21 | ||
22 | #define FREEZER_KERNEL_THREADS 0 | ||
23 | #define FREEZER_USER_SPACE 1 | ||
24 | |||
25 | static inline int freezeable(struct task_struct * p) | 22 | static inline int freezeable(struct task_struct * p) |
26 | { | 23 | { |
27 | if ((p == current) || | 24 | if ((p == current) || |
@@ -84,63 +81,53 @@ static void fake_signal_wake_up(struct task_struct *p) | |||
84 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | 81 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
85 | } | 82 | } |
86 | 83 | ||
87 | static int has_mm(struct task_struct *p) | 84 | static inline bool should_send_signal(struct task_struct *p) |
88 | { | 85 | { |
89 | return (p->mm && !(p->flags & PF_BORROWED_MM)); | 86 | return !(p->flags & PF_FREEZER_NOSIG); |
90 | } | 87 | } |
91 | 88 | ||
92 | /** | 89 | /** |
93 | * freeze_task - send a freeze request to given task | 90 | * freeze_task - send a freeze request to given task |
94 | * @p: task to send the request to | 91 | * @p: task to send the request to |
95 | * @with_mm_only: if set, the request will only be sent if the task has its | 92 | * @sig_only: if set, the request will only be sent if the task has the |
96 | * own mm | 93 | * PF_FREEZER_NOSIG flag unset |
97 | * Return value: 0, if @with_mm_only is set and the task has no mm of its | 94 | * Return value: 'false', if @sig_only is set and the task has |
98 | * own or the task is frozen, 1, otherwise | 95 | * PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise |
99 | * | 96 | * |
100 | * The freeze request is sent by seting the tasks's TIF_FREEZE flag and | 97 | * The freeze request is sent by setting the tasks's TIF_FREEZE flag and |
101 | * either sending a fake signal to it or waking it up, depending on whether | 98 | * either sending a fake signal to it or waking it up, depending on whether |
102 | * or not it has its own mm (ie. it is a user land task). If @with_mm_only | 99 | * or not it has PF_FREEZER_NOSIG set. If @sig_only is set and the task |
103 | * is set and the task has no mm of its own (ie. it is a kernel thread), | 100 | * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its |
104 | * its TIF_FREEZE flag should not be set. | 101 | * TIF_FREEZE flag will not be set. |
105 | * | ||
106 | * The task_lock() is necessary to prevent races with exit_mm() or | ||
107 | * use_mm()/unuse_mm() from occuring. | ||
108 | */ | 102 | */ |
109 | static int freeze_task(struct task_struct *p, int with_mm_only) | 103 | static bool freeze_task(struct task_struct *p, bool sig_only) |
110 | { | 104 | { |
111 | int ret = 1; | 105 | /* |
106 | * We first check if the task is freezing and next if it has already | ||
107 | * been frozen to avoid the race with frozen_process() which first marks | ||
108 | * the task as frozen and next clears its TIF_FREEZE. | ||
109 | */ | ||
110 | if (!freezing(p)) { | ||
111 | rmb(); | ||
112 | if (frozen(p)) | ||
113 | return false; | ||
112 | 114 | ||
113 | task_lock(p); | 115 | if (!sig_only || should_send_signal(p)) |
114 | if (freezing(p)) { | 116 | set_freeze_flag(p); |
115 | if (has_mm(p)) { | 117 | else |
116 | if (!signal_pending(p)) | 118 | return false; |
117 | fake_signal_wake_up(p); | 119 | } |
118 | } else { | 120 | |
119 | if (with_mm_only) | 121 | if (should_send_signal(p)) { |
120 | ret = 0; | 122 | if (!signal_pending(p)) |
121 | else | 123 | fake_signal_wake_up(p); |
122 | wake_up_state(p, TASK_INTERRUPTIBLE); | 124 | } else if (sig_only) { |
123 | } | 125 | return false; |
124 | } else { | 126 | } else { |
125 | rmb(); | 127 | wake_up_state(p, TASK_INTERRUPTIBLE); |
126 | if (frozen(p)) { | ||
127 | ret = 0; | ||
128 | } else { | ||
129 | if (has_mm(p)) { | ||
130 | set_freeze_flag(p); | ||
131 | fake_signal_wake_up(p); | ||
132 | } else { | ||
133 | if (with_mm_only) { | ||
134 | ret = 0; | ||
135 | } else { | ||
136 | set_freeze_flag(p); | ||
137 | wake_up_state(p, TASK_INTERRUPTIBLE); | ||
138 | } | ||
139 | } | ||
140 | } | ||
141 | } | 128 | } |
142 | task_unlock(p); | 129 | |
143 | return ret; | 130 | return true; |
144 | } | 131 | } |
145 | 132 | ||
146 | static void cancel_freezing(struct task_struct *p) | 133 | static void cancel_freezing(struct task_struct *p) |
@@ -156,7 +143,7 @@ static void cancel_freezing(struct task_struct *p) | |||
156 | } | 143 | } |
157 | } | 144 | } |
158 | 145 | ||
159 | static int try_to_freeze_tasks(int freeze_user_space) | 146 | static int try_to_freeze_tasks(bool sig_only) |
160 | { | 147 | { |
161 | struct task_struct *g, *p; | 148 | struct task_struct *g, *p; |
162 | unsigned long end_time; | 149 | unsigned long end_time; |
@@ -175,7 +162,7 @@ static int try_to_freeze_tasks(int freeze_user_space) | |||
175 | if (frozen(p) || !freezeable(p)) | 162 | if (frozen(p) || !freezeable(p)) |
176 | continue; | 163 | continue; |
177 | 164 | ||
178 | if (!freeze_task(p, freeze_user_space)) | 165 | if (!freeze_task(p, sig_only)) |
179 | continue; | 166 | continue; |
180 | 167 | ||
181 | /* | 168 | /* |
@@ -235,13 +222,13 @@ int freeze_processes(void) | |||
235 | int error; | 222 | int error; |
236 | 223 | ||
237 | printk("Freezing user space processes ... "); | 224 | printk("Freezing user space processes ... "); |
238 | error = try_to_freeze_tasks(FREEZER_USER_SPACE); | 225 | error = try_to_freeze_tasks(true); |
239 | if (error) | 226 | if (error) |
240 | goto Exit; | 227 | goto Exit; |
241 | printk("done.\n"); | 228 | printk("done.\n"); |
242 | 229 | ||
243 | printk("Freezing remaining freezable tasks ... "); | 230 | printk("Freezing remaining freezable tasks ... "); |
244 | error = try_to_freeze_tasks(FREEZER_KERNEL_THREADS); | 231 | error = try_to_freeze_tasks(false); |
245 | if (error) | 232 | if (error) |
246 | goto Exit; | 233 | goto Exit; |
247 | printk("done."); | 234 | printk("done."); |
@@ -251,7 +238,7 @@ int freeze_processes(void) | |||
251 | return error; | 238 | return error; |
252 | } | 239 | } |
253 | 240 | ||
254 | static void thaw_tasks(int thaw_user_space) | 241 | static void thaw_tasks(bool nosig_only) |
255 | { | 242 | { |
256 | struct task_struct *g, *p; | 243 | struct task_struct *g, *p; |
257 | 244 | ||
@@ -260,7 +247,7 @@ static void thaw_tasks(int thaw_user_space) | |||
260 | if (!freezeable(p)) | 247 | if (!freezeable(p)) |
261 | continue; | 248 | continue; |
262 | 249 | ||
263 | if (!p->mm == thaw_user_space) | 250 | if (nosig_only && should_send_signal(p)) |
264 | continue; | 251 | continue; |
265 | 252 | ||
266 | thaw_process(p); | 253 | thaw_process(p); |
@@ -271,8 +258,8 @@ static void thaw_tasks(int thaw_user_space) | |||
271 | void thaw_processes(void) | 258 | void thaw_processes(void) |
272 | { | 259 | { |
273 | printk("Restarting tasks ... "); | 260 | printk("Restarting tasks ... "); |
274 | thaw_tasks(FREEZER_KERNEL_THREADS); | 261 | thaw_tasks(true); |
275 | thaw_tasks(FREEZER_USER_SPACE); | 262 | thaw_tasks(false); |
276 | schedule(); | 263 | schedule(); |
277 | printk("done.\n"); | 264 | printk("done.\n"); |
278 | } | 265 | } |
diff --git a/kernel/power/user.c b/kernel/power/user.c
index f5512cb3aa86..a6332a313262 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/console.h> | 23 | #include <linux/console.h> |
24 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
25 | #include <linux/freezer.h> | 25 | #include <linux/freezer.h> |
26 | #include <linux/smp_lock.h> | ||
26 | 27 | ||
27 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
28 | 29 | ||
@@ -69,16 +70,22 @@ static int snapshot_open(struct inode *inode, struct file *filp) | |||
69 | struct snapshot_data *data; | 70 | struct snapshot_data *data; |
70 | int error; | 71 | int error; |
71 | 72 | ||
72 | if (!atomic_add_unless(&snapshot_device_available, -1, 0)) | 73 | mutex_lock(&pm_mutex); |
73 | return -EBUSY; | 74 | |
75 | if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { | ||
76 | error = -EBUSY; | ||
77 | goto Unlock; | ||
78 | } | ||
74 | 79 | ||
75 | if ((filp->f_flags & O_ACCMODE) == O_RDWR) { | 80 | if ((filp->f_flags & O_ACCMODE) == O_RDWR) { |
76 | atomic_inc(&snapshot_device_available); | 81 | atomic_inc(&snapshot_device_available); |
77 | return -ENOSYS; | 82 | error = -ENOSYS; |
83 | goto Unlock; | ||
78 | } | 84 | } |
79 | if(create_basic_memory_bitmaps()) { | 85 | if(create_basic_memory_bitmaps()) { |
80 | atomic_inc(&snapshot_device_available); | 86 | atomic_inc(&snapshot_device_available); |
81 | return -ENOMEM; | 87 | error = -ENOMEM; |
88 | goto Unlock; | ||
82 | } | 89 | } |
83 | nonseekable_open(inode, filp); | 90 | nonseekable_open(inode, filp); |
84 | data = &snapshot_state; | 91 | data = &snapshot_state; |
@@ -98,33 +105,36 @@ static int snapshot_open(struct inode *inode, struct file *filp) | |||
98 | if (error) | 105 | if (error) |
99 | pm_notifier_call_chain(PM_POST_HIBERNATION); | 106 | pm_notifier_call_chain(PM_POST_HIBERNATION); |
100 | } | 107 | } |
101 | if (error) { | 108 | if (error) |
102 | atomic_inc(&snapshot_device_available); | 109 | atomic_inc(&snapshot_device_available); |
103 | return error; | ||
104 | } | ||
105 | data->frozen = 0; | 110 | data->frozen = 0; |
106 | data->ready = 0; | 111 | data->ready = 0; |
107 | data->platform_support = 0; | 112 | data->platform_support = 0; |
108 | 113 | ||
109 | return 0; | 114 | Unlock: |
115 | mutex_unlock(&pm_mutex); | ||
116 | |||
117 | return error; | ||
110 | } | 118 | } |
111 | 119 | ||
112 | static int snapshot_release(struct inode *inode, struct file *filp) | 120 | static int snapshot_release(struct inode *inode, struct file *filp) |
113 | { | 121 | { |
114 | struct snapshot_data *data; | 122 | struct snapshot_data *data; |
115 | 123 | ||
124 | mutex_lock(&pm_mutex); | ||
125 | |||
116 | swsusp_free(); | 126 | swsusp_free(); |
117 | free_basic_memory_bitmaps(); | 127 | free_basic_memory_bitmaps(); |
118 | data = filp->private_data; | 128 | data = filp->private_data; |
119 | free_all_swap_pages(data->swap); | 129 | free_all_swap_pages(data->swap); |
120 | if (data->frozen) { | 130 | if (data->frozen) |
121 | mutex_lock(&pm_mutex); | ||
122 | thaw_processes(); | 131 | thaw_processes(); |
123 | mutex_unlock(&pm_mutex); | ||
124 | } | ||
125 | pm_notifier_call_chain(data->mode == O_WRONLY ? | 132 | pm_notifier_call_chain(data->mode == O_WRONLY ? |
126 | PM_POST_HIBERNATION : PM_POST_RESTORE); | 133 | PM_POST_HIBERNATION : PM_POST_RESTORE); |
127 | atomic_inc(&snapshot_device_available); | 134 | atomic_inc(&snapshot_device_available); |
135 | |||
136 | mutex_unlock(&pm_mutex); | ||
137 | |||
128 | return 0; | 138 | return 0; |
129 | } | 139 | } |
130 | 140 | ||
@@ -134,9 +144,13 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf, | |||
134 | struct snapshot_data *data; | 144 | struct snapshot_data *data; |
135 | ssize_t res; | 145 | ssize_t res; |
136 | 146 | ||
147 | mutex_lock(&pm_mutex); | ||
148 | |||
137 | data = filp->private_data; | 149 | data = filp->private_data; |
138 | if (!data->ready) | 150 | if (!data->ready) { |
139 | return -ENODATA; | 151 | res = -ENODATA; |
152 | goto Unlock; | ||
153 | } | ||
140 | res = snapshot_read_next(&data->handle, count); | 154 | res = snapshot_read_next(&data->handle, count); |
141 | if (res > 0) { | 155 | if (res > 0) { |
142 | if (copy_to_user(buf, data_of(data->handle), res)) | 156 | if (copy_to_user(buf, data_of(data->handle), res)) |
@@ -144,6 +158,10 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf, | |||
144 | else | 158 | else |
145 | *offp = data->handle.offset; | 159 | *offp = data->handle.offset; |
146 | } | 160 | } |
161 | |||
162 | Unlock: | ||
163 | mutex_unlock(&pm_mutex); | ||
164 | |||
147 | return res; | 165 | return res; |
148 | } | 166 | } |
149 | 167 | ||
@@ -153,6 +171,8 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf, | |||
153 | struct snapshot_data *data; | 171 | struct snapshot_data *data; |
154 | ssize_t res; | 172 | ssize_t res; |
155 | 173 | ||
174 | mutex_lock(&pm_mutex); | ||
175 | |||
156 | data = filp->private_data; | 176 | data = filp->private_data; |
157 | res = snapshot_write_next(&data->handle, count); | 177 | res = snapshot_write_next(&data->handle, count); |
158 | if (res > 0) { | 178 | if (res > 0) { |
@@ -161,11 +181,14 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf, | |||
161 | else | 181 | else |
162 | *offp = data->handle.offset; | 182 | *offp = data->handle.offset; |
163 | } | 183 | } |
184 | |||
185 | mutex_unlock(&pm_mutex); | ||
186 | |||
164 | return res; | 187 | return res; |
165 | } | 188 | } |
166 | 189 | ||
167 | static int snapshot_ioctl(struct inode *inode, struct file *filp, | 190 | static long snapshot_ioctl(struct file *filp, unsigned int cmd, |
168 | unsigned int cmd, unsigned long arg) | 191 | unsigned long arg) |
169 | { | 192 | { |
170 | int error = 0; | 193 | int error = 0; |
171 | struct snapshot_data *data; | 194 | struct snapshot_data *data; |
@@ -179,6 +202,9 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
179 | if (!capable(CAP_SYS_ADMIN)) | 202 | if (!capable(CAP_SYS_ADMIN)) |
180 | return -EPERM; | 203 | return -EPERM; |
181 | 204 | ||
205 | if (!mutex_trylock(&pm_mutex)) | ||
206 | return -EBUSY; | ||
207 | |||
182 | data = filp->private_data; | 208 | data = filp->private_data; |
183 | 209 | ||
184 | switch (cmd) { | 210 | switch (cmd) { |
@@ -186,7 +212,6 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
186 | case SNAPSHOT_FREEZE: | 212 | case SNAPSHOT_FREEZE: |
187 | if (data->frozen) | 213 | if (data->frozen) |
188 | break; | 214 | break; |
189 | mutex_lock(&pm_mutex); | ||
190 | printk("Syncing filesystems ... "); | 215 | printk("Syncing filesystems ... "); |
191 | sys_sync(); | 216 | sys_sync(); |
192 | printk("done.\n"); | 217 | printk("done.\n"); |
@@ -194,7 +219,6 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
194 | error = freeze_processes(); | 219 | error = freeze_processes(); |
195 | if (error) | 220 | if (error) |
196 | thaw_processes(); | 221 | thaw_processes(); |
197 | mutex_unlock(&pm_mutex); | ||
198 | if (!error) | 222 | if (!error) |
199 | data->frozen = 1; | 223 | data->frozen = 1; |
200 | break; | 224 | break; |
@@ -202,9 +226,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
202 | case SNAPSHOT_UNFREEZE: | 226 | case SNAPSHOT_UNFREEZE: |
203 | if (!data->frozen || data->ready) | 227 | if (!data->frozen || data->ready) |
204 | break; | 228 | break; |
205 | mutex_lock(&pm_mutex); | ||
206 | thaw_processes(); | 229 | thaw_processes(); |
207 | mutex_unlock(&pm_mutex); | ||
208 | data->frozen = 0; | 230 | data->frozen = 0; |
209 | break; | 231 | break; |
210 | 232 | ||
@@ -307,16 +329,11 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
307 | error = -EPERM; | 329 | error = -EPERM; |
308 | break; | 330 | break; |
309 | } | 331 | } |
310 | if (!mutex_trylock(&pm_mutex)) { | ||
311 | error = -EBUSY; | ||
312 | break; | ||
313 | } | ||
314 | /* | 332 | /* |
315 | * Tasks are frozen and the notifiers have been called with | 333 | * Tasks are frozen and the notifiers have been called with |
316 | * PM_HIBERNATION_PREPARE | 334 | * PM_HIBERNATION_PREPARE |
317 | */ | 335 | */ |
318 | error = suspend_devices_and_enter(PM_SUSPEND_MEM); | 336 | error = suspend_devices_and_enter(PM_SUSPEND_MEM); |
319 | mutex_unlock(&pm_mutex); | ||
320 | break; | 337 | break; |
321 | 338 | ||
322 | case SNAPSHOT_PLATFORM_SUPPORT: | 339 | case SNAPSHOT_PLATFORM_SUPPORT: |
@@ -390,6 +407,8 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
390 | 407 | ||
391 | } | 408 | } |
392 | 409 | ||
410 | mutex_unlock(&pm_mutex); | ||
411 | |||
393 | return error; | 412 | return error; |
394 | } | 413 | } |
395 | 414 | ||
@@ -399,7 +418,7 @@ static const struct file_operations snapshot_fops = { | |||
399 | .read = snapshot_read, | 418 | .read = snapshot_read, |
400 | .write = snapshot_write, | 419 | .write = snapshot_write, |
401 | .llseek = no_llseek, | 420 | .llseek = no_llseek, |
402 | .ioctl = snapshot_ioctl, | 421 | .unlocked_ioctl = snapshot_ioctl, |
403 | }; | 422 | }; |
404 | 423 | ||
405 | static struct miscdevice snapshot_device = { | 424 | static struct miscdevice snapshot_device = { |
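The switch from .ioctl to .unlocked_ioctl drops the implicit big-kernel-lock serialisation, which is why the handler above now takes pm_mutex itself. The general conversion pattern, as a hedged sketch with made-up names:

```c
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(acme_lock);	/* replaces the BKL the old .ioctl relied on */

static long acme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	long err = 0;

	if (!mutex_trylock(&acme_lock))
		return -EBUSY;

	switch (cmd) {
	/* ... handle commands ... */
	default:
		err = -ENOTTY;
	}

	mutex_unlock(&acme_lock);
	return err;
}

static const struct file_operations acme_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= acme_ioctl,
};
```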
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index e337390fce01..8392a9da6450 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -33,13 +33,9 @@ | |||
33 | */ | 33 | */ |
34 | void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) | 34 | void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) |
35 | { | 35 | { |
36 | BUG_ON(!list_empty(&child->ptrace_list)); | 36 | BUG_ON(!list_empty(&child->ptrace_entry)); |
37 | if (child->parent == new_parent) | 37 | list_add(&child->ptrace_entry, &new_parent->ptraced); |
38 | return; | ||
39 | list_add(&child->ptrace_list, &child->parent->ptrace_children); | ||
40 | remove_parent(child); | ||
41 | child->parent = new_parent; | 38 | child->parent = new_parent; |
42 | add_parent(child); | ||
43 | } | 39 | } |
44 | 40 | ||
45 | /* | 41 | /* |
@@ -73,12 +69,8 @@ void __ptrace_unlink(struct task_struct *child) | |||
73 | BUG_ON(!child->ptrace); | 69 | BUG_ON(!child->ptrace); |
74 | 70 | ||
75 | child->ptrace = 0; | 71 | child->ptrace = 0; |
76 | if (ptrace_reparented(child)) { | 72 | child->parent = child->real_parent; |
77 | list_del_init(&child->ptrace_list); | 73 | list_del_init(&child->ptrace_entry); |
78 | remove_parent(child); | ||
79 | child->parent = child->real_parent; | ||
80 | add_parent(child); | ||
81 | } | ||
82 | 74 | ||
83 | if (task_is_traced(child)) | 75 | if (task_is_traced(child)) |
84 | ptrace_untrace(child); | 76 | ptrace_untrace(child); |
@@ -492,15 +484,34 @@ int ptrace_traceme(void) | |||
492 | /* | 484 | /* |
493 | * Are we already being traced? | 485 | * Are we already being traced? |
494 | */ | 486 | */ |
487 | repeat: | ||
495 | task_lock(current); | 488 | task_lock(current); |
496 | if (!(current->ptrace & PT_PTRACED)) { | 489 | if (!(current->ptrace & PT_PTRACED)) { |
490 | /* | ||
491 | * See ptrace_attach() comments about the locking here. | ||
492 | */ | ||
493 | unsigned long flags; | ||
494 | if (!write_trylock_irqsave(&tasklist_lock, flags)) { | ||
495 | task_unlock(current); | ||
496 | do { | ||
497 | cpu_relax(); | ||
498 | } while (!write_can_lock(&tasklist_lock)); | ||
499 | goto repeat; | ||
500 | } | ||
501 | |||
497 | ret = security_ptrace(current->parent, current, | 502 | ret = security_ptrace(current->parent, current, |
498 | PTRACE_MODE_ATTACH); | 503 | PTRACE_MODE_ATTACH); |
504 | |||
499 | /* | 505 | /* |
500 | * Set the ptrace bit in the process ptrace flags. | 506 | * Set the ptrace bit in the process ptrace flags. |
507 | * Then link us on our parent's ptraced list. | ||
501 | */ | 508 | */ |
502 | if (!ret) | 509 | if (!ret) { |
503 | current->ptrace |= PT_PTRACED; | 510 | current->ptrace |= PT_PTRACED; |
511 | __ptrace_link(current, current->real_parent); | ||
512 | } | ||
513 | |||
514 | write_unlock_irqrestore(&tasklist_lock, flags); | ||
504 | } | 515 | } |
505 | task_unlock(current); | 516 | task_unlock(current); |
506 | return ret; | 517 | return ret; |
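With the __ptrace_link() call added to ptrace_traceme(), a self-attaching child now lands on its parent's ->ptraced list just like a PTRACE_ATTACH target. The classic userspace sequence, as a small stand-alone sketch:

```c
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);	/* parent becomes the tracer */
		execlp("true", "true", (char *)NULL);	/* stops with SIGTRAP at exec */
		_exit(1);
	}

	waitpid(pid, &status, 0);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	ptrace(PTRACE_CONT, pid, NULL, NULL);		/* let it run to completion */
	waitpid(pid, &status, 0);
	return 0;
}
```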