author     Oleg Nesterov <oleg@redhat.com>                      2009-04-02 19:58:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>      2009-04-02 22:05:00 -0400
commit     39c626ae47c469abdfd30c6e42eff884931380d6 (patch)
tree       58cbe75bac79ce8ef55c94189df26448d0283918
parent     7f5d3652d469cdf9eb2365dfea7ce3fb9e1409cc (diff)
forget_original_parent: split out the un-ptrace part
By discussion with Roland.

- Rename ptrace_exit() to exit_ptrace(), and change it to do all the
  necessary work with ->ptraced list by its own.

- Move this code from exit.c to ptrace.c

- Update the comment in ptrace_detach() to explain the rechecking of
  the child->ptrace.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: "Metzger, Markus T" <markus.t.metzger@intel.com>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
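For orientation, below is a condensed sketch of the control flow after this patch, pieced together from the hunks that follow. It is not the literal patched source (the BUG_ON and the reparenting loop are elided); see the kernel/ptrace.c and kernel/exit.c hunks for the real code.

/* kernel/ptrace.c: all exit-time un-ptrace work now lives here */
void exit_ptrace(struct task_struct *tracer)
{
        struct task_struct *p, *n;
        LIST_HEAD(ptrace_dead);

        write_lock_irq(&tasklist_lock);
        list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry)
                if (__ptrace_detach(tracer, p))         /* traced zombie that must be reaped */
                        list_add(&p->ptrace_entry, &ptrace_dead);
        write_unlock_irq(&tasklist_lock);

        list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);                        /* must run without tasklist_lock held */
        }
}

/* kernel/exit.c: the caller no longer does any ptrace bookkeeping itself */
static void forget_original_parent(struct task_struct *father)
{
        exit_ptrace(father);            /* takes and drops tasklist_lock on its own */

        write_lock_irq(&tasklist_lock);
        /* ... reparent father->children to the new reaper ... */
        write_unlock_irq(&tasklist_lock);

        /* ... release_task() any children that reparent_thread() marked dead ... */
}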
-rw-r--r--   include/linux/ptrace.h  |  2
-rw-r--r--   include/linux/sched.h   |  5
-rw-r--r--   kernel/exit.c           | 95
-rw-r--r--   kernel/ptrace.c         | 78
4 files changed, 88 insertions, 92 deletions
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 1a2b0cb55535..67c15653fc23 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -94,7 +94,7 @@ extern void ptrace_notify(int exit_code);
 extern void __ptrace_link(struct task_struct *child,
                           struct task_struct *new_parent);
 extern void __ptrace_unlink(struct task_struct *child);
-extern int __ptrace_detach(struct task_struct *tracer, struct task_struct *p);
+extern void exit_ptrace(struct task_struct *tracer);
 extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags);
 #define PTRACE_MODE_READ   1
 #define PTRACE_MODE_ATTACH 2
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9186f8c5d5f2..b47c94e7560b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2061,6 +2061,11 @@ static inline int thread_group_empty(struct task_struct *p)
 #define delay_group_leader(p) \
         (thread_group_leader(p) && !thread_group_empty(p))
 
+static inline int task_detached(struct task_struct *p)
+{
+        return p->exit_signal == -1;
+}
+
 /*
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4(). Also used in procfs. Also
diff --git a/kernel/exit.c b/kernel/exit.c
index 3e09b7cb3b20..506693dfdd4e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -61,11 +61,6 @@ DEFINE_TRACE(sched_process_wait);
 
 static void exit_mm(struct task_struct * tsk);
 
-static inline int task_detached(struct task_struct *p)
-{
-        return p->exit_signal == -1;
-}
-
 static void __unhash_process(struct task_struct *p)
 {
         nr_threads--;
@@ -731,85 +726,6 @@ static void exit_mm(struct task_struct * tsk)
         mmput(mm);
 }
 
-/*
- * Called with irqs disabled, returns true if childs should reap themselves.
- */
-static int ignoring_children(struct sighand_struct *sigh)
-{
-        int ret;
-        spin_lock(&sigh->siglock);
-        ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
-              (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
-        spin_unlock(&sigh->siglock);
-        return ret;
-}
-
-/* Returns nonzero if the tracee should be released. */
-int __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
-{
-        __ptrace_unlink(p);
-
-        if (p->exit_state != EXIT_ZOMBIE)
-                return 0;
-        /*
-         * If it's a zombie, our attachedness prevented normal
-         * parent notification or self-reaping.  Do notification
-         * now if it would have happened earlier.  If it should
-         * reap itself we return true.
-         *
-         * If it's our own child, there is no notification to do.
-         * But if our normal children self-reap, then this child
-         * was prevented by ptrace and we must reap it now.
-         */
-        if (!task_detached(p) && thread_group_empty(p)) {
-                if (!same_thread_group(p->real_parent, tracer))
-                        do_notify_parent(p, p->exit_signal);
-                else if (ignoring_children(tracer->sighand))
-                        p->exit_signal = -1;
-        }
-
-        if (!task_detached(p))
-                return 0;
-
-        /* Mark it as in the process of being reaped. */
-        p->exit_state = EXIT_DEAD;
-        return 1;
-}
-
-/*
- * Detach all tasks we were using ptrace on.
- * Any that need to be release_task'd are put on the @dead list.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
-{
-        struct task_struct *p, *n;
-
-        list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
-                if (__ptrace_detach(parent, p))
-                        list_add(&p->ptrace_entry, dead);
-        }
-}
-
-/*
- * Finish up exit-time ptrace cleanup.
- *
- * Called without locks.
- */
-static void ptrace_exit_finish(struct task_struct *parent,
-                               struct list_head *dead)
-{
-        struct task_struct *p, *n;
-
-        BUG_ON(!list_empty(&parent->ptraced));
-
-        list_for_each_entry_safe(p, n, dead, ptrace_entry) {
-                list_del_init(&p->ptrace_entry);
-                release_task(p);
-        }
-}
-
 /* Returns nonzero if the child should be released. */
 static int reparent_thread(struct task_struct *p, struct task_struct *father)
 {
@@ -894,12 +810,10 @@ static void forget_original_parent(struct task_struct *father)
         struct task_struct *p, *n, *reaper;
         LIST_HEAD(ptrace_dead);
 
+        exit_ptrace(father);
+
         write_lock_irq(&tasklist_lock);
         reaper = find_new_reaper(father);
-        /*
-         * First clean up ptrace if we were using it.
-         */
-        ptrace_exit(father, &ptrace_dead);
 
         list_for_each_entry_safe(p, n, &father->children, sibling) {
                 p->real_parent = reaper;
@@ -914,7 +828,10 @@ static void forget_original_parent(struct task_struct *father)
         write_unlock_irq(&tasklist_lock);
         BUG_ON(!list_empty(&father->children));
 
-        ptrace_exit_finish(father, &ptrace_dead);
+        list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
+                list_del_init(&p->ptrace_entry);
+                release_task(p);
+        }
 }
 
 /*
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index ee553b6ad125..f5a9fa5aafa1 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -235,9 +235,57 @@ out:
         return retval;
 }
 
+/*
+ * Called with irqs disabled, returns true if childs should reap themselves.
+ */
+static int ignoring_children(struct sighand_struct *sigh)
+{
+        int ret;
+        spin_lock(&sigh->siglock);
+        ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
+              (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
+        spin_unlock(&sigh->siglock);
+        return ret;
+}
+
+/*
+ * Called with tasklist_lock held for writing.
+ * Unlink a traced task, and clean it up if it was a traced zombie.
+ * Return true if it needs to be reaped with release_task().
+ * (We can't call release_task() here because we already hold tasklist_lock.)
+ *
+ * If it's a zombie, our attachedness prevented normal parent notification
+ * or self-reaping.  Do notification now if it would have happened earlier.
+ * If it should reap itself, return true.
+ *
+ * If it's our own child, there is no notification to do.
+ * But if our normal children self-reap, then this child
+ * was prevented by ptrace and we must reap it now.
+ */
+static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
+{
+        __ptrace_unlink(p);
+
+        if (p->exit_state == EXIT_ZOMBIE) {
+                if (!task_detached(p) && thread_group_empty(p)) {
+                        if (!same_thread_group(p->real_parent, tracer))
+                                do_notify_parent(p, p->exit_signal);
+                        else if (ignoring_children(tracer->sighand))
+                                p->exit_signal = -1;
+                }
+                if (task_detached(p)) {
+                        /* Mark it as in the process of being reaped. */
+                        p->exit_state = EXIT_DEAD;
+                        return true;
+                }
+        }
+
+        return false;
+}
+
 int ptrace_detach(struct task_struct *child, unsigned int data)
 {
-        int dead = 0;
+        bool dead = false;
 
         if (!valid_signal(data))
                 return -EIO;
@@ -247,7 +295,10 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
         clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 
         write_lock_irq(&tasklist_lock);
-        /* protect against de_thread()->release_task() */
+        /*
+         * This child can be already killed. Make sure de_thread() or
+         * our sub-thread doing do_wait() didn't do release_task() yet.
+         */
         if (child->ptrace) {
                 child->exit_code = data;
 
@@ -264,6 +315,29 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
         return 0;
 }
 
+/*
+ * Detach all tasks we were using ptrace on.
+ */
+void exit_ptrace(struct task_struct *tracer)
+{
+        struct task_struct *p, *n;
+        LIST_HEAD(ptrace_dead);
+
+        write_lock_irq(&tasklist_lock);
+        list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
+                if (__ptrace_detach(tracer, p))
+                        list_add(&p->ptrace_entry, &ptrace_dead);
+        }
+        write_unlock_irq(&tasklist_lock);
+
+        BUG_ON(!list_empty(&tracer->ptraced));
+
+        list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
+                list_del_init(&p->ptrace_entry);
+                release_task(p);
+        }
+}
+
 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
 {
         int copied = 0;