diff options
author | Oleg Nesterov <oleg@tv-sign.ru> | 2006-06-26 03:26:07 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-26 12:58:27 -0400 |
commit | d5f70c00ad24cd1158d3678b44ff969b4c971d49 (patch) | |
tree | b4b71a71b413250fb24cb2d83cbbfd7507da9efc | |
parent | 281de339ceb822ca6c04d4373ecb9a45c1890ce4 (diff) |
[PATCH] coredump: kill ptrace related stuff
With this patch zap_process() sets SIGNAL_GROUP_EXIT while sending SIGKILL to
the thread group. This means that a TASK_TRACED task:
1. Will be awakened by signal_wake_up(1)
2. Can't sleep again via ptrace_notify()
3. Can't go to do_signal_stop() after return from ptrace_stop() in get_signal_to_deliver()
So we can remove all ptrace-related stuff from the coredump path.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | fs/exec.c | 30 | ||||
-rw-r--r-- | include/linux/ptrace.h | 1 | ||||
-rw-r--r-- | kernel/ptrace.c | 3 | ||||
-rw-r--r-- | kernel/signal.c | 35 |
4 files changed, 37 insertions, 32 deletions
@@ -1368,12 +1368,14 @@ static void format_corename(char *corename, const char *pattern, long signr) | |||
1368 | *out_ptr = 0; | 1368 | *out_ptr = 0; |
1369 | } | 1369 | } |
1370 | 1370 | ||
1371 | static void zap_process(struct task_struct *start, int *ptraced) | 1371 | static void zap_process(struct task_struct *start) |
1372 | { | 1372 | { |
1373 | struct task_struct *t; | 1373 | struct task_struct *t; |
1374 | unsigned long flags; | 1374 | unsigned long flags; |
1375 | 1375 | ||
1376 | spin_lock_irqsave(&start->sighand->siglock, flags); | 1376 | spin_lock_irqsave(&start->sighand->siglock, flags); |
1377 | start->signal->flags = SIGNAL_GROUP_EXIT; | ||
1378 | start->signal->group_stop_count = 0; | ||
1377 | 1379 | ||
1378 | t = start; | 1380 | t = start; |
1379 | do { | 1381 | do { |
@@ -1381,22 +1383,17 @@ static void zap_process(struct task_struct *start, int *ptraced) | |||
1381 | t->mm->core_waiters++; | 1383 | t->mm->core_waiters++; |
1382 | sigaddset(&t->pending.signal, SIGKILL); | 1384 | sigaddset(&t->pending.signal, SIGKILL); |
1383 | signal_wake_up(t, 1); | 1385 | signal_wake_up(t, 1); |
1384 | |||
1385 | if (unlikely(t->ptrace) && | ||
1386 | unlikely(t->parent->mm == t->mm)) | ||
1387 | *ptraced = 1; | ||
1388 | } | 1386 | } |
1389 | } while ((t = next_thread(t)) != start); | 1387 | } while ((t = next_thread(t)) != start); |
1390 | 1388 | ||
1391 | spin_unlock_irqrestore(&start->sighand->siglock, flags); | 1389 | spin_unlock_irqrestore(&start->sighand->siglock, flags); |
1392 | } | 1390 | } |
1393 | 1391 | ||
1394 | static void zap_threads (struct mm_struct *mm) | 1392 | static void zap_threads(struct mm_struct *mm) |
1395 | { | 1393 | { |
1396 | struct task_struct *g, *p; | 1394 | struct task_struct *g, *p; |
1397 | struct task_struct *tsk = current; | 1395 | struct task_struct *tsk = current; |
1398 | struct completion *vfork_done = tsk->vfork_done; | 1396 | struct completion *vfork_done = tsk->vfork_done; |
1399 | int traced = 0; | ||
1400 | 1397 | ||
1401 | /* | 1398 | /* |
1402 | * Make sure nobody is waiting for us to release the VM, | 1399 | * Make sure nobody is waiting for us to release the VM, |
@@ -1413,29 +1410,12 @@ static void zap_threads (struct mm_struct *mm) | |||
1413 | do { | 1410 | do { |
1414 | if (p->mm) { | 1411 | if (p->mm) { |
1415 | if (p->mm == mm) | 1412 | if (p->mm == mm) |
1416 | zap_process(p, &traced); | 1413 | zap_process(p); |
1417 | break; | 1414 | break; |
1418 | } | 1415 | } |
1419 | } while ((p = next_thread(p)) != g); | 1416 | } while ((p = next_thread(p)) != g); |
1420 | } | 1417 | } |
1421 | read_unlock(&tasklist_lock); | 1418 | read_unlock(&tasklist_lock); |
1422 | |||
1423 | if (unlikely(traced)) { | ||
1424 | /* | ||
1425 | * We are zapping a thread and the thread it ptraces. | ||
1426 | * If the tracee went into a ptrace stop for exit tracing, | ||
1427 | * we could deadlock since the tracer is waiting for this | ||
1428 | * coredump to finish. Detach them so they can both die. | ||
1429 | */ | ||
1430 | write_lock_irq(&tasklist_lock); | ||
1431 | do_each_thread(g,p) { | ||
1432 | if (mm == p->mm && p != tsk && | ||
1433 | p->ptrace && p->parent->mm == mm) { | ||
1434 | __ptrace_detach(p, 0); | ||
1435 | } | ||
1436 | } while_each_thread(g,p); | ||
1437 | write_unlock_irq(&tasklist_lock); | ||
1438 | } | ||
1439 | } | 1419 | } |
1440 | 1420 | ||
1441 | static void coredump_wait(struct mm_struct *mm) | 1421 | static void coredump_wait(struct mm_struct *mm) |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index ee918bc6e18c..8b2749a259dc 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -88,7 +88,6 @@ extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __us | |||
88 | extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); | 88 | extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); |
89 | extern int ptrace_attach(struct task_struct *tsk); | 89 | extern int ptrace_attach(struct task_struct *tsk); |
90 | extern int ptrace_detach(struct task_struct *, unsigned int); | 90 | extern int ptrace_detach(struct task_struct *, unsigned int); |
91 | extern void __ptrace_detach(struct task_struct *, unsigned int); | ||
92 | extern void ptrace_disable(struct task_struct *); | 91 | extern void ptrace_disable(struct task_struct *); |
93 | extern int ptrace_check_attach(struct task_struct *task, int kill); | 92 | extern int ptrace_check_attach(struct task_struct *task, int kill); |
94 | extern int ptrace_request(struct task_struct *child, long request, long addr, long data); | 93 | extern int ptrace_request(struct task_struct *child, long request, long addr, long data); |
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 6252d2fa2bf3..335c5b932e14 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -214,7 +214,7 @@ out: | |||
214 | return retval; | 214 | return retval; |
215 | } | 215 | } |
216 | 216 | ||
217 | void __ptrace_detach(struct task_struct *child, unsigned int data) | 217 | static inline void __ptrace_detach(struct task_struct *child, unsigned int data) |
218 | { | 218 | { |
219 | child->exit_code = data; | 219 | child->exit_code = data; |
220 | /* .. re-parent .. */ | 220 | /* .. re-parent .. */ |
@@ -233,6 +233,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data) | |||
233 | ptrace_disable(child); | 233 | ptrace_disable(child); |
234 | 234 | ||
235 | write_lock_irq(&tasklist_lock); | 235 | write_lock_irq(&tasklist_lock); |
236 | /* protect against de_thread()->release_task() */ | ||
236 | if (child->ptrace) | 237 | if (child->ptrace) |
237 | __ptrace_detach(child, data); | 238 | __ptrace_detach(child, data); |
238 | write_unlock_irq(&tasklist_lock); | 239 | write_unlock_irq(&tasklist_lock); |
diff --git a/kernel/signal.c b/kernel/signal.c index 1b3c921737e2..52adf53929f6 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -1531,6 +1531,35 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) | |||
1531 | spin_unlock_irqrestore(&sighand->siglock, flags); | 1531 | spin_unlock_irqrestore(&sighand->siglock, flags); |
1532 | } | 1532 | } |
1533 | 1533 | ||
1534 | static inline int may_ptrace_stop(void) | ||
1535 | { | ||
1536 | if (!likely(current->ptrace & PT_PTRACED)) | ||
1537 | return 0; | ||
1538 | |||
1539 | if (unlikely(current->parent == current->real_parent && | ||
1540 | (current->ptrace & PT_ATTACHED))) | ||
1541 | return 0; | ||
1542 | |||
1543 | if (unlikely(current->signal == current->parent->signal) && | ||
1544 | unlikely(current->signal->flags & SIGNAL_GROUP_EXIT)) | ||
1545 | return 0; | ||
1546 | |||
1547 | /* | ||
1548 | * Are we in the middle of do_coredump? | ||
1549 | * If so and our tracer is also part of the coredump stopping | ||
1550 | * is a deadlock situation, and pointless because our tracer | ||
1551 | * is dead so don't allow us to stop. | ||
1552 | * If SIGKILL was already sent before the caller unlocked | ||
1553 | * ->siglock we must see ->core_waiters != 0. Otherwise it | ||
1554 | * is safe to enter schedule(). | ||
1555 | */ | ||
1556 | if (unlikely(current->mm->core_waiters) && | ||
1557 | unlikely(current->mm == current->parent->mm)) | ||
1558 | return 0; | ||
1559 | |||
1560 | return 1; | ||
1561 | } | ||
1562 | |||
1534 | /* | 1563 | /* |
1535 | * This must be called with current->sighand->siglock held. | 1564 | * This must be called with current->sighand->siglock held. |
1536 | * | 1565 | * |
@@ -1559,11 +1588,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info) | |||
1559 | spin_unlock_irq(¤t->sighand->siglock); | 1588 | spin_unlock_irq(¤t->sighand->siglock); |
1560 | try_to_freeze(); | 1589 | try_to_freeze(); |
1561 | read_lock(&tasklist_lock); | 1590 | read_lock(&tasklist_lock); |
1562 | if (likely(current->ptrace & PT_PTRACED) && | 1591 | if (may_ptrace_stop()) { |
1563 | likely(current->parent != current->real_parent || | ||
1564 | !(current->ptrace & PT_ATTACHED)) && | ||
1565 | (likely(current->parent->signal != current->signal) || | ||
1566 | !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) { | ||
1567 | do_notify_parent_cldstop(current, CLD_TRAPPED); | 1592 | do_notify_parent_cldstop(current, CLD_TRAPPED); |
1568 | read_unlock(&tasklist_lock); | 1593 | read_unlock(&tasklist_lock); |
1569 | schedule(); | 1594 | schedule(); |