author     Oleg Nesterov <oleg@tv-sign.ru>        2006-06-26 03:26:07 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-06-26 12:58:27 -0400
commit     d5f70c00ad24cd1158d3678b44ff969b4c971d49
tree       b4b71a71b413250fb24cb2d83cbbfd7507da9efc /fs
parent     281de339ceb822ca6c04d4373ecb9a45c1890ce4
[PATCH] coredump: kill ptrace related stuff
With this patch zap_process() sets SIGNAL_GROUP_EXIT while sending SIGKILL to
the thread group.  This means that a TASK_TRACED task:

  1. Will be awakened by signal_wake_up(1)
  2. Can't sleep again via ptrace_notify()
  3. Can't go to do_signal_stop() after return from ptrace_stop()
     in get_signal_to_deliver()

So we can remove all ptrace-related stuff from the coredump path.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs')
-rw-r--r--   fs/exec.c   30
1 file changed, 5 insertions(+), 25 deletions(-)
diff --git a/fs/exec.c b/fs/exec.c
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1368,12 +1368,14 @@ static void format_corename(char *corename, const char *pattern, long signr)
 	*out_ptr = 0;
 }
 
-static void zap_process(struct task_struct *start, int *ptraced)
+static void zap_process(struct task_struct *start)
 {
 	struct task_struct *t;
 	unsigned long flags;
 
 	spin_lock_irqsave(&start->sighand->siglock, flags);
+	start->signal->flags = SIGNAL_GROUP_EXIT;
+	start->signal->group_stop_count = 0;
 
 	t = start;
 	do {
@@ -1381,22 +1383,17 @@ static void zap_process(struct task_struct *start, int *ptraced)
 			t->mm->core_waiters++;
 			sigaddset(&t->pending.signal, SIGKILL);
 			signal_wake_up(t, 1);
-
-			if (unlikely(t->ptrace) &&
-			    unlikely(t->parent->mm == t->mm))
-				*ptraced = 1;
 		}
 	} while ((t = next_thread(t)) != start);
 
 	spin_unlock_irqrestore(&start->sighand->siglock, flags);
 }
 
-static void zap_threads (struct mm_struct *mm)
+static void zap_threads(struct mm_struct *mm)
 {
 	struct task_struct *g, *p;
 	struct task_struct *tsk = current;
 	struct completion *vfork_done = tsk->vfork_done;
-	int traced = 0;
 
 	/*
 	 * Make sure nobody is waiting for us to release the VM,
@@ -1413,29 +1410,12 @@ static void zap_threads (struct mm_struct *mm)
 		do {
 			if (p->mm) {
 				if (p->mm == mm)
-					zap_process(p, &traced);
+					zap_process(p);
 				break;
 			}
 		} while ((p = next_thread(p)) != g);
 	}
 	read_unlock(&tasklist_lock);
-
-	if (unlikely(traced)) {
-		/*
-		 * We are zapping a thread and the thread it ptraces.
-		 * If the tracee went into a ptrace stop for exit tracing,
-		 * we could deadlock since the tracer is waiting for this
-		 * coredump to finish.  Detach them so they can both die.
-		 */
-		write_lock_irq(&tasklist_lock);
-		do_each_thread(g,p) {
-			if (mm == p->mm && p != tsk &&
-			    p->ptrace && p->parent->mm == mm) {
-				__ptrace_detach(p, 0);
-			}
-		} while_each_thread(g,p);
-		write_unlock_irq(&tasklist_lock);
-	}
 }
 
 static void coredump_wait(struct mm_struct *mm)
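
For reference, this is roughly how zap_process() reads once the patch is applied, reconstructed from the two hunks above.  The guard around the inner block, if (t != current && t->mm), sits between the hunks and is not visible in the diff context, so that one line is an assumption based on the surrounding kernel code; everything else comes straight from the context and "+" lines.

static void zap_process(struct task_struct *start)
{
	struct task_struct *t;
	unsigned long flags;

	spin_lock_irqsave(&start->sighand->siglock, flags);
	/* Mark the whole group as exiting before any thread is woken. */
	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		/* Assumed guard: this line is not shown in the diff context above. */
		if (t != current && t->mm) {
			t->mm->core_waiters++;
			/* Queue SIGKILL and wake the thread even if it is TASK_TRACED. */
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
		}
	} while ((t = next_thread(t)) != start);

	spin_unlock_irqrestore(&start->sighand->siglock, flags);
}

Because SIGNAL_GROUP_EXIT is set under siglock before any thread is woken, a traced thread returning from ptrace_stop() sees the group exit in progress and cannot stop again, which is why the __ptrace_detach() pass removed from zap_threads() is no longer needed.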