Diffstat (limited to 'fs')
-rw-r--r--	fs/exec.c	70
1 files changed, 40 insertions, 30 deletions
diff --git a/fs/exec.c b/fs/exec.c
index 49fa0127a330..8c8f2894949d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1393,20 +1393,22 @@ static void zap_process(struct task_struct *start)
 	unlock_task_sighand(start, &flags);
 }
 
-static void zap_threads(struct mm_struct *mm)
+static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
+				int exit_code)
 {
 	struct task_struct *g, *p;
-	struct task_struct *tsk = current;
-	struct completion *vfork_done = tsk->vfork_done;
+	int err = -EAGAIN;
 
-	/*
-	 * Make sure nobody is waiting for us to release the VM,
-	 * otherwise we can deadlock when we wait on each other
-	 */
-	if (vfork_done) {
-		tsk->vfork_done = NULL;
-		complete(vfork_done);
+	spin_lock_irq(&tsk->sighand->siglock);
+	if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
+		tsk->signal->flags = SIGNAL_GROUP_EXIT;
+		tsk->signal->group_exit_code = exit_code;
+		tsk->signal->group_stop_count = 0;
+		err = 0;
 	}
+	spin_unlock_irq(&tsk->sighand->siglock);
+	if (err)
+		return err;
 
 	rcu_read_lock();
 	for_each_process(g) {
@@ -1420,22 +1422,43 @@ static void zap_threads(struct mm_struct *mm)
 		} while ((p = next_thread(p)) != g);
 	}
 	rcu_read_unlock();
+
+	return mm->core_waiters;
 }
 
-static void coredump_wait(struct mm_struct *mm)
+static int coredump_wait(int exit_code)
 {
-	DECLARE_COMPLETION(startup_done);
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+	struct completion startup_done;
+	struct completion *vfork_done;
 	int core_waiters;
 
+	init_completion(&mm->core_done);
+	init_completion(&startup_done);
 	mm->core_startup_done = &startup_done;
 
-	zap_threads(mm);
-	core_waiters = mm->core_waiters;
+	core_waiters = zap_threads(tsk, mm, exit_code);
 	up_write(&mm->mmap_sem);
 
+	if (unlikely(core_waiters < 0))
+		goto fail;
+
+	/*
+	 * Make sure nobody is waiting for us to release the VM,
+	 * otherwise we can deadlock when we wait on each other
+	 */
+	vfork_done = tsk->vfork_done;
+	if (vfork_done) {
+		tsk->vfork_done = NULL;
+		complete(vfork_done);
+	}
+
 	if (core_waiters)
 		wait_for_completion(&startup_done);
+fail:
 	BUG_ON(mm->core_waiters);
+	return core_waiters;
 }
 
 int do_coredump(long signr, int exit_code, struct pt_regs * regs)
@@ -1469,22 +1492,9 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
 	}
 	mm->dumpable = 0;
 
-	retval = -EAGAIN;
-	spin_lock_irq(&current->sighand->siglock);
-	if (!(current->signal->flags & SIGNAL_GROUP_EXIT)) {
-		current->signal->flags = SIGNAL_GROUP_EXIT;
-		current->signal->group_exit_code = exit_code;
-		current->signal->group_stop_count = 0;
-		retval = 0;
-	}
-	spin_unlock_irq(&current->sighand->siglock);
-	if (retval) {
-		up_write(&mm->mmap_sem);
+	retval = coredump_wait(exit_code);
+	if (retval < 0)
 		goto fail;
-	}
-
-	init_completion(&mm->core_done);
-	coredump_wait(mm);
 
 	/*
 	 * Clear any false indication of pending signals that might