author		Oleg Nesterov <oleg@tv-sign.ru>	2008-07-25 04:47:41 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-25 13:53:39 -0400
commit		999d9fc1670bc082928b93b11d1f2e0e417d973c (patch)
tree		e540e7fd2fab970ba2be5e39ac9f8282a373bc24 /fs/exec.c
parent		32ecb1f26dd50eeaac4e3f4dea4541c97848e459 (diff)
coredump: move mm->core_waiters into struct core_state
Move mm->core_waiters into "struct core_state" allocated on stack. This shrinks mm_struct a little bit and allows further changes.

This patch mostly does s/core_waiters/core_state. The only essential change is that coredump_wait() must clear mm->core_state before return.

The coredump_wait() path is uglified and .text grows by 30 bytes; this is fixed by the next patch.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
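The struct core_state definition itself is not part of this view (the diff is limited to fs/exec.c), but the fields used in the hunks below suggest roughly the following shape. This is an illustrative sketch only; the exact in-tree definition may differ:

	/* sketch: on-stack state owned by the thread driving the core dump */
	struct core_state {
		int nr_threads;			/* takes over the old mm->core_waiters count */
		struct completion startup;	/* dumper waits here until the other threads check in */
	};

	/* in mm_struct, the core_waiters counter is replaced by a pointer to the dumper's stack */
	struct core_state *core_state;		/* non-NULL only while a core dump is in progress */

Because the state now lives on the dumping thread's stack, the pointer must be reset to NULL on every exit path of coredump_wait(), which is the "essential change" mentioned above.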
Diffstat (limited to 'fs/exec.c')
-rw-r--r--	fs/exec.c	21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/fs/exec.c b/fs/exec.c
index 71734568f018..50de3aaff4d0 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -722,12 +722,10 @@ static int exec_mmap(struct mm_struct *mm)
 		 * Make sure that if there is a core dump in progress
 		 * for the old mm, we get out and die instead of going
 		 * through with the exec.  We must hold mmap_sem around
-		 * checking core_waiters and changing tsk->mm.  The
-		 * core-inducing thread will increment core_waiters for
-		 * each thread whose ->mm == old_mm.
+		 * checking core_state and changing tsk->mm.
 		 */
 		down_read(&old_mm->mmap_sem);
-		if (unlikely(old_mm->core_waiters)) {
+		if (unlikely(old_mm->core_state)) {
 			up_read(&old_mm->mmap_sem);
 			return -EINTR;
 		}
@@ -1514,7 +1512,7 @@ static void zap_process(struct task_struct *start)
 	t = start;
 	do {
 		if (t != current && t->mm) {
-			t->mm->core_waiters++;
+			t->mm->core_state->nr_threads++;
 			sigaddset(&t->pending.signal, SIGKILL);
 			signal_wake_up(t, 1);
 		}
@@ -1538,11 +1536,11 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
 	if (err)
 		return err;
 
-	if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
+	if (atomic_read(&mm->mm_users) == mm->core_state->nr_threads + 1)
 		goto done;
 	/*
 	 * We should find and kill all tasks which use this mm, and we should
-	 * count them correctly into mm->core_waiters. We don't take tasklist
+	 * count them correctly into ->nr_threads. We don't take tasklist
 	 * lock, but this is safe wrt:
 	 *
 	 * fork:
@@ -1590,7 +1588,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
 	}
 	rcu_read_unlock();
 done:
-	return mm->core_waiters;
+	return mm->core_state->nr_threads;
 }
 
 static int coredump_wait(int exit_code)
@@ -1603,9 +1601,12 @@ static int coredump_wait(int exit_code)
 
 	init_completion(&mm->core_done);
 	init_completion(&core_state.startup);
+	core_state.nr_threads = 0;
 	mm->core_state = &core_state;
 
 	core_waiters = zap_threads(tsk, mm, exit_code);
+	if (core_waiters < 0)
+		mm->core_state = NULL;
 	up_write(&mm->mmap_sem);
 
 	if (unlikely(core_waiters < 0))
@@ -1623,8 +1624,8 @@ static int coredump_wait(int exit_code)
 
 	if (core_waiters)
 		wait_for_completion(&core_state.startup);
+	mm->core_state = NULL;
 fail:
-	BUG_ON(mm->core_waiters);
 	return core_waiters;
 }
 
@@ -1702,7 +1703,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
 	/*
 	 * If another thread got here first, or we are not dumpable, bail out.
 	 */
-	if (mm->core_waiters || !get_dumpable(mm)) {
+	if (mm->core_state || !get_dumpable(mm)) {
 		up_write(&mm->mmap_sem);
 		goto fail;
 	}