diff options
author | Oleg Nesterov <oleg@tv-sign.ru> | 2008-07-25 04:47:41 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-25 13:53:39 -0400 |
commit | 999d9fc1670bc082928b93b11d1f2e0e417d973c (patch) | |
tree | e540e7fd2fab970ba2be5e39ac9f8282a373bc24 | |
parent | 32ecb1f26dd50eeaac4e3f4dea4541c97848e459 (diff) |
coredump: move mm->core_waiters into struct core_state
Move mm->core_waiters into "struct core_state" allocated on the stack. This
shrinks mm_struct a little bit and allows further changes.
This patch mostly does s/core_waiters/core_state. The only essential
change is that coredump_wait() must clear mm->core_state before return.
The coredump_wait() path is uglified and .text grows by 30 bytes; this
is fixed by the next patch.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | fs/exec.c | 21 | ||||
-rw-r--r-- | include/linux/mm_types.h | 2 | ||||
-rw-r--r-- | kernel/exit.c | 8 | ||||
-rw-r--r-- | kernel/fork.c | 2 | ||||
-rw-r--r-- | kernel/signal.c | 4 |
5 files changed, 19 insertions, 18 deletions
@@ -722,12 +722,10 @@ static int exec_mmap(struct mm_struct *mm) | |||
722 | * Make sure that if there is a core dump in progress | 722 | * Make sure that if there is a core dump in progress |
723 | * for the old mm, we get out and die instead of going | 723 | * for the old mm, we get out and die instead of going |
724 | * through with the exec. We must hold mmap_sem around | 724 | * through with the exec. We must hold mmap_sem around |
725 | * checking core_waiters and changing tsk->mm. The | 725 | * checking core_state and changing tsk->mm. |
726 | * core-inducing thread will increment core_waiters for | ||
727 | * each thread whose ->mm == old_mm. | ||
728 | */ | 726 | */ |
729 | down_read(&old_mm->mmap_sem); | 727 | down_read(&old_mm->mmap_sem); |
730 | if (unlikely(old_mm->core_waiters)) { | 728 | if (unlikely(old_mm->core_state)) { |
731 | up_read(&old_mm->mmap_sem); | 729 | up_read(&old_mm->mmap_sem); |
732 | return -EINTR; | 730 | return -EINTR; |
733 | } | 731 | } |
@@ -1514,7 +1512,7 @@ static void zap_process(struct task_struct *start) | |||
1514 | t = start; | 1512 | t = start; |
1515 | do { | 1513 | do { |
1516 | if (t != current && t->mm) { | 1514 | if (t != current && t->mm) { |
1517 | t->mm->core_waiters++; | 1515 | t->mm->core_state->nr_threads++; |
1518 | sigaddset(&t->pending.signal, SIGKILL); | 1516 | sigaddset(&t->pending.signal, SIGKILL); |
1519 | signal_wake_up(t, 1); | 1517 | signal_wake_up(t, 1); |
1520 | } | 1518 | } |
@@ -1538,11 +1536,11 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm, | |||
1538 | if (err) | 1536 | if (err) |
1539 | return err; | 1537 | return err; |
1540 | 1538 | ||
1541 | if (atomic_read(&mm->mm_users) == mm->core_waiters + 1) | 1539 | if (atomic_read(&mm->mm_users) == mm->core_state->nr_threads + 1) |
1542 | goto done; | 1540 | goto done; |
1543 | /* | 1541 | /* |
1544 | * We should find and kill all tasks which use this mm, and we should | 1542 | * We should find and kill all tasks which use this mm, and we should |
1545 | * count them correctly into mm->core_waiters. We don't take tasklist | 1543 | * count them correctly into ->nr_threads. We don't take tasklist |
1546 | * lock, but this is safe wrt: | 1544 | * lock, but this is safe wrt: |
1547 | * | 1545 | * |
1548 | * fork: | 1546 | * fork: |
@@ -1590,7 +1588,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm, | |||
1590 | } | 1588 | } |
1591 | rcu_read_unlock(); | 1589 | rcu_read_unlock(); |
1592 | done: | 1590 | done: |
1593 | return mm->core_waiters; | 1591 | return mm->core_state->nr_threads; |
1594 | } | 1592 | } |
1595 | 1593 | ||
1596 | static int coredump_wait(int exit_code) | 1594 | static int coredump_wait(int exit_code) |
@@ -1603,9 +1601,12 @@ static int coredump_wait(int exit_code) | |||
1603 | 1601 | ||
1604 | init_completion(&mm->core_done); | 1602 | init_completion(&mm->core_done); |
1605 | init_completion(&core_state.startup); | 1603 | init_completion(&core_state.startup); |
1604 | core_state.nr_threads = 0; | ||
1606 | mm->core_state = &core_state; | 1605 | mm->core_state = &core_state; |
1607 | 1606 | ||
1608 | core_waiters = zap_threads(tsk, mm, exit_code); | 1607 | core_waiters = zap_threads(tsk, mm, exit_code); |
1608 | if (core_waiters < 0) | ||
1609 | mm->core_state = NULL; | ||
1609 | up_write(&mm->mmap_sem); | 1610 | up_write(&mm->mmap_sem); |
1610 | 1611 | ||
1611 | if (unlikely(core_waiters < 0)) | 1612 | if (unlikely(core_waiters < 0)) |
@@ -1623,8 +1624,8 @@ static int coredump_wait(int exit_code) | |||
1623 | 1624 | ||
1624 | if (core_waiters) | 1625 | if (core_waiters) |
1625 | wait_for_completion(&core_state.startup); | 1626 | wait_for_completion(&core_state.startup); |
1627 | mm->core_state = NULL; | ||
1626 | fail: | 1628 | fail: |
1627 | BUG_ON(mm->core_waiters); | ||
1628 | return core_waiters; | 1629 | return core_waiters; |
1629 | } | 1630 | } |
1630 | 1631 | ||
@@ -1702,7 +1703,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) | |||
1702 | /* | 1703 | /* |
1703 | * If another thread got here first, or we are not dumpable, bail out. | 1704 | * If another thread got here first, or we are not dumpable, bail out. |
1704 | */ | 1705 | */ |
1705 | if (mm->core_waiters || !get_dumpable(mm)) { | 1706 | if (mm->core_state || !get_dumpable(mm)) { |
1706 | up_write(&mm->mmap_sem); | 1707 | up_write(&mm->mmap_sem); |
1707 | goto fail; | 1708 | goto fail; |
1708 | } | 1709 | } |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 97819efd2333..c0b1747b61a5 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -160,6 +160,7 @@ struct vm_area_struct { | |||
160 | }; | 160 | }; |
161 | 161 | ||
162 | struct core_state { | 162 | struct core_state { |
163 | int nr_threads; | ||
163 | struct completion startup; | 164 | struct completion startup; |
164 | }; | 165 | }; |
165 | 166 | ||
@@ -179,7 +180,6 @@ struct mm_struct { | |||
179 | atomic_t mm_users; /* How many users with user space? */ | 180 | atomic_t mm_users; /* How many users with user space? */ |
180 | atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ | 181 | atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ |
181 | int map_count; /* number of VMAs */ | 182 | int map_count; /* number of VMAs */ |
182 | int core_waiters; | ||
183 | struct rw_semaphore mmap_sem; | 183 | struct rw_semaphore mmap_sem; |
184 | spinlock_t page_table_lock; /* Protects page tables and some counters */ | 184 | spinlock_t page_table_lock; /* Protects page tables and some counters */ |
185 | 185 | ||
diff --git a/kernel/exit.c b/kernel/exit.c index f7fa21dbced4..988e232254e9 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -670,16 +670,16 @@ static void exit_mm(struct task_struct * tsk) | |||
670 | return; | 670 | return; |
671 | /* | 671 | /* |
672 | * Serialize with any possible pending coredump. | 672 | * Serialize with any possible pending coredump. |
673 | * We must hold mmap_sem around checking core_waiters | 673 | * We must hold mmap_sem around checking core_state |
674 | * and clearing tsk->mm. The core-inducing thread | 674 | * and clearing tsk->mm. The core-inducing thread |
675 | * will increment core_waiters for each thread in the | 675 | * will increment ->nr_threads for each thread in the |
676 | * group with ->mm != NULL. | 676 | * group with ->mm != NULL. |
677 | */ | 677 | */ |
678 | down_read(&mm->mmap_sem); | 678 | down_read(&mm->mmap_sem); |
679 | if (mm->core_waiters) { | 679 | if (mm->core_state) { |
680 | up_read(&mm->mmap_sem); | 680 | up_read(&mm->mmap_sem); |
681 | down_write(&mm->mmap_sem); | 681 | down_write(&mm->mmap_sem); |
682 | if (!--mm->core_waiters) | 682 | if (!--mm->core_state->nr_threads) |
683 | complete(&mm->core_state->startup); | 683 | complete(&mm->core_state->startup); |
684 | up_write(&mm->mmap_sem); | 684 | up_write(&mm->mmap_sem); |
685 | 685 | ||
diff --git a/kernel/fork.c b/kernel/fork.c index eeaec6893b0d..813d5c89b9d5 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -400,7 +400,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) | |||
400 | INIT_LIST_HEAD(&mm->mmlist); | 400 | INIT_LIST_HEAD(&mm->mmlist); |
401 | mm->flags = (current->mm) ? current->mm->flags | 401 | mm->flags = (current->mm) ? current->mm->flags |
402 | : MMF_DUMP_FILTER_DEFAULT; | 402 | : MMF_DUMP_FILTER_DEFAULT; |
403 | mm->core_waiters = 0; | 403 | mm->core_state = NULL; |
404 | mm->nr_ptes = 0; | 404 | mm->nr_ptes = 0; |
405 | set_mm_counter(mm, file_rss, 0); | 405 | set_mm_counter(mm, file_rss, 0); |
406 | set_mm_counter(mm, anon_rss, 0); | 406 | set_mm_counter(mm, anon_rss, 0); |
diff --git a/kernel/signal.c b/kernel/signal.c index 39c1706edf03..5c7b7eaa0dc6 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -1480,10 +1480,10 @@ static inline int may_ptrace_stop(void) | |||
1480 | * is a deadlock situation, and pointless because our tracer | 1480 | * is a deadlock situation, and pointless because our tracer |
1481 | * is dead so don't allow us to stop. | 1481 | * is dead so don't allow us to stop. |
1482 | * If SIGKILL was already sent before the caller unlocked | 1482 | * If SIGKILL was already sent before the caller unlocked |
1483 | * ->siglock we must see ->core_waiters != 0. Otherwise it | 1483 | * ->siglock we must see ->core_state != NULL. Otherwise it |
1484 | * is safe to enter schedule(). | 1484 | * is safe to enter schedule(). |
1485 | */ | 1485 | */ |
1486 | if (unlikely(current->mm->core_waiters) && | 1486 | if (unlikely(current->mm->core_state) && |
1487 | unlikely(current->mm == current->parent->mm)) | 1487 | unlikely(current->mm == current->parent->mm)) |
1488 | return 0; | 1488 | return 0; |
1489 | 1489 | ||