Diffstat (limited to 'fs/exec.c')
-rw-r--r--  fs/exec.c  157
1 file changed, 101 insertions(+), 56 deletions(-)
diff --git a/fs/exec.c b/fs/exec.c
index 606cf96828d5..029308754eea 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -46,7 +46,6 @@
 #include <linux/proc_fs.h>
 #include <linux/mount.h>
 #include <linux/security.h>
-#include <linux/ima.h>
 #include <linux/syscalls.h>
 #include <linux/tsacct_kern.h>
 #include <linux/cn_proc.h>
@@ -198,7 +197,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
          * to work from.
          */
         rlim = current->signal->rlim;
-        if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
+        if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
                 put_page(page);
                 return NULL;
         }
@@ -249,6 +248,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
         vma->vm_start = vma->vm_end - PAGE_SIZE;
         vma->vm_flags = VM_STACK_FLAGS;
         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+        INIT_LIST_HEAD(&vma->anon_vma_chain);
         err = insert_vm_struct(mm, vma);
         if (err)
                 goto err;
@@ -519,7 +519,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
         /*
          * cover the whole range: [new_start, old_end)
          */
-        vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);
+        if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
+                return -ENOMEM;
 
         /*
          * move the page tables downwards, on failure we rely on
@@ -550,15 +551,13 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
         tlb_finish_mmu(tlb, new_end, old_end);
 
         /*
-         * shrink the vma to just the new range.
+         * Shrink the vma to just the new range. Always succeeds.
          */
         vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
 
         return 0;
 }
 
-#define EXTRA_STACK_VM_PAGES        20        /* random */
-
 /*
  * Finalizes the stack vm_area_struct. The flags and permissions are updated,
  * the stack is optionally relocated, and some extra space is added.
@@ -574,10 +573,13 @@ int setup_arg_pages(struct linux_binprm *bprm,
         struct vm_area_struct *prev = NULL;
         unsigned long vm_flags;
         unsigned long stack_base;
+        unsigned long stack_size;
+        unsigned long stack_expand;
+        unsigned long rlim_stack;
 
 #ifdef CONFIG_STACK_GROWSUP
         /* Limit stack size to 1GB */
-        stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
+        stack_base = rlimit_max(RLIMIT_STACK);
         if (stack_base > (1 << 30))
                 stack_base = 1 << 30;
 
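
The rlimit() and rlimit_max() calls introduced above replace open-coded
reads of current->signal->rlim[]. As a rough sketch, assuming the
sched.h accessors of this kernel generation (they are not part of this
diff), they reduce to:

        /* Hedged sketch of the accessors this diff relies on. */
        static inline unsigned long task_rlimit(const struct task_struct *tsk,
                                                unsigned int limit)
        {
                return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
        }

        static inline unsigned long rlimit(unsigned int limit)
        {
                return task_rlimit(current, limit);
        }

rlimit_max() reads rlim_max the same way. The ACCESS_ONCE() forces a
single load, so a check and a later use cannot observe two different
values of a limit another thread is updating; the same idiom is applied
directly in get_arg_page() above.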
@@ -630,10 +632,23 @@ int setup_arg_pages(struct linux_binprm *bprm,
                 goto out_unlock;
         }
 
+        stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
+        stack_size = vma->vm_end - vma->vm_start;
+        /*
+         * Align this down to a page boundary as expand_stack
+         * will align it up.
+         */
+        rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
 #ifdef CONFIG_STACK_GROWSUP
-        stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+        if (stack_size + stack_expand > rlim_stack)
+                stack_base = vma->vm_start + rlim_stack;
+        else
+                stack_base = vma->vm_end + stack_expand;
 #else
-        stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+        if (stack_size + stack_expand > rlim_stack)
+                stack_base = vma->vm_end - rlim_stack;
+        else
+                stack_base = vma->vm_start - stack_expand;
 #endif
         ret = expand_stack(vma, stack_base);
         if (ret)
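
A worked example of the new clamping, for the grows-down case: with
RLIMIT_STACK at 8 MiB and, say, 32 KiB of arguments already copied in,
stack_size + stack_expand is 160 KiB, well under rlim_stack, so
stack_base = vma->vm_start - 131072 and the stack vma simply gains
128 KiB of headroom. If RLIMIT_STACK were only 64 KiB, 32 KiB + 128 KiB
would exceed it, so stack_base = vma->vm_end - rlim_stack and the
expanded vma is capped at exactly the page-aligned rlimit, where the
old unconditional EXTRA_STACK_VM_PAGES arithmetic could overshoot the
limit and make expand_stack() fail.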
@@ -705,6 +720,7 @@ static int exec_mmap(struct mm_struct *mm)
         /* Notify parent that we're no longer interested in the old VM */
         tsk = current;
         old_mm = current->mm;
+        sync_mm_rss(tsk, old_mm);
         mm_release(tsk, old_mm);
 
         if (old_mm) {
@@ -829,7 +845,9 @@ static int de_thread(struct task_struct *tsk)
         attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
         transfer_pid(leader, tsk, PIDTYPE_PGID);
         transfer_pid(leader, tsk, PIDTYPE_SID);
+
         list_replace_rcu(&leader->tasks, &tsk->tasks);
+        list_replace_init(&leader->sibling, &tsk->sibling);
 
         tsk->group_leader = tsk;
         leader->group_leader = tsk;
@@ -926,6 +944,15 @@ char *get_task_comm(char *buf, struct task_struct *tsk)
 void set_task_comm(struct task_struct *tsk, char *buf)
 {
         task_lock(tsk);
+
+        /*
+         * Threads may access current->comm without holding
+         * the task lock, so write the string carefully.
+         * Readers without a lock may see incomplete new
+         * names but are safe from non-terminating string reads.
+         */
+        memset(tsk->comm, 0, TASK_COMM_LEN);
+        wmb();
         strlcpy(tsk->comm, buf, sizeof(tsk->comm));
         task_unlock(tsk);
         perf_event_comm(tsk);
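
The memset()/wmb() pair above is what makes the comment's promise hold:
the buffer is zeroed first, the zeroes are ordered before the copy, and
strlcpy() never touches the final byte unless it is writing a NUL
there. A hypothetical lockless reader (illustration only, not part of
this diff) is therefore safe:

        char name[TASK_COMM_LEN];

        /*
         * May observe a torn mix of the old and new name, but some
         * byte within TASK_COMM_LEN is always NUL, so this copy
         * cannot run off the end of tsk->comm.
         */
        strlcpy(name, tsk->comm, sizeof(name));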
@@ -933,9 +960,7 @@ void set_task_comm(struct task_struct *tsk, char *buf)
 
 int flush_old_exec(struct linux_binprm * bprm)
 {
-        char * name;
-        int i, ch, retval;
-        char tcomm[sizeof(current->comm)];
+        int retval;
 
         /*
          * Make sure we have a private signal table and that
@@ -956,6 +981,25 @@ int flush_old_exec(struct linux_binprm * bprm)
 
         bprm->mm = NULL;                /* We're using it now */
 
+        current->flags &= ~PF_RANDOMIZE;
+        flush_thread();
+        current->personality &= ~bprm->per_clear;
+
+        return 0;
+
+out:
+        return retval;
+}
+EXPORT_SYMBOL(flush_old_exec);
+
+void setup_new_exec(struct linux_binprm * bprm)
+{
+        int i, ch;
+        char * name;
+        char tcomm[sizeof(current->comm)];
+
+        arch_pick_mmap_layout(current->mm);
+
         /* This is the point of no return */
         current->sas_ss_sp = current->sas_ss_size = 0;
 
@@ -977,9 +1021,6 @@ int flush_old_exec(struct linux_binprm * bprm)
         tcomm[i] = '\0';
         set_task_comm(current, tcomm);
 
-        current->flags &= ~PF_RANDOMIZE;
-        flush_thread();
-
         /* Set the new mm task size. We have to do that late because it may
          * depend on TIF_32BIT which is only updated in flush_thread() on
          * some architectures like powerpc
@@ -995,8 +1036,6 @@ int flush_old_exec(struct linux_binprm * bprm)
                 set_dumpable(current->mm, suid_dumpable);
         }
 
-        current->personality &= ~bprm->per_clear;
-
         /*
          * Flush performance counters when crossing a
          * security domain:
@@ -1011,14 +1050,8 @@ int flush_old_exec(struct linux_binprm * bprm)
 
         flush_signal_handlers(current, 0);
         flush_old_files(current->files);
-
-        return 0;
-
-out:
-        return retval;
 }
-
-EXPORT_SYMBOL(flush_old_exec);
+EXPORT_SYMBOL(setup_new_exec);
 
 /*
  * Prepare credentials and lock ->cred_guard_mutex.
@@ -1211,9 +1244,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
         retval = security_bprm_check(bprm);
         if (retval)
                 return retval;
-        retval = ima_bprm_check(bprm);
-        if (retval)
-                return retval;
 
         /* kernel module loader fixup */
         /* so we don't try to load run modprobe in kernel space. */
@@ -1360,8 +1390,6 @@ int do_execve(char * filename,
         if (retval < 0)
                 goto out;
 
-        current->stack_start = current->mm->start_stack;
-
         /* execve succeeded */
         current->fs->in_exec = 0;
         current->in_execve = 0;
@@ -1506,7 +1534,7 @@ static int format_corename(char *corename, long signr)
                 /* core limit size */
                 case 'c':
                         rc = snprintf(out_ptr, out_end - out_ptr,
-                                      "%lu", current->signal->rlim[RLIMIT_CORE].rlim_cur);
+                                      "%lu", rlimit(RLIMIT_CORE));
                         if (rc > out_end - out_ptr)
                                 goto out;
                         out_ptr += rc;
@@ -1534,12 +1562,13 @@ out:
         return ispipe;
 }
 
-static int zap_process(struct task_struct *start)
+static int zap_process(struct task_struct *start, int exit_code)
 {
         struct task_struct *t;
         int nr = 0;
 
         start->signal->flags = SIGNAL_GROUP_EXIT;
+        start->signal->group_exit_code = exit_code;
         start->signal->group_stop_count = 0;
 
         t = start;
@@ -1564,8 +1593,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
         spin_lock_irq(&tsk->sighand->siglock);
         if (!signal_group_exit(tsk->signal)) {
                 mm->core_state = core_state;
-                tsk->signal->group_exit_code = exit_code;
-                nr = zap_process(tsk);
+                nr = zap_process(tsk, exit_code);
         }
         spin_unlock_irq(&tsk->sighand->siglock);
         if (unlikely(nr < 0))
@@ -1614,7 +1642,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
                 if (p->mm) {
                         if (unlikely(p->mm == mm)) {
                                 lock_task_sighand(p, &flags);
-                                nr += zap_process(p);
+                                nr += zap_process(p, exit_code);
                                 unlock_task_sighand(p, &flags);
                         }
                         break;
@@ -1721,14 +1749,19 @@ void set_dumpable(struct mm_struct *mm, int value)
         }
 }
 
-int get_dumpable(struct mm_struct *mm)
+static int __get_dumpable(unsigned long mm_flags)
 {
         int ret;
 
-        ret = mm->flags & 0x3;
+        ret = mm_flags & MMF_DUMPABLE_MASK;
         return (ret >= 2) ? 2 : ret;
 }
 
+int get_dumpable(struct mm_struct *mm)
+{
+        return __get_dumpable(mm->flags);
+}
+
 static void wait_for_dump_helpers(struct file *file)
 {
         struct pipe_inode_info *pipe;
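
The factored-out helper keeps the old saturating behaviour: the two
MMF_DUMPABLE bits encode four values, and the unused value 3 is folded
into the setuid-safe mode. As an illustration (assuming
MMF_DUMPABLE_MASK covers the low two bits, as in this era):

        /*
         * __get_dumpable(0) == 0        not dumpable
         * __get_dumpable(1) == 1        normal core dump
         * __get_dumpable(2) == 2        dump as root, open with O_EXCL
         * __get_dumpable(3) == 2        clamped to the safe mode
         */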
@@ -1759,17 +1792,26 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
         struct mm_struct *mm = current->mm;
         struct linux_binfmt * binfmt;
         struct inode * inode;
-        struct file * file;
         const struct cred *old_cred;
         struct cred *cred;
         int retval = 0;
         int flag = 0;
         int ispipe = 0;
-        unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
         char **helper_argv = NULL;
         int helper_argc = 0;
         int dump_count = 0;
         static atomic_t core_dump_count = ATOMIC_INIT(0);
+        struct coredump_params cprm = {
+                .signr = signr,
+                .regs = regs,
+                .limit = rlimit(RLIMIT_CORE),
+                /*
+                 * We must use the same mm->flags while dumping core to avoid
+                 * inconsistency of bit flags, since this flag is not protected
+                 * by any locks.
+                 */
+                .mm_flags = mm->flags,
+        };
 
         audit_core_dumps(signr);
 
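
Snapshotting mm->flags into cprm.mm_flags matters because dumpability
can change while the dump runs. A sketch of the race the comment is
guarding against (illustration only, not part of this diff):

        /*
         * Without the snapshot, two reads of mm->flags could straddle
         * a concurrent prctl(PR_SET_DUMPABLE) and disagree:
         *
         *        if (!get_dumpable(mm))          // first read sees 1
         *                goto fail;
         *        ...
         *        if (get_dumpable(mm) == 2)      // second read sees 2
         *                flag = O_EXCL;
         *
         * Testing one captured value with __get_dumpable() keeps every
         * decision taken during the dump mutually consistent.
         */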
@@ -1787,7 +1829,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
         /*
          * If another thread got here first, or we are not dumpable, bail out.
          */
-        if (mm->core_state || !get_dumpable(mm)) {
+        if (mm->core_state || !__get_dumpable(cprm.mm_flags)) {
                 up_write(&mm->mmap_sem);
                 put_cred(cred);
                 goto fail;
@@ -1798,7 +1840,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
          * process nor do we know its entire history. We only know it
          * was tainted so we dump it as root in mode 2.
          */
-        if (get_dumpable(mm) == 2) {        /* Setuid core dump mode */
+        if (__get_dumpable(cprm.mm_flags) == 2) {
+                /* Setuid core dump mode */
                 flag = O_EXCL;                /* Stop rewrite attacks */
                 cred->fsuid = 0;        /* Dump root private */
         }
@@ -1825,15 +1868,15 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
         ispipe = format_corename(corename, signr);
         unlock_kernel();
 
-        if ((!ispipe) && (core_limit < binfmt->min_coredump))
+        if ((!ispipe) && (cprm.limit < binfmt->min_coredump))
                 goto fail_unlock;
 
         if (ispipe) {
-                if (core_limit == 0) {
+                if (cprm.limit == 0) {
                         /*
                          * Normally core limits are irrelevant to pipes, since
                          * we're not writing to the file system, but we use
-                         * core_limit of 0 here as a speacial value. Any
+                         * cprm.limit of 0 here as a speacial value. Any
                          * non-zero limit gets set to RLIM_INFINITY below, but
                          * a limit of 0 skips the dump. This is a consistent
                          * way to catch recursive crashes. We can still crash
@@ -1866,25 +1909,25 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
                         goto fail_dropcount;
                 }
 
-                core_limit = RLIM_INFINITY;
+                cprm.limit = RLIM_INFINITY;
 
                 /* SIGPIPE can happen, but it's just never processed */
                 if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL,
-                                &file)) {
+                                &cprm.file)) {
                         printk(KERN_INFO "Core dump to %s pipe failed\n",
                                corename);
                         goto fail_dropcount;
                 }
         } else
-                file = filp_open(corename,
+                cprm.file = filp_open(corename,
                          O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
                          0600);
-        if (IS_ERR(file))
+        if (IS_ERR(cprm.file))
                 goto fail_dropcount;
-        inode = file->f_path.dentry->d_inode;
+        inode = cprm.file->f_path.dentry->d_inode;
         if (inode->i_nlink > 1)
                 goto close_fail;        /* multiple links - don't dump */
-        if (!ispipe && d_unhashed(file->f_path.dentry))
+        if (!ispipe && d_unhashed(cprm.file->f_path.dentry))
                 goto close_fail;
 
         /* AK: actually i see no reason to not allow this for named pipes etc.,
@@ -1894,24 +1937,26 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
         /*
          * Dont allow local users get cute and trick others to coredump
          * into their pre-created files:
+         * Note, this is not relevant for pipes
          */
-        if (inode->i_uid != current_fsuid())
+        if (!ispipe && (inode->i_uid != current_fsuid()))
                 goto close_fail;
-        if (!file->f_op)
+        if (!cprm.file->f_op)
                 goto close_fail;
-        if (!file->f_op->write)
+        if (!cprm.file->f_op->write)
                 goto close_fail;
-        if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
+        if (!ispipe &&
+            do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file) != 0)
                 goto close_fail;
 
-        retval = binfmt->core_dump(signr, regs, file, core_limit);
+        retval = binfmt->core_dump(&cprm);
 
         if (retval)
                 current->signal->group_exit_code |= 0x80;
 close_fail:
         if (ispipe && core_pipe_limit)
-                wait_for_dump_helpers(file);
-        filp_close(file, NULL);
+                wait_for_dump_helpers(cprm.file);
+        filp_close(cprm.file, NULL);
 fail_dropcount:
         if (dump_count)
                 atomic_dec(&core_dump_count);