author     Andrea Bastoni <bastoni@cs.unc.edu>  2010-10-23 01:01:49 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-10-23 01:01:49 -0400
commit     3dd41424090a0ca3a660218d06afe6ff4441bad3 (patch)
tree       511ef1bb1799027fc5aad574adce49120ecadd87 /fs/exec.c
parent     5c5456402d467969b217d7fdd6670f8c8600f5a8 (diff)
parent     f6f94e2ab1b33f0082ac22d71f66385a60d8157f (diff)
Merge commit 'v2.6.36' into wip-merge-2.6.36
Conflicts:
Makefile
arch/x86/include/asm/unistd_32.h
arch/x86/kernel/syscall_table_32.S
kernel/sched.c
kernel/time/tick-sched.c
Relevant API and function changes (resolved in this commit):
- (API) .enqueue_task() (enqueue_task_litmus),
dequeue_task() (dequeue_task_litmus),
[litmus/sched_litmus.c]
- (API) .select_task_rq() (select_task_rq_litmus)
[litmus/sched_litmus.c]
- (API) sysrq_dump_trace_buffer() and sysrq_handle_kill_rt_tasks()
[litmus/sched_trace.c]
- struct kfifo internal buffer name changed (buffer -> buf)
[litmus/sched_trace.c]
- add_wait_queue_exclusive_locked -> __add_wait_queue_tail_exclusive
  [litmus/fmlp.c] (see the sketch after this list)
- syscall numbers for both x86_32 and x86_64
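The wait-queue item above is representative of the purely mechanical renames this merge resolves: v2.6.36 no longer provides add_wait_queue_exclusive_locked(), and callers that already hold the queue's lock switch to __add_wait_queue_tail_exclusive(), which has the same two-argument signature and the same locking contract. A minimal sketch of such a call site, assuming the caller already holds q->lock; the helper name enqueue_fmlp_waiter is illustrative and not the actual LITMUS^RT code:

#include <linux/wait.h>

/* Sketch only: the caller must already hold q->lock, exactly as the old
 * add_wait_queue_exclusive_locked() required. */
static void enqueue_fmlp_waiter(wait_queue_head_t *q, wait_queue_t *wait)
{
        /* pre-2.6.36: add_wait_queue_exclusive_locked(q, wait); */
        __add_wait_queue_tail_exclusive(q, wait);
}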
Diffstat (limited to 'fs/exec.c')
-rw-r--r--  fs/exec.c  293
1 file changed, 190 insertions, 103 deletions
@@ -28,7 +28,6 @@
 #include <linux/mm.h>
 #include <linux/stat.h>
 #include <linux/fcntl.h>
-#include <linux/smp_lock.h>
 #include <linux/swap.h>
 #include <linux/string.h>
 #include <linux/init.h>
@@ -131,7 +130,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
 if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
 goto exit;
 
-fsnotify_open(file->f_path.dentry);
+fsnotify_open(file);
 
 error = -ENOEXEC;
 if(file->f_op) {
@@ -244,9 +243,10 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
 * use STACK_TOP because that can depend on attributes which aren't
 * configured yet.
 */
+BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
 vma->vm_end = STACK_TOP_MAX;
 vma->vm_start = vma->vm_end - PAGE_SIZE;
-vma->vm_flags = VM_STACK_FLAGS;
+vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 INIT_LIST_HEAD(&vma->anon_vma_chain);
 err = insert_vm_struct(mm, vma);
@@ -363,13 +363,13 @@ err:
 /*
 * count() counts the number of strings in array ARGV.
 */
-static int count(char __user * __user * argv, int max)
+static int count(const char __user * const __user * argv, int max)
 {
 int i = 0;
 
 if (argv != NULL) {
 for (;;) {
-char __user * p;
+const char __user * p;
 
 if (get_user(p, argv))
 return -EFAULT;
@@ -378,6 +378,9 @@ static int count(char __user * __user * argv, int max)
 argv++;
 if (i++ >= max)
 return -E2BIG;
+
+if (fatal_signal_pending(current))
+return -ERESTARTNOHAND;
 cond_resched();
 }
 }
@@ -389,7 +392,7 @@ static int count(char __user * __user * argv, int max)
 * processes's memory to the new process's stack. The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
-static int copy_strings(int argc, char __user * __user * argv,
+static int copy_strings(int argc, const char __user *const __user *argv,
 struct linux_binprm *bprm)
 {
 struct page *kmapped_page = NULL;
@@ -398,7 +401,7 @@ static int copy_strings(int argc, char __user * __user * argv,
 int ret;
 
 while (argc-- > 0) {
-char __user *str;
+const char __user *str;
 int len;
 unsigned long pos;
 
@@ -421,6 +424,12 @@ static int copy_strings(int argc, char __user * __user * argv,
 while (len > 0) {
 int offset, bytes_to_copy;
 
+if (fatal_signal_pending(current)) {
+ret = -ERESTARTNOHAND;
+goto out;
+}
+cond_resched();
+
 offset = pos % PAGE_SIZE;
 if (offset == 0)
 offset = PAGE_SIZE;
@@ -472,12 +481,13 @@ out:
 /*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
-int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
+int copy_strings_kernel(int argc, const char *const *argv,
+struct linux_binprm *bprm)
 {
 int r;
 mm_segment_t oldfs = get_fs();
 set_fs(KERNEL_DS);
-r = copy_strings(argc, (char __user * __user *)argv, bprm);
+r = copy_strings(argc, (const char __user *const __user *)argv, bprm);
 set_fs(oldfs);
 return r;
 }
@@ -595,6 +605,11 @@ int setup_arg_pages(struct linux_binprm *bprm,
 #else
 stack_top = arch_align_stack(stack_top);
 stack_top = PAGE_ALIGN(stack_top);
+
+if (unlikely(stack_top < mmap_min_addr) ||
+unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
+return -ENOMEM;
+
 stack_shift = vma->vm_end - stack_top;
 
 bprm->p -= stack_shift;
@@ -618,6 +633,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
 else if (executable_stack == EXSTACK_DISABLE_X)
 vm_flags &= ~VM_EXEC;
 vm_flags |= mm->def_flags;
+vm_flags |= VM_STACK_INCOMPLETE_SETUP;
 
 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
 vm_flags);
@@ -632,6 +648,9 @@ int setup_arg_pages(struct linux_binprm *bprm,
 goto out_unlock;
 }
 
+/* mprotect_fixup is overkill to remove the temporary stack flags */
+vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+
 stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
 stack_size = vma->vm_end - vma->vm_start;
 /*
@@ -650,6 +669,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
 else
 stack_base = vma->vm_start - stack_expand;
 #endif
+current->mm->start_stack = bprm->p;
 ret = expand_stack(vma, stack_base);
 if (ret)
 ret = -EFAULT;
@@ -680,7 +700,7 @@ struct file *open_exec(const char *name)
 if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
 goto exit;
 
-fsnotify_open(file->f_path.dentry);
+fsnotify_open(file);
 
 err = deny_write_access(file);
 if (err)
@@ -765,7 +785,6 @@ static int de_thread(struct task_struct *tsk)
 struct signal_struct *sig = tsk->signal;
 struct sighand_struct *oldsighand = tsk->sighand;
 spinlock_t *lock = &oldsighand->siglock;
-int count;
 
 if (thread_group_empty(tsk))
 goto no_thread_group;
@@ -782,13 +801,13 @@ static int de_thread(struct task_struct *tsk)
 spin_unlock_irq(lock);
 return -EAGAIN;
 }
+
 sig->group_exit_task = tsk;
-zap_other_threads(tsk);
+sig->notify_count = zap_other_threads(tsk);
+if (!thread_group_leader(tsk))
+sig->notify_count--;
 
-/* Account for the thread group leader hanging around: */
-count = thread_group_leader(tsk) ? 1 : 2;
-sig->notify_count = count;
-while (atomic_read(&sig->count) > count) {
+while (sig->notify_count) {
 __set_current_state(TASK_UNINTERRUPTIBLE);
 spin_unlock_irq(lock);
 schedule();
@@ -995,7 +1014,7 @@ EXPORT_SYMBOL(flush_old_exec);
 void setup_new_exec(struct linux_binprm * bprm)
 {
 int i, ch;
-char * name;
+const char *name;
 char tcomm[sizeof(current->comm)];
 
 arch_pick_mmap_layout(current->mm);
@@ -1115,7 +1134,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
 bprm->unsafe = tracehook_unsafe_exec(p);
 
 n_fs = 1;
-write_lock(&p->fs->lock);
+spin_lock(&p->fs->lock);
 rcu_read_lock();
 for (t = next_thread(p); t != p; t = next_thread(t)) {
 if (t->fs == p->fs)
@@ -1132,7 +1151,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
 res = 1;
 }
 }
-write_unlock(&p->fs->lock);
+spin_unlock(&p->fs->lock);
 
 return res;
 }
@@ -1314,9 +1333,9 @@ EXPORT_SYMBOL(search_binary_handler);
 /*
 * sys_execve() executes a new program.
 */
-int do_execve(char * filename,
-char __user *__user *argv,
-char __user *__user *envp,
+int do_execve(const char * filename,
+const char __user *const __user *argv,
+const char __user *const __user *envp,
 struct pt_regs * regs)
 {
 struct linux_binprm *bprm;
@@ -1660,12 +1679,15 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
 struct task_struct *tsk = current;
 struct mm_struct *mm = tsk->mm;
 struct completion *vfork_done;
-int core_waiters;
+int core_waiters = -EBUSY;
 
 init_completion(&core_state->startup);
 core_state->dumper.task = tsk;
 core_state->dumper.next = NULL;
-core_waiters = zap_threads(tsk, mm, core_state, exit_code);
+
+down_write(&mm->mmap_sem);
+if (!mm->core_state)
+core_waiters = zap_threads(tsk, mm, core_state, exit_code);
 up_write(&mm->mmap_sem);
 
 if (unlikely(core_waiters < 0))
@@ -1785,21 +1807,61 @@ static void wait_for_dump_helpers(struct file *file)
 }
 
 
+/*
+* umh_pipe_setup
+* helper function to customize the process used
+* to collect the core in userspace. Specifically
+* it sets up a pipe and installs it as fd 0 (stdin)
+* for the process. Returns 0 on success, or
+* PTR_ERR on failure.
+* Note that it also sets the core limit to 1. This
+* is a special value that we use to trap recursive
+* core dumps
+*/
+static int umh_pipe_setup(struct subprocess_info *info)
+{
+struct file *rp, *wp;
+struct fdtable *fdt;
+struct coredump_params *cp = (struct coredump_params *)info->data;
+struct files_struct *cf = current->files;
+
+wp = create_write_pipe(0);
+if (IS_ERR(wp))
+return PTR_ERR(wp);
+
+rp = create_read_pipe(wp, 0);
+if (IS_ERR(rp)) {
+free_write_pipe(wp);
+return PTR_ERR(rp);
+}
+
+cp->file = wp;
+
+sys_close(0);
+fd_install(0, rp);
+spin_lock(&cf->file_lock);
+fdt = files_fdtable(cf);
+FD_SET(0, fdt->open_fds);
+FD_CLR(0, fdt->close_on_exec);
+spin_unlock(&cf->file_lock);
+
+/* and disallow core files too */
+current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
+
+return 0;
+}
+
 void do_coredump(long signr, int exit_code, struct pt_regs *regs)
 {
 struct core_state core_state;
 char corename[CORENAME_MAX_SIZE + 1];
 struct mm_struct *mm = current->mm;
 struct linux_binfmt * binfmt;
-struct inode * inode;
 const struct cred *old_cred;
 struct cred *cred;
 int retval = 0;
 int flag = 0;
-int ispipe = 0;
-char **helper_argv = NULL;
-int helper_argc = 0;
-int dump_count = 0;
+int ispipe;
 static atomic_t core_dump_count = ATOMIC_INIT(0);
 struct coredump_params cprm = {
 .signr = signr,
@@ -1818,23 +1880,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
 binfmt = mm->binfmt;
 if (!binfmt || !binfmt->core_dump)
 goto fail;
-
-cred = prepare_creds();
-if (!cred) {
-retval = -ENOMEM;
+if (!__get_dumpable(cprm.mm_flags))
 goto fail;
-}
 
-down_write(&mm->mmap_sem);
-/*
- * If another thread got here first, or we are not dumpable, bail out.
- */
-if (mm->core_state || !__get_dumpable(cprm.mm_flags)) {
-up_write(&mm->mmap_sem);
-put_cred(cred);
+cred = prepare_creds();
+if (!cred)
 goto fail;
-}
-
 /*
 * We cannot trust fsuid as being the "true" uid of the
 * process nor do we know its entire history. We only know it
@@ -1847,10 +1898,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
 }
 
 retval = coredump_wait(exit_code, &core_state);
-if (retval < 0) {
-put_cred(cred);
-goto fail;
-}
+if (retval < 0)
+goto fail_creds;
 
 old_cred = override_creds(cred);
 
@@ -1860,27 +1909,21 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
 */
 clear_thread_flag(TIF_SIGPENDING);
 
-/*
- * lock_kernel() because format_corename() is controlled by sysctl, which
- * uses lock_kernel()
- */
-lock_kernel();
 ispipe = format_corename(corename, signr);
-unlock_kernel();
-
-if ((!ispipe) && (cprm.limit < binfmt->min_coredump))
-goto fail_unlock;
 
 if (ispipe) {
-if (cprm.limit == 0) {
+int dump_count;
+char **helper_argv;
+
+if (cprm.limit == 1) {
 /*
 * Normally core limits are irrelevant to pipes, since
 * we're not writing to the file system, but we use
-* cprm.limit of 0 here as a speacial value. Any
-* non-zero limit gets set to RLIM_INFINITY below, but
+* cprm.limit of 1 here as a speacial value. Any
+* non-1 limit gets set to RLIM_INFINITY below, but
 * a limit of 0 skips the dump. This is a consistent
 * way to catch recursive crashes. We can still crash
-* if the core_pattern binary sets RLIM_CORE = !0
+* if the core_pattern binary sets RLIM_CORE = !1
 * but it runs as root, and can do lots of stupid things
 * Note that we use task_tgid_vnr here to grab the pid
 * of the process group leader. That way we get the
@@ -1888,11 +1931,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
 * core_pattern process dies.
 */
 printk(KERN_WARNING
-"Process %d(%s) has RLIMIT_CORE set to 0\n",
+"Process %d(%s) has RLIMIT_CORE set to 1\n",
 task_tgid_vnr(current), current->comm);
 printk(KERN_WARNING "Aborting core\n");
 goto fail_unlock;
 }
+cprm.limit = RLIM_INFINITY;
 
 dump_count = atomic_inc_return(&core_dump_count);
 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
@@ -1902,71 +1946,114 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
 goto fail_dropcount;
 }
 
-helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
+helper_argv = argv_split(GFP_KERNEL, corename+1, NULL);
 if (!helper_argv) {
 printk(KERN_WARNING "%s failed to allocate memory\n",
 __func__);
 goto fail_dropcount;
 }
 
-cprm.limit = RLIM_INFINITY;
-
-/* SIGPIPE can happen, but it's just never processed */
-if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL,
-&cprm.file)) {
+retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
+NULL, UMH_WAIT_EXEC, umh_pipe_setup,
+NULL, &cprm);
+argv_free(helper_argv);
+if (retval) {
 printk(KERN_INFO "Core dump to %s pipe failed\n",
 corename);
-goto fail_dropcount;
+goto close_fail;
 }
-} else
+} else {
+struct inode *inode;
+
+if (cprm.limit < binfmt->min_coredump)
+goto fail_unlock;
+
 cprm.file = filp_open(corename,
 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
 0600);
 if (IS_ERR(cprm.file))
-goto fail_dropcount;
+goto fail_unlock;
-inode = cprm.file->f_path.dentry->d_inode;
-if (inode->i_nlink > 1)
-goto close_fail; /* multiple links - don't dump */
-if (!ispipe && d_unhashed(cprm.file->f_path.dentry))
-goto close_fail;
-
-/* AK: actually i see no reason to not allow this for named pipes etc.,
-but keep the previous behaviour for now. */
-if (!ispipe && !S_ISREG(inode->i_mode))
-goto close_fail;
-/*
- * Dont allow local users get cute and trick others to coredump
- * into their pre-created files:
- * Note, this is not relevant for pipes
- */
-if (!ispipe && (inode->i_uid != current_fsuid()))
-goto close_fail;
-if (!cprm.file->f_op)
-goto close_fail;
-if (!cprm.file->f_op->write)
-goto close_fail;
-if (!ispipe &&
-do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file) != 0)
-goto close_fail;
 
-retval = binfmt->core_dump(&cprm);
+inode = cprm.file->f_path.dentry->d_inode;
+if (inode->i_nlink > 1)
+goto close_fail;
+if (d_unhashed(cprm.file->f_path.dentry))
+goto close_fail;
+/*
+ * AK: actually i see no reason to not allow this for named
+ * pipes etc, but keep the previous behaviour for now.
+ */
+if (!S_ISREG(inode->i_mode))
+goto close_fail;
+/*
+ * Dont allow local users get cute and trick others to coredump
+ * into their pre-created files.
+ */
+if (inode->i_uid != current_fsuid())
+goto close_fail;
+if (!cprm.file->f_op || !cprm.file->f_op->write)
+goto close_fail;
+if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
+goto close_fail;
+}
 
+retval = binfmt->core_dump(&cprm);
 if (retval)
 current->signal->group_exit_code |= 0x80;
-close_fail:
+
 if (ispipe && core_pipe_limit)
 wait_for_dump_helpers(cprm.file);
-filp_close(cprm.file, NULL);
+close_fail:
+if (cprm.file)
+filp_close(cprm.file, NULL);
 fail_dropcount:
-if (dump_count)
+if (ispipe)
 atomic_dec(&core_dump_count);
 fail_unlock:
-if (helper_argv)
-argv_free(helper_argv);
-
+coredump_finish(mm);
 revert_creds(old_cred);
+fail_creds:
 put_cred(cred);
-coredump_finish(mm);
 fail:
 return;
 }
+
+/*
+ * Core dumping helper functions. These are the only things you should
+ * do on a core-file: use only these functions to write out all the
+ * necessary info.
+ */
+int dump_write(struct file *file, const void *addr, int nr)
+{
+return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+}
+EXPORT_SYMBOL(dump_write);
+
+int dump_seek(struct file *file, loff_t off)
+{
+int ret = 1;
+
+if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
+if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
+return 0;
+} else {
+char *buf = (char *)get_zeroed_page(GFP_KERNEL);
+
+if (!buf)
+return 0;
+while (off > 0) {
+unsigned long n = off;
+
+if (n > PAGE_SIZE)
+n = PAGE_SIZE;
+if (!dump_write(file, buf, n)) {
+ret = 0;
+break;
+}
+off -= n;
+}
+free_page((unsigned long)buf);
+}
+return ret;
+}
+EXPORT_SYMBOL(dump_seek);
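The dump_write()/dump_seek() helpers that this patch moves into fs/exec.c are the interface the comment above prescribes for binfmt core dumpers: dump_write() pushes bytes through the file's ->write(), and dump_seek() skips a hole, falling back to writing zeroed pages when the target (for example a pipe to a core_pattern helper) cannot seek. A hedged sketch of how a ->core_dump() implementation might drive them; emit_segment() and its arguments are illustrative and not taken from any binfmt_* handler, and the includes assume the 2.6.36 header layout:

#include <linux/binfmts.h>
#include <linux/coredump.h>

/* Illustrative only: write one chunk of data, then skip an unmapped hole.
 * Both helpers return non-zero on success, so the dumper simply bails out
 * on the first failure. */
static int emit_segment(struct coredump_params *cprm,
                        const void *data, int len, loff_t hole)
{
        if (!dump_write(cprm->file, data, len))
                return 0;       /* short or failed write */
        if (!dump_seek(cprm->file, hole))
                return 0;       /* could not seek or zero-fill past the hole */
        return 1;
}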