-rw-r--r--  fs/binfmt_elf.c        | 44
-rw-r--r--  fs/binfmt_elf_fdpic.c  | 56
-rw-r--r--  include/linux/mm.h     |  1
-rw-r--r--  mm/memory.c            | 33
4 files changed, 66 insertions(+), 68 deletions(-)
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 7c1e65d54872..442d94fe255c 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1280,9 +1280,6 @@ static int writenote(struct memelfnote *men, struct file *file,
 #define DUMP_WRITE(addr, nr) \
 	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
 		goto end_coredump;
-#define DUMP_SEEK(off) \
-	if (!dump_seek(file, (off))) \
-		goto end_coredump;
 
 static void fill_elf_header(struct elfhdr *elf, int segs,
 			    u16 machine, u32 flags, u8 osabi)
@@ -2016,7 +2013,8 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
 		goto end_coredump;
 
 	/* Align to page */
-	DUMP_SEEK(dataoff - foffset);
+	if (!dump_seek(file, dataoff - foffset))
+		goto end_coredump;
 
 	for (vma = first_vma(current, gate_vma); vma != NULL;
 			vma = next_vma(vma, gate_vma)) {
@@ -2027,33 +2025,19 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
 
 		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
 			struct page *page;
-			struct vm_area_struct *tmp_vma;
+			int stop;
 
-			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
-						&page, &tmp_vma) <= 0) {
-				DUMP_SEEK(PAGE_SIZE);
-			} else {
-				if (page == ZERO_PAGE(0)) {
-					if (!dump_seek(file, PAGE_SIZE)) {
-						page_cache_release(page);
-						goto end_coredump;
-					}
-				} else {
-					void *kaddr;
-					flush_cache_page(tmp_vma, addr,
-							 page_to_pfn(page));
-					kaddr = kmap(page);
-					if ((size += PAGE_SIZE) > limit ||
-					    !dump_write(file, kaddr,
-							PAGE_SIZE)) {
-						kunmap(page);
-						page_cache_release(page);
-						goto end_coredump;
-					}
-					kunmap(page);
-				}
-				page_cache_release(page);
-			}
+			page = get_dump_page(addr);
+			if (page) {
+				void *kaddr = kmap(page);
+				stop = ((size += PAGE_SIZE) > limit) ||
+					!dump_write(file, kaddr, PAGE_SIZE);
+				kunmap(page);
+				page_cache_release(page);
+			} else
+				stop = !dump_seek(file, PAGE_SIZE);
+			if (stop)
+				goto end_coredump;
 		}
 	}
 
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 20fbeced472b..76285471073e 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1325,9 +1325,6 @@ static int writenote(struct memelfnote *men, struct file *file)
 #define DUMP_WRITE(addr, nr) \
 	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
 		goto end_coredump;
-#define DUMP_SEEK(off) \
-	if (!dump_seek(file, (off))) \
-		goto end_coredump;
 
 static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)
 {
@@ -1518,6 +1515,7 @@ static int elf_fdpic_dump_segments(struct file *file, size_t *size,
 			     unsigned long *limit, unsigned long mm_flags)
 {
 	struct vm_area_struct *vma;
+	int err = 0;
 
 	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
 		unsigned long addr;
@@ -1525,43 +1523,26 @@ static int elf_fdpic_dump_segments(struct file *file, size_t *size,
 		if (!maydump(vma, mm_flags))
 			continue;
 
-		for (addr = vma->vm_start;
-		     addr < vma->vm_end;
-		     addr += PAGE_SIZE
-		     ) {
-			struct vm_area_struct *vma;
-			struct page *page;
-
-			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
-					   &page, &vma) <= 0) {
-				DUMP_SEEK(file->f_pos + PAGE_SIZE);
-			}
-			else if (page == ZERO_PAGE(0)) {
-				page_cache_release(page);
-				DUMP_SEEK(file->f_pos + PAGE_SIZE);
-			}
-			else {
-				void *kaddr;
-
-				flush_cache_page(vma, addr, page_to_pfn(page));
-				kaddr = kmap(page);
-				if ((*size += PAGE_SIZE) > *limit ||
-				    !dump_write(file, kaddr, PAGE_SIZE)
-				    ) {
-					kunmap(page);
-					page_cache_release(page);
-					return -EIO;
-				}
+		for (addr = vma->vm_start; addr < vma->vm_end;
+		     addr += PAGE_SIZE) {
+			struct page *page = get_dump_page(addr);
+			if (page) {
+				void *kaddr = kmap(page);
+				*size += PAGE_SIZE;
+				if (*size > *limit)
+					err = -EFBIG;
+				else if (!dump_write(file, kaddr, PAGE_SIZE))
+					err = -EIO;
 				kunmap(page);
 				page_cache_release(page);
-			}
+			} else if (!dump_seek(file, file->f_pos + PAGE_SIZE))
+				err = -EFBIG;
+			if (err)
+				goto out;
 		}
 	}
-
-	return 0;
-
-end_coredump:
-	return -EFBIG;
+out:
+	return err;
 }
 #endif
 
@@ -1802,7 +1783,8 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
 			goto end_coredump;
 	}
 
-	DUMP_SEEK(dataoff);
+	if (!dump_seek(file, dataoff))
+		goto end_coredump;
 
 	if (elf_fdpic_dump_segments(file, &size, &limit, mm_flags) < 0)
 		goto end_coredump;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 19ff81c49ba6..e41795bba95d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -817,6 +817,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			struct page **pages, struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
+struct page *get_dump_page(unsigned long addr);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
diff --git a/mm/memory.c b/mm/memory.c
index 4b5200f5f35a..a8430ff13837 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1423,9 +1423,40 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
 	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
-
 EXPORT_SYMBOL(get_user_pages);
 
+/**
+ * get_dump_page() - pin user page in memory while writing it to core dump
+ * @addr: user address
+ *
+ * Returns struct page pointer of user page pinned for dump,
+ * to be freed afterwards by page_cache_release() or put_page().
+ *
+ * Returns NULL on any kind of failure - a hole must then be inserted into
+ * the corefile, to preserve alignment with its headers; and also returns
+ * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
+ * allowing a hole to be left in the corefile to save diskspace.
+ *
+ * Called without mmap_sem, but after all other threads have been killed.
+ */
+#ifdef CONFIG_ELF_CORE
+struct page *get_dump_page(unsigned long addr)
+{
+	struct vm_area_struct *vma;
+	struct page *page;
+
+	if (__get_user_pages(current, current->mm, addr, 1,
+			GUP_FLAGS_FORCE, &page, &vma) < 1)
+		return NULL;
+	if (page == ZERO_PAGE(0)) {
+		page_cache_release(page);
+		return NULL;
+	}
+	flush_cache_page(vma, addr, page_to_pfn(page));
+	return page;
+}
+#endif /* CONFIG_ELF_CORE */
+
 pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
 			spinlock_t **ptl)
 {
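
A minimal caller-side sketch of the new helper, assuming the same dump_write()/dump_seek() primitives and size/limit accounting used by elf_core_dump() above; the wrapper name write_dump_page_or_hole() is hypothetical and only illustrates the NULL-means-hole convention of get_dump_page():

	/* Hypothetical wrapper: write one user page to the corefile, or seek
	 * past it to leave a hole when get_dump_page() returns NULL (fault,
	 * ZERO_PAGE or pte_none).  For simplicity every failure is reported
	 * here as -EFBIG, unlike the fdpic code which distinguishes -EIO.
	 */
	static int write_dump_page_or_hole(struct file *file, unsigned long addr,
					   unsigned long *size, unsigned long limit)
	{
		struct page *page = get_dump_page(addr);
		int stop;

		if (page) {
			void *kaddr = kmap(page);
			stop = ((*size += PAGE_SIZE) > limit) ||
				!dump_write(file, kaddr, PAGE_SIZE);
			kunmap(page);
			page_cache_release(page);	/* drop the pin taken by get_dump_page() */
		} else
			stop = !dump_seek(file, PAGE_SIZE);	/* hole keeps headers aligned */
		return stop ? -EFBIG : 0;
	}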