author     Roland McGrath <roland@redhat.com>                    2007-01-26 03:56:49 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-01-26 16:50:58 -0500
commit     f47aef55d9a18945fcdd7fd6bf01121ce973b91b (patch)
tree       69f5f6c1fd4ae27d18344ae4b33b5c7bd4b18699
parent     e5b97dde514f9bd43f9e525451d0a863c4fc8a9a (diff)
[PATCH] i386 vDSO: use VM_ALWAYSDUMP
This patch fixes core dumps to include the vDSO vma, which is currently left
out.

It removes the special-case core writing macros, which were not doing the
right thing for the vDSO vma anyway.  Instead, it uses VM_ALWAYSDUMP in the
vma; there is no need for the fixmap page to be installed.  It handles the
CONFIG_COMPAT_VDSO case by making elf_core_dump use the fake vma from
get_gate_vma after the real vmas, in the same way the /proc/PID/maps code
does.

This changes core dumps so they no longer include the non-PT_LOAD phdrs from
the vDSO.  I made the change to add them in the first place, but it turned
out that nothing ever wanted them there since the advent of NT_AUXV.  It's
cleaner to leave them out and just let the phdrs inside the vDSO image speak
for themselves.

Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
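Background on the mechanism this relies on: the ELF core dumper decides per
vma whether to write its contents, and a vma carrying VM_ALWAYSDUMP is dumped
unconditionally.  A rough sketch of how maydump() in fs/binfmt_elf.c honors
the flag (illustrative only; the actual check comes from the parent patch
that introduced VM_ALWAYSDUMP, and the fallback policy shown here is
simplified):

	static int maydump(struct vm_area_struct *vma)
	{
		/* A vma marked VM_ALWAYSDUMP is always written to the core. */
		if (vma->vm_flags & VM_ALWAYSDUMP)
			return 1;

		/* Skip I/O mapped devices and special mappings (simplified). */
		if (vma->vm_flags & (VM_IO | VM_RESERVED))
			return 0;

		/* Everything else is subject to the usual dump policy. */
		return 1;
	}

Because both the non-compat vDSO vma (arch/i386/kernel/sysenter.c) and the
gate vma (mm/memory.c) are flagged VM_ALWAYSDUMP below, elf_core_dump no
longer needs the i386-specific ELF_CORE_WRITE_EXTRA_* macros to get the vDSO
into a dump.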
-rw-r--r--  arch/i386/kernel/sysenter.c |  12
-rw-r--r--  fs/binfmt_elf.c             |  38
-rw-r--r--  include/asm-i386/elf.h      |  44
-rw-r--r--  mm/memory.c                 |   7
4 files changed, 49 insertions(+), 52 deletions(-)
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 454d12df59ea..5da744204d10 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -79,11 +79,6 @@ int __init sysenter_setup(void)
 #ifdef CONFIG_COMPAT_VDSO
 	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
 	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
-#else
-	/*
-	 * In the non-compat case the ELF coredumping code needs the fixmap:
-	 */
-	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO);
 #endif
 
 	if (!boot_cpu_has(X86_FEATURE_SEP)) {
@@ -147,6 +142,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
 	vma->vm_end = addr + PAGE_SIZE;
 	/* MAYWRITE to allow gdb to COW and set breakpoints */
 	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
+	/*
+	 * Make sure the vDSO gets into every core dump.
+	 * Dumping its contents makes post-mortem fully interpretable later
+	 * without matching up the same kernel and hardware config to see
+	 * what PC values meant.
+	 */
+	vma->vm_flags |= VM_ALWAYSDUMP;
 	vma->vm_flags |= mm->def_flags;
 	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
 	vma->vm_ops = &syscall_vm_ops;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 6fec8bfa6bac..90461f49e902 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1428,6 +1428,32 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
 	return sz;
 }
 
+static struct vm_area_struct *first_vma(struct task_struct *tsk,
+					struct vm_area_struct *gate_vma)
+{
+	struct vm_area_struct *ret = tsk->mm->mmap;
+
+	if (ret)
+		return ret;
+	return gate_vma;
+}
+/*
+ * Helper function for iterating across a vma list.  It ensures that the caller
+ * will visit `gate_vma' prior to terminating the search.
+ */
+static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
+					struct vm_area_struct *gate_vma)
+{
+	struct vm_area_struct *ret;
+
+	ret = this_vma->vm_next;
+	if (ret)
+		return ret;
+	if (this_vma == gate_vma)
+		return NULL;
+	return gate_vma;
+}
+
 /*
  * Actual dumper
  *
@@ -1443,7 +1469,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
 	int segs;
 	size_t size = 0;
 	int i;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *gate_vma;
 	struct elfhdr *elf = NULL;
 	loff_t offset = 0, dataoff, foffset;
 	unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
@@ -1529,6 +1555,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
 	segs += ELF_CORE_EXTRA_PHDRS;
 #endif
 
+	gate_vma = get_gate_vma(current);
+	if (gate_vma != NULL)
+		segs++;
+
 	/* Set up header */
 	fill_elf_header(elf, segs + 1);	/* including notes section */
 
@@ -1596,7 +1626,8 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
 	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
 	/* Write program headers for segments dump */
-	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
+	for (vma = first_vma(current, gate_vma); vma != NULL;
+			vma = next_vma(vma, gate_vma)) {
 		struct elf_phdr phdr;
 		size_t sz;
 
@@ -1645,7 +1676,8 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
 	/* Align to page */
 	DUMP_SEEK(dataoff - foffset);
 
-	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
+	for (vma = first_vma(current, gate_vma); vma != NULL;
+			vma = next_vma(vma, gate_vma)) {
 		unsigned long addr;
 
 		if (!maydump(vma))
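The first_vma()/next_vma() helpers added above let both dump loops visit the
gate vma (which covers the fixmap-mapped vDSO in the CONFIG_COMPAT_VDSO case)
exactly once, after all real vmas, just as /proc/PID/maps presents it.  An
illustrative traversal, assuming an elf_core_dump-like caller:

	struct vm_area_struct *vma, *gate_vma;

	gate_vma = get_gate_vma(current);	/* may be NULL */
	for (vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		/* real vmas come first; gate_vma, if any, is visited last */
	}

If the process has no gate vma, the walk degenerates to the plain
current->mm->mmap iteration that was used before this patch.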
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 0515d61d5411..369035dfe4b6 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -168,50 +168,6 @@ do if (vdso_enabled) { \
 		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_COMPAT_BASE); \
 } while (0)
 
-/*
- * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
- * extra segments containing the vsyscall DSO contents.  Dumping its
- * contents makes post-mortem fully interpretable later without matching up
- * the same kernel and hardware config to see what PC values meant.
- * Dumping its extra ELF program headers includes all the other information
- * a debugger needs to easily find how the vsyscall DSO was being used.
- */
-#define ELF_CORE_EXTRA_PHDRS		(VDSO_HIGH_EHDR->e_phnum)
-#define ELF_CORE_WRITE_EXTRA_PHDRS					      \
-do {									      \
-	const struct elf_phdr *const vsyscall_phdrs =			      \
-		(const struct elf_phdr *) (VDSO_HIGH_BASE		      \
-					   + VDSO_HIGH_EHDR->e_phoff);	      \
-	int i;								      \
-	Elf32_Off ofs = 0;						      \
-	for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) {			      \
-		struct elf_phdr phdr = vsyscall_phdrs[i];		      \
-		if (phdr.p_type == PT_LOAD) {				      \
-			BUG_ON(ofs != 0);				      \
-			ofs = phdr.p_offset = offset;			      \
-			phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz);	      \
-			phdr.p_filesz = phdr.p_memsz;			      \
-			offset += phdr.p_filesz;			      \
-		}							      \
-		else							      \
-			phdr.p_offset += ofs;				      \
-		phdr.p_paddr = 0; /* match other core phdrs */		      \
-		DUMP_WRITE(&phdr, sizeof(phdr));			      \
-	}								      \
-} while (0)
-#define ELF_CORE_WRITE_EXTRA_DATA					      \
-do {									      \
-	const struct elf_phdr *const vsyscall_phdrs =			      \
-		(const struct elf_phdr *) (VDSO_HIGH_BASE		      \
-					   + VDSO_HIGH_EHDR->e_phoff);	      \
-	int i;								      \
-	for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) {			      \
-		if (vsyscall_phdrs[i].p_type == PT_LOAD)		      \
-			DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr,	      \
-				   PAGE_ALIGN(vsyscall_phdrs[i].p_memsz));    \
-	}								      \
-} while (0)
-
 #endif
 
 #endif
diff --git a/mm/memory.c b/mm/memory.c
index 5beb4b894c5a..ef09f0acb1d8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2608,6 +2608,13 @@ static int __init gate_vma_init(void)
 	gate_vma.vm_end = FIXADDR_USER_END;
 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
 	gate_vma.vm_page_prot = __P101;
+	/*
+	 * Make sure the vDSO gets into every core dump.
+	 * Dumping its contents makes post-mortem fully interpretable later
+	 * without matching up the same kernel and hardware config to see
+	 * what PC values meant.
+	 */
+	gate_vma.vm_flags |= VM_ALWAYSDUMP;
 	return 0;
 }
 __initcall(gate_vma_init);