Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/entry.S        |  4
-rw-r--r--  arch/i386/kernel/sysenter.c     | 14
-rw-r--r--  arch/powerpc/kernel/vdso.c      |  7
-rw-r--r--  arch/um/Kconfig.i386            | 34
-rw-r--r--  arch/x86_64/ia32/ia32_binfmt.c  | 49
-rw-r--r--  arch/x86_64/ia32/syscall32.c    | 15
6 files changed, 52 insertions, 71 deletions
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 06461b8b715d..5e47683fc63a 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -302,12 +302,16 @@ sysenter_past_esp:
 	pushl $(__USER_CS)
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET cs, 0*/
+#ifndef CONFIG_COMPAT_VDSO
 	/*
 	 * Push current_thread_info()->sysenter_return to the stack.
 	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
 	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
 	 */
 	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
+#else
+	pushl $SYSENTER_RETURN
+#endif
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET eip, 0
 
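The non-COMPAT_VDSO branch leans on a single-instruction address computation. As a hedged sanity check of that displacement, here is a small userspace C sketch of the same arithmetic; THREAD_SIZE and TI_SYSENTER_RETURN below are illustrative stand-ins, not the kernel's real config values or asm-offsets:

#include <stdio.h>

/* Illustrative constants only; the real values come from the kernel
 * configuration and asm-offsets, not from this sketch. */
#define THREAD_SIZE        8192UL /* assumption: 8 KiB kernel stacks */
#define TI_SYSENTER_RETURN 36UL   /* hypothetical offsetof(struct thread_info, sysenter_return) */

/*
 * After the four pushl's above (ss, esp, eflags, cs), %esp sits
 * 8 + 4*4 bytes below the top of the kernel stack (the 8 matches
 * copy_thread's esp0 setting), and thread_info lives at the bottom of
 * the same THREAD_SIZE region, so one displacement off %esp reaches
 * current_thread_info()->sysenter_return.
 */
static unsigned long sysenter_return_slot(unsigned long esp)
{
	unsigned long stack_top   = esp + 8 + 4 * 4;
	unsigned long thread_info = stack_top - THREAD_SIZE;

	/* equivalent to esp + TI_SYSENTER_RETURN - THREAD_SIZE + 8 + 4*4 */
	return thread_info + TI_SYSENTER_RETURN;
}

int main(void)
{
	unsigned long stack_top = 0xc1802000UL;    /* hypothetical stack top */
	unsigned long esp = stack_top - 8 - 4 * 4; /* value at this point in the entry path */

	printf("sysenter_return slot: %#lx\n", sysenter_return_slot(esp));
	return 0;
}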
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 7de9117b5a3a..5da744204d10 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -79,11 +79,6 @@ int __init sysenter_setup(void)
 #ifdef CONFIG_COMPAT_VDSO
 	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
 	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
-#else
-	/*
-	 * In the non-compat case the ELF coredumping code needs the fixmap:
-	 */
-	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO);
 #endif
 
 	if (!boot_cpu_has(X86_FEATURE_SEP)) {
@@ -100,6 +95,7 @@ int __init sysenter_setup(void)
 	return 0;
 }
 
+#ifndef CONFIG_COMPAT_VDSO
 static struct page *syscall_nopage(struct vm_area_struct *vma,
 				   unsigned long adr, int *type)
 {
@@ -146,6 +142,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
 	vma->vm_end = addr + PAGE_SIZE;
 	/* MAYWRITE to allow gdb to COW and set breakpoints */
 	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
+	/*
+	 * Make sure the vDSO gets into every core dump.
+	 * Dumping its contents makes post-mortem fully interpretable later
+	 * without matching up the same kernel and hardware config to see
+	 * what PC values meant.
+	 */
+	vma->vm_flags |= VM_ALWAYSDUMP;
 	vma->vm_flags |= mm->def_flags;
 	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
 	vma->vm_ops = &syscall_vm_ops;
@@ -187,3 +190,4 @@ int in_gate_area_no_task(unsigned long addr)
 {
 	return 0;
 }
+#endif
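The comment added above the new VM_ALWAYSDUMP line (repeated in the powerpc and x86_64 hunks below) is the point of the series: instead of arch-specific core-dump hooks, the vma itself tells the generic dumper to include it. A hedged, self-contained sketch of a maydump()-style filter honoring such a flag; the flag bits and the trimmed-down vma struct are illustrative, not the kernel's definitions:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative flag values; the real ones live in <linux/mm.h>. */
#define VM_IO          0x00004000UL
#define VM_ALWAYSDUMP  0x04000000UL

/* Trimmed-down stand-in for struct vm_area_struct. */
struct vma_sketch {
	unsigned long vm_flags;
	const char *vm_file;   /* non-NULL for file-backed mappings */
};

/* Decide whether a mapping's contents belong in the core dump. */
static bool may_dump(const struct vma_sketch *vma)
{
	/* The vma can be set up to answer directly: always dump it. */
	if (vma->vm_flags & VM_ALWAYSDUMP)
		return true;

	/* Never dump I/O mappings or other special regions. */
	if (vma->vm_flags & VM_IO)
		return false;

	/* Fall back to the usual policy: dump anonymous memory only. */
	return vma->vm_file == NULL;
}

int main(void)
{
	struct vma_sketch vdso = { .vm_flags = VM_ALWAYSDUMP, .vm_file = NULL };

	return may_dump(&vdso) ? 0 : 1;   /* the vDSO vma is always written out */
}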
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index a4b28c73bba0..ae0ede19879d 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -284,6 +284,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	 * pages though
 	 */
 	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
+	/*
+	 * Make sure the vDSO gets into every core dump.
+	 * Dumping its contents makes post-mortem fully interpretable later
+	 * without matching up the same kernel and hardware config to see
+	 * what PC values meant.
+	 */
+	vma->vm_flags |= VM_ALWAYSDUMP;
 	vma->vm_flags |= mm->def_flags;
 	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
 	vma->vm_ops = &vdso_vmops;
diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
index f191a550a079..77558a88a2fe 100644
--- a/arch/um/Kconfig.i386
+++ b/arch/um/Kconfig.i386
@@ -19,22 +19,22 @@ config SEMAPHORE_SLEEPERS
 choice
 	prompt "Host memory split"
 	default HOST_VMSPLIT_3G
-	---help---
+	help
 	   This is needed when the host kernel on which you run has a non-default
 	   (like 2G/2G) memory split, instead of the customary 3G/1G. If you did
 	   not recompile your own kernel but use the default distro's one, you can
 	   safely accept the "Default split" option.
 
 	   It can be enabled on recent (>=2.6.16-rc2) vanilla kernels via
 	   CONFIG_VM_SPLIT_*, or on previous kernels with special patches (-ck
 	   patchset by Con Kolivas, or other ones) - option names match closely the
 	   host CONFIG_VM_SPLIT_* ones.
 
 	   A lower setting (where 1G/3G is lowest and 3G/1G is higher) will
 	   tolerate even more "normal" host kernels, but an higher setting will be
 	   stricter.
 
 	   So, if you do not know what to do here, say 'Default split'.
 
 config HOST_VMSPLIT_3G
 	bool "Default split (3G/1G user/kernel host split)"
@@ -67,13 +67,13 @@ config 3_LEVEL_PGTABLES
 
 config STUB_CODE
 	hex
-	default 0xbfffe000 if !HOST_2G_2G
-	default 0x7fffe000 if HOST_2G_2G
+	default 0xbfffe000 if !HOST_VMSPLIT_2G
+	default 0x7fffe000 if HOST_VMSPLIT_2G
 
 config STUB_DATA
 	hex
-	default 0xbffff000 if !HOST_2G_2G
-	default 0x7ffff000 if HOST_2G_2G
+	default 0xbffff000 if !HOST_VMSPLIT_2G
+	default 0x7ffff000 if HOST_VMSPLIT_2G
 
 config STUB_START
 	hex
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 543ef4f405e9..5ce0bd486bbf 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -64,55 +64,6 @@ typedef unsigned int elf_greg_t;
 #define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
-/*
- * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
- * extra segments containing the vsyscall DSO contents. Dumping its
- * contents makes post-mortem fully interpretable later without matching up
- * the same kernel and hardware config to see what PC values meant.
- * Dumping its extra ELF program headers includes all the other information
- * a debugger needs to easily find how the vsyscall DSO was being used.
- */
-#define ELF_CORE_EXTRA_PHDRS	(find_vma(current->mm, VSYSCALL32_BASE) ? \
-    (VSYSCALL32_EHDR->e_phnum) : 0)
-#define ELF_CORE_WRITE_EXTRA_PHDRS \
-do { \
-	if (find_vma(current->mm, VSYSCALL32_BASE)) { \
-		const struct elf32_phdr *const vsyscall_phdrs = \
-			(const struct elf32_phdr *) (VSYSCALL32_BASE \
-						   + VSYSCALL32_EHDR->e_phoff);\
-		int i; \
-		Elf32_Off ofs = 0; \
-		for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \
-			struct elf32_phdr phdr = vsyscall_phdrs[i]; \
-			if (phdr.p_type == PT_LOAD) { \
-				BUG_ON(ofs != 0); \
-				ofs = phdr.p_offset = offset; \
-				phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \
-				phdr.p_filesz = phdr.p_memsz; \
-				offset += phdr.p_filesz; \
-			} \
-			else \
-				phdr.p_offset += ofs; \
-			phdr.p_paddr = 0; /* match other core phdrs */ \
-			DUMP_WRITE(&phdr, sizeof(phdr)); \
-		} \
-	} \
-} while (0)
-#define ELF_CORE_WRITE_EXTRA_DATA \
-do { \
-	if (find_vma(current->mm, VSYSCALL32_BASE)) { \
-		const struct elf32_phdr *const vsyscall_phdrs = \
-			(const struct elf32_phdr *) (VSYSCALL32_BASE \
-						     + VSYSCALL32_EHDR->e_phoff); \
-		int i; \
-		for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \
-			if (vsyscall_phdrs[i].p_type == PT_LOAD) \
-				DUMP_WRITE((void *) (u64) vsyscall_phdrs[i].p_vaddr,\
-					   PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \
-		} \
-	} \
-} while (0)
-
 struct elf_siginfo
 {
 	int	si_signo;			/* signal number */
diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
index 3e5ed20cba45..59f1fa155915 100644
--- a/arch/x86_64/ia32/syscall32.c
+++ b/arch/x86_64/ia32/syscall32.c
@@ -59,6 +59,13 @@ int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
 	vma->vm_end = VSYSCALL32_END;
 	/* MAYWRITE to allow gdb to COW and set breakpoints */
 	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
+	/*
+	 * Make sure the vDSO gets into every core dump.
+	 * Dumping its contents makes post-mortem fully interpretable later
+	 * without matching up the same kernel and hardware config to see
+	 * what PC values meant.
+	 */
+	vma->vm_flags |= VM_ALWAYSDUMP;
 	vma->vm_flags |= mm->def_flags;
 	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
 	vma->vm_ops = &syscall32_vm_ops;
@@ -75,6 +82,14 @@ int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
 	return 0;
 }
 
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_start == VSYSCALL32_BASE &&
+	    vma->vm_mm && vma->vm_mm->task_size == IA32_PAGE_OFFSET)
+		return "[vdso]";
+	return NULL;
+}
+
 static int __init init_syscall32(void)
 {
 	syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
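The new arch_vma_name() gives generic code a way to label the 32-bit vsyscall vma. A hedged sketch of how a /proc/<pid>/maps-style printer might consult such a hook; the trimmed-down struct, the VSYSCALL32_BASE value, and the helper names are illustrative only, not the kernel's code:

#include <stdio.h>

#define VSYSCALL32_BASE 0xffffe000UL   /* hypothetical base address for illustration */

/* Trimmed-down stand-in for struct vm_area_struct. */
struct vma_sketch {
	unsigned long vm_start;
	unsigned long vm_end;
	const char *file_path;   /* NULL for anonymous mappings */
};

/* Mirrors the shape of the arch_vma_name() added above. */
static const char *arch_vma_name_sketch(const struct vma_sketch *vma)
{
	if (vma->vm_start == VSYSCALL32_BASE)
		return "[vdso]";
	return NULL;
}

/* Print one maps-style line, falling back to the arch hook for a name. */
static void show_map_line(const struct vma_sketch *vma)
{
	const char *name = vma->file_path;

	if (!name)
		name = arch_vma_name_sketch(vma);
	if (!name)
		name = "";

	printf("%08lx-%08lx %s\n", vma->vm_start, vma->vm_end, name);
}

int main(void)
{
	struct vma_sketch vdso = { VSYSCALL32_BASE, VSYSCALL32_BASE + 0x1000, NULL };

	show_map_line(&vdso);   /* prints "ffffe000-fffff000 [vdso]" */
	return 0;
}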