author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2005-04-16 18:24:35 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>           2005-04-16 18:24:35 -0400
commit     547ee84cea37696d25c93306e909378a87db2f66
tree       e21a5ce886975623d07add60beb223e6f36bab80 /arch/ppc64
parent     fa89c5092eddcbcb425a1416f85906e3cc519793
[PATCH] ppc64: Improve mapping of vDSO
This patch reworks the way the ppc64 vDSO is mapped in user memory by the
kernel to make it more robust against possible collisions with executable
segments. Instead of just whacking a VMA at 1Mb, I now use
get_unmapped_area() with a hint, and I moved the mapping of the vDSO to
after the mapping of the various ELF segments and of the interpreter, so
that conflicts get caught properly (it still has to happen before
create_elf_tables since the latter fills the AT_SYSINFO_EHDR entry with the
proper address).
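For illustration only (not part of the patch): mmap() without MAP_FIXED
treats its address argument as a hint in much the same way
get_unmapped_area() treats vdso_base here, so a small user-space sketch of
the idea looks like the following; the 1Mb address is just the example hint,
and the caller learns the real placement from the return value, which is
what ends up published to user space via AT_SYSINFO_EHDR in the vDSO case.

/* User-space analogy (not the kernel path): the address is a hint, not a
 * demand. If the hinted region is busy, the kernel places the mapping
 * elsewhere instead of clobbering anything, and the caller reads back the
 * address that was actually used.
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *hint = (void *)0x100000;          /* the vDSO's "natural" 1Mb base */
	void *got  = mmap(hint, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (got == MAP_FAILED)
		return 1;
	printf("asked for %p, got %p\n", hint, got);
	return 0;
}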
While I was at it, I also changed the 32-bit and 64-bit vDSOs to link at
their "natural" address of 1Mb instead of 0. This is the address where
they are normally mapped in the absence of a conflict. By doing so, it
should be possible to properly prelink them once it's been verified to
work with glibc.
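Purely as an illustration (getauxval() is a much later glibc addition, so
this did not exist at the time of the patch): a process can read back the
AT_SYSINFO_EHDR auxv entry that create_elf_tables fills in, and on a kernel
with this change it would normally show the natural 1Mb base when there is
no conflict.

/* Illustration only: getauxval() postdates this patch (glibc 2.16), but it
 * reads the same AT_SYSINFO_EHDR auxv entry that create_elf_tables() fills
 * with whatever address the vDSO actually ended up at.
 */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("vDSO mapped at 0x%lx\n", vdso);
	else
		printf("no vDSO in this process\n");
	return 0;
}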
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/ppc64')
-rw-r--r--  arch/ppc64/kernel/vdso.c  19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/arch/ppc64/kernel/vdso.c b/arch/ppc64/kernel/vdso.c
index 8c4597224b71..4777676365fe 100644
--- a/arch/ppc64/kernel/vdso.c
+++ b/arch/ppc64/kernel/vdso.c
@@ -213,13 +213,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
 		vdso_base = VDSO64_MBASE;
 	}
 
+	current->thread.vdso_base = 0;
+
 	/* vDSO has a problem and was disabled, just don't "enable" it for the
 	 * process
 	 */
-	if (vdso_pages == 0) {
-		current->thread.vdso_base = 0;
+	if (vdso_pages == 0)
 		return 0;
-	}
+
 	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
 	if (vma == NULL)
 		return -ENOMEM;
@@ -230,12 +231,16 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
 	memset(vma, 0, sizeof(*vma));
 
 	/*
-	 * pick a base address for the vDSO in process space. We have a default
-	 * base of 1Mb on which we had a random offset up to 1Mb.
-	 * XXX: Add possibility for a program header to specify that location
+	 * pick a base address for the vDSO in process space. We try to put it
+	 * at vdso_base which is the "natural" base for it, but we might fail
+	 * and end up putting it elsewhere.
 	 */
+	vdso_base = get_unmapped_area(NULL, vdso_base,
+				      vdso_pages << PAGE_SHIFT, 0, 0);
+	if (vdso_base & ~PAGE_MASK)
+		return (int)vdso_base;
+
 	current->thread.vdso_base = vdso_base;
-	/* + ((unsigned long)vma & 0x000ff000); */
 
 	vma->vm_mm = mm;
 	vma->vm_start = current->thread.vdso_base;
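A note on the new error check (illustrative sketch only, with a 4K
PAGE_SIZE assumed): get_unmapped_area() returns either a page-aligned
address or a negative errno stored in an unsigned long, so any low-order
bits set in the result signal a failure, which is what the
"vdso_base & ~PAGE_MASK" test above detects.

/* Standalone sketch of the check used above: a page-aligned value is a
 * valid address, while a negative errno cast to unsigned long has its
 * low-order bits set. PAGE_SIZE of 4K is assumed purely for illustration.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static int gua_failed(unsigned long addr)
{
	return (addr & ~PAGE_MASK) != 0;
}

int main(void)
{
	printf("0x100000      -> %s\n", gua_failed(0x100000) ? "error" : "ok");
	printf("-ENOMEM (-12) -> %s\n", gua_failed((unsigned long)-12) ? "error" : "ok");
	return 0;
}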