Diffstat (limited to 'arch/powerpc/kernel/vdso.c')
 arch/powerpc/kernel/vdso.c | 57 ++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 36 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 573afb68d69e..bc3e15be3087 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -223,6 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	struct vm_area_struct *vma;
 	unsigned long vdso_pages;
 	unsigned long vdso_base;
+	int rc;
 
 #ifdef CONFIG_PPC64
 	if (test_thread_flag(TIF_32BIT)) {
@@ -237,20 +238,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	vdso_base = VDSO32_MBASE;
 #endif
 
-	current->thread.vdso_base = 0;
+	current->mm->context.vdso_base = 0;
 
 	/* vDSO has a problem and was disabled, just don't "enable" it for the
 	 * process
 	 */
 	if (vdso_pages == 0)
 		return 0;
-
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-	if (vma == NULL)
-		return -ENOMEM;
-
-	memset(vma, 0, sizeof(*vma));
-
 	/* Add a page to the vdso size for the data page */
 	vdso_pages ++;
 
@@ -259,17 +253,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	 * at vdso_base which is the "natural" base for it, but we might fail
 	 * and end up putting it elsewhere.
 	 */
+	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, vdso_base,
 				      vdso_pages << PAGE_SHIFT, 0, 0);
-	if (vdso_base & ~PAGE_MASK) {
-		kmem_cache_free(vm_area_cachep, vma);
-		return (int)vdso_base;
+	if (IS_ERR_VALUE(vdso_base)) {
+		rc = vdso_base;
+		goto fail_mmapsem;
 	}
 
-	current->thread.vdso_base = vdso_base;
 
+	/* Allocate a VMA structure and fill it up */
+	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	if (vma == NULL) {
+		rc = -ENOMEM;
+		goto fail_mmapsem;
+	}
 	vma->vm_mm = mm;
-	vma->vm_start = current->thread.vdso_base;
+	vma->vm_start = vdso_base;
 	vma->vm_end = vma->vm_start + (vdso_pages << PAGE_SHIFT);
 
 	/*
@@ -282,23 +282,38 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	 * It's fine to use that for setting breakpoints in the vDSO code
 	 * pages though
 	 */
-	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
 	vma->vm_flags |= mm->def_flags;
 	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
 	vma->vm_ops = &vdso_vmops;
 
-	down_write(&mm->mmap_sem);
-	if (insert_vm_struct(mm, vma)) {
-		up_write(&mm->mmap_sem);
-		kmem_cache_free(vm_area_cachep, vma);
-		return -ENOMEM;
-	}
+	/* Insert new VMA */
+	rc = insert_vm_struct(mm, vma);
+	if (rc)
+		goto fail_vma;
+
+	/* Put vDSO base into mm struct and account for memory usage */
+	current->mm->context.vdso_base = vdso_base;
 	mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 	up_write(&mm->mmap_sem);
-
 	return 0;
+
+ fail_vma:
+	kmem_cache_free(vm_area_cachep, vma);
+ fail_mmapsem:
+	up_write(&mm->mmap_sem);
+	return rc;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
+		return "[vdso]";
+	return NULL;
 }
 
+
+
 static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
 				    unsigned long *size)
 {