-rw-r--r--  arch/powerpc/kernel/signal_32.c |  8
-rw-r--r--  arch/powerpc/kernel/signal_64.c |  4
-rw-r--r--  arch/powerpc/kernel/vdso.c      | 57
-rw-r--r--  include/asm-powerpc/elf.h       |  2
-rw-r--r--  include/asm-powerpc/mmu.h       |  1
-rw-r--r--  include/asm-powerpc/page.h      |  3
-rw-r--r--  include/asm-powerpc/processor.h |  1
7 files changed, 47 insertions, 29 deletions
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 01e3c08cb550..22f078984845 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -757,10 +757,10 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
 
 	/* Save user registers on the stack */
 	frame = &rt_sf->uc.uc_mcontext;
-	if (vdso32_rt_sigtramp && current->thread.vdso_base) {
+	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
 		if (save_user_regs(regs, frame, 0))
 			goto badframe;
-		regs->link = current->thread.vdso_base + vdso32_rt_sigtramp;
+		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
 	} else {
 		if (save_user_regs(regs, frame, __NR_rt_sigreturn))
 			goto badframe;
@@ -1029,10 +1029,10 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
 	    || __put_user(sig, &sc->signal))
 		goto badframe;
 
-	if (vdso32_sigtramp && current->thread.vdso_base) {
+	if (vdso32_sigtramp && current->mm->context.vdso_base) {
 		if (save_user_regs(regs, &frame->mctx, 0))
 			goto badframe;
-		regs->link = current->thread.vdso_base + vdso32_sigtramp;
+		regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
 	} else {
 		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
 			goto badframe;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 27f65b95184d..23ba69c26913 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -394,8 +394,8 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
 		current->thread.fpscr.val = 0;
 
 	/* Set up to return from userspace. */
-	if (vdso64_rt_sigtramp && current->thread.vdso_base) {
-		regs->link = current->thread.vdso_base + vdso64_rt_sigtramp;
+	if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
+		regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
 	} else {
 		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
 		if (err)
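
Both the 32-bit and 64-bit signal paths above pick the sigreturn trampoline the same way: if the vDSO is mapped for this mm (mm->context.vdso_base is non-zero) and the trampoline offset was resolved when the vDSO was parsed, the handler's return address (regs->link) points into the vDSO; otherwise a trampoline is written onto the user stack. A minimal sketch of that selection, with hypothetical names, not the kernel code itself:

    /* Illustrative sketch only.
     * vdso_base:   per-mm vDSO load address (0 when the vDSO is disabled)
     * tramp_off:   offset of the sigreturn trampoline inside the vDSO image
     * stack_tramp: fallback address of a trampoline placed on the user stack
     */
    static unsigned long pick_sigreturn_trampoline(unsigned long vdso_base,
                                                   unsigned long tramp_off,
                                                   unsigned long stack_tramp)
    {
            if (tramp_off && vdso_base)
                    return vdso_base + tramp_off;   /* return via the vDSO */
            return stack_tramp;                     /* fall back to the stack copy */
    }
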
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 573afb68d69e..bc3e15be3087 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -223,6 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	struct vm_area_struct *vma;
 	unsigned long vdso_pages;
 	unsigned long vdso_base;
+	int rc;
 
 #ifdef CONFIG_PPC64
 	if (test_thread_flag(TIF_32BIT)) {
@@ -237,20 +238,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	vdso_base = VDSO32_MBASE;
 #endif
 
-	current->thread.vdso_base = 0;
+	current->mm->context.vdso_base = 0;
 
 	/* vDSO has a problem and was disabled, just don't "enable" it for the
 	 * process
 	 */
 	if (vdso_pages == 0)
 		return 0;
-
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-	if (vma == NULL)
-		return -ENOMEM;
-
-	memset(vma, 0, sizeof(*vma));
-
 	/* Add a page to the vdso size for the data page */
 	vdso_pages ++;
 
@@ -259,17 +253,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	 * at vdso_base which is the "natural" base for it, but we might fail
 	 * and end up putting it elsewhere.
 	 */
+	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, vdso_base,
 				      vdso_pages << PAGE_SHIFT, 0, 0);
-	if (vdso_base & ~PAGE_MASK) {
-		kmem_cache_free(vm_area_cachep, vma);
-		return (int)vdso_base;
+	if (IS_ERR_VALUE(vdso_base)) {
+		rc = vdso_base;
+		goto fail_mmapsem;
 	}
 
-	current->thread.vdso_base = vdso_base;
 
+	/* Allocate a VMA structure and fill it up */
+	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	if (vma == NULL) {
+		rc = -ENOMEM;
+		goto fail_mmapsem;
+	}
 	vma->vm_mm = mm;
-	vma->vm_start = current->thread.vdso_base;
+	vma->vm_start = vdso_base;
 	vma->vm_end = vma->vm_start + (vdso_pages << PAGE_SHIFT);
 
 	/*
@@ -282,23 +282,38 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	 * It's fine to use that for setting breakpoints in the vDSO code
 	 * pages though
 	 */
-	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
 	vma->vm_flags |= mm->def_flags;
 	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
 	vma->vm_ops = &vdso_vmops;
 
-	down_write(&mm->mmap_sem);
-	if (insert_vm_struct(mm, vma)) {
-		up_write(&mm->mmap_sem);
-		kmem_cache_free(vm_area_cachep, vma);
-		return -ENOMEM;
-	}
+	/* Insert new VMA */
+	rc = insert_vm_struct(mm, vma);
+	if (rc)
+		goto fail_vma;
+
+	/* Put vDSO base into mm struct and account for memory usage */
+	current->mm->context.vdso_base = vdso_base;
 	mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 	up_write(&mm->mmap_sem);
-
 	return 0;
+
+ fail_vma:
+	kmem_cache_free(vm_area_cachep, vma);
+ fail_mmapsem:
+	up_write(&mm->mmap_sem);
+	return rc;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
+		return "[vdso]";
+	return NULL;
 }
 
+
+
 static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
 				    unsigned long *size)
 {
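
With arch_vma_name() in place, /proc/<pid>/maps can label the region inserted by arch_setup_additional_pages() as "[vdso]" (assuming the generic /proc side calls arch_vma_name()). A small userspace check, illustrative only and relying on nothing beyond procfs:

    /* Print the vDSO line(s) from our own maps file. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/self/maps", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    if (strstr(line, "[vdso]"))
                            fputs(line, stdout);
            fclose(f);
            return 0;
    }
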
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index 94d228f9c6ac..319655ce66c0 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -294,7 +294,7 @@ do { \
 	NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize);			\
 	NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize);			\
 	NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize);			\
-	VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base)	\
+	VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base)	\
 } while (0)
 
 /* PowerPC64 relocations defined by the ABIs */
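
ARCH_DLINFO exports the vDSO base to userspace through the AT_SYSINFO_EHDR aux vector entry, now taken from mm->context rather than the thread struct; the dynamic linker uses it to find the vDSO's ELF header. For illustration, a userspace sketch; getauxval() is a much later glibc addition (2.16+) and is used here only for brevity:

    /* Locate the vDSO ELF image via the aux vector. */
    #include <elf.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/auxv.h>

    int main(void)
    {
            unsigned long base = getauxval(AT_SYSINFO_EHDR);

            if (!base) {
                    puts("no vDSO advertised");
                    return 1;
            }
            printf("vDSO ELF header at 0x%lx (%s)\n", base,
                   memcmp((void *)base, ELFMAG, SELFMAG) ? "bad magic" : "valid magic");
            return 0;
    }
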
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 31f721994bd8..96e47d1ce979 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -360,6 +360,7 @@ typedef struct {
 #ifdef CONFIG_HUGETLB_PAGE
 	u16 low_htlb_areas, high_htlb_areas;
 #endif
+	unsigned long vdso_base;
 } mm_context_t;
 
 
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index ae610b620487..a315d0c0d96a 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -192,6 +192,9 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr,
 			   struct page *p);
 extern int page_is_ram(unsigned long pfn);
 
+struct vm_area_struct;
+extern const char *arch_vma_name(struct vm_area_struct *vma);
+
 #include <asm-generic/memory_model.h>
 #endif /* __ASSEMBLY__ */
 
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index 93f83efeb310..d5c7ef1cca26 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -153,7 +153,6 @@ struct thread_struct {
 	unsigned long start_tb;		/* Start purr when proc switched in */
 	unsigned long accum_tb;		/* Total accumilated purr for process */
 #endif
-	unsigned long vdso_base;	/* base of the vDSO library */
 	unsigned long dabr;		/* Data address breakpoint register */
 #ifdef CONFIG_ALTIVEC
 	/* Complete AltiVec register set */