author	Roland McGrath <roland@redhat.com>	2007-02-08 17:20:43 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-09 12:25:47 -0500
commit	c13e4ca247311c294b032089e0d05e96f2708c16 (patch)
tree	c0aa349228370a3021046dd4eca3f032eea425c0 /arch/powerpc
parent	dc5882b20a69fb16219cc61ae3d21d73dd6360a7 (diff)
[PATCH] powerpc vDSO: use install_special_mapping
This patch uses install_special_mapping for the powerpc vDSO setup,
consolidating duplicated code.
Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/vdso.c	104
1 file changed, 27 insertions, 77 deletions
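For orientation before the diff: the consolidated path boils down to a single install_special_mapping() call, using the signature this kernel series introduced (int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, struct page **pages)). The helper name map_vdso_sketch below is purely illustrative and not part of the patch; the flag set and the NULL-terminated pagelist convention are taken verbatim from the hunks that follow.

#include <linux/mm.h>

/*
 * Illustrative-only sketch of the call the patch switches to.
 * install_special_mapping() allocates and inserts the VMA itself, so the
 * hand-rolled kmem_cache_zalloc()/insert_vm_struct() sequence and the
 * custom nopage() handler removed below are no longer needed.
 */
static int map_vdso_sketch(struct mm_struct *mm, unsigned long vdso_base,
			   unsigned long vdso_pages,
			   struct page **vdso_pagelist)
{
	/*
	 * vdso_pagelist ends with the shared vdso_data page followed by a
	 * NULL terminator, exactly as vdso_init() builds it in the last hunk.
	 */
	return install_special_mapping(mm, vdso_base,
				       vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				       VM_ALWAYSDUMP,
				       vdso_pagelist);
}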
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index ae0ede19879d..50149ec6efa4 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -49,9 +49,13 @@
 /* Max supported size for symbol names */
 #define MAX_SYMNAME	64
 
+#define VDSO32_MAXPAGES	(((0x3000 + PAGE_MASK) >> PAGE_SHIFT) + 2)
+#define VDSO64_MAXPAGES	(((0x3000 + PAGE_MASK) >> PAGE_SHIFT) + 2)
+
 extern char vdso32_start, vdso32_end;
 static void *vdso32_kbase = &vdso32_start;
 unsigned int vdso32_pages;
+static struct page *vdso32_pagelist[VDSO32_MAXPAGES];
 unsigned long vdso32_sigtramp;
 unsigned long vdso32_rt_sigtramp;
 
@@ -59,6 +63,7 @@ unsigned long vdso32_rt_sigtramp;
 extern char vdso64_start, vdso64_end;
 static void *vdso64_kbase = &vdso64_start;
 unsigned int vdso64_pages;
+static struct page *vdso64_pagelist[VDSO64_MAXPAGES];
 unsigned long vdso64_rt_sigtramp;
 #endif /* CONFIG_PPC64 */
 
@@ -165,55 +170,6 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
 #endif /* DEBUG */
 
 /*
- * Keep a dummy vma_close for now, it will prevent VMA merging.
- */
-static void vdso_vma_close(struct vm_area_struct * vma)
-{
-}
-
-/*
- * Our nopage() function, maps in the actual vDSO kernel pages, they will
- * be mapped read-only by do_no_page(), and eventually COW'ed, either
- * right away for an initial write access, or by do_wp_page().
- */
-static struct page * vdso_vma_nopage(struct vm_area_struct * vma,
-				     unsigned long address, int *type)
-{
-	unsigned long offset = address - vma->vm_start;
-	struct page *pg;
-#ifdef CONFIG_PPC64
-	void *vbase = (vma->vm_mm->task_size > TASK_SIZE_USER32) ?
-		vdso64_kbase : vdso32_kbase;
-#else
-	void *vbase = vdso32_kbase;
-#endif
-
-	DBG("vdso_vma_nopage(current: %s, address: %016lx, off: %lx)\n",
-	    current->comm, address, offset);
-
-	if (address < vma->vm_start || address > vma->vm_end)
-		return NOPAGE_SIGBUS;
-
-	/*
-	 * Last page is systemcfg.
-	 */
-	if ((vma->vm_end - address) <= PAGE_SIZE)
-		pg = virt_to_page(vdso_data);
-	else
-		pg = virt_to_page(vbase + offset);
-
-	get_page(pg);
-	DBG(" ->page count: %d\n", page_count(pg));
-
-	return pg;
-}
-
-static struct vm_operations_struct vdso_vmops = {
-	.close = vdso_vma_close,
-	.nopage = vdso_vma_nopage,
-};
-
-/*
  * This is called from binfmt_elf, we create the special vma for the
  * vDSO and insert it into the mm struct tree
  */
@@ -221,20 +177,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 				int executable_stack)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct page **vdso_pagelist;
 	unsigned long vdso_pages;
 	unsigned long vdso_base;
 	int rc;
 
 #ifdef CONFIG_PPC64
 	if (test_thread_flag(TIF_32BIT)) {
+		vdso_pagelist = vdso32_pagelist;
 		vdso_pages = vdso32_pages;
 		vdso_base = VDSO32_MBASE;
 	} else {
+		vdso_pagelist = vdso64_pagelist;
 		vdso_pages = vdso64_pages;
 		vdso_base = VDSO64_MBASE;
 	}
 #else
+	vdso_pagelist = vdso32_pagelist;
 	vdso_pages = vdso32_pages;
 	vdso_base = VDSO32_MBASE;
 #endif
@@ -262,17 +221,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 		goto fail_mmapsem;
 	}
 
-
-	/* Allocate a VMA structure and fill it up */
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-	if (vma == NULL) {
-		rc = -ENOMEM;
-		goto fail_mmapsem;
-	}
-	vma->vm_mm = mm;
-	vma->vm_start = vdso_base;
-	vma->vm_end = vma->vm_start + (vdso_pages << PAGE_SHIFT);
-
 	/*
 	 * our vma flags don't have VM_WRITE so by default, the process isn't
 	 * allowed to write those pages.
@@ -282,32 +230,26 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	 * and your nice userland gettimeofday will be totally dead.
 	 * It's fine to use that for setting breakpoints in the vDSO code
 	 * pages though
-	 */
-	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
-	/*
+	 *
 	 * Make sure the vDSO gets into every core dump.
 	 * Dumping its contents makes post-mortem fully interpretable later
 	 * without matching up the same kernel and hardware config to see
 	 * what PC values meant.
 	 */
-	vma->vm_flags |= VM_ALWAYSDUMP;
-	vma->vm_flags |= mm->def_flags;
-	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
-	vma->vm_ops = &vdso_vmops;
-
-	/* Insert new VMA */
-	rc = insert_vm_struct(mm, vma);
+	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+				     VM_READ|VM_EXEC|
+				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+				     VM_ALWAYSDUMP,
+				     vdso_pagelist);
 	if (rc)
-		goto fail_vma;
+		goto fail_mmapsem;
 
-	/* Put vDSO base into mm struct and account for memory usage */
+	/* Put vDSO base into mm struct */
 	current->mm->context.vdso_base = vdso_base;
-	mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
 	up_write(&mm->mmap_sem);
 	return 0;
 
-fail_vma:
-	kmem_cache_free(vm_area_cachep, vma);
 fail_mmapsem:
 	up_write(&mm->mmap_sem);
 	return rc;
@@ -778,18 +720,26 @@ void __init vdso_init(void)
 	}
 
 	/* Make sure pages are in the correct state */
+	BUG_ON(vdso32_pages + 2 > VDSO32_MAXPAGES);
 	for (i = 0; i < vdso32_pages; i++) {
 		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
 		ClearPageReserved(pg);
 		get_page(pg);
-
+		vdso32_pagelist[i] = pg;
 	}
+	vdso32_pagelist[i++] = virt_to_page(vdso_data);
+	vdso32_pagelist[i] = NULL;
+
 #ifdef CONFIG_PPC64
+	BUG_ON(vdso64_pages + 2 > VDSO64_MAXPAGES);
 	for (i = 0; i < vdso64_pages; i++) {
 		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
 		ClearPageReserved(pg);
 		get_page(pg);
+		vdso64_pagelist[i] = pg;
 	}
+	vdso64_pagelist[i++] = virt_to_page(vdso_data);
+	vdso64_pagelist[i] = NULL;
 #endif /* CONFIG_PPC64 */
 
 	get_page(virt_to_page(vdso_data));