author    Roland McGrath <roland@redhat.com>                    2007-02-08 17:20:42 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-09 12:25:47 -0500
commit    7d91d531900bfa1165d445390b3b13a8013f98f7 (patch)
tree      ede8518a1cc86191b344462d12e19fb37cb3ccbf
parent    fa5dc22f8586cc3742413dd05f5cd9e039dfab9e (diff)
[PATCH] i386 vDSO: use install_special_mapping

This patch uses install_special_mapping for the i386 vDSO setup,
consolidating duplicated code.

Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  arch/i386/kernel/sysenter.c | 53
1 file changed, 11 insertions(+), 42 deletions(-)
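
Note: the new code below relies on install_special_mapping(), the helper
introduced by the parent commit (fa5dc22f). For reference, a rough sketch of
that helper's interface, assuming the mm/mmap.c prototype of this kernel
series (see mm/mmap.c in the tree for the authoritative declaration):

	/*
	 * Sketch only (assumed interface): insert a VMA of length len at
	 * addr in mm, backed by the given pages and carrying the given VM
	 * flags; returns 0 on success or a negative errno.
	 */
	int install_special_mapping(struct mm_struct *mm,
				    unsigned long addr, unsigned long len,
				    unsigned long vm_flags, struct page **pages);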
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 5da744204d10..bc882a2b1db6 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -70,11 +70,12 @@ void enable_sep_cpu(void)
  */
 extern const char vsyscall_int80_start, vsyscall_int80_end;
 extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
-static void *syscall_page;
+static struct page *syscall_pages[1];
 
 int __init sysenter_setup(void)
 {
-	syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
+	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
+	syscall_pages[0] = virt_to_page(syscall_page);
 
 #ifdef CONFIG_COMPAT_VDSO
 	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
@@ -96,31 +97,12 @@ int __init sysenter_setup(void)
 }
 
 #ifndef CONFIG_COMPAT_VDSO
-static struct page *syscall_nopage(struct vm_area_struct *vma,
-				unsigned long adr, int *type)
-{
-	struct page *p = virt_to_page(adr - vma->vm_start + syscall_page);
-	get_page(p);
-	return p;
-}
-
-/* Prevent VMA merging */
-static void syscall_vma_close(struct vm_area_struct *vma)
-{
-}
-
-static struct vm_operations_struct syscall_vm_ops = {
-	.close = syscall_vma_close,
-	.nopage = syscall_nopage,
-};
-
 /* Defined in vsyscall-sysenter.S */
 extern void SYSENTER_RETURN;
 
 /* Setup a VMA at program startup for the vsyscall page */
 int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr;
 	int ret;
@@ -132,38 +114,25 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
 		goto up_fail;
 	}
 
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-	if (!vma) {
-		ret = -ENOMEM;
-		goto up_fail;
-	}
-
-	vma->vm_start = addr;
-	vma->vm_end = addr + PAGE_SIZE;
-	/* MAYWRITE to allow gdb to COW and set breakpoints */
-	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
 	/*
+	 * MAYWRITE to allow gdb to COW and set breakpoints
+	 *
 	 * Make sure the vDSO gets into every core dump.
 	 * Dumping its contents makes post-mortem fully interpretable later
 	 * without matching up the same kernel and hardware config to see
 	 * what PC values meant.
 	 */
-	vma->vm_flags |= VM_ALWAYSDUMP;
-	vma->vm_flags |= mm->def_flags;
-	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
-	vma->vm_ops = &syscall_vm_ops;
-	vma->vm_mm = mm;
-
-	ret = insert_vm_struct(mm, vma);
-	if (unlikely(ret)) {
-		kmem_cache_free(vm_area_cachep, vma);
+	ret = install_special_mapping(mm, addr, PAGE_SIZE,
+				      VM_READ|VM_EXEC|
+				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+				      VM_ALWAYSDUMP,
+				      syscall_pages);
+	if (ret)
 		goto up_fail;
-	}
 
 	current->mm->context.vdso = (void *)addr;
 	current_thread_info()->sysenter_return =
 		(void *)VDSO_SYM(&SYSENTER_RETURN);
-	mm->total_vm++;
 up_fail:
 	up_write(&mm->mmap_sem);
 	return ret;