about summary refs log tree commit diff stats
path: root/arch/arm64
diff options
context:
space:
mode:
authorWill Deacon <will.deacon@arm.com>2014-07-09 14:22:12 -0400
committerCatalin Marinas <catalin.marinas@arm.com>2014-07-17 11:18:46 -0400
commit2fea7f6c98f5957e539eb8aa0ce849729b900342 (patch)
tree3e0a70f2b96215f509621801d3d6d2d941a72db1 /arch/arm64
parent8715493852783358ef8656a0054a14bf822509cf (diff)
arm64: vdso: move to _install_special_mapping and remove arch_vma_name
_install_special_mapping replaces install_special_mapping and removes the need to detect special VMA in arch_vma_name. This patch moves the vdso and compat vectors page code over to the new API. Cc: Andy Lutomirski <luto@amacapital.net> Signed-off-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64')
-rw-r--r-- arch/arm64/kernel/vdso.c | 80
1 file changed, 35 insertions(+), 45 deletions(-)
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 84cafbc3eb54..60ae12087d9f 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -88,22 +88,29 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
88{ 88{
89 struct mm_struct *mm = current->mm; 89 struct mm_struct *mm = current->mm;
90 unsigned long addr = AARCH32_VECTORS_BASE; 90 unsigned long addr = AARCH32_VECTORS_BASE;
91 int ret; 91 static struct vm_special_mapping spec = {
92 .name = "[vectors]",
93 .pages = vectors_page,
94
95 };
96 void *ret;
92 97
93 down_write(&mm->mmap_sem); 98 down_write(&mm->mmap_sem);
94 current->mm->context.vdso = (void *)addr; 99 current->mm->context.vdso = (void *)addr;
95 100
96 /* Map vectors page at the high address. */ 101 /* Map vectors page at the high address. */
97 ret = install_special_mapping(mm, addr, PAGE_SIZE, 102 ret = _install_special_mapping(mm, addr, PAGE_SIZE,
98 VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC, 103 VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
99 vectors_page); 104 &spec);
100 105
101 up_write(&mm->mmap_sem); 106 up_write(&mm->mmap_sem);
102 107
103 return ret; 108 return PTR_ERR_OR_ZERO(ret);
104} 109}
105#endif /* CONFIG_COMPAT */ 110#endif /* CONFIG_COMPAT */
106 111
112static struct vm_special_mapping vdso_spec[2];
113
107static int __init vdso_init(void) 114static int __init vdso_init(void)
108{ 115{
109 int i; 116 int i;
@@ -130,6 +137,17 @@ static int __init vdso_init(void)
130 /* Grab the vDSO data page. */ 137 /* Grab the vDSO data page. */
131 vdso_pagelist[i] = virt_to_page(vdso_data); 138 vdso_pagelist[i] = virt_to_page(vdso_data);
132 139
140 /* Populate the special mapping structures */
141 vdso_spec[0] = (struct vm_special_mapping) {
142 .name = "[vdso]",
143 .pages = vdso_pagelist,
144 };
145
146 vdso_spec[1] = (struct vm_special_mapping) {
147 .name = "[vvar]",
148 .pages = vdso_pagelist + vdso_pages,
149 };
150
133 return 0; 151 return 0;
134} 152}
135arch_initcall(vdso_init); 153arch_initcall(vdso_init);
@@ -139,7 +157,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
139{ 157{
140 struct mm_struct *mm = current->mm; 158 struct mm_struct *mm = current->mm;
141 unsigned long vdso_base, vdso_text_len, vdso_mapping_len; 159 unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
142 int ret; 160 void *ret;
143 161
144 vdso_text_len = vdso_pages << PAGE_SHIFT; 162 vdso_text_len = vdso_pages << PAGE_SHIFT;
145 /* Be sure to map the data page */ 163 /* Be sure to map the data page */
@@ -148,23 +166,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
148 down_write(&mm->mmap_sem); 166 down_write(&mm->mmap_sem);
149 vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); 167 vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
150 if (IS_ERR_VALUE(vdso_base)) { 168 if (IS_ERR_VALUE(vdso_base)) {
151 ret = vdso_base; 169 ret = ERR_PTR(vdso_base);
152 goto up_fail; 170 goto up_fail;
153 } 171 }
154 mm->context.vdso = (void *)vdso_base; 172 mm->context.vdso = (void *)vdso_base;
155 173
156 ret = install_special_mapping(mm, vdso_base, vdso_text_len, 174 ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
157 VM_READ|VM_EXEC| 175 VM_READ|VM_EXEC|
158 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 176 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
159 vdso_pagelist); 177 &vdso_spec[0]);
160 if (ret) 178 if (IS_ERR(ret))
161 goto up_fail; 179 goto up_fail;
162 180
163 vdso_base += vdso_text_len; 181 vdso_base += vdso_text_len;
164 ret = install_special_mapping(mm, vdso_base, PAGE_SIZE, 182 ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
165 VM_READ|VM_MAYREAD, 183 VM_READ|VM_MAYREAD,
166 vdso_pagelist + vdso_pages); 184 &vdso_spec[1]);
167 if (ret) 185 if (IS_ERR(ret))
168 goto up_fail; 186 goto up_fail;
169 187
170 up_write(&mm->mmap_sem); 188 up_write(&mm->mmap_sem);
@@ -173,35 +191,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
173up_fail: 191up_fail:
174 mm->context.vdso = NULL; 192 mm->context.vdso = NULL;
175 up_write(&mm->mmap_sem); 193 up_write(&mm->mmap_sem);
176 return ret; 194 return PTR_ERR(ret);
177}
178
179const char *arch_vma_name(struct vm_area_struct *vma)
180{
181 unsigned long vdso_text;
182
183 if (!vma->vm_mm)
184 return NULL;
185
186 vdso_text = (unsigned long)vma->vm_mm->context.vdso;
187
188 /*
189 * We can re-use the vdso pointer in mm_context_t for identifying
190 * the vectors page for compat applications. The vDSO will always
191 * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
192 * it conflicting with the vectors base.
193 */
194 if (vma->vm_start == vdso_text) {
195#ifdef CONFIG_COMPAT
196 if (vma->vm_start == AARCH32_VECTORS_BASE)
197 return "[vectors]";
198#endif
199 return "[vdso]";
200 } else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) {
201 return "[vvar]";
202 }
203
204 return NULL;
205} 195}
206 196
207/* 197/*