author     Will Deacon <will.deacon@arm.com>           2014-07-09 14:22:11 -0400
committer  Catalin Marinas <catalin.marinas@arm.com>   2014-07-17 11:18:36 -0400
commit     8715493852783358ef8656a0054a14bf822509cf (patch)
tree       1d947307eead93e8b35664360c21f232e7e36ddb
parent     b2f8c07bcb7d1a3575f41444d2d8048d0c922762 (diff)
arm64: vdso: put vdso datapage in a separate vma
The VDSO datapage doesn't need to be executable (no code there) or
CoW-able (the kernel writes the page, so a private copy is totally
useless).
This patch moves the datapage into its own VMA, identified as "[vvar]"
in /proc/<pid>/maps.
Cc: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
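
As a quick sanity check (not part of this patch), the new layout can be observed from userspace: after the change, /proc/<pid>/maps should list the vDSO text as an executable "[vdso]" mapping and the data page as a separate read-only, non-executable "[vvar]" mapping directly above it. A minimal sketch along these lines, assuming only the standard /proc/self/maps format and the two VMA names, is:

/*
 * Hypothetical userspace check, not part of this patch: print the
 * vDSO-related mappings of the current process so the separate [vvar]
 * VMA and its permissions can be inspected.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *maps = fopen("/proc/self/maps", "r");
	char line[512];

	if (!maps) {
		perror("fopen");
		return 1;
	}

	while (fgets(line, sizeof(line), maps)) {
		/* Each maps line ends with the VMA name, e.g. "[vdso]" or "[vvar]". */
		if (strstr(line, "[vdso]") || strstr(line, "[vvar]"))
			fputs(line, stdout);
	}

	fclose(maps);
	return 0;
}

On a kernel with this patch applied, the expectation is an r-xp line named [vdso] followed by an r--p line named [vvar]; without it, only a single larger [vdso] mapping appears.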
-rw-r--r--  arch/arm64/kernel/vdso.c | 34
1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 50384fec56c4..84cafbc3eb54 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -138,11 +138,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 				int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long vdso_base, vdso_mapping_len;
+	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
 	int ret;
 
+	vdso_text_len = vdso_pages << PAGE_SHIFT;
 	/* Be sure to map the data page */
-	vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
+	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
 
 	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
@@ -152,35 +153,52 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	}
 	mm->context.vdso = (void *)vdso_base;
 
-	ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
+	ret = install_special_mapping(mm, vdso_base, vdso_text_len,
 				      VM_READ|VM_EXEC|
 				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 				      vdso_pagelist);
-	if (ret) {
-		mm->context.vdso = NULL;
+	if (ret)
+		goto up_fail;
+
+	vdso_base += vdso_text_len;
+	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				      VM_READ|VM_MAYREAD,
+				      vdso_pagelist + vdso_pages);
+	if (ret)
 		goto up_fail;
-	}
 
-up_fail:
 	up_write(&mm->mmap_sem);
+	return 0;
 
+up_fail:
+	mm->context.vdso = NULL;
+	up_write(&mm->mmap_sem);
 	return ret;
 }
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
+	unsigned long vdso_text;
+
+	if (!vma->vm_mm)
+		return NULL;
+
+	vdso_text = (unsigned long)vma->vm_mm->context.vdso;
+
 	/*
 	 * We can re-use the vdso pointer in mm_context_t for identifying
 	 * the vectors page for compat applications. The vDSO will always
 	 * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
 	 * it conflicting with the vectors base.
 	 */
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
+	if (vma->vm_start == vdso_text) {
 #ifdef CONFIG_COMPAT
 		if (vma->vm_start == AARCH32_VECTORS_BASE)
 			return "[vectors]";
 #endif
 		return "[vdso]";
+	} else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) {
+		return "[vvar]";
 	}
 
 	return NULL;