-rw-r--r--  arch/x86/kernel/vsyscall_64.c  49
-rw-r--r--  arch/x86/mm/init_64.c          49
2 files changed, 49 insertions, 49 deletions
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 957779f4eb40..521d5ed19547 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -284,6 +284,55 @@ sigsegv:
 }
 
 /*
+ * A pseudo VMA to allow ptrace access for the vsyscall page. This only
+ * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
+ * not need special handling anymore:
+ */
+static const char *gate_vma_name(struct vm_area_struct *vma)
+{
+	return "[vsyscall]";
+}
+static struct vm_operations_struct gate_vma_ops = {
+	.name = gate_vma_name,
+};
+static struct vm_area_struct gate_vma = {
+	.vm_start	= VSYSCALL_ADDR,
+	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
+	.vm_page_prot	= PAGE_READONLY_EXEC,
+	.vm_flags	= VM_READ | VM_EXEC,
+	.vm_ops		= &gate_vma_ops,
+};
+
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+#ifdef CONFIG_IA32_EMULATION
+	if (!mm || mm->context.ia32_compat)
+		return NULL;
+#endif
+	return &gate_vma;
+}
+
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_area_struct *vma = get_gate_vma(mm);
+
+	if (!vma)
+		return 0;
+
+	return (addr >= vma->vm_start) && (addr < vma->vm_end);
+}
+
+/*
+ * Use this when you have no reliable mm, typically from interrupt
+ * context. It is less reliable than using a task's mm and may give
+ * false positives.
+ */
+int in_gate_area_no_mm(unsigned long addr)
+{
+	return (addr & PAGE_MASK) == VSYSCALL_ADDR;
+}
+
+/*
  * Assume __initcall executes before all user space. Hopefully kmod
  * doesn't violate that. We'll find out if it does.
  */
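For context: these helpers exist because generic mm code consults the gate VMA as a fallback when a normal VMA lookup misses, which is what lets ptrace and /proc/PID/mem read the vsyscall page even though the task has no real mapping for it. A minimal sketch of that lookup pattern follows; lookup_vma_or_gate() is a hypothetical helper used only for illustration (the real fallback logic lives in generic mm code and handles more cases), while find_vma(), in_gate_area() and get_gate_vma() are the actual kernel interfaces, the latter two being defined by this patch.

/* Illustration only: fall back to the gate VMA when no real mapping covers addr. */
static struct vm_area_struct *lookup_vma_or_gate(struct mm_struct *mm,
						 unsigned long addr)
{
	/* find_vma() returns the first VMA with vm_end > addr, so check containment. */
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (vma && addr >= vma->vm_start)
		return vma;

	/* No ordinary mapping: see whether addr falls in the pseudo VMA above. */
	if (in_gate_area(mm, addr))
		return get_gate_vma(mm);

	return NULL;
}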
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 4cb8763868fc..dd9ca9becc0f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1193,55 +1193,6 @@ int kern_addr_valid(unsigned long addr)
 	return pfn_valid(pte_pfn(*pte));
 }
 
-/*
- * A pseudo VMA to allow ptrace access for the vsyscall page. This only
- * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
- * not need special handling anymore:
- */
-static const char *gate_vma_name(struct vm_area_struct *vma)
-{
-	return "[vsyscall]";
-}
-static struct vm_operations_struct gate_vma_ops = {
-	.name = gate_vma_name,
-};
-static struct vm_area_struct gate_vma = {
-	.vm_start	= VSYSCALL_ADDR,
-	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
-	.vm_page_prot	= PAGE_READONLY_EXEC,
-	.vm_flags	= VM_READ | VM_EXEC,
-	.vm_ops		= &gate_vma_ops,
-};
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-#ifdef CONFIG_IA32_EMULATION
-	if (!mm || mm->context.ia32_compat)
-		return NULL;
-#endif
-	return &gate_vma;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-	struct vm_area_struct *vma = get_gate_vma(mm);
-
-	if (!vma)
-		return 0;
-
-	return (addr >= vma->vm_start) && (addr < vma->vm_end);
-}
-
-/*
- * Use this when you have no reliable mm, typically from interrupt
- * context. It is less reliable than using a task's mm and may give
- * false positives.
- */
-int in_gate_area_no_mm(unsigned long addr)
-{
-	return (addr & PAGE_MASK) == VSYSCALL_ADDR;
-}
-
 static unsigned long probe_memory_block_size(void)
 {
 	/* start from 2g */