author		Andy Lutomirski <luto@amacapital.net>	2014-05-05 15:19:36 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2014-05-05 16:19:01 -0400
commit		f40c330091c7aa9956ab66f97a3abc8a68b67240 (patch)
tree		b0c03dce9bf67eb15c98980a7323ca35122d10df /arch/x86/mm
parent		18d0a6fd227177fd243993179c90e454d0638b06 (diff)
x86, vdso: Move the vvar and hpet mappings next to the 64-bit vDSO
This makes the 64-bit and x32 vdsos use the same mechanism as the
32-bit vdso.  Most of the churn is deleting all the old fixmap code.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/8af87023f57f6bb96ec8d17fce3f88018195b49b.1399317206.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
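The recurring pattern in the diff below is the replacement of VSYSCALL_START/VSYSCALL_END range checks with a single comparison against VSYSCALL_ADDR, which is valid because the vsyscall region now spans exactly one page. A minimal userspace sketch of that equivalence follows; the page size and vsyscall address are hard-coded for illustration rather than taken from kernel headers.

#include <stdio.h>
#include <stdint.h>

/* Illustrative constants: x86-64 uses 4 KiB pages and maps the legacy
 * vsyscall page at 0xffffffffff600000. */
#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define VSYSCALL_ADDR	0xffffffffff600000UL

/* Old style: explicit [start, end) range check. */
static int in_vsyscall_range(uint64_t addr)
{
	return addr >= VSYSCALL_ADDR && addr < VSYSCALL_ADDR + PAGE_SIZE;
}

/* New style: when the region is exactly one page, masking off the
 * page-offset bits and comparing against the page base is equivalent. */
static int in_vsyscall_page(uint64_t addr)
{
	return (addr & PAGE_MASK) == VSYSCALL_ADDR;
}

int main(void)
{
	uint64_t probes[] = {
		VSYSCALL_ADDR,			/* first byte of the page */
		VSYSCALL_ADDR + 0xfff,		/* last byte of the page */
		VSYSCALL_ADDR + PAGE_SIZE,	/* just past the page */
		VSYSCALL_ADDR - 1,		/* just before the page */
	};

	/* Both predicates agree on every probe. */
	for (size_t i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
		printf("%#lx: range=%d mask=%d\n",
		       (unsigned long)probes[i],
		       in_vsyscall_range(probes[i]),
		       in_vsyscall_page(probes[i]));
	return 0;
}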
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/fault.c	|  5
-rw-r--r--	arch/x86/mm/init_64.c	| 10
2 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8e5722992677..858b47b5221b 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -18,7 +18,8 @@
 #include <asm/traps.h>			/* dotraplinkage, ...		*/
 #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
 #include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
-#include <asm/fixmap.h>			/* VSYSCALL_START		*/
+#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
+#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
@@ -771,7 +772,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 	 * emulation.
 	 */
 	if (unlikely((error_code & PF_INSTR) &&
-		     ((address & ~0xfff) == VSYSCALL_START))) {
+		     ((address & ~0xfff) == VSYSCALL_ADDR))) {
 		if (emulate_vsyscall(regs, address))
 			return;
 	}
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 563849600d3e..6f881842116c 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1055,8 +1055,8 @@ void __init mem_init(void)
 	after_bootmem = 1;
 
 	/* Register memory areas for /proc/kcore */
-	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
-			 VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);
+	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
+			 PAGE_SIZE, KCORE_OTHER);
 
 	mem_init_print_info(NULL);
 }
@@ -1186,8 +1186,8 @@ int kern_addr_valid(unsigned long addr)
  * not need special handling anymore:
  */
 static struct vm_area_struct gate_vma = {
-	.vm_start	= VSYSCALL_START,
-	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
+	.vm_start	= VSYSCALL_ADDR,
+	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
 	.vm_page_prot	= PAGE_READONLY_EXEC,
 	.vm_flags	= VM_READ | VM_EXEC
 };
@@ -1218,7 +1218,7 @@ int in_gate_area(struct mm_struct *mm, unsigned long addr)
  */
 int in_gate_area_no_mm(unsigned long addr)
 {
-	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
+	return (addr & PAGE_MASK) == VSYSCALL_ADDR;
 }
 
 const char *arch_vma_name(struct vm_area_struct *vma)