diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-05 11:05:29 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-05 11:05:29 -0400 |
commit | a0abcf2e8f8017051830f738ac1bf5ef42703243 (patch) | |
tree | ef6ff14b5eb9cf14cd135c0f0f09fa0944192ef0 /arch/x86/mm | |
parent | 2071b3e34fd33e496ebd7b90331ac5b3b0ac3b81 (diff) | |
parent | c191920f737a09a7252088f018f6747f0d2f484d (diff) |
Merge branch 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull x86 vdso updates from Peter Anvin:
"Vdso cleanups and improvements largely from Andy Lutomirski. This
makes the vdso a lot less 'special'"
* 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/vdso, build: Make LE access macros clearer, host-safe
x86/vdso, build: Fix cross-compilation from big-endian architectures
x86/vdso, build: When vdso2c fails, unlink the output
x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET
x86, mm: Replace arch_vma_name with vm_ops->name for vsyscalls
x86, mm: Improve _install_special_mapping and fix x86 vdso naming
mm, fs: Add vm_ops->name as an alternative to arch_vma_name
x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET
x86, vdso: Remove vestiges of VDSO_PRELINK and some outdated comments
x86, vdso: Move the vvar and hpet mappings next to the 64-bit vDSO
x86, vdso: Move the 32-bit vdso special pages after the text
x86, vdso: Reimplement vdso.so preparation in build-time C
x86, vdso: Move syscall and sysenter setup into kernel/cpu/common.c
x86, vdso: Clean up 32-bit vs 64-bit vdso params
x86, mm: Ensure correct alignment of the fixmap
Diffstat (limited to 'arch/x86/mm')
-rw-r--r-- | arch/x86/mm/fault.c | 5 | ||||
-rw-r--r-- | arch/x86/mm/init_64.c | 29 | ||||
-rw-r--r-- | arch/x86/mm/ioremap.c | 6 | ||||
-rw-r--r-- | arch/x86/mm/pgtable.c | 6 |
4 files changed, 26 insertions, 20 deletions
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 8e5722992677..858b47b5221b 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -18,7 +18,8 @@ | |||
18 | #include <asm/traps.h> /* dotraplinkage, ... */ | 18 | #include <asm/traps.h> /* dotraplinkage, ... */ |
19 | #include <asm/pgalloc.h> /* pgd_*(), ... */ | 19 | #include <asm/pgalloc.h> /* pgd_*(), ... */ |
20 | #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ | 20 | #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ |
21 | #include <asm/fixmap.h> /* VSYSCALL_START */ | 21 | #include <asm/fixmap.h> /* VSYSCALL_ADDR */ |
22 | #include <asm/vsyscall.h> /* emulate_vsyscall */ | ||
22 | 23 | ||
23 | #define CREATE_TRACE_POINTS | 24 | #define CREATE_TRACE_POINTS |
24 | #include <asm/trace/exceptions.h> | 25 | #include <asm/trace/exceptions.h> |
@@ -771,7 +772,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | |||
771 | * emulation. | 772 | * emulation. |
772 | */ | 773 | */ |
773 | if (unlikely((error_code & PF_INSTR) && | 774 | if (unlikely((error_code & PF_INSTR) && |
774 | ((address & ~0xfff) == VSYSCALL_START))) { | 775 | ((address & ~0xfff) == VSYSCALL_ADDR))) { |
775 | if (emulate_vsyscall(regs, address)) | 776 | if (emulate_vsyscall(regs, address)) |
776 | return; | 777 | return; |
777 | } | 778 | } |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index b92591fa8970..df1a9927ad29 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -1055,8 +1055,8 @@ void __init mem_init(void) | |||
1055 | after_bootmem = 1; | 1055 | after_bootmem = 1; |
1056 | 1056 | ||
1057 | /* Register memory areas for /proc/kcore */ | 1057 | /* Register memory areas for /proc/kcore */ |
1058 | kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, | 1058 | kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, |
1059 | VSYSCALL_END - VSYSCALL_START, KCORE_OTHER); | 1059 | PAGE_SIZE, KCORE_OTHER); |
1060 | 1060 | ||
1061 | mem_init_print_info(NULL); | 1061 | mem_init_print_info(NULL); |
1062 | } | 1062 | } |
@@ -1185,11 +1185,19 @@ int kern_addr_valid(unsigned long addr) | |||
1185 | * covers the 64bit vsyscall page now. 32bit has a real VMA now and does | 1185 | * covers the 64bit vsyscall page now. 32bit has a real VMA now and does |
1186 | * not need special handling anymore: | 1186 | * not need special handling anymore: |
1187 | */ | 1187 | */ |
1188 | static const char *gate_vma_name(struct vm_area_struct *vma) | ||
1189 | { | ||
1190 | return "[vsyscall]"; | ||
1191 | } | ||
1192 | static struct vm_operations_struct gate_vma_ops = { | ||
1193 | .name = gate_vma_name, | ||
1194 | }; | ||
1188 | static struct vm_area_struct gate_vma = { | 1195 | static struct vm_area_struct gate_vma = { |
1189 | .vm_start = VSYSCALL_START, | 1196 | .vm_start = VSYSCALL_ADDR, |
1190 | .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE), | 1197 | .vm_end = VSYSCALL_ADDR + PAGE_SIZE, |
1191 | .vm_page_prot = PAGE_READONLY_EXEC, | 1198 | .vm_page_prot = PAGE_READONLY_EXEC, |
1192 | .vm_flags = VM_READ | VM_EXEC | 1199 | .vm_flags = VM_READ | VM_EXEC, |
1200 | .vm_ops = &gate_vma_ops, | ||
1193 | }; | 1201 | }; |
1194 | 1202 | ||
1195 | struct vm_area_struct *get_gate_vma(struct mm_struct *mm) | 1203 | struct vm_area_struct *get_gate_vma(struct mm_struct *mm) |
@@ -1218,16 +1226,7 @@ int in_gate_area(struct mm_struct *mm, unsigned long addr) | |||
1218 | */ | 1226 | */ |
1219 | int in_gate_area_no_mm(unsigned long addr) | 1227 | int in_gate_area_no_mm(unsigned long addr) |
1220 | { | 1228 | { |
1221 | return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END); | 1229 | return (addr & PAGE_MASK) == VSYSCALL_ADDR; |
1222 | } | ||
1223 | |||
1224 | const char *arch_vma_name(struct vm_area_struct *vma) | ||
1225 | { | ||
1226 | if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) | ||
1227 | return "[vdso]"; | ||
1228 | if (vma == &gate_vma) | ||
1229 | return "[vsyscall]"; | ||
1230 | return NULL; | ||
1231 | } | 1230 | } |
1232 | 1231 | ||
1233 | static unsigned long probe_memory_block_size(void) | 1232 | static unsigned long probe_memory_block_size(void) |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index bc7527e109c8..baff1da354e0 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -367,6 +367,12 @@ void __init early_ioremap_init(void) | |||
367 | { | 367 | { |
368 | pmd_t *pmd; | 368 | pmd_t *pmd; |
369 | 369 | ||
370 | #ifdef CONFIG_X86_64 | ||
371 | BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1)); | ||
372 | #else | ||
373 | WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1)); | ||
374 | #endif | ||
375 | |||
370 | early_ioremap_setup(); | 376 | early_ioremap_setup(); |
371 | 377 | ||
372 | pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); | 378 | pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); |
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 0004ac72dbdd..6fb6927f9e76 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -456,9 +456,9 @@ void __init reserve_top_address(unsigned long reserve) | |||
456 | { | 456 | { |
457 | #ifdef CONFIG_X86_32 | 457 | #ifdef CONFIG_X86_32 |
458 | BUG_ON(fixmaps_set > 0); | 458 | BUG_ON(fixmaps_set > 0); |
459 | printk(KERN_INFO "Reserving virtual address space above 0x%08x\n", | 459 | __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE; |
460 | (int)-reserve); | 460 | printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n", |
461 | __FIXADDR_TOP = -reserve - PAGE_SIZE; | 461 | -reserve, __FIXADDR_TOP + PAGE_SIZE); |
462 | #endif | 462 | #endif |
463 | } | 463 | } |
464 | 464 | ||