diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2017-12-20 12:51:31 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2017-12-22 14:13:05 -0500 |
commit | 92a0f81d89571e3e8759366e050ee05cc545ef99 (patch) | |
tree | e91c882e3a4ee2ab80673473389067adf9dd7807 | |
parent | ed1bbc40a0d10e0c5c74fe7bdc6298295cf40255 (diff) |
x86/cpu_entry_area: Move it out of the fixmap
Put the cpu_entry_area into a separate P4D entry. The fixmap gets too big
and 0-day already hit a case where the fixmap PTEs were cleared by
cleanup_highmap().
Aside from that, the fixmap API is a pain as it's all backwards.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | Documentation/x86/x86_64/mm.txt | 2 | ||||
-rw-r--r-- | arch/x86/include/asm/cpu_entry_area.h | 18 | ||||
-rw-r--r-- | arch/x86/include/asm/desc.h | 1 | ||||
-rw-r--r-- | arch/x86/include/asm/fixmap.h | 32 | ||||
-rw-r--r-- | arch/x86/include/asm/pgtable_32_types.h | 15 | ||||
-rw-r--r-- | arch/x86/include/asm/pgtable_64_types.h | 47 | ||||
-rw-r--r-- | arch/x86/kernel/dumpstack.c | 1 | ||||
-rw-r--r-- | arch/x86/kernel/traps.c | 5 | ||||
-rw-r--r-- | arch/x86/mm/cpu_entry_area.c | 66 | ||||
-rw-r--r-- | arch/x86/mm/dump_pagetables.c | 6 | ||||
-rw-r--r-- | arch/x86/mm/init_32.c | 6 | ||||
-rw-r--r-- | arch/x86/mm/kasan_init_64.c | 29 | ||||
-rw-r--r-- | arch/x86/mm/pgtable_32.c | 1 | ||||
-rw-r--r-- | arch/x86/xen/mmu_pv.c | 2 |
14 files changed, 143 insertions(+), 88 deletions(-)
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index 63a41671d25b..51101708a03a 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt | |||
@@ -12,6 +12,7 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) | |||
12 | ... unused hole ... | 12 | ... unused hole ... |
13 | ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) | 13 | ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) |
14 | ... unused hole ... | 14 | ... unused hole ... |
15 | fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping | ||
15 | ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks | 16 | ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks |
16 | ... unused hole ... | 17 | ... unused hole ... |
17 | ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space | 18 | ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space |
@@ -35,6 +36,7 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) | |||
35 | ... unused hole ... | 36 | ... unused hole ... |
36 | ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) | 37 | ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) |
37 | ... unused hole ... | 38 | ... unused hole ... |
39 | fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping | ||
38 | ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks | 40 | ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks |
39 | ... unused hole ... | 41 | ... unused hole ... |
40 | ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space | 42 | ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space |
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h index 5471826803af..2fbc69a0916e 100644 --- a/arch/x86/include/asm/cpu_entry_area.h +++ b/arch/x86/include/asm/cpu_entry_area.h | |||
@@ -43,10 +43,26 @@ struct cpu_entry_area { | |||
43 | }; | 43 | }; |
44 | 44 | ||
45 | #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) | 45 | #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) |
46 | #define CPU_ENTRY_AREA_PAGES (CPU_ENTRY_AREA_SIZE / PAGE_SIZE) | 46 | #define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS) |
47 | 47 | ||
48 | DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); | 48 | DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); |
49 | 49 | ||
50 | extern void setup_cpu_entry_areas(void); | 50 | extern void setup_cpu_entry_areas(void); |
51 | extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); | ||
52 | |||
53 | #define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE | ||
54 | #define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE) | ||
55 | |||
56 | #define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT) | ||
57 | |||
58 | #define CPU_ENTRY_AREA_MAP_SIZE \ | ||
59 | (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE) | ||
60 | |||
61 | extern struct cpu_entry_area *get_cpu_entry_area(int cpu); | ||
62 | |||
63 | static inline struct entry_stack *cpu_entry_stack(int cpu) | ||
64 | { | ||
65 | return &get_cpu_entry_area(cpu)->entry_stack_page.stack; | ||
66 | } | ||
51 | 67 | ||
52 | #endif | 68 | #endif |
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 2ace1f90d138..bc359dd2f7f6 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <asm/mmu.h> | 7 | #include <asm/mmu.h> |
8 | #include <asm/fixmap.h> | 8 | #include <asm/fixmap.h> |
9 | #include <asm/irq_vectors.h> | 9 | #include <asm/irq_vectors.h> |
10 | #include <asm/cpu_entry_area.h> | ||
10 | 11 | ||
11 | #include <linux/smp.h> | 12 | #include <linux/smp.h> |
12 | #include <linux/percpu.h> | 13 | #include <linux/percpu.h> |
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index fb801662a230..64c4a30e0d39 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h | |||
@@ -25,7 +25,6 @@ | |||
25 | #else | 25 | #else |
26 | #include <uapi/asm/vsyscall.h> | 26 | #include <uapi/asm/vsyscall.h> |
27 | #endif | 27 | #endif |
28 | #include <asm/cpu_entry_area.h> | ||
29 | 28 | ||
30 | /* | 29 | /* |
31 | * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall | 30 | * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall |
@@ -84,7 +83,6 @@ enum fixed_addresses { | |||
84 | FIX_IO_APIC_BASE_0, | 83 | FIX_IO_APIC_BASE_0, |
85 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, | 84 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, |
86 | #endif | 85 | #endif |
87 | FIX_RO_IDT, /* Virtual mapping for read-only IDT */ | ||
88 | #ifdef CONFIG_X86_32 | 86 | #ifdef CONFIG_X86_32 |
89 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ | 87 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ |
90 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | 88 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, |
@@ -100,9 +98,6 @@ enum fixed_addresses { | |||
100 | #ifdef CONFIG_X86_INTEL_MID | 98 | #ifdef CONFIG_X86_INTEL_MID |
101 | FIX_LNW_VRTC, | 99 | FIX_LNW_VRTC, |
102 | #endif | 100 | #endif |
103 | /* Fixmap entries to remap the GDTs, one per processor. */ | ||
104 | FIX_CPU_ENTRY_AREA_TOP, | ||
105 | FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1, | ||
106 | 101 | ||
107 | #ifdef CONFIG_ACPI_APEI_GHES | 102 | #ifdef CONFIG_ACPI_APEI_GHES |
108 | /* Used for GHES mapping from assorted contexts */ | 103 | /* Used for GHES mapping from assorted contexts */ |
@@ -143,7 +138,7 @@ enum fixed_addresses { | |||
143 | extern void reserve_top_address(unsigned long reserve); | 138 | extern void reserve_top_address(unsigned long reserve); |
144 | 139 | ||
145 | #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) | 140 | #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) |
146 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) | 141 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) |
147 | 142 | ||
148 | extern int fixmaps_set; | 143 | extern int fixmaps_set; |
149 | 144 | ||
@@ -191,30 +186,5 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr, | |||
191 | void __early_set_fixmap(enum fixed_addresses idx, | 186 | void __early_set_fixmap(enum fixed_addresses idx, |
192 | phys_addr_t phys, pgprot_t flags); | 187 | phys_addr_t phys, pgprot_t flags); |
193 | 188 | ||
194 | static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page) | ||
195 | { | ||
196 | BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); | ||
197 | |||
198 | return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page; | ||
199 | } | ||
200 | |||
201 | #define __get_cpu_entry_area_offset_index(cpu, offset) ({ \ | ||
202 | BUILD_BUG_ON(offset % PAGE_SIZE != 0); \ | ||
203 | __get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE); \ | ||
204 | }) | ||
205 | |||
206 | #define get_cpu_entry_area_index(cpu, field) \ | ||
207 | __get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field)) | ||
208 | |||
209 | static inline struct cpu_entry_area *get_cpu_entry_area(int cpu) | ||
210 | { | ||
211 | return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0)); | ||
212 | } | ||
213 | |||
214 | static inline struct entry_stack *cpu_entry_stack(int cpu) | ||
215 | { | ||
216 | return &get_cpu_entry_area(cpu)->entry_stack_page.stack; | ||
217 | } | ||
218 | |||
219 | #endif /* !__ASSEMBLY__ */ | 189 | #endif /* !__ASSEMBLY__ */ |
220 | #endif /* _ASM_X86_FIXMAP_H */ | 190 | #endif /* _ASM_X86_FIXMAP_H */ |
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h index f2ca9b28fd68..ce245b0cdfca 100644 --- a/arch/x86/include/asm/pgtable_32_types.h +++ b/arch/x86/include/asm/pgtable_32_types.h | |||
@@ -38,13 +38,22 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ | |||
38 | #define LAST_PKMAP 1024 | 38 | #define LAST_PKMAP 1024 |
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | #define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ | 41 | /* |
42 | & PMD_MASK) | 42 | * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c |
43 | * to avoid include recursion hell | ||
44 | */ | ||
45 | #define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40) | ||
46 | |||
47 | #define CPU_ENTRY_AREA_BASE \ | ||
48 | ((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK) | ||
49 | |||
50 | #define PKMAP_BASE \ | ||
51 | ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK) | ||
43 | 52 | ||
44 | #ifdef CONFIG_HIGHMEM | 53 | #ifdef CONFIG_HIGHMEM |
45 | # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) | 54 | # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) |
46 | #else | 55 | #else |
47 | # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) | 56 | # define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE) |
48 | #endif | 57 | #endif |
49 | 58 | ||
50 | #define MODULES_VADDR VMALLOC_START | 59 | #define MODULES_VADDR VMALLOC_START |
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 6d5f45dcd4a1..3d27831bc58d 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h | |||
@@ -76,32 +76,41 @@ typedef struct { pteval_t pte; } pte_t; | |||
76 | #define PGDIR_MASK (~(PGDIR_SIZE - 1)) | 76 | #define PGDIR_MASK (~(PGDIR_SIZE - 1)) |
77 | 77 | ||
78 | /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ | 78 | /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ |
79 | #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) | 79 | #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) |
80 | |||
80 | #ifdef CONFIG_X86_5LEVEL | 81 | #ifdef CONFIG_X86_5LEVEL |
81 | #define VMALLOC_SIZE_TB _AC(16384, UL) | 82 | # define VMALLOC_SIZE_TB _AC(16384, UL) |
82 | #define __VMALLOC_BASE _AC(0xff92000000000000, UL) | 83 | # define __VMALLOC_BASE _AC(0xff92000000000000, UL) |
83 | #define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) | 84 | # define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) |
84 | #else | 85 | #else |
85 | #define VMALLOC_SIZE_TB _AC(32, UL) | 86 | # define VMALLOC_SIZE_TB _AC(32, UL) |
86 | #define __VMALLOC_BASE _AC(0xffffc90000000000, UL) | 87 | # define __VMALLOC_BASE _AC(0xffffc90000000000, UL) |
87 | #define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) | 88 | # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) |
88 | #endif | 89 | #endif |
90 | |||
89 | #ifdef CONFIG_RANDOMIZE_MEMORY | 91 | #ifdef CONFIG_RANDOMIZE_MEMORY |
90 | #define VMALLOC_START vmalloc_base | 92 | # define VMALLOC_START vmalloc_base |
91 | #define VMEMMAP_START vmemmap_base | 93 | # define VMEMMAP_START vmemmap_base |
92 | #else | 94 | #else |
93 | #define VMALLOC_START __VMALLOC_BASE | 95 | # define VMALLOC_START __VMALLOC_BASE |
94 | #define VMEMMAP_START __VMEMMAP_BASE | 96 | # define VMEMMAP_START __VMEMMAP_BASE |
95 | #endif /* CONFIG_RANDOMIZE_MEMORY */ | 97 | #endif /* CONFIG_RANDOMIZE_MEMORY */ |
96 | #define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) | 98 | |
97 | #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) | 99 | #define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) |
100 | |||
101 | #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) | ||
98 | /* The module sections ends with the start of the fixmap */ | 102 | /* The module sections ends with the start of the fixmap */ |
99 | #define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) | 103 | #define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) |
100 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) | 104 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) |
101 | #define ESPFIX_PGD_ENTRY _AC(-2, UL) | 105 | |
102 | #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) | 106 | #define ESPFIX_PGD_ENTRY _AC(-2, UL) |
103 | #define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) | 107 | #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) |
104 | #define EFI_VA_END (-68 * (_AC(1, UL) << 30)) | 108 | |
109 | #define CPU_ENTRY_AREA_PGD _AC(-3, UL) | ||
110 | #define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT) | ||
111 | |||
112 | #define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) | ||
113 | #define EFI_VA_END (-68 * (_AC(1, UL) << 30)) | ||
105 | 114 | ||
106 | #define EARLY_DYNAMIC_PAGE_TABLES 64 | 115 | #define EARLY_DYNAMIC_PAGE_TABLES 64 |
107 | 116 | ||
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 1dd3f533d78c..36b17e0febe8 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/nmi.h> | 18 | #include <linux/nmi.h> |
19 | #include <linux/sysfs.h> | 19 | #include <linux/sysfs.h> |
20 | 20 | ||
21 | #include <asm/cpu_entry_area.h> | ||
21 | #include <asm/stacktrace.h> | 22 | #include <asm/stacktrace.h> |
22 | #include <asm/unwind.h> | 23 | #include <asm/unwind.h> |
23 | 24 | ||
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 464daed6894f..7c16fe0b60c2 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -951,8 +951,9 @@ void __init trap_init(void) | |||
951 | * "sidt" instruction will not leak the location of the kernel, and | 951 | * "sidt" instruction will not leak the location of the kernel, and |
952 | * to defend the IDT against arbitrary memory write vulnerabilities. | 952 | * to defend the IDT against arbitrary memory write vulnerabilities. |
953 | * It will be reloaded in cpu_init() */ | 953 | * It will be reloaded in cpu_init() */ |
954 | __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO); | 954 | cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table), |
955 | idt_descr.address = fix_to_virt(FIX_RO_IDT); | 955 | PAGE_KERNEL_RO); |
956 | idt_descr.address = CPU_ENTRY_AREA_RO_IDT; | ||
956 | 957 | ||
957 | /* | 958 | /* |
958 | * Should be a barrier for any external CPU state: | 959 | * Should be a barrier for any external CPU state: |
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c index 235ff9cfaaf4..21e8b595cbb1 100644 --- a/arch/x86/mm/cpu_entry_area.c +++ b/arch/x86/mm/cpu_entry_area.c | |||
@@ -15,11 +15,27 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks | |||
15 | [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); | 15 | [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); |
16 | #endif | 16 | #endif |
17 | 17 | ||
18 | struct cpu_entry_area *get_cpu_entry_area(int cpu) | ||
19 | { | ||
20 | unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE; | ||
21 | BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); | ||
22 | |||
23 | return (struct cpu_entry_area *) va; | ||
24 | } | ||
25 | EXPORT_SYMBOL(get_cpu_entry_area); | ||
26 | |||
27 | void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags) | ||
28 | { | ||
29 | unsigned long va = (unsigned long) cea_vaddr; | ||
30 | |||
31 | set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags)); | ||
32 | } | ||
33 | |||
18 | static void __init | 34 | static void __init |
19 | set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot) | 35 | cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot) |
20 | { | 36 | { |
21 | for ( ; pages; pages--, idx--, ptr += PAGE_SIZE) | 37 | for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE) |
22 | __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot); | 38 | cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot); |
23 | } | 39 | } |
24 | 40 | ||
25 | /* Setup the fixmap mappings only once per-processor */ | 41 | /* Setup the fixmap mappings only once per-processor */ |
@@ -47,10 +63,12 @@ static void __init setup_cpu_entry_area(int cpu) | |||
47 | pgprot_t tss_prot = PAGE_KERNEL; | 63 | pgprot_t tss_prot = PAGE_KERNEL; |
48 | #endif | 64 | #endif |
49 | 65 | ||
50 | __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot); | 66 | cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu), |
51 | set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page), | 67 | gdt_prot); |
52 | per_cpu_ptr(&entry_stack_storage, cpu), 1, | 68 | |
53 | PAGE_KERNEL); | 69 | cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page, |
70 | per_cpu_ptr(&entry_stack_storage, cpu), 1, | ||
71 | PAGE_KERNEL); | ||
54 | 72 | ||
55 | /* | 73 | /* |
56 | * The Intel SDM says (Volume 3, 7.2.1): | 74 | * The Intel SDM says (Volume 3, 7.2.1): |
@@ -72,10 +90,9 @@ static void __init setup_cpu_entry_area(int cpu) | |||
72 | BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ | 90 | BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ |
73 | offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); | 91 | offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); |
74 | BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); | 92 | BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); |
75 | set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss), | 93 | cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss, |
76 | &per_cpu(cpu_tss_rw, cpu), | 94 | &per_cpu(cpu_tss_rw, cpu), |
77 | sizeof(struct tss_struct) / PAGE_SIZE, | 95 | sizeof(struct tss_struct) / PAGE_SIZE, tss_prot); |
78 | tss_prot); | ||
79 | 96 | ||
80 | #ifdef CONFIG_X86_32 | 97 | #ifdef CONFIG_X86_32 |
81 | per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); | 98 | per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); |
@@ -85,20 +102,37 @@ static void __init setup_cpu_entry_area(int cpu) | |||
85 | BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0); | 102 | BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0); |
86 | BUILD_BUG_ON(sizeof(exception_stacks) != | 103 | BUILD_BUG_ON(sizeof(exception_stacks) != |
87 | sizeof(((struct cpu_entry_area *)0)->exception_stacks)); | 104 | sizeof(((struct cpu_entry_area *)0)->exception_stacks)); |
88 | set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks), | 105 | cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks, |
89 | &per_cpu(exception_stacks, cpu), | 106 | &per_cpu(exception_stacks, cpu), |
90 | sizeof(exception_stacks) / PAGE_SIZE, | 107 | sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL); |
91 | PAGE_KERNEL); | ||
92 | 108 | ||
93 | __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline), | 109 | cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline, |
94 | __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); | 110 | __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); |
95 | #endif | 111 | #endif |
96 | } | 112 | } |
97 | 113 | ||
114 | static __init void setup_cpu_entry_area_ptes(void) | ||
115 | { | ||
116 | #ifdef CONFIG_X86_32 | ||
117 | unsigned long start, end; | ||
118 | |||
119 | BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE); | ||
120 | BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK); | ||
121 | |||
122 | start = CPU_ENTRY_AREA_BASE; | ||
123 | end = start + CPU_ENTRY_AREA_MAP_SIZE; | ||
124 | |||
125 | for (; start < end; start += PMD_SIZE) | ||
126 | populate_extra_pte(start); | ||
127 | #endif | ||
128 | } | ||
129 | |||
98 | void __init setup_cpu_entry_areas(void) | 130 | void __init setup_cpu_entry_areas(void) |
99 | { | 131 | { |
100 | unsigned int cpu; | 132 | unsigned int cpu; |
101 | 133 | ||
134 | setup_cpu_entry_area_ptes(); | ||
135 | |||
102 | for_each_possible_cpu(cpu) | 136 | for_each_possible_cpu(cpu) |
103 | setup_cpu_entry_area(cpu); | 137 | setup_cpu_entry_area(cpu); |
104 | } | 138 | } |
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index fdf09d8f98da..43dedbfb7257 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c | |||
@@ -58,6 +58,7 @@ enum address_markers_idx { | |||
58 | KASAN_SHADOW_START_NR, | 58 | KASAN_SHADOW_START_NR, |
59 | KASAN_SHADOW_END_NR, | 59 | KASAN_SHADOW_END_NR, |
60 | #endif | 60 | #endif |
61 | CPU_ENTRY_AREA_NR, | ||
61 | #ifdef CONFIG_X86_ESPFIX64 | 62 | #ifdef CONFIG_X86_ESPFIX64 |
62 | ESPFIX_START_NR, | 63 | ESPFIX_START_NR, |
63 | #endif | 64 | #endif |
@@ -81,6 +82,7 @@ static struct addr_marker address_markers[] = { | |||
81 | [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" }, | 82 | [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" }, |
82 | [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, | 83 | [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, |
83 | #endif | 84 | #endif |
85 | [CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE,"CPU entry Area" }, | ||
84 | #ifdef CONFIG_X86_ESPFIX64 | 86 | #ifdef CONFIG_X86_ESPFIX64 |
85 | [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, | 87 | [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, |
86 | #endif | 88 | #endif |
@@ -104,6 +106,7 @@ enum address_markers_idx { | |||
104 | #ifdef CONFIG_HIGHMEM | 106 | #ifdef CONFIG_HIGHMEM |
105 | PKMAP_BASE_NR, | 107 | PKMAP_BASE_NR, |
106 | #endif | 108 | #endif |
109 | CPU_ENTRY_AREA_NR, | ||
107 | FIXADDR_START_NR, | 110 | FIXADDR_START_NR, |
108 | END_OF_SPACE_NR, | 111 | END_OF_SPACE_NR, |
109 | }; | 112 | }; |
@@ -116,6 +119,7 @@ static struct addr_marker address_markers[] = { | |||
116 | #ifdef CONFIG_HIGHMEM | 119 | #ifdef CONFIG_HIGHMEM |
117 | [PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" }, | 120 | [PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" }, |
118 | #endif | 121 | #endif |
122 | [CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" }, | ||
119 | [FIXADDR_START_NR] = { 0UL, "Fixmap area" }, | 123 | [FIXADDR_START_NR] = { 0UL, "Fixmap area" }, |
120 | [END_OF_SPACE_NR] = { -1, NULL } | 124 | [END_OF_SPACE_NR] = { -1, NULL } |
121 | }; | 125 | }; |
@@ -541,8 +545,8 @@ static int __init pt_dump_init(void) | |||
541 | address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE; | 545 | address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE; |
542 | # endif | 546 | # endif |
543 | address_markers[FIXADDR_START_NR].start_address = FIXADDR_START; | 547 | address_markers[FIXADDR_START_NR].start_address = FIXADDR_START; |
548 | address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE; | ||
544 | #endif | 549 | #endif |
545 | |||
546 | return 0; | 550 | return 0; |
547 | } | 551 | } |
548 | __initcall(pt_dump_init); | 552 | __initcall(pt_dump_init); |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8a64a6f2848d..135c9a7898c7 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <asm/setup.h> | 50 | #include <asm/setup.h> |
51 | #include <asm/set_memory.h> | 51 | #include <asm/set_memory.h> |
52 | #include <asm/page_types.h> | 52 | #include <asm/page_types.h> |
53 | #include <asm/cpu_entry_area.h> | ||
53 | #include <asm/init.h> | 54 | #include <asm/init.h> |
54 | 55 | ||
55 | #include "mm_internal.h" | 56 | #include "mm_internal.h" |
@@ -766,6 +767,7 @@ void __init mem_init(void) | |||
766 | mem_init_print_info(NULL); | 767 | mem_init_print_info(NULL); |
767 | printk(KERN_INFO "virtual kernel memory layout:\n" | 768 | printk(KERN_INFO "virtual kernel memory layout:\n" |
768 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | 769 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" |
770 | " cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
769 | #ifdef CONFIG_HIGHMEM | 771 | #ifdef CONFIG_HIGHMEM |
770 | " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | 772 | " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" |
771 | #endif | 773 | #endif |
@@ -777,6 +779,10 @@ void __init mem_init(void) | |||
777 | FIXADDR_START, FIXADDR_TOP, | 779 | FIXADDR_START, FIXADDR_TOP, |
778 | (FIXADDR_TOP - FIXADDR_START) >> 10, | 780 | (FIXADDR_TOP - FIXADDR_START) >> 10, |
779 | 781 | ||
782 | CPU_ENTRY_AREA_BASE, | ||
783 | CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE, | ||
784 | CPU_ENTRY_AREA_MAP_SIZE >> 10, | ||
785 | |||
780 | #ifdef CONFIG_HIGHMEM | 786 | #ifdef CONFIG_HIGHMEM |
781 | PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, | 787 | PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, |
782 | (LAST_PKMAP*PAGE_SIZE) >> 10, | 788 | (LAST_PKMAP*PAGE_SIZE) >> 10, |
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 9ec70d780f1f..47388f0c0e59 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/tlbflush.h> | 15 | #include <asm/tlbflush.h> |
16 | #include <asm/sections.h> | 16 | #include <asm/sections.h> |
17 | #include <asm/pgtable.h> | 17 | #include <asm/pgtable.h> |
18 | #include <asm/cpu_entry_area.h> | ||
18 | 19 | ||
19 | extern struct range pfn_mapped[E820_MAX_ENTRIES]; | 20 | extern struct range pfn_mapped[E820_MAX_ENTRIES]; |
20 | 21 | ||
@@ -322,31 +323,33 @@ void __init kasan_init(void) | |||
322 | map_range(&pfn_mapped[i]); | 323 | map_range(&pfn_mapped[i]); |
323 | } | 324 | } |
324 | 325 | ||
325 | kasan_populate_zero_shadow( | 326 | shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE; |
326 | kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), | ||
327 | kasan_mem_to_shadow((void *)__START_KERNEL_map)); | ||
328 | |||
329 | kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), | ||
330 | (unsigned long)kasan_mem_to_shadow(_end), | ||
331 | early_pfn_to_nid(__pa(_stext))); | ||
332 | |||
333 | shadow_cpu_entry_begin = (void *)__fix_to_virt(FIX_CPU_ENTRY_AREA_BOTTOM); | ||
334 | shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); | 327 | shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); |
335 | shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin, | 328 | shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin, |
336 | PAGE_SIZE); | 329 | PAGE_SIZE); |
337 | 330 | ||
338 | shadow_cpu_entry_end = (void *)(__fix_to_virt(FIX_CPU_ENTRY_AREA_TOP) + PAGE_SIZE); | 331 | shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE + |
332 | CPU_ENTRY_AREA_MAP_SIZE); | ||
339 | shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); | 333 | shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); |
340 | shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end, | 334 | shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end, |
341 | PAGE_SIZE); | 335 | PAGE_SIZE); |
342 | 336 | ||
343 | kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), | 337 | kasan_populate_zero_shadow( |
344 | shadow_cpu_entry_begin); | 338 | kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), |
339 | shadow_cpu_entry_begin); | ||
345 | 340 | ||
346 | kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, | 341 | kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, |
347 | (unsigned long)shadow_cpu_entry_end, 0); | 342 | (unsigned long)shadow_cpu_entry_end, 0); |
348 | 343 | ||
349 | kasan_populate_zero_shadow(shadow_cpu_entry_end, (void *)KASAN_SHADOW_END); | 344 | kasan_populate_zero_shadow(shadow_cpu_entry_end, |
345 | kasan_mem_to_shadow((void *)__START_KERNEL_map)); | ||
346 | |||
347 | kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), | ||
348 | (unsigned long)kasan_mem_to_shadow(_end), | ||
349 | early_pfn_to_nid(__pa(_stext))); | ||
350 | |||
351 | kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), | ||
352 | (void *)KASAN_SHADOW_END); | ||
350 | 353 | ||
351 | load_cr3(init_top_pgt); | 354 | load_cr3(init_top_pgt); |
352 | __flush_tlb_all(); | 355 | __flush_tlb_all(); |
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 6b9bf023a700..c3c5274410a9 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/pagemap.h> | 10 | #include <linux/pagemap.h> |
11 | #include <linux/spinlock.h> | 11 | #include <linux/spinlock.h> |
12 | 12 | ||
13 | #include <asm/cpu_entry_area.h> | ||
13 | #include <asm/pgtable.h> | 14 | #include <asm/pgtable.h> |
14 | #include <asm/pgalloc.h> | 15 | #include <asm/pgalloc.h> |
15 | #include <asm/fixmap.h> | 16 | #include <asm/fixmap.h> |
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index c2454237fa67..a0e2b8c6e5c7 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c | |||
@@ -2261,7 +2261,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) | |||
2261 | 2261 | ||
2262 | switch (idx) { | 2262 | switch (idx) { |
2263 | case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: | 2263 | case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: |
2264 | case FIX_RO_IDT: | ||
2265 | #ifdef CONFIG_X86_32 | 2264 | #ifdef CONFIG_X86_32 |
2266 | case FIX_WP_TEST: | 2265 | case FIX_WP_TEST: |
2267 | # ifdef CONFIG_HIGHMEM | 2266 | # ifdef CONFIG_HIGHMEM |
@@ -2272,7 +2271,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) | |||
2272 | #endif | 2271 | #endif |
2273 | case FIX_TEXT_POKE0: | 2272 | case FIX_TEXT_POKE0: |
2274 | case FIX_TEXT_POKE1: | 2273 | case FIX_TEXT_POKE1: |
2275 | case FIX_CPU_ENTRY_AREA_TOP ... FIX_CPU_ENTRY_AREA_BOTTOM: | ||
2276 | /* All local page mappings */ | 2274 | /* All local page mappings */ |
2277 | pte = pfn_pte(phys, prot); | 2275 | pte = pfn_pte(phys, prot); |
2278 | break; | 2276 | break; |