author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2018-02-14 13:25:41 -0500
committer	Ingo Molnar <mingo@kernel.org>	2018-02-16 04:48:49 -0500
commit		91f606a8fa68264224cbc76888fa8649cdbe9990 (patch)
tree		6aa21758fdeba04876a471b800c05975092bedf8
parent		98219dda2ab56ce2a967fdebf81e838d676d9ddc (diff)
x86/mm: Replace compile-time checks for 5-level paging with runtime checks
This patch converts the remaining CONFIG_X86_5LEVEL checks to runtime
checks for p4d folding.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180214182542.69302-9-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
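For illustration, a minimal sketch of the pattern this patch applies at each
call site (the walk_p4d_level()/walk_pud_level() helpers here are hypothetical
stand-ins for the various callers; only the pgtable_l5_enabled flag and the
p4d-folding semantics come from the patch itself):

/* Before: the number of paging levels is fixed at build time. */
#ifdef CONFIG_X86_5LEVEL
	walk_p4d_level(p4d);		/* 5-level paging: p4d is a real level */
#else
	walk_pud_level((pud_t *)p4d);	/* 4-level paging: p4d is folded into pgd */
#endif

/* After: a single kernel image decides at boot time. */
	if (pgtable_l5_enabled)
		walk_p4d_level(p4d);
	else
		walk_pud_level((pud_t *)p4d);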
-rw-r--r--	arch/x86/include/asm/pgtable_64.h	23
-rw-r--r--	arch/x86/mm/dump_pagetables.c		4
-rw-r--r--	arch/x86/mm/fault.c			4
-rw-r--r--	arch/x86/mm/ident_map.c			2
-rw-r--r--	arch/x86/mm/init_64.c			30
-rw-r--r--	arch/x86/mm/kasan_init_64.c		12
-rw-r--r--	arch/x86/mm/kaslr.c			6
-rw-r--r--	arch/x86/mm/tlb.c			2
-rw-r--r--	arch/x86/platform/efi/efi_64.c		2
-rw-r--r--	arch/x86/power/hibernate_64.c		6
10 files changed, 46 insertions(+), 45 deletions(-)
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 81462e9a34f6..81dda8d1d0bd 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -217,29 +217,26 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
 
 static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
-#if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL)
-	p4dp->pgd = pti_set_user_pgd(&p4dp->pgd, p4d.pgd);
-#else
-	*p4dp = p4d;
-#endif
+	pgd_t pgd;
+
+	if (pgtable_l5_enabled || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
+		*p4dp = p4d;
+		return;
+	}
+
+	pgd = native_make_pgd(p4d_val(p4d));
+	pgd = pti_set_user_pgd((pgd_t *)p4dp, pgd);
+	*p4dp = native_make_p4d(pgd_val(pgd));
 }
 
 static inline void native_p4d_clear(p4d_t *p4d)
 {
-#ifdef CONFIG_X86_5LEVEL
 	native_set_p4d(p4d, native_make_p4d(0));
-#else
-	native_set_p4d(p4d, (p4d_t) { .pgd = native_make_pgd(0)});
-#endif
 }
 
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
 	*pgdp = pti_set_user_pgd(pgdp, pgd);
-#else
-	*pgdp = pgd;
-#endif
 }
 
 static inline void native_pgd_clear(pgd_t *pgd)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 9efee6f464ab..0d6d67d18ad6 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -348,9 +348,7 @@ static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
 					     void *pt)
 {
 	if (__pa(pt) == __pa(kasan_zero_pmd) ||
-#ifdef CONFIG_X86_5LEVEL
-	    __pa(pt) == __pa(kasan_zero_p4d) ||
-#endif
+	    (pgtable_l5_enabled && __pa(pt) == __pa(kasan_zero_p4d)) ||
 	    __pa(pt) == __pa(kasan_zero_pud)) {
 		pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
 		note_page(m, st, __pgprot(prot), 5);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 800de815519c..321b78060e93 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -439,7 +439,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pgd_none(*pgd_ref))
 		return -1;
 
-	if (CONFIG_PGTABLE_LEVELS > 4) {
+	if (pgtable_l5_enabled) {
 		if (pgd_none(*pgd)) {
 			set_pgd(pgd, *pgd_ref);
 			arch_flush_lazy_mmu_mode();
@@ -454,7 +454,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (p4d_none(*p4d_ref))
 		return -1;
 
-	if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4) {
+	if (p4d_none(*p4d) && !pgtable_l5_enabled) {
 		set_p4d(p4d, *p4d_ref);
 		arch_flush_lazy_mmu_mode();
 	} else {
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index ab33a32df2a8..9aa22be8331e 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -120,7 +120,7 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
 		result = ident_p4d_init(info, p4d, addr, next);
 		if (result)
 			return result;
-		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+		if (pgtable_l5_enabled) {
 			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
 		} else {
 			/*
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 6a4b20bc7527..3186e6836036 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -88,12 +88,7 @@ static int __init nonx32_setup(char *str)
 }
 __setup("noexec32=", nonx32_setup);
 
-/*
- * When memory was added make sure all the processes MM have
- * suitable PGD entries in the local PGD level page.
- */
-#ifdef CONFIG_X86_5LEVEL
-void sync_global_pgds(unsigned long start, unsigned long end)
+static void sync_global_pgds_l5(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
 
@@ -129,8 +124,8 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 		spin_unlock(&pgd_lock);
 	}
 }
-#else
-void sync_global_pgds(unsigned long start, unsigned long end)
+
+static void sync_global_pgds_l4(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
 
@@ -173,7 +168,18 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 		spin_unlock(&pgd_lock);
 	}
 }
-#endif
+
+/*
+ * When memory was added make sure all the processes MM have
+ * suitable PGD entries in the local PGD level page.
+ */
+void sync_global_pgds(unsigned long start, unsigned long end)
+{
+	if (pgtable_l5_enabled)
+		sync_global_pgds_l5(start, end);
+	else
+		sync_global_pgds_l4(start, end);
+}
 
 /*
  * NOTE: This function is marked __ref because it calls __init function
@@ -632,7 +638,7 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
 	unsigned long vaddr = (unsigned long)__va(paddr);
 	int i = p4d_index(vaddr);
 
-	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+	if (!pgtable_l5_enabled)
 		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);
 
 	for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
@@ -712,7 +718,7 @@ kernel_physical_mapping_init(unsigned long paddr_start,
 						   page_size_mask);
 
 		spin_lock(&init_mm.page_table_lock);
-		if (IS_ENABLED(CONFIG_X86_5LEVEL))
+		if (pgtable_l5_enabled)
 			pgd_populate(&init_mm, pgd, p4d);
 		else
 			p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
@@ -1093,7 +1099,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
 		 * 5-level case we should free them. This code will have to change
 		 * to adapt for boot-time switching between 4 and 5 level page tables.
 		 */
-		if (CONFIG_PGTABLE_LEVELS == 5)
+		if (pgtable_l5_enabled)
 			free_pud_table(pud_base, p4d, altmap);
 	}
 
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 12ec90f62457..0df0dd13a71d 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -176,10 +176,10 @@ static void __init clear_pgds(unsigned long start,
 		 * With folded p4d, pgd_clear() is nop, use p4d_clear()
 		 * instead.
 		 */
-		if (CONFIG_PGTABLE_LEVELS < 5)
-			p4d_clear(p4d_offset(pgd, start));
+		if (pgtable_l5_enabled)
+			pgd_clear(pgd);
 		else
-			pgd_clear(pgd);
+			p4d_clear(p4d_offset(pgd, start));
 	}
 
 	pgd = pgd_offset_k(start);
@@ -191,7 +191,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
 {
 	unsigned long p4d;
 
-	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+	if (!pgtable_l5_enabled)
 		return (p4d_t *)pgd;
 
 	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
@@ -272,7 +272,7 @@ void __init kasan_early_init(void)
 	for (i = 0; i < PTRS_PER_PUD; i++)
 		kasan_zero_pud[i] = __pud(pud_val);
 
-	for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
+	for (i = 0; pgtable_l5_enabled && i < PTRS_PER_P4D; i++)
 		kasan_zero_p4d[i] = __p4d(p4d_val);
 
 	kasan_map_early_shadow(early_top_pgt);
@@ -303,7 +303,7 @@ void __init kasan_init(void)
 	 * bunch of things like kernel code, modules, EFI mapping, etc.
 	 * We need to take extra steps to not overwrite them.
 	 */
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+	if (pgtable_l5_enabled) {
 		void *ptr;
 
 		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 641169d38184..615cc03ced84 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -124,7 +124,7 @@ void __init kernel_randomize_memory(void)
 		 */
 		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
 		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
-		if (IS_ENABLED(CONFIG_X86_5LEVEL))
+		if (pgtable_l5_enabled)
 			entropy = (rand % (entropy + 1)) & P4D_MASK;
 		else
 			entropy = (rand % (entropy + 1)) & PUD_MASK;
@@ -136,7 +136,7 @@ void __init kernel_randomize_memory(void)
 		 * randomization alignment.
 		 */
 		vaddr += get_padding(&kaslr_regions[i]);
-		if (IS_ENABLED(CONFIG_X86_5LEVEL))
+		if (pgtable_l5_enabled)
 			vaddr = round_up(vaddr + 1, P4D_SIZE);
 		else
 			vaddr = round_up(vaddr + 1, PUD_SIZE);
@@ -212,7 +212,7 @@ void __meminit init_trampoline(void)
 		return;
 	}
 
-	if (IS_ENABLED(CONFIG_X86_5LEVEL))
+	if (pgtable_l5_enabled)
 		init_trampoline_p4d();
 	else
 		init_trampoline_pud();
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 6550d37d0f06..92cb8a901c36 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -157,7 +157,7 @@ static void sync_current_stack_to_mm(struct mm_struct *mm)
 	unsigned long sp = current_stack_pointer;
 	pgd_t *pgd = pgd_offset(mm, sp);
 
-	if (CONFIG_PGTABLE_LEVELS > 4) {
+	if (pgtable_l5_enabled) {
 		if (unlikely(pgd_none(*pgd))) {
 			pgd_t *pgd_ref = pgd_offset_k(sp);
 
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index d52aaa7dc088..4845871a2006 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -227,7 +227,7 @@ int __init efi_alloc_page_tables(void)
 
 	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
 	if (!pud) {
-		if (CONFIG_PGTABLE_LEVELS > 4)
+		if (pgtable_l5_enabled)
 			free_page((unsigned long) pgd_page_vaddr(*pgd));
 		free_page((unsigned long)efi_pgd);
 		return -ENOMEM;
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 0ef5e5204968..74a532989308 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -50,7 +50,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
 {
 	pmd_t *pmd;
 	pud_t *pud;
-	p4d_t *p4d;
+	p4d_t *p4d = NULL;
 
 	/*
 	 * The new mapping only has to cover the page containing the image
@@ -66,7 +66,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
 	 * tables used by the image kernel.
 	 */
 
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+	if (pgtable_l5_enabled) {
 		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
 		if (!p4d)
 			return -ENOMEM;
@@ -84,7 +84,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
 		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
 	set_pud(pud + pud_index(restore_jump_address),
 		__pud(__pa(pmd) | _KERNPG_TABLE));
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+	if (p4d) {
 		set_p4d(p4d + p4d_index(restore_jump_address), __p4d(__pa(pud) | _KERNPG_TABLE));
 		set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(p4d) | _KERNPG_TABLE));
 	} else {