author    Joerg Roedel <jroedel@suse.de>  2018-07-18 05:40:54 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2018-07-19 19:11:41 -0400
commit    e3238faf20fb1b51a814497751398ab525a2c884 (patch)
tree      68f0ec05cb51412ddd6a47f25409d2fdaaab57d3
parent    7ffcf1497c8ab59a705bfafb7401876fd2f6f71e (diff)
x86/pgtable/32: Allocate 8k page-tables when PTI is enabled
Allocate a kernel and a user page-table root when PTI is enabled. Also
allocate a full page per root for PAE because otherwise the bit to flip in
CR3 to switch between them would be non-constant, which creates a lot of
hassle. Keep that for a later optimization.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Pavel Machek <pavel@ucw.cz>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: linux-mm@kvack.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Waiman Long <llong@redhat.com>
Cc: "David H . Gutteridge" <dhgutteridge@sympatico.ca>
Cc: joro@8bytes.org
Link: https://lkml.kernel.org/r/1531906876-13451-18-git-send-email-joro@8bytes.org
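The "bit to flip in CR3" reasoning can be made concrete: because both page-table
roots are packed into one 8k-aligned block, the user root always sits exactly one
page (4k) above the kernel root, so switching address spaces is a matter of
toggling bit 12 of the CR3 value rather than loading a separately stored pointer.
A minimal illustrative sketch in plain C follows; the helper names are
hypothetical, and the real switch is performed in assembly by the entry code:

    #include <stdint.h>

    #define PAGE_SHIFT	12
    #define PAGE_SIZE	(1UL << PAGE_SHIFT)

    /*
     * Illustration only (not kernel code): the kernel pgd occupies the lower
     * 4k of the 8k-aligned allocation and the user pgd the upper 4k, so the
     * two CR3 values differ only in bit 12.
     */
    static inline uintptr_t user_cr3_from_kernel(uintptr_t kernel_cr3)
    {
        return kernel_cr3 | PAGE_SIZE;              /* set bit 12: kernel -> user root */
    }

    static inline uintptr_t kernel_cr3_from_user(uintptr_t user_cr3)
    {
        return user_cr3 & ~(uintptr_t)PAGE_SIZE;    /* clear bit 12: user -> kernel root */
    }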
 arch/x86/kernel/head_32.S | 20 +++++++++++++++-----
 arch/x86/mm/pgtable.c     |  5 +++--
 2 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index abe6df15a8fb..30f9cb2c0b55 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -512,11 +512,18 @@ ENTRY(initial_code)
 ENTRY(setup_once_ref)
 	.long setup_once
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#define PGD_ALIGN		(2 * PAGE_SIZE)
+#define PTI_USER_PGD_FILL	1024
+#else
+#define PGD_ALIGN		(PAGE_SIZE)
+#define PTI_USER_PGD_FILL	0
+#endif
 /*
  * BSS section
  */
 __PAGE_ALIGNED_BSS
-	.align PAGE_SIZE
+	.align PGD_ALIGN
 #ifdef CONFIG_X86_PAE
 .globl initial_pg_pmd
 initial_pg_pmd:
@@ -526,14 +533,17 @@ initial_pg_pmd:
 initial_page_table:
 	.fill 1024,4,0
 #endif
+	.align PGD_ALIGN
 initial_pg_fixmap:
 	.fill 1024,4,0
-.globl empty_zero_page
-empty_zero_page:
-	.fill 4096,1,0
 .globl swapper_pg_dir
+	.align PGD_ALIGN
 swapper_pg_dir:
 	.fill 1024,4,0
+	.fill PTI_USER_PGD_FILL,4,0
+.globl empty_zero_page
+empty_zero_page:
+	.fill 4096,1,0
 EXPORT_SYMBOL(empty_zero_page)
 
 /*
@@ -542,7 +552,7 @@ EXPORT_SYMBOL(empty_zero_page)
 #ifdef CONFIG_X86_PAE
 __PAGE_ALIGNED_DATA
 	/* Page-aligned for the benefit of paravirt? */
-	.align PAGE_SIZE
+	.align PGD_ALIGN
 ENTRY(initial_page_table)
 	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
 # if KPMDS == 3
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 47b5951e592b..db6fb7740bf7 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -343,7 +343,8 @@ static inline pgd_t *_pgd_alloc(void)
 	 * We allocate one page for pgd.
 	 */
 	if (!SHARED_KERNEL_PMD)
-		return (pgd_t *)__get_free_page(PGALLOC_GFP);
+		return (pgd_t *)__get_free_pages(PGALLOC_GFP,
+						 PGD_ALLOCATION_ORDER);
 
 	/*
 	 * Now PAE kernel is not running as a Xen domain. We can allocate
@@ -355,7 +356,7 @@ static inline pgd_t *_pgd_alloc(void)
 static inline void _pgd_free(pgd_t *pgd)
 {
 	if (!SHARED_KERNEL_PMD)
-		free_page((unsigned long)pgd);
+		free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
 	else
 		kmem_cache_free(pgd_cache, pgd);
 }
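PGD_ALLOCATION_ORDER itself is not defined by this hunk. Elsewhere in the tree
(arch/x86/include/asm/pgalloc.h on x86) it expands to 1 when
CONFIG_PAGE_TABLE_ISOLATION is enabled and to 0 otherwise, which is what turns
the allocation above into the 8k, order-1 block holding both roots. A sketch of
that definition for reference; it is quoted from memory, not from this patch:

    #ifdef CONFIG_PAGE_TABLE_ISOLATION
    #define PGD_ALLOCATION_ORDER	1	/* order-1: 8k, kernel + user root */
    #else
    #define PGD_ALLOCATION_ORDER	0	/* order-0: 4k, kernel root only */
    #endif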