author     Andy Lutomirski <luto@kernel.org>       2018-01-25 16:12:15 -0500
committer  Thomas Gleixner <tglx@linutronix.de>    2018-01-26 09:56:23 -0500
commit     36b3a7726886f24c4209852a58e64435bde3af98
tree       b7138f27cf51076b3d2c05ca9b6d268d5e3bce62
parent     5beda7d54eafece4c974cfa9fbb9f60fb18fd20a
x86/mm/64: Tighten up vmalloc_fault() sanity checks on 5-level kernels
On a 5-level kernel, if a non-init mm has a top-level entry, it needs to
match init_mm's, but the vmalloc_fault() code skipped over the BUG_ON()
that would have checked it.
While we're at it, get rid of the rather confusing 4-level folded "pgd"
logic.
Cleans-up: b50858ce3e2a ("x86/mm/vmalloc: Add 5-level paging support")
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Neil Berrington <neil.berrington@datacore.com>
Link: https://lkml.kernel.org/r/2ae598f8c279b0a29baf75df207e6f2fdddc0a1b.1516914529.git.luto@kernel.org
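
For readers who want to see the tightened control flow in isolation, here is a minimal user-space sketch of the pgd-level sync as it looks after this patch. It is not kernel code: PGTABLE_LEVELS, struct entry, entry_none() and sync_top_level() are hypothetical stand-ins for CONFIG_PGTABLE_LEVELS, pgd_t, pgd_none() and the pgd-level block in vmalloc_fault(), and the -1 return stands in for the BUG_ON().

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for CONFIG_PGTABLE_LEVELS; set to 4 to
	 * model a kernel with a folded p4d level. */
	#define PGTABLE_LEVELS 5

	/* Hypothetical stand-in for pgd_t. */
	struct entry { unsigned long val; };

	/* Stand-in for pgd_none(): with a folded p4d (4-level build) the
	 * pgd is a pass-through and "none" can never be observed at this
	 * level -- the confusion the patch removes by skipping the pgd
	 * level entirely on such builds. */
	static bool entry_none(struct entry e)
	{
		return PGTABLE_LEVELS > 4 && e.val == 0;
	}

	/* Mirrors the patched pgd-level block: only a real (5-level) pgd
	 * is copied or checked; 4-level builds defer to the p4d level. */
	static int sync_top_level(struct entry *pgd, struct entry pgd_ref)
	{
		if (PGTABLE_LEVELS > 4) {
			if (entry_none(*pgd))
				*pgd = pgd_ref;		/* like set_pgd() */
			else if (pgd->val != pgd_ref.val)
				return -1;		/* like the BUG_ON() */
		}
		return 0;
	}

	int main(void)
	{
		struct entry ref   = { 0x1000 };
		struct entry empty = { 0 };
		struct entry stale = { 0x2000 };

		printf("empty: %d\n", sync_top_level(&empty, ref)); /* 0: copied */
		printf("stale: %d\n", sync_top_level(&stale, ref)); /* -1: mismatch caught */
		return 0;
	}

On a 5-level build, a present-but-mismatched top-level entry is now always caught; on a 4-level build the pgd level is skipped and the copy/check happens one level down, as the diff below shows.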
 arch/x86/mm/fault.c | 22 +++++++++++-------------
 1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b3e40773dce0..800de815519c 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -439,18 +439,13 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pgd_none(*pgd_ref))
 		return -1;
 
-	if (pgd_none(*pgd)) {
-		set_pgd(pgd, *pgd_ref);
-		arch_flush_lazy_mmu_mode();
-	} else if (CONFIG_PGTABLE_LEVELS > 4) {
-		/*
-		 * With folded p4d, pgd_none() is always false, so the pgd may
-		 * point to an empty page table entry and pgd_page_vaddr()
-		 * will return garbage.
-		 *
-		 * We will do the correct sanity check on the p4d level.
-		 */
-		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+	if (CONFIG_PGTABLE_LEVELS > 4) {
+		if (pgd_none(*pgd)) {
+			set_pgd(pgd, *pgd_ref);
+			arch_flush_lazy_mmu_mode();
+		} else {
+			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+		}
 	}
 
 	/* With 4-level paging, copying happens on the p4d level. */
@@ -459,7 +454,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (p4d_none(*p4d_ref))
 		return -1;
 
-	if (p4d_none(*p4d)) {
+	if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4) {
 		set_p4d(p4d, *p4d_ref);
 		arch_flush_lazy_mmu_mode();
 	} else {
@@ -470,6 +465,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	 * Below here mismatches are bugs because these lower tables
 	 * are shared:
 	 */
+	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
 
 	pud = pud_offset(p4d, address);
 	pud_ref = pud_offset(p4d_ref, address);
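
The added BUILD_BUG_ON() turns an impossible configuration (fewer than 4 page-table levels on x86-64) into a build failure rather than a runtime check. As a rough sketch of the idea only (the kernel's real macro is more elaborate), a classic simplified form makes a true condition produce a negative array size, which cannot compile:

	/* Simplified sketch of the classic trick: if cond is true the
	 * array size is 1 - 2 = -1 and the compiler rejects the build. */
	#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))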