aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKirill A. Shutemov <kirill@shutemov.name>2019-06-24 08:31:50 -0400
committerThomas Gleixner <tglx@linutronix.de>2019-06-26 01:25:09 -0400
commit432c833218dd0f75e7b56bd5e8658b72073158d2 (patch)
tree1c22a54a3ea0fce7b822242284fa55afc1945e81
parentc1887159eb48ba40e775584cfb2a443962cf1a05 (diff)
x86/mm: Handle physical-virtual alignment mismatch in phys_p4d_init()
Kyle has reported occasional crashes when booting a kernel in 5-level paging mode with KASLR enabled: WARNING: CPU: 0 PID: 0 at arch/x86/mm/init_64.c:87 phys_p4d_init+0x1d4/0x1ea RIP: 0010:phys_p4d_init+0x1d4/0x1ea Call Trace: __kernel_physical_mapping_init+0x10a/0x35c kernel_physical_mapping_init+0xe/0x10 init_memory_mapping+0x1aa/0x3b0 init_range_memory_mapping+0xc8/0x116 init_mem_mapping+0x225/0x2eb setup_arch+0x6ff/0xcf5 start_kernel+0x64/0x53b ? copy_bootdata+0x1f/0xce x86_64_start_reservations+0x24/0x26 x86_64_start_kernel+0x8a/0x8d secondary_startup_64+0xb6/0xc0 which causes later: BUG: unable to handle page fault for address: ff484d019580eff8 #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page BAD Oops: 0000 [#1] SMP NOPTI RIP: 0010:fill_pud+0x13/0x130 Call Trace: set_pte_vaddr_p4d+0x2e/0x50 set_pte_vaddr+0x6f/0xb0 __native_set_fixmap+0x28/0x40 native_set_fixmap+0x39/0x70 register_lapic_address+0x49/0xb6 early_acpi_boot_init+0xa5/0xde setup_arch+0x944/0xcf5 start_kernel+0x64/0x53b Kyle bisected the issue to commit b569c1843498 ("x86/mm/KASLR: Reduce randomization granularity for 5-level paging to 1GB") Before this commit PAGE_OFFSET was always aligned to P4D_SIZE when booting 5-level paging mode. But now only PUD_SIZE alignment is guaranteed. In the case I was able to reproduce the following vaddr/paddr values were observed in phys_p4d_init(): Iteration vaddr paddr 1 0xff4228027fe00000 0x033fe00000 2 0xff42287f40000000 0x8000000000 'vaddr' in both cases belongs to the same p4d entry. But due to the original assumption that PAGE_OFFSET is aligned to P4D_SIZE this overlap cannot be handled correctly. The code assumes strictly aligned entries and unconditionally increments the index into the P4D table, which creates false duplicate entries. Once the index reaches the end, the last entry in the page table is missing. 
Aside from that, the 'paddr >= paddr_end' condition can evaluate incorrectly, which causes a P4D entry to be cleared incorrectly. Change the loop in phys_p4d_init() to walk purely based on virtual addresses like __kernel_physical_mapping_init() does. This makes it work correctly with unaligned virtual addresses. Fixes: b569c1843498 ("x86/mm/KASLR: Reduce randomization granularity for 5-level paging to 1GB") Reported-by: Kyle Pelton <kyle.d.pelton@intel.com> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Tested-by: Kyle Pelton <kyle.d.pelton@intel.com> Acked-by: Baoquan He <bhe@redhat.com> Cc: Borislav Petkov <bp@alien8.de> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Link: https://lkml.kernel.org/r/20190624123150.920-1-kirill.shutemov@linux.intel.com
-rw-r--r--arch/x86/mm/init_64.c24
1 files changed, 13 insertions, 11 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 693aaf28d5fe..0f01c7b1d217 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -671,23 +671,25 @@ static unsigned long __meminit
671phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, 671phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
672 unsigned long page_size_mask, bool init) 672 unsigned long page_size_mask, bool init)
673{ 673{
674 unsigned long paddr_next, paddr_last = paddr_end; 674 unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;
675 unsigned long vaddr = (unsigned long)__va(paddr); 675
676 int i = p4d_index(vaddr); 676 paddr_last = paddr_end;
677 vaddr = (unsigned long)__va(paddr);
678 vaddr_end = (unsigned long)__va(paddr_end);
677 679
678 if (!pgtable_l5_enabled()) 680 if (!pgtable_l5_enabled())
679 return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, 681 return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
680 page_size_mask, init); 682 page_size_mask, init);
681 683
682 for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) { 684 for (; vaddr < vaddr_end; vaddr = vaddr_next) {
683 p4d_t *p4d; 685 p4d_t *p4d = p4d_page + p4d_index(vaddr);
684 pud_t *pud; 686 pud_t *pud;
685 687
686 vaddr = (unsigned long)__va(paddr); 688 vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
687 p4d = p4d_page + p4d_index(vaddr); 689 paddr = __pa(vaddr);
688 paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
689 690
690 if (paddr >= paddr_end) { 691 if (paddr >= paddr_end) {
692 paddr_next = __pa(vaddr_next);
691 if (!after_bootmem && 693 if (!after_bootmem &&
692 !e820__mapped_any(paddr & P4D_MASK, paddr_next, 694 !e820__mapped_any(paddr & P4D_MASK, paddr_next,
693 E820_TYPE_RAM) && 695 E820_TYPE_RAM) &&
@@ -699,13 +701,13 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
699 701
700 if (!p4d_none(*p4d)) { 702 if (!p4d_none(*p4d)) {
701 pud = pud_offset(p4d, 0); 703 pud = pud_offset(p4d, 0);
702 paddr_last = phys_pud_init(pud, paddr, paddr_end, 704 paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
703 page_size_mask, init); 705 page_size_mask, init);
704 continue; 706 continue;
705 } 707 }
706 708
707 pud = alloc_low_page(); 709 pud = alloc_low_page();
708 paddr_last = phys_pud_init(pud, paddr, paddr_end, 710 paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
709 page_size_mask, init); 711 page_size_mask, init);
710 712
711 spin_lock(&init_mm.page_table_lock); 713 spin_lock(&init_mm.page_table_lock);