author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2017-06-06 07:31:30 -0400
committer	Ingo Molnar <mingo@kernel.org>	2017-06-13 02:56:57 -0400
commit		7e82ea946ae4d056859b19fcdec66425878395eb (patch)
tree		1ae94b7648b3b773791f96245f6fcb5a77346118
parent		141efad7d7fa4f4abb3a1b19f6a968d1b1f21903 (diff)
x86/mm: Make kernel_physical_mapping_init() support 5-level paging
Populate additional page table level if CONFIG_X86_5LEVEL is enabled.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170606113133.22974-12-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
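[Editor's note] For context on what the new level indexes: with CONFIG_X86_5LEVEL, x86-64 gains a p4d table between the pgd and pud levels, extending virtual addresses to 57 bits. Below is a minimal, self-contained sketch, not part of this patch, of how an address splits into five 9-bit table indices plus a 12-bit page offset; the shift values follow the documented x86-64 5-level layout, and the helper name index_at and the sample address are invented for illustration.

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12	/* 4 KiB pages: pte index lives at bits 12..20 */
	#define PMD_SHIFT	21
	#define PUD_SHIFT	30
	#define P4D_SHIFT	39	/* the level this patch populates */
	#define PGDIR_SHIFT	48	/* 48 with 5-level paging; 39 when p4d is folded */
	#define PTRS_PER_TABLE	512	/* 9 index bits per level */

	/* Extract the 9-bit table index that starts at the given shift. */
	static unsigned int index_at(uint64_t vaddr, int shift)
	{
		return (unsigned int)((vaddr >> shift) & (PTRS_PER_TABLE - 1));
	}

	int main(void)
	{
		uint64_t vaddr = 0xff11223344556000ULL;	/* arbitrary example address */

		printf("pgd=%u p4d=%u pud=%u pmd=%u pte=%u\n",
		       index_at(vaddr, PGDIR_SHIFT),
		       index_at(vaddr, P4D_SHIFT),
		       index_at(vaddr, PUD_SHIFT),
		       index_at(vaddr, PMD_SHIFT),
		       index_at(vaddr, PAGE_SHIFT));
		return 0;
	}

The new phys_p4d_init() below walks entries at P4D_SHIFT granularity (P4D_SIZE steps), exactly as phys_pud_init() already does one level down.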
-rw-r--r--	arch/x86/mm/init_64.c	69
1 file changed, 60 insertions(+), 9 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 7a9497ac468d..b863d14e452a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -624,6 +624,57 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 	return paddr_last;
 }
 
+static unsigned long __meminit
+phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
+	      unsigned long page_size_mask)
+{
+	unsigned long paddr_next, paddr_last = paddr_end;
+	unsigned long vaddr = (unsigned long)__va(paddr);
+	int i = p4d_index(vaddr);
+
+	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);
+
+	for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
+		p4d_t *p4d;
+		pud_t *pud;
+
+		vaddr = (unsigned long)__va(paddr);
+		p4d = p4d_page + p4d_index(vaddr);
+		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
+
+		if (paddr >= paddr_end) {
+			if (!after_bootmem &&
+			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
+					      E820_TYPE_RAM) &&
+			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
+					      E820_TYPE_RESERVED_KERN))
+				set_p4d(p4d, __p4d(0));
+			continue;
+		}
+
+		if (!p4d_none(*p4d)) {
+			pud = pud_offset(p4d, 0);
+			paddr_last = phys_pud_init(pud, paddr,
+						   paddr_end,
+						   page_size_mask);
+			__flush_tlb_all();
+			continue;
+		}
+
+		pud = alloc_low_page();
+		paddr_last = phys_pud_init(pud, paddr, paddr_end,
+					   page_size_mask);
+
+		spin_lock(&init_mm.page_table_lock);
+		p4d_populate(&init_mm, p4d, pud);
+		spin_unlock(&init_mm.page_table_lock);
+	}
+	__flush_tlb_all();
+
+	return paddr_last;
+}
+
 /*
  * Create page table mapping for the physical memory for specific physical
  * addresses. The virtual and physical addresses have to be aligned on PMD level
@@ -645,26 +696,26 @@ kernel_physical_mapping_init(unsigned long paddr_start,
 	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
 		pgd_t *pgd = pgd_offset_k(vaddr);
 		p4d_t *p4d;
-		pud_t *pud;
 
 		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
 
-		BUILD_BUG_ON(pgd_none(*pgd));
-		p4d = p4d_offset(pgd, vaddr);
-		if (p4d_val(*p4d)) {
-			pud = (pud_t *)p4d_page_vaddr(*p4d);
-			paddr_last = phys_pud_init(pud, __pa(vaddr),
+		if (pgd_val(*pgd)) {
+			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
 						   __pa(vaddr_end),
 						   page_size_mask);
 			continue;
 		}
 
-		pud = alloc_low_page();
-		paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
+		p4d = alloc_low_page();
+		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
 					   page_size_mask);
 
 		spin_lock(&init_mm.page_table_lock);
-		p4d_populate(&init_mm, p4d, pud);
+		if (IS_ENABLED(CONFIG_X86_5LEVEL))
+			pgd_populate(&init_mm, pgd, p4d);
+		else
+			p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
 		spin_unlock(&init_mm.page_table_lock);
 		pgd_changed = true;
 	}
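[Editor's note] On the final populate branch: when CONFIG_X86_5LEVEL is off, the p4d level is folded into the pgd, so p4d_offset(pgd, vaddr) returns the pgd slot itself and the freshly allocated page is really a pud table. Both branches therefore end up writing the same top-level entry; only the type the new page is treated as differs. The toy model below is illustrative only, not kernel code; entry_t, FOLDED, and p4d_offset_model are invented names for this sketch.

	#include <stdio.h>

	typedef struct { void *table; } entry_t;

	#define FOLDED 1	/* 1 models !CONFIG_X86_5LEVEL (p4d folded into pgd) */

	/* Folded: the pgd entry *is* the p4d entry. Unfolded: index a real p4d table. */
	static entry_t *p4d_offset_model(entry_t *pgd_entry, entry_t *p4d_table,
					 unsigned int index)
	{
		if (FOLDED)
			return pgd_entry;
		return &p4d_table[index];
	}

	int main(void)
	{
		entry_t pgd_entry = { 0 };
		entry_t p4d_table[512] = { { 0 } };
		int pud_page;	/* stand-in for a freshly allocated lower-level table */

		entry_t *slot = p4d_offset_model(&pgd_entry, p4d_table, 7);
		slot->table = &pud_page;	/* the "populate" step */

		printf("wrote into %s\n",
		       slot == &pgd_entry ? "the pgd entry (p4d folded)"
					  : "a separate p4d entry");
		return 0;
	}

This folding is also why the IS_ENABLED(CONFIG_X86_5LEVEL) checks compile away: with the option off, phys_p4d_init() reduces to a direct call to phys_pud_init() and the populate path degenerates to the old 4-level behavior.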