aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Garnier <thgarnie@google.com>2016-06-21 20:47:00 -0400
committerIngo Molnar <mingo@kernel.org>2016-07-08 11:33:46 -0400
commitfaa379332f3cb3375db1849e27386f8bc9b97da4 (patch)
tree0fa6986ca49db4f1c4528fc3ebed248e9fe14c57
parent59b3d0206d74a700069e49160e8194b2ca93b703 (diff)
x86/mm: Add PUD VA support for physical mapping
Minor change that allows early boot physical mapping of PUD level virtual addresses. The current implementation expects the virtual address to be PUD aligned. For KASLR memory randomization, we need to be able to randomize the offset used on the PUD table. It has no impact on current usage. Signed-off-by: Thomas Garnier <thgarnie@google.com> Signed-off-by: Kees Cook <keescook@chromium.org> Cc: Alexander Kuleshov <kuleshovmail@gmail.com> Cc: Alexander Popov <alpopov@ptsecurity.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Cc: Baoquan He <bhe@redhat.com> Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Borislav Petkov <bp@suse.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Christian Borntraeger <borntraeger@de.ibm.com> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Dave Young <dyoung@redhat.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Jan Beulich <JBeulich@suse.com> Cc: Joerg Roedel <jroedel@suse.de> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Juergen Gross <jgross@suse.com> Cc: Kirill A. 
Shutemov <kirill.shutemov@linux.intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Lv Zheng <lv.zheng@intel.com> Cc: Mark Salter <msalter@redhat.com> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Matt Fleming <matt@codeblueprint.co.uk> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephen Smalley <sds@tycho.nsa.gov> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Toshi Kani <toshi.kani@hpe.com> Cc: Xiao Guangrong <guangrong.xiao@linux.intel.com> Cc: Yinghai Lu <yinghai@kernel.org> Cc: kernel-hardening@lists.openwall.com Cc: linux-doc@vger.kernel.org Link: http://lkml.kernel.org/r/1466556426-32664-4-git-send-email-keescook@chromium.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- arch/x86/mm/init_64.c | 13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 6714712bd5da..7bf1ddb54537 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -465,7 +465,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
465 465
466/* 466/*
467 * Create PUD level page table mapping for physical addresses. The virtual 467 * Create PUD level page table mapping for physical addresses. The virtual
468 * and physical address have to be aligned at this level. 468 * and physical address do not have to be aligned at this level. KASLR can
469 * randomize virtual addresses up to this level.
469 * It returns the last physical address mapped. 470 * It returns the last physical address mapped.
470 */ 471 */
471static unsigned long __meminit 472static unsigned long __meminit
@@ -474,14 +475,18 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
474{ 475{
475 unsigned long pages = 0, paddr_next; 476 unsigned long pages = 0, paddr_next;
476 unsigned long paddr_last = paddr_end; 477 unsigned long paddr_last = paddr_end;
477 int i = pud_index(paddr); 478 unsigned long vaddr = (unsigned long)__va(paddr);
479 int i = pud_index(vaddr);
478 480
479 for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) { 481 for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
480 pud_t *pud = pud_page + pud_index(paddr); 482 pud_t *pud;
481 pmd_t *pmd; 483 pmd_t *pmd;
482 pgprot_t prot = PAGE_KERNEL; 484 pgprot_t prot = PAGE_KERNEL;
483 485
486 vaddr = (unsigned long)__va(paddr);
487 pud = pud_page + pud_index(vaddr);
484 paddr_next = (paddr & PUD_MASK) + PUD_SIZE; 488 paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
489
485 if (paddr >= paddr_end) { 490 if (paddr >= paddr_end) {
486 if (!after_bootmem && 491 if (!after_bootmem &&
487 !e820_any_mapped(paddr & PUD_MASK, paddr_next, 492 !e820_any_mapped(paddr & PUD_MASK, paddr_next,
@@ -551,7 +556,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
551 556
552/* 557/*
553 * Create page table mapping for the physical memory for specific physical 558 * Create page table mapping for the physical memory for specific physical
554 * addresses. The virtual and physical addresses have to be aligned on PUD level 559 * addresses. The virtual and physical addresses have to be aligned on PMD level
555 * down. It returns the last physical address mapped. 560 * down. It returns the last physical address mapped.
556 */ 561 */
557unsigned long __meminit 562unsigned long __meminit