about summary refs log tree commit diff stats
path: root/arch/x86/xen/mmu_pv.c
diff options
context:
space:
mode:
author: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> 2018-10-26 08:28:54 -0400
committer: Thomas Gleixner <tglx@linutronix.de> 2018-11-06 15:35:11 -0500
commit: d52888aa2753e3063a9d3a0c9f72f94aa9809c15 (patch)
tree: fbbb33771ac6c392caeb283163a594f1a7e6d04d /arch/x86/xen/mmu_pv.c
parent: e8a308e5f47e545e0d41d0686c00f5f5217c5f61 (diff)
x86/mm: Move LDT remap out of KASLR region on 5-level paging
On 5-level paging the LDT remap area is placed in the middle of the KASLR randomization region and it can overlap with the direct mapping, the vmalloc or the vmap area.

The LDT mapping is per mm, so it cannot be moved into the P4D page table next to the CPU_ENTRY_AREA without complicating PGD table allocation for 5-level paging.

The 4 PGD slot gap just before the direct mapping is reserved for hypervisors, so it cannot be used.

Move the direct mapping one slot deeper and use the resulting gap for the LDT remap area. The resulting layout is the same for 4 and 5 level paging.

[ tglx: Massaged changelog ]

Fixes: f55f0501cbf6 ("x86/pti: Put the LDT in its own PGD if PTI is on")
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Cc: bp@alien8.de
Cc: hpa@zytor.com
Cc: dave.hansen@linux.intel.com
Cc: peterz@infradead.org
Cc: boris.ostrovsky@oracle.com
Cc: jgross@suse.com
Cc: bhe@redhat.com
Cc: willy@infradead.org
Cc: linux-mm@kvack.org
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20181026122856.66224-2-kirill.shutemov@linux.intel.com
Diffstat (limited to 'arch/x86/xen/mmu_pv.c')
-rw-r--r-- arch/x86/xen/mmu_pv.c | 6
1 file changed, 3 insertions, 3 deletions
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 0d7b3ae4960b..a5d7ed125337 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1905,7 +1905,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1905 init_top_pgt[0] = __pgd(0); 1905 init_top_pgt[0] = __pgd(0);
1906 1906
1907 /* Pre-constructed entries are in pfn, so convert to mfn */ 1907 /* Pre-constructed entries are in pfn, so convert to mfn */
1908 /* L4[272] -> level3_ident_pgt */ 1908 /* L4[273] -> level3_ident_pgt */
1909 /* L4[511] -> level3_kernel_pgt */ 1909 /* L4[511] -> level3_kernel_pgt */
1910 convert_pfn_mfn(init_top_pgt); 1910 convert_pfn_mfn(init_top_pgt);
1911 1911
@@ -1925,8 +1925,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1925 addr[0] = (unsigned long)pgd; 1925 addr[0] = (unsigned long)pgd;
1926 addr[1] = (unsigned long)l3; 1926 addr[1] = (unsigned long)l3;
1927 addr[2] = (unsigned long)l2; 1927 addr[2] = (unsigned long)l2;
1928 /* Graft it onto L4[272][0]. Note that we creating an aliasing problem: 1928 /* Graft it onto L4[273][0]. Note that we creating an aliasing problem:
1929 * Both L4[272][0] and L4[511][510] have entries that point to the same 1929 * Both L4[273][0] and L4[511][510] have entries that point to the same
1930 * L2 (PMD) tables. Meaning that if you modify it in __va space 1930 * L2 (PMD) tables. Meaning that if you modify it in __va space
1931 * it will be also modified in the __ka space! (But if you just 1931 * it will be also modified in the __ka space! (But if you just
1932 * modify the PMD table to point to other PTE's or none, then you 1932 * modify the PMD table to point to other PTE's or none, then you