about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/x86/mm/cpu_entry_area.c14
-rw-r--r--arch/x86/mm/pti.c23
2 files changed, 35 insertions, 2 deletions
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 476d810639a8..b45f5aaefd74 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -27,8 +27,20 @@ EXPORT_SYMBOL(get_cpu_entry_area);
27void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags) 27void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
28{ 28{
29 unsigned long va = (unsigned long) cea_vaddr; 29 unsigned long va = (unsigned long) cea_vaddr;
30 pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);
30 31
31 set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags)); 32 /*
33 * The cpu_entry_area is shared between the user and kernel
34 * page tables. All of its ptes can safely be global.
35 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
36 * non-present PTEs, so be careful not to set it in that
37 * case to avoid confusion.
38 */
39 if (boot_cpu_has(X86_FEATURE_PGE) &&
40 (pgprot_val(flags) & _PAGE_PRESENT))
41 pte = pte_set_flags(pte, _PAGE_GLOBAL);
42
43 set_pte_vaddr(va, pte);
32} 44}
33 45
34static void __init 46static void __init
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 631507f0c198..8082f8b0c10e 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -300,6 +300,27 @@ pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
300 return; 300 return;
301 301
302 /* 302 /*
303 * Only clone present PMDs. This ensures only setting
304 * _PAGE_GLOBAL on present PMDs. This should only be
305 * called on well-known addresses anyway, so a non-
306 * present PMD would be a surprise.
307 */
308 if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
309 return;
310
311 /*
312 * Setting 'target_pmd' below creates a mapping in both
313 * the user and kernel page tables. It is effectively
314 * global, so set it as global in both copies. Note:
315 * the X86_FEATURE_PGE check is not _required_ because
316 * the CPU ignores _PAGE_GLOBAL when PGE is not
317 * supported. The check keeps consistency with
318 * code that only sets this bit when supported.
319 */
320 if (boot_cpu_has(X86_FEATURE_PGE))
321 *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
322
323 /*
303 * Copy the PMD. That is, the kernelmode and usermode 324 * Copy the PMD. That is, the kernelmode and usermode
304 * tables will share the last-level page tables of this 325 * tables will share the last-level page tables of this
305 * address range 326 * address range
@@ -348,7 +369,7 @@ static void __init pti_clone_entry_text(void)
348{ 369{
349 pti_clone_pmds((unsigned long) __entry_text_start, 370 pti_clone_pmds((unsigned long) __entry_text_start,
350 (unsigned long) __irqentry_text_end, 371 (unsigned long) __irqentry_text_end,
351 _PAGE_RW | _PAGE_GLOBAL); 372 _PAGE_RW);
352} 373}
353 374
354/* 375/*