Diffstat (limited to 'arch/x86/mm/pageattr.c')
 arch/x86/mm/pageattr.c | 39 +++++++++++++++++++++++++++++----------
 1 file changed, 29 insertions(+), 10 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 84ba74820ad6..7233bd7e357b 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -508,18 +508,24 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 #endif
 
 	/*
-	 * Install the new, split up pagetable. Important details here:
+	 * Install the new, split up pagetable.
 	 *
-	 * On Intel the NX bit of all levels must be cleared to make a
-	 * page executable. See section 4.13.2 of Intel 64 and IA-32
-	 * Architectures Software Developer's Manual).
+	 * We use the standard kernel pagetable protections for the new
+	 * pagetable protections, the actual ptes set above control the
+	 * primary protection behavior:
+	 */
+	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
+
+	/*
+	 * Intel Atom errata AAH41 workaround.
 	 *
-	 * Mark the entry present. The current mapping might be
-	 * set to not present, which we preserved above.
+	 * The real fix should be in hw or in a microcode update, but
+	 * we also probabilistically try to reduce the window of having
+	 * a large TLB mixed with 4K TLBs while instruction fetches are
+	 * going on.
 	 */
-	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
-	pgprot_val(ref_prot) |= _PAGE_PRESENT;
-	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
+	__flush_tlb_all();
+
 	base = NULL;
 
 out_unlock:
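For orientation, the flow split_large_page() ends up with after this hunk is condensed below. The helper names (__set_pmd_pte(), __flush_tlb_all(), _KERNPG_TABLE) are the kernel's own, visible in the diff; the pte-population loop is paraphrased from surrounding code the hunk does not show, so treat its exact shape as an assumption.

	/*
	 * Populate the new 4K ptes; these leaf entries carry the
	 * effective protections (paraphrased -- this loop sits above
	 * the context shown in the hunk).
	 */
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

	/*
	 * Install the split pagetable with the standard kernel
	 * pagetable protections; the leaf ptes above stay in control
	 * of the actual access permissions.
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

	/*
	 * Intel Atom erratum AAH41: flush immediately so the window in
	 * which a stale large TLB entry can coexist with the new 4K
	 * entries stays as small as possible.
	 */
	__flush_tlb_all();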
@@ -575,7 +581,6 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 		address = cpa->vaddr[cpa->curpage];
 	else
 		address = *cpa->vaddr;
-
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
@@ -812,6 +817,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
 	vm_unmap_aliases();
 
+	/*
+	 * If we're called with lazy mmu updates enabled, the
+	 * in-memory pte state may be stale. Flush pending updates to
+	 * bring them up to date.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
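The comment added here is aimed at paravirtualized guests (Xen, in this era), where pte operations issued under lazy mmu mode may only be queued rather than applied immediately. A hypothetical illustration of the hazard -- ptep, new_pte and old are made-up names, not code from this file:

	arch_enter_lazy_mmu_mode();
	set_pte(ptep, new_pte);		/* may be queued, not yet applied */
	/* ... */
	old = *ptep;			/* can still observe the stale pte */
	arch_flush_lazy_mmu_mode();	/* force queued updates to be applied */

Since change_page_attr_set_clr() inspects pagetables through lookup_address(), it flushes first so the pte state it reads is current.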
@@ -854,6 +866,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	} else
 		cpa_flush_all(cache);
 
+	/*
+	 * If we've been called with lazy mmu updates enabled, then
+	 * make sure that everything gets flushed out before we
+	 * return.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 out:
 	return ret;
 }
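Taken together, the two arch_flush_lazy_mmu_mode() calls bracket the CPA work: one flush on entry so stale queued updates cannot be read back, one on exit so CPA's own pte writes are pushed out before callers proceed. A condensed sketch of the resulting shape of change_page_attr_set_clr(); error paths and the cache-flush policy are elided, and the non-else branch is assumed, so treat the exact sequence as a sketch rather than the literal function body:

	vm_unmap_aliases();
	arch_flush_lazy_mmu_mode();	/* entry: read current pte state */

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	cpa_flush_all(cache);		/* or cpa_flush_range() on the branch not shown */
	arch_flush_lazy_mmu_mode();	/* exit: push our pte updates out */

	return ret;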