Diffstat (limited to 'arch/x86/mm/pageattr.c')

 arch/x86/mm/pageattr.c | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 84ba74820ad6..7be47d1a97e4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -508,18 +508,13 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 #endif
 
 	/*
-	 * Install the new, split up pagetable. Important details here:
+	 * Install the new, split up pagetable.
 	 *
-	 * On Intel the NX bit of all levels must be cleared to make a
-	 * page executable. See section 4.13.2 of Intel 64 and IA-32
-	 * Architectures Software Developer's Manual).
-	 *
-	 * Mark the entry present. The current mapping might be
-	 * set to not present, which we preserved above.
+	 * We use the standard kernel pagetable protections for the new
+	 * pagetable protections, the actual ptes set above control the
+	 * primary protection behavior:
 	 */
-	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
-	pgprot_val(ref_prot) |= _PAGE_PRESENT;
-	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
+	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
 	base = NULL;
 
 out_unlock:
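The permissive _KERNPG_TABLE protections are safe for the upper-level entry because x86 combines the protections of every level of the page walk: write access needs the RW bit set at each level, and, as the removed comment noted, setting NX at any level makes the page non-executable. A permissive pmd entry therefore delegates the effective protections to the 4K ptes populated just above. A minimal sketch of that combining rule, with invented helper names and only two of the flag bits:

#include <stdbool.h>
#include <stdint.h>

#define _PAGE_RW	(1ULL << 1)	/* writable   */
#define _PAGE_NX	(1ULL << 63)	/* no-execute */

/* Illustrative only: write access requires RW at every level of the walk. */
static bool effective_writable(uint64_t pmd_flags, uint64_t pte_flags)
{
	return (pmd_flags & _PAGE_RW) && (pte_flags & _PAGE_RW);
}

/* Illustrative only: NX at any level makes the page non-executable. */
static bool effective_executable(uint64_t pmd_flags, uint64_t pte_flags)
{
	return !(pmd_flags & _PAGE_NX) && !(pte_flags & _PAGE_NX);
}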
@@ -575,7 +570,6 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 		address = cpa->vaddr[cpa->curpage];
 	else
 		address = *cpa->vaddr;
-
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
@@ -812,6 +806,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
 	vm_unmap_aliases();
 
+	/*
+	 * If we're called with lazy mmu updates enabled, the
+	 * in-memory pte state may be stale. Flush pending updates to
+	 * bring them up to date.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
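For context, "lazy mmu updates" refers to the paravirt batching interface: between arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode(), a backend such as Xen may queue pte writes instead of applying them immediately, so the live page tables that lookup_address() walks can lag behind what the caller has logically written. A simplified model of that batching, with the queue and helpers invented for illustration (only the arch_*_lazy_mmu_mode() names mirror the real interface):

#include <stddef.h>

struct mmu_update {
	unsigned long *ptep;	/* pte to rewrite */
	unsigned long val;	/* new pte value  */
};

static struct mmu_update queue[64];
static size_t queued;

/* Stand-in for a pte write made while lazy mode is active. */
static void queue_pte_update(unsigned long *ptep, unsigned long val)
{
	queue[queued].ptep = ptep;
	queue[queued].val = val;
	queued++;
	/* The in-memory page tables are now stale until the queue drains. */
}

/* What arch_flush_lazy_mmu_mode() conceptually does: drain the queue. */
static void flush_lazy_mmu(void)
{
	for (size_t i = 0; i < queued; i++)
		*queue[i].ptep = queue[i].val;	/* a hypercall in a real backend */
	queued = 0;
}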
@@ -854,6 +855,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	} else
 		cpa_flush_all(cache);
 
+	/*
+	 * If we've been called with lazy mmu updates enabled, then
+	 * make sure that everything gets flushed out before we
+	 * return.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 out:
 	return ret;
 }
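Taken together, the two flushes bracket the attribute change: the one before the cpa loop makes sure lookup_address() sees any pte writes the caller queued earlier, and the one after makes sure cpa's own rewrites reach the page tables before the caller resumes. A hypothetical caller sequence this guards against, using real kernel entry points in an illustrative order:

	arch_enter_lazy_mmu_mode();
	set_pte(ptep, pte);		/* may only be queued at this point   */
	set_memory_ro(addr, 1);		/* walks and rewrites the page tables */
	arch_leave_lazy_mmu_mode();

Without the first flush, set_memory_ro() could split or rewrite a mapping based on the stale pre-set_pte() state; without the second, its own updates could still be sitting in the queue while the caller already relies on them.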