 arch/x86/kvm/mmu.c         | 15 +++++++++++----
 arch/x86/kvm/paging_tmpl.h | 18 +++++++++++++++---
 2 files changed, 26 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6c67b230e958..5cd8b4ec3a01 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -156,6 +156,8 @@ module_param(oos_shadow, bool, 0644);
 #define CREATE_TRACE_POINTS
 #include "mmutrace.h"
 
+#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
+
 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 
 struct kvm_rmap_desc {
@@ -1754,7 +1756,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                     unsigned pte_access, int user_fault,
                     int write_fault, int dirty, int level,
                     gfn_t gfn, pfn_t pfn, bool speculative,
-                    bool can_unsync)
+                    bool can_unsync, bool reset_host_protection)
 {
         u64 spte;
         int ret = 0;
@@ -1781,6 +1783,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
                         kvm_is_mmio_pfn(pfn));
 
+        if (reset_host_protection)
+                spte |= SPTE_HOST_WRITEABLE;
+
         spte |= (u64)pfn << PAGE_SHIFT;
 
         if ((pte_access & ACC_WRITE_MASK)
@@ -1826,7 +1831,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                          unsigned pt_access, unsigned pte_access,
                          int user_fault, int write_fault, int dirty,
                          int *ptwrite, int level, gfn_t gfn,
-                         pfn_t pfn, bool speculative)
+                         pfn_t pfn, bool speculative,
+                         bool reset_host_protection)
 {
         int was_rmapped = 0;
         int was_writeble = is_writeble_pte(*sptep);
@@ -1858,7 +1864,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
         }
 
         if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
-                     dirty, level, gfn, pfn, speculative, true)) {
+                     dirty, level, gfn, pfn, speculative, true,
+                     reset_host_protection)) {
                 if (write_fault)
                         *ptwrite = 1;
                 kvm_x86_ops->tlb_flush(vcpu);
@@ -1906,7 +1913,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                 if (iterator.level == level) {
                         mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
                                      0, write, 1, &pt_write,
-                                     level, gfn, pfn, false, true);
+                                     level, gfn, pfn, false, true);
                         ++vcpu->stat.pf_fixed;
                         break;
                 }
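
In mmu.c the patch threads a single new bool, reset_host_protection, from mmu_set_spte() down into set_spte(), which latches it into the shadow pte as SPTE_HOST_WRITEABLE. The flag sits at PT_FIRST_AVAIL_BITS_SHIFT, one of the PTE bits x86 leaves available to software, so the hardware walker ignores it. Below is a minimal standalone sketch of that bookkeeping, assuming a shift of 9 for illustration (the real value is defined in arch/x86/kvm/mmu.c):

/* sketch.c - standalone model of the SPTE_HOST_WRITEABLE bookkeeping.
 * PT_FIRST_AVAIL_BITS_SHIFT == 9 is an assumption for this sketch;
 * the kernel's definition lives in arch/x86/kvm/mmu.c.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define PAGE_SHIFT 12

/* Models the new set_spte() step: besides the pfn, remember whether
 * the host page was writable when this spte was created. */
static uint64_t make_spte(uint64_t pfn, bool reset_host_protection)
{
        uint64_t spte = 0;

        if (reset_host_protection)
                spte |= SPTE_HOST_WRITEABLE;
        spte |= pfn << PAGE_SHIFT;
        return spte;
}

int main(void)
{
        uint64_t spte = make_spte(0x1234, true);

        printf("host writable: %d\n", !!(spte & SPTE_HOST_WRITEABLE));
        return 0;
}

The fault-time callers above pass true; the sync path in paging_tmpl.h below is where the flag is read back.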
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index d2fec9c12d22..72558f8ff3f5 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -273,9 +273,13 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
         if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
                 return;
         kvm_get_pfn(pfn);
+        /*
+         * We call mmu_set_spte() with reset_host_protection = true because
+         * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
+         */
         mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
                      gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL,
-                     gpte_to_gfn(gpte), pfn, true);
+                     gpte_to_gfn(gpte), pfn, true, true);
 }
 
 /*
@@ -308,7 +312,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                                      user_fault, write_fault,
                                      gw->ptes[gw->level-1] & PT_DIRTY_MASK,
                                      ptwrite, level,
-                                     gw->gfn, pfn, false);
+                                     gw->gfn, pfn, false, true);
                         break;
                 }
 
@@ -558,6 +562,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
         int i, offset, nr_present;
+        bool reset_host_protection;
 
         offset = nr_present = 0;
 
@@ -595,9 +600,16 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
                 nr_present++;
                 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+                if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
+                        pte_access &= ~ACC_WRITE_MASK;
+                        reset_host_protection = false;
+                } else {
+                        reset_host_protection = true;
+                }
                 set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
                          is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
-                         spte_to_pfn(sp->spt[i]), true, false);
+                         spte_to_pfn(sp->spt[i]), true, false,
+                         reset_host_protection);
         }
 
         return !nr_present;
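
FNAME(sync_page)() is the consumer of the flag: when the cached spte lacks SPTE_HOST_WRITEABLE, the resync strips guest write permission and passes reset_host_protection = false so set_spte() will not re-mark the page host-writable. A standalone model of that decision follows; the ACC_WRITE_MASK value and the helper name sync_one() are illustrative stand-ins, not the kernel's definitions:

#include <stdbool.h>
#include <stdint.h>

#define SPTE_HOST_WRITEABLE (1ULL << 9) /* assumed shift, as above */
#define ACC_WRITE_MASK (1u << 1)        /* illustrative stand-in */

/* Models the sync_page() decision: a host-protected page must stay
 * write-protected across the resync, so drop guest write access and
 * do not mark the new spte host-writable. */
static unsigned sync_one(uint64_t old_spte, unsigned pte_access,
                         bool *reset_host_protection)
{
        if (!(old_spte & SPTE_HOST_WRITEABLE)) {
                pte_access &= ~ACC_WRITE_MASK;
                *reset_host_protection = false;
        } else {
                *reset_host_protection = true;
        }
        return pte_access;
}

int main(void)
{
        bool rhp;
        unsigned acc = sync_one(0, ACC_WRITE_MASK, &rhp);

        /* exit 0 when write access was stripped for a host-protected spte */
        return !(acc == 0 && !rhp);
}

The net effect is that write protection applied on the host side survives an unsync/resync cycle instead of being silently re-enabled.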