author    Izik Eidus <ieidus@redhat.com>    2009-09-23 14:47:17 -0400
committer Avi Kivity <avi@redhat.com>       2009-10-04 11:04:50 -0400
commit    1403283acca398e244ece35741ad251c1feb5972 (patch)
tree      59a19bdd805c2e58929a38503435fb73d95f2ee4 /arch/x86/kvm/mmu.c
parent    acb66dd051d0834c8b36d147ff83a8d39da0fe0b (diff)
KVM: MMU: add SPTE_HOST_WRITEABLE flag to the shadow ptes
This flag tracks whether the host physical page that the spte points to is writable. When the host page is write-protected (the flag is clear), we cannot grant write access through the spte unless we first run get_user_pages(write = 1). (This is needed for change_pte support in KVM.)

Signed-off-by: Izik Eidus <ieidus@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
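For orientation, here is a minimal, self-contained sketch of the idea behind the patch, outside of KVM: one software-available bit in the spte records whether the host mapping was writable when the spte was installed, and write access may only be granted later if that bit is set. The bit positions and the helper names (spte_host_writable, try_make_spte_writable) are illustrative stand-ins, not kernel code:

	#include <stdbool.h>
	#include <stdint.h>

	#define PT_WRITABLE_SHIFT         1   /* hardware R/W bit in an x86 pte */
	#define PT_FIRST_AVAIL_BITS_SHIFT 9   /* first software-available bit */
	#define PT_WRITABLE_MASK    (1ULL << PT_WRITABLE_SHIFT)
	#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

	/* Was the host page writable when this spte was installed? */
	static bool spte_host_writable(uint64_t spte)
	{
		return spte & SPTE_HOST_WRITEABLE;
	}

	/*
	 * Grant guest write access only if the host mapping was writable;
	 * otherwise the caller must redo get_user_pages(write = 1) first.
	 */
	static bool try_make_spte_writable(uint64_t *spte)
	{
		if (!spte_host_writable(*spte))
			return false;
		*spte |= PT_WRITABLE_MASK;
		return true;
	}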
Diffstat (limited to 'arch/x86/kvm/mmu.c')
 arch/x86/kvm/mmu.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6c67b230e958..5cd8b4ec3a01 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -156,6 +156,8 @@ module_param(oos_shadow, bool, 0644);
 #define CREATE_TRACE_POINTS
 #include "mmutrace.h"
 
+#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
+
 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 
 struct kvm_rmap_desc {
@@ -1754,7 +1756,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int level,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
-		    bool can_unsync)
+		    bool can_unsync, bool reset_host_protection)
 {
 	u64 spte;
 	int ret = 0;
@@ -1781,6 +1783,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
 			kvm_is_mmio_pfn(pfn));
 
+	if (reset_host_protection)
+		spte |= SPTE_HOST_WRITEABLE;
+
 	spte |= (u64)pfn << PAGE_SHIFT;
 
 	if ((pte_access & ACC_WRITE_MASK)
@@ -1826,7 +1831,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
 			 int *ptwrite, int level, gfn_t gfn,
-			 pfn_t pfn, bool speculative)
+			 pfn_t pfn, bool speculative,
+			 bool reset_host_protection)
 {
 	int was_rmapped = 0;
 	int was_writeble = is_writeble_pte(*sptep);
@@ -1858,7 +1864,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	}
 
 	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
-		      dirty, level, gfn, pfn, speculative, true)) {
+		      dirty, level, gfn, pfn, speculative, true,
+		      reset_host_protection)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_x86_ops->tlb_flush(vcpu);
@@ -1906,7 +1913,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 		if (iterator.level == level) {
 			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
 				     0, write, 1, &pt_write,
-				     level, gfn, pfn, false);
+				     level, gfn, pfn, false, true);
 			++vcpu->stat.pf_fixed;
 			break;
 		}
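Usage note: in the last hunk, __direct_map passes reset_host_protection = true, i.e. this fault path treats the host page as writable when it installs the spte; callers that can pass false live in paging_tmpl.h, which the full commit also touches (the diffstat above is limited to mmu.c). The flag pays off in the change_pte path the commit message mentions: when the host replaces the page backing a gfn (as KSM does when it merges identical pages), the spte must be repointed, and guest write access may survive only if the new host mapping is writable. A hypothetical, self-contained sketch of such an update, not the actual KVM implementation:

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustrative stand-ins, matching the earlier sketch. */
	#define PT_WRITABLE_MASK    (1ULL << 1)
	#define SPTE_HOST_WRITEABLE (1ULL << 9)
	#define PFN_MASK            0x000ffffffffff000ULL /* phys addr bits 12-51 */

	/*
	 * Repoint an spte at new_pfn after the host replaced the backing
	 * page.  Write access survives only if the new host mapping is
	 * writable; otherwise both the hardware writable bit and the
	 * host-writable marker are dropped, forcing a later
	 * get_user_pages(write = 1) before the spte can be made writable.
	 */
	static uint64_t repoint_spte(uint64_t spte, uint64_t new_pfn,
				     bool host_writable)
	{
		spte &= ~(PFN_MASK | PT_WRITABLE_MASK | SPTE_HOST_WRITEABLE);
		spte |= (new_pfn << 12) & PFN_MASK;
		if (host_writable)
			spte |= PT_WRITABLE_MASK | SPTE_HOST_WRITEABLE;
		return spte;
	}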