path: root/arch/x86/kvm/mmu.c
author     Gleb Natapov <gleb@redhat.com>  2010-02-10 07:21:32 -0500
committer  Marcelo Tosatti <mtosatti@redhat.com>  2010-03-01 10:36:11 -0500
commit     1871c6020d7308afb99127bba51f04548e7ca84e (patch)
tree       64871be680574ed53104923456dc0b184db3cf69 /arch/x86/kvm/mmu.c
parent     a0044755679f3e761b8b95995e5f2db2b7efd0f6 (diff)
KVM: x86 emulator: fix memory access during x86 emulation
Currently, when the x86 emulator needs to access memory, the page walk is done with the broadest permissions possible, so if the emulated instruction was executed by a userspace process it can still access kernel memory. Fix that by providing the correct memory access permissions to the page walker during emulation.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Cc: stable@kernel.org
Signed-off-by: Avi Kivity <avi@redhat.com>
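For context, here is a minimal sketch of how the read-side translation helper used below might derive the access mask from the guest's current privilege level before walking the page tables. The helper itself lives outside this file (in arch/x86/kvm/x86.c in this series), and the PFERR_* masks removed from mmu.c below are presumably moved to a shared header so it can use them; treat the body as an illustration, not the exact patched code.

	/* Sketch only: real helper is introduced elsewhere in this series. */
	gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
	{
		/*
		 * A walk done on behalf of a CPL 3 guest context must be
		 * treated as a user-mode access, so that kernel-only
		 * mappings fault instead of being readable by the emulator.
		 */
		u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;

		return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
	}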
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  17
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 739793240d1d..741373e8ca77 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -138,12 +138,6 @@ module_param(oos_shadow, bool, 0644);
 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
 			| PT64_NX_MASK)
 
-#define PFERR_PRESENT_MASK (1U << 0)
-#define PFERR_WRITE_MASK (1U << 1)
-#define PFERR_USER_MASK (1U << 2)
-#define PFERR_RSVD_MASK (1U << 3)
-#define PFERR_FETCH_MASK (1U << 4)
-
 #define RMAP_EXT 4
 
 #define ACC_EXEC_MASK 1
@@ -1632,7 +1626,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct page *page;
 
-	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+	gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
 
 	if (gpa == UNMAPPED_GVA)
 		return NULL;
@@ -2155,8 +2149,11 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
+static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
+				  u32 access, u32 *error)
 {
+	if (error)
+		*error = 0;
 	return vaddr;
 }
 
@@ -2740,7 +2737,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	if (tdp_enabled)
 		return 0;
 
-	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -3237,7 +3234,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 		if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
 			audit_mappings_page(vcpu, ent, va, level - 1);
 		else {
-			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
+			gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
 			gfn_t gfn = gpa >> PAGE_SHIFT;
 			pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
 			hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
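As a usage note (a sketch, not part of this diff): callers that pass a non-NULL error pointer can use the error code produced by the page walker to report a translation failure back to the guest as a page fault, rather than silently succeeding with elevated permissions. Assuming the kvm_inject_page_fault() helper and X86EMUL_PROPAGATE_FAULT return value of this era, an emulator read path could look roughly like:

	u32 error;
	gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error);

	if (gpa == UNMAPPED_GVA) {
		/* Inject #PF with the walker's error code into the guest. */
		kvm_inject_page_fault(vcpu, addr, error);
		return X86EMUL_PROPAGATE_FAULT;
	}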