about summary refs log tree commit diff stats
path: root/arch/x86
diff options
context:
space:
mode:
author    Avi Kivity <avi@redhat.com>  2010-05-12 04:48:18 -0400
committer Avi Kivity <avi@redhat.com>  2010-05-19 04:41:09 -0400
commit3dbe141595faa48a067add3e47bba3205b79d33c (patch)
treea7d37004d0021298ed305810543ae6a3a4ed79e2 /arch/x86
parenta3d204e28579427609c3d15d2310127ebaa47d94 (diff)
KVM: MMU: Segregate shadow pages with different cr0.wp
When cr0.wp=0, we may shadow a gpte having u/s=1 and r/w=0 with an spte having u/s=0 and r/w=1. This allows excessive access if the guest sets cr0.wp=1 and accesses through this spte. Fix by making cr0.wp part of the base role; we'll have different sptes for the two cases and the problem disappears. Signed-off-by: Avi Kivity <avi@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/kvm/mmu.c3
2 files changed, 3 insertions, 1 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3f0007b076da..76f5483cffec 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -179,6 +179,7 @@ union kvm_mmu_page_role {
 		unsigned access:3;
 		unsigned invalid:1;
 		unsigned nxe:1;
+		unsigned cr0_wp:1;
 	};
 };
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index de996380ec26..81563e76e28f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -217,7 +217,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
-static int is_write_protection(struct kvm_vcpu *vcpu)
+static bool is_write_protection(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
 }
@@ -2432,6 +2432,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 		r = paging32_init_context(vcpu);
 
 	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
+	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
 
 	return r;
 }