author     Nadav Amit <namit@cs.technion.ac.il>      2014-04-17 20:35:09 -0400
committer  Marcelo Tosatti <mtosatti@redhat.com>     2014-04-23 16:46:57 -0400
commit     346874c9507a2582d0c00021f848de6e115f276c (patch)
tree       7dd8014b1c86fd5a77d4e0705f4d46726e3200e9 /arch/x86/kvm
parent     671bd9934a861288a248b051751061b11654aef9 (diff)
KVM: x86: Fix CR3 reserved bits
According to the Intel specification, PAE and non-PAE paging modes do not have
any CR3 reserved bits. In long mode, regardless of PCIDE, only the high bits
(above the physical address width) are reserved.
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
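The rule stated above, that in long mode only the bits above the CPU's physical
address width are reserved in CR3, can be illustrated by deriving such a mask
from MAXPHYADDR (CPUID leaf 0x80000008, EAX[7:0]). The sketch below is only an
illustration of that rule; it is not the kernel's CR3_L_MODE_RESERVED_BITS
definition, cr3_long_mode_rsvd_mask() is a hypothetical helper, and it ignores
the detail that CR3 bit 63 acts as a PCID no-flush hint when CR4.PCIDE is set.

#include <stdint.h>
#include <stdio.h>
#include <cpuid.h>

/*
 * Hypothetical user-space sketch: build a "reserved bits" mask for a
 * long-mode CR3 value from the physical address width reported by
 * CPUID 0x80000008 EAX[7:0].  Bits 63..MAXPHYADDR are treated as
 * reserved; this is not the kernel's CR3_L_MODE_RESERVED_BITS.
 */
static uint64_t cr3_long_mode_rsvd_mask(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int maxphyaddr = 36;		/* conservative fallback */

	if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		maxphyaddr = eax & 0xff;	/* EAX[7:0] = phys. address width */

	return ~((1ULL << maxphyaddr) - 1);	/* bits 63..MAXPHYADDR set */
}

int main(void)
{
	printf("long-mode CR3 reserved mask: 0x%016llx\n",
	       (unsigned long long)cr3_long_mode_rsvd_mask());
	return 0;
}

Compiled with gcc on an x86-64 host, this prints 0xffff000000000000 for a CPU
with a 48-bit physical address width, matching the idea that everything below
MAXPHYADDR is a legitimate page-table-base or PCID bit.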
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/emulate.c |  4
-rw-r--r--  arch/x86/kvm/x86.c     | 25
2 files changed, 5 insertions, 24 deletions
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 0dec502d20be..f3834bbca1d7 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3388,10 +3388,6 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
 		if (efer & EFER_LMA)
 			rsvd = CR3_L_MODE_RESERVED_BITS;
-		else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
-			rsvd = CR3_PAE_RESERVED_BITS;
-		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
-			rsvd = CR3_NONPAE_RESERVED_BITS;
 
 		if (new_val & rsvd)
 			return emulate_gp(ctxt, 0);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bc4aaf68190c..e4ccc6cf4108 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -701,26 +701,11 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		return 0;
 	}
 
-	if (is_long_mode(vcpu)) {
-		if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
-			if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
-				return 1;
-		} else
-			if (cr3 & CR3_L_MODE_RESERVED_BITS)
-				return 1;
-	} else {
-		if (is_pae(vcpu)) {
-			if (cr3 & CR3_PAE_RESERVED_BITS)
-				return 1;
-			if (is_paging(vcpu) &&
-			    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
-				return 1;
-		}
-		/*
-		 * We don't check reserved bits in nonpae mode, because
-		 * this isn't enforced, and VMware depends on this.
-		 */
-	}
+	if (is_long_mode(vcpu) && (cr3 & CR3_L_MODE_RESERVED_BITS))
+		return 1;
+	if (is_pae(vcpu) && is_paging(vcpu) &&
+	    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
+		return 1;
 
 	vcpu->arch.cr3 = cr3;
 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
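For the non-long-mode half of the change: under PAE paging the SDM treats CR3
bits 4:0 as ignored rather than reserved, with bits 31:5 holding the physical
address of the 32-byte-aligned page-directory-pointer table, which is why
kvm_set_cr3 no longer rejects any low CR3 bits and instead relies on
load_pdptrs() failing to reject an unusable value. The helper below is a
standalone illustration of that bit layout; pae_pdpt_base() is a hypothetical
name and is not part of the patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical sketch of the PAE CR3 layout: bits 4:0 are ignored
 * (the PDPT is 32-byte aligned) and bits 31:5 give the PDPT base, so
 * no low CR3 bits are architecturally reserved in PAE mode.
 */
static uint32_t pae_pdpt_base(uint32_t cr3)
{
	return cr3 & ~0x1fu;		/* drop the ignored low 5 bits */
}

int main(void)
{
	uint32_t cr3 = 0x12345678;	/* arbitrary example value */

	printf("CR3 0x%08x -> PDPT base 0x%08x\n", cr3, pae_pdpt_base(cr3));
	return 0;
}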