author		Jan Kiszka <jan.kiszka@siemens.com>	2013-03-07 08:08:07 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2013-03-07 13:48:47 -0500
commit		1a0d74e66405a795bb37a4a23ece50f8d8e5e81e (patch)
tree		cb2ccf2d528d5f7053e17e606ee05c09d0d172a2 /arch/x86/kvm/vmx.c
parent		33fb20c39e98b90813b5ab2d9a0d6faa6300caca (diff)
KVM: nVMX: Fix setting of CR0 and CR4 in guest mode
The logic for calculating the value with which we call kvm_set_cr0/4 was
broken (will definitely be visible with nested unrestricted guest mode
support). Also, we performed the check regarding CR0_ALWAYSON too early when
in guest mode.

What really needs to be done on both CR0 and CR4 is to mask out L1-owned bits
and merge them in from L1's guest_cr0/4. In contrast, arch.cr0/4 and
arch.cr0/4_guest_owned_bits contain the mangled L0+L1 state and, thus, are not
suited as input. For both CRs, we can then apply the check against
VMXON_CRx_ALWAYSON and refuse the update if it fails. To be fully consistent,
we implement this check now also for CR4.

For CR4, we move the check into vmx_set_cr4 while we keep it in
handle_set_cr0. This is because the CR0 checks for vmxon vs. guest mode will
diverge soon when adding unrestricted guest mode support.

Finally, we have to set the shadow to the value L2 wanted to write originally.

Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
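As an illustration only (not part of the patch), the merge described above boils
down to the following bit operation; the helper name is hypothetical, while the
mask and register names mirror the vmcs12 fields used in the diff below:

	/* Hypothetical helper, for illustration: derive the effective CR
	 * value L1 wants in hardware.  Bits set in guest_host_mask are
	 * L1-owned and are taken from L1's guest_cr0/4 (vmcs12->guest_crX);
	 * the remaining, L2-owned bits come from the value L2 tried to
	 * write. */
	static unsigned long nested_effective_cr(unsigned long l2_val,
						 unsigned long guest_host_mask,
						 unsigned long l1_guest_cr)
	{
		return (l2_val & ~guest_host_mask) |
		       (l1_guest_cr & guest_host_mask);
	}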
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c	48
1 file changed, 33 insertions, 15 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a9d885353108..260da9ac1678 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3223,7 +3223,9 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		 */
 		if (!nested_vmx_allowed(vcpu))
 			return 1;
-	} else if (to_vmx(vcpu)->nested.vmxon)
+	}
+	if (to_vmx(vcpu)->nested.vmxon &&
+	    ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
 		return 1;
 
 	vcpu->arch.cr4 = cr4;
@@ -4612,34 +4614,50 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 {
-	if (to_vmx(vcpu)->nested.vmxon &&
-	    ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
-		return 1;
-
 	if (is_guest_mode(vcpu)) {
+		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+		unsigned long orig_val = val;
+
 		/*
 		 * We get here when L2 changed cr0 in a way that did not change
 		 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
-		 * but did change L0 shadowed bits. This can currently happen
-		 * with the TS bit: L0 may want to leave TS on (for lazy fpu
-		 * loading) while pretending to allow the guest to change it.
+		 * but did change L0 shadowed bits. So we first calculate the
+		 * effective cr0 value that L1 would like to write into the
+		 * hardware. It consists of the L2-owned bits from the new
+		 * value combined with the L1-owned bits from L1's guest_cr0.
 		 */
-		if (kvm_set_cr0(vcpu, (val & vcpu->arch.cr0_guest_owned_bits) |
-			 (vcpu->arch.cr0 & ~vcpu->arch.cr0_guest_owned_bits)))
+		val = (val & ~vmcs12->cr0_guest_host_mask) |
+			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
+
+		/* TODO: will have to take unrestricted guest mode into
+		 * account */
+		if ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON)
 			return 1;
-		vmcs_writel(CR0_READ_SHADOW, val);
+
+		if (kvm_set_cr0(vcpu, val))
+			return 1;
+		vmcs_writel(CR0_READ_SHADOW, orig_val);
 		return 0;
-	} else
+	} else {
+		if (to_vmx(vcpu)->nested.vmxon &&
+		    ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
+			return 1;
 		return kvm_set_cr0(vcpu, val);
+	}
 }
 
 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
 {
 	if (is_guest_mode(vcpu)) {
-		if (kvm_set_cr4(vcpu, (val & vcpu->arch.cr4_guest_owned_bits) |
-			(vcpu->arch.cr4 & ~vcpu->arch.cr4_guest_owned_bits)))
+		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+		unsigned long orig_val = val;
+
+		/* analogously to handle_set_cr0 */
+		val = (val & ~vmcs12->cr4_guest_host_mask) |
+			(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
+		if (kvm_set_cr4(vcpu, val))
 			return 1;
-		vmcs_writel(CR4_READ_SHADOW, val);
+		vmcs_writel(CR4_READ_SHADOW, orig_val);
 		return 0;
 	} else
 		return kvm_set_cr4(vcpu, val);