author     Nadav Har'El <nyh@il.ibm.com>  2011-05-25 16:14:38 -0400
committer  Avi Kivity <avi@redhat.com>    2011-07-12 04:45:18 -0400
commit     eeadf9e7558ce2c34c0d91985d26047a6e2245e7
tree       bdf4b3bdb4fdcd5672990e51a211b8dc15e04ae1
parent     66c78ae40cd0a7258d01ef433ede74e33e4adbbe
KVM: nVMX: Handling of CR0 and CR4 modifying instructions
When L2 tries to modify CR0 or CR4 (with mov or clts), and modifies a bit
which L1 asked to shadow (via CR[04]_GUEST_HOST_MASK), we already do the right
thing: we let L1 handle the trap (see nested_vmx_exit_handled_cr() in a
previous patch).
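
(Hedged sketch of that check, with an invented helper name: the exit must be
reflected to L1 exactly when the written value differs, in some bit covered
by L1's CR0_GUEST_HOST_MASK, from what L1's read shadow currently shows.
This mirrors only the mask-and-compare idea, not the actual body of
nested_vmx_exit_handled_cr():

	static int l1_intercepts_cr0_write(unsigned long val,
					   unsigned long l1_mask,   /* L1's CR0_GUEST_HOST_MASK */
					   unsigned long l1_shadow) /* L1's CR0_READ_SHADOW */
	{
		/* does the write flip any bit L1 asked to shadow? */
		return ((val ^ l1_shadow) & l1_mask) != 0;
	}
)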
When L2 modifies bits that L1 doesn't care about, we let it think (via
CR[04]_READ_SHADOW) that it did these modifications, while only changing
(in GUEST_CR[04]) the bits that L0 doesn't shadow.
This is needed for correct handling of CR0.TS for lazy FPU loading: L0 may
want to leave TS on, while pretending to allow the guest to change it.
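
For illustration, a standalone sketch of the bit arithmetic used below (the
harness and the numeric values are made up; only the
(val & owned) | (cr0 & ~owned) combine mirrors the patch):

	#include <stdio.h>

	#define X86_CR0_TS (1UL << 3)

	int main(void)
	{
		/* assume L0 shadows only TS (lazy FPU); L2 owns every other bit */
		unsigned long owned = ~X86_CR0_TS;
		unsigned long cr0   = 0x80050033UL | X86_CR0_TS; /* TS forced on */
		unsigned long val   = 0x80050033UL;              /* L2 clears TS */

		/* GUEST_CR0 takes L2's value only where L2 owns the bit, so TS
		 * stays set; CR0_READ_SHADOW takes val, so L2 reads TS as clear */
		unsigned long guest_cr0 = (val & owned) | (cr0 & ~owned);

		printf("CR0_READ_SHADOW = %#lx\n", val);       /* 0x80050033 */
		printf("GUEST_CR0       = %#lx\n", guest_cr0); /* 0x8005003b */
		return 0;
	}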
Signed-off-by: Nadav Har'El <nyh@il.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
 arch/x86/kvm/vmx.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 55 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1e6bd69ae433..6e9bebcdd5a6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4164,6 +4164,58 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
 	hypercall[2] = 0xc1;
 }
 
+/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
+static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	if (to_vmx(vcpu)->nested.vmxon &&
+	    ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
+		return 1;
+
+	if (is_guest_mode(vcpu)) {
+		/*
+		 * We get here when L2 changed cr0 in a way that did not change
+		 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
+		 * but did change L0 shadowed bits. This can currently happen
+		 * with the TS bit: L0 may want to leave TS on (for lazy fpu
+		 * loading) while pretending to allow the guest to change it.
+		 */
+		if (kvm_set_cr0(vcpu, (val & vcpu->arch.cr0_guest_owned_bits) |
+			 (vcpu->arch.cr0 & ~vcpu->arch.cr0_guest_owned_bits)))
+			return 1;
+		vmcs_writel(CR0_READ_SHADOW, val);
+		return 0;
+	} else
+		return kvm_set_cr0(vcpu, val);
+}
+
+static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	if (is_guest_mode(vcpu)) {
+		if (kvm_set_cr4(vcpu, (val & vcpu->arch.cr4_guest_owned_bits) |
+		    (vcpu->arch.cr4 & ~vcpu->arch.cr4_guest_owned_bits)))
+			return 1;
+		vmcs_writel(CR4_READ_SHADOW, val);
+		return 0;
+	} else
+		return kvm_set_cr4(vcpu, val);
+}
+
+/* called to set cr0 as appropriate for clts instruction exit. */
+static void handle_clts(struct kvm_vcpu *vcpu)
+{
+	if (is_guest_mode(vcpu)) {
+		/*
+		 * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS
+		 * but we did (!fpu_active). We need to keep GUEST_CR0.TS on,
+		 * just pretend it's off (also in arch.cr0 for fpu_activate).
+		 */
+		vmcs_writel(CR0_READ_SHADOW,
+			vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
+		vcpu->arch.cr0 &= ~X86_CR0_TS;
+	} else
+		vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+}
+
 static int handle_cr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification, val;
@@ -4180,7 +4232,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		trace_kvm_cr_write(cr, val);
 		switch (cr) {
 		case 0:
-			err = kvm_set_cr0(vcpu, val);
+			err = handle_set_cr0(vcpu, val);
 			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 3:
@@ -4188,7 +4240,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 4:
-			err = kvm_set_cr4(vcpu, val);
+			err = handle_set_cr4(vcpu, val);
 			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 8: {
@@ -4206,7 +4258,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		};
 		break;
 	case 2: /* clts */
-		vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+		handle_clts(vcpu);
 		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
 		skip_emulated_instruction(vcpu);
 		vmx_fpu_activate(vcpu);
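
One case the commit message does not spell out is handle_set_cr0()'s first
check: while the vCPU is in VMX operation (nested.vmxon), a write that would
clear an always-on CR0 bit is rejected (return 1). A minimal sketch of that
invariant follows; the bit composition of VMXON_CR0_ALWAYSON shown here (PE,
NE and PG) is an assumption, not taken from this patch:

	#define X86_CR0_PE (1UL << 0)
	#define X86_CR0_NE (1UL << 5)
	#define X86_CR0_PG (1UL << 31)
	/* assumed composition; the real define lives elsewhere in vmx.c */
	#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_NE | X86_CR0_PG)

	static int cr0_write_ok_while_vmxon(unsigned long val)
	{
		/* every always-on bit must remain set in the new value */
		return (val & VMXON_CR0_ALWAYSON) == VMXON_CR0_ALWAYSON;
	}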