author	Joerg Roedel <joerg.roedel@amd.com>	2011-04-04 06:39:28 -0400
committer	Avi Kivity <avi@redhat.com>	2011-05-11 07:57:01 -0400
commit	cfec82cb7d313ae5b2c2dbb974401d7c214c7b09 (patch)
tree	63351befc6b9981127ac8dd2d22149d3ddf75806 /arch/x86/kvm/svm.c
parent	8a76d7f25f8f24fc5a328c8e15e4a7313cf141b9 (diff)
KVM: SVM: Add intercept check for emulated cr accesses
This patch adds all necessary intercept checks for instructions that access the crX registers.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
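Editor's note: the crX intercepts this patch checks for map onto exit codes that the SVM architecture numbers consecutively, which is why the handler below can simply add the ModRM register index to SVM_EXIT_READ_CR0 or SVM_EXIT_WRITE_CR0. The following is a minimal user-space sketch of that arithmetic only, not part of the patch; the exit-code base values are assumptions taken from my reading of arch/x86/include/asm/svm.h, and cr_access_exit_code() is a hypothetical helper used purely for illustration.

/*
 * Standalone sketch (not kernel code) of the exit-code arithmetic the
 * patch relies on: AMD SVM numbers the CRn read intercepts from
 * SVM_EXIT_READ_CR0 and the CRn write intercepts from SVM_EXIT_WRITE_CR0,
 * so the exit code for a "mov from/to CRn" is the base code plus the
 * register index taken from ModRM.reg.
 */
#include <stdio.h>

/* Base exit codes, assumed from arch/x86/include/asm/svm.h. */
#define SVM_EXIT_READ_CR0	0x000
#define SVM_EXIT_WRITE_CR0	0x010

/* Hypothetical helper mirroring "icpt_info.exit_code += info->modrm_reg". */
static unsigned int cr_access_exit_code(int is_write, unsigned int modrm_reg)
{
	unsigned int base = is_write ? SVM_EXIT_WRITE_CR0 : SVM_EXIT_READ_CR0;

	return base + modrm_reg;
}

int main(void)
{
	/* A read of CR4 (ModRM.reg == 4) becomes exit code 0x004. */
	printf("read  CR4 -> exit code 0x%03x\n", cr_access_exit_code(0, 4));
	/* A write of CR3 (ModRM.reg == 3) becomes exit code 0x013. */
	printf("write CR3 -> exit code 0x%03x\n", cr_access_exit_code(1, 3));
	return 0;
}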
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--	arch/x86/kvm/svm.c	81
1 file changed, 80 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 798ebe695f1d..ff4ed3619d00 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3868,11 +3868,90 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
 	update_cr0_intercept(svm);
 }
 
+#define POST_EX(exit) { .exit_code = (exit), \
+			.stage = X86_ICPT_POST_EXCEPT, \
+			.valid = true }
+
+static struct __x86_intercept {
+	u32 exit_code;
+	enum x86_intercept_stage stage;
+	bool valid;
+} x86_intercept_map[] = {
+	[x86_intercept_cr_read]		= POST_EX(SVM_EXIT_READ_CR0),
+	[x86_intercept_cr_write]	= POST_EX(SVM_EXIT_WRITE_CR0),
+	[x86_intercept_clts]		= POST_EX(SVM_EXIT_WRITE_CR0),
+	[x86_intercept_lmsw]		= POST_EX(SVM_EXIT_WRITE_CR0),
+	[x86_intercept_smsw]		= POST_EX(SVM_EXIT_READ_CR0),
+};
+
+#undef POST_EX
+
 static int svm_check_intercept(struct kvm_vcpu *vcpu,
 			       struct x86_instruction_info *info,
 			       enum x86_intercept_stage stage)
 {
-	return X86EMUL_CONTINUE;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	int vmexit, ret = X86EMUL_CONTINUE;
+	struct __x86_intercept icpt_info;
+	struct vmcb *vmcb = svm->vmcb;
+
+	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
+		goto out;
+
+	icpt_info = x86_intercept_map[info->intercept];
+
+	if (!icpt_info.valid || stage != icpt_info.stage)
+		goto out;
+
+	switch (icpt_info.exit_code) {
+	case SVM_EXIT_READ_CR0:
+		if (info->intercept == x86_intercept_cr_read)
+			icpt_info.exit_code += info->modrm_reg;
+		break;
+	case SVM_EXIT_WRITE_CR0: {
+		unsigned long cr0, val;
+		u64 intercept;
+
+		if (info->intercept == x86_intercept_cr_write)
+			icpt_info.exit_code += info->modrm_reg;
+
+		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
+			break;
+
+		intercept = svm->nested.intercept;
+
+		if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
+			break;
+
+		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
+		val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
+
+		if (info->intercept == x86_intercept_lmsw) {
+			cr0 &= 0xfUL;
+			val &= 0xfUL;
+			/* lmsw can't clear PE - catch this here */
+			if (cr0 & X86_CR0_PE)
+				val |= X86_CR0_PE;
+		}
+
+		if (cr0 ^ val)
+			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+
+		break;
+	}
+	default:
+		break;
+	}
+
+	vmcb->control.next_rip  = info->next_rip;
+	vmcb->control.exit_code = icpt_info.exit_code;
+	vmexit = nested_svm_exit_handled(svm);
+
+	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
+					   : X86EMUL_CONTINUE;
+
+out:
+	return ret;
 }
 
 static struct kvm_x86_ops svm_x86_ops = {
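Editor's note: the subtle part of svm_check_intercept() above is the selective CR0 write handling. When the nested hypervisor enabled INTERCEPT_SELECTIVE_CR0, a CR0 write is only re-encoded as SVM_EXIT_CR0_SEL_WRITE if it changes bits outside SVM_CR0_SELECTIVE_MASK; for lmsw the comparison is narrowed to the low four CR0 bits and PE is forced to stay set, because lmsw cannot clear it. Below is a minimal, self-contained sketch of just that decision, not kernel code; it assumes SVM_CR0_SELECTIVE_MASK is (X86_CR0_TS | X86_CR0_MP) as defined elsewhere in arch/x86/kvm/svm.c, and is_selective_cr0_write() is a hypothetical helper for illustration.

/*
 * User-space sketch of the selective-CR0-write decision made in
 * svm_check_intercept().  A write that only toggles TS/MP stays a plain
 * CR0 write; anything else is reported as a selective CR0 write when the
 * L1 guest asked for selective CR0 intercepts.
 */
#include <stdbool.h>
#include <stdio.h>

#define X86_CR0_PE		(1UL << 0)
#define X86_CR0_MP		(1UL << 1)
#define X86_CR0_TS		(1UL << 3)
/* Assumed to match the definition in arch/x86/kvm/svm.c. */
#define SVM_CR0_SELECTIVE_MASK	(X86_CR0_TS | X86_CR0_MP)

/* Hypothetical helper: true when the write must become SVM_EXIT_CR0_SEL_WRITE. */
static bool is_selective_cr0_write(unsigned long old_cr0, unsigned long new_val,
				   bool is_lmsw)
{
	unsigned long cr0 = old_cr0 & ~SVM_CR0_SELECTIVE_MASK;
	unsigned long val = new_val & ~SVM_CR0_SELECTIVE_MASK;

	if (is_lmsw) {
		/* lmsw only touches the low CR0 bits and cannot clear PE. */
		cr0 &= 0xfUL;
		val &= 0xfUL;
		if (cr0 & X86_CR0_PE)
			val |= X86_CR0_PE;
	}

	return (cr0 ^ val) != 0;
}

int main(void)
{
	/* Toggling only TS is not a selective write -> prints 0. */
	printf("%d\n", is_selective_cr0_write(X86_CR0_PE, X86_CR0_PE | X86_CR0_TS, false));
	/* Clearing PE with a mov to CR0 is selective -> prints 1. */
	printf("%d\n", is_selective_cr0_write(X86_CR0_PE, 0, false));
	/* lmsw cannot clear PE, so the same value via lmsw -> prints 0. */
	printf("%d\n", is_selective_cr0_write(X86_CR0_PE, 0, true));
	return 0;
}

Only when this comparison reports a change does the patch rewrite the exit code to SVM_EXIT_CR0_SEL_WRITE before handing the result to nested_svm_exit_handled() for the final intercepted-or-not decision.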