path: root/arch/x86/kvm/svm.c
author     Joerg Roedel <joerg.roedel@amd.com>       2010-11-30 12:04:00 -0500
committer  Avi Kivity <avi@redhat.com>               2011-01-12 04:30:16 -0500
commit     8a05a1b8693371547bbb2d06f842595cebd16687 (patch)
tree       93945f6b914207c5f103cf61a5a2b7fc62cb4068  /arch/x86/kvm/svm.c
parent     18c918c5f59bc35f9c567689daef8c255b575fdc (diff)
KVM: SVM: Add manipulation functions for misc intercepts
This patch wraps changes to the misc intercepts of SVM into separate functions, to better abstract nested SVM and to prepare for the implementation of the vmcb-clean-bits feature.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--  arch/x86/kvm/svm.c | 84
1 file changed, 51 insertions(+), 33 deletions(-)
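For context, here is a minimal illustrative sketch of the pattern this patch introduces. It is not the kernel code: every type and function below carries a _sketch suffix and is a simplified stand-in for the real struct vcpu_svm, get_host_vmcb() and recalc_intercepts(). The point it shows is that every change to the 64-bit misc-intercept word is funneled through one pair of helpers, so the effective intercepts can be recomputed in a single place, which is also the natural hook for the upcoming vmcb-clean-bits tracking.

#include <stdint.h>

/* simplified stand-ins for the kernel's vmcb control area and vcpu_svm */
struct vmcb_control_sketch {
	uint64_t intercept;                 /* 64-bit misc intercept mask */
};

struct vcpu_svm_sketch {
	struct vmcb_control_sketch host;    /* host (L1) intercept state */
	struct vmcb_control_sketch *active; /* control area the CPU actually runs on */
};

/* stand-in for recalc_intercepts(): fold the host mask into the active VMCB */
static void recalc_intercepts_sketch(struct vcpu_svm_sketch *svm)
{
	svm->active->intercept = svm->host.intercept;
	/* a nested guest's intercepts would be OR'ed in here, and a
	 * vmcb-clean-bits implementation would mark this state dirty */
}

static inline void set_intercept_sketch(struct vcpu_svm_sketch *svm, int bit)
{
	svm->host.intercept |= (1ULL << bit);
	recalc_intercepts_sketch(svm);
}

static inline void clr_intercept_sketch(struct vcpu_svm_sketch *svm, int bit)
{
	svm->host.intercept &= ~(1ULL << bit);
	recalc_intercepts_sketch(svm);
}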
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d69ec44143c0..cde5392bbe90 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -277,6 +277,24 @@ static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
 	recalc_intercepts(svm);
 }
 
+static inline void set_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept |= (1ULL << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept &= ~(1ULL << bit);
+
+	recalc_intercepts(svm);
+}
+
 static inline void enable_gif(struct vcpu_svm *svm)
 {
 	svm->vcpu.arch.hflags |= HF_GIF_MASK;
@@ -863,29 +881,29 @@ static void init_vmcb(struct vcpu_svm *svm)
 	set_exception_intercept(svm, UD_VECTOR);
 	set_exception_intercept(svm, MC_VECTOR);
 
-	control->intercept = (1ULL << INTERCEPT_INTR) |
-			     (1ULL << INTERCEPT_NMI) |
-			     (1ULL << INTERCEPT_SMI) |
-			     (1ULL << INTERCEPT_SELECTIVE_CR0) |
-			     (1ULL << INTERCEPT_CPUID) |
-			     (1ULL << INTERCEPT_INVD) |
-			     (1ULL << INTERCEPT_HLT) |
-			     (1ULL << INTERCEPT_INVLPG) |
-			     (1ULL << INTERCEPT_INVLPGA) |
-			     (1ULL << INTERCEPT_IOIO_PROT) |
-			     (1ULL << INTERCEPT_MSR_PROT) |
-			     (1ULL << INTERCEPT_TASK_SWITCH) |
-			     (1ULL << INTERCEPT_SHUTDOWN) |
-			     (1ULL << INTERCEPT_VMRUN) |
-			     (1ULL << INTERCEPT_VMMCALL) |
-			     (1ULL << INTERCEPT_VMLOAD) |
-			     (1ULL << INTERCEPT_VMSAVE) |
-			     (1ULL << INTERCEPT_STGI) |
-			     (1ULL << INTERCEPT_CLGI) |
-			     (1ULL << INTERCEPT_SKINIT) |
-			     (1ULL << INTERCEPT_WBINVD) |
-			     (1ULL << INTERCEPT_MONITOR) |
-			     (1ULL << INTERCEPT_MWAIT);
+	set_intercept(svm, INTERCEPT_INTR);
+	set_intercept(svm, INTERCEPT_NMI);
+	set_intercept(svm, INTERCEPT_SMI);
+	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
+	set_intercept(svm, INTERCEPT_CPUID);
+	set_intercept(svm, INTERCEPT_INVD);
+	set_intercept(svm, INTERCEPT_HLT);
+	set_intercept(svm, INTERCEPT_INVLPG);
+	set_intercept(svm, INTERCEPT_INVLPGA);
+	set_intercept(svm, INTERCEPT_IOIO_PROT);
+	set_intercept(svm, INTERCEPT_MSR_PROT);
+	set_intercept(svm, INTERCEPT_TASK_SWITCH);
+	set_intercept(svm, INTERCEPT_SHUTDOWN);
+	set_intercept(svm, INTERCEPT_VMRUN);
+	set_intercept(svm, INTERCEPT_VMMCALL);
+	set_intercept(svm, INTERCEPT_VMLOAD);
+	set_intercept(svm, INTERCEPT_VMSAVE);
+	set_intercept(svm, INTERCEPT_STGI);
+	set_intercept(svm, INTERCEPT_CLGI);
+	set_intercept(svm, INTERCEPT_SKINIT);
+	set_intercept(svm, INTERCEPT_WBINVD);
+	set_intercept(svm, INTERCEPT_MONITOR);
+	set_intercept(svm, INTERCEPT_MWAIT);
 
 	control->iopm_base_pa = iopm_base;
 	control->msrpm_base_pa = __pa(svm->msrpm);
@@ -936,8 +954,8 @@ static void init_vmcb(struct vcpu_svm *svm)
 	if (npt_enabled) {
 		/* Setup VMCB for Nested Paging */
 		control->nested_ctl = 1;
-		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
-					(1ULL << INTERCEPT_INVLPG));
+		clr_intercept(svm, INTERCEPT_TASK_SWITCH);
+		clr_intercept(svm, INTERCEPT_INVLPG);
 		clr_exception_intercept(svm, PF_VECTOR);
 		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
 		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
@@ -952,7 +970,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 
 	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
 		control->pause_filter_count = 3000;
-		control->intercept |= (1ULL << INTERCEPT_PAUSE);
+		set_intercept(svm, INTERCEPT_PAUSE);
 	}
 
 	enable_gif(svm);
@@ -1126,12 +1144,12 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 
 static void svm_set_vintr(struct vcpu_svm *svm)
 {
-	svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
+	set_intercept(svm, INTERCEPT_VINTR);
 }
 
 static void svm_clear_vintr(struct vcpu_svm *svm)
 {
-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
+	clr_intercept(svm, INTERCEPT_VINTR);
 }
 
 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
@@ -2309,7 +2327,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	}
 
 	/* We don't want to see VMMCALLs from a nested guest */
-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL);
+	clr_intercept(svm, INTERCEPT_VMMCALL);
 
 	svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
 	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
@@ -2557,7 +2575,7 @@ static int cpuid_interception(struct vcpu_svm *svm)
 static int iret_interception(struct vcpu_svm *svm)
 {
 	++svm->vcpu.stat.nmi_window_exits;
-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
+	clr_intercept(svm, INTERCEPT_IRET);
 	svm->vcpu.arch.hflags |= HF_IRET_MASK;
 	return 1;
 }
@@ -3103,7 +3121,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
 
 	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
 	vcpu->arch.hflags |= HF_NMI_MASK;
-	svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
+	set_intercept(svm, INTERCEPT_IRET);
 	++vcpu->stat.nmi_injections;
 }
 
@@ -3170,10 +3188,10 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 
 	if (masked) {
 		svm->vcpu.arch.hflags |= HF_NMI_MASK;
-		svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
+		set_intercept(svm, INTERCEPT_IRET);
 	} else {
 		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
-		svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
+		clr_intercept(svm, INTERCEPT_IRET);
 	}
 }
 