about summary refs log tree commit diff stats
path: root/arch/x86/kvm/svm.c
diff options
context:
space:
mode:
authorRoedel, Joerg <Joerg.Roedel@amd.com>2010-12-03 07:15:21 -0500
committerAvi Kivity <avi@redhat.com>2011-01-12 04:30:22 -0500
commit8d28fec406e4d5ce6c109fe12699976e72e9748e (patch)
tree1d5403a970d7688ef0bed73268ddb84d68e1f7bd /arch/x86/kvm/svm.c
parent700e1b12196c4b01524ca10d89f8731418d72b6e (diff)
KVM: SVM: Add clean-bits infrastructure code
This patch adds the infrastructure for the implementation of the individual clean-bits. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--arch/x86/kvm/svm.c31
1 file changed, 31 insertions, 0 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 50387860a53c..e73cbc3c49f9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -185,6 +185,28 @@ static int nested_svm_vmexit(struct vcpu_svm *svm);
185static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, 185static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
186 bool has_error_code, u32 error_code); 186 bool has_error_code, u32 error_code);
187 187
188enum {
189 VMCB_DIRTY_MAX,
190};
191
192#define VMCB_ALWAYS_DIRTY_MASK 0U
193
194static inline void mark_all_dirty(struct vmcb *vmcb)
195{
196 vmcb->control.clean = 0;
197}
198
199static inline void mark_all_clean(struct vmcb *vmcb)
200{
201 vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
202 & ~VMCB_ALWAYS_DIRTY_MASK;
203}
204
205static inline void mark_dirty(struct vmcb *vmcb, int bit)
206{
207 vmcb->control.clean &= ~(1 << bit);
208}
209
188static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) 210static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
189{ 211{
190 return container_of(vcpu, struct vcpu_svm, vcpu); 212 return container_of(vcpu, struct vcpu_svm, vcpu);
@@ -973,6 +995,8 @@ static void init_vmcb(struct vcpu_svm *svm)
973 set_intercept(svm, INTERCEPT_PAUSE); 995 set_intercept(svm, INTERCEPT_PAUSE);
974 } 996 }
975 997
998 mark_all_dirty(svm->vmcb);
999
976 enable_gif(svm); 1000 enable_gif(svm);
977} 1001}
978 1002
@@ -1089,6 +1113,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1089 1113
1090 if (unlikely(cpu != vcpu->cpu)) { 1114 if (unlikely(cpu != vcpu->cpu)) {
1091 svm->asid_generation = 0; 1115 svm->asid_generation = 0;
1116 mark_all_dirty(svm->vmcb);
1092 } 1117 }
1093 1118
1094#ifdef CONFIG_X86_64 1119#ifdef CONFIG_X86_64
@@ -2140,6 +2165,8 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
2140 svm->vmcb->save.cpl = 0; 2165 svm->vmcb->save.cpl = 0;
2141 svm->vmcb->control.exit_int_info = 0; 2166 svm->vmcb->control.exit_int_info = 0;
2142 2167
2168 mark_all_dirty(svm->vmcb);
2169
2143 nested_svm_unmap(page); 2170 nested_svm_unmap(page);
2144 2171
2145 nested_svm_uninit_mmu_context(&svm->vcpu); 2172 nested_svm_uninit_mmu_context(&svm->vcpu);
@@ -2351,6 +2378,8 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
2351 2378
2352 enable_gif(svm); 2379 enable_gif(svm);
2353 2380
2381 mark_all_dirty(svm->vmcb);
2382
2354 return true; 2383 return true;
2355} 2384}
2356 2385
@@ -3490,6 +3519,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
3490 if (unlikely(svm->vmcb->control.exit_code == 3519 if (unlikely(svm->vmcb->control.exit_code ==
3491 SVM_EXIT_EXCP_BASE + MC_VECTOR)) 3520 SVM_EXIT_EXCP_BASE + MC_VECTOR))
3492 svm_handle_mce(svm); 3521 svm_handle_mce(svm);
3522
3523 mark_all_clean(svm->vmcb);
3493} 3524}
3494 3525
3495#undef R 3526#undef R