author    Avi Kivity <avi@redhat.com>    2010-10-21 06:20:33 -0400
committer Avi Kivity <avi@redhat.com>    2011-01-12 04:23:33 -0500
commit    dacccfdd6bb386ec0714e1fec250c4b7d0aaccc9
tree      95067709061a1cc268b5bf36efe5150078dc222d
parent    afe9e66f8233e33e16fcc5b855070e45978f919e
KVM: SVM: Move fs/gs/ldt save/restore to heavyweight exit path
ldt is never used in the kernel context; the same goes for fs (x86_64) and gs (i386). So save/restore them in the heavyweight exit path instead of the lightweight path.

By itself, this doesn't buy us much, but it paves the way for moving vmload and vmsave to the heavyweight exit path, since they modify the same registers.

[jan: fix copy/paste mistake on i386]

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
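To make the heavyweight-vs-lightweight distinction concrete, here is a minimal userspace C sketch of the pattern, not the kernel's actual code: hw_fs, hw_gs, hw_ldt, struct vcpu, and the three functions are illustrative stand-ins for the real selectors and for svm_vcpu_load, svm_vcpu_run, and svm_vcpu_put.

#include <stdio.h>

/* Stand-ins for the hardware fs/gs segment and ldt selectors. */
static unsigned short hw_fs, hw_gs, hw_ldt;

struct vcpu {
	struct {
		unsigned short fs, gs, ldt;	/* mirrors the u16 fields the patch adds */
	} host;
};

/* Heavyweight path: runs once when the vcpu is scheduled onto a cpu. */
static void vcpu_load(struct vcpu *v)
{
	v->host.fs  = hw_fs;	/* cf. savesegment(fs, svm->host.fs) */
	v->host.gs  = hw_gs;	/* cf. savesegment(gs, svm->host.gs) */
	v->host.ldt = hw_ldt;	/* cf. svm->host.ldt = kvm_read_ldt() */
}

/* Heavyweight path: runs once when the vcpu is scheduled away. */
static void vcpu_put(struct vcpu *v)
{
	hw_ldt = v->host.ldt;	/* cf. kvm_load_ldt(svm->host.ldt) */
	hw_fs  = v->host.fs;
	hw_gs  = v->host.gs;
}

/*
 * Lightweight path: runs on every guest entry/exit.  After the patch it
 * no longer saves or restores fs/gs/ldt (except fs on i386, which the
 * kernel does use between exits).
 */
static void vcpu_run(struct vcpu *v)
{
	(void)v;
	hw_fs = hw_gs = hw_ldt = 0;	/* the guest clobbers the registers */
}

int main(void)
{
	struct vcpu v = { { 0, 0, 0 } };
	int i;

	hw_fs = 1; hw_gs = 2; hw_ldt = 3;

	vcpu_load(&v);			/* save host state once ...      */
	for (i = 0; i < 1000; i++)	/* ... not once per guest entry  */
		vcpu_run(&v);
	vcpu_put(&v);			/* restore host state once       */

	printf("fs=%hu gs=%hu ldt=%hu\n", hw_fs, hw_gs, hw_ldt);	/* 1 2 3 */
	return 0;
}

The payoff the message points at comes later: once vmload and vmsave also move to the heavyweight path, the per-vmexit cost drops further, since they touch these same registers.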
-rw-r--r--    arch/x86/kvm/svm.c    35
1 file changed, 21 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a217978b370f..8ea4a79bc4ef 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -125,6 +125,9 @@ struct vcpu_svm {
 
 	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
 	struct {
+		u16 fs;
+		u16 gs;
+		u16 ldt;
 		u64 gs_base;
 	} host;
 
@@ -184,6 +187,9 @@ static int nested_svm_vmexit(struct vcpu_svm *svm);
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
 				      bool has_error_code, u32 error_code);
 
+static void save_host_msrs(struct kvm_vcpu *vcpu);
+static void load_host_msrs(struct kvm_vcpu *vcpu);
+
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
 	return container_of(vcpu, struct vcpu_svm, vcpu);
@@ -996,6 +1002,11 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		svm->asid_generation = 0;
 	}
 
+	save_host_msrs(vcpu);
+	savesegment(fs, svm->host.fs);
+	savesegment(gs, svm->host.gs);
+	svm->host.ldt = kvm_read_ldt();
+
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
@@ -1006,6 +1017,14 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	int i;
 
 	++vcpu->stat.host_state_reload;
+	kvm_load_ldt(svm->host.ldt);
+#ifdef CONFIG_X86_64
+	loadsegment(fs, svm->host.fs);
+	load_gs_index(svm->host.gs);
+	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+	loadsegment(gs, svm->host.gs);
+#endif
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
@@ -3314,9 +3333,6 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	u16 fs_selector;
-	u16 gs_selector;
-	u16 ldt_selector;
 
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
@@ -3333,10 +3349,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	sync_lapic_to_cr8(vcpu);
 
-	save_host_msrs(vcpu);
-	savesegment(fs, fs_selector);
-	savesegment(gs, gs_selector);
-	ldt_selector = kvm_read_ldt();
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
 	clgi();
@@ -3415,13 +3427,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	);
 
 	load_host_msrs(vcpu);
-	kvm_load_ldt(ldt_selector);
-	loadsegment(fs, fs_selector);
-#ifdef CONFIG_X86_64
-	load_gs_index(gs_selector);
-	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
-#else
-	loadsegment(gs, gs_selector);
-#endif
+#ifndef CONFIG_X86_64
+	loadsegment(fs, svm->host.fs);
+#endif
 
 	reload_tss(vcpu);
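
For readability, here is svm_vcpu_put as it reads once the patch is applied, stitched together from the @@ -1006 hunk above; the function header and the svm declaration sit outside the hunk and are filled in here by assumption.

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);	/* assumed: outside the hunk */
	int i;

	++vcpu->stat.host_state_reload;
	kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
	loadsegment(fs, svm->host.fs);
	load_gs_index(svm->host.gs);
	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
#else
	loadsegment(gs, svm->host.gs);
#endif
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

The matching save side lives in svm_vcpu_load, so a vcpu that enters and exits the guest many times between reschedules now pays for the fs/gs/ldt reloads only once per reschedule rather than once per vmexit.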