path: root/drivers/kvm/svm.c
author    Anthony Liguori <aliguori@us.ibm.com>    2007-04-29 04:56:06 -0400
committer Avi Kivity <avi@qumranet.com>            2007-05-03 03:52:32 -0400
commit    94dfbdb3894eda2f673b70e20da2743c4a8d3968 (patch)
tree      74e7a4a31a72ed0e3753e9390189d2e8132ecf0e  /drivers/kvm/svm.c
parent    2807696c3791d6dd1dcf20f022eaa2dc7615bc5d (diff)
KVM: SVM: Only save/restore MSRs when needed
We only have to save/restore MSR_GS_BASE on every VMEXIT. The rest can be saved/restored when we leave the VCPU. Since we don't emulate the DEBUGCTL MSRs and the guest cannot write to them, we don't have to worry about saving/restoring them at all.

This shaves a whopping 40% off raw vmexit costs on AMD.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
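The change splits host MSR handling into two groups: MSR_GS_BASE, which still has to be reloaded around every VMRUN via save_host_msrs()/load_host_msrs(), and the remaining syscall/sysenter MSRs, which are now saved once in svm_vcpu_load() and restored once in svm_vcpu_put(). The hunks below reference host_save_user_msrs, NR_HOST_SAVE_USER_MSRS and the new host_user_msrs/host_gs_base fields; those declarations sit outside the svm.c hunks shown here, so the following is only a sketch of what they plausibly look like (the exact MSR list is an assumption, not taken from this diff):

/* Sketch only -- these declarations are not part of the hunks below;
 * the MSR list is an assumption based on the syscall/sysenter state
 * KVM typically swaps on behalf of the guest.
 */
static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK,
	MSR_KERNEL_GS_BASE, MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};
#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

/* Assumed additions to the per-VCPU state (struct vcpu_svm): */
	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	u64 host_gs_base;

With that split, the rdmsrl/wrmsrl cost for everything except MSR_GS_BASE is paid once per scheduling of the VCPU instead of once per exit.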
Diffstat (limited to 'drivers/kvm/svm.c')
-rw-r--r--  drivers/kvm/svm.c | 26
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index bddd0238869d..9c15f32eea18 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -522,8 +522,6 @@ static void init_vmcb(struct vmcb *vmcb)
 	control->msrpm_base_pa = msrpm_base;
 	control->tsc_offset = 0;
 	control->int_ctl = V_INTR_MASKING_MASK;
-	if (svm_has(SVM_FEATURE_LBRV))
-		control->lbr_ctl = 1ULL;
 
 	init_seg(&save->es);
 	init_seg(&save->ss);
@@ -611,7 +609,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu)
 {
-	int cpu;
+	int cpu, i;
 
 	cpu = get_cpu();
 	if (unlikely(cpu != vcpu->cpu)) {
@@ -626,10 +624,18 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu)
 		vcpu->svm->vmcb->control.tsc_offset += delta;
 		vcpu->cpu = cpu;
 	}
+
+	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+		rdmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	int i;
+
+	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+		wrmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
+
 	rdtscll(vcpu->host_tsc);
 	put_cpu();
 }
@@ -815,18 +821,16 @@ static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
 
 static void load_host_msrs(struct kvm_vcpu *vcpu)
 {
-	int i;
-
-	for ( i = 0; i < NR_HOST_SAVE_MSRS; i++)
-		wrmsrl(host_save_msrs[i], vcpu->svm->host_msrs[i]);
+#ifdef CONFIG_X86_64
+	wrmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
+#endif
 }
 
 static void save_host_msrs(struct kvm_vcpu *vcpu)
 {
-	int i;
-
-	for ( i = 0; i < NR_HOST_SAVE_MSRS; i++)
-		rdmsrl(host_save_msrs[i], vcpu->svm->host_msrs[i]);
+#ifdef CONFIG_X86_64
+	rdmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
+#endif
 }
 
 static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
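For context on the claimed 40% saving: save_host_msrs() and load_host_msrs() bracket the VMRUN in svm_vcpu_run(), so after this patch the exit hot path touches a single MSR instead of looping over the whole host_save_msrs array. A rough, hedged sketch of that call pattern (svm_vcpu_run() is not part of this diff, and the real function also swaps segment selectors, CR2 and debug state):

/* Not part of this patch -- a simplified sketch of where the two
 * helpers sit in the per-exit path.
 */
static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	/* ... host segment/debug state saved here ... */
	save_host_msrs(vcpu);   /* after this patch: one rdmsrl(MSR_GS_BASE) */

	/* guest state loaded, VMRUN executed, guest state stashed */

	load_host_msrs(vcpu);   /* after this patch: one wrmsrl(MSR_GS_BASE) */
	/* ... host segment/debug state restored, exit reason handled ... */
}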