author	Joerg Roedel <joerg.roedel@amd.com>	2009-08-07 05:49:30 -0400
committer	Avi Kivity <avi@redhat.com>	2009-09-10 01:33:23 -0400
commit	defbba5660fb9fcad186bd799a635e52994a4d1a (patch)
tree	90be523b529f4d7f99fe05497385cb59fc1dbfd9 /arch/x86/kvm/svm.c
parent	33740e4009b7d287538f68f614eb3542df3597e4 (diff)
KVM: SVM: optimize nested vmrun
Only copy the necessary parts of the vmcb save area on vmrun and save
precious time.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
 arch/x86/kvm/svm.c | 28 +++++++++++++++++++-------
 1 file changed, 21 insertions(+), 7 deletions(-)
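As the commit message says, the saving comes from replacing a memcpy() of the
entire 4KB VMCB with assignments of only the fields a later #VMEXIT has to
restore. The following is a minimal user-space sketch of that idea, not kernel
code: struct fake_vmcb and its field sizes are illustrative stand-ins, not the
real layout from arch/x86/include/asm/svm.h.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Simplified stand-in for struct vmcb, padded out to the 4KB page
	 * the hardware uses, so the size difference is visible. */
	struct fake_vmcb {
		uint64_t es, cs, ss, ds;        /* segment state (simplified) */
		uint64_t gdtr, idtr;            /* descriptor table state     */
		uint64_t efer, cr0, cr3, cr4;   /* control registers          */
		uint64_t rflags, rip, rsp, rax; /* guest execution state      */
		uint8_t  pad[4096 - 14 * sizeof(uint64_t)]; /* rest of page   */
	};

	/* Old approach: copy the whole page, needed or not. */
	static void save_all(struct fake_vmcb *hsave, const struct fake_vmcb *vmcb)
	{
		memcpy(hsave, vmcb, sizeof(*vmcb));
	}

	/* New approach: copy only what a later #VMEXIT must restore. */
	static void save_needed(struct fake_vmcb *hsave, const struct fake_vmcb *vmcb)
	{
		hsave->es     = vmcb->es;
		hsave->cs     = vmcb->cs;
		hsave->ss     = vmcb->ss;
		hsave->ds     = vmcb->ds;
		hsave->gdtr   = vmcb->gdtr;
		hsave->idtr   = vmcb->idtr;
		hsave->efer   = vmcb->efer;
		hsave->cr0    = vmcb->cr0;
		hsave->cr3    = vmcb->cr3;
		hsave->cr4    = vmcb->cr4;
		hsave->rflags = vmcb->rflags;
		hsave->rip    = vmcb->rip;
		hsave->rsp    = vmcb->rsp;
		hsave->rax    = vmcb->rax;
	}

	int main(void)
	{
		static struct fake_vmcb vmcb, hsave; /* static: keep 4KB structs off the stack */
		vmcb.rip = 0x1000;

		save_all(&hsave, &vmcb);    /* copies the full 4096 bytes */
		save_needed(&hsave, &vmcb); /* copies 14 * 8 = 112 bytes  */
		printf("full copy: %zu bytes, field copy: %zu bytes\n",
		       sizeof(vmcb), 14 * sizeof(uint64_t));
		return 0;
	}

A side effect visible in the patch below: once the copy is per-field, the NPT
and shadow-paging cases can pick different CR3 sources directly, where the old
memcpy() had to patch CR3 up afterwards.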
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2f5f2236f2a2..f11f88005c29 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1681,6 +1681,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
 {
 	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
 	struct vmcb *hsave = svm->hsave;
+	struct vmcb *vmcb = svm->vmcb;
 
 	/* nested_vmcb is our indicator if nested SVM is activated */
 	svm->nested_vmcb = svm->vmcb->save.rax;
@@ -1691,12 +1692,25 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
 
 	/* Save the old vmcb, so we don't need to pick what we save, but
 	   can restore everything when a VMEXIT occurs */
-	memcpy(hsave, svm->vmcb, sizeof(struct vmcb));
-	/* We need to remember the original CR3 in the SPT case */
-	if (!npt_enabled)
-		hsave->save.cr3 = svm->vcpu.arch.cr3;
-	hsave->save.cr4 = svm->vcpu.arch.cr4;
-	hsave->save.rip = svm->next_rip;
+	hsave->save.es     = vmcb->save.es;
+	hsave->save.cs     = vmcb->save.cs;
+	hsave->save.ss     = vmcb->save.ss;
+	hsave->save.ds     = vmcb->save.ds;
+	hsave->save.gdtr   = vmcb->save.gdtr;
+	hsave->save.idtr   = vmcb->save.idtr;
+	hsave->save.efer   = svm->vcpu.arch.shadow_efer;
+	hsave->save.cr0    = svm->vcpu.arch.cr0;
+	hsave->save.cr4    = svm->vcpu.arch.cr4;
+	hsave->save.rflags = vmcb->save.rflags;
+	hsave->save.rip    = svm->next_rip;
+	hsave->save.rsp    = vmcb->save.rsp;
+	hsave->save.rax    = vmcb->save.rax;
+	if (npt_enabled)
+		hsave->save.cr3    = vmcb->save.cr3;
+	else
+		hsave->save.cr3    = svm->vcpu.arch.cr3;
+
+	hsave->control = vmcb->control;
 
 	if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
 		svm->vcpu.arch.hflags |= HF_HIF_MASK;
@@ -1721,7 +1735,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
 		kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
 		kvm_mmu_reset_context(&svm->vcpu);
 	}
-	svm->vmcb->save.cr2 = nested_vmcb->save.cr2;
+	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);