aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2009-08-07 05:49:40 -0400
committerAvi Kivity <avi@redhat.com>2009-09-10 03:46:28 -0400
commit9966bf6872598362b632b738213edfb5a961315d (patch)
treee39c8266c804366b4b5e8f67e267869ffa698d52 /arch/x86/kvm
parent3d62d9aa9868865217ce3a1b70d6039a98b50820 (diff)
KVM: SVM: clean up nested vmload/vmsave paths
This patch removes the usage of nested_svm_do from the vmload and vmsave emulation code paths.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--arch/x86/kvm/svm.c36
1 file changed, 17 insertions, 19 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e85d79142ffc..78c0463ccdd4 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -128,8 +128,6 @@ static void svm_complete_interrupts(struct vcpu_svm *svm);
128 128
129static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override); 129static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override);
130static int nested_svm_vmexit(struct vcpu_svm *svm); 130static int nested_svm_vmexit(struct vcpu_svm *svm);
131static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
132 void *arg2, void *opaque);
133static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, 131static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
134 bool has_error_code, u32 error_code); 132 bool has_error_code, u32 error_code);
135 133
@@ -1865,7 +1863,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
1865 return 0; 1863 return 0;
1866} 1864}
1867 1865
1868static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) 1866static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
1869{ 1867{
1870 to_vmcb->save.fs = from_vmcb->save.fs; 1868 to_vmcb->save.fs = from_vmcb->save.fs;
1871 to_vmcb->save.gs = from_vmcb->save.gs; 1869 to_vmcb->save.gs = from_vmcb->save.gs;
@@ -1879,44 +1877,44 @@ static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
1879 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs; 1877 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
1880 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp; 1878 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
1881 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip; 1879 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
1882
1883 return 1;
1884}
1885
1886static int nested_svm_vmload(struct vcpu_svm *svm, void *nested_vmcb,
1887 void *arg2, void *opaque)
1888{
1889 return nested_svm_vmloadsave((struct vmcb *)nested_vmcb, svm->vmcb);
1890}
1891
1892static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
1893 void *arg2, void *opaque)
1894{
1895 return nested_svm_vmloadsave(svm->vmcb, (struct vmcb *)nested_vmcb);
1896} 1880}
1897 1881
1898static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1882static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1899{ 1883{
1884 struct vmcb *nested_vmcb;
1885
1900 if (nested_svm_check_permissions(svm)) 1886 if (nested_svm_check_permissions(svm))
1901 return 1; 1887 return 1;
1902 1888
1903 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; 1889 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1904 skip_emulated_instruction(&svm->vcpu); 1890 skip_emulated_instruction(&svm->vcpu);
1905 1891
1906 nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmload); 1892 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1893 if (!nested_vmcb)
1894 return 1;
1895
1896 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
1897 nested_svm_unmap(nested_vmcb, KM_USER0);
1907 1898
1908 return 1; 1899 return 1;
1909} 1900}
1910 1901
1911static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1902static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1912{ 1903{
1904 struct vmcb *nested_vmcb;
1905
1913 if (nested_svm_check_permissions(svm)) 1906 if (nested_svm_check_permissions(svm))
1914 return 1; 1907 return 1;
1915 1908
1916 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; 1909 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1917 skip_emulated_instruction(&svm->vcpu); 1910 skip_emulated_instruction(&svm->vcpu);
1918 1911
1919 nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmsave); 1912 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1913 if (!nested_vmcb)
1914 return 1;
1915
1916 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
1917 nested_svm_unmap(nested_vmcb, KM_USER0);
1920 1918
1921 return 1; 1919 return 1;
1922} 1920}