aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2009-08-07 05:49:38 -0400
committerAvi Kivity <avi@redhat.com>2009-09-10 01:33:26 -0400
commit34f80cfad59ee587e374cbaf5f2a31d9f5015404 (patch)
tree80c12229f6a27f797fcd8d72e822ac17ae4332b7 /arch/x86
parent0295ad7de86a6347316bc7414c1b9c15f56a1333 (diff)
KVM: SVM: get rid of nested_svm_vmexit_real
This patch is the starting point of removing nested_svm_do from the nested svm code. The nested_svm_do function basically maps two guest physical pages to host virtual addresses and calls a passed function on it. This function-pointer code flow is hard to read and not the best technical solution here. As a side effect this patch introduces the nested_svm_[un]map helper functions. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kvm/svm.c52
1 files changed, 40 insertions, 12 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 67fad6641d55..5e55a1bdd13d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1390,6 +1390,39 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
1390 return 0; 1390 return 0;
1391} 1391}
1392 1392
1393static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
1394{
1395 struct page *page;
1396
1397 down_read(&current->mm->mmap_sem);
1398 page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
1399 up_read(&current->mm->mmap_sem);
1400
1401 if (is_error_page(page))
1402 goto error;
1403
1404 return kmap_atomic(page, idx);
1405
1406error:
1407 kvm_release_page_clean(page);
1408 kvm_inject_gp(&svm->vcpu, 0);
1409
1410 return NULL;
1411}
1412
1413static void nested_svm_unmap(void *addr, enum km_type idx)
1414{
1415 struct page *page;
1416
1417 if (!addr)
1418 return;
1419
1420 page = kmap_atomic_to_page(addr);
1421
1422 kunmap_atomic(addr, idx);
1423 kvm_release_page_dirty(page);
1424}
1425
1393static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa) 1426static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa)
1394{ 1427{
1395 struct page *page; 1428 struct page *page;
@@ -1597,13 +1630,16 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
1597 dst->lbr_ctl = from->lbr_ctl; 1630 dst->lbr_ctl = from->lbr_ctl;
1598} 1631}
1599 1632
1600static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1, 1633static int nested_svm_vmexit(struct vcpu_svm *svm)
1601 void *arg2, void *opaque)
1602{ 1634{
1603 struct vmcb *nested_vmcb = (struct vmcb *)arg1; 1635 struct vmcb *nested_vmcb;
1604 struct vmcb *hsave = svm->nested.hsave; 1636 struct vmcb *hsave = svm->nested.hsave;
1605 struct vmcb *vmcb = svm->vmcb; 1637 struct vmcb *vmcb = svm->vmcb;
1606 1638
1639 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
1640 if (!nested_vmcb)
1641 return 1;
1642
1607 /* Give the current vmcb to the guest */ 1643 /* Give the current vmcb to the guest */
1608 disable_gif(svm); 1644 disable_gif(svm);
1609 1645
@@ -1678,15 +1714,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
1678 /* Exit nested SVM mode */ 1714 /* Exit nested SVM mode */
1679 svm->nested.vmcb = 0; 1715 svm->nested.vmcb = 0;
1680 1716
1681 return 0; 1717 nested_svm_unmap(nested_vmcb, KM_USER0);
1682}
1683
1684static int nested_svm_vmexit(struct vcpu_svm *svm)
1685{
1686 nsvm_printk("VMexit\n");
1687 if (nested_svm_do(svm, svm->nested.vmcb, 0,
1688 NULL, nested_svm_vmexit_real))
1689 return 1;
1690 1718
1691 kvm_mmu_reset_context(&svm->vcpu); 1719 kvm_mmu_reset_context(&svm->vcpu);
1692 kvm_mmu_load(&svm->vcpu); 1720 kvm_mmu_load(&svm->vcpu);