author		Joerg Roedel <joerg.roedel@amd.com>	2010-02-19 10:23:00 -0500
committer	Avi Kivity <avi@redhat.com>		2010-04-25 05:34:07 -0400
commit		7597f129d8b6799da7a264e6d6f7401668d3a36d (patch)
tree		a24e84b9e7b86fb1e74b2e63a61386a09d1a1c42 /arch/x86/kvm/svm.c
parent		ad91f8ffbb18413e79f9f976a55b4e11d02e6a6d (diff)
KVM: SVM: Don't use kmap_atomic in nested_svm_map
Use of kmap_atomic disables preemption, but if we run in shadow-on-shadow
paging mode the vmrun emulation executes kvm_set_cr3, which might sleep or
fault. So use kmap instead in nested_svm_map.

Cc: stable@kernel.org
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
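For context, a minimal sketch of the contrast the patch relies on: kmap_atomic() disables preemption for the lifetime of the mapping, while kmap() leaves preemption enabled (at the cost of a slower, lock-protected mapping and having to unmap by page rather than by address). Here might_sleep_fn(), broken() and fixed() are hypothetical placeholders, not functions in the tree; might_sleep_fn() stands in for anything that may sleep or fault, such as kvm_set_cr3() on the vmrun emulation path.

/*
 * Sketch only: broken() shows the pattern this patch removes,
 * fixed() shows the pattern it introduces.
 */
static void broken(struct page *page)
{
	void *va = kmap_atomic(page, KM_USER0);	/* preemption now disabled */

	might_sleep_fn(va);		/* BUG: "scheduling while atomic" if it sleeps */
	kunmap_atomic(va, KM_USER0);	/* unmapped by address + slot */
}

static void fixed(struct page *page)
{
	void *va = kmap(page);		/* preemption stays enabled; may itself sleep */

	might_sleep_fn(va);		/* safe: sleeping is allowed here */
	kunmap(page);			/* unmapped by page, so callers must keep it */
}

This is also why nested_svm_map() grows the struct page **_page out-parameter: with kunmap() there is no kmap_atomic_to_page() equivalent, so the caller has to remember the page in order to unmap and release it.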
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--	arch/x86/kvm/svm.c	47	++++++++++++++++++++++++-----------------------
1 file changed, 24 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 77fa2e3053b5..f9da35b06ec7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1423,7 +1423,7 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
 	return 0;
 }
 
-static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
+static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
 {
 	struct page *page;
 
@@ -1431,7 +1431,9 @@ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
 	if (is_error_page(page))
 		goto error;
 
-	return kmap_atomic(page, idx);
+	*_page = page;
+
+	return kmap(page);
 
 error:
 	kvm_release_page_clean(page);
@@ -1440,16 +1442,9 @@ error:
 	return NULL;
 }
 
-static void nested_svm_unmap(void *addr, enum km_type idx)
+static void nested_svm_unmap(struct page *page)
 {
-	struct page *page;
-
-	if (!addr)
-		return;
-
-	page = kmap_atomic_to_page(addr);
-
-	kunmap_atomic(addr, idx);
+	kunmap(page);
 	kvm_release_page_dirty(page);
 }
 
@@ -1457,6 +1452,7 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 {
 	u32 param = svm->vmcb->control.exit_info_1 & 1;
 	u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+	struct page *page;
 	bool ret = false;
 	u32 t0, t1;
 	u8 *msrpm;
@@ -1464,7 +1460,7 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
 		return false;
 
-	msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
+	msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
 
 	if (!msrpm)
 		goto out;
@@ -1492,7 +1488,7 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 	ret = msrpm[t1] & ((1 << param) << t0);
 
 out:
-	nested_svm_unmap(msrpm, KM_USER0);
+	nested_svm_unmap(page);
 
 	return ret;
 }
@@ -1615,6 +1611,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 	struct vmcb *nested_vmcb;
 	struct vmcb *hsave = svm->nested.hsave;
 	struct vmcb *vmcb = svm->vmcb;
+	struct page *page;
 
 	trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
 				       vmcb->control.exit_info_1,
@@ -1622,7 +1619,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 				       vmcb->control.exit_int_info,
 				       vmcb->control.exit_int_info_err);
 
-	nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
+	nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
 	if (!nested_vmcb)
 		return 1;
 
@@ -1712,7 +1709,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 	/* Exit nested SVM mode */
 	svm->nested.vmcb = 0;
 
-	nested_svm_unmap(nested_vmcb, KM_USER0);
+	nested_svm_unmap(page);
 
 	kvm_mmu_reset_context(&svm->vcpu);
 	kvm_mmu_load(&svm->vcpu);
@@ -1723,9 +1720,10 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 {
 	u32 *nested_msrpm;
+	struct page *page;
 	int i;
 
-	nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
+	nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
 	if (!nested_msrpm)
 		return false;
 
@@ -1734,7 +1732,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 
 	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
 
-	nested_svm_unmap(nested_msrpm, KM_USER0);
+	nested_svm_unmap(page);
 
 	return true;
 }
@@ -1744,8 +1742,9 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	struct vmcb *nested_vmcb;
 	struct vmcb *hsave = svm->nested.hsave;
 	struct vmcb *vmcb = svm->vmcb;
+	struct page *page;
 
-	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
 	if (!nested_vmcb)
 		return false;
 
@@ -1857,7 +1856,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
 	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
 
-	nested_svm_unmap(nested_vmcb, KM_USER0);
+	nested_svm_unmap(page);
 
 	enable_gif(svm);
 
@@ -1883,6 +1882,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
 static int vmload_interception(struct vcpu_svm *svm)
 {
 	struct vmcb *nested_vmcb;
+	struct page *page;
 
 	if (nested_svm_check_permissions(svm))
 		return 1;
@@ -1890,12 +1890,12 @@ static int vmload_interception(struct vcpu_svm *svm)
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	skip_emulated_instruction(&svm->vcpu);
 
-	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
 	if (!nested_vmcb)
 		return 1;
 
 	nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
-	nested_svm_unmap(nested_vmcb, KM_USER0);
+	nested_svm_unmap(page);
 
 	return 1;
 }
@@ -1903,6 +1903,7 @@ static int vmload_interception(struct vcpu_svm *svm)
 static int vmsave_interception(struct vcpu_svm *svm)
 {
 	struct vmcb *nested_vmcb;
+	struct page *page;
 
 	if (nested_svm_check_permissions(svm))
 		return 1;
@@ -1910,12 +1911,12 @@ static int vmsave_interception(struct vcpu_svm *svm)
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	skip_emulated_instruction(&svm->vcpu);
 
-	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
 	if (!nested_vmcb)
 		return 1;
 
 	nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
-	nested_svm_unmap(nested_vmcb, KM_USER0);
+	nested_svm_unmap(page);
 
 	return 1;
 }