aboutsummaryrefslogtreecommitdiffstats
path: root/virt/kvm
diff options
context:
space:
mode:
authorKarimAllah Ahmed <karahmed@amazon.de>2019-01-31 15:24:34 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2019-04-30 15:32:51 -0400
commite45adf665a53df0db37f784ed87c6b57ddd81885 (patch)
tree0075832bee5e3fd9351e22852397766c1fba8f44 /virt/kvm
parentbd53cb35a3e9adb73a834a36586e9ad80e877767 (diff)
KVM: Introduce a new guest mapping API
In KVM, especially for nested guests, there is a dominant pattern of: => map guest memory -> do_something -> unmap guest memory. In addition to all the unnecessary noise in the code due to boilerplate, most of the time the mapping function does not properly handle memory that is not backed by "struct page". This new guest mapping API encapsulates most of this boilerplate code and also handles guest memory that is not backed by "struct page". The current implementation of this API uses memremap for memory that is not backed by a "struct page", which would lead to a huge slow-down if it were used for high-frequency mapping operations. The API does not have any effect on current setups where guest memory is backed by a "struct page". Further patches will also introduce a pfn-cache, which would significantly improve the performance of the memremap case. Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'virt/kvm')
-rw-r--r--virt/kvm/kvm_main.c64
1 files changed, 64 insertions, 0 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3194aa3d0b43..53de2f946f9e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1742,6 +1742,70 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1742} 1742}
1743EXPORT_SYMBOL_GPL(gfn_to_page); 1743EXPORT_SYMBOL_GPL(gfn_to_page);
1744 1744
1745static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
1746 struct kvm_host_map *map)
1747{
1748 kvm_pfn_t pfn;
1749 void *hva = NULL;
1750 struct page *page = KVM_UNMAPPED_PAGE;
1751
1752 if (!map)
1753 return -EINVAL;
1754
1755 pfn = gfn_to_pfn_memslot(slot, gfn);
1756 if (is_error_noslot_pfn(pfn))
1757 return -EINVAL;
1758
1759 if (pfn_valid(pfn)) {
1760 page = pfn_to_page(pfn);
1761 hva = kmap(page);
1762 } else {
1763 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
1764 }
1765
1766 if (!hva)
1767 return -EFAULT;
1768
1769 map->page = page;
1770 map->hva = hva;
1771 map->pfn = pfn;
1772 map->gfn = gfn;
1773
1774 return 0;
1775}
1776
1777int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
1778{
1779 return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
1780}
1781EXPORT_SYMBOL_GPL(kvm_vcpu_map);
1782
1783void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
1784 bool dirty)
1785{
1786 if (!map)
1787 return;
1788
1789 if (!map->hva)
1790 return;
1791
1792 if (map->page)
1793 kunmap(map->page);
1794 else
1795 memunmap(map->hva);
1796
1797 if (dirty) {
1798 kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
1799 kvm_release_pfn_dirty(map->pfn);
1800 } else {
1801 kvm_release_pfn_clean(map->pfn);
1802 }
1803
1804 map->hva = NULL;
1805 map->page = NULL;
1806}
1807EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
1808
1745struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) 1809struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
1746{ 1810{
1747 kvm_pfn_t pfn; 1811 kvm_pfn_t pfn;