aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@cn.fujitsu.com>2010-08-22 07:11:43 -0400
committerAvi Kivity <avi@redhat.com>2010-10-24 04:51:26 -0400
commit48987781eb1d1e8ded41f55cd5806615fda92c6e (patch)
treefa6db806ffec34588fbcaa434c33afac51f661e6
parent887c08ac191efb103e33e589aacbc2ce1a3f131e (diff)
KVM: MMU: introduce gfn_to_page_many_atomic() function
Introduce this function to get consecutive gfn's pages; it can reduce gup's overhead. Used by a later patch. Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r--include/linux/kvm_host.h3
-rw-r--r--virt/kvm/kvm_main.c29
2 files changed, 31 insertions(+), 1 deletion(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 307d0e2c0f59..b837ec80885d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -289,6 +289,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
289void kvm_disable_largepages(void); 289void kvm_disable_largepages(void);
290void kvm_arch_flush_shadow(struct kvm *kvm); 290void kvm_arch_flush_shadow(struct kvm *kvm);
291 291
292int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
293 int nr_pages);
294
292struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); 295struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
293unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); 296unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
294void kvm_release_page_clean(struct page *page); 297void kvm_release_page_clean(struct page *page);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 08bd304f8bc7..2eb0b7500a2a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -927,15 +927,25 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
927 return memslot - slots->memslots; 927 return memslot - slots->memslots;
928} 928}
929 929
930unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 930static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
931 gfn_t *nr_pages)
931{ 932{
932 struct kvm_memory_slot *slot; 933 struct kvm_memory_slot *slot;
933 934
934 slot = gfn_to_memslot(kvm, gfn); 935 slot = gfn_to_memslot(kvm, gfn);
935 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 936 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
936 return bad_hva(); 937 return bad_hva();
938
939 if (nr_pages)
940 *nr_pages = slot->npages - (gfn - slot->base_gfn);
941
937 return gfn_to_hva_memslot(slot, gfn); 942 return gfn_to_hva_memslot(slot, gfn);
938} 943}
944
945unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
946{
947 return gfn_to_hva_many(kvm, gfn, NULL);
948}
939EXPORT_SYMBOL_GPL(gfn_to_hva); 949EXPORT_SYMBOL_GPL(gfn_to_hva);
940 950
941static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic) 951static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
@@ -1010,6 +1020,23 @@ pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
1010 return hva_to_pfn(kvm, addr, false); 1020 return hva_to_pfn(kvm, addr, false);
1011} 1021}
1012 1022
1023int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
1024 int nr_pages)
1025{
1026 unsigned long addr;
1027 gfn_t entry;
1028
1029 addr = gfn_to_hva_many(kvm, gfn, &entry);
1030 if (kvm_is_error_hva(addr))
1031 return -1;
1032
1033 if (entry < nr_pages)
1034 return 0;
1035
1036 return __get_user_pages_fast(addr, nr_pages, 1, pages);
1037}
1038EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
1039
1013struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 1040struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1014{ 1041{
1015 pfn_t pfn; 1042 pfn_t pfn;