aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/kvm/kvm_main.c
diff options
context:
space:
mode:
authorIzik Eidus <izike@qumranet.com>2007-11-11 15:10:22 -0500
committerAvi Kivity <avi@qumranet.com>2008-01-30 10:53:02 -0500
commite0506bcba5992650b1190de9125f5963a30f32e2 (patch)
tree5527a69e7f84afbe858adbc541ceb509b7c6254d /drivers/kvm/kvm_main.c
parent539cb6608ca804e7805d8e88c83377d991a552b1 (diff)
KVM: Change kvm_{read,write}_guest() to use copy_{from,to}_user()
This changes kvm_write_guest_page/kvm_read_guest_page to use copy_to_user/copy_from_user. As a result, we get better speed and better dirty bit tracking. Signed-off-by: Izik Eidus <izike@qumranet.com> Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--drivers/kvm/kvm_main.c38
1 file changed, 14 insertions, 24 deletions
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 04544aecf22f..d68901bd0693 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -630,20 +630,15 @@ static int next_segment(unsigned long len, int offset)
630int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 630int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
631 int len) 631 int len)
632{ 632{
633 void *page_virt; 633 int r;
634 struct page *page; 634 unsigned long addr;
635 635
636 page = gfn_to_page(kvm, gfn); 636 addr = gfn_to_hva(kvm, gfn);
637 if (is_error_page(page)) { 637 if (kvm_is_error_hva(addr))
638 kvm_release_page(page); 638 return -EFAULT;
639 r = copy_from_user(data, (void __user *)addr + offset, len);
640 if (r)
639 return -EFAULT; 641 return -EFAULT;
640 }
641 page_virt = kmap_atomic(page, KM_USER0);
642
643 memcpy(data, page_virt + offset, len);
644
645 kunmap_atomic(page_virt, KM_USER0);
646 kvm_release_page(page);
647 return 0; 642 return 0;
648} 643}
649EXPORT_SYMBOL_GPL(kvm_read_guest_page); 644EXPORT_SYMBOL_GPL(kvm_read_guest_page);
@@ -671,21 +666,16 @@ EXPORT_SYMBOL_GPL(kvm_read_guest);
671int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, 666int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
672 int offset, int len) 667 int offset, int len)
673{ 668{
674 void *page_virt; 669 int r;
675 struct page *page; 670 unsigned long addr;
676 671
677 page = gfn_to_page(kvm, gfn); 672 addr = gfn_to_hva(kvm, gfn);
678 if (is_error_page(page)) { 673 if (kvm_is_error_hva(addr))
679 kvm_release_page(page); 674 return -EFAULT;
675 r = copy_to_user((void __user *)addr + offset, data, len);
676 if (r)
680 return -EFAULT; 677 return -EFAULT;
681 }
682 page_virt = kmap_atomic(page, KM_USER0);
683
684 memcpy(page_virt + offset, data, len);
685
686 kunmap_atomic(page_virt, KM_USER0);
687 mark_page_dirty(kvm, gfn); 678 mark_page_dirty(kvm, gfn);
688 kvm_release_page(page);
689 return 0; 679 return 0;
690} 680}
691EXPORT_SYMBOL_GPL(kvm_write_guest_page); 681EXPORT_SYMBOL_GPL(kvm_write_guest_page);