author		Paolo Bonzini <pbonzini@redhat.com>	2015-04-02 08:08:20 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-04-08 04:46:54 -0400
commit		3180a7fcbc0ec7ed7cc85ed5015bdd7a8c2176e8 (patch)
tree		8e5b1a5a2920c1c0c2517003626eeee31ea8f077 /virt
parent		9c8fd1ba2201c072bd3cf6940e2ca4d0a7aed723 (diff)
KVM: remove kvm_read_hva and kvm_read_hva_atomic
The corresponding write functions just use __copy_to_user. Do the same on the
read side.

This reverts what's left of commit 86ab8cffb498 ("KVM: introduce
gfn_to_hva_read/kvm_read_hva/kvm_read_hva_atomic", 2012-08-21).

Cc: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1427976500-28533-1-git-send-email-pbonzini@redhat.com>
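For reference, the write-side helper that motivates this change already issues
__copy_to_user directly instead of going through a wrapper. A rough sketch of
that pattern, reconstructed for illustration and not part of this diff (the
exact body in the tree at this commit may differ):

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	/* write side: raw __copy_to_user, no kvm_write_hva() wrapper */
	r = __copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}

The patch below makes the read paths symmetric by dropping the kvm_read_hva()
and kvm_read_hva_atomic() wrappers and calling __copy_from_user /
__copy_from_user_inatomic directly.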
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	14
1 file changed, 2 insertions(+), 12 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0d06b7b63e95..aadef264bed1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1187,16 +1187,6 @@ unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
 }
 
-static int kvm_read_hva(void *data, void __user *hva, int len)
-{
-	return __copy_from_user(data, hva, len);
-}
-
-static int kvm_read_hva_atomic(void *data, void __user *hva, int len)
-{
-	return __copy_from_user_inatomic(data, hva, len);
-}
-
 static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
 				unsigned long start, int write, struct page **page)
 {
@@ -1548,7 +1538,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 	addr = gfn_to_hva_prot(kvm, gfn, NULL);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
-	r = kvm_read_hva(data, (void __user *)addr + offset, len);
+	r = __copy_from_user(data, (void __user *)addr + offset, len);
 	if (r)
 		return -EFAULT;
 	return 0;
@@ -1587,7 +1577,7 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
 	pagefault_disable();
-	r = kvm_read_hva_atomic(data, (void __user *)addr + offset, len);
+	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
 	pagefault_enable();
 	if (r)
 		return -EFAULT;