author	Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>	2012-08-20 22:59:53 -0400
committer	Avi Kivity <avi@redhat.com>	2012-08-22 08:08:53 -0400
commit	86ab8cffb498077e926957f099b064db3751c1de
tree	95209c3c8c31eeb52774994d545a035efd444eb0 /virt
parent	037d92dc5d4691ae7cf44699c55ca83b1b441992
KVM: introduce gfn_to_hva_read/kvm_read_hva/kvm_read_hva_atomic
This set of functions is only used to read data from host space. In a later patch, gfn_to_hva_read will return only a read-only hva, and the function name is a good hint that gfn_to_hva_read should pair with kvm_read_hva()/kvm_read_hva_atomic().

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
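As a rough sketch of the intended calling pattern (based on the kvm_read_guest_page() hunk below; the helper name example_read_guest_page is made up, and gfn_to_hva_read()/kvm_read_hva() are static to virt/kvm/kvm_main.c, so a caller like this would have to live in that file):

#include <linux/kvm_host.h>	/* struct kvm, gfn_t, kvm_is_error_hva() */

/*
 * Illustrative only: read 'len' bytes at 'offset' within guest frame 'gfn'.
 * The hva comes from gfn_to_hva_read(), so it is treated as read-only and
 * is consumed through kvm_read_hva() rather than a bare __copy_from_user().
 */
static int example_read_guest_page(struct kvm *kvm, gfn_t gfn,
				   void *data, int offset, int len)
{
	unsigned long addr = gfn_to_hva_read(kvm, gfn);

	if (kvm_is_error_hva(addr))
		return -EFAULT;
	if (kvm_read_hva(data, (void __user *)addr + offset, len))
		return -EFAULT;
	return 0;
}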
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	27
1 file changed, 23 insertions(+), 4 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 543f9b7e5aa2..6e3ea15397d4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1002,6 +1002,25 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
+/*
+ * The hva returned by this function is only allowed to be read.
+ * It should pair with kvm_read_hva() or kvm_read_hva_atomic().
+ */
+static unsigned long gfn_to_hva_read(struct kvm *kvm, gfn_t gfn)
+{
+	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
+}
+
+static int kvm_read_hva(void *data, void __user *hva, int len)
+{
+	return __copy_from_user(data, hva, len);
+}
+
+static int kvm_read_hva_atomic(void *data, void __user *hva, int len)
+{
+	return __copy_from_user_inatomic(data, hva, len);
+}
+
 int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
			 unsigned long start, int write, struct page **page)
 {
@@ -1274,10 +1293,10 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 	int r;
 	unsigned long addr;
 
-	addr = gfn_to_hva(kvm, gfn);
+	addr = gfn_to_hva_read(kvm, gfn);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
-	r = __copy_from_user(data, (void __user *)addr + offset, len);
+	r = kvm_read_hva(data, (void __user *)addr + offset, len);
 	if (r)
 		return -EFAULT;
 	return 0;
@@ -1312,11 +1331,11 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	int offset = offset_in_page(gpa);
 
-	addr = gfn_to_hva(kvm, gfn);
+	addr = gfn_to_hva_read(kvm, gfn);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
 	pagefault_disable();
-	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
+	r = kvm_read_hva_atomic(data, (void __user *)addr + offset, len);
 	pagefault_enable();
 	if (r)
 		return -EFAULT;
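For contexts that cannot take a page fault (and hence cannot sleep), the same pairing goes through kvm_read_hva_atomic() with page faults disabled, as kvm_read_guest_atomic() above does. A minimal sketch under the same assumptions (hypothetical helper name, in-file only):

#include <linux/kvm_host.h>	/* struct kvm, gpa_t, gfn_t, kvm_is_error_hva() */
#include <linux/mm.h>		/* offset_in_page(), PAGE_SHIFT */

/* Illustrative only: atomic read of 'len' bytes at guest physical address 'gpa'. */
static int example_read_guest_atomic(struct kvm *kvm, gpa_t gpa,
				     void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);
	unsigned long addr = gfn_to_hva_read(kvm, gfn);
	int r;

	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();	/* __copy_from_user_inatomic() must not fault pages in */
	r = kvm_read_hva_atomic(data, (void __user *)addr + offset, len);
	pagefault_enable();

	return r ? -EFAULT : 0;
}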