author		Izik Eidus <izike@qumranet.com>		2007-10-01 16:14:18 -0400
committer	Avi Kivity <avi@qumranet.com>		2008-01-30 10:52:50 -0500
commit		195aefde9cc2cee38dd54ef92a866721fba4413e
tree		e9d7f47e8a462ee193a53cc4c21e5b3c7254956c	/drivers/kvm/kvm_main.c
parent		290fc38da8187b53b78dd4d5ab27a20b88ef8b61
KVM: Add general accessors to read and write guest memory
Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--	drivers/kvm/kvm_main.c	160
1 file changed, 130 insertions(+), 30 deletions(-)
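For context, a minimal sketch of how a caller might use the accessors this patch introduces. It is not part of the patch; the kvm pointer, the guest physical address and the buffer are illustrative placeholders, and only the function signatures are taken from the diff below.

	/* Hypothetical caller, assuming a valid struct kvm *kvm is in scope. */
	u8 buf[16];
	gpa_t gpa = 0x1000;	/* illustrative guest physical address */

	if (kvm_read_guest(kvm, gpa, buf, sizeof(buf)) < 0)
		return -EFAULT;	/* some page in the range was not mapped */

	buf[0] ^= 1;
	if (kvm_write_guest(kvm, gpa, buf, sizeof(buf)) < 0)
		return -EFAULT;	/* write path also marks the touched pages dirty */

Both helpers accept ranges that cross page boundaries; they split the copy per guest page internally, so callers no longer need gfn_to_page()/kmap_atomic() boilerplate.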
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 9510e2276ca3..cac328b8421c 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -400,22 +400,16 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
 	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
 	int i;
-	u64 *pdpt;
 	int ret;
-	struct page *page;
 	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
 
 	mutex_lock(&vcpu->kvm->lock);
-	page = gfn_to_page(vcpu->kvm, pdpt_gfn);
-	if (!page) {
+	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
+				  offset * sizeof(u64), sizeof(pdpte));
+	if (ret < 0) {
 		ret = 0;
 		goto out;
 	}
-
-	pdpt = kmap_atomic(page, KM_USER0);
-	memcpy(pdpte, pdpt+offset, sizeof(pdpte));
-	kunmap_atomic(pdpt, KM_USER0);
-
 	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
 		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
 			ret = 0;
@@ -962,6 +956,127 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
+static int next_segment(unsigned long len, int offset)
+{
+	if (len > PAGE_SIZE - offset)
+		return PAGE_SIZE - offset;
+	else
+		return len;
+}
+
+int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
+			int len)
+{
+	void *page_virt;
+	struct page *page;
+
+	page = gfn_to_page(kvm, gfn);
+	if (!page)
+		return -EFAULT;
+	page_virt = kmap_atomic(page, KM_USER0);
+
+	memcpy(data, page_virt + offset, len);
+
+	kunmap_atomic(page_virt, KM_USER0);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_read_guest_page);
+
+int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
+{
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	int seg;
+	int offset = offset_in_page(gpa);
+	int ret;
+
+	while ((seg = next_segment(len, offset)) != 0) {
+		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
+		if (ret < 0)
+			return ret;
+		offset = 0;
+		len -= seg;
+		data += seg;
+		++gfn;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_read_guest);
+
+int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
+			 int offset, int len)
+{
+	void *page_virt;
+	struct page *page;
+
+	page = gfn_to_page(kvm, gfn);
+	if (!page)
+		return -EFAULT;
+	page_virt = kmap_atomic(page, KM_USER0);
+
+	memcpy(page_virt + offset, data, len);
+
+	kunmap_atomic(page_virt, KM_USER0);
+	mark_page_dirty(kvm, gfn);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_guest_page);
+
+int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
+		    unsigned long len)
+{
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	int seg;
+	int offset = offset_in_page(gpa);
+	int ret;
+
+	while ((seg = next_segment(len, offset)) != 0) {
+		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
+		if (ret < 0)
+			return ret;
+		offset = 0;
+		len -= seg;
+		data += seg;
+		++gfn;
+	}
+	return 0;
+}
+
+int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
+{
+	void *page_virt;
+	struct page *page;
+
+	page = gfn_to_page(kvm, gfn);
+	if (!page)
+		return -EFAULT;
+	page_virt = kmap_atomic(page, KM_USER0);
+
+	memset(page_virt + offset, 0, len);
+
+	kunmap_atomic(page_virt, KM_USER0);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
+
+int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
+{
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	int seg;
+	int offset = offset_in_page(gpa);
+	int ret;
+
+	while ((seg = next_segment(len, offset)) != 0) {
+		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
+		if (ret < 0)
+			return ret;
+		offset = 0;
+		len -= seg;
+		++gfn;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_clear_guest);
+
 /* WARNING: Does not work on aliased pages. */
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 {
@@ -988,21 +1103,13 @@ int emulator_read_std(unsigned long addr,
 		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
-		unsigned long pfn;
-		struct page *page;
-		void *page_virt;
+		int ret;
 
 		if (gpa == UNMAPPED_GVA)
 			return X86EMUL_PROPAGATE_FAULT;
-		pfn = gpa >> PAGE_SHIFT;
-		page = gfn_to_page(vcpu->kvm, pfn);
-		if (!page)
+		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
+		if (ret < 0)
 			return X86EMUL_UNHANDLEABLE;
-		page_virt = kmap_atomic(page, KM_USER0);
-
-		memcpy(data, page_virt + offset, tocopy);
-
-		kunmap_atomic(page_virt, KM_USER0);
 
 		bytes -= tocopy;
 		data += tocopy;
@@ -1095,19 +1202,12 @@ static int emulator_read_emulated(unsigned long addr,
 static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			       const void *val, int bytes)
 {
-	struct page *page;
-	void *virt;
+	int ret;
 
-	if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
-		return 0;
-	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-	if (!page)
+	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
+	if (ret < 0)
 		return 0;
-	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
-	virt = kmap_atomic(page, KM_USER0);
 	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
-	memcpy(virt + offset_in_page(gpa), val, bytes);
-	kunmap_atomic(virt, KM_USER0);
 	return 1;
 }
 
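The multi-page accessors split a guest-physical range at page boundaries via next_segment(). A minimal user-space sketch of that arithmetic, with PAGE_SIZE fixed at 4096 and an illustrative address and length that are not taken from the patch:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	/* Mirror of the patch's next_segment(): bytes left in the current page. */
	static unsigned long next_segment(unsigned long len, unsigned long offset)
	{
		return len > PAGE_SIZE - offset ? PAGE_SIZE - offset : len;
	}

	int main(void)
	{
		unsigned long gpa = 0x1b38;	/* illustrative guest physical address */
		unsigned long len = 5000;	/* range spans two guest pages */
		unsigned long offset = gpa & (PAGE_SIZE - 1);
		unsigned long gfn = gpa / PAGE_SIZE;
		unsigned long seg;

		while ((seg = next_segment(len, offset)) != 0) {
			printf("gfn %lu: copy %lu bytes at offset %lu\n", gfn, seg, offset);
			offset = 0;	/* later pages are copied from their start */
			len -= seg;
			++gfn;
		}
		return 0;
	}

Run stand-alone, this prints a 1224-byte segment for gfn 1 followed by a 3776-byte segment for gfn 2, which is exactly the per-page work kvm_read_guest()/kvm_write_guest() hand to their single-page counterparts.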