path: root/virt
author	Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>	2012-08-20 23:00:22 -0400
committer	Avi Kivity <avi@redhat.com>	2012-08-22 08:08:54 -0400
commit	2fc843117d64b31c20d36c6b625c0626c9a41a41 (patch)
tree	29286338f67b88225a791203f2705471f438c8a2 /virt
parent	86ab8cffb498077e926957f099b064db3751c1de (diff)
KVM: reorganize hva_to_pfn
We do too many things in hva_to_pfn; this patch reorganizes the code to make it more readable.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	159
1 file changed, 97 insertions(+), 62 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6e3ea15397d4..aa4a38ad9d9a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1041,83 +1041,118 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 	return rc == -EHWPOISON;
 }
 
-static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
-			bool write_fault, bool *writable)
+/*
+ * The atomic path to get the writable pfn which will be stored in @pfn,
+ * true indicates success, otherwise false is returned.
+ */
+static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
+			    bool write_fault, bool *writable, pfn_t *pfn)
 {
 	struct page *page[1];
-	int npages = 0;
-	pfn_t pfn;
+	int npages;
 
-	/* we can do it either atomically or asynchronously, not both */
-	BUG_ON(atomic && async);
+	if (!(async || atomic))
+		return false;
 
-	BUG_ON(!write_fault && !writable);
+	npages = __get_user_pages_fast(addr, 1, 1, page);
+	if (npages == 1) {
+		*pfn = page_to_pfn(page[0]);
 
-	if (writable)
-		*writable = true;
+		if (writable)
+			*writable = true;
+		return true;
+	}
+
+	return false;
+}
 
-	if (atomic || async)
-		npages = __get_user_pages_fast(addr, 1, 1, page);
+/*
+ * The slow path to get the pfn of the specified host virtual address,
+ * 1 indicates success, -errno is returned if error is detected.
+ */
+static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
+			   bool *writable, pfn_t *pfn)
+{
+	struct page *page[1];
+	int npages = 0;
 
-	if (unlikely(npages != 1) && !atomic) {
-		might_sleep();
+	might_sleep();
 
-		if (writable)
-			*writable = write_fault;
+	if (writable)
+		*writable = write_fault;
 
-		if (async) {
-			down_read(&current->mm->mmap_sem);
-			npages = get_user_page_nowait(current, current->mm,
-						     addr, write_fault, page);
-			up_read(&current->mm->mmap_sem);
-		} else
-			npages = get_user_pages_fast(addr, 1, write_fault,
-						     page);
-
-		/* map read fault as writable if possible */
-		if (unlikely(!write_fault) && npages == 1) {
-			struct page *wpage[1];
-
-			npages = __get_user_pages_fast(addr, 1, 1, wpage);
-			if (npages == 1) {
-				*writable = true;
-				put_page(page[0]);
-				page[0] = wpage[0];
-			}
-			npages = 1;
-		}
+	if (async) {
+		down_read(&current->mm->mmap_sem);
+		npages = get_user_page_nowait(current, current->mm,
+					      addr, write_fault, page);
+		up_read(&current->mm->mmap_sem);
+	} else
+		npages = get_user_pages_fast(addr, 1, write_fault,
+					     page);
+	if (npages != 1)
+		return npages;
+
+	/* map read fault as writable if possible */
+	if (unlikely(!write_fault)) {
+		struct page *wpage[1];
+
+		npages = __get_user_pages_fast(addr, 1, 1, wpage);
+		if (npages == 1) {
+			*writable = true;
+			put_page(page[0]);
+			page[0] = wpage[0];
+		}
+
+		npages = 1;
 	}
+	*pfn = page_to_pfn(page[0]);
+	return npages;
+}
+
+static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
+			bool write_fault, bool *writable)
+{
+	struct vm_area_struct *vma;
+	pfn_t pfn = 0;
+	int npages;
 
-	if (unlikely(npages != 1)) {
-		struct vm_area_struct *vma;
+	/* we can do it either atomically or asynchronously, not both */
+	BUG_ON(atomic && async);
 
-		if (atomic)
-			return KVM_PFN_ERR_FAULT;
+	BUG_ON(!write_fault && !writable);
 
-		down_read(&current->mm->mmap_sem);
-		if (npages == -EHWPOISON ||
-			(!async && check_user_page_hwpoison(addr))) {
-			up_read(&current->mm->mmap_sem);
-			return KVM_PFN_ERR_HWPOISON;
-		}
+	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
+		return pfn;
 
-		vma = find_vma_intersection(current->mm, addr, addr+1);
-
-		if (vma == NULL)
-			pfn = KVM_PFN_ERR_FAULT;
-		else if ((vma->vm_flags & VM_PFNMAP)) {
-			pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
-				vma->vm_pgoff;
-			BUG_ON(!kvm_is_mmio_pfn(pfn));
-		} else {
-			if (async && (vma->vm_flags & VM_WRITE))
-				*async = true;
-			pfn = KVM_PFN_ERR_FAULT;
-		}
-		up_read(&current->mm->mmap_sem);
-	} else
-		pfn = page_to_pfn(page[0]);
+	if (atomic)
+		return KVM_PFN_ERR_FAULT;
 
+	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
+	if (npages == 1)
+		return pfn;
+
+	down_read(&current->mm->mmap_sem);
+	if (npages == -EHWPOISON ||
+	      (!async && check_user_page_hwpoison(addr))) {
+		pfn = KVM_PFN_ERR_HWPOISON;
+		goto exit;
+	}
+
+	vma = find_vma_intersection(current->mm, addr, addr + 1);
+
+	if (vma == NULL)
+		pfn = KVM_PFN_ERR_FAULT;
+	else if ((vma->vm_flags & VM_PFNMAP)) {
+		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+			vma->vm_pgoff;
+		BUG_ON(!kvm_is_mmio_pfn(pfn));
+	} else {
+		if (async && (vma->vm_flags & VM_WRITE))
+			*async = true;
+		pfn = KVM_PFN_ERR_FAULT;
+	}
+exit:
+	up_read(&current->mm->mmap_sem);
 	return pfn;
 }
 
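Editor's note: the shape of this refactoring is a common one, a non-sleeping fast path tried first, a sleeping slow path behind it, and a single fallback that classifies the failure. The user-space C sketch below illustrates only that control flow; lookup_fast, lookup_slow, lookup, and the error values are hypothetical stand-ins, not KVM's actual API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical error values, standing in for KVM_PFN_ERR_FAULT and
 * KVM_PFN_ERR_HWPOISON. */
#define ERR_FAULT   (-1L)
#define ERR_POISON  (-2L)

/* Fast path: safe in atomic context, never sleeps.  Returns true on
 * success and stores the result in *out, mirroring hva_to_pfn_fast. */
static bool lookup_fast(unsigned long key, long *out)
{
	if (key % 2 == 0) {	/* pretend even keys hit a lock-free cache */
		*out = (long)key * 10;
		return true;
	}
	return false;
}

/* Slow path: may sleep (take locks, fault pages in).  Returns 1 on
 * success or a negative error, mirroring hva_to_pfn_slow. */
static int lookup_slow(unsigned long key, long *out)
{
	if (key == 13)		/* pretend this key is poisoned */
		return -2;
	*out = (long)key * 10;
	return 1;
}

/* Top level, shaped like the reorganized hva_to_pfn: try the fast path,
 * bail out if the caller required atomicity, then run the slow path and
 * map its failure onto an error value. */
static long lookup(unsigned long key, bool atomic)
{
	long val = 0;
	int npages;

	if (lookup_fast(key, &val))
		return val;

	if (atomic)		/* not allowed to sleep: give up early */
		return ERR_FAULT;

	npages = lookup_slow(key, &val);
	if (npages == 1)
		return val;

	return npages == -2 ? ERR_POISON : ERR_FAULT;
}

int main(void)
{
	printf("%ld %ld %ld\n",
	       lookup(4, true),		/* fast path succeeds */
	       lookup(7, false),	/* falls through to the slow path */
	       lookup(13, false));	/* slow path reports poison */
	return 0;
}

The payoff visible in the patch is the same as in the sketch: each helper has one job, the atomic/async policy checks appear once at the top level instead of being threaded through the npages bookkeeping, and the mmap_sem fallback collapses into a single exit path.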