author:    Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>  2011-06-30 12:34:56 -0400
committer: Avi Kivity <avi@redhat.com>  2011-07-12 06:16:42 -0400
commit:    134291bf3cb434a9039298ba6b15ef33e65ba542 (patch)
tree:      b4eb6e5dcb032e8bbf499f6692f65e7475c7ca35 /arch/x86/kvm/paging_tmpl.h
parent:    f8f7e5ee1037e347eafff8f526913b92cec54873 (diff)
KVM: MMU: Clean up the error handling of walk_addr_generic()
Avoid the two-step jump to the error handling part.  This eliminates
the use of the variables present and rsvd_fault.

We also use the const type qualifier to show that write/user/fetch_fault
do not change in the function.

Both of these were suggested by Ingo Molnar.

Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
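[Editor's note] The cleanup replaces the flag-and-recheck pattern with error-code
accumulation at each failure site. Below is a minimal standalone sketch of the two
shapes, not the walker itself: step_present()/step_rsvd_ok() and the FAKE_* masks
are hypothetical stand-ins, not identifiers from the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_PRESENT_MASK (1u << 0)	/* stand-in for PFERR_PRESENT_MASK */
#define FAKE_RSVD_MASK    (1u << 3)	/* stand-in for PFERR_RSVD_MASK */

static bool step_present(void) { return true; }  /* pretend the pte is present */
static bool step_rsvd_ok(void) { return false; } /* pretend reserved bits are set */

/*
 * Old shape: failure sites set flags and break out of the loop (step one);
 * a flag check then jumps to an error label (step two) that re-derives the
 * error code from the flags.
 */
static uint16_t walk_old(void)
{
	bool present = true, rsvd_fault = false;
	uint16_t error_code = 0;
	int level;

	for (level = 4; level > 0; --level) {
		if (!step_present()) {
			present = false;
			break;			/* step one: leave the loop */
		}
		if (!step_rsvd_ok()) {
			rsvd_fault = true;
			break;
		}
	}
	if (!present || rsvd_fault)
		goto error;			/* step two: jump to the handler */
	return 0;
error:
	if (present)
		error_code |= FAKE_PRESENT_MASK;
	if (rsvd_fault)
		error_code |= FAKE_RSVD_MASK;
	return error_code;
}

/* New shape: accumulate the error code at the failure site and jump once. */
static uint16_t walk_new(void)
{
	uint16_t errcode = 0;
	int level;

	for (level = 4; level > 0; --level) {
		if (!step_present())
			goto error;		/* errcode stays 0: not-present fault */
		if (!step_rsvd_ok()) {
			errcode |= FAKE_RSVD_MASK | FAKE_PRESENT_MASK;
			goto error;		/* one jump, bits set where known */
		}
	}
	return 0;
error:
	return errcode;
}

int main(void)
{
	/* Both shapes report the same fault bits for the same failure. */
	printf("old: %#x  new: %#x\n", (unsigned)walk_old(), (unsigned)walk_new());
	return 0;
}

In the patch itself the bits are the real PFERR_* masks and the single jump
target is the error: label shown in the final hunk.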
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 82
1 file changed, 32 insertions(+), 50 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1caeb4d22e01..f0746d27e33e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -125,18 +125,17 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	gfn_t table_gfn;
 	unsigned index, pt_access, uninitialized_var(pte_access);
 	gpa_t pte_gpa;
-	bool eperm, present, rsvd_fault;
-	int offset, write_fault, user_fault, fetch_fault;
-
-	write_fault = access & PFERR_WRITE_MASK;
-	user_fault = access & PFERR_USER_MASK;
-	fetch_fault = access & PFERR_FETCH_MASK;
+	bool eperm;
+	int offset;
+	const int write_fault = access & PFERR_WRITE_MASK;
+	const int user_fault = access & PFERR_USER_MASK;
+	const int fetch_fault = access & PFERR_FETCH_MASK;
+	u16 errcode = 0;
 
 	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
 				     fetch_fault);
 walk:
-	present = true;
-	eperm = rsvd_fault = false;
+	eperm = false;
 	walker->level = mmu->root_level;
 	pte = mmu->get_cr3(vcpu);
 
@@ -144,10 +143,8 @@ walk:
 	if (walker->level == PT32E_ROOT_LEVEL) {
 		pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
 		trace_kvm_mmu_paging_element(pte, walker->level);
-		if (!is_present_gpte(pte)) {
-			present = false;
+		if (!is_present_gpte(pte))
 			goto error;
-		}
 		--walker->level;
 	}
 #endif
@@ -170,35 +167,27 @@ walk:
 
 		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
 					      PFERR_USER_MASK|PFERR_WRITE_MASK);
-		if (unlikely(real_gfn == UNMAPPED_GVA)) {
-			present = false;
-			break;
-		}
+		if (unlikely(real_gfn == UNMAPPED_GVA))
+			goto error;
 		real_gfn = gpa_to_gfn(real_gfn);
 
 		host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
-		if (unlikely(kvm_is_error_hva(host_addr))) {
-			present = false;
-			break;
-		}
+		if (unlikely(kvm_is_error_hva(host_addr)))
+			goto error;
 
 		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
-		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
-			present = false;
-			break;
-		}
+		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
+			goto error;
 
 		trace_kvm_mmu_paging_element(pte, walker->level);
 
-		if (unlikely(!is_present_gpte(pte))) {
-			present = false;
-			break;
-		}
+		if (unlikely(!is_present_gpte(pte)))
+			goto error;
 
 		if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
 					      walker->level))) {
-			rsvd_fault = true;
-			break;
+			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
+			goto error;
 		}
 
 		if (unlikely(write_fault && !is_writable_pte(pte)
@@ -213,17 +202,15 @@ walk:
 			eperm = true;
 #endif
 
-		if (!eperm && !rsvd_fault
-		    && unlikely(!(pte & PT_ACCESSED_MASK))) {
+		if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
 			int ret;
 			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
 						       sizeof(pte));
 			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
 						  pte, pte|PT_ACCESSED_MASK);
-			if (unlikely(ret < 0)) {
-				present = false;
-				break;
-			} else if (ret)
+			if (unlikely(ret < 0))
+				goto error;
+			else if (ret)
 				goto walk;
 
 			mark_page_dirty(vcpu->kvm, table_gfn);
@@ -276,8 +263,10 @@ walk:
 		--walker->level;
 	}
 
-	if (unlikely(!present || eperm || rsvd_fault))
+	if (unlikely(eperm)) {
+		errcode |= PFERR_PRESENT_MASK;
 		goto error;
+	}
 
 	if (write_fault && unlikely(!is_dirty_gpte(pte))) {
 		int ret;
@@ -285,10 +274,9 @@ walk:
 		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
 		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
 					  pte, pte|PT_DIRTY_MASK);
-		if (unlikely(ret < 0)) {
-			present = false;
+		if (unlikely(ret < 0))
 			goto error;
-		} else if (ret)
+		else if (ret)
 			goto walk;
 
 		mark_page_dirty(vcpu->kvm, table_gfn);
@@ -303,20 +291,14 @@ walk:
 	return 1;
 
 error:
-	walker->fault.vector = PF_VECTOR;
-	walker->fault.error_code_valid = true;
-	walker->fault.error_code = 0;
-	if (present)
-		walker->fault.error_code |= PFERR_PRESENT_MASK;
-
-	walker->fault.error_code |= write_fault | user_fault;
-
+	errcode |= write_fault | user_fault;
 	if (fetch_fault && (mmu->nx ||
 			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
-		walker->fault.error_code |= PFERR_FETCH_MASK;
-	if (rsvd_fault)
-		walker->fault.error_code |= PFERR_RSVD_MASK;
+		errcode |= PFERR_FETCH_MASK;
 
+	walker->fault.vector = PF_VECTOR;
+	walker->fault.error_code_valid = true;
+	walker->fault.error_code = errcode;
 	walker->fault.address = addr;
 	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
 
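[Editor's note] For reading convenience, the error path as it stands after the
patch, reassembled from the new side of the final hunk (identifiers exactly as
in the patch):

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

All fault bits are collected in errcode before walker->fault is touched, so the
label writes each field exactly once.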