author    Avi Kivity <avi@redhat.com>    2008-12-25 08:10:50 -0500
committer Avi Kivity <avi@redhat.com>    2009-03-24 05:02:52 -0400
commit    e7a04c99b54ad9acb98a56113ec3163bc1039e13 (patch)
tree      d9ea69f44ffb5bf071022b746aba86b1ae82f13e /arch/x86/kvm
parent    9f652d21c3f887075a33abae85bf53fec64e67b1 (diff)
KVM: MMU: Replace walk_shadow() by for_each_shadow_entry() in fetch()
Effectively reverting to the pre walk_shadow() version -- but now with the reusable for_each().

Signed-off-by: Avi Kivity <avi@redhat.com>
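For context, a minimal sketch of the open-coded walk that fetch() switches to in this patch; the for_each_shadow_entry() macro and struct kvm_shadow_walk_iterator come from the parent commit, and the loop body here is elided and only illustrative:

	struct kvm_shadow_walk_iterator iterator;

	/* Walk the shadow page table for 'addr' level by level; the caller
	 * keeps ordinary locals instead of packing its state into a
	 * struct shadow_walker for a walk_shadow() callback. */
	for_each_shadow_entry(vcpu, addr, iterator) {
		int level = iterator.level;	/* current paging level */
		u64 *sptep = iterator.sptep;	/* shadow pte slot at this level */

		/* ... per-level work as in fetch() below; break at the leaf ... */
	}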
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--   arch/x86/kvm/paging_tmpl.h   128
1 file changed, 58 insertions(+), 70 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 9fd78b6e17ad..69c7e3311b8a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -283,91 +283,79 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
-static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
-				    struct kvm_vcpu *vcpu, u64 addr,
-				    u64 *sptep, int level)
+static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+			 struct guest_walker *gw,
+			 int user_fault, int write_fault, int largepage,
+			 int *ptwrite, pfn_t pfn)
 {
-	struct shadow_walker *sw =
-		container_of(_sw, struct shadow_walker, walker);
-	struct guest_walker *gw = sw->guest_walker;
 	unsigned access = gw->pt_access;
 	struct kvm_mmu_page *shadow_page;
-	u64 spte;
+	u64 spte, *sptep;
 	int metaphysical;
 	gfn_t table_gfn;
 	int r;
+	int level;
 	pt_element_t curr_pte;
+	struct kvm_shadow_walk_iterator iterator;
 
-	if (level == PT_PAGE_TABLE_LEVEL
-	    || (sw->largepage && level == PT_DIRECTORY_LEVEL)) {
-		mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
-			     sw->user_fault, sw->write_fault,
-			     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
-			     sw->ptwrite, sw->largepage,
-			     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
-			     gw->gfn, sw->pfn, false);
-		sw->sptep = sptep;
-		return 1;
-	}
-
-	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
-		return 0;
-
-	if (is_large_pte(*sptep)) {
-		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
-		kvm_flush_remote_tlbs(vcpu->kvm);
-		rmap_remove(vcpu->kvm, sptep);
-	}
+	if (!is_present_pte(gw->ptes[gw->level - 1]))
+		return NULL;
 
-	if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) {
-		metaphysical = 1;
-		if (!is_dirty_pte(gw->ptes[level - 1]))
-			access &= ~ACC_WRITE_MASK;
-		table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
-	} else {
-		metaphysical = 0;
-		table_gfn = gw->table_gfn[level - 2];
-	}
-	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
-				       metaphysical, access, sptep);
-	if (!metaphysical) {
-		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
-					  &curr_pte, sizeof(curr_pte));
-		if (r || curr_pte != gw->ptes[level - 2]) {
-			kvm_mmu_put_page(shadow_page, sptep);
-			kvm_release_pfn_clean(sw->pfn);
-			sw->sptep = NULL;
-			return 1;
+	for_each_shadow_entry(vcpu, addr, iterator) {
+		level = iterator.level;
+		sptep = iterator.sptep;
+		if (level == PT_PAGE_TABLE_LEVEL
+		    || (largepage && level == PT_DIRECTORY_LEVEL)) {
+			mmu_set_spte(vcpu, sptep, access,
+				     gw->pte_access & access,
+				     user_fault, write_fault,
+				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
+				     ptwrite, largepage,
+				     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
+				     gw->gfn, pfn, false);
+			break;
 		}
-	}
 
-	spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK
-		| PT_WRITABLE_MASK | PT_USER_MASK;
-	*sptep = spte;
-	return 0;
-}
+		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
+			continue;
 
-static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
-			 struct guest_walker *guest_walker,
-			 int user_fault, int write_fault, int largepage,
-			 int *ptwrite, pfn_t pfn)
-{
-	struct shadow_walker walker = {
-		.walker = { .entry = FNAME(shadow_walk_entry), },
-		.guest_walker = guest_walker,
-		.user_fault = user_fault,
-		.write_fault = write_fault,
-		.largepage = largepage,
-		.ptwrite = ptwrite,
-		.pfn = pfn,
-	};
+		if (is_large_pte(*sptep)) {
+			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+			kvm_flush_remote_tlbs(vcpu->kvm);
+			rmap_remove(vcpu->kvm, sptep);
+		}
 
-	if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1]))
-		return NULL;
+		if (level == PT_DIRECTORY_LEVEL
+		    && gw->level == PT_DIRECTORY_LEVEL) {
+			metaphysical = 1;
+			if (!is_dirty_pte(gw->ptes[level - 1]))
+				access &= ~ACC_WRITE_MASK;
+			table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
+		} else {
+			metaphysical = 0;
+			table_gfn = gw->table_gfn[level - 2];
+		}
+		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+					       metaphysical, access, sptep);
+		if (!metaphysical) {
+			r = kvm_read_guest_atomic(vcpu->kvm,
+						  gw->pte_gpa[level - 2],
+						  &curr_pte, sizeof(curr_pte));
+			if (r || curr_pte != gw->ptes[level - 2]) {
+				kvm_mmu_put_page(shadow_page, sptep);
+				kvm_release_pfn_clean(pfn);
+				sptep = NULL;
+				break;
+			}
+		}
 
-	walk_shadow(&walker.walker, vcpu, addr);
+		spte = __pa(shadow_page->spt)
+			| PT_PRESENT_MASK | PT_ACCESSED_MASK
+			| PT_WRITABLE_MASK | PT_USER_MASK;
+		*sptep = spte;
+	}
 
-	return walker.sptep;
+	return sptep;
 }
 
 /*