author		Avi Kivity <avi@redhat.com>	2008-12-25 08:19:00 -0500
committer	Avi Kivity <avi@redhat.com>	2009-03-24 05:02:53 -0400
commit		a461930bc3cece021f8f89a80dcc1d0691a92b52
tree		af6a294cf15f17298059f3efceadd09358bd8fbd	/arch/x86/kvm/paging_tmpl.h
parent		e7a04c99b54ad9acb98a56113ec3163bc1039e13
KVM: MMU: Replace walk_shadow() by for_each_shadow_entry() in invlpg()
Signed-off-by: Avi Kivity <avi@redhat.com>
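
This patch converts FNAME(invlpg) from the callback-based walk_shadow() interface to the open-coded for_each_shadow_entry() iterator, which removes the per-PTTYPE struct shadow_walker and lets the walk keep its state in local variables and stop with a plain break. For readers following along, the sketch below shows the shape of the iterator being adopted; the struct layout and helper prototypes are assumptions paraphrased from the companion patch that introduced for_each_shadow_entry() (they live in arch/x86/kvm/mmu.c and are not part of this diff):

	/* Assumed shape of the iterator used by the new invlpg(); illustration only. */
	struct kvm_shadow_walk_iterator {
		u64 addr;		/* guest address being resolved */
		hpa_t shadow_addr;	/* current shadow page table (host physical) */
		u64 *sptep;		/* pointer to the current shadow pte */
		int level;		/* paging level of *sptep */
		unsigned index;		/* index of sptep within its shadow page */
	};

	/* Assumed helper prototypes; the real ones are static helpers in mmu.c. */
	void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			      struct kvm_vcpu *vcpu, u64 addr);
	bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator);
	void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator);

	#define for_each_shadow_entry(_vcpu, _addr, _walker)		\
		for (shadow_walk_init(&(_walker), (_vcpu), (_addr));	\
		     shadow_walk_okay(&(_walker));			\
		     shadow_walk_next(&(_walker)))

Compared with walk_shadow(), which invoked a kvm_shadow_walk.entry callback at every level and needed container_of() plus a wrapper struct to carry state, the iterator form reads top to bottom inside invlpg() itself; that is why struct shadow_walker and the shadow_walker #define aliases disappear in the hunks below.
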
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	81
1 file changed, 32 insertions(+), 49 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 69c7e3311b8a..46b68f941f60 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -25,7 +25,6 @@
 #if PTTYPE == 64
 	#define pt_element_t u64
 	#define guest_walker guest_walker64
-	#define shadow_walker shadow_walker64
 	#define FNAME(name) paging##64_##name
 	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
 	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
@@ -42,7 +41,6 @@
 #elif PTTYPE == 32
 	#define pt_element_t u32
 	#define guest_walker guest_walker32
-	#define shadow_walker shadow_walker32
 	#define FNAME(name) paging##32_##name
 	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
 	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
@@ -73,18 +71,6 @@ struct guest_walker {
 	u32 error_code;
 };
 
-struct shadow_walker {
-	struct kvm_shadow_walk walker;
-	struct guest_walker *guest_walker;
-	int user_fault;
-	int write_fault;
-	int largepage;
-	int *ptwrite;
-	pfn_t pfn;
-	u64 *sptep;
-	gpa_t pte_gpa;
-};
-
 static gfn_t gpte_to_gfn(pt_element_t gpte)
 {
 	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -453,54 +439,52 @@ out_unlock:
 	return 0;
 }
 
-static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
-				      struct kvm_vcpu *vcpu, u64 addr,
-				      u64 *sptep, int level)
+static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	struct shadow_walker *sw =
-		container_of(_sw, struct shadow_walker, walker);
+	struct kvm_shadow_walk_iterator iterator;
+	pt_element_t gpte;
+	gpa_t pte_gpa = -1;
+	int level;
+	u64 *sptep;
+
+	spin_lock(&vcpu->kvm->mmu_lock);
 
-	/* FIXME: properly handle invlpg on large guest pages */
-	if (level == PT_PAGE_TABLE_LEVEL ||
-	    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
-		struct kvm_mmu_page *sp = page_header(__pa(sptep));
+	for_each_shadow_entry(vcpu, gva, iterator) {
+		level = iterator.level;
+		sptep = iterator.sptep;
 
-		sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
-		sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+		/* FIXME: properly handle invlpg on large guest pages */
+		if (level == PT_PAGE_TABLE_LEVEL ||
+		    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
+			struct kvm_mmu_page *sp = page_header(__pa(sptep));
 
-		if (is_shadow_present_pte(*sptep)) {
-			rmap_remove(vcpu->kvm, sptep);
-			if (is_large_pte(*sptep))
-				--vcpu->kvm->stat.lpages;
+			pte_gpa = (sp->gfn << PAGE_SHIFT);
+			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+
+			if (is_shadow_present_pte(*sptep)) {
+				rmap_remove(vcpu->kvm, sptep);
+				if (is_large_pte(*sptep))
+					--vcpu->kvm->stat.lpages;
+			}
+			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+			break;
 		}
-		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
-		return 1;
-	}
-	if (!is_shadow_present_pte(*sptep))
-		return 1;
-	return 0;
-}
 
-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
-{
-	pt_element_t gpte;
-	struct shadow_walker walker = {
-		.walker = { .entry = FNAME(shadow_invlpg_entry), },
-		.pte_gpa = -1,
-	};
+		if (!is_shadow_present_pte(*sptep))
+			break;
+	}
 
-	spin_lock(&vcpu->kvm->mmu_lock);
-	walk_shadow(&walker.walker, vcpu, gva);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	if (walker.pte_gpa == -1)
+
+	if (pte_gpa == -1)
 		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, walker.pte_gpa, &gpte,
+	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
 				  sizeof(pt_element_t)))
 		return;
 	if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
 		if (mmu_topup_memory_caches(vcpu))
 			return;
-		kvm_mmu_pte_write(vcpu, walker.pte_gpa, (const u8 *)&gpte,
+		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
 				  sizeof(pt_element_t), 0);
 	}
 }
@@ -607,7 +591,6 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 #undef pt_element_t
 #undef guest_walker
-#undef shadow_walker
 #undef FNAME
 #undef PT_BASE_ADDR_MASK
 #undef PT_INDEX