author    Marcelo Tosatti <mtosatti@redhat.com>    2008-12-22 15:49:30 -0500
committer Avi Kivity <avi@redhat.com>              2008-12-31 09:55:49 -0500
commit    87917239204d67a316cb89751750f86c9ed3640b
tree      f766f4ad19f08d16b87faa1de3c9ead2e6b3ea1e /arch/x86/kvm
parent    3f353858c98dbe0240dac558a89870f4600f81bb
KVM: MMU: handle large host sptes on invlpg/resync
The invlpg and sync walkers lack knowledge of large host sptes,
descending to a non-existent pagetable level.
Stop at the directory level in that case.
Fixes SMP Windows XP with hugepages.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
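
The shape of the fix can be sketched as follows. This is an illustrative reconstruction, not the literal kernel code: PT_PAGE_TABLE_LEVEL, PT_DIRECTORY_LEVEL and is_large_pte() are real KVM MMU names of this era, while walk_should_stop() is a hypothetical helper named here only for exposition.

/*
 * Illustrative sketch of the termination rule this patch adds to the
 * invlpg/sync shadow walkers -- not the literal kernel code.
 * walk_should_stop() is a hypothetical helper for exposition.
 */
static int walk_should_stop(u64 spte, int level)
{
	/* A 4K leaf spte: the walk always ends here. */
	if (level == PT_PAGE_TABLE_LEVEL)
		return 1;

	/*
	 * A large host spte at the directory level is also a leaf: it
	 * maps a huge page directly, so there is no lower-level page
	 * table to descend into. Before this patch the walkers kept
	 * descending into a non-existent table.
	 */
	if (level == PT_DIRECTORY_LEVEL && is_large_pte(spte))
		return 1;

	return 0;
}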
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/mmu.c         | 2
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 9
2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d50ebac6a07f..83f11c7474a1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1007,7 +1007,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 	for_each_unsync_children(sp->unsync_child_bitmap, i) {
 		u64 ent = sp->spt[i];
 
-		if (is_shadow_present_pte(ent)) {
+		if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
 			struct kvm_mmu_page *child;
 			child = page_header(ent & PT64_BASE_ADDR_MASK);
 
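The mmu.c change matters because __mmu_unsync_walk() recurses by treating each present spte as a pointer to a child shadow page; a large spte instead maps a huge host page directly, so calling page_header() on its address would yield something that is not a shadow page table. The added !is_large_pte(ent) test stops the unsync walk at the directory level, as the commit message describes.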
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index d20640154216..9fd78b6e17ad 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -472,14 +472,19 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
 	struct shadow_walker *sw =
 		container_of(_sw, struct shadow_walker, walker);
 
-	if (level == PT_PAGE_TABLE_LEVEL) {
+	/* FIXME: properly handle invlpg on large guest pages */
+	if (level == PT_PAGE_TABLE_LEVEL ||
+	    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
 		struct kvm_mmu_page *sp = page_header(__pa(sptep));
 
 		sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
 		sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-		if (is_shadow_present_pte(*sptep))
+		if (is_shadow_present_pte(*sptep)) {
 			rmap_remove(vcpu->kvm, sptep);
+			if (is_large_pte(*sptep))
+				--vcpu->kvm->stat.lpages;
+		}
 		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
 		return 1;
 	}
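Note the accounting side effect in this hunk: when invlpg zaps a large spte, the new --vcpu->kvm->stat.lpages keeps the large-page statistic in step with the number of live large sptes, in the same way the other large-spte teardown paths in the shadow MMU decrement it.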