author    Marcelo Tosatti <mtosatti@redhat.com>  2009-12-05 09:34:11 -0500
committer Marcelo Tosatti <mtosatti@redhat.com>  2009-12-27 10:36:30 -0500
commit    fb341f572d26e0786167cd96b90cc4febed830cf (patch)
tree      2bf327861e4da2745b66c2dda03c5c5b0afcf7b6 /arch/x86/kvm
parent    6b7b284958d47b77d06745b36bc7f36dab769d9b (diff)
KVM: MMU: remove prefault from invlpg handler
The invlpg prefault optimization breaks Windows 2008 R2 occasionally.

The visible effect is that the invlpg handler instantiates a pte which is, microseconds later, written with a different gfn by another vcpu.

The OS could have other mechanisms to prevent a present translation from being used, which the hypervisor is unaware of.

While the documentation states that the cpu is at liberty to prefetch tlb entries, it looks like this is not heeded, so remove tlb prefetch from invlpg.

Cc: stable@kernel.org
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
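For context, a minimal annotated sketch of the prefault tail this patch deletes, reconstructed from the diff below (simplified, not the exact kernel source; the comments are annotations added here to mark the race window):

	/* Tail of FNAME(invlpg) before this patch. */
	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)		/* no leaf spte was zapped above */
		return;

	/*
	 * Race window: mmu_lock has been dropped, so another vcpu can
	 * rewrite the guest pte at pte_gpa with a different gfn between
	 * here and the prefault below.
	 */
	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
				  sizeof(pt_element_t)))
		return;

	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
		if (mmu_topup_memory_caches(vcpu))
			return;
		/*
		 * Prefault: re-instantiate the (possibly already stale)
		 * translation instead of waiting for the next #PF.
		 */
		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
				  sizeof(pt_element_t), 0);
	}

With this tail removed, the next guest access to the invalidated address simply takes an ordinary shadow page fault, which rebuilds the translation from the guest pte as it stands at fault time.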
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 18 ------------------
1 file changed, 0 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a6017132fba..58a0f1e8859 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -455,8 +455,6 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	pt_element_t gpte;
-	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
 	int need_flush = 0;
@@ -470,10 +468,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		if (level == PT_PAGE_TABLE_LEVEL ||
 		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
 		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT);
-			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
 			if (is_shadow_present_pte(*sptep)) {
 				rmap_remove(vcpu->kvm, sptep);
@@ -492,18 +486,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	if (need_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (pte_gpa == -1)
-		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-				  sizeof(pt_element_t)))
-		return;
-	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
-		if (mmu_topup_memory_caches(vcpu))
-			return;
-		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
-				  sizeof(pt_element_t), 0);
-	}
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)