author     Marcelo Tosatti <mtosatti@redhat.com>  2008-09-23 12:18:39 -0400
committer  Avi Kivity <avi@redhat.com>            2008-10-15 08:25:25 -0400
commit     4731d4c7a07769cf2926c327177b97bb8c68cafc
tree       c732e9de4dbb35c74c158962771b6804dd8db153  /arch/x86/kvm/x86.c
parent     6844dec6948679d084f054235fee19ba4e3a3096
KVM: MMU: out of sync shadow core
Allow guest pagetables to go out of sync. Instead of emulating write
accesses to guest pagetables, or unshadowing them, we un-write-protect
the page table and allow the guest to modify it at will. We rely on
invlpg executions to synchronize individual ptes, and will synchronize
the entire pagetable on tlb flushes.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
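The resynchronization is driven through KVM's per-vCPU request bits: once shadow pages are allowed to go out of sync, a full resync is requested by setting KVM_REQ_MMU_SYNC, and the request is serviced by kvm_mmu_sync_roots() on the next entry into the guest, as the second hunk below shows. As a rough illustration only, here is a minimal, self-contained user-space C sketch of that request-bit pattern; the names struct vcpu, REQ_MMU_SYNC, sync_roots() and enter_guest() are stand-ins, not the kernel's actual interfaces beyond what the diff itself shows.

    /*
     * User-space model of KVM's per-vCPU request bits: a writer raises a
     * request, and the guest-entry path services it exactly once before
     * running the guest.  Only the pattern mirrors the patch; all names
     * here are illustrative.
     */
    #include <stdio.h>
    #include <stdbool.h>

    #define REQ_MMU_SYNC   0   /* stand-in for KVM_REQ_MMU_SYNC  */
    #define REQ_TLB_FLUSH  1   /* stand-in for KVM_REQ_TLB_FLUSH */

    struct vcpu {
        unsigned long requests;        /* one bit per pending request */
    };

    /* The kernel uses the atomic test_and_clear_bit(); this is a plain model. */
    static bool test_and_clear(unsigned long *word, int bit)
    {
        bool was_set = *word & (1UL << bit);
        *word &= ~(1UL << bit);
        return was_set;
    }

    static void sync_roots(struct vcpu *v) { puts("resync unsync shadow pages"); }
    static void tlb_flush(struct vcpu *v)  { puts("flush guest TLB"); }

    /* Mirrors the ordering in vcpu_enter_guest(): sync roots, then flush. */
    static void enter_guest(struct vcpu *v)
    {
        if (v->requests) {
            if (test_and_clear(&v->requests, REQ_MMU_SYNC))
                sync_roots(v);
            if (test_and_clear(&v->requests, REQ_TLB_FLUSH))
                tlb_flush(v);
        }
        puts("enter guest");
    }

    int main(void)
    {
        struct vcpu v = { .requests = 0 };

        /* A guest TLB flush makes the whole pagetable suspect: request a sync. */
        v.requests |= 1UL << REQ_MMU_SYNC;
        v.requests |= 1UL << REQ_TLB_FLUSH;

        enter_guest(&v);   /* services both requests, then "enters" the guest */
        enter_guest(&v);   /* nothing pending this time */
        return 0;
    }

Servicing the sync request before the TLB flush matters: the shadow pages must be brought back in sync before the stale translations are dropped, which is why the new KVM_REQ_MMU_SYNC check is inserted ahead of the existing KVM_REQ_TLB_FLUSH check in the hunk below.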
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--    arch/x86/kvm/x86.c    3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index efee85ba07e5..1c5864ac0837 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -101,6 +101,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
101 { "mmu_flooded", VM_STAT(mmu_flooded) }, 101 { "mmu_flooded", VM_STAT(mmu_flooded) },
102 { "mmu_recycled", VM_STAT(mmu_recycled) }, 102 { "mmu_recycled", VM_STAT(mmu_recycled) },
103 { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, 103 { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
104 { "mmu_unsync", VM_STAT(mmu_unsync) },
104 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, 105 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
105 { "largepages", VM_STAT(lpages) }, 106 { "largepages", VM_STAT(lpages) },
106 { NULL } 107 { NULL }
@@ -3120,6 +3121,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (vcpu->requests) {
 		if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
 			__kvm_migrate_timers(vcpu);
+		if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
+			kvm_mmu_sync_roots(vcpu);
 		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 			kvm_x86_ops->tlb_flush(vcpu);
 		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,