author    Avi Kivity <avi@qumranet.com>  2008-01-24 04:44:11 -0500
committer Avi Kivity <avi@qumranet.com>  2008-04-27 04:53:17 -0400
commit    d196e343361c229496adeda42335856da9d057de (patch)
tree      167f9f75636769efdc9fa9dd32883b5c780a4c12 /arch/x86/kvm/paging_tmpl.h
parent    1d6ad2073e5354912291277c606a57fd37330f04 (diff)
KVM: MMU: Decouple mmio from shadow page tables
Currently an mmio guest pte is encoded in the shadow pagetable as a
not-present trapping pte, with the SHADOW_IO_MARK bit set.  However
nothing is ever done with this information, so maintaining it is a
useless complication.

This patch moves the check for mmio to before shadow ptes are
instantiated, so the shadow code is never invoked for ptes that
reference mmio.  The code is simpler, and with future work, can be
made to handle mmio concurrently.

Signed-off-by: Avi Kivity <avi@qumranet.com>
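Read as a whole, the change makes the fault path classify the gfn before any
shadow state exists.  A minimal sketch of the resulting flow, assuming the
era's convention that a positive return from the mmu page-fault handler asks
the caller to emulate the access (locking and surrounding function body
elided, so this is illustrative rather than verbatim kernel code):

	page = gfn_to_page(vcpu->kvm, walker.gfn);  /* look up backing RAM */

	if (is_error_page(page)) {          /* no memslot backs this gfn: mmio */
		kvm_release_page_clean(page);   /* drop the error-page reference */
		return 1;                       /* positive: caller emulates the access */
	}

	/* only ordinary RAM reaches the shadow MMU from here on */
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker,
				  user_fault, write_fault, &write_pt, page);

Because mmio gfns now bail out before FNAME(fetch), the shadow page tables
never see them, which is what lets the later is_io_pte() check be deleted.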
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index c2fd2b96144f..4b55f462e2b3 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -399,6 +399,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	page = gfn_to_page(vcpu->kvm, walker.gfn);
 	up_read(&current->mm->mmap_sem);
 
+	/* mmio */
+	if (is_error_page(page)) {
+		pgprintk("gfn %x is mmio\n", walker.gfn);
+		kvm_release_page_clean(page);
+		up_read(&vcpu->kvm->slots_lock);
+		return 1;
+	}
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
@@ -409,15 +417,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (!write_pt)
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 
-	/*
-	 * mmio: emulate if accessible, otherwise its a guest fault.
-	 */
-	if (shadow_pte && is_io_pte(*shadow_pte)) {
-		spin_unlock(&vcpu->kvm->mmu_lock);
-		up_read(&vcpu->kvm->slots_lock);
-		return 1;
-	}
-
 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, "post page fault (fixed)");
 	spin_unlock(&vcpu->kvm->mmu_lock);