author     James Hogan <james.hogan@imgtec.com>  2016-12-06 09:57:10 -0500
committer  James Hogan <james.hogan@imgtec.com>  2017-02-03 10:21:25 -0500
commit     b5f1dd1ba4042bda191cd2e72726c920e6c2867f (patch)
tree       def98c0295d188d63344faa616f02e5f78e28129
parent     a1ac9e17b7c934666a780772866135b9fea17f4c (diff)
KVM: MIPS/MMU: Handle dirty logging on GPA faults
Update kvm_mips_map_page() to handle logging of dirty guest physical
pages. Upcoming patches will propagate the dirty bit to the GVA page
tables.

A fast path is added for handling protection bits that can be resolved
without calling into KVM, currently just dirtying of clean pages being
written to.

The slow path marks the GPA page table entry writable only on writes,
and at the same time marks the page dirty in the dirty page logging
bitmask.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
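For context, the bitmap that mark_page_dirty() updates in this patch is the per-memslot dirty log that userspace retrieves with the KVM_GET_DIRTY_LOG ioctl once the slot has been registered with the KVM_MEM_LOG_DIRTY_PAGES flag. Below is a minimal userspace sketch of that consumer side; it is illustrative only, and the vm_fd, slot number, page count, and dump_dirty_log() helper are assumptions, not part of this patch.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative sketch: fetch and print the dirty page bitmap for one
 * memslot. Assumes vm_fd is a KVM VM fd and the slot was registered
 * with KVM_MEM_LOG_DIRTY_PAGES, so GPA write faults handled by
 * kvm_mips_map_page() end up setting bits here via mark_page_dirty().
 */
static int dump_dirty_log(int vm_fd, uint32_t slot, uint64_t npages)
{
        /* Bitmap is one bit per page, rounded up to 64-bit words */
        size_t bitmap_bytes = ((npages + 63) / 64) * 8;
        uint64_t *bitmap = calloc(1, bitmap_bytes);
        struct kvm_dirty_log log = {
                .slot = slot,
                .dirty_bitmap = bitmap,
        };
        uint64_t i;

        if (!bitmap)
                return -1;

        /* Fetch (and clear) the slot's dirty bitmap from KVM */
        if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
                perror("KVM_GET_DIRTY_LOG");
                free(bitmap);
                return -1;
        }

        for (i = 0; i < npages; i++)
                if (bitmap[i / 64] & (1ULL << (i % 64)))
                        printf("page %llu dirty\n", (unsigned long long)i);

        free(bitmap);
        return 0;
}

When the log is fetched, the kvm_arch_mmu_enable_log_dirty_pt_masked() implementation visible in the hunk context below is expected to re-protect the reported pages, so subsequent guest writes fault again and are re-marked dirty by the paths added here.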
-rw-r--r--  arch/mips/kvm/mmu.c  74
1 file changed, 70 insertions, 4 deletions
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 63a6d542ecb3..7962eea4ebc3 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -451,6 +451,58 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 }
 
 /**
+ * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
+ * @vcpu:        VCPU pointer.
+ * @gpa:         Guest physical address of fault.
+ * @write_fault: Whether the fault was due to a write.
+ * @out_entry:   New PTE for @gpa (written on success unless NULL).
+ * @out_buddy:   New PTE for @gpa's buddy (written on success unless
+ *               NULL).
+ *
+ * Perform fast path GPA fault handling, doing all that can be done without
+ * calling into KVM. This handles dirtying of clean pages (for dirty page
+ * logging).
+ *
+ * Returns:      0 on success, in which case we can update derived mappings and
+ *               resume guest execution.
+ *               -EFAULT on failure due to absent GPA mapping or write to
+ *               read-only page, in which case KVM must be consulted.
+ */
+static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
+                                   bool write_fault,
+                                   pte_t *out_entry, pte_t *out_buddy)
+{
+        struct kvm *kvm = vcpu->kvm;
+        gfn_t gfn = gpa >> PAGE_SHIFT;
+        pte_t *ptep;
+        int ret = 0;
+
+        spin_lock(&kvm->mmu_lock);
+
+        /* Fast path - just check GPA page table for an existing entry */
+        ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
+        if (!ptep || !pte_present(*ptep)) {
+                ret = -EFAULT;
+                goto out;
+        }
+
+        if (write_fault && !pte_dirty(*ptep)) {
+                /* Track dirtying of pages */
+                set_pte(ptep, pte_mkdirty(*ptep));
+                mark_page_dirty(kvm, gfn);
+        }
+
+        if (out_entry)
+                *out_entry = *ptep;
+        if (out_buddy)
+                *out_buddy = *ptep_buddy(ptep);
+
+out:
+        spin_unlock(&kvm->mmu_lock);
+        return ret;
+}
+
+/**
  * kvm_mips_map_page() - Map a guest physical page.
  * @vcpu:        VCPU pointer.
  * @gpa:         Guest physical address of fault.
@@ -462,9 +514,9 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
  * Handle GPA faults by creating a new GPA mapping (or updating an existing
  * one).
  *
- * This takes care of asking KVM for the corresponding PFN, and creating a
- * mapping in the GPA page tables. Derived mappings (GVA page tables and TLBs)
- * must be handled by the caller.
+ * This takes care of marking pages dirty (dirty page tracking), asking KVM for
+ * the corresponding PFN, and creating a mapping in the GPA page tables. Derived
+ * mappings (GVA page tables and TLBs) must be handled by the caller.
  *
  * Returns:      0 on success, in which case the caller may use the @out_entry
  *               and @out_buddy PTEs to update derived mappings and resume guest
@@ -485,7 +537,12 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
         pte_t *ptep, entry, old_pte;
         unsigned long prot_bits;
 
+        /* Try the fast path to handle clean pages */
         srcu_idx = srcu_read_lock(&kvm->srcu);
+        err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
+                                      out_buddy);
+        if (!err)
+                goto out;
 
         /* We need a minimum of cached pages ready for page table creation */
         err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
@@ -493,6 +550,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
         if (err)
                 goto out;
 
+        /* Slow path - ask KVM core whether we can access this GPA */
         pfn = gfn_to_pfn(kvm, gfn);
 
         if (is_error_noslot_pfn(pfn)) {
@@ -502,11 +560,19 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 
         spin_lock(&kvm->mmu_lock);
 
+        /* Ensure page tables are allocated */
         ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);
 
-        prot_bits = __READABLE | _PAGE_PRESENT | __WRITEABLE;
+        /* Set up the PTE */
+        prot_bits = __READABLE | _PAGE_PRESENT | _PAGE_WRITE |
+                    _page_cachable_default;
+        if (write_fault) {
+                prot_bits |= __WRITEABLE;
+                mark_page_dirty(kvm, gfn);
+        }
         entry = pfn_pte(pfn, __pgprot(prot_bits));
 
+        /* Write the PTE */
         old_pte = *ptep;
         set_pte(ptep, entry);
         if (pte_present(old_pte))