author		James Hogan <james.hogan@imgtec.com>	2016-12-06 09:59:43 -0500
committer	James Hogan <james.hogan@imgtec.com>	2017-02-03 10:21:27 -0500
commit		f9b11e51f89f6d2eca2ca8f41bb0ceb07c943e60 (patch)
tree		c4138efee7e3f28af17d439c8a79c98304b78eaf
parent		b584f460e6d6f1bf968acfcd23aceb663ba996fa (diff)
KVM: MIPS/MMU: Pass GPA PTE bits to mapped GVA PTEs
Propagate the GPA PTE protection bits on to the GVA PTEs on a mapped
fault (except _PAGE_WRITE, and filtered by the guest TLB entry), rather
than always overriding the protection. This allows dirty page tracking
to work in mapped guest segments as a clear dirty bit in the GPA PTE
will propagate to the GVA PTEs even when the guest TLB has the dirty
bit set.

Since the filtering of protection bits is now abstracted, if the buddy
GVA PTE is also valid, we obtain the corresponding GPA PTE using a
simple non-allocating walk and load that into the GVA PTE similarly
(which may itself be invalid).

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
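The "simple non-allocating walk" refers to the way the patch calls
kvm_mips_pte_for_gpa() with a NULL memory cache: with nothing to
allocate intermediate tables from, the walk fails at the first missing
level instead of populating it. Below is a minimal sketch of that
behaviour, assuming a classic pgd/pud/pmd/pte layout; the function name
and layout are illustrative assumptions, not the patch's actual
implementation.

static pte_t *sketch_walk_gpa_pt(pgd_t *pgd_base, unsigned long gpa)
{
	/*
	 * Sketch only: a non-allocating lookup in the GPA page tables,
	 * mirroring what kvm_mips_pte_for_gpa() is assumed to do when
	 * it is passed a NULL memory cache.
	 */
	pgd_t *pgd = pgd_base + pgd_index(gpa);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;			/* nothing to allocate from */
	pud = pud_offset(pgd, gpa);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, gpa);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, gpa);	/* the PTE itself may be invalid */
}

If the walk fails, or the buddy guest TLB entry is not valid,
pte_gpa[!idx] in the diff below keeps its zeroed initial value, so the
buddy GVA PTE is simply written back as not present.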
-rw-r--r--	arch/mips/kvm/mmu.c	56
1 file changed, 37 insertions(+), 19 deletions(-)
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 9cc941864aa8..8a01bbd276fc 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -782,6 +782,15 @@ static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte)
 	return pte;
 }
 
+static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
+{
+	/* Guest EntryLo overrides host EntryLo */
+	if (!(entrylo & ENTRYLO_D))
+		pte = pte_mkclean(pte);
+
+	return kvm_mips_gpa_pte_to_gva_unmapped(pte);
+}
+
 /* XXXKYMA: Must be called with interrupts disabled */
 int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 				    struct kvm_vcpu *vcpu,
@@ -825,39 +834,48 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 					 unsigned long gva,
 					 bool write_fault)
 {
-	kvm_pfn_t pfn;
-	long tlb_lo = 0;
-	pte_t pte_gpa, *ptep_gva;
-	unsigned int idx;
+	struct kvm *kvm = vcpu->kvm;
+	long tlb_lo[2];
+	pte_t pte_gpa[2], *ptep_buddy, *ptep_gva;
+	unsigned int idx = TLB_LO_IDX(*tlb, gva);
 	bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);
 
+	tlb_lo[0] = tlb->tlb_lo[0];
+	tlb_lo[1] = tlb->tlb_lo[1];
+
 	/*
 	 * The commpage address must not be mapped to anything else if the guest
 	 * TLB contains entries nearby, or commpage accesses will break.
 	 */
-	idx = TLB_LO_IDX(*tlb, gva);
-	if ((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & PAGE_MASK)
-		tlb_lo = tlb->tlb_lo[idx];
+	if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
+		tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0;
 
-	/* Find host PFN */
-	if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo), write_fault,
-			      &pte_gpa, NULL) < 0)
+	/* Get the GPA page table entry */
+	if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
+			      write_fault, &pte_gpa[idx], NULL) < 0)
 		return -1;
-	pfn = pte_pfn(pte_gpa);
 
-	/* Find GVA page table entry */
-	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva);
+	/* And its GVA buddy's GPA page table entry if it also exists */
+	pte_gpa[!idx] = pfn_pte(0, __pgprot(0));
+	if (tlb_lo[!idx] & ENTRYLO_V) {
+		spin_lock(&kvm->mmu_lock);
+		ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
+					mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
+		if (ptep_buddy)
+			pte_gpa[!idx] = *ptep_buddy;
+		spin_unlock(&kvm->mmu_lock);
+	}
+
+	/* Get the GVA page table entry pair */
+	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
 	if (!ptep_gva) {
 		kvm_err("No ptep for gva %lx\n", gva);
 		return -1;
 	}
 
-	/* Write PFN into GVA page table, taking attributes from Guest TLB */
-	*ptep_gva = pfn_pte(pfn, (!(tlb_lo & ENTRYLO_V)) ? __pgprot(0) :
-				 (tlb_lo & ENTRYLO_D) ? PAGE_SHARED :
-				 PAGE_READONLY);
-	if (pte_present(*ptep_gva))
-		*ptep_gva = pte_mkyoung(pte_mkdirty(*ptep_gva));
+	/* Copy a pair of entries from GPA page table to GVA page table */
+	ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
+	ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]);
 
 	/* Invalidate this entry in the TLB, current guest mode ASID only */
 	kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);
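A guest TLB entry maps an even/odd pair of virtual pages, which is why
the handler now looks up ptep_gva at gva & ~PAGE_SIZE and writes both
ptep_gva[0] and ptep_gva[1]: the lookup lands on the first PTE of an
adjacent pair. A standalone illustration of the pairing arithmetic
(hypothetical values; assumes 4 KiB pages and that TLB_LO_IDX() selects
the half of the pair by the low page bit of the address):

#include <assert.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define LO_IDX(gva)	(((gva) >> PAGE_SHIFT) & 1)	/* stand-in for TLB_LO_IDX() */

int main(void)
{
	unsigned long gva = 0x401000UL;		/* fault in the odd page of a pair */

	assert(LO_IDX(gva) == 1);		/* faulting half: tlb_lo[1] */
	assert(LO_IDX(gva ^ PAGE_SIZE) == 0);	/* buddy half: tlb_lo[0] */
	assert((gva & ~PAGE_SIZE) == 0x400000UL);	/* even page: base of the pair */
	return 0;
}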