author		James Hogan <james.hogan@imgtec.com>	2016-11-25 19:37:28 -0500
committer	James Hogan <james.hogan@imgtec.com>	2017-02-03 10:21:15 -0500
commit		4cf74c9c83dda79143490d7cc774b7830e257fcd (patch)
tree		9bcf4a6dd67e42274f08ebfb460987d21f4df472
parent		5207ce144a25aef89dd12b8fc3ccaa53aba4f2bd (diff)
KVM: MIPS/Emulate: Use lockless GVA helpers for cache emulation
Use the lockless GVA helpers to implement the emulation of guest cache
operations. This will allow it to handle asynchronous TLB flushes when they
are implemented.

This is a little more complicated than the other two cases (get_inst() and
dynamic translation) due to the need to emulate the appropriate guest TLB
exception when the address isn't present or isn't valid in the guest TLB.

Since there are several protected cache ops that may need to be performed
safely, this is abstracted by kvm_mips_guest_cache_op(), which is passed a
protected cache op function pointer and takes care of the lockless operation
and fault handling / retry if the op should fail, taking advantage of the new
errors which the protected cache ops can now return. This allows the existing
advance fault handling, which relied on host TLB lookups, to be removed,
along with the now unused kvm_mips_host_tlb_lookup().

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
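Note: the heart of this change is the retry loop in kvm_mips_guest_cache_op()
(see the emulate.c hunk below): attempt the protected cache op inside the
lockless GVA section, and if it faults, classify the fault to either bail
out, deliver a guest TLB exception, or simply retry. The stand-alone C sketch
below mirrors only that control flow; the EMULATE_* values come from the
patch, while every other name (stub_cache_op, stub_classify_fault,
guest_cache_op, fault_kind) is a hypothetical stand-in for the KVM internals,
not code from this commit.

#include <stdio.h>

/* Result codes mirroring the patch; everything else here is a stand-in. */
enum emulation_result { EMULATE_DONE, EMULATE_FAIL, EMULATE_EXCEPT };
enum fault_kind { FAULT_NONE, FAULT_BAD_ADDR, FAULT_TLB_MISS, FAULT_TLB_INVALID };

/* Stub "protected cache op": fail once to force one retry, then succeed. */
static int attempts_left = 1;
static int stub_cache_op(unsigned long addr)
{
	(void)addr;
	return attempts_left-- > 0 ? -1 : 0;
}

/* Stub fault classifier: pretend the failure was a benign race. */
static enum fault_kind stub_classify_fault(unsigned long addr)
{
	(void)addr;
	return FAULT_NONE;
}

/* Sketch of the kvm_mips_guest_cache_op() retry pattern. */
static enum emulation_result guest_cache_op(int (*fn)(unsigned long),
					    unsigned long addr)
{
	for (;;) {
		if (!fn(addr))		/* lockless attempt */
			return EMULATE_DONE;

		switch (stub_classify_fault(addr)) {
		case FAULT_BAD_ADDR:
			return EMULATE_FAIL;	/* bad virtual/physical address */
		case FAULT_TLB_MISS:
		case FAULT_TLB_INVALID:
			/* the patch also queues a guest TLB exception here */
			return EMULATE_EXCEPT;
		default:
			break;			/* raced with an invalidation: retry */
		}
	}
}

int main(void)
{
	printf("result = %d\n", guest_cache_op(stub_cache_op, 0x80001000UL));
	return 0;
}

In the real patch the classification is done by kvm_trap_emul_gva_fault(),
and the KVM_MIPS_TLB / KVM_MIPS_TLBINV cases additionally queue the guest
exception via kvm_mips_emulate_tlbmiss_ld() / kvm_mips_emulate_tlbinv_ld()
before returning EMULATE_EXCEPT.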
-rw-r--r--	arch/mips/include/asm/kvm_host.h	2
-rw-r--r--	arch/mips/kvm/emulate.c	148
-rw-r--r--	arch/mips/kvm/tlb.c	35
3 files changed, 66 insertions, 119 deletions
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index c1e46abb5704..33d3d8ac742e 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -226,6 +226,7 @@ enum emulation_result {
 	EMULATE_FAIL,		/* can't emulate this instruction */
 	EMULATE_WAIT,		/* WAIT instruction */
 	EMULATE_PRIV_FAIL,
+	EMULATE_EXCEPT,		/* A guest exception has been generated */
 };
 
 #define mips3_paddr_to_tlbpfn(x) \
@@ -614,7 +615,6 @@ extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
 
 extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
 				     unsigned long entryhi);
-extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
 
 void kvm_mips_suspend_mm(int cpu);
 void kvm_mips_resume_mm(int cpu);
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index b295a4a1496f..40159cf5166b 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1697,12 +1697,56 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
 	return er;
 }
 
+static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
+						     unsigned long curr_pc,
+						     unsigned long addr,
+						     struct kvm_run *run,
+						     struct kvm_vcpu *vcpu,
+						     u32 cause)
+{
+	int err;
+
+	for (;;) {
+		/* Carefully attempt the cache operation */
+		kvm_trap_emul_gva_lockless_begin(vcpu);
+		err = fn(addr);
+		kvm_trap_emul_gva_lockless_end(vcpu);
+
+		if (likely(!err))
+			return EMULATE_DONE;
+
+		/*
+		 * Try to handle the fault and retry, maybe we just raced with a
+		 * GVA invalidation.
+		 */
+		switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) {
+		case KVM_MIPS_GVA:
+		case KVM_MIPS_GPA:
+			/* bad virtual or physical address */
+			return EMULATE_FAIL;
+		case KVM_MIPS_TLB:
+			/* no matching guest TLB */
+			vcpu->arch.host_cp0_badvaddr = addr;
+			vcpu->arch.pc = curr_pc;
+			kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu);
+			return EMULATE_EXCEPT;
+		case KVM_MIPS_TLBINV:
+			/* invalid matching guest TLB */
+			vcpu->arch.host_cp0_badvaddr = addr;
+			vcpu->arch.pc = curr_pc;
+			kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu);
+			return EMULATE_EXCEPT;
+		default:
+			break;
+		};
+	}
+}
+
 enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
 					     u32 *opc, u32 cause,
 					     struct kvm_run *run,
 					     struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	enum emulation_result er = EMULATE_DONE;
 	u32 cache, op_inst, op, base;
 	s16 offset;
@@ -1759,81 +1803,16 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
 		goto done;
 	}
 
-	preempt_disable();
-	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
-		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
-		    kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
-			kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
-				__func__, va, vcpu, read_c0_entryhi());
-			er = EMULATE_FAIL;
-			preempt_enable();
-			goto done;
-		}
-	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
-		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
-		int index;
-
-		/* If an entry already exists then skip */
-		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
-			goto skip_fault;
-
-		/*
-		 * If address not in the guest TLB, then give the guest a fault,
-		 * the resulting handler will do the right thing
-		 */
-		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
-						  (kvm_read_c0_guest_entryhi
-						   (cop0) & KVM_ENTRYHI_ASID));
-
-		if (index < 0) {
-			vcpu->arch.host_cp0_badvaddr = va;
-			vcpu->arch.pc = curr_pc;
-			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
-							 vcpu);
-			preempt_enable();
-			goto dont_update_pc;
-		} else {
-			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
-			/*
-			 * Check if the entry is valid, if not then setup a TLB
-			 * invalid exception to the guest
-			 */
-			if (!TLB_IS_VALID(*tlb, va)) {
-				vcpu->arch.host_cp0_badvaddr = va;
-				vcpu->arch.pc = curr_pc;
-				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
-								run, vcpu);
-				preempt_enable();
-				goto dont_update_pc;
-			}
-			/*
-			 * We fault an entry from the guest tlb to the
-			 * shadow host TLB
-			 */
-			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
-								 va)) {
-				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
-					__func__, va, index, vcpu,
-					read_c0_entryhi());
-				er = EMULATE_FAIL;
-				preempt_enable();
-				goto done;
-			}
-		}
-	} else {
-		kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
-			cache, op, base, arch->gprs[base], offset);
-		er = EMULATE_FAIL;
-		preempt_enable();
-		goto done;
-
-	}
-
-skip_fault:
 	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
 	if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
-		protected_writeback_dcache_line(va);
-
+		/*
+		 * Perform the dcache part of icache synchronisation on the
+		 * guest's behalf.
+		 */
+		er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
+					     curr_pc, va, run, vcpu, cause);
+		if (er != EMULATE_DONE)
+			goto done;
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
 		/*
 		 * Replace the CACHE instruction, with a SYNCI, not the same,
@@ -1842,8 +1821,15 @@ skip_fault:
 		kvm_mips_trans_cache_va(inst, opc, vcpu);
 #endif
 	} else if (op_inst == Hit_Invalidate_I) {
-		protected_writeback_dcache_line(va);
-		protected_flush_icache_line(va);
+		/* Perform the icache synchronisation on the guest's behalf */
+		er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
+					     curr_pc, va, run, vcpu, cause);
+		if (er != EMULATE_DONE)
+			goto done;
+		er = kvm_mips_guest_cache_op(protected_flush_icache_line,
+					     curr_pc, va, run, vcpu, cause);
+		if (er != EMULATE_DONE)
+			goto done;
 
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
 		/* Replace the CACHE instruction, with a SYNCI */
@@ -1855,17 +1841,13 @@ skip_fault:
 		er = EMULATE_FAIL;
 	}
 
-	preempt_enable();
 done:
 	/* Rollback PC only if emulation was unsuccessful */
 	if (er == EMULATE_FAIL)
 		vcpu->arch.pc = curr_pc;
-
-dont_update_pc:
-	/*
-	 * This is for exceptions whose emulation updates the PC, so do not
-	 * overwrite the PC under any circumstances
-	 */
+	/* Guest exception needs guest to resume */
+	if (er == EMULATE_EXCEPT)
+		er = EMULATE_DONE;
 
 	return er;
 }
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index cee2e9feb942..2819eb793345 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -117,41 +117,6 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 }
 EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
 
-int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
-{
-	unsigned long old_entryhi, flags;
-	int idx;
-
-	local_irq_save(flags);
-
-	old_entryhi = read_c0_entryhi();
-
-	if (KVM_GUEST_KERNEL_MODE(vcpu))
-		write_c0_entryhi((vaddr & VPN2_MASK) |
-				 kvm_mips_get_kernel_asid(vcpu));
-	else {
-		write_c0_entryhi((vaddr & VPN2_MASK) |
-				 kvm_mips_get_user_asid(vcpu));
-	}
-
-	mtc0_tlbw_hazard();
-
-	tlb_probe();
-	tlb_probe_hazard();
-	idx = read_c0_index();
-
-	/* Restore old ASID */
-	write_c0_entryhi(old_entryhi);
-	mtc0_tlbw_hazard();
-
-	local_irq_restore(flags);
-
-	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
-
-	return idx;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);
-
 static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 {
 	int idx;