author    James Hogan <james.hogan@imgtec.com>  2016-08-19 10:27:22 -0400
committer James Hogan <james.hogan@imgtec.com>  2017-02-03 10:21:01 -0500
commit    dacc3ed1dd608ff9553dcede6cd05369030ed099
tree      2f48a1a472f67bd244d70a08fffc56e3770e66eb
parent    7a156e9f822d2eb6c294226aea2a4c12c05caa10
KVM: MIPS: Use uaccess to read/modify guest instructions
Now that we have GVA page tables, use standard user accesses with page
faults disabled to read & modify guest instructions. This should be more
robust (than the rather dodgy method of accessing guest mapped segments by
just directly addressing them) and will also work with Enhanced Virtual
Addressing (EVA) host kernel configurations where dedicated instructions
are needed for accessing user mode memory.

For simplicity and speed we do this regardless of the guest segment the
address resides in, rather than handling guest KSeg0 specially with
kmap_atomic() as before.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
 arch/mips/include/asm/kvm_host.h |  2
 arch/mips/kvm/dyntrans.c         | 28
 arch/mips/kvm/mmu.c              | 77
 arch/mips/kvm/trap_emul.c        |  9
 4 files changed, 22 insertions(+), 94 deletions(-)
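For readers skimming the diff below, here is the guest-access pattern it
converges on, sketched in isolation. This is an illustrative sketch only:
read_guest_inst() is a hypothetical helper, and in the patch itself
pagefault_disable()/pagefault_enable() are called once around guest
execution in kvm_trap_emul_vcpu_run() rather than around each access.

/*
 * Minimal sketch of the uaccess pattern, assuming the guest's GVA page
 * tables are already loaded. read_guest_inst() is hypothetical and for
 * illustration only; it is not part of the patch.
 */
#include <linux/types.h>
#include <linux/uaccess.h>

static int read_guest_inst(u32 __user *opc, u32 *out)
{
	int err;

	pagefault_disable();		/* fail fast instead of faulting in */
	err = get_user(*out, opc);	/* read through the GVA page tables */
	pagefault_enable();

	return err;			/* 0 on success, -EFAULT if unmapped */
}

With faulting disabled, get_user() returns -EFAULT for an unmapped GVA,
which callers such as kvm_get_inst() below turn into KVM_INVALID_INST.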
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 95c86dab9b1b..a26504bee21c 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -639,8 +639,6 @@ void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
 				  bool user);
-extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
-						   unsigned long gva);
 extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 				    struct kvm_vcpu *vcpu);
 extern void kvm_local_flush_tlb_all(void);
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
index 010cef240688..60ebf5862d2b 100644
--- a/arch/mips/kvm/dyntrans.c
+++ b/arch/mips/kvm/dyntrans.c
@@ -13,6 +13,7 @@
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/kvm_host.h>
+#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <linux/bootmem.h>
@@ -29,28 +30,15 @@
 static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
 				  union mips_instruction replace)
 {
-	unsigned long paddr, flags;
-	void *vaddr;
+	unsigned long vaddr = (unsigned long)opc;
+	int err;
 
-	if (KVM_GUEST_KSEGX((unsigned long)opc) == KVM_GUEST_KSEG0) {
-		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
-							      (unsigned long)opc);
-		vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
-		vaddr += paddr & ~PAGE_MASK;
-		memcpy(vaddr, (void *)&replace, sizeof(u32));
-		local_flush_icache_range((unsigned long)vaddr,
-					 (unsigned long)vaddr + 32);
-		kunmap_atomic(vaddr);
-	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
-		local_irq_save(flags);
-		memcpy((void *)opc, (void *)&replace, sizeof(u32));
-		__local_flush_icache_user_range((unsigned long)opc,
-						(unsigned long)opc + 32);
-		local_irq_restore(flags);
-	} else {
+	err = put_user(replace.word, opc);
+	if (unlikely(err)) {
 		kvm_err("%s: Invalid address: %p\n", __func__, opc);
-		return -EFAULT;
+		return err;
 	}
+	__local_flush_icache_user_range(vaddr, vaddr + 4);
 
 	return 0;
 }
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 98f1a7715a68..c4e9c65065ea 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -11,6 +11,7 @@
 
 #include <linux/highmem.h>
 #include <linux/kvm_host.h>
+#include <linux/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 
@@ -134,34 +135,6 @@ out:
 	return err;
 }
 
-/* Translate guest KSEG0 addresses to Host PA */
-unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
-						    unsigned long gva)
-{
-	gfn_t gfn;
-	unsigned long offset = gva & ~PAGE_MASK;
-	struct kvm *kvm = vcpu->kvm;
-
-	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
-		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
-			__builtin_return_address(0), gva);
-		return KVM_INVALID_PAGE;
-	}
-
-	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
-
-	if (gfn >= kvm->arch.guest_pmap_npages) {
-		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
-			gva);
-		return KVM_INVALID_PAGE;
-	}
-
-	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
-		return KVM_INVALID_ADDR;
-
-	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
-}
-
 static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
 					unsigned long addr)
 {
@@ -551,51 +524,11 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
 u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	unsigned long paddr, flags, vpn2, asid;
-	unsigned long va = (unsigned long)opc;
-	void *vaddr;
 	u32 inst;
-	int index;
+	int err;
 
-	if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
-	    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
-		local_irq_save(flags);
-		index = kvm_mips_host_tlb_lookup(vcpu, va);
-		if (index >= 0) {
-			inst = *(opc);
-		} else {
-			vpn2 = va & VPN2_MASK;
-			asid = kvm_read_c0_guest_entryhi(cop0) &
-				KVM_ENTRYHI_ASID;
-			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
-			if (index < 0) {
-				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
-					__func__, opc, vcpu, read_c0_entryhi());
-				kvm_mips_dump_host_tlbs();
-				kvm_mips_dump_guest_tlbs(vcpu);
-				local_irq_restore(flags);
-				return KVM_INVALID_INST;
-			}
-			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
-						&vcpu->arch.guest_tlb[index], va)) {
-				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
-					__func__, opc, index, vcpu,
-					read_c0_entryhi());
-				kvm_mips_dump_guest_tlbs(vcpu);
-				local_irq_restore(flags);
-				return KVM_INVALID_INST;
-			}
-			inst = *(opc);
-		}
-		local_irq_restore(flags);
-	} else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
-		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
-		vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
-		vaddr += paddr & ~PAGE_MASK;
-		inst = *(u32 *)vaddr;
-		kunmap_atomic(vaddr);
-	} else {
+	err = get_user(inst, opc);
+	if (unlikely(err)) {
 		kvm_err("%s: illegal address: %p\n", __func__, opc);
 		return KVM_INVALID_INST;
 	}
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 8bb82eaa4c91..ee8b5ad8c7c5 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -12,6 +12,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
+#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
@@ -798,6 +799,12 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	kvm_trap_emul_vcpu_reenter(run, vcpu);
 
+	/*
+	 * We use user accessors to access guest memory, but we don't want to
+	 * invoke Linux page faulting.
+	 */
+	pagefault_disable();
+
 	/* Disable hardware page table walking while in guest */
 	htw_stop();
 
@@ -823,6 +830,8 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	htw_start();
 
+	pagefault_enable();
+
 	return r;
 }
 