-rw-r--r--	arch/mips/include/asm/kvm_host.h	17
-rw-r--r--	arch/mips/kvm/emulate.c			78
-rw-r--r--	arch/mips/kvm/mips.c			30
-rw-r--r--	arch/mips/kvm/mmu.c			16
-rw-r--r--	arch/mips/kvm/trap_emul.c		18
5 files changed, 143 insertions, 16 deletions
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 5f488dc8a7d5..07f58cfc1ab9 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -107,6 +107,20 @@
 #define KVM_INVALID_INST		0xdeadbeef
 #define KVM_INVALID_ADDR		0xdeadbeef
 
+/*
+ * EVA has overlapping user & kernel address spaces, so user VAs may be >
+ * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
+ * PAGE_OFFSET.
+ */
+
+#define KVM_HVA_ERR_BAD		(-1UL)
+#define KVM_HVA_ERR_RO_BAD	(-2UL)
+
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+	return IS_ERR_VALUE(addr);
+}
+
 extern atomic_t kvm_mips_instance;
 
 struct kvm_vm_stat {
@@ -314,6 +328,9 @@ struct kvm_vcpu_arch {
 	u32 guest_kernel_asid[NR_CPUS];
 	struct mm_struct guest_kernel_mm, guest_user_mm;
 
+	/* Guest ASID of last user mode execution */
+	unsigned int last_user_gasid;
+
 	int last_sched_cpu;
 
 	/* WAIT executed */
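
The kvm_host.h hunk above works because Linux reserves the top page of the address space for error codes: IS_ERR_VALUE() treats any value in the last 4095 bytes as an error, so (-1UL) and (-2UL) both test as bad HVAs while any real user VA, even one above PAGE_OFFSET under EVA, does not. A minimal userspace sketch of that convention, with MAX_ERRNO and IS_ERR_VALUE() as simplified stand-ins for the kernel's err.h macros:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's err.h definitions. */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

#define KVM_HVA_ERR_BAD		(-1UL)
#define KVM_HVA_ERR_RO_BAD	(-2UL)

static bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

int main(void)
{
	/* Both error markers land in the reserved top page... */
	printf("ERR_BAD:    %d\n", kvm_is_error_hva(KVM_HVA_ERR_BAD));
	printf("ERR_RO_BAD: %d\n", kvm_is_error_hva(KVM_HVA_ERR_RO_BAD));
	/* ...while a user VA above a 0x80000000 PAGE_OFFSET stays valid. */
	printf("0x80001000: %d\n", kvm_is_error_hva(0x80001000UL));
	return 0;
}
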
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index e788515f766b..4db4c0370859 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -846,6 +846,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 	return EMULATE_FAIL;
 }
 
+/**
+ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
+ * @vcpu:	VCPU with changed mappings.
+ * @tlb:	TLB entry being removed.
+ *
+ * This is called to indicate a single change in guest MMU mappings, so that we
+ * can arrange TLB flushes on this and other CPUs.
+ */
+static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
+					  struct kvm_mips_tlb *tlb)
+{
+	int cpu, i;
+	bool user;
+
+	/* No need to flush for entries which are already invalid */
+	if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
+		return;
+	/* User address space doesn't need flushing for KSeg2/3 changes */
+	user = tlb->tlb_hi < KVM_GUEST_KSEG0;
+
+	preempt_disable();
+
+	/*
+	 * Probe the shadow host TLB for the entry being overwritten, if one
+	 * matches, invalidate it
+	 */
+	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+	/* Invalidate the whole ASID on other CPUs */
+	cpu = smp_processor_id();
+	for_each_possible_cpu(i) {
+		if (i == cpu)
+			continue;
+		if (user)
+			vcpu->arch.guest_user_asid[i] = 0;
+		vcpu->arch.guest_kernel_asid[i] = 0;
+	}
+
+	preempt_enable();
+}
+
 /* Write Guest TLB Entry @ Index */
 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 {
@@ -865,11 +906,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 	}
 
 	tlb = &vcpu->arch.guest_tlb[index];
-	/*
-	 * Probe the shadow host TLB for the entry being overwritten, if one
-	 * matches, invalidate it
-	 */
-	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -898,11 +936,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 
 	tlb = &vcpu->arch.guest_tlb[index];
 
-	/*
-	 * Probe the shadow host TLB for the entry being overwritten, if one
-	 * matches, invalidate it
-	 */
-	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -1026,6 +1060,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
 	enum emulation_result er = EMULATE_DONE;
 	u32 rt, rd, sel;
 	unsigned long curr_pc;
+	int cpu, i;
 
 	/*
 	 * Update PC and hold onto current PC in case there is
@@ -1127,16 +1162,31 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
 		} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
 			u32 nasid =
 				vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
-			if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
-			    ((kvm_read_c0_guest_entryhi(cop0) &
+			if (((kvm_read_c0_guest_entryhi(cop0) &
 			      KVM_ENTRYHI_ASID) != nasid)) {
 				trace_kvm_asid_change(vcpu,
 					kvm_read_c0_guest_entryhi(cop0)
 						& KVM_ENTRYHI_ASID,
 					nasid);
 
-				/* Blow away the shadow host TLBs */
-				kvm_mips_flush_host_tlb(1);
+				/*
+				 * Regenerate/invalidate kernel MMU
+				 * context.
+				 * The user MMU context will be
+				 * regenerated lazily on re-entry to
+				 * guest user if the guest ASID actually
+				 * changes.
+				 */
+				preempt_disable();
+				cpu = smp_processor_id();
+				kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm,
+							cpu, vcpu);
+				vcpu->arch.guest_kernel_asid[cpu] =
+					vcpu->arch.guest_kernel_mm.context.asid[cpu];
+				for_each_possible_cpu(i)
+					if (i != cpu)
+						vcpu->arch.guest_kernel_asid[i] = 0;
+				preempt_enable();
 			}
 			kvm_write_c0_guest_entryhi(cop0,
 					vcpu->arch.gprs[rt]);
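
The emulate.c changes above never touch remote TLBs directly: kvm_mips_invalidate_guest_tlb() clears the other CPUs' cached guest ASIDs, so the next guest entry on those CPUs allocates a fresh ASID and the stale translations simply become unreachable. A toy userspace model of that regenerate-on-zero scheme (NR_CPUS, load_on_cpu() and the flat counters are illustrative, not kernel code):

#include <stdio.h>

#define NR_CPUS 4

/* Toy model: cached ASIDs per CPU; 0 means "stale, regenerate". */
static unsigned int guest_kernel_asid[NR_CPUS];
static unsigned int next_asid = 1;

/* Model of the remote-CPU step in kvm_mips_invalidate_guest_tlb(). */
static void invalidate_other_cpus(int cur)
{
	for (int i = 0; i < NR_CPUS; i++)
		if (i != cur)
			guest_kernel_asid[i] = 0;
}

/* Model of the vcpu_load() path: regenerate if the cached ASID is stale. */
static unsigned int load_on_cpu(int cpu)
{
	if (!guest_kernel_asid[cpu])
		guest_kernel_asid[cpu] = next_asid++;
	return guest_kernel_asid[cpu];
}

int main(void)
{
	printf("cpu1 asid %u\n", load_on_cpu(1));	/* allocates */
	invalidate_other_cpus(0);	/* guest TLB changed on cpu0 */
	printf("cpu1 asid %u\n", load_on_cpu(1));	/* forced to a fresh ASID */
	return 0;
}
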
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 49b25e74d0c7..ce961495b5e1 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -421,6 +421,31 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	return -ENOIOCTLCMD;
 }
 
+/* Must be called with preemption disabled, just before entering guest */
+static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int cpu = smp_processor_id();
+	unsigned int gasid;
+
+	/*
+	 * Lazy host ASID regeneration for guest user mode.
+	 * If the guest ASID has changed since the last guest usermode
+	 * execution, regenerate the host ASID so as to invalidate stale TLB
+	 * entries.
+	 */
+	if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
+		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
+		if (gasid != vcpu->arch.last_user_gasid) {
+			kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu,
+						vcpu);
+			vcpu->arch.guest_user_asid[cpu] =
+				vcpu->arch.guest_user_mm.context.asid[cpu];
+			vcpu->arch.last_user_gasid = gasid;
+		}
+	}
+}
+
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int r = 0;
@@ -448,6 +473,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	htw_stop();
 
 	trace_kvm_enter(vcpu);
+
+	kvm_mips_check_asids(vcpu);
+
 	r = vcpu->arch.vcpu_run(run, vcpu);
 	trace_kvm_out(vcpu);
 
@@ -1561,6 +1589,8 @@ skip_emul:
 	if (ret == RESUME_GUEST) {
 		trace_kvm_reenter(vcpu);
 
+		kvm_mips_check_asids(vcpu);
+
 		/*
 		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
 		 * is live), restore FCR31 / MSACSR.
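
kvm_mips_check_asids() makes the user-mode regeneration lazy: the work is deferred until just before guest entry (both on the initial run and on every RESUME_GUEST re-entry), and skipped entirely while the guest stays on the same ASID or runs in kernel mode. A standalone model of that check (check_asids() and the counters are hypothetical stand-ins for the per-VCPU state):

#include <stdio.h>

/* Toy model of kvm_mips_check_asids(): only regenerate the host ASID
 * backing guest user mode when the guest's ASID actually changed. */
static unsigned int last_user_gasid;
static unsigned int host_user_asid;
static unsigned int next_asid = 1;
static unsigned int regen_count;

static void check_asids(unsigned int guest_entryhi_asid, int guest_kernel_mode)
{
	if (guest_kernel_mode)
		return;		/* kernel mode: nothing to do */
	if (guest_entryhi_asid != last_user_gasid) {
		host_user_asid = next_asid++;	/* invalidates stale TLB entries */
		last_user_gasid = guest_entryhi_asid;
		regen_count++;
	}
}

int main(void)
{
	check_asids(5, 0);	/* first user entry with guest ASID 5 */
	check_asids(5, 0);	/* same ASID: no regeneration */
	check_asids(7, 0);	/* ASID switch: regenerate */
	printf("regenerated %u times, host asid %u\n", regen_count, host_user_asid);
	return 0;
}
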
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 121008c0fcc9..03883ba806e2 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -250,15 +250,27 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
 		vcpu->arch.guest_kernel_asid[cpu] =
 			vcpu->arch.guest_kernel_mm.context.asid[cpu];
+		newasid++;
+
+		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+			  cpu_context(cpu, current->mm));
+		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
+	}
+
+	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
+			asid_version_mask(cpu)) {
+		u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
+				KVM_ENTRYHI_ASID;
+
 		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
 		vcpu->arch.guest_user_asid[cpu] =
 			vcpu->arch.guest_user_mm.context.asid[cpu];
+		vcpu->arch.last_user_gasid = gasid;
 		newasid++;
 
 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
 			  cpu_context(cpu, current->mm));
-		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
-			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
 		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
 			  vcpu->arch.guest_user_asid[cpu]);
 	}
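
The new guard in kvm_arch_vcpu_load() relies on the standard MIPS ASID layout: the low bits of the cached value hold the hardware ASID and the high bits a generation ("version") counter, so XOR-ing with asid_cache(cpu) and masking with asid_version_mask(cpu) is non-zero exactly when the cached ASID belongs to an older generation. A compact model of that staleness test, assuming an 8-bit ASID field purely for illustration:

#include <stdio.h>

/* Toy model of the MIPS ASID version check: low bits hold the ASID,
 * high bits a generation counter (8-bit ASID assumed here). */
#define ASID_MASK		0xffU
#define ASID_VERSION_MASK	(~0xffU)

static unsigned int asid_cache = 0x100;	/* current version on this CPU */

/* Stale if the cached ASID's version differs from the CPU's current one. */
static unsigned int asid_is_stale(unsigned int asid)
{
	return (asid ^ asid_cache) & ASID_VERSION_MASK;
}

int main(void)
{
	unsigned int asid = 0x105;	/* version 0x100, ASID 5 */

	printf("stale? %d\n", !!asid_is_stale(asid));	/* 0: same version */
	asid_cache += 0x100;	/* ASID space rolled over */
	printf("stale? %d\n", !!asid_is_stale(asid));	/* 1: must regenerate */
	return 0;
}
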
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 091553942bcb..3a5484f9aa50 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -175,6 +175,24 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
 			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 			ret = RESUME_HOST;
 		}
+	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
+		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+		/*
+		 * With EVA we may get a TLB exception instead of an address
+		 * error when the guest performs MMIO to KSeg1 addresses.
+		 */
+		kvm_debug("Emulate %s MMIO space\n",
+			  store ? "Store to" : "Load from");
+		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+		if (er == EMULATE_FAIL) {
+			kvm_err("Emulate %s MMIO space failed\n",
+				store ? "Store to" : "Load from");
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		} else {
+			run->exit_reason = KVM_EXIT_MMIO;
+			ret = RESUME_HOST;
+		}
 	} else {
 		kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
 			store ? "ST" : "LD", cause, opc, badvaddr);
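
The new branch fires only for guest-kernel-mode faults whose BadVAddr falls in KSeg0/KSeg1, the unmapped 512 MB segments where EVA configurations can raise TLB exceptions instead of address errors for MMIO accesses. A small sketch of the segment test, using the usual MIPS32 constants (the mask macro and printout are illustrative stand-ins for asm/addrspace.h):

#include <stdio.h>

/* Toy model of the KSEGX() segment test (MIPS32 layout). */
#define KSEGX(a)	((unsigned long)(a) & 0xe0000000UL)
#define CKSEG0		0x80000000UL
#define CKSEG1		0xa0000000UL

static int is_kseg01(unsigned long badvaddr)
{
	return KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1;
}

int main(void)
{
	/* A TLB fault here in guest kernel mode is treated as MMIO under EVA. */
	printf("0xa0001000 -> %d\n", is_kseg01(0xa0001000UL));	/* KSeg1: 1 */
	printf("0x00401000 -> %d\n", is_kseg01(0x00401000UL));	/* useg:  0 */
	return 0;
}
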