aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJames Hogan <james.hogan@imgtec.com>2014-11-18 09:09:12 -0500
committerJames Hogan <james.hogan@imgtec.com>2015-03-27 17:25:14 -0400
commit98e91b8457d81f53fab990fac6c57e2a43c47627 (patch)
tree84cbb2596fcc3de27e30899be37cfcff94949ab2
parentb86ecb3766abd9138289ff2a18381d25b73f4622 (diff)
MIPS: KVM: Add base guest FPU support
Add base code for supporting FPU in MIPS KVM guests. The FPU cannot yet be enabled in the guest, we're just laying the groundwork. Whether the guest's FPU context is loaded is stored in a bit in the fpu_inuse vcpu member. This allows the FPU to be disabled when the guest disables it, while keeping the FPU context loaded so it doesn't have to be reloaded if the guest re-enables it. An fpu_enabled vcpu member stores whether userland has enabled the FPU capability (which will be wired up in a later patch). New assembly code is added for saving and restoring the FPU context, and for saving/clearing and restoring FCSR (which can itself cause an FP exception depending on the value). The FCSR is restored before returning to the guest if the FPU is already enabled, and a die notifier is registered to catch the possible FP exception and step over the ctc1 instruction. The helper function kvm_lose_fpu() is added to save FPU context and disable the FPU, which is used when saving hardware state before a context switch or KVM exit (the vcpu_get_regs() callback). The helper function kvm_own_fpu() is added to enable the FPU and restore the FPU context if it isn't already loaded, which will be used in a later patch when the guest attempts to use the FPU for the first time and triggers a co-processor unusable exception. The helper function kvm_drop_fpu() is added to discard the FPU context and disable the FPU, which will be used in a later patch when the FPU state will become architecturally UNPREDICTABLE (change of FR mode) to force a reload of [stale] context in the new FR mode. Signed-off-by: James Hogan <james.hogan@imgtec.com> Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: Paul Burton <paul.burton@imgtec.com> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Gleb Natapov <gleb@kernel.org> Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org
-rw-r--r--arch/mips/include/asm/kvm_host.h27
-rw-r--r--arch/mips/kernel/asm-offsets.c38
-rw-r--r--arch/mips/kvm/Makefile2
-rw-r--r--arch/mips/kvm/fpu.S122
-rw-r--r--arch/mips/kvm/locore.S17
-rw-r--r--arch/mips/kvm/mips.c126
-rw-r--r--arch/mips/kvm/trap_emul.c2
7 files changed, 333 insertions, 1 deletion
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index fb79d67de192..866edf330e53 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -357,6 +357,8 @@ struct kvm_mips_tlb {
357 long tlb_lo1; 357 long tlb_lo1;
358}; 358};
359 359
360#define KVM_MIPS_FPU_FPU 0x1
361
360#define KVM_MIPS_GUEST_TLB_SIZE 64 362#define KVM_MIPS_GUEST_TLB_SIZE 64
361struct kvm_vcpu_arch { 363struct kvm_vcpu_arch {
362 void *host_ebase, *guest_ebase; 364 void *host_ebase, *guest_ebase;
@@ -378,6 +380,8 @@ struct kvm_vcpu_arch {
378 380
379 /* FPU State */ 381 /* FPU State */
380 struct mips_fpu_struct fpu; 382 struct mips_fpu_struct fpu;
383 /* Which FPU state is loaded (KVM_MIPS_FPU_*) */
384 unsigned int fpu_inuse;
381 385
382 /* COP0 State */ 386 /* COP0 State */
383 struct mips_coproc *cop0; 387 struct mips_coproc *cop0;
@@ -424,6 +428,8 @@ struct kvm_vcpu_arch {
424 428
425 /* WAIT executed */ 429 /* WAIT executed */
426 int wait; 430 int wait;
431
432 u8 fpu_enabled;
427}; 433};
428 434
429 435
@@ -554,6 +560,19 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
554 kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \ 560 kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
555} 561}
556 562
563/* Helpers */
564
565static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
566{
567 return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
568 vcpu->fpu_enabled;
569}
570
571static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
572{
573 return kvm_mips_guest_can_have_fpu(vcpu) &&
574 kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
575}
557 576
558struct kvm_mips_callbacks { 577struct kvm_mips_callbacks {
559 int (*handle_cop_unusable)(struct kvm_vcpu *vcpu); 578 int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
@@ -597,6 +616,14 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
597/* Trampoline ASM routine to start running in "Guest" context */ 616/* Trampoline ASM routine to start running in "Guest" context */
598extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu); 617extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
599 618
619/* FPU context management */
620void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
621void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
622void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
623void kvm_own_fpu(struct kvm_vcpu *vcpu);
624void kvm_drop_fpu(struct kvm_vcpu *vcpu);
625void kvm_lose_fpu(struct kvm_vcpu *vcpu);
626
600/* TLB handling */ 627/* TLB handling */
601uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu); 628uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
602 629
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 3ee1565c5be3..a12bcf920073 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -404,6 +404,44 @@ void output_kvm_defines(void)
404 OFFSET(VCPU_LO, kvm_vcpu_arch, lo); 404 OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
405 OFFSET(VCPU_HI, kvm_vcpu_arch, hi); 405 OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
406 OFFSET(VCPU_PC, kvm_vcpu_arch, pc); 406 OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
407 BLANK();
408
409 OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
410 OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
411 OFFSET(VCPU_FPR2, kvm_vcpu_arch, fpu.fpr[2]);
412 OFFSET(VCPU_FPR3, kvm_vcpu_arch, fpu.fpr[3]);
413 OFFSET(VCPU_FPR4, kvm_vcpu_arch, fpu.fpr[4]);
414 OFFSET(VCPU_FPR5, kvm_vcpu_arch, fpu.fpr[5]);
415 OFFSET(VCPU_FPR6, kvm_vcpu_arch, fpu.fpr[6]);
416 OFFSET(VCPU_FPR7, kvm_vcpu_arch, fpu.fpr[7]);
417 OFFSET(VCPU_FPR8, kvm_vcpu_arch, fpu.fpr[8]);
418 OFFSET(VCPU_FPR9, kvm_vcpu_arch, fpu.fpr[9]);
419 OFFSET(VCPU_FPR10, kvm_vcpu_arch, fpu.fpr[10]);
420 OFFSET(VCPU_FPR11, kvm_vcpu_arch, fpu.fpr[11]);
421 OFFSET(VCPU_FPR12, kvm_vcpu_arch, fpu.fpr[12]);
422 OFFSET(VCPU_FPR13, kvm_vcpu_arch, fpu.fpr[13]);
423 OFFSET(VCPU_FPR14, kvm_vcpu_arch, fpu.fpr[14]);
424 OFFSET(VCPU_FPR15, kvm_vcpu_arch, fpu.fpr[15]);
425 OFFSET(VCPU_FPR16, kvm_vcpu_arch, fpu.fpr[16]);
426 OFFSET(VCPU_FPR17, kvm_vcpu_arch, fpu.fpr[17]);
427 OFFSET(VCPU_FPR18, kvm_vcpu_arch, fpu.fpr[18]);
428 OFFSET(VCPU_FPR19, kvm_vcpu_arch, fpu.fpr[19]);
429 OFFSET(VCPU_FPR20, kvm_vcpu_arch, fpu.fpr[20]);
430 OFFSET(VCPU_FPR21, kvm_vcpu_arch, fpu.fpr[21]);
431 OFFSET(VCPU_FPR22, kvm_vcpu_arch, fpu.fpr[22]);
432 OFFSET(VCPU_FPR23, kvm_vcpu_arch, fpu.fpr[23]);
433 OFFSET(VCPU_FPR24, kvm_vcpu_arch, fpu.fpr[24]);
434 OFFSET(VCPU_FPR25, kvm_vcpu_arch, fpu.fpr[25]);
435 OFFSET(VCPU_FPR26, kvm_vcpu_arch, fpu.fpr[26]);
436 OFFSET(VCPU_FPR27, kvm_vcpu_arch, fpu.fpr[27]);
437 OFFSET(VCPU_FPR28, kvm_vcpu_arch, fpu.fpr[28]);
438 OFFSET(VCPU_FPR29, kvm_vcpu_arch, fpu.fpr[29]);
439 OFFSET(VCPU_FPR30, kvm_vcpu_arch, fpu.fpr[30]);
440 OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]);
441
442 OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
443 BLANK();
444
407 OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0); 445 OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
408 OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid); 446 OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
409 OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid); 447 OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 401fe027c261..78d7bcd7710a 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -7,7 +7,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
7 7
8kvm-objs := $(common-objs) mips.o emulate.o locore.o \ 8kvm-objs := $(common-objs) mips.o emulate.o locore.o \
9 interrupt.o stats.o commpage.o \ 9 interrupt.o stats.o commpage.o \
10 dyntrans.o trap_emul.o 10 dyntrans.o trap_emul.o fpu.o
11 11
12obj-$(CONFIG_KVM) += kvm.o 12obj-$(CONFIG_KVM) += kvm.o
13obj-y += callback.o tlb.o 13obj-y += callback.o tlb.o
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S
new file mode 100644
index 000000000000..531fbf5131c0
--- /dev/null
+++ b/arch/mips/kvm/fpu.S
@@ -0,0 +1,122 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * FPU context handling code for KVM.
7 *
8 * Copyright (C) 2015 Imagination Technologies Ltd.
9 */
10
11#include <asm/asm.h>
12#include <asm/asm-offsets.h>
13#include <asm/fpregdef.h>
14#include <asm/mipsregs.h>
15#include <asm/regdef.h>
16
17 .set noreorder
18 .set noat
19
20LEAF(__kvm_save_fpu)
21 .set push
22 .set mips64r2
23 SET_HARDFLOAT
24 mfc0 t0, CP0_STATUS
25 sll t0, t0, 5 # is Status.FR set?
26 bgez t0, 1f # no: skip odd doubles
27 nop
28 sdc1 $f1, VCPU_FPR1(a0)
29 sdc1 $f3, VCPU_FPR3(a0)
30 sdc1 $f5, VCPU_FPR5(a0)
31 sdc1 $f7, VCPU_FPR7(a0)
32 sdc1 $f9, VCPU_FPR9(a0)
33 sdc1 $f11, VCPU_FPR11(a0)
34 sdc1 $f13, VCPU_FPR13(a0)
35 sdc1 $f15, VCPU_FPR15(a0)
36 sdc1 $f17, VCPU_FPR17(a0)
37 sdc1 $f19, VCPU_FPR19(a0)
38 sdc1 $f21, VCPU_FPR21(a0)
39 sdc1 $f23, VCPU_FPR23(a0)
40 sdc1 $f25, VCPU_FPR25(a0)
41 sdc1 $f27, VCPU_FPR27(a0)
42 sdc1 $f29, VCPU_FPR29(a0)
43 sdc1 $f31, VCPU_FPR31(a0)
441: sdc1 $f0, VCPU_FPR0(a0)
45 sdc1 $f2, VCPU_FPR2(a0)
46 sdc1 $f4, VCPU_FPR4(a0)
47 sdc1 $f6, VCPU_FPR6(a0)
48 sdc1 $f8, VCPU_FPR8(a0)
49 sdc1 $f10, VCPU_FPR10(a0)
50 sdc1 $f12, VCPU_FPR12(a0)
51 sdc1 $f14, VCPU_FPR14(a0)
52 sdc1 $f16, VCPU_FPR16(a0)
53 sdc1 $f18, VCPU_FPR18(a0)
54 sdc1 $f20, VCPU_FPR20(a0)
55 sdc1 $f22, VCPU_FPR22(a0)
56 sdc1 $f24, VCPU_FPR24(a0)
57 sdc1 $f26, VCPU_FPR26(a0)
58 sdc1 $f28, VCPU_FPR28(a0)
59 jr ra
60 sdc1 $f30, VCPU_FPR30(a0)
61 .set pop
62 END(__kvm_save_fpu)
63
64LEAF(__kvm_restore_fpu)
65 .set push
66 .set mips64r2
67 SET_HARDFLOAT
68 mfc0 t0, CP0_STATUS
69 sll t0, t0, 5 # is Status.FR set?
70 bgez t0, 1f # no: skip odd doubles
71 nop
72 ldc1 $f1, VCPU_FPR1(a0)
73 ldc1 $f3, VCPU_FPR3(a0)
74 ldc1 $f5, VCPU_FPR5(a0)
75 ldc1 $f7, VCPU_FPR7(a0)
76 ldc1 $f9, VCPU_FPR9(a0)
77 ldc1 $f11, VCPU_FPR11(a0)
78 ldc1 $f13, VCPU_FPR13(a0)
79 ldc1 $f15, VCPU_FPR15(a0)
80 ldc1 $f17, VCPU_FPR17(a0)
81 ldc1 $f19, VCPU_FPR19(a0)
82 ldc1 $f21, VCPU_FPR21(a0)
83 ldc1 $f23, VCPU_FPR23(a0)
84 ldc1 $f25, VCPU_FPR25(a0)
85 ldc1 $f27, VCPU_FPR27(a0)
86 ldc1 $f29, VCPU_FPR29(a0)
87 ldc1 $f31, VCPU_FPR31(a0)
881: ldc1 $f0, VCPU_FPR0(a0)
89 ldc1 $f2, VCPU_FPR2(a0)
90 ldc1 $f4, VCPU_FPR4(a0)
91 ldc1 $f6, VCPU_FPR6(a0)
92 ldc1 $f8, VCPU_FPR8(a0)
93 ldc1 $f10, VCPU_FPR10(a0)
94 ldc1 $f12, VCPU_FPR12(a0)
95 ldc1 $f14, VCPU_FPR14(a0)
96 ldc1 $f16, VCPU_FPR16(a0)
97 ldc1 $f18, VCPU_FPR18(a0)
98 ldc1 $f20, VCPU_FPR20(a0)
99 ldc1 $f22, VCPU_FPR22(a0)
100 ldc1 $f24, VCPU_FPR24(a0)
101 ldc1 $f26, VCPU_FPR26(a0)
102 ldc1 $f28, VCPU_FPR28(a0)
103 jr ra
104 ldc1 $f30, VCPU_FPR30(a0)
105 .set pop
106 END(__kvm_restore_fpu)
107
108LEAF(__kvm_restore_fcsr)
109 .set push
110 SET_HARDFLOAT
111 lw t0, VCPU_FCR31(a0)
112 /*
113 * The ctc1 must stay at this offset in __kvm_restore_fcsr.
114 * See kvm_mips_csr_die_notify() which handles t0 containing a value
115 * which triggers an FP Exception, which must be stepped over and
116 * ignored since the set cause bits must remain there for the guest.
117 */
118 ctc1 t0, fcr31
119 jr ra
120 nop
121 .set pop
122 END(__kvm_restore_fcsr)
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 4a68b176d6e4..f5594049c0c3 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -353,6 +353,23 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
353 LONG_L k0, VCPU_HOST_EBASE(k1) 353 LONG_L k0, VCPU_HOST_EBASE(k1)
354 mtc0 k0,CP0_EBASE 354 mtc0 k0,CP0_EBASE
355 355
356 /*
357 * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
358 * trigger FPE for pending exceptions.
359 */
360 .set at
361 and v1, v0, ST0_CU1
362 beqz v1, 1f
363 nop
364 .set push
365 SET_HARDFLOAT
366 cfc1 t0, fcr31
367 sw t0, VCPU_FCR31(k1)
368 ctc1 zero,fcr31
369 .set pop
370 .set noat
3711:
372
356 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ 373 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
357 .set at 374 .set at
358 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) 375 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 73eecc779454..b26a48d81467 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/err.h> 13#include <linux/err.h>
14#include <linux/kdebug.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
16#include <linux/fs.h> 17#include <linux/fs.h>
@@ -1178,12 +1179,133 @@ skip_emul:
1178 } 1179 }
1179 } 1180 }
1180 1181
1182 if (ret == RESUME_GUEST) {
1183 /*
1184 * If FPU is enabled (i.e. the guest's FPU context is live),
1185 * restore FCR31.
1186 *
1187 * This should be before returning to the guest exception
1188 * vector, as it may well cause an FP exception if there are
1189 * pending exception bits unmasked. (see
1190 * kvm_mips_csr_die_notifier() for how that is handled).
1191 */
1192 if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
1193 read_c0_status() & ST0_CU1)
1194 __kvm_restore_fcsr(&vcpu->arch);
1195 }
1196
1181 /* Disable HTW before returning to guest or host */ 1197 /* Disable HTW before returning to guest or host */
1182 htw_stop(); 1198 htw_stop();
1183 1199
1184 return ret; 1200 return ret;
1185} 1201}
1186 1202
1203/* Enable FPU for guest and restore context */
1204void kvm_own_fpu(struct kvm_vcpu *vcpu)
1205{
1206 struct mips_coproc *cop0 = vcpu->arch.cop0;
1207 unsigned int sr, cfg5;
1208
1209 preempt_disable();
1210
1211 /*
1212 * Enable FPU for guest
1213 * We set FR and FRE according to guest context
1214 */
1215 sr = kvm_read_c0_guest_status(cop0);
1216 change_c0_status(ST0_CU1 | ST0_FR, sr);
1217 if (cpu_has_fre) {
1218 cfg5 = kvm_read_c0_guest_config5(cop0);
1219 change_c0_config5(MIPS_CONF5_FRE, cfg5);
1220 }
1221 enable_fpu_hazard();
1222
1223 /* If guest FPU state not active, restore it now */
1224 if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
1225 __kvm_restore_fpu(&vcpu->arch);
1226 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
1227 }
1228
1229 preempt_enable();
1230}
1231
1232/* Drop FPU without saving it */
1233void kvm_drop_fpu(struct kvm_vcpu *vcpu)
1234{
1235 preempt_disable();
1236 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
1237 clear_c0_status(ST0_CU1 | ST0_FR);
1238 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
1239 }
1240 preempt_enable();
1241}
1242
1243/* Save and disable FPU */
1244void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1245{
1246 /*
1247 * FPU gets disabled in root context (hardware) when it is disabled in
1248 * guest context (software), but the register state in the hardware may
1249 * still be in use. This is why we explicitly re-enable the hardware
1250 * before saving.
1251 */
1252
1253 preempt_disable();
1254 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
1255 set_c0_status(ST0_CU1);
1256 enable_fpu_hazard();
1257
1258 __kvm_save_fpu(&vcpu->arch);
1259 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
1260
1261 /* Disable FPU */
1262 clear_c0_status(ST0_CU1 | ST0_FR);
1263 }
1264 preempt_enable();
1265}
1266
1267/*
1268 * Step over a specific ctc1 to FCSR which is used to restore guest FCSR state
1269 * and may trigger a "harmless" FP exception if cause bits are set in the value
1270 * being written.
1271 */
1272static int kvm_mips_csr_die_notify(struct notifier_block *self,
1273 unsigned long cmd, void *ptr)
1274{
1275 struct die_args *args = (struct die_args *)ptr;
1276 struct pt_regs *regs = args->regs;
1277 unsigned long pc;
1278
1279 /* Only interested in FPE */
1280 if (cmd != DIE_FP)
1281 return NOTIFY_DONE;
1282
1283 /* Return immediately if guest context isn't active */
1284 if (!(current->flags & PF_VCPU))
1285 return NOTIFY_DONE;
1286
1287 /* Should never get here from user mode */
1288 BUG_ON(user_mode(regs));
1289
1290 pc = instruction_pointer(regs);
1291 switch (cmd) {
1292 case DIE_FP:
1293 /* match 2nd instruction in __kvm_restore_fcsr */
1294 if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
1295 return NOTIFY_DONE;
1296 break;
1297 }
1298
1299 /* Move PC forward a little and continue executing */
1300 instruction_pointer(regs) += 4;
1301
1302 return NOTIFY_STOP;
1303}
1304
1305static struct notifier_block kvm_mips_csr_die_notifier = {
1306 .notifier_call = kvm_mips_csr_die_notify,
1307};
1308
1187int __init kvm_mips_init(void) 1309int __init kvm_mips_init(void)
1188{ 1310{
1189 int ret; 1311 int ret;
@@ -1193,6 +1315,8 @@ int __init kvm_mips_init(void)
1193 if (ret) 1315 if (ret)
1194 return ret; 1316 return ret;
1195 1317
1318 register_die_notifier(&kvm_mips_csr_die_notifier);
1319
1196 /* 1320 /*
1197 * On MIPS, kernel modules are executed from "mapped space", which 1321 * On MIPS, kernel modules are executed from "mapped space", which
1198 * requires TLBs. The TLB handling code is statically linked with 1322 * requires TLBs. The TLB handling code is statically linked with
@@ -1215,6 +1339,8 @@ void __exit kvm_mips_exit(void)
1215 kvm_mips_gfn_to_pfn = NULL; 1339 kvm_mips_gfn_to_pfn = NULL;
1216 kvm_mips_release_pfn_clean = NULL; 1340 kvm_mips_release_pfn_clean = NULL;
1217 kvm_mips_is_error_pfn = NULL; 1341 kvm_mips_is_error_pfn = NULL;
1342
1343 unregister_die_notifier(&kvm_mips_csr_die_notifier);
1218} 1344}
1219 1345
1220module_init(kvm_mips_init); 1346module_init(kvm_mips_init);
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 0d2729d202f4..408af244aed2 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -554,6 +554,8 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
554 554
555static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu) 555static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
556{ 556{
557 kvm_lose_fpu(vcpu);
558
557 return 0; 559 return 0;
558} 560}
559 561