author     James Hogan <james.hogan@imgtec.com>    2015-03-05 06:43:36 -0500
committer  James Hogan <james.hogan@imgtec.com>    2015-03-27 17:25:19 -0400
commit     539cb89fbdfe082d00be6f83d0f2140b7802151c (patch)
tree       bf750a7d73667136d30a89c2ee2cb5077a5a7bb4
parent     5fafd8748b366105e08c198892e9fe02ef15c021 (diff)
MIPS: KVM: Add base guest MSA support
Add base code for supporting the MIPS SIMD Architecture (MSA) in MIPS KVM guests. MSA cannot yet be enabled in the guest; this patch just lays the groundwork.

As with the FPU, whether the guest's MSA context is loaded is tracked by another bit in the fpu_inuse vcpu member. This allows MSA to be disabled when the guest disables it, while keeping the MSA context loaded so that it doesn't have to be reloaded if the guest re-enables it.

New assembly code is added for saving and restoring the MSA context, for restoring only the upper half of the MSA context (for when the FPU context is already loaded), and for saving/clearing and restoring MSACSR (which can itself cause an MSA FP exception depending on the value). The MSACSR is restored before returning to the guest if MSA is already enabled, and the existing FP exception die notifier is extended to catch the possible MSA FP exception and step over the ctcmsa instruction.

The helper function kvm_own_msa() is added to enable MSA and restore the MSA context if it isn't already loaded. It will be used in a later patch when the guest attempts to use MSA for the first time and triggers an MSA disabled exception.

The existing FPU helpers are extended to handle MSA. kvm_lose_fpu() saves the full MSA context if it is loaded (which includes the FPU context), and both kvm_lose_fpu() and kvm_drop_fpu() disable MSA. kvm_own_fpu() also needs to lose any MSA context if FR=0, since there would be a risk of getting reserved instruction exceptions if CU1 is enabled and we later try to save the MSA context. We shouldn't usually hit this case since it is handled when emulating CU1 changes; however, nothing stops the guest from modifying the Status register directly via the comm page, which will cause this case to be hit.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
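For illustration only (this block is not part of the patch): a minimal, self-contained sketch of how the two fpu_inuse bits described above combine to select how much vector state needs restoring. The bit values mirror KVM_MIPS_FPU_FPU/KVM_MIPS_FPU_MSA from the hunks below; the enum and the helper name msa_restore_action() are hypothetical and exist only for this sketch, the real logic is the switch statement added to kvm_own_msa() in arch/mips/kvm/mips.c.

/* Hedged sketch, not from the patch: mirrors the fpu_inuse bookkeeping below. */
#include <stdio.h>

#define KVM_MIPS_FPU_FPU 0x1 /* guest scalar FP registers are live in hardware */
#define KVM_MIPS_FPU_MSA 0x2 /* full guest MSA vector context is live in hardware */

/* Hypothetical names for the three cases handled by the switch in kvm_own_msa(). */
enum msa_restore_action {
	MSA_RESTORE_NONE,  /* MSA context already live: nothing to load */
	MSA_RESTORE_UPPER, /* FPU live: only the upper halves of the vectors are missing */
	MSA_RESTORE_FULL,  /* neither live: load the complete vector context */
};

static enum msa_restore_action msa_restore_action(unsigned int fpu_inuse)
{
	if (fpu_inuse & KVM_MIPS_FPU_MSA)
		return MSA_RESTORE_NONE;
	if (fpu_inuse & KVM_MIPS_FPU_FPU)
		return MSA_RESTORE_UPPER;
	return MSA_RESTORE_FULL;
}

int main(void)
{
	/* Prints "2 1 0": full restore, upper-only restore, nothing to restore. */
	printf("%d %d %d\n",
	       msa_restore_action(0),
	       msa_restore_action(KVM_MIPS_FPU_FPU),
	       msa_restore_action(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA));
	return 0;
}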
-rw-r--r--   arch/mips/include/asm/kvm_host.h |  21
-rw-r--r--   arch/mips/kernel/asm-offsets.c   |   1
-rw-r--r--   arch/mips/kvm/Makefile           |   6
-rw-r--r--   arch/mips/kvm/locore.S           |  21
-rw-r--r--   arch/mips/kvm/mips.c             | 132
-rw-r--r--   arch/mips/kvm/msa.S              | 161
6 files changed, 323 insertions(+), 19 deletions(-)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index fb264d8695e4..1dc0dca15cbd 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -360,6 +360,7 @@ struct kvm_mips_tlb {
 };
 
 #define KVM_MIPS_FPU_FPU	0x1
+#define KVM_MIPS_FPU_MSA	0x2
 
 #define KVM_MIPS_GUEST_TLB_SIZE	64
 struct kvm_vcpu_arch {
@@ -432,6 +433,7 @@ struct kvm_vcpu_arch {
 	int wait;
 
 	u8 fpu_enabled;
+	u8 msa_enabled;
 };
 
 
@@ -576,6 +578,18 @@ static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
 		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
 }
 
+static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
+{
+	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
+		vcpu->msa_enabled;
+}
+
+static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
+{
+	return kvm_mips_guest_can_have_msa(vcpu) &&
+		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
+}
+
 struct kvm_mips_callbacks {
 	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
 	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
@@ -619,11 +633,16 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 /* Trampoline ASM routine to start running in "Guest" context */
 extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 
-/* FPU context management */
+/* FPU/MSA context management */
 void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
 void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
 void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
+void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
 void kvm_own_fpu(struct kvm_vcpu *vcpu);
+void kvm_own_msa(struct kvm_vcpu *vcpu);
 void kvm_drop_fpu(struct kvm_vcpu *vcpu);
 void kvm_lose_fpu(struct kvm_vcpu *vcpu);
 
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index a12bcf920073..e59fd7cfac9e 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -440,6 +440,7 @@ void output_kvm_defines(void)
 	OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]);
 
 	OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
+	OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
 	BLANK();
 
 	OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 78d7bcd7710a..637ebbebd549 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -1,11 +1,13 @@
 # Makefile for KVM support for MIPS
 #
 
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
 
-kvm-objs := $(common-objs) mips.o emulate.o locore.o \
+common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
+
+kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \
 	    interrupt.o stats.o commpage.o \
 	    dyntrans.o trap_emul.o fpu.o
 
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index f5594049c0c3..c567240386a0 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -36,6 +36,8 @@
 #define PT_HOST_USERLOCAL   PT_EPC
 
 #define CP0_DDATA_LO        $28,3
+#define CP0_CONFIG3         $16,3
+#define CP0_CONFIG5         $16,5
 #define CP0_EBASE           $15,1
 
 #define CP0_INTCTL          $12,1
@@ -370,6 +372,25 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	.set	noat
 1:
 
+#ifdef CONFIG_CPU_HAS_MSA
+	/*
+	 * If MSA is enabled, save MSACSR and clear it so that later
+	 * instructions don't trigger MSAFPE for pending exceptions.
+	 */
+	mfc0	t0, CP0_CONFIG3
+	ext	t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
+	beqz	t0, 1f
+	 nop
+	mfc0	t0, CP0_CONFIG5
+	ext	t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
+	beqz	t0, 1f
+	 nop
+	_cfcmsa	t0, MSA_CSR
+	sw	t0, VCPU_MSA_CSR(k1)
+	_ctcmsa	MSA_CSR, zero
+1:
+#endif
+
 	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
 	.set	at
 	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7f86cb73d05d..a17f21015a0b 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1295,17 +1295,21 @@ skip_emul:
 
 	if (ret == RESUME_GUEST) {
 		/*
-		 * If FPU is enabled (i.e. the guest's FPU context is live),
-		 * restore FCR31.
+		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
+		 * is live), restore FCR31 / MSACSR.
 		 *
 		 * This should be before returning to the guest exception
-		 * vector, as it may well cause an FP exception if there are
-		 * pending exception bits unmasked. (see
+		 * vector, as it may well cause an [MSA] FP exception if there
+		 * are pending exception bits unmasked. (see
 		 * kvm_mips_csr_die_notifier() for how that is handled).
 		 */
 		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
 		    read_c0_status() & ST0_CU1)
 			__kvm_restore_fcsr(&vcpu->arch);
+
+		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
+		    read_c0_config5() & MIPS_CONF5_MSAEN)
+			__kvm_restore_msacsr(&vcpu->arch);
 	}
 
 	/* Disable HTW before returning to guest or host */
@@ -1322,11 +1326,26 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
 
 	preempt_disable();
 
+	sr = kvm_read_c0_guest_status(cop0);
+
+	/*
+	 * If MSA state is already live, it is undefined how it interacts with
+	 * FR=0 FPU state, and we don't want to hit reserved instruction
+	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
+	 * play it safe and save it first.
+	 *
+	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
+	 * get called when guest CU1 is set, however we can't trust the guest
+	 * not to clobber the status register directly via the commpage.
+	 */
+	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
+	    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+		kvm_lose_fpu(vcpu);
+
 	/*
 	 * Enable FPU for guest
 	 * We set FR and FRE according to guest context
 	 */
-	sr = kvm_read_c0_guest_status(cop0);
 	change_c0_status(ST0_CU1 | ST0_FR, sr);
 	if (cpu_has_fre) {
 		cfg5 = kvm_read_c0_guest_config5(cop0);
@@ -1343,10 +1362,73 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
 	preempt_enable();
 }
 
-/* Drop FPU without saving it */
+#ifdef CONFIG_CPU_HAS_MSA
+/* Enable MSA for guest and restore context */
+void kvm_own_msa(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	unsigned int sr, cfg5;
+
+	preempt_disable();
+
+	/*
+	 * Enable FPU if enabled in guest, since we're restoring FPU context
+	 * anyway. We set FR and FRE according to guest context.
+	 */
+	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+		sr = kvm_read_c0_guest_status(cop0);
+
+		/*
+		 * If FR=0 FPU state is already live, it is undefined how it
+		 * interacts with MSA state, so play it safe and save it first.
+		 */
+		if (!(sr & ST0_FR) &&
+		    (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
+				KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
+			kvm_lose_fpu(vcpu);
+
+		change_c0_status(ST0_CU1 | ST0_FR, sr);
+		if (sr & ST0_CU1 && cpu_has_fre) {
+			cfg5 = kvm_read_c0_guest_config5(cop0);
+			change_c0_config5(MIPS_CONF5_FRE, cfg5);
+		}
+	}
+
+	/* Enable MSA for guest */
+	set_c0_config5(MIPS_CONF5_MSAEN);
+	enable_fpu_hazard();
+
+	switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
+	case KVM_MIPS_FPU_FPU:
+		/*
+		 * Guest FPU state already loaded, only restore upper MSA state
+		 */
+		__kvm_restore_msa_upper(&vcpu->arch);
+		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+		break;
+	case 0:
+		/* Neither FPU or MSA already active, restore full MSA state */
+		__kvm_restore_msa(&vcpu->arch);
+		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+		if (kvm_mips_guest_has_fpu(&vcpu->arch))
+			vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+		break;
+	default:
+		break;
+	}
+
+	preempt_enable();
+}
+#endif
+
+/* Drop FPU & MSA without saving it */
 void kvm_drop_fpu(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
+	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+		disable_msa();
+		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
+	}
 	if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
 		clear_c0_status(ST0_CU1 | ST0_FR);
 		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
@@ -1354,18 +1436,29 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu)
 	preempt_enable();
 }
 
-/* Save and disable FPU */
+/* Save and disable FPU & MSA */
 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 {
 	/*
-	 * FPU gets disabled in root context (hardware) when it is disabled in
-	 * guest context (software), but the register state in the hardware may
-	 * still be in use. This is why we explicitly re-enable the hardware
+	 * FPU & MSA get disabled in root context (hardware) when it is disabled
+	 * in guest context (software), but the register state in the hardware
+	 * may still be in use. This is why we explicitly re-enable the hardware
 	 * before saving.
 	 */
 
 	preempt_disable();
-	if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+		set_c0_config5(MIPS_CONF5_MSAEN);
+		enable_fpu_hazard();
+
+		__kvm_save_msa(&vcpu->arch);
+
+		/* Disable MSA & FPU */
+		disable_msa();
+		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+			clear_c0_status(ST0_CU1 | ST0_FR);
+		vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
+	} else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
 		set_c0_status(ST0_CU1);
 		enable_fpu_hazard();
 
@@ -1379,9 +1472,9 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Step over a specific ctc1 to FCSR which is used to restore guest FCSR state
- * and may trigger a "harmless" FP exception if cause bits are set in the value
- * being written.
+ * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
+ * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
+ * exception if cause bits are set in the value being written.
  */
 static int kvm_mips_csr_die_notify(struct notifier_block *self,
 				   unsigned long cmd, void *ptr)
@@ -1390,8 +1483,8 @@ static int kvm_mips_csr_die_notify(struct notifier_block *self,
 	struct pt_regs *regs = args->regs;
 	unsigned long pc;
 
-	/* Only interested in FPE */
-	if (cmd != DIE_FP)
+	/* Only interested in FPE and MSAFPE */
+	if (cmd != DIE_FP && cmd != DIE_MSAFP)
 		return NOTIFY_DONE;
 
 	/* Return immediately if guest context isn't active */
@@ -1408,6 +1501,13 @@ static int kvm_mips_csr_die_notify(struct notifier_block *self,
 		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
 			return NOTIFY_DONE;
 		break;
+	case DIE_MSAFP:
+		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
+		if (!cpu_has_msa ||
+		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
+		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
+			return NOTIFY_DONE;
+		break;
 	}
 
 	/* Move PC forward a little and continue executing */
diff --git a/arch/mips/kvm/msa.S b/arch/mips/kvm/msa.S
new file mode 100644
index 000000000000..d02f0c6cc2cc
--- /dev/null
+++ b/arch/mips/kvm/msa.S
@@ -0,0 +1,161 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * MIPS SIMD Architecture (MSA) context handling code for KVM.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+
+	.set	noreorder
+	.set	noat
+
+LEAF(__kvm_save_msa)
+	st_d	0, VCPU_FPR0, a0
+	st_d	1, VCPU_FPR1, a0
+	st_d	2, VCPU_FPR2, a0
+	st_d	3, VCPU_FPR3, a0
+	st_d	4, VCPU_FPR4, a0
+	st_d	5, VCPU_FPR5, a0
+	st_d	6, VCPU_FPR6, a0
+	st_d	7, VCPU_FPR7, a0
+	st_d	8, VCPU_FPR8, a0
+	st_d	9, VCPU_FPR9, a0
+	st_d	10, VCPU_FPR10, a0
+	st_d	11, VCPU_FPR11, a0
+	st_d	12, VCPU_FPR12, a0
+	st_d	13, VCPU_FPR13, a0
+	st_d	14, VCPU_FPR14, a0
+	st_d	15, VCPU_FPR15, a0
+	st_d	16, VCPU_FPR16, a0
+	st_d	17, VCPU_FPR17, a0
+	st_d	18, VCPU_FPR18, a0
+	st_d	19, VCPU_FPR19, a0
+	st_d	20, VCPU_FPR20, a0
+	st_d	21, VCPU_FPR21, a0
+	st_d	22, VCPU_FPR22, a0
+	st_d	23, VCPU_FPR23, a0
+	st_d	24, VCPU_FPR24, a0
+	st_d	25, VCPU_FPR25, a0
+	st_d	26, VCPU_FPR26, a0
+	st_d	27, VCPU_FPR27, a0
+	st_d	28, VCPU_FPR28, a0
+	st_d	29, VCPU_FPR29, a0
+	st_d	30, VCPU_FPR30, a0
+	st_d	31, VCPU_FPR31, a0
+	jr	ra
+	 nop
+	END(__kvm_save_msa)
+
+LEAF(__kvm_restore_msa)
+	ld_d	0, VCPU_FPR0, a0
+	ld_d	1, VCPU_FPR1, a0
+	ld_d	2, VCPU_FPR2, a0
+	ld_d	3, VCPU_FPR3, a0
+	ld_d	4, VCPU_FPR4, a0
+	ld_d	5, VCPU_FPR5, a0
+	ld_d	6, VCPU_FPR6, a0
+	ld_d	7, VCPU_FPR7, a0
+	ld_d	8, VCPU_FPR8, a0
+	ld_d	9, VCPU_FPR9, a0
+	ld_d	10, VCPU_FPR10, a0
+	ld_d	11, VCPU_FPR11, a0
+	ld_d	12, VCPU_FPR12, a0
+	ld_d	13, VCPU_FPR13, a0
+	ld_d	14, VCPU_FPR14, a0
+	ld_d	15, VCPU_FPR15, a0
+	ld_d	16, VCPU_FPR16, a0
+	ld_d	17, VCPU_FPR17, a0
+	ld_d	18, VCPU_FPR18, a0
+	ld_d	19, VCPU_FPR19, a0
+	ld_d	20, VCPU_FPR20, a0
+	ld_d	21, VCPU_FPR21, a0
+	ld_d	22, VCPU_FPR22, a0
+	ld_d	23, VCPU_FPR23, a0
+	ld_d	24, VCPU_FPR24, a0
+	ld_d	25, VCPU_FPR25, a0
+	ld_d	26, VCPU_FPR26, a0
+	ld_d	27, VCPU_FPR27, a0
+	ld_d	28, VCPU_FPR28, a0
+	ld_d	29, VCPU_FPR29, a0
+	ld_d	30, VCPU_FPR30, a0
+	ld_d	31, VCPU_FPR31, a0
+	jr	ra
+	 nop
+	END(__kvm_restore_msa)
+
+	.macro	kvm_restore_msa_upper	wr, off, base
+	.set	push
+	.set	noat
+#ifdef CONFIG_64BIT
+	ld	$1, \off(\base)
+	insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+	lw	$1, \off(\base)
+	insert_w \wr, 2
+	lw	$1, (\off+4)(\base)
+	insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+	lw	$1, (\off+4)(\base)
+	insert_w \wr, 2
+	lw	$1, \off(\base)
+	insert_w \wr, 3
+#endif
+	.set	pop
+	.endm
+
+LEAF(__kvm_restore_msa_upper)
+	kvm_restore_msa_upper	0, VCPU_FPR0 +8, a0
+	kvm_restore_msa_upper	1, VCPU_FPR1 +8, a0
+	kvm_restore_msa_upper	2, VCPU_FPR2 +8, a0
+	kvm_restore_msa_upper	3, VCPU_FPR3 +8, a0
+	kvm_restore_msa_upper	4, VCPU_FPR4 +8, a0
+	kvm_restore_msa_upper	5, VCPU_FPR5 +8, a0
+	kvm_restore_msa_upper	6, VCPU_FPR6 +8, a0
+	kvm_restore_msa_upper	7, VCPU_FPR7 +8, a0
+	kvm_restore_msa_upper	8, VCPU_FPR8 +8, a0
+	kvm_restore_msa_upper	9, VCPU_FPR9 +8, a0
+	kvm_restore_msa_upper	10, VCPU_FPR10+8, a0
+	kvm_restore_msa_upper	11, VCPU_FPR11+8, a0
+	kvm_restore_msa_upper	12, VCPU_FPR12+8, a0
+	kvm_restore_msa_upper	13, VCPU_FPR13+8, a0
+	kvm_restore_msa_upper	14, VCPU_FPR14+8, a0
+	kvm_restore_msa_upper	15, VCPU_FPR15+8, a0
+	kvm_restore_msa_upper	16, VCPU_FPR16+8, a0
+	kvm_restore_msa_upper	17, VCPU_FPR17+8, a0
+	kvm_restore_msa_upper	18, VCPU_FPR18+8, a0
+	kvm_restore_msa_upper	19, VCPU_FPR19+8, a0
+	kvm_restore_msa_upper	20, VCPU_FPR20+8, a0
+	kvm_restore_msa_upper	21, VCPU_FPR21+8, a0
+	kvm_restore_msa_upper	22, VCPU_FPR22+8, a0
+	kvm_restore_msa_upper	23, VCPU_FPR23+8, a0
+	kvm_restore_msa_upper	24, VCPU_FPR24+8, a0
+	kvm_restore_msa_upper	25, VCPU_FPR25+8, a0
+	kvm_restore_msa_upper	26, VCPU_FPR26+8, a0
+	kvm_restore_msa_upper	27, VCPU_FPR27+8, a0
+	kvm_restore_msa_upper	28, VCPU_FPR28+8, a0
+	kvm_restore_msa_upper	29, VCPU_FPR29+8, a0
+	kvm_restore_msa_upper	30, VCPU_FPR30+8, a0
+	kvm_restore_msa_upper	31, VCPU_FPR31+8, a0
+	jr	ra
+	 nop
+	END(__kvm_restore_msa_upper)
+
+LEAF(__kvm_restore_msacsr)
+	lw	t0, VCPU_MSA_CSR(a0)
+	/*
+	 * The ctcmsa must stay at this offset in __kvm_restore_msacsr.
+	 * See kvm_mips_csr_die_notify() which handles t0 containing a value
+	 * which triggers an MSA FP Exception, which must be stepped over and
+	 * ignored since the set cause bits must remain there for the guest.
+	 */
+	_ctcmsa	MSA_CSR, t0
+	jr	ra
+	 nop
+	END(__kvm_restore_msacsr)