aboutsummaryrefslogtreecommitdiffstats
path: root/arch/mips/kvm/mips.c
diff options
context:
space:
mode:
authorJames Hogan <james.hogan@imgtec.com>2015-03-05 06:43:36 -0500
committerJames Hogan <james.hogan@imgtec.com>2015-03-27 17:25:19 -0400
commit539cb89fbdfe082d00be6f83d0f2140b7802151c (patch)
treebf750a7d73667136d30a89c2ee2cb5077a5a7bb4 /arch/mips/kvm/mips.c
parent5fafd8748b366105e08c198892e9fe02ef15c021 (diff)
MIPS: KVM: Add base guest MSA support
Add base code for supporting the MIPS SIMD Architecture (MSA) in MIPS KVM guests. MSA cannot yet be enabled in the guest, we're just laying the groundwork. As with the FPU, whether the guest's MSA context is loaded is stored in another bit in the fpu_inuse vcpu member. This allows MSA to be disabled when the guest disables it, while keeping the MSA context loaded so it doesn't have to be reloaded if the guest re-enables it. New assembly code is added for saving and restoring the MSA context, restoring only the upper half of the MSA context (for when the FPU context is already loaded) and for saving/clearing and restoring MSACSR (which can itself cause an MSA FP exception depending on the value). The MSACSR is restored before returning to the guest if MSA is already enabled, and the existing FP exception die notifier is extended to catch the possible MSA FP exception and step over the ctcmsa instruction. The helper function kvm_own_msa() is added to enable MSA and restore the MSA context if it isn't already loaded, which will be used in a later patch when the guest attempts to use MSA for the first time and triggers an MSA disabled exception. The existing FPU helpers are extended to handle MSA. kvm_lose_fpu() saves the full MSA context if it is loaded (which includes the FPU context) and both kvm_lose_fpu() and kvm_drop_fpu() disable MSA. kvm_own_fpu() also needs to lose any MSA context if FR=0, since there would be a risk of getting reserved instruction exceptions if CU1 is enabled and we later try to save the MSA context. We shouldn't usually hit this case since it will be handled when emulating CU1 changes, however there's nothing to stop the guest modifying the Status register directly via the comm page, which will cause this case to get hit. 
Signed-off-by: James Hogan <james.hogan@imgtec.com> Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: Paul Burton <paul.burton@imgtec.com> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Gleb Natapov <gleb@kernel.org> Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org
Diffstat (limited to 'arch/mips/kvm/mips.c')
-rw-r--r--arch/mips/kvm/mips.c132
1 file changed, 116 insertions, 16 deletions
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7f86cb73d05d..a17f21015a0b 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1295,17 +1295,21 @@ skip_emul:
1295 1295
1296 if (ret == RESUME_GUEST) { 1296 if (ret == RESUME_GUEST) {
1297 /* 1297 /*
1298 * If FPU is enabled (i.e. the guest's FPU context is live), 1298 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
1299 * restore FCR31. 1299 * is live), restore FCR31 / MSACSR.
1300 * 1300 *
1301 * This should be before returning to the guest exception 1301 * This should be before returning to the guest exception
1302 * vector, as it may well cause an FP exception if there are 1302 * vector, as it may well cause an [MSA] FP exception if there
1303 * pending exception bits unmasked. (see 1303 * are pending exception bits unmasked. (see
1304 * kvm_mips_csr_die_notifier() for how that is handled). 1304 * kvm_mips_csr_die_notifier() for how that is handled).
1305 */ 1305 */
1306 if (kvm_mips_guest_has_fpu(&vcpu->arch) && 1306 if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
1307 read_c0_status() & ST0_CU1) 1307 read_c0_status() & ST0_CU1)
1308 __kvm_restore_fcsr(&vcpu->arch); 1308 __kvm_restore_fcsr(&vcpu->arch);
1309
1310 if (kvm_mips_guest_has_msa(&vcpu->arch) &&
1311 read_c0_config5() & MIPS_CONF5_MSAEN)
1312 __kvm_restore_msacsr(&vcpu->arch);
1309 } 1313 }
1310 1314
1311 /* Disable HTW before returning to guest or host */ 1315 /* Disable HTW before returning to guest or host */
@@ -1322,11 +1326,26 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
1322 1326
1323 preempt_disable(); 1327 preempt_disable();
1324 1328
1329 sr = kvm_read_c0_guest_status(cop0);
1330
1331 /*
1332 * If MSA state is already live, it is undefined how it interacts with
1333 * FR=0 FPU state, and we don't want to hit reserved instruction
1334 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
1335 * play it safe and save it first.
1336 *
1337 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
1338 * get called when guest CU1 is set, however we can't trust the guest
1339 * not to clobber the status register directly via the commpage.
1340 */
1341 if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
1342 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
1343 kvm_lose_fpu(vcpu);
1344
1325 /* 1345 /*
1326 * Enable FPU for guest 1346 * Enable FPU for guest
1327 * We set FR and FRE according to guest context 1347 * We set FR and FRE according to guest context
1328 */ 1348 */
1329 sr = kvm_read_c0_guest_status(cop0);
1330 change_c0_status(ST0_CU1 | ST0_FR, sr); 1349 change_c0_status(ST0_CU1 | ST0_FR, sr);
1331 if (cpu_has_fre) { 1350 if (cpu_has_fre) {
1332 cfg5 = kvm_read_c0_guest_config5(cop0); 1351 cfg5 = kvm_read_c0_guest_config5(cop0);
@@ -1343,10 +1362,73 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
1343 preempt_enable(); 1362 preempt_enable();
1344} 1363}
1345 1364
1346/* Drop FPU without saving it */ 1365#ifdef CONFIG_CPU_HAS_MSA
1366/* Enable MSA for guest and restore context */
1367void kvm_own_msa(struct kvm_vcpu *vcpu)
1368{
1369 struct mips_coproc *cop0 = vcpu->arch.cop0;
1370 unsigned int sr, cfg5;
1371
1372 preempt_disable();
1373
1374 /*
1375 * Enable FPU if enabled in guest, since we're restoring FPU context
1376 * anyway. We set FR and FRE according to guest context.
1377 */
1378 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
1379 sr = kvm_read_c0_guest_status(cop0);
1380
1381 /*
1382 * If FR=0 FPU state is already live, it is undefined how it
1383 * interacts with MSA state, so play it safe and save it first.
1384 */
1385 if (!(sr & ST0_FR) &&
1386 (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
1387 KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
1388 kvm_lose_fpu(vcpu);
1389
1390 change_c0_status(ST0_CU1 | ST0_FR, sr);
1391 if (sr & ST0_CU1 && cpu_has_fre) {
1392 cfg5 = kvm_read_c0_guest_config5(cop0);
1393 change_c0_config5(MIPS_CONF5_FRE, cfg5);
1394 }
1395 }
1396
1397 /* Enable MSA for guest */
1398 set_c0_config5(MIPS_CONF5_MSAEN);
1399 enable_fpu_hazard();
1400
1401 switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
1402 case KVM_MIPS_FPU_FPU:
1403 /*
1404 * Guest FPU state already loaded, only restore upper MSA state
1405 */
1406 __kvm_restore_msa_upper(&vcpu->arch);
1407 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
1408 break;
1409 case 0:
1410 /* Neither FPU or MSA already active, restore full MSA state */
1411 __kvm_restore_msa(&vcpu->arch);
1412 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
1413 if (kvm_mips_guest_has_fpu(&vcpu->arch))
1414 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
1415 break;
1416 default:
1417 break;
1418 }
1419
1420 preempt_enable();
1421}
1422#endif
1423
1424/* Drop FPU & MSA without saving it */
1347void kvm_drop_fpu(struct kvm_vcpu *vcpu) 1425void kvm_drop_fpu(struct kvm_vcpu *vcpu)
1348{ 1426{
1349 preempt_disable(); 1427 preempt_disable();
1428 if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
1429 disable_msa();
1430 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
1431 }
1350 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { 1432 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
1351 clear_c0_status(ST0_CU1 | ST0_FR); 1433 clear_c0_status(ST0_CU1 | ST0_FR);
1352 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; 1434 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
@@ -1354,18 +1436,29 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu)
1354 preempt_enable(); 1436 preempt_enable();
1355} 1437}
1356 1438
1357/* Save and disable FPU */ 1439/* Save and disable FPU & MSA */
1358void kvm_lose_fpu(struct kvm_vcpu *vcpu) 1440void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1359{ 1441{
1360 /* 1442 /*
1361 * FPU gets disabled in root context (hardware) when it is disabled in 1443 * FPU & MSA get disabled in root context (hardware) when it is disabled
1362 * guest context (software), but the register state in the hardware may 1444 * in guest context (software), but the register state in the hardware
1363 * still be in use. This is why we explicitly re-enable the hardware 1445 * may still be in use. This is why we explicitly re-enable the hardware
1364 * before saving. 1446 * before saving.
1365 */ 1447 */
1366 1448
1367 preempt_disable(); 1449 preempt_disable();
1368 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { 1450 if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
1451 set_c0_config5(MIPS_CONF5_MSAEN);
1452 enable_fpu_hazard();
1453
1454 __kvm_save_msa(&vcpu->arch);
1455
1456 /* Disable MSA & FPU */
1457 disable_msa();
1458 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
1459 clear_c0_status(ST0_CU1 | ST0_FR);
1460 vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
1461 } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
1369 set_c0_status(ST0_CU1); 1462 set_c0_status(ST0_CU1);
1370 enable_fpu_hazard(); 1463 enable_fpu_hazard();
1371 1464
@@ -1379,9 +1472,9 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1379} 1472}
1380 1473
1381/* 1474/*
1382 * Step over a specific ctc1 to FCSR which is used to restore guest FCSR state 1475 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
1383 * and may trigger a "harmless" FP exception if cause bits are set in the value 1476 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
1384 * being written. 1477 * exception if cause bits are set in the value being written.
1385 */ 1478 */
1386static int kvm_mips_csr_die_notify(struct notifier_block *self, 1479static int kvm_mips_csr_die_notify(struct notifier_block *self,
1387 unsigned long cmd, void *ptr) 1480 unsigned long cmd, void *ptr)
@@ -1390,8 +1483,8 @@ static int kvm_mips_csr_die_notify(struct notifier_block *self,
1390 struct pt_regs *regs = args->regs; 1483 struct pt_regs *regs = args->regs;
1391 unsigned long pc; 1484 unsigned long pc;
1392 1485
1393 /* Only interested in FPE */ 1486 /* Only interested in FPE and MSAFPE */
1394 if (cmd != DIE_FP) 1487 if (cmd != DIE_FP && cmd != DIE_MSAFP)
1395 return NOTIFY_DONE; 1488 return NOTIFY_DONE;
1396 1489
1397 /* Return immediately if guest context isn't active */ 1490 /* Return immediately if guest context isn't active */
@@ -1408,6 +1501,13 @@ static int kvm_mips_csr_die_notify(struct notifier_block *self,
1408 if (pc != (unsigned long)&__kvm_restore_fcsr + 4) 1501 if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
1409 return NOTIFY_DONE; 1502 return NOTIFY_DONE;
1410 break; 1503 break;
1504 case DIE_MSAFP:
1505 /* match 2nd/3rd instruction in __kvm_restore_msacsr */
1506 if (!cpu_has_msa ||
1507 pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
1508 pc > (unsigned long)&__kvm_restore_msacsr + 8)
1509 return NOTIFY_DONE;
1510 break;
1411 } 1511 }
1412 1512
1413 /* Move PC forward a little and continue executing */ 1513 /* Move PC forward a little and continue executing */