Diffstat (limited to 'arch/powerpc/kvm/book3s_pr.c')
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c  112
1 file changed, 61 insertions(+), 51 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index b853696b6d8e..5c496ecf5718 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -81,9 +81,7 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 	svcpu_put(svcpu);
 #endif
 
-	kvmppc_giveup_ext(vcpu, MSR_FP);
-	kvmppc_giveup_ext(vcpu, MSR_VEC);
-	kvmppc_giveup_ext(vcpu, MSR_VSX);
+	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
 	vcpu->cpu = -1;
 }
 
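Note: the three per-facility calls collapse into one here because kvmppc_giveup_ext(), as reworked later in this patch, treats its msr argument as a bit mask, widens a VSX request to include FP and VMX, and intersects the result with vcpu->arch.guest_owned_ext before doing any work. A minimal user-space sketch of that masking idea follows; the MSR_* values and the helper name are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Illustrative stand-ins for the facility bits; real values live in asm/reg.h. */
#define MSR_FP	0x1u
#define MSR_VEC	0x2u
#define MSR_VSX	0x4u

/* Sketch of the new giveup logic: only facilities the guest actually owns
 * are given up, and a VSX request implies FP and VMX as well. */
static unsigned int bits_to_give_up(unsigned int msr, unsigned int guest_owned_ext)
{
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;
	return msr & guest_owned_ext;
}

int main(void)
{
	/* Guest owns only FP, so asking for FP|VEC|VSX gives up just FP. */
	printf("giving up 0x%x\n",
	       bits_to_give_up(MSR_FP | MSR_VEC | MSR_VSX, MSR_FP));
	return 0;
}
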
@@ -433,10 +431,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 static inline int get_fpr_index(int i)
 {
-#ifdef CONFIG_VSX
-	i *= 2;
-#endif
-	return i;
+	return i * TS_FPRWIDTH;
 }
 
 /* Give up external provider (FPU, Altivec, VSX) */
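Note: the old #ifdef doubled the index by hand; the new version leans on TS_FPRWIDTH, the number of doublewords each slot of thread.fpr[] occupies (2 on CONFIG_VSX builds, 1 otherwise), so FP register i sits at thread_fpr[i * TS_FPRWIDTH] and the upper VSX doubleword, when present, in the following slot. A small stand-alone illustration of the mapping; TS_FPRWIDTH is hard-coded here purely for the sketch.

#include <stdio.h>

#define TS_FPRWIDTH 2	/* sketch value: 2 models a CONFIG_VSX build, 1 a non-VSX one */

/* Same mapping the patch introduces in book3s_pr.c. */
static inline int get_fpr_index(int i)
{
	return i * TS_FPRWIDTH;
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("FPR%d -> thread_fpr[%d], VSX high half -> thread_fpr[%d]\n",
		       i, get_fpr_index(i), get_fpr_index(i) + 1);
	return 0;
}
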
@@ -450,41 +445,49 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 	u64 *thread_fpr = (u64*)t->fpr;
 	int i;
 
-	if (!(vcpu->arch.guest_owned_ext & msr))
+	/*
+	 * VSX instructions can access FP and vector registers, so if
+	 * we are giving up VSX, make sure we give up FP and VMX as well.
+	 */
+	if (msr & MSR_VSX)
+		msr |= MSR_FP | MSR_VEC;
+
+	msr &= vcpu->arch.guest_owned_ext;
+	if (!msr)
 		return;
 
 #ifdef DEBUG_EXT
 	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
 #endif
 
-	switch (msr) {
-	case MSR_FP:
+	if (msr & MSR_FP) {
+		/*
+		 * Note that on CPUs with VSX, giveup_fpu stores
+		 * both the traditional FP registers and the added VSX
+		 * registers into thread.fpr[].
+		 */
 		giveup_fpu(current);
 		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
 			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
 
 		vcpu->arch.fpscr = t->fpscr.val;
-		break;
-	case MSR_VEC:
+
+#ifdef CONFIG_VSX
+		if (cpu_has_feature(CPU_FTR_VSX))
+			for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
+				vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
+#endif
+	}
+
 #ifdef CONFIG_ALTIVEC
+	if (msr & MSR_VEC) {
 		giveup_altivec(current);
 		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
 		vcpu->arch.vscr = t->vscr;
-#endif
-		break;
-	case MSR_VSX:
-#ifdef CONFIG_VSX
-		__giveup_vsx(current);
-		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
-			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
-#endif
-		break;
-	default:
-		BUG();
 	}
+#endif
 
-	vcpu->arch.guest_owned_ext &= ~msr;
-	current->thread.regs->msr &= ~msr;
+	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
 	kvmppc_recalc_shadow_msr(vcpu);
 }
 
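Note: the reworked MSR_FP branch relies on giveup_fpu() having stored each register as TS_FPRWIDTH doublewords in thread.fpr[], then splits that image: the low doubleword of each slot goes to vcpu->arch.fpr[] and, on VSX-capable CPUs, the high doubleword to vcpu->arch.vsr[]. The following user-space sketch models that split with shrunken arrays; names mirror the patch, but sizes and contents are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define NFPR		4	/* 32 in the kernel; shrunk for the sketch */
#define TS_FPRWIDTH	2	/* doublewords per slot on a VSX build */

int main(void)
{
	uint64_t thread_fpr[NFPR * TS_FPRWIDTH];	/* as filled by giveup_fpu() */
	uint64_t vcpu_fpr[NFPR], vcpu_vsx[NFPR];
	int i;

	for (i = 0; i < NFPR * TS_FPRWIDTH; i++)
		thread_fpr[i] = 0x1000 + i;		/* fake register contents */

	for (i = 0; i < NFPR; i++) {
		vcpu_fpr[i] = thread_fpr[i * TS_FPRWIDTH];	/* get_fpr_index(i) */
		vcpu_vsx[i] = thread_fpr[i * TS_FPRWIDTH + 1];	/* get_fpr_index(i) + 1 */
	}

	for (i = 0; i < NFPR; i++)
		printf("VSR%d: fp half %#llx, vsx half %#llx\n", i,
		       (unsigned long long)vcpu_fpr[i],
		       (unsigned long long)vcpu_vsx[i]);
	return 0;
}
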
@@ -544,47 +547,56 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 		return RESUME_GUEST;
 	}
 
-	/* We already own the ext */
-	if (vcpu->arch.guest_owned_ext & msr) {
-		return RESUME_GUEST;
+	if (msr == MSR_VSX) {
+		/* No VSX? Give an illegal instruction interrupt */
+#ifdef CONFIG_VSX
+		if (!cpu_has_feature(CPU_FTR_VSX))
+#endif
+		{
+			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+			return RESUME_GUEST;
+		}
+
+		/*
+		 * We have to load up all the FP and VMX registers before
+		 * we can let the guest use VSX instructions.
+		 */
+		msr = MSR_FP | MSR_VEC | MSR_VSX;
 	}
 
+	/* See if we already own all the ext(s) needed */
+	msr &= ~vcpu->arch.guest_owned_ext;
+	if (!msr)
+		return RESUME_GUEST;
+
 #ifdef DEBUG_EXT
 	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
 #endif
 
 	current->thread.regs->msr |= msr;
 
-	switch (msr) {
-	case MSR_FP:
+	if (msr & MSR_FP) {
 		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
 			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
-
+#ifdef CONFIG_VSX
+		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
+			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
+#endif
 		t->fpscr.val = vcpu->arch.fpscr;
 		t->fpexc_mode = 0;
 		kvmppc_load_up_fpu();
-		break;
-	case MSR_VEC:
+	}
+
+	if (msr & MSR_VEC) {
 #ifdef CONFIG_ALTIVEC
 		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
 		t->vscr = vcpu->arch.vscr;
 		t->vrsave = -1;
 		kvmppc_load_up_altivec();
 #endif
-		break;
-	case MSR_VSX:
-#ifdef CONFIG_VSX
-		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
-			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
-		kvmppc_load_up_vsx();
-#endif
-		break;
-	default:
-		BUG();
 	}
 
 	vcpu->arch.guest_owned_ext |= msr;
-
 	kvmppc_recalc_shadow_msr(vcpu);
 
 	return RESUME_GUEST;
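Note: the load-up path is the mirror image of the giveup path above: a bare MSR_VSX request is first checked against CPU_FTR_VSX (queuing an illegal-instruction program interrupt if the feature is absent), then widened to MSR_FP | MSR_VEC | MSR_VSX, and finally the bits the guest already owns are stripped so only the missing facilities get loaded. A compact sketch of that selection, with illustrative constants rather than the kernel's:

#include <stdio.h>

#define MSR_FP	0x1u
#define MSR_VEC	0x2u
#define MSR_VSX	0x4u

/* Sketch of the new kvmppc_handle_ext() selection logic. */
static unsigned int ext_to_load(unsigned int msr, unsigned int guest_owned_ext)
{
	if (msr == MSR_VSX)
		msr = MSR_FP | MSR_VEC | MSR_VSX;	/* VSX needs FP and VMX loaded too */
	return msr & ~guest_owned_ext;			/* skip what the guest already owns */
}

int main(void)
{
	/* Guest already owns FP; a VSX fault therefore loads only VEC and VSX. */
	printf("loading 0x%x\n", ext_to_load(MSR_VSX, MSR_FP));
	return 0;
}
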
@@ -1134,7 +1146,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* Save VSX state in stack */
 	used_vsr = current->thread.used_vsr;
 	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
-			__giveup_vsx(current);
+		__giveup_vsx(current);
 #endif
 
 	/* Remember the MSR with disabled extensions */
@@ -1151,14 +1163,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* No need for kvm_guest_exit. It's done in handle_exit.
 	   We also get here with interrupts enabled. */
 
-	current->thread.regs->msr = ext_msr;
-
 	/* Make sure we save the guest FPU/Altivec/VSX state */
-	kvmppc_giveup_ext(vcpu, MSR_FP);
-	kvmppc_giveup_ext(vcpu, MSR_VEC);
-	kvmppc_giveup_ext(vcpu, MSR_VSX);
+	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
+
+	current->thread.regs->msr = ext_msr;
 
-	/* Restore FPU state from stack */
+	/* Restore FPU/VSX state from stack */
 	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
 	current->thread.fpscr.val = fpscr;
 	current->thread.fpexc_mode = fpexc_mode;
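Note: on the exit path the guest's FP/VMX/VSX image is now handed back with a single kvmppc_giveup_ext() call before ext_msr (the MSR value remembered with the extensions disabled) is written back, and the renamed comment reflects that on VSX CPUs the on-stack fpr copy carries the VSX doublewords as well, since thread.fpr[] is TS_FPRWIDTH doublewords per register. A stripped-down user-space model of the surrounding snapshot/run/restore pattern, with made-up names and sizes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NREGS 4		/* stands in for 32 * TS_FPRWIDTH doublewords */

int main(void)
{
	uint64_t thread_fpr[NREGS] = { 1, 2, 3, 4 };	/* host FP/VSX image */
	uint64_t stack_copy[NREGS];
	uint64_t guest_fpr[NREGS];
	int i;

	memcpy(stack_copy, thread_fpr, sizeof(thread_fpr));	/* save host state "in stack" */

	for (i = 0; i < NREGS; i++)				/* guest runs and dirties the regs */
		thread_fpr[i] = 0x100 + i;

	memcpy(guest_fpr, thread_fpr, sizeof(guest_fpr));	/* giveup: copy state back to the vcpu */
	memcpy(thread_fpr, stack_copy, sizeof(thread_fpr));	/* restore FPU/VSX state from stack */

	printf("host fpr[0]=%llu, guest fpr[0]=%#llx\n",
	       (unsigned long long)thread_fpr[0],
	       (unsigned long long)guest_fpr[0]);
	return 0;
}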