Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_builtin.c')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c | 117
1 file changed, 116 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 90644db9d38e..49a2c7825e04 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -278,7 +278,8 @@ void kvmhv_commence_exit(int trap)
 	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
 	int ptid = local_paca->kvm_hstate.ptid;
 	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
-	int me, ee, i;
+	int me, ee, i, t;
+	int cpu0;
 
 	/* Set our bit in the threads-exiting-guest map in the 0xff00
 	   bits of vcore->entry_exit_map */
@@ -320,6 +321,22 @@ void kvmhv_commence_exit(int trap)
 		if ((ee >> 8) == 0)
 			kvmhv_interrupt_vcore(vc, ee);
 	}
+
+	/*
+	 * On POWER9 when running a HPT guest on a radix host (sip != NULL),
+	 * we have to interrupt inactive CPU threads to get them to
+	 * restore the host LPCR value.
+	 */
+	if (sip->lpcr_req) {
+		if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
+			vc = local_paca->kvm_hstate.kvm_vcore;
+			cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
+			for (t = 1; t < threads_per_core; ++t) {
+				if (sip->napped[t])
+					kvmhv_rm_send_ipi(cpu0 + t);
+			}
+		}
+	}
 }
 
 struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
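
The hunk above relies on cmpxchg() to make whichever thread exits first the only one that wakes the napping threads, so each napping thread receives exactly one IPI. A minimal user-space sketch of the same first-one-wins claim using C11 atomics follows; the struct and function names here are invented for illustration, while in the patch the claim is cmpxchg(&sip->do_restore, 0, 1) and the wakeup is kvmhv_rm_send_ipi().

#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4                      /* SMT threads per core, illustrative */

struct split_state {
        atomic_int do_restore;          /* 0 = unclaimed, 1 = a thread owns the wakeup */
        int napped[NTHREADS];           /* nonzero if that thread is napping */
};

static void send_wakeup(int cpu)        /* stand-in for kvmhv_rm_send_ipi() */
{
        printf("IPI to cpu %d\n", cpu);
}

static void commence_exit(struct split_state *s, int cpu0)
{
        int expected = 0;

        /* only the thread that flips do_restore 0 -> 1 sends the IPIs */
        if (atomic_compare_exchange_strong(&s->do_restore, &expected, 1)) {
                for (int t = 1; t < NTHREADS; ++t)
                        if (s->napped[t])
                                send_wakeup(cpu0 + t);
        }
}

int main(void)
{
        struct split_state s = { .napped = { 0, 1, 0, 1 } };

        commence_exit(&s, 0);           /* wakes cpus 1 and 3 */
        commence_exit(&s, 0);           /* loses the race: sends nothing */
        return 0;
}
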
@@ -529,6 +546,8 @@ static inline bool is_rm(void)
 
 unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
 {
+	if (!kvmppc_xics_enabled(vcpu))
+		return H_TOO_HARD;
 	if (xive_enabled()) {
 		if (is_rm())
 			return xive_rm_h_xirr(vcpu);
@@ -541,6 +560,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
 
 unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
 {
+	if (!kvmppc_xics_enabled(vcpu))
+		return H_TOO_HARD;
 	vcpu->arch.gpr[5] = get_tb();
 	if (xive_enabled()) {
 		if (is_rm())
@@ -554,6 +575,8 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
 
 unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
 {
+	if (!kvmppc_xics_enabled(vcpu))
+		return H_TOO_HARD;
 	if (xive_enabled()) {
 		if (is_rm())
 			return xive_rm_h_ipoll(vcpu, server);
@@ -567,6 +590,8 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
 int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 		    unsigned long mfrr)
 {
+	if (!kvmppc_xics_enabled(vcpu))
+		return H_TOO_HARD;
 	if (xive_enabled()) {
 		if (is_rm())
 			return xive_rm_h_ipi(vcpu, server, mfrr);
@@ -579,6 +604,8 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 
 int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 {
+	if (!kvmppc_xics_enabled(vcpu))
+		return H_TOO_HARD;
 	if (xive_enabled()) {
 		if (is_rm())
 			return xive_rm_h_cppr(vcpu, cppr);
@@ -591,6 +618,8 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 
 int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 {
+	if (!kvmppc_xics_enabled(vcpu))
+		return H_TOO_HARD;
 	if (xive_enabled()) {
 		if (is_rm())
 			return xive_rm_h_eoi(vcpu, xirr);
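
The six hunks above all add the same guard: if the vCPU has no in-kernel XICS device, the real-mode handler returns H_TOO_HARD, which tells the guest-exit path to drop out of real mode and let the virtual-mode hypercall path handle (or reject) the call. A compact sketch of that convention follows; the handler bodies, the dispatch loop, and the numeric H_TOO_HARD value are placeholders, and only the bail-out-and-retry shape is the point.

#include <stdio.h>

#define H_SUCCESS       0
#define H_TOO_HARD      9999    /* internal sentinel, never returned to the guest;
                                   value illustrative */

/* Real-mode handler: fast path with limited services; bail when unsure. */
static long rm_hcall(int xics_enabled)
{
        if (!xics_enabled)
                return H_TOO_HARD;
        return H_SUCCESS;
}

/* Virtual-mode handler: slower, but the full kernel is available. */
static long virtmode_hcall(void)
{
        return H_SUCCESS;
}

static long handle_hcall(int xics_enabled)
{
        long ret = rm_hcall(xics_enabled);

        if (ret == H_TOO_HARD)          /* exit the guest fully and redo the call */
                ret = virtmode_hcall();
        return ret;
}

int main(void)
{
        printf("ret = %ld\n", handle_hcall(0));
        return 0;
}
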
@@ -601,3 +630,89 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 	return xics_rm_h_eoi(vcpu, xirr);
 }
 #endif /* CONFIG_KVM_XICS */
+
+void kvmppc_bad_interrupt(struct pt_regs *regs)
+{
+	die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
+	panic("Bad KVM trap");
+}
+
+/*
+ * Functions used to switch LPCR HR and UPRT bits on all threads
+ * when entering and exiting HPT guests on a radix host.
+ */
+
+#define PHASE_REALMODE		1	/* in real mode */
+#define PHASE_SET_LPCR		2	/* have set LPCR */
+#define PHASE_OUT_OF_GUEST	4	/* have finished executing in guest */
+#define PHASE_RESET_LPCR	8	/* have reset LPCR to host value */
+
+#define ALL(p)		(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))
+
+static void wait_for_sync(struct kvm_split_mode *sip, int phase)
+{
+	int thr = local_paca->kvm_hstate.tid;
+
+	sip->lpcr_sync.phase[thr] |= phase;
+	phase = ALL(phase);
+	while ((sip->lpcr_sync.allphases & phase) != phase) {
+		HMT_low();
+		barrier();
+	}
+	HMT_medium();
+}
+
+void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
+{
+	unsigned long rb, set;
+
+	/* wait for every other thread to get to real mode */
+	wait_for_sync(sip, PHASE_REALMODE);
+
+	/* Set LPCR and LPIDR */
+	mtspr(SPRN_LPCR, sip->lpcr_req);
+	mtspr(SPRN_LPID, sip->lpidr_req);
+	isync();
+
+	/* Invalidate the TLB on thread 0 */
+	if (local_paca->kvm_hstate.tid == 0) {
+		sip->do_set = 0;
+		asm volatile("ptesync" : : : "memory");
+		for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) {
+			rb = TLBIEL_INVAL_SET_LPID +
+				(set << TLBIEL_INVAL_SET_SHIFT);
+			asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
+				     "r" (rb), "r" (0));
+		}
+		asm volatile("ptesync" : : : "memory");
+	}
+
+	/* indicate that we have done so and wait for others */
+	wait_for_sync(sip, PHASE_SET_LPCR);
+	/* order read of sip->lpcr_sync.allphases vs. sip->do_set */
+	smp_rmb();
+}
+
+/*
+ * Called when a thread that has been in the guest needs
+ * to reload the host LPCR value - but only on POWER9 when
+ * running a HPT guest on a radix host.
+ */
+void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
+{
+	/* we're out of the guest... */
+	wait_for_sync(sip, PHASE_OUT_OF_GUEST);
+
+	mtspr(SPRN_LPID, 0);
+	mtspr(SPRN_LPCR, sip->host_lpcr);
+	isync();
+
+	if (local_paca->kvm_hstate.tid == 0) {
+		sip->do_restore = 0;
+		smp_wmb();	/* order store of do_restore vs. phase */
+	}
+
+	wait_for_sync(sip, PHASE_RESET_LPCR);
+	smp_mb();
+	local_paca->kvm_hstate.kvm_split_mode = NULL;
+}
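
wait_for_sync() in the hunk above is a one-shot barrier: each of the four SMT threads owns one byte of lpcr_sync.allphases and publishes its progress there, and ALL(p) replicates the phase bit into every byte so a single word-wide compare tells a thread when everyone has arrived (ALL(PHASE_SET_LPCR) == 0x02020202). The sketch below checks that arithmetic in user space; the union overlaying phase[4] on allphases is an assumption that mirrors what the ALL() macro implies about the layout in struct kvm_split_mode.

#include <assert.h>
#include <stdint.h>

#define PHASE_SET_LPCR	2
#define ALL(p)	(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))

union lpcr_sync {
        uint32_t allphases;     /* one read covers every thread */
        uint8_t phase[4];       /* each SMT thread owns one byte */
};

int main(void)
{
        union lpcr_sync s = { .allphases = 0 };

        /* each thread does the kernel's sip->lpcr_sync.phase[thr] |= phase ... */
        for (int thr = 0; thr < 4; ++thr) {
                s.phase[thr] |= PHASE_SET_LPCR;
                /* ... and until the last one arrives the word test stays closed */
                if (thr < 3)
                        assert((s.allphases & ALL(PHASE_SET_LPCR)) != ALL(PHASE_SET_LPCR));
        }
        /* once all four bytes carry the bit, every spinner falls through */
        assert((s.allphases & ALL(PHASE_SET_LPCR)) == ALL(PHASE_SET_LPCR));
        assert(ALL(PHASE_SET_LPCR) == 0x02020202);
        return 0;
}

The two entry points then pair up across a guest run: every thread passes PHASE_REALMODE and PHASE_SET_LPCR in kvmhv_p9_set_lpcr() before entering the guest, and PHASE_OUT_OF_GUEST and PHASE_RESET_LPCR in kvmhv_p9_restore_lpcr() on the way out, with thread 0 alone doing the tlbiel flush on entry and the do_restore reset on exit.
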