Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/reg.h          |   1
-rw-r--r--  arch/powerpc/include/asm/tm.h           |   4
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c     |   9
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 165
4 files changed, 149 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 6ba8d4af3999..af21e876837e 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -213,6 +213,7 @@
 #define SPRN_ACOP	0x1F	/* Available Coprocessor Register */
 #define SPRN_TFIAR	0x81	/* Transaction Failure Inst Addr   */
 #define SPRN_TEXASR	0x82	/* Transaction EXception & Summary */
+#define TEXASR_FS	__MASK(63-36)	/* Transaction Failure Summary */
 #define SPRN_TEXASRU	0x83	/* ''	   ''	   ''	 Upper 32  */
 #define SPRN_TFHAR	0x80	/* Transaction Failure Handler Addr */
 #define SPRN_CTRLF	0x088
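
A note on the new TEXASR_FS constant: __MASK counts bits from the least-significant end, while the ISA numbers TEXASR bits from the most-significant end, hence the 63-36 conversion. Assuming the usual reg.h definition of __MASK, the expansion is:

	#define __MASK(X)	(1UL << (X))
	/* ISA (MSB-0) bit 36 of TEXASR == bit 27 from the LSB */
	#define TEXASR_FS	__MASK(63 - 36)	/* == 0x0000000008000000UL */
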
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index 9dfbc34bdbf5..386a3efd3497 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -7,6 +7,8 @@
 
 #include <uapi/asm/tm.h>
 
+#ifndef __ASSEMBLY__
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 extern void do_load_up_transact_fpu(struct thread_struct *thread);
 extern void do_load_up_transact_altivec(struct thread_struct *thread);
@@ -20,3 +22,5 @@ extern void tm_recheckpoint(struct thread_struct *thread,
 extern void tm_abort(uint8_t cause);
 extern void tm_save_sprs(struct thread_struct *thread);
 extern void tm_restore_sprs(struct thread_struct *thread);
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 303ece75b8e4..fb25ebc0af0c 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -262,7 +262,14 @@ int kvmppc_mmu_hv_init(void)
 
 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 {
-	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
+	unsigned long msr = vcpu->arch.intr_msr;
+
+	/* If transactional, change to suspend mode on IRQ delivery */
+	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
+		msr |= MSR_TS_S;
+	else
+		msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
+	kvmppc_set_msr(vcpu, msr);
 }
 
 /*
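
For context, the MSR[TS] macros this change relies on are defined in reg.h roughly as follows (paraphrased from the kernel headers of this era, so treat the exact values as an assumption to verify against the tree):

	#define MSR_TS_T_LG	34	/* Trans Mem state: Transactional */
	#define MSR_TS_S_LG	33	/* Trans Mem state: Suspended */
	#define MSR_TS_T	__MASK(MSR_TS_T_LG)
	#define MSR_TS_S	__MASK(MSR_TS_S_LG)
	#define MSR_TS_MASK	(MSR_TS_T | MSR_TS_S)
	#define MSR_TM_TRANSACTIONAL(x)	(((x) & MSR_TS_MASK) == MSR_TS_T)

The effect: a guest interrupted mid-transaction is delivered the interrupt in suspended state, while a guest that was already suspended (or outside any transaction) keeps its TS bits unchanged.
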
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 7c5788c735c9..61190ddd9f3b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -28,6 +28,9 @@
 #include <asm/exception-64s.h>
 #include <asm/kvm_book3s_asm.h>
 #include <asm/mmu-hash64.h>
+#include <asm/tm.h>
+
+#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 #ifdef __LITTLE_ENDIAN__
 #error Need to fix lppaca and SLB shadow accesses in little endian mode
@@ -597,6 +600,116 @@ BEGIN_FTR_SECTION
 	END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+	b	skip_tm
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+
+	/* Turn on TM/FP/VSX/VMX so we can restore them. */
+	mfmsr	r5
+	li	r6, MSR_TM >> 32
+	sldi	r6, r6, 32
+	or	r5, r5, r6
+	ori	r5, r5, MSR_FP
+	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
+	mtmsrd	r5
+
+	/*
+	 * The user may change these outside of a transaction, so they must
+	 * always be context switched.
+	 */
+	ld	r5, VCPU_TFHAR(r4)
+	ld	r6, VCPU_TFIAR(r4)
+	ld	r7, VCPU_TEXASR(r4)
+	mtspr	SPRN_TFHAR, r5
+	mtspr	SPRN_TFIAR, r6
+	mtspr	SPRN_TEXASR, r7
+
+	ld	r5, VCPU_MSR(r4)
+	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
+	beq	skip_tm		/* TM not active in guest */
+
+	/* Make sure the failure summary is set, otherwise we'll program check
+	 * when we trechkpt.  It's possible that this might not have been set
+	 * on a kvmppc_set_one_reg() call, but we shouldn't let that crash the
+	 * host.
+	 */
+	oris	r7, r7, (TEXASR_FS)@h
+	mtspr	SPRN_TEXASR, r7
+
+	/*
+	 * We need to load up the checkpointed state for the guest.
+	 * We need to do this early as it will blow away any GPRs, VSRs and
+	 * some SPRs.
+	 */
+
+	mr	r31, r4
+	addi	r3, r31, VCPU_FPRS_TM
+	bl	.load_fp_state
+	addi	r3, r31, VCPU_VRS_TM
+	bl	.load_vr_state
+	mr	r4, r31
+	lwz	r7, VCPU_VRSAVE_TM(r4)
+	mtspr	SPRN_VRSAVE, r7
+
+	ld	r5, VCPU_LR_TM(r4)
+	lwz	r6, VCPU_CR_TM(r4)
+	ld	r7, VCPU_CTR_TM(r4)
+	ld	r8, VCPU_AMR_TM(r4)
+	ld	r9, VCPU_TAR_TM(r4)
+	mtlr	r5
+	mtcr	r6
+	mtctr	r7
+	mtspr	SPRN_AMR, r8
+	mtspr	SPRN_TAR, r9
+
+	/*
+	 * Load up PPR and DSCR values but don't put them in the actual SPRs
+	 * till the last moment to avoid running with userspace PPR and DSCR
+	 * for too long.
+	 */
+	ld	r29, VCPU_DSCR_TM(r4)
+	ld	r30, VCPU_PPR_TM(r4)
+
+	std	r2, PACATMSCRATCH(r13)	/* Save TOC */
+
+	/* Clear the MSR RI bit since r1 and r13 are about to be clobbered. */
+	li	r5, 0
+	mtmsrd	r5, 1
+
+	/* Load GPRs r0-r28 */
+	reg = 0
+	.rept	29
+	ld	reg, VCPU_GPRS_TM(reg)(r31)
+	reg = reg + 1
+	.endr
+
+	mtspr	SPRN_DSCR, r29
+	mtspr	SPRN_PPR, r30
+
+	/* Load final GPRs */
+	ld	29, VCPU_GPRS_TM(29)(r31)
+	ld	30, VCPU_GPRS_TM(30)(r31)
+	ld	31, VCPU_GPRS_TM(31)(r31)
+
+	/* TM checkpointed state is now set up.  All GPRs are now volatile. */
+	TRECHKPT
+
+	/* Now let's get back the state we need. */
+	HMT_MEDIUM
+	GET_PACA(r13)
+	ld	r29, HSTATE_DSCR(r13)
+	mtspr	SPRN_DSCR, r29
+	ld	r4, HSTATE_KVM_VCPU(r13)
+	ld	r1, HSTATE_HOST_R1(r13)
+	ld	r2, PACATMSCRATCH(r13)
+
+	/* Set the MSR RI bit since we have our registers back. */
+	li	r5, MSR_RI
+	mtmsrd	r5, 1
+skip_tm:
+#endif
+
 	/* Load guest PMU registers */
 	/* R4 is live here (vcpu pointer) */
 	li	r3, 1
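
The rldicl. test above (reused by kvmppc_msr_interrupt at the end of this patch) rotates the guest MSR so the two transaction-state bits land at the bottom of the register and clears everything else; beq skip_tm then fires when the field is zero. A C rendering of what it computes, as an illustrative sketch only (msr_ts_field is not a kernel helper):

	/* Extract the 2-bit MSR[TS] field:
	 * 0 = non-transactional, 1 = suspended, 2 = transactional. */
	static inline unsigned long msr_ts_field(unsigned long msr)
	{
		return (msr >> MSR_TS_S_LG) & 3;	/* MSR_TS_S_LG == 33 */
	}
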
@@ -704,14 +817,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	ld	r6, VCPU_VTB(r4)
 	mtspr	SPRN_IC, r5
 	mtspr	SPRN_VTB, r6
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	ld	r5, VCPU_TFHAR(r4)
-	ld	r6, VCPU_TFIAR(r4)
-	ld	r7, VCPU_TEXASR(r4)
-	mtspr	SPRN_TFHAR, r5
-	mtspr	SPRN_TFIAR, r6
-	mtspr	SPRN_TEXASR, r7
-#endif
 	ld	r8, VCPU_EBBHR(r4)
 	mtspr	SPRN_EBBHR, r8
 	ld	r5, VCPU_EBBRR(r4)
@@ -817,7 +922,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 12:	mtspr	SPRN_SRR0, r10
 	mr	r10,r0
 	mtspr	SPRN_SRR1, r11
-	ld	r11, VCPU_INTR_MSR(r4)
+	mr	r9, r4
+	bl	kvmppc_msr_interrupt
 5:
 
 /*
@@ -1103,12 +1209,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
 BEGIN_FTR_SECTION
 	b	8f
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
-	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
-	mfmsr	r8
-	li	r0, 1
-	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
-	mtmsrd	r8
-
 	/* Save POWER8-specific registers */
 	mfspr	r5, SPRN_IAMR
 	mfspr	r6, SPRN_PSPB
@@ -1122,14 +1222,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	std	r5, VCPU_IC(r9)
 	std	r6, VCPU_VTB(r9)
 	std	r7, VCPU_TAR(r9)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	mfspr	r5, SPRN_TFHAR
-	mfspr	r6, SPRN_TFIAR
-	mfspr	r7, SPRN_TEXASR
-	std	r5, VCPU_TFHAR(r9)
-	std	r6, VCPU_TFIAR(r9)
-	std	r7, VCPU_TEXASR(r9)
-#endif
 	mfspr	r8, SPRN_EBBHR
 	std	r8, VCPU_EBBHR(r9)
 	mfspr	r5, SPRN_EBBRR
@@ -1557,7 +1649,7 @@ kvmppc_hdsi:
 	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
-	ld	r11, VCPU_INTR_MSR(r9)
+	bl	kvmppc_msr_interrupt
 fast_interrupt_c_return:
 6:	ld	r7, VCPU_CTR(r9)
 	lwz	r8, VCPU_XER(r9)
@@ -1626,7 +1718,7 @@ kvmppc_hisi:
 1:	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
-	ld	r11, VCPU_INTR_MSR(r9)
+	bl	kvmppc_msr_interrupt
 	b	fast_interrupt_c_return
 
 3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
@@ -1669,7 +1761,7 @@ sc_1_fast_return:
 	mtspr	SPRN_SRR0,r10
 	mtspr	SPRN_SRR1,r11
 	li	r10, BOOK3S_INTERRUPT_SYSCALL
-	ld	r11, VCPU_INTR_MSR(r9)
+	bl	kvmppc_msr_interrupt
 	mr	r4,r9
 	b	fast_guest_return
 
@@ -1997,7 +2089,7 @@ machine_check_realmode:
 	beq	mc_cont
 	/* If not, deliver a machine check.  SRR0/1 are already set */
 	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-	ld	r11, VCPU_INTR_MSR(r9)
+	bl	kvmppc_msr_interrupt
 	b	fast_interrupt_c_return
 
 /*
@@ -2138,8 +2230,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	mfspr	r6,SPRN_VRSAVE
 	stw	r6,VCPU_VRSAVE(r31)
 	mtlr	r30
-	mtmsrd	r5
-	isync
 	blr
 
 /*
@@ -2186,3 +2276,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  */
 kvmppc_bad_host_intr:
 	b	.
+
+/*
+ * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
+ * from VCPU_INTR_MSR and is modified based on the required TM state changes.
+ *	r11 has the guest MSR value (in/out)
+ *	r9 has a vcpu pointer (in)
+ *	r0 is used as a scratch register
+ */
+kvmppc_msr_interrupt:
+	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
+	cmpwi	r0, 2		/* Check if we are in transactional state.. */
+	ld	r11, VCPU_INTR_MSR(r9)
+	bne	1f
+	/* ... if transactional, change to suspended */
+	li	r0, 1
+1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+	blr