aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kvm/book3s_hv_rmhandlers.S
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2012-03-01 20:38:23 -0500
committerAvi Kivity <avi@redhat.com>2012-04-08 07:01:36 -0400
commit8943633cf9b87980d261a022e90d94bc2c55df35 (patch)
tree84ab3cd65cc67149efeb75020ecc75a50c6f1c10 /arch/powerpc/kvm/book3s_hv_rmhandlers.S
parent7657f4089b097846cc37bfa2b74fc0bd2bd60e30 (diff)
KVM: PPC: Work around POWER7 DABR corruption problem
It turns out that on POWER7, writing to the DABR can cause a corrupted value to be written if the PMU is active and updating SDAR in continuous sampling mode. To work around this, we make sure that the PMU is inactive and SDAR updates are disabled (via MMCRA) when we are context-switching DABR. When the guest sets DABR via the H_SET_DABR hypercall, we use a slightly different workaround, which is to read back the DABR and write it again if it got corrupted. While we are at it, make it consistent that the saving and restoring of the guest's non-volatile GPRs and the FPRs are done with the guest setup of the PMU active. Signed-off-by: Paul Mackerras <paulus@samba.org> Signed-off-by: Alexander Graf <agraf@suse.de> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rmhandlers.S')
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S93
1 file changed, 54 insertions, 39 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index d595033bd449..a84aafce2a12 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -159,24 +159,15 @@ kvmppc_hv_entry:
159 mflr r0 159 mflr r0
160 std r0, HSTATE_VMHANDLER(r13) 160 std r0, HSTATE_VMHANDLER(r13)
161 161
162 ld r14, VCPU_GPR(r14)(r4) 162 /* Set partition DABR */
163 ld r15, VCPU_GPR(r15)(r4) 163 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
164 ld r16, VCPU_GPR(r16)(r4) 164 li r5,3
165 ld r17, VCPU_GPR(r17)(r4) 165 ld r6,VCPU_DABR(r4)
166 ld r18, VCPU_GPR(r18)(r4) 166 mtspr SPRN_DABRX,r5
167 ld r19, VCPU_GPR(r19)(r4) 167 mtspr SPRN_DABR,r6
168 ld r20, VCPU_GPR(r20)(r4) 168BEGIN_FTR_SECTION
169 ld r21, VCPU_GPR(r21)(r4) 169 isync
170 ld r22, VCPU_GPR(r22)(r4) 170END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
171 ld r23, VCPU_GPR(r23)(r4)
172 ld r24, VCPU_GPR(r24)(r4)
173 ld r25, VCPU_GPR(r25)(r4)
174 ld r26, VCPU_GPR(r26)(r4)
175 ld r27, VCPU_GPR(r27)(r4)
176 ld r28, VCPU_GPR(r28)(r4)
177 ld r29, VCPU_GPR(r29)(r4)
178 ld r30, VCPU_GPR(r30)(r4)
179 ld r31, VCPU_GPR(r31)(r4)
180 171
181 /* Load guest PMU registers */ 172 /* Load guest PMU registers */
182 /* R4 is live here (vcpu pointer) */ 173 /* R4 is live here (vcpu pointer) */
@@ -215,6 +206,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
215 /* Load up FP, VMX and VSX registers */ 206 /* Load up FP, VMX and VSX registers */
216 bl kvmppc_load_fp 207 bl kvmppc_load_fp
217 208
209 ld r14, VCPU_GPR(r14)(r4)
210 ld r15, VCPU_GPR(r15)(r4)
211 ld r16, VCPU_GPR(r16)(r4)
212 ld r17, VCPU_GPR(r17)(r4)
213 ld r18, VCPU_GPR(r18)(r4)
214 ld r19, VCPU_GPR(r19)(r4)
215 ld r20, VCPU_GPR(r20)(r4)
216 ld r21, VCPU_GPR(r21)(r4)
217 ld r22, VCPU_GPR(r22)(r4)
218 ld r23, VCPU_GPR(r23)(r4)
219 ld r24, VCPU_GPR(r24)(r4)
220 ld r25, VCPU_GPR(r25)(r4)
221 ld r26, VCPU_GPR(r26)(r4)
222 ld r27, VCPU_GPR(r27)(r4)
223 ld r28, VCPU_GPR(r28)(r4)
224 ld r29, VCPU_GPR(r29)(r4)
225 ld r30, VCPU_GPR(r30)(r4)
226 ld r31, VCPU_GPR(r31)(r4)
227
218BEGIN_FTR_SECTION 228BEGIN_FTR_SECTION
219 /* Switch DSCR to guest value */ 229 /* Switch DSCR to guest value */
220 ld r5, VCPU_DSCR(r4) 230 ld r5, VCPU_DSCR(r4)
@@ -256,12 +266,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
256 mtspr SPRN_DAR, r5 266 mtspr SPRN_DAR, r5
257 mtspr SPRN_DSISR, r6 267 mtspr SPRN_DSISR, r6
258 268
259 /* Set partition DABR */
260 li r5,3
261 ld r6,VCPU_DABR(r4)
262 mtspr SPRN_DABRX,r5
263 mtspr SPRN_DABR,r6
264
265BEGIN_FTR_SECTION 269BEGIN_FTR_SECTION
266 /* Restore AMR and UAMOR, set AMOR to all 1s */ 270 /* Restore AMR and UAMOR, set AMOR to all 1s */
267 ld r5,VCPU_AMR(r4) 271 ld r5,VCPU_AMR(r4)
@@ -955,12 +959,6 @@ BEGIN_FTR_SECTION
955 mtspr SPRN_AMR,r6 959 mtspr SPRN_AMR,r6
956END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 960END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
957 961
958 /* Restore host DABR and DABRX */
959 ld r5,HSTATE_DABR(r13)
960 li r6,7
961 mtspr SPRN_DABR,r5
962 mtspr SPRN_DABRX,r6
963
964 /* Switch DSCR back to host value */ 962 /* Switch DSCR back to host value */
965BEGIN_FTR_SECTION 963BEGIN_FTR_SECTION
966 mfspr r8, SPRN_DSCR 964 mfspr r8, SPRN_DSCR
@@ -999,6 +997,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
999 std r5, VCPU_SPRG2(r9) 997 std r5, VCPU_SPRG2(r9)
1000 std r6, VCPU_SPRG3(r9) 998 std r6, VCPU_SPRG3(r9)
1001 999
1000 /* save FP state */
1001 mr r3, r9
1002 bl .kvmppc_save_fp
1003
1002 /* Increment yield count if they have a VPA */ 1004 /* Increment yield count if they have a VPA */
1003 ld r8, VCPU_VPA(r9) /* do they have a VPA? */ 1005 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1004 cmpdi r8, 0 1006 cmpdi r8, 0
@@ -1013,6 +1015,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1013 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 1015 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1014 mfspr r4, SPRN_MMCR0 /* save MMCR0 */ 1016 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1015 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 1017 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
1018 mfspr r6, SPRN_MMCRA
1019BEGIN_FTR_SECTION
1020 /* On P7, clear MMCRA in order to disable SDAR updates */
1021 li r7, 0
1022 mtspr SPRN_MMCRA, r7
1023END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1016 isync 1024 isync
1017 beq 21f /* if no VPA, save PMU stuff anyway */ 1025 beq 21f /* if no VPA, save PMU stuff anyway */
1018 lbz r7, LPPACA_PMCINUSE(r8) 1026 lbz r7, LPPACA_PMCINUSE(r8)
@@ -1021,7 +1029,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1021 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ 1029 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1022 b 22f 1030 b 22f
102321: mfspr r5, SPRN_MMCR1 103121: mfspr r5, SPRN_MMCR1
1024 mfspr r6, SPRN_MMCRA
1025 std r4, VCPU_MMCR(r9) 1032 std r4, VCPU_MMCR(r9)
1026 std r5, VCPU_MMCR + 8(r9) 1033 std r5, VCPU_MMCR + 8(r9)
1027 std r6, VCPU_MMCR + 16(r9) 1034 std r6, VCPU_MMCR + 16(r9)
@@ -1046,17 +1053,20 @@ BEGIN_FTR_SECTION
1046 stw r11, VCPU_PMC + 28(r9) 1053 stw r11, VCPU_PMC + 28(r9)
1047END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1054END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
104822: 105522:
1049 /* save FP state */
1050 mr r3, r9
1051 bl .kvmppc_save_fp
1052 1056
1053 /* Secondary threads go off to take a nap on POWER7 */ 1057 /* Secondary threads go off to take a nap on POWER7 */
1054BEGIN_FTR_SECTION 1058BEGIN_FTR_SECTION
1055 lwz r0,VCPU_PTID(r3) 1059 lwz r0,VCPU_PTID(r9)
1056 cmpwi r0,0 1060 cmpwi r0,0
1057 bne secondary_nap 1061 bne secondary_nap
1058END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1062END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1059 1063
1064 /* Restore host DABR and DABRX */
1065 ld r5,HSTATE_DABR(r13)
1066 li r6,7
1067 mtspr SPRN_DABR,r5
1068 mtspr SPRN_DABRX,r6
1069
1060 /* 1070 /*
1061 * Reload DEC. HDEC interrupts were disabled when 1071 * Reload DEC. HDEC interrupts were disabled when
1062 * we reloaded the host's LPCR value. 1072 * we reloaded the host's LPCR value.
@@ -1393,7 +1403,12 @@ bounce_ext_interrupt:
1393 1403
1394_GLOBAL(kvmppc_h_set_dabr) 1404_GLOBAL(kvmppc_h_set_dabr)
1395 std r4,VCPU_DABR(r3) 1405 std r4,VCPU_DABR(r3)
1396 mtspr SPRN_DABR,r4 1406 /* Work around P7 bug where DABR can get corrupted on mtspr */
14071: mtspr SPRN_DABR,r4
1408 mfspr r5, SPRN_DABR
1409 cmpd r4, r5
1410 bne 1b
1411 isync
1397 li r3,0 1412 li r3,0
1398 blr 1413 blr
1399 1414
@@ -1615,8 +1630,8 @@ kvm_no_guest:
1615 * r3 = vcpu pointer 1630 * r3 = vcpu pointer
1616 */ 1631 */
1617_GLOBAL(kvmppc_save_fp) 1632_GLOBAL(kvmppc_save_fp)
1618 mfmsr r9 1633 mfmsr r5
1619 ori r8,r9,MSR_FP 1634 ori r8,r5,MSR_FP
1620#ifdef CONFIG_ALTIVEC 1635#ifdef CONFIG_ALTIVEC
1621BEGIN_FTR_SECTION 1636BEGIN_FTR_SECTION
1622 oris r8,r8,MSR_VEC@h 1637 oris r8,r8,MSR_VEC@h
@@ -1665,7 +1680,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1665#endif 1680#endif
1666 mfspr r6,SPRN_VRSAVE 1681 mfspr r6,SPRN_VRSAVE
1667 stw r6,VCPU_VRSAVE(r3) 1682 stw r6,VCPU_VRSAVE(r3)
1668 mtmsrd r9 1683 mtmsrd r5
1669 isync 1684 isync
1670 blr 1685 blr
1671 1686