author	Simon Guo <wei.guo.simon@gmail.com>	2018-05-23 03:01:48 -0400
committer	Paul Mackerras <paulus@ozlabs.org>	2018-05-31 20:27:59 -0400
commit	6f597c6b63b6f3675914b5ec8fcd008a58678650 (patch)
tree	f65908df9c0063b4e2c453653a53940171efe3b5
parent	009c872a8bc4d38f487a9bd62423d019e4322517 (diff)
KVM: PPC: Book3S PR: Add guest MSR parameter for kvmppc_save_tm()/kvmppc_restore_tm()
HV KVM and PR KVM need different MSR sources to indicate whether treclaim. or trecheckpoint. is necessary.

This patch adds a new parameter (guest MSR) to the kvmppc_save_tm()/kvmppc_restore_tm() APIs:
- For HV KVM, it is VCPU_MSR
- For PR KVM, it is the current host MSR or VCPU_SHADOW_SRR1

This enhancement enables these two APIs to be reused by PR KVM later, while keeping the HV KVM logic unchanged.

This patch also reworks kvmppc_save_tm()/kvmppc_restore_tm() to have a clean ABI: r3 for the vcpu and r4 for the guest MSR.

During kvmppc_save_tm()/kvmppc_restore_tm(), R1 needs to be saved and restored. Currently R1 is saved into HSTATE_HOST_R1. In PR KVM, we are going to add a C function wrapper for kvmppc_save_tm()/kvmppc_restore_tm() in which R1 will be adjusted for the added stack frame and saved into HSTATE_HOST_R1. There are several places in HV KVM that load HSTATE_HOST_R1 into R1, and we don't want the TM code to introduce risk or confusion there. This patch therefore uses HSTATE_SCRATCH2 to save/restore R1 in kvmppc_save_tm()/kvmppc_restore_tm(), since R1 is really a temporary/scratch value here.

[paulus@ozlabs.org - rebased on top of 7b0e827c6970 ("KVM: PPC: Book3S HV: Factor fake-suspend handling out of kvmppc_save/restore_tm", 2018-05-30)]

Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
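A note on the new ABI for readers: the PPC64 ELF calling convention passes a C function's first two arguments in r3 and r4, so the reworked entry points line up with an ordinary C prototype. A minimal sketch of what a later C caller could look like, assuming hypothetical C-level prototypes for the assembly entry points (these declarations are illustrative, not part of this patch):

	#include <linux/types.h>

	struct kvm_vcpu;

	/* Hypothetical C-level view of the new register ABI:
	 * vcpu arrives in r3, guest_msr in r4.
	 */
	void kvmppc_save_tm(struct kvm_vcpu *vcpu, u64 guest_msr);
	void kvmppc_restore_tm(struct kvm_vcpu *vcpu, u64 guest_msr);

	/* Sketch of the kind of PR KVM wrapper the message anticipates:
	 * the caller can pass whatever MSR image it tracks (e.g. the
	 * shadow MSR) instead of the assembly hard-coding VCPU_MSR as
	 * the source.
	 */
	static inline void save_tm_pr_sketch(struct kvm_vcpu *vcpu, u64 shadow_msr)
	{
		kvmppc_save_tm(vcpu, shadow_msr);
	}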
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	33
-rw-r--r--	arch/powerpc/kvm/tm.S	71
2 files changed, 57 insertions(+), 47 deletions(-)
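Before the diff itself, note the TS-bit test that recurs below: rldicl. rotates the MSR passed in r4 left by 64 - MSR_TS_S_LG and keeps only the low two bits, extracting the two-bit MSR[TS] field; a zero result means transactional memory was not active in the guest, so the save/restore paths skip the treclaim./trecheckpoint. work. A rough C equivalent of that test (a sketch; MSR_TS_S_LG is 33 in arch/powerpc/include/asm/reg.h):

	#include <stdint.h>

	#define MSR_TS_S_LG	33	/* low bit of the two-bit MSR[TS] field */

	/* Nonzero TS (0b01 = suspended, 0b10 = transactional) means the
	 * guest was inside a transaction and its TM state must be
	 * reclaimed or checkpointed.
	 */
	static inline int guest_tm_active(uint64_t guest_msr)
	{
		return ((guest_msr >> MSR_TS_S_LG) & 0x3) != 0;
	}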
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 8e016598692e..75e3bbf8c957 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -793,7 +793,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
 	/*
 	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
 	 */
+	mr	r3, r4
+	ld	r4, VCPU_MSR(r3)
 	bl	kvmppc_restore_tm_hv
+	ld	r4, HSTATE_KVM_VCPU(r13)
 91:
 #endif
 
@@ -1777,7 +1780,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
 	/*
 	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
 	 */
+	mr	r3, r9
+	ld	r4, VCPU_MSR(r3)
 	bl	kvmppc_save_tm_hv
+	ld	r9, HSTATE_KVM_VCPU(r13)
 91:
 #endif
 
@@ -2680,7 +2686,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
 	/*
 	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
 	 */
-	ld	r9, HSTATE_KVM_VCPU(r13)
+	ld	r3, HSTATE_KVM_VCPU(r13)
+	ld	r4, VCPU_MSR(r3)
 	bl	kvmppc_save_tm_hv
 91:
 #endif
@@ -2799,7 +2806,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
 	/*
 	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
 	 */
+	mr	r3, r4
+	ld	r4, VCPU_MSR(r3)
 	bl	kvmppc_restore_tm_hv
+	ld	r4, HSTATE_KVM_VCPU(r13)
 91:
 #endif
 
@@ -3120,9 +3130,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 /*
  * Save transactional state and TM-related registers.
- * Called with r9 pointing to the vcpu struct.
+ * Called with r3 pointing to the vcpu struct and r4 containing
+ * the guest MSR value.
  * This can modify all checkpointed registers, but
- * restores r1, r2 and r9 (vcpu pointer) before exit.
+ * restores r1 and r2 before exit.
  */
 kvmppc_save_tm_hv:
 	/* See if we need to handle fake suspend mode */
@@ -3205,9 +3216,10 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
 
 /*
  * Restore transactional state and TM-related registers.
- * Called with r4 pointing to the vcpu struct.
+ * Called with r3 pointing to the vcpu struct
+ * and r4 containing the guest MSR value.
  * This potentially modifies all checkpointed registers.
- * It restores r1, r2, r4 from the PACA.
+ * It restores r1 and r2 from the PACA.
  */
 kvmppc_restore_tm_hv:
 	/*
@@ -3234,15 +3246,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
 	 * The user may change these outside of a transaction, so they must
 	 * always be context switched.
 	 */
-	ld	r5, VCPU_TFHAR(r4)
-	ld	r6, VCPU_TFIAR(r4)
-	ld	r7, VCPU_TEXASR(r4)
+	ld	r5, VCPU_TFHAR(r3)
+	ld	r6, VCPU_TFIAR(r3)
+	ld	r7, VCPU_TEXASR(r3)
 	mtspr	SPRN_TFHAR, r5
 	mtspr	SPRN_TFIAR, r6
 	mtspr	SPRN_TEXASR, r7
 
-	ld	r5, VCPU_MSR(r4)
-	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+	rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
 	beqlr		/* TM not active in guest */
 
 	/* Make sure the failure summary is set */
@@ -3255,10 +3266,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
 	b	9f		/* and return */
 10:	stdu	r1, -PPC_MIN_STKFRM(r1)
 	/* guest is in transactional state, so simulate rollback */
-	mr	r3, r4
 	bl	kvmhv_emulate_tm_rollback
 	nop
-	ld	r4, HSTATE_KVM_VCPU(r13)	/* our vcpu pointer has been trashed */
 	addi	r1, r1, PPC_MIN_STKFRM
 9:	ld	r0, PPC_LR_STKOFF(r1)
 	mtlr	r0
diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S
index ba97789c41ca..f027b5a0c0f0 100644
--- a/arch/powerpc/kvm/tm.S
+++ b/arch/powerpc/kvm/tm.S
@@ -26,9 +26,12 @@
 
 /*
  * Save transactional state and TM-related registers.
- * Called with r9 pointing to the vcpu struct.
+ * Called with:
+ * - r3 pointing to the vcpu struct
+ * - r4 containing the guest MSR with current TS bits
+ *   (for HV KVM, it is VCPU_MSR; for PR KVM, it is the host MSR).
  * This can modify all checkpointed registers, but
- * restores r1, r2 and r9 (vcpu pointer) before exit.
+ * restores r1, r2 before exit.
  */
 _GLOBAL(kvmppc_save_tm)
 	mflr	r0
@@ -40,20 +43,17 @@ _GLOBAL(kvmppc_save_tm)
 	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
 	mtmsrd	r8
 
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-	ld	r5, VCPU_MSR(r9)
-	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+	rldicl. r4, r4, 64 - MSR_TS_S_LG, 62
 	beq	1f	/* TM not active in guest. */
-#endif
 
-	std	r1, HSTATE_HOST_R1(r13)
-	li	r3, TM_CAUSE_KVM_RESCHED
+	std	r1, HSTATE_SCRATCH2(r13)
+	std	r3, HSTATE_SCRATCH1(r13)
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 BEGIN_FTR_SECTION
 	/* Emulation of the treclaim instruction needs TEXASR before treclaim */
 	mfspr	r6, SPRN_TEXASR
-	std	r6, VCPU_ORIG_TEXASR(r9)
+	std	r6, VCPU_ORIG_TEXASR(r3)
 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
 #endif
 
@@ -61,6 +61,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
 	li	r5, 0
 	mtmsrd	r5, 1
 
+	li	r3, TM_CAUSE_KVM_RESCHED
+
 	/* All GPRs are volatile at this point. */
 	TRECLAIM(R3)
 
@@ -68,9 +70,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
 	SET_SCRATCH0(r13)
 	GET_PACA(r13)
 	std	r9, PACATMSCRATCH(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-	ld	r9, HSTATE_KVM_VCPU(r13)
-#endif
+	ld	r9, HSTATE_SCRATCH1(r13)
 
 	/* Get a few more GPRs free. */
 	std	r29, VCPU_GPRS_TM(29)(r9)
@@ -102,7 +102,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
 	std	r4, VCPU_GPRS_TM(9)(r9)
 
 	/* Reload stack pointer and TOC. */
-	ld	r1, HSTATE_HOST_R1(r13)
+	ld	r1, HSTATE_SCRATCH2(r13)
 	ld	r2, PACATOC(r13)
 
 	/* Set MSR RI now we have r1 and r13 back. */
@@ -156,9 +156,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
 
 /*
  * Restore transactional state and TM-related registers.
- * Called with r4 pointing to the vcpu struct.
+ * Called with:
+ * - r3 pointing to the vcpu struct
+ * - r4 containing the guest MSR with desired TS bits:
+ *	for HV KVM, it is VCPU_MSR;
+ *	for PR KVM, it is provided by the caller.
  * This potentially modifies all checkpointed registers.
- * It restores r1, r2, r4 from the PACA.
+ * It restores r1, r2 from the PACA.
  */
 _GLOBAL(kvmppc_restore_tm)
 	mflr	r0
@@ -177,19 +181,17 @@ _GLOBAL(kvmppc_restore_tm)
 	 * The user may change these outside of a transaction, so they must
 	 * always be context switched.
 	 */
-	ld	r5, VCPU_TFHAR(r4)
-	ld	r6, VCPU_TFIAR(r4)
-	ld	r7, VCPU_TEXASR(r4)
+	ld	r5, VCPU_TFHAR(r3)
+	ld	r6, VCPU_TFIAR(r3)
+	ld	r7, VCPU_TEXASR(r3)
 	mtspr	SPRN_TFHAR, r5
 	mtspr	SPRN_TFIAR, r6
 	mtspr	SPRN_TEXASR, r7
 
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-	ld	r5, VCPU_MSR(r4)
+	mr	r5, r4
 	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
 	beqlr		/* TM not active in guest */
-#endif
-	std	r1, HSTATE_HOST_R1(r13)
+	std	r1, HSTATE_SCRATCH2(r13)
 
 	/* Make sure the failure summary is set, otherwise we'll program check
 	 * when we trechkpt.  It's possible that this might have been not set
@@ -205,21 +207,21 @@ _GLOBAL(kvmppc_restore_tm)
 	 * some SPRs.
 	 */
 
-	mr	r31, r4
+	mr	r31, r3
 	addi	r3, r31, VCPU_FPRS_TM
 	bl	load_fp_state
 	addi	r3, r31, VCPU_VRS_TM
 	bl	load_vr_state
-	mr	r4, r31
-	lwz	r7, VCPU_VRSAVE_TM(r4)
+	mr	r3, r31
+	lwz	r7, VCPU_VRSAVE_TM(r3)
 	mtspr	SPRN_VRSAVE, r7
 
-	ld	r5, VCPU_LR_TM(r4)
-	lwz	r6, VCPU_CR_TM(r4)
-	ld	r7, VCPU_CTR_TM(r4)
-	ld	r8, VCPU_AMR_TM(r4)
-	ld	r9, VCPU_TAR_TM(r4)
-	ld	r10, VCPU_XER_TM(r4)
+	ld	r5, VCPU_LR_TM(r3)
+	lwz	r6, VCPU_CR_TM(r3)
+	ld	r7, VCPU_CTR_TM(r3)
+	ld	r8, VCPU_AMR_TM(r3)
+	ld	r9, VCPU_TAR_TM(r3)
+	ld	r10, VCPU_XER_TM(r3)
 	mtlr	r5
 	mtcr	r6
 	mtctr	r7
@@ -232,8 +234,8 @@ _GLOBAL(kvmppc_restore_tm)
 	 * till the last moment to avoid running with userspace PPR and DSCR for
 	 * too long.
 	 */
-	ld	r29, VCPU_DSCR_TM(r4)
-	ld	r30, VCPU_PPR_TM(r4)
+	ld	r29, VCPU_DSCR_TM(r3)
+	ld	r30, VCPU_PPR_TM(r3)
 
 	std	r2, PACATMSCRATCH(r13)	/* Save TOC */
 
@@ -265,9 +267,8 @@ _GLOBAL(kvmppc_restore_tm)
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	ld	r29, HSTATE_DSCR(r13)
 	mtspr	SPRN_DSCR, r29
-	ld	r4, HSTATE_KVM_VCPU(r13)
 #endif
-	ld	r1, HSTATE_HOST_R1(r13)
+	ld	r1, HSTATE_SCRATCH2(r13)
 	ld	r2, PACATMSCRATCH(r13)
 
 	/* Set the MSR RI since we have our registers back. */