author    Simon Guo <wei.guo.simon@gmail.com>  2018-05-23 03:02:07 -0400
committer Paul Mackerras <paulus@ozlabs.org>   2018-05-31 20:30:43 -0400
commit    7284ca8a5eaee311d2e4aec73b2df9bd57e0cdcb (patch)
tree      9425521c48ed9b4b5f073e0ba964262451f1da93
parent    68ab07b985764ec5be816e7054a84b7ad121afc7 (diff)
KVM: PPC: Book3S PR: Support TAR handling for PR KVM HTM
Currently the guest kernel doesn't handle the TAR facility-unavailable interrupt and always runs with the TAR bit on, while PR KVM enables TAR lazily. TAR is not a frequently used register and is not included in the SVCPU struct. As a result, the checkpointed TAR value can end up bogus.

To solve this, make the TAR bit in vcpu->arch.fscr consistent with shadow_fscr when TM is enabled. At the end of emulating treclaim., the correct TAR value needs to be loaded into the register if the FSCR_TAR bit is on. At the beginning of emulating trechkpt., TAR needs to be flushed so that the right TAR value can be copied into tar_tm.

Tested with:
tools/testing/selftests/powerpc/tm/tm-tar
tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar (with the DSCR/PPR-related testing removed)

Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
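As a rough mental model of the lazy TAR handling described above, the C sketch below condenses the flush/reload pair into a tiny state machine. It is a minimal sketch, not the kernel's code: struct vcpu_sketch, hw_tar, and the *_sketch helpers are illustrative stand-ins, while FSCR_TAR/FSCR_TAR_LG and the helper roles mirror kvmppc_giveup_fac()/kvmppc_handle_fac() from the patch.

/*
 * Minimal sketch of PR KVM's lazy TAR facility handling.
 * Simplified, assumed model -- not the kernel's implementation.
 */
#include <stdint.h>

#define FSCR_TAR_LG	8			/* TAR bit position in FSCR */
#define FSCR_TAR	(1ULL << FSCR_TAR_LG)

struct vcpu_sketch {
	uint64_t fscr;		/* facilities the guest is allowed to use */
	uint64_t shadow_fscr;	/* facilities currently live in hardware */
	uint64_t tar;		/* software copy of the guest's TAR */
	uint64_t tar_tm;	/* checkpointed TAR used by TM emulation */
	uint64_t hw_tar;	/* stand-in for the physical SPR */
};

/* Analogous to kvmppc_giveup_fac(vcpu, FSCR_TAR_LG): flush the live
 * TAR value into the vcpu struct and mark the facility given up. */
static void giveup_tar_sketch(struct vcpu_sketch *v)
{
	if (!(v->shadow_fscr & FSCR_TAR))
		return;			/* already flushed */
	v->tar = v->hw_tar;		/* mfspr SPRN_TAR */
	v->shadow_fscr &= ~FSCR_TAR;
}

/* Analogous to the kvmppc_handle_fac() path for TAR: lazily reload
 * the register when the guest touches the facility again. */
static void load_tar_sketch(struct vcpu_sketch *v)
{
	v->hw_tar = v->tar;		/* mtspr SPRN_TAR */
	v->shadow_fscr |= FSCR_TAR;
}

/* trechkpt. emulation flushes TAR first, so the checkpointed copy
 * (tar_tm) is taken from the value the guest last wrote, not from
 * whatever happens to sit in the hardware register. */
static void emulate_trchkpt_sketch(struct vcpu_sketch *v)
{
	giveup_tar_sketch(v);
	v->tar_tm = v->tar;
}

treclaim. goes the other way: once the transactional state has been torn down, the emulation reloads the hardware register via mtspr(SPRN_TAR, vcpu->arch.tar) whenever shadow_fscr still has FSCR_TAR set, which is the first book3s_emulate.c hunk below.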
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h |  2
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c     |  4
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c          | 23
-rw-r--r--  arch/powerpc/kvm/tm.S                 | 16
4 files changed, 38 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 2940de7bac08..1f345a0b6ba2 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -271,6 +271,8 @@ static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
 static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
 #endif
 
+void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+
 extern int kvm_irq_bypass;
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 67d0fb40e8b2..fdbc695038dc 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -173,6 +173,9 @@ static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
 	guest_msr &= ~(MSR_TS_MASK);
 	kvmppc_set_msr(vcpu, guest_msr);
 	preempt_enable();
+
+	if (vcpu->arch.shadow_fscr & FSCR_TAR)
+		mtspr(SPRN_TAR, vcpu->arch.tar);
 }
 
 static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
@@ -185,6 +188,7 @@ static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
 	 * copy.
 	 */
 	kvmppc_giveup_ext(vcpu, MSR_VSX);
+	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
 	kvmppc_copyto_vcpu_tm(vcpu);
 	kvmppc_save_tm_sprs(vcpu);
 
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index ad0a2ee8d8b1..eaf0c4f03c47 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -55,7 +55,9 @@
 
 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 			     ulong msr);
-static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+#ifdef CONFIG_PPC_BOOK3S_64
+static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
+#endif
 
 /* Some compatibility defines */
 #ifdef CONFIG_PPC_BOOK3S_32
@@ -346,6 +348,7 @@ void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
 		return;
 	}
 
+	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
 	kvmppc_giveup_ext(vcpu, MSR_VSX);
 
 	preempt_disable();
@@ -357,8 +360,11 @@ void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
 {
 	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
 		kvmppc_restore_tm_sprs(vcpu);
-		if (kvmppc_get_msr(vcpu) & MSR_TM)
+		if (kvmppc_get_msr(vcpu) & MSR_TM) {
 			kvmppc_handle_lost_math_exts(vcpu);
+			if (vcpu->arch.fscr & FSCR_TAR)
+				kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
+		}
 		return;
 	}
 
@@ -366,9 +372,11 @@ void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
 	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
 	preempt_enable();
 
-	if (kvmppc_get_msr(vcpu) & MSR_TM)
+	if (kvmppc_get_msr(vcpu) & MSR_TM) {
 		kvmppc_handle_lost_math_exts(vcpu);
-
+		if (vcpu->arch.fscr & FSCR_TAR)
+			kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
+	}
 }
 #endif
 
@@ -819,7 +827,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 }
 
 /* Give up facility (TAR / EBB / DSCR) */
-static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
+void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
@@ -1020,7 +1028,12 @@ void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
 	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
 		/* TAR got dropped, drop it in shadow too */
 		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
+	} else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
+		vcpu->arch.fscr = fscr;
+		kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
+		return;
 	}
+
 	vcpu->arch.fscr = fscr;
 }
 #endif
diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S
index 4a68dd4050a4..90e330f21356 100644
--- a/arch/powerpc/kvm/tm.S
+++ b/arch/powerpc/kvm/tm.S
@@ -172,15 +172,21 @@ _GLOBAL(_kvmppc_save_tm_pr)
 	mfmsr	r5
 	SAVE_GPR(5, r1)
 
-	/* also save DSCR/CR so that it can be recovered later */
+	/* also save DSCR/CR/TAR so that it can be recovered later */
 	mfspr   r6, SPRN_DSCR
 	SAVE_GPR(6, r1)
 
 	mfcr    r7
 	stw     r7, _CCR(r1)
 
+	mfspr   r8, SPRN_TAR
+	SAVE_GPR(8, r1)
+
 	bl	__kvmppc_save_tm
 
+	REST_GPR(8, r1)
+	mtspr   SPRN_TAR, r8
+
 	ld	r7, _CCR(r1)
 	mtcr	r7
 
@@ -340,15 +346,21 @@ _GLOBAL(_kvmppc_restore_tm_pr)
 	mfmsr	r5
 	SAVE_GPR(5, r1)
 
-	/* also save DSCR/CR so that it can be recovered later */
+	/* also save DSCR/CR/TAR so that it can be recovered later */
 	mfspr   r6, SPRN_DSCR
 	SAVE_GPR(6, r1)
 
 	mfcr    r7
 	stw     r7, _CCR(r1)
 
+	mfspr   r8, SPRN_TAR
+	SAVE_GPR(8, r1)
+
 	bl	__kvmppc_restore_tm
 
+	REST_GPR(8, r1)
+	mtspr   SPRN_TAR, r8
+
 	ld	r7, _CCR(r1)
 	mtcr	r7
 