summaryrefslogtreecommitdiffstats
path: root/arch/powerpc
diff options
context:
space:
mode:
authorSimon Guo <wei.guo.simon@gmail.com>2018-05-23 03:01:58 -0400
committerPaul Mackerras <paulus@ozlabs.org>2018-05-31 20:29:55 -0400
commit8d2e2fc5e082a7b3f858cefb6e65700f28d2955e (patch)
treea19a6efb386cf16990f4d4ff6486c532bcc67d9f /arch/powerpc
parent66c33e796cf9d5f7150bf4c701786d0527b594b6 (diff)
KVM: PPC: Book3S PR: Add transaction memory save/restore skeleton
The transaction memory checkpoint area save/restore behavior is triggered when the VCPU qemu process is switched out of/into the CPU, i.e. at kvmppc_core_vcpu_put_pr() and kvmppc_core_vcpu_load_pr(). The MSR TM active state is determined by the TS bits: active: 10 (transactional) or 01 (suspended); inactive: 00 (non-transactional). We don't "fake" TM functionality for the guest. We "sync" the guest's virtual MSR TM active state (10 or 01) with the shadow MSR. That is to say, we don't emulate a transactional guest with a TM-inactive MSR. TM SPR support (TFIAR/TFAR/TEXASR) has already been added by commit 9916d57e64a4 ("KVM: PPC: Book3S PR: Expose TM registers"). Math register support (FPR/VMX/VSX) will be done in a subsequent patch. Whether the TM context needs to be saved/restored can be determined by the kvmppc_get_msr() TM active state: * TM active - save/restore the TM context * TM inactive - no need to do so; only save/restore the TM SPRs. Signed-off-by: Simon Guo <wei.guo.simon@gmail.com> Suggested-by: Paul Mackerras <paulus@ozlabs.org> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h9
-rw-r--r--arch/powerpc/include/asm/kvm_host.h1
-rw-r--r--arch/powerpc/kvm/book3s_pr.c27
3 files changed, 36 insertions, 1 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 20d3d5a87296..fc15ad9dfc3b 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -257,6 +257,15 @@ extern int kvmppc_hcall_impl_pr(unsigned long cmd);
257extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd); 257extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
258extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu); 258extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
259extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu); 259extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
260
261#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
262void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
263void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
264#else
265static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
266static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
267#endif
268
260extern int kvm_irq_bypass; 269extern int kvm_irq_bypass;
261 270
262static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) 271static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 8dc5e439b387..fa4efa7e88f7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -627,7 +627,6 @@ struct kvm_vcpu_arch {
627 627
628 struct thread_vr_state vr_tm; 628 struct thread_vr_state vr_tm;
629 u32 vrsave_tm; /* also USPRG0 */ 629 u32 vrsave_tm; /* also USPRG0 */
630
631#endif 630#endif
632 631
633#ifdef CONFIG_KVM_EXIT_TIMING 632#ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 92e467ebadf0..a14721f034fb 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -43,6 +43,7 @@
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/miscdevice.h> 44#include <linux/miscdevice.h>
45#include <asm/asm-prototypes.h> 45#include <asm/asm-prototypes.h>
46#include <asm/tm.h>
46 47
47#include "book3s.h" 48#include "book3s.h"
48 49
@@ -115,6 +116,8 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
115 116
116 if (kvmppc_is_split_real(vcpu)) 117 if (kvmppc_is_split_real(vcpu))
117 kvmppc_fixup_split_real(vcpu); 118 kvmppc_fixup_split_real(vcpu);
119
120 kvmppc_restore_tm_pr(vcpu);
118} 121}
119 122
120static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) 123static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
@@ -134,6 +137,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
134 137
135 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); 138 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
136 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); 139 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
140 kvmppc_save_tm_pr(vcpu);
137 141
138 /* Enable AIL if supported */ 142 /* Enable AIL if supported */
139 if (cpu_has_feature(CPU_FTR_HVMODE) && 143 if (cpu_has_feature(CPU_FTR_HVMODE) &&
@@ -304,6 +308,29 @@ static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
304 tm_disable(); 308 tm_disable();
305} 309}
306 310
311void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
312{
313 if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
314 kvmppc_save_tm_sprs(vcpu);
315 return;
316 }
317
318 preempt_disable();
319 _kvmppc_save_tm_pr(vcpu, mfmsr());
320 preempt_enable();
321}
322
323void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
324{
325 if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
326 kvmppc_restore_tm_sprs(vcpu);
327 return;
328 }
329
330 preempt_disable();
331 _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
332 preempt_enable();
333}
307#endif 334#endif
308 335
309static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) 336static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)