 arch/powerpc/kvm/book3s.c    |  3 +++
 arch/powerpc/kvm/book3s_pr.c | 18 ++++++++++++++++++
 2 files changed, 21 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index c8ead7b9f03..3f2a8360c85 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -291,6 +291,9 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
 
+	if (!(vcpu->arch.shared->msr & MSR_SF))
+		mp_pa = (uint32_t)mp_pa;
+
 	/* Magic page override */
 	if (unlikely(mp_pa) &&
 	    unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
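
The hunk above makes kvmppc_gfn_to_pfn() truncate the stored magic page address to its low 32 bits whenever the guest MSR has MSR_SF clear, so a 32-bit guest matches the override regardless of stale high bits. Below is a standalone, user-space sketch of that masked comparison; the SKETCH_* constants and the helper name are illustrative assumptions, not the kernel definitions.

/* Standalone sketch of the masked magic-page comparison; constants are
 * illustrative assumptions, not the kernel's definitions. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_KVM_PAM    0x0fffffffffffffffULL /* physical address mask */
#define SKETCH_MSR_SF     (1ULL << 63)          /* 64-bit mode bit in the MSR */

static int magic_page_overrides(uint64_t msr, uint64_t magic_page_pa, uint64_t gfn)
{
	uint64_t mp_pa = magic_page_pa;

	/* In 32-bit guest mode only the low 32 bits of the address are valid. */
	if (!(msr & SKETCH_MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	return mp_pa != 0 &&
	       ((gfn << SKETCH_PAGE_SHIFT) & SKETCH_KVM_PAM) ==
	       (mp_pa & SKETCH_KVM_PAM);
}

int main(void)
{
	uint64_t gfn = 0xfffff000ULL >> SKETCH_PAGE_SHIFT;

	/* 32-bit guest: stale high bits in the stored address no longer
	 * prevent the override from matching. */
	printf("%d\n", magic_page_overrides(0, 0x100fffff000ULL, gfn));
	/* 64-bit guest: the full 64-bit address is compared. */
	printf("%d\n", magic_page_overrides(SKETCH_MSR_SF, 0x100fffff000ULL, gfn));
	return 0;
}

The first call matches because the address is narrowed to 0xfffff000 before the masked compare; the second does not, since MSR_SF keeps the full 64-bit value.
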
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 912e10fbf6e..dba282e5093 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -145,6 +145,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 		}
 	}
 
+	/*
+	 * When switching from 32-bit to 64-bit mode, we may have a stale
+	 * 32-bit magic page around; we need to flush it. Typically the
+	 * 32-bit magic page will be instantiated when calling into RTAS.
+	 * Note: we assume that such a transition only happens while in
+	 * kernel mode, i.e. we never transition from user 32-bit to
+	 * kernel 64-bit with a 32-bit magic page around.
+	 */
+	if (vcpu->arch.magic_page_pa &&
+	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
+		/* going from RTAS to normal kernel code */
+		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
+				     ~0xFFFUL);
+	}
+
 	/* Preload FPU if it's enabled */
 	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
@@ -252,6 +267,9 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
 
+	if (!(vcpu->arch.shared->msr & MSR_SF))
+		mp_pa = (uint32_t)mp_pa;
+
 	if (unlikely(mp_pa) &&
 	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
 		return 1;
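
The kvmppc_set_msr() hunk flushes a possibly stale 32-bit magic page mapping when the vcpu leaves 32-bit kernel mode (for example after an RTAS call) and enters 64-bit mode. A minimal sketch of just that predicate follows, assuming the usual Power ISA MSR bit positions (SF = bit 63, PR = bit 14); the helper name is hypothetical.

/* Sketch of the flush condition added to kvmppc_set_msr(); the MSR bit
 * positions are restated here as assumptions. */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_MSR_SF (1ULL << 63) /* 64-bit mode */
#define SKETCH_MSR_PR (1ULL << 14) /* problem (user) state */

/*
 * True when a stale 32-bit magic page mapping may exist and should be
 * flushed: the vcpu has a magic page, was previously in 32-bit kernel
 * mode (e.g. inside RTAS), and the new MSR switches it to 64-bit mode.
 */
static bool needs_magic_page_flush(uint64_t magic_page_pa,
				   uint64_t old_msr, uint64_t new_msr)
{
	return magic_page_pa != 0 &&
	       !(old_msr & SKETCH_MSR_PR) && /* kernel mode before ...    */
	       !(old_msr & SKETCH_MSR_SF) && /* ... and 32-bit before ... */
	       (new_msr & SKETCH_MSR_SF);    /* ... now entering 64-bit   */
}

In the patch itself this condition guards kvmppc_mmu_pte_flush() on the low 32 bits of the magic page address, with ~0xFFFUL masking off the page offset.
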