Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s.c                 |  72
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c          |  21
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c     |   4
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c          |  19
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c     |   4
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c         |  28
-rw-r--r--  arch/powerpc/kvm/book3s_exports.c         |   1
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c              |  11
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S      |  23
-rw-r--r--  arch/powerpc/kvm/book3s_paired_singles.c  |  16
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c              |  97
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c         |   2
-rw-r--r--  arch/powerpc/kvm/emulate.c                |  24
-rw-r--r--  arch/powerpc/kvm/powerpc.c                |  33
-rw-r--r--  arch/powerpc/kvm/trace_pr.h               |   2
15 files changed, 221 insertions, 136 deletions
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 94e597e6f15c..81abc5cef0fe 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -85,9 +85,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
 	if (is_kvmppc_hv_enabled(vcpu->kvm))
 		return;
 	if (pending_now)
-		vcpu->arch.shared->int_pending = 1;
+		kvmppc_set_int_pending(vcpu, 1);
 	else if (old_pending)
-		vcpu->arch.shared->int_pending = 0;
+		kvmppc_set_int_pending(vcpu, 0);
 }
 
 static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
@@ -99,11 +99,11 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 	if (is_kvmppc_hv_enabled(vcpu->kvm))
 		return false;
 
-	crit_raw = vcpu->arch.shared->critical;
+	crit_raw = kvmppc_get_critical(vcpu);
 	crit_r1 = kvmppc_get_gpr(vcpu, 1);
 
 	/* Truncate crit indicators in 32 bit mode */
-	if (!(vcpu->arch.shared->msr & MSR_SF)) {
+	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
 		crit_raw &= 0xffffffff;
 		crit_r1 &= 0xffffffff;
 	}
@@ -111,15 +111,15 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 	/* Critical section when crit == r1 */
 	crit = (crit_raw == crit_r1);
 	/* ... and we're in supervisor mode */
-	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);
 
 	return crit;
 }
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
-	vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
-	vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
+	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
+	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
 	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
 	vcpu->arch.mmu.reset_msr(vcpu);
 }
@@ -225,12 +225,12 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 
 	switch (priority) {
 	case BOOK3S_IRQPRIO_DECREMENTER:
-		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
+		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
 		vec = BOOK3S_INTERRUPT_DECREMENTER;
 		break;
 	case BOOK3S_IRQPRIO_EXTERNAL:
 	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
-		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
+		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
 		vec = BOOK3S_INTERRUPT_EXTERNAL;
 		break;
 	case BOOK3S_IRQPRIO_SYSTEM_RESET:
@@ -343,7 +343,7 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
 
-	if (!(vcpu->arch.shared->msr & MSR_SF))
+	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
 		mp_pa = (uint32_t)mp_pa;
 
 	/* Magic page override */
@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
 			bool iswrite, struct kvmppc_pte *pte)
 {
-	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
+	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
 	int r;
 
 	if (relocated) {
@@ -498,18 +498,18 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->ctr = kvmppc_get_ctr(vcpu);
 	regs->lr = kvmppc_get_lr(vcpu);
 	regs->xer = kvmppc_get_xer(vcpu);
-	regs->msr = vcpu->arch.shared->msr;
-	regs->srr0 = vcpu->arch.shared->srr0;
-	regs->srr1 = vcpu->arch.shared->srr1;
+	regs->msr = kvmppc_get_msr(vcpu);
+	regs->srr0 = kvmppc_get_srr0(vcpu);
+	regs->srr1 = kvmppc_get_srr1(vcpu);
 	regs->pid = vcpu->arch.pid;
-	regs->sprg0 = vcpu->arch.shared->sprg0;
-	regs->sprg1 = vcpu->arch.shared->sprg1;
-	regs->sprg2 = vcpu->arch.shared->sprg2;
-	regs->sprg3 = vcpu->arch.shared->sprg3;
-	regs->sprg4 = vcpu->arch.shared->sprg4;
-	regs->sprg5 = vcpu->arch.shared->sprg5;
-	regs->sprg6 = vcpu->arch.shared->sprg6;
-	regs->sprg7 = vcpu->arch.shared->sprg7;
+	regs->sprg0 = kvmppc_get_sprg0(vcpu);
+	regs->sprg1 = kvmppc_get_sprg1(vcpu);
+	regs->sprg2 = kvmppc_get_sprg2(vcpu);
+	regs->sprg3 = kvmppc_get_sprg3(vcpu);
+	regs->sprg4 = kvmppc_get_sprg4(vcpu);
+	regs->sprg5 = kvmppc_get_sprg5(vcpu);
+	regs->sprg6 = kvmppc_get_sprg6(vcpu);
+	regs->sprg7 = kvmppc_get_sprg7(vcpu);
 
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -527,16 +527,16 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	kvmppc_set_lr(vcpu, regs->lr);
 	kvmppc_set_xer(vcpu, regs->xer);
 	kvmppc_set_msr(vcpu, regs->msr);
-	vcpu->arch.shared->srr0 = regs->srr0;
-	vcpu->arch.shared->srr1 = regs->srr1;
-	vcpu->arch.shared->sprg0 = regs->sprg0;
-	vcpu->arch.shared->sprg1 = regs->sprg1;
-	vcpu->arch.shared->sprg2 = regs->sprg2;
-	vcpu->arch.shared->sprg3 = regs->sprg3;
-	vcpu->arch.shared->sprg4 = regs->sprg4;
-	vcpu->arch.shared->sprg5 = regs->sprg5;
-	vcpu->arch.shared->sprg6 = regs->sprg6;
-	vcpu->arch.shared->sprg7 = regs->sprg7;
+	kvmppc_set_srr0(vcpu, regs->srr0);
+	kvmppc_set_srr1(vcpu, regs->srr1);
+	kvmppc_set_sprg0(vcpu, regs->sprg0);
+	kvmppc_set_sprg1(vcpu, regs->sprg1);
+	kvmppc_set_sprg2(vcpu, regs->sprg2);
+	kvmppc_set_sprg3(vcpu, regs->sprg3);
+	kvmppc_set_sprg4(vcpu, regs->sprg4);
+	kvmppc_set_sprg5(vcpu, regs->sprg5);
+	kvmppc_set_sprg6(vcpu, regs->sprg6);
+	kvmppc_set_sprg7(vcpu, regs->sprg7);
 
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -570,10 +570,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	r = 0;
 	switch (reg->id) {
 	case KVM_REG_PPC_DAR:
-		val = get_reg_val(reg->id, vcpu->arch.shared->dar);
+		val = get_reg_val(reg->id, kvmppc_get_dar(vcpu));
 		break;
 	case KVM_REG_PPC_DSISR:
-		val = get_reg_val(reg->id, vcpu->arch.shared->dsisr);
+		val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu));
 		break;
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		i = reg->id - KVM_REG_PPC_FPR0;
@@ -660,10 +660,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	r = 0;
 	switch (reg->id) {
 	case KVM_REG_PPC_DAR:
-		vcpu->arch.shared->dar = set_reg_val(reg->id, val);
+		kvmppc_set_dar(vcpu, set_reg_val(reg->id, val));
 		break;
 	case KVM_REG_PPC_DSISR:
-		vcpu->arch.shared->dsisr = set_reg_val(reg->id, val);
+		kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val));
 		break;
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		i = reg->id - KVM_REG_PPC_FPR0;
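
Note: every hunk in book3s.c (and the files below) replaces a direct dereference of vcpu->arch.shared with a wrapper call. The wrapper definitions live in headers outside this diff, so the following is only a sketch of the pattern they follow, assuming a per-vcpu shared_big_endian flag: every access converts between the shared page's current endianness and the host's.

static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR can run the shared page in either endianness */
	return vcpu->arch.shared_big_endian;
#else
	return true;
#endif
}

/* Sketch of one getter/setter pair; the real set covers msr, srr0/1,
 * sprg0-7, dar, dsisr, int_pending, critical and sr[] as used above. */
static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be64_to_cpu(vcpu->arch.shared->msr);
	else
		return le64_to_cpu(vcpu->arch.shared->msr);
}

static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}

The _fast variant is a raw store used where the side effects of the full kvmppc_set_msr() (shadow-MSR recalculation, wakeups) must not recurse, e.g. inside kvmppc_set_msr_pr() further down.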
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 0e42b167da0b..628d90ed417d 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -91,7 +91,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 
 static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-	return vcpu->arch.shared->sr[(eaddr >> 28) & 0xf];
+	return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf);
 }
 
 static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
@@ -160,7 +160,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 		else
 			bat = &vcpu_book3s->ibat[i];
 
-		if (vcpu->arch.shared->msr & MSR_PR) {
+		if (kvmppc_get_msr(vcpu) & MSR_PR) {
 			if (!bat->vp)
 				continue;
 		} else {
@@ -242,8 +242,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 	pte->raddr = (pte1 & ~(0xFFFULL)) | (eaddr & 0xFFF);
 	pp = pte1 & 3;
 
-	if ((sr_kp(sre) &&  (vcpu->arch.shared->msr & MSR_PR)) ||
-	    (sr_ks(sre) && !(vcpu->arch.shared->msr & MSR_PR)))
+	if ((sr_kp(sre) &&  (kvmppc_get_msr(vcpu) & MSR_PR)) ||
+	    (sr_ks(sre) && !(kvmppc_get_msr(vcpu) & MSR_PR)))
 		pp |= 4;
 
 	pte->may_write = false;
@@ -320,7 +320,7 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Magic page override */
 	if (unlikely(mp_ea) &&
 	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
-	    !(vcpu->arch.shared->msr & MSR_PR)) {
+	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 		pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
 		pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff);
 		pte->raddr &= KVM_PAM;
@@ -345,13 +345,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum)
 {
-	return vcpu->arch.shared->sr[srnum];
+	return kvmppc_get_sr(vcpu, srnum);
 }
 
 static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 					ulong value)
 {
-	vcpu->arch.shared->sr[srnum] = value;
+	kvmppc_set_sr(vcpu, srnum, value);
 	kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT);
 }
 
@@ -371,8 +371,9 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	ulong ea = esid << SID_SHIFT;
 	u32 sr;
 	u64 gvsid = esid;
+	u64 msr = kvmppc_get_msr(vcpu);
 
-	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+	if (msr & (MSR_DR|MSR_IR)) {
 		sr = find_sr(vcpu, ea);
 		if (sr_valid(sr))
 			gvsid = sr_vsid(sr);
@@ -381,7 +382,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	/* In case we only have one of MSR_IR or MSR_DR set, let's put
 	   that in the real-mode context (and hope RM doesn't access
 	   high memory) */
-	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+	switch (msr & (MSR_DR|MSR_IR)) {
 	case 0:
 		*vsid = VSID_REAL | esid;
 		break;
@@ -401,7 +402,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		BUG();
 	}
 
-	if (vcpu->arch.shared->msr & MSR_PR)
+	if (msr & MSR_PR)
 		*vsid |= VSID_PR;
 
 	return 0;
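
Note the hoisting in the last three hunks: since every kvmppc_get_msr() call now potentially byte-swaps, kvmppc_mmu_book3s_32_esid_to_vsid() reads the MSR once into a local and tests bits on that copy. The same pattern in isolation (example_uses_msr is a hypothetical name, not part of the patch):

static int example_uses_msr(struct kvm_vcpu *vcpu)
{
	u64 msr = kvmppc_get_msr(vcpu);		/* one endian-aware read */

	if (msr & (MSR_DR|MSR_IR))		/* plain bit tests afterwards */
		return 1;
	return (msr & MSR_PR) ? 2 : 0;
}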
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 5fac89dfe4cd..678e75370495 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -92,7 +92,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
 	struct kvmppc_sid_map *map;
 	u16 sid_map_mask;
 
-	if (vcpu->arch.shared->msr & MSR_PR)
+	if (kvmppc_get_msr(vcpu) & MSR_PR)
 		gvsid |= VSID_PR;
 
 	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -279,7 +279,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 	u16 sid_map_mask;
 	static int backwards_map = 0;
 
-	if (vcpu->arch.shared->msr & MSR_PR)
+	if (kvmppc_get_msr(vcpu) & MSR_PR)
 		gvsid |= VSID_PR;
 
 	/* We might get collisions that trap in preceding order, so let's
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index b93c2456253d..278729f4df80 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -226,7 +226,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Magic page override */
 	if (unlikely(mp_ea) &&
 	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
-	    !(vcpu->arch.shared->msr & MSR_PR)) {
+	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 		gpte->eaddr = eaddr;
 		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
 		gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
@@ -269,9 +269,9 @@ do_second:
 		goto no_page_found;
 	}
 
-	if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
+	if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
 		key = 4;
-	else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
+	else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
 		key = 4;
 
 	for (i=0; i<16; i+=2) {
@@ -482,7 +482,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
 		vcpu->arch.slb[i].origv = 0;
 	}
 
-	if (vcpu->arch.shared->msr & MSR_IR) {
+	if (kvmppc_get_msr(vcpu) & MSR_IR) {
 		kvmppc_mmu_flush_segments(vcpu);
 		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 	}
@@ -566,7 +566,7 @@ static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
 {
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 
-	return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
+	return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) &&
 		(mp_ea >> SID_SHIFT) == esid;
 }
 #endif
@@ -579,8 +579,9 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	u64 gvsid = esid;
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 	int pagesize = MMU_PAGE_64K;
+	u64 msr = kvmppc_get_msr(vcpu);
 
-	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+	if (msr & (MSR_DR|MSR_IR)) {
 		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
 		if (slb) {
 			gvsid = slb->vsid;
@@ -593,7 +594,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		}
 	}
 
-	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+	switch (msr & (MSR_DR|MSR_IR)) {
 	case 0:
 		gvsid = VSID_REAL | esid;
 		break;
@@ -626,7 +627,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		gvsid |= VSID_64K;
 #endif
 
-	if (vcpu->arch.shared->msr & MSR_PR)
+	if (kvmppc_get_msr(vcpu) & MSR_PR)
 		gvsid |= VSID_PR;
 
 	*vsid = gvsid;
@@ -636,7 +637,7 @@ no_slb:
 	/* Catch magic page case */
 	if (unlikely(mp_ea) &&
 	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
-	    !(vcpu->arch.shared->msr & MSR_PR)) {
+	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 		*vsid = VSID_REAL | esid;
 		return 0;
 	}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 0d513af62bba..e2efb85c65a3 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -58,7 +58,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
 	struct kvmppc_sid_map *map;
 	u16 sid_map_mask;
 
-	if (vcpu->arch.shared->msr & MSR_PR)
+	if (kvmppc_get_msr(vcpu) & MSR_PR)
 		gvsid |= VSID_PR;
 
 	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -230,7 +230,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 	u16 sid_map_mask;
 	static int backwards_map = 0;
 
-	if (vcpu->arch.shared->msr & MSR_PR)
+	if (kvmppc_get_msr(vcpu) & MSR_PR)
 		gvsid |= VSID_PR;
 
 	/* We might get collisions that trap in preceding order, so let's
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 99d40f8977e8..45d0a805c3ca 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -80,7 +80,7 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
 		return false;
 
 	/* Limit user space to its own small SPR set */
-	if ((vcpu->arch.shared->msr & MSR_PR) && level > PRIV_PROBLEM)
+	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
 		return false;
 
 	return true;
@@ -100,8 +100,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	switch (get_xop(inst)) {
 	case OP_19_XOP_RFID:
 	case OP_19_XOP_RFI:
-		kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0);
-		kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
+		kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
+		kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu));
 		*advance = 0;
 		break;
 
@@ -113,16 +113,16 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case 31:
 		switch (get_xop(inst)) {
 		case OP_31_XOP_MFMSR:
-			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
+			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
 			break;
 		case OP_31_XOP_MTMSRD:
 		{
 			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
 			if (inst & 0x10000) {
-				ulong new_msr = vcpu->arch.shared->msr;
+				ulong new_msr = kvmppc_get_msr(vcpu);
 				new_msr &= ~(MSR_RI | MSR_EE);
 				new_msr |= rs_val & (MSR_RI | MSR_EE);
-				vcpu->arch.shared->msr = new_msr;
+				kvmppc_set_msr_fast(vcpu, new_msr);
 			} else
 				kvmppc_set_msr(vcpu, rs_val);
 			break;
@@ -179,7 +179,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			ulong cmd = kvmppc_get_gpr(vcpu, 3);
 			int i;
 
-			if ((vcpu->arch.shared->msr & MSR_PR) ||
+			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
 			    !vcpu->arch.papr_enabled) {
 				emulated = EMULATE_FAIL;
 				break;
@@ -261,14 +261,14 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			ra_val = kvmppc_get_gpr(vcpu, ra);
 
 			addr = (ra_val + rb_val) & ~31ULL;
-			if (!(vcpu->arch.shared->msr & MSR_SF))
+			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
 				addr &= 0xffffffff;
 			vaddr = addr;
 
 			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
 			if ((r == -ENOENT) || (r == -EPERM)) {
 				*advance = 0;
-				vcpu->arch.shared->dar = vaddr;
+				kvmppc_set_dar(vcpu, vaddr);
 				vcpu->arch.fault_dar = vaddr;
 
 				dsisr = DSISR_ISSTORE;
@@ -277,7 +277,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				else if (r == -EPERM)
 					dsisr |= DSISR_PROTFAULT;
 
-				vcpu->arch.shared->dsisr = dsisr;
+				kvmppc_set_dsisr(vcpu, dsisr);
 				vcpu->arch.fault_dsisr = dsisr;
 
 				kvmppc_book3s_queue_irqprio(vcpu,
@@ -356,10 +356,10 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 		to_book3s(vcpu)->sdr1 = spr_val;
 		break;
 	case SPRN_DSISR:
-		vcpu->arch.shared->dsisr = spr_val;
+		kvmppc_set_dsisr(vcpu, spr_val);
 		break;
 	case SPRN_DAR:
-		vcpu->arch.shared->dar = spr_val;
+		kvmppc_set_dar(vcpu, spr_val);
 		break;
 	case SPRN_HIOR:
 		to_book3s(vcpu)->hior = spr_val;
@@ -493,10 +493,10 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
 		*spr_val = to_book3s(vcpu)->sdr1;
 		break;
 	case SPRN_DSISR:
-		*spr_val = vcpu->arch.shared->dsisr;
+		*spr_val = kvmppc_get_dsisr(vcpu);
 		break;
 	case SPRN_DAR:
-		*spr_val = vcpu->arch.shared->dar;
+		*spr_val = kvmppc_get_dar(vcpu);
 		break;
 	case SPRN_HIOR:
 		*spr_val = to_book3s(vcpu)->hior;
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 20d4ea8e656d..0d013fbc2e13 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/export.h>
+#include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 8227dba5af0f..030821a414a8 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1280,6 +1280,17 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 		goto free_vcpu;
 
 	vcpu->arch.shared = &vcpu->arch.shregs;
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	/*
+	 * The shared struct is never shared on HV,
+	 * so we can always use host endianness
+	 */
+#ifdef __BIG_ENDIAN__
+	vcpu->arch.shared_big_endian = true;
+#else
+	vcpu->arch.shared_big_endian = false;
+#endif
+#endif
 	vcpu->arch.mmcr[0] = MMCR0_FC;
 	vcpu->arch.ctrl = CTRL_RUNLATCH;
 	/* default to host PVR, since we can't spoof it */
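
On HV the shared struct is the vcpu's own shregs and is never mapped into a guest, so the flag can be pinned to the host endianness; the endian-aware getter sketched earlier then reduces to a plain load, since be64_to_cpu() of a value stored with cpu_to_be64() (or the LE pair) is the identity. Illustration only, with a hypothetical name:

static inline u64 hv_msr_equivalent(struct kvm_vcpu *vcpu)
{
	/* shared_big_endian == host endianness, so no swap happens */
	return vcpu->arch.shared->msr;
}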
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 3533c999194a..e2c29e381dc7 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -104,8 +104,27 @@ kvm_start_lightweight:
 	stb	r3, HSTATE_RESTORE_HID5(r13)
 
 	/* Load up guest SPRG3 value, since it's user readable */
-	ld	r3, VCPU_SHARED(r4)
-	ld	r3, VCPU_SHARED_SPRG3(r3)
+	lwz	r3, VCPU_SHAREDBE(r4)
+	cmpwi	r3, 0
+	ld	r5, VCPU_SHARED(r4)
+	beq	sprg3_little_endian
+sprg3_big_endian:
+#ifdef __BIG_ENDIAN__
+	ld	r3, VCPU_SHARED_SPRG3(r5)
+#else
+	addi	r5, r5, VCPU_SHARED_SPRG3
+	ldbrx	r3, 0, r5
+#endif
+	b	after_sprg3_load
+sprg3_little_endian:
+#ifdef __LITTLE_ENDIAN__
+	ld	r3, VCPU_SHARED_SPRG3(r5)
+#else
+	addi	r5, r5, VCPU_SHARED_SPRG3
+	ldbrx	r3, 0, r5
+#endif
+
+after_sprg3_load:
 	mtspr	SPRN_SPRG3, r3
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
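
In C terms the new assembly does roughly the following. This is an illustration only: the real code runs on the guest-entry path where C is unavailable, VCPU_SHAREDBE and VCPU_SHARED are asm-offsets into the vcpu struct, and host_big_endian stands in for the #ifdef __BIG_ENDIAN__ checks resolved at build time. The ldbrx (load doubleword byte-reversed indexed) branch handles the mismatched case.

static void load_guest_sprg3(struct kvm_vcpu *vcpu, bool host_big_endian)
{
	u64 sprg3 = vcpu->arch.shared->sprg3;	/* raw shared-page bytes */

	if (vcpu->arch.shared_big_endian != host_big_endian)
		sprg3 = swab64(sprg3);		/* the ldbrx branch above */

	mtspr(SPRN_SPRG3, sprg3);
}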
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index c1abd95063f4..6c8011fd57e6 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -165,16 +165,18 @@ static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
 
 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
 {
-	u64 dsisr;
-	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
+	u32 dsisr;
+	u64 msr = kvmppc_get_msr(vcpu);
 
-	shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0);
-	shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0);
-	shared->dar = eaddr;
+	msr = kvmppc_set_field(msr, 33, 36, 0);
+	msr = kvmppc_set_field(msr, 42, 47, 0);
+	kvmppc_set_msr(vcpu, msr);
+	kvmppc_set_dar(vcpu, eaddr);
 	/* Page Fault */
 	dsisr = kvmppc_set_field(0, 33, 33, 1);
 	if (is_store)
-		shared->dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
+		dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
+	kvmppc_set_dsisr(vcpu, dsisr);
 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
 }
 
@@ -660,7 +662,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	if (!kvmppc_inst_is_paired_single(vcpu, inst))
 		return EMULATE_FAIL;
 
-	if (!(vcpu->arch.shared->msr & MSR_FP)) {
+	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
 		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
 		return EMULATE_AGAIN;
 	}
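
kvmppc_inject_pf() builds the MSR and DSISR values with kvmppc_set_field(), which uses IBM bit numbering (bit 0 is the most significant of 64). The helper is defined outside this diff; a sketch of its semantics under a hypothetical name:

/* Replace IBM-numbered bits msb..lsb of a 64-bit value (sketch). */
static inline u64 set_field_sketch(u64 val, int msb, int lsb, u64 field)
{
	int shift = 63 - lsb;
	u64 mask = ((1ULL << (lsb - msb + 1)) - 1) << shift;

	return (val & ~mask) | ((field << shift) & mask);
}

So kvmppc_set_field(msr, 33, 36, 0) clears IBM bits 33-36, and the dsisr value, which lives in the low word, is now correctly declared u32 before being handed to kvmppc_set_dsisr().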
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d7b0ad2bffe4..d424ca053765 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -246,14 +246,15 @@ static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
 
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
-	ulong smsr = vcpu->arch.shared->msr;
+	ulong guest_msr = kvmppc_get_msr(vcpu);
+	ulong smsr = guest_msr;
 
 	/* Guest MSR values */
 	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
 	/* Process MSR values */
 	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
 	/* External providers the guest reserved */
-	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
+	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
 	/* 64-bit Process MSR values */
 #ifdef CONFIG_PPC_BOOK3S_64
 	smsr |= MSR_ISF | MSR_HV;
@@ -263,14 +264,14 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 
 static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 {
-	ulong old_msr = vcpu->arch.shared->msr;
+	ulong old_msr = kvmppc_get_msr(vcpu);
 
 #ifdef EXIT_DEBUG
 	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
 
 	msr &= to_book3s(vcpu)->msr_mask;
-	vcpu->arch.shared->msr = msr;
+	kvmppc_set_msr_fast(vcpu, msr);
 	kvmppc_recalc_shadow_msr(vcpu);
 
 	if (msr & MSR_POW) {
@@ -281,11 +282,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 
 			/* Unset POW bit after we woke up */
 			msr &= ~MSR_POW;
-			vcpu->arch.shared->msr = msr;
+			kvmppc_set_msr_fast(vcpu, msr);
 		}
 	}
 
-	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
 		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
 		kvmppc_mmu_flush_segments(vcpu);
 		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
@@ -317,7 +318,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 	}
 
 	/* Preload FPU if it's enabled */
-	if (vcpu->arch.shared->msr & MSR_FP)
+	if (kvmppc_get_msr(vcpu) & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
@@ -438,7 +439,7 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
 
-	if (!(vcpu->arch.shared->msr & MSR_SF))
+	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
 		mp_pa = (uint32_t)mp_pa;
 
 	if (unlikely(mp_pa) &&
@@ -459,8 +460,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	int page_found = 0;
 	struct kvmppc_pte pte;
 	bool is_mmio = false;
-	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
-	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
+	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
+	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
 	u64 vsid;
 
 	relocated = data ? dr : ir;
@@ -480,7 +481,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			pte.page_size = MMU_PAGE_64K;
 	}
 
-	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
 	case 0:
 		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
 		break;
@@ -488,7 +489,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case MSR_IR:
 		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
 
-		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
+		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
 			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
 		else
 			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
@@ -511,22 +512,25 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	if (page_found == -ENOENT) {
 		/* Page not found in guest PTE entries */
-		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
-		vcpu->arch.shared->msr |=
-			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
+		u64 ssrr1 = vcpu->arch.shadow_srr1;
+		u64 msr = kvmppc_get_msr(vcpu);
+		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
+		kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
+		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
-		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
-		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
-		vcpu->arch.shared->msr |=
-			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
+		u32 dsisr = vcpu->arch.fault_dsisr;
+		u64 ssrr1 = vcpu->arch.shadow_srr1;
+		u64 msr = kvmppc_get_msr(vcpu);
+		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
+		dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
+		kvmppc_set_dsisr(vcpu, dsisr);
+		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
-		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
 		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
 	} else if (!is_mmio &&
 		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
@@ -614,11 +618,12 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
 
 	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
 	if (ret == -ENOENT) {
-		ulong msr = vcpu->arch.shared->msr;
+		ulong msr = kvmppc_get_msr(vcpu);
 
 		msr = kvmppc_set_field(msr, 33, 33, 1);
 		msr = kvmppc_set_field(msr, 34, 36, 0);
-		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
+		msr = kvmppc_set_field(msr, 42, 47, 0);
+		kvmppc_set_msr_fast(vcpu, msr);
 		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
 		return EMULATE_AGAIN;
 	}
@@ -651,7 +656,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
 		return RESUME_GUEST;
 
-	if (!(vcpu->arch.shared->msr & msr)) {
+	if (!(kvmppc_get_msr(vcpu) & msr)) {
 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		return RESUME_GUEST;
 	}
@@ -792,7 +797,9 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
 		} else {
-			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
+			u64 msr = kvmppc_get_msr(vcpu);
+			msr |= shadow_srr1 & 0x58000000;
+			kvmppc_set_msr_fast(vcpu, msr);
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			r = RESUME_GUEST;
 		}
@@ -832,8 +839,8 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
 			srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		} else {
-			vcpu->arch.shared->dar = dar;
-			vcpu->arch.shared->dsisr = fault_dsisr;
+			kvmppc_set_dar(vcpu, dar);
+			kvmppc_set_dsisr(vcpu, fault_dsisr);
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			r = RESUME_GUEST;
 		}
@@ -841,7 +848,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	}
 	case BOOK3S_INTERRUPT_DATA_SEGMENT:
 		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
-			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
 			kvmppc_book3s_queue_irqprio(vcpu,
 				BOOK3S_INTERRUPT_DATA_SEGMENT);
 		}
@@ -879,7 +886,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 program_interrupt:
 		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
-		if (vcpu->arch.shared->msr & MSR_PR) {
+		if (kvmppc_get_msr(vcpu) & MSR_PR) {
 #ifdef EXIT_DEBUG
 			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 #endif
@@ -921,7 +928,7 @@ program_interrupt:
 	case BOOK3S_INTERRUPT_SYSCALL:
 		if (vcpu->arch.papr_enabled &&
 		    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
-		    !(vcpu->arch.shared->msr & MSR_PR)) {
+		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 			/* SC 1 papr hypercalls */
 			ulong cmd = kvmppc_get_gpr(vcpu, 3);
 			int i;
@@ -953,7 +960,7 @@ program_interrupt:
 				gprs[i] = kvmppc_get_gpr(vcpu, i);
 			vcpu->arch.osi_needed = 1;
 			r = RESUME_HOST_NV;
-		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
+		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
 		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
 			/* KVM PV hypercalls */
 			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
@@ -994,10 +1001,16 @@ program_interrupt:
 	}
 	case BOOK3S_INTERRUPT_ALIGNMENT:
 		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
-			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
-				kvmppc_get_last_inst(vcpu));
-			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
-				kvmppc_get_last_inst(vcpu));
+			u32 last_inst = kvmppc_get_last_inst(vcpu);
+			u32 dsisr;
+			u64 dar;
+
+			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
+			dar = kvmppc_alignment_dar(vcpu, last_inst);
+
+			kvmppc_set_dsisr(vcpu, dsisr);
+			kvmppc_set_dar(vcpu, dar);
+
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		}
 		r = RESUME_GUEST;
@@ -1062,7 +1075,7 @@ static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
 		}
 	} else {
 		for (i = 0; i < 16; i++)
-			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
+			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);
 
 		for (i = 0; i < 8; i++) {
 			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
@@ -1198,8 +1211,14 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 		goto uninit_vcpu;
 	/* the real shared page fills the last 4k of our page */
 	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
-
 #ifdef CONFIG_PPC_BOOK3S_64
+	/* Always start the shared struct in native endian mode */
+#ifdef __BIG_ENDIAN__
+	vcpu->arch.shared_big_endian = true;
+#else
+	vcpu->arch.shared_big_endian = false;
+#endif
+
 	/*
 	 * Default to the same as the host if we're on sufficiently
 	 * recent machine that we have 1TB segments;
@@ -1293,7 +1312,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 
 	/* Preload FPU if it's enabled */
-	if (vcpu->arch.shared->msr & MSR_FP)
+	if (kvmppc_get_msr(vcpu) & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
 	kvmppc_fix_ee_before_entry();
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 255e5b1da1e0..52a63bfe3f07 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -278,7 +278,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 	case H_PUT_TCE:
 		return kvmppc_h_pr_put_tce(vcpu);
 	case H_CEDE:
-		vcpu->arch.shared->msr |= MSR_EE;
+		kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
 		vcpu->stat.halt_wakeup++;
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index c2b887be2c29..da86d9ba3476 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -97,10 +97,10 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 
 	switch (sprn) {
 	case SPRN_SRR0:
-		vcpu->arch.shared->srr0 = spr_val;
+		kvmppc_set_srr0(vcpu, spr_val);
 		break;
 	case SPRN_SRR1:
-		vcpu->arch.shared->srr1 = spr_val;
+		kvmppc_set_srr1(vcpu, spr_val);
 		break;
 
 	/* XXX We need to context-switch the timebase for
@@ -114,16 +114,16 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 		break;
 
 	case SPRN_SPRG0:
-		vcpu->arch.shared->sprg0 = spr_val;
+		kvmppc_set_sprg0(vcpu, spr_val);
 		break;
 	case SPRN_SPRG1:
-		vcpu->arch.shared->sprg1 = spr_val;
+		kvmppc_set_sprg1(vcpu, spr_val);
 		break;
 	case SPRN_SPRG2:
-		vcpu->arch.shared->sprg2 = spr_val;
+		kvmppc_set_sprg2(vcpu, spr_val);
 		break;
 	case SPRN_SPRG3:
-		vcpu->arch.shared->sprg3 = spr_val;
+		kvmppc_set_sprg3(vcpu, spr_val);
 		break;
 
 	/* PIR can legally be written, but we ignore it */
@@ -150,10 +150,10 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 
 	switch (sprn) {
 	case SPRN_SRR0:
-		spr_val = vcpu->arch.shared->srr0;
+		spr_val = kvmppc_get_srr0(vcpu);
 		break;
 	case SPRN_SRR1:
-		spr_val = vcpu->arch.shared->srr1;
+		spr_val = kvmppc_get_srr1(vcpu);
 		break;
 	case SPRN_PVR:
 		spr_val = vcpu->arch.pvr;
@@ -173,16 +173,16 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 		break;
 
 	case SPRN_SPRG0:
-		spr_val = vcpu->arch.shared->sprg0;
+		spr_val = kvmppc_get_sprg0(vcpu);
 		break;
 	case SPRN_SPRG1:
-		spr_val = vcpu->arch.shared->sprg1;
+		spr_val = kvmppc_get_sprg1(vcpu);
 		break;
 	case SPRN_SPRG2:
-		spr_val = vcpu->arch.shared->sprg2;
+		spr_val = kvmppc_get_sprg2(vcpu);
 		break;
 	case SPRN_SPRG3:
-		spr_val = vcpu->arch.shared->sprg3;
+		spr_val = kvmppc_get_sprg3(vcpu);
 		break;
 	/* Note: SPRG4-7 are user-readable, so we don't get
 	 * a trap. */
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index a9bd0ff0e173..b4e15bf3ff88 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -125,6 +125,27 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
 
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
+	int i;
+
+	shared->sprg0 = swab64(shared->sprg0);
+	shared->sprg1 = swab64(shared->sprg1);
+	shared->sprg2 = swab64(shared->sprg2);
+	shared->sprg3 = swab64(shared->sprg3);
+	shared->srr0 = swab64(shared->srr0);
+	shared->srr1 = swab64(shared->srr1);
+	shared->dar = swab64(shared->dar);
+	shared->msr = swab64(shared->msr);
+	shared->dsisr = swab32(shared->dsisr);
+	shared->int_pending = swab32(shared->int_pending);
+	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
+		shared->sr[i] = swab32(shared->sr[i]);
+}
+#endif
+
 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 {
 	int nr = kvmppc_get_gpr(vcpu, 11);
@@ -135,7 +156,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
 	unsigned long r2 = 0;
 
-	if (!(vcpu->arch.shared->msr & MSR_SF)) {
+	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
 		/* 32 bit mode */
 		param1 &= 0xffffffff;
 		param2 &= 0xffffffff;
@@ -146,6 +167,16 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 	switch (nr) {
 	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
 	{
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+		/* Book3S can be little endian, find it out here */
+		int shared_big_endian = true;
+		if (vcpu->arch.intr_msr & MSR_LE)
+			shared_big_endian = false;
+		if (shared_big_endian != vcpu->arch.shared_big_endian)
+			kvmppc_swab_shared(vcpu);
+		vcpu->arch.shared_big_endian = shared_big_endian;
+#endif
+
 		vcpu->arch.magic_page_pa = param1;
 		vcpu->arch.magic_page_ea = param2;
 
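
The swab at MAP_MAGIC_PAGE time preserves the invariant the accessors rely on: the shared page's in-memory representation always matches vcpu->arch.shared_big_endian. The guest's endianness is inferred from MSR_LE in intr_msr, the MSR value the guest takes interrupts with. A hypothetical round-trip showing the invariant (demo_endian_flip is not part of the patch):

static void demo_endian_flip(struct kvm_vcpu *vcpu)
{
	u64 msr = kvmppc_get_msr(vcpu);	/* decoded via the current flag */

	/* Flip bytes and flag together, as the hunk above does. */
	kvmppc_swab_shared(vcpu);
	vcpu->arch.shared_big_endian = !vcpu->arch.shared_big_endian;

	/* The decoded value is unchanged. */
	WARN_ON(kvmppc_get_msr(vcpu) != msr);
}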
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
index 8b22e4748344..e1357cd8dc1f 100644
--- a/arch/powerpc/kvm/trace_pr.h
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -255,7 +255,7 @@ TRACE_EVENT(kvm_exit,
 		__entry->exit_nr	= exit_nr;
 		__entry->pc		= kvmppc_get_pc(vcpu);
 		__entry->dar		= kvmppc_get_fault_dar(vcpu);
-		__entry->msr		= vcpu->arch.shared->msr;
+		__entry->msr		= kvmppc_get_msr(vcpu);
 		__entry->srr1		= vcpu->arch.shadow_srr1;
 		__entry->last_inst	= vcpu->arch.last_inst;
 	),