Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h       1
-rw-r--r--  arch/powerpc/include/asm/kvm_para.h       1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c         2
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c                8
-rw-r--r--  arch/powerpc/kvm/book3s.c                65
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c         12
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c     4
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c         12
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c     4
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c         9
-rw-r--r--  arch/powerpc/kvm/book3s_paired_singles.c  7
-rw-r--r--  arch/powerpc/kvm/booke.c                 20
-rw-r--r--  arch/powerpc/kvm/booke.h                  6
-rw-r--r--  arch/powerpc/kvm/booke_emulate.c          6
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S       3
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c              12
-rw-r--r--  arch/powerpc/kvm/e500_tlb.h               2
-rw-r--r--  arch/powerpc/kvm/powerpc.c                3
18 files changed, 93 insertions(+), 84 deletions(-)
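This patch moves the guest MSR out of struct kvm_vcpu_arch and into struct
kvm_vcpu_arch_shared, declared in the guest-visible kvm_para.h header. That
placement suggests the structure is meant to be exposed to the guest (e.g.
via a shared page) so the guest can eventually read such state without
trapping to the host. The conversion itself is mechanical; a minimal
before/after sketch of the access pattern, using the names from the diff:

	/* before: MSR stored directly in the vcpu */
	ulong msr = vcpu->arch.msr;

	/* after: MSR stored behind the vcpu's shared-state pointer */
	ulong msr = vcpu->arch.shared->msr;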
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 53edacdf6940..ba20f90655f3 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -211,7 +211,6 @@ struct kvm_vcpu_arch {
 	u32 cr;
 #endif
 
-	ulong msr;
 #ifdef CONFIG_PPC_BOOK3S
 	ulong shadow_msr;
 	ulong hflags;
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 1485ba87a52a..a17dc5229d99 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 
 struct kvm_vcpu_arch_shared {
+	__u64 msr;
 };
 
 #ifdef __KERNEL__
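Note the field is __u64 rather than the kernel's ulong: kvm_para.h is
visible outside the kernel (the #ifdef __KERNEL__ guard above marks where
the kernel-only part begins), so the shared structure needs a fixed layout
regardless of who maps it. A hypothetical guest-side read, assuming some
later mechanism maps this structure into guest memory — not part of this
diff, and KVM_SHARED_ADDR is an invented placeholder:

	/* hypothetical: read the MSR copy without trapping to the host */
	struct kvm_vcpu_arch_shared *shared =
		(struct kvm_vcpu_arch_shared *)KVM_SHARED_ADDR;	/* assumed */
	__u64 msr = shared->msr;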
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 60e7db4c13af..1221bcdff52f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -394,13 +394,13 @@ int main(void)
 	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
-	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
 	DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
 	DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
 	DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
 	DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
 	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
 	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
+	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
 
 	/* book3s */
 #ifdef CONFIG_PPC_BOOK3S
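VCPU_MSR disappears and VCPU_SHARED_MSR replaces it, but the new constant
is an offset into struct kvm_vcpu_arch_shared, not into struct kvm_vcpu:
assembly code now needs two loads (pointer, then field), which is exactly
what the booke_interrupts.S hunk below does. For reference, DEFINE() emits
assembler-visible constants roughly like this (a sketch of the mechanism
from include/linux/kbuild.h of the era):

	/* each DEFINE() leaves a "->SYM value" marker in the compiler's
	 * asm output, which the build turns into a generated header */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))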
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 9b9b5cdea840..9f71b8d6eb0d 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -221,14 +221,14 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
 
 int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-	unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 
 	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
 
 int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-	unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
 
 	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
@@ -354,7 +354,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
 
 	stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
 	stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
-	                                           vcpu->arch.msr & MSR_PR);
+	                                           vcpu->arch.shared->msr & MSR_PR);
 	stlbe.tid = !(asid & 0xff);
 
 	/* Keep track of the reference so we can properly release it later. */
@@ -423,7 +423,7 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
 
 	/* Does it match current guest AS? */
 	/* XXX what about IS != DS? */
-	if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
 		return 0;
 
 	gpa = get_tlb_raddr(tlbe);
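The !!(...) idiom that recurs in these TLB paths collapses a masked MSR bit
to the 0/1 address-space (AS) value the TLB search expects; on Book E,
MSR[IS] selects the instruction address space and MSR[DS] the data address
space. A standalone illustration:

	/* normalize a flag bit to exactly 0 or 1 */
	unsigned int as = !!(msr & MSR_IS);	/* 1 if MSR_IS is set, else 0 */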
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index b3385dd6f28d..2efe69240e1b 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -115,31 +115,31 @@ static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
 
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.shadow_msr = vcpu->arch.msr;
+	ulong smsr = vcpu->arch.shared->msr;
+
 	/* Guest MSR values */
-	vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
-				 MSR_BE | MSR_DE;
+	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
 	/* Process MSR values */
-	vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
-				 MSR_EE;
+	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
 	/* External providers the guest reserved */
-	vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
 	/* 64-bit Process MSR values */
 #ifdef CONFIG_PPC_BOOK3S_64
-	vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+	smsr |= MSR_ISF | MSR_HV;
 #endif
+	vcpu->arch.shadow_msr = smsr;
 }
 
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
-	ulong old_msr = vcpu->arch.msr;
+	ulong old_msr = vcpu->arch.shared->msr;
 
 #ifdef EXIT_DEBUG
 	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
 
 	msr &= to_book3s(vcpu)->msr_mask;
-	vcpu->arch.msr = msr;
+	vcpu->arch.shared->msr = msr;
 	kvmppc_recalc_shadow_msr(vcpu);
 
 	if (msr & (MSR_WE|MSR_POW)) {
@@ -149,21 +149,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 		}
 	}
 
-	if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
 		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
 		kvmppc_mmu_flush_segments(vcpu);
 		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 	}
 
 	/* Preload FPU if it's enabled */
-	if (vcpu->arch.msr & MSR_FP)
+	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
 	vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
-	vcpu->arch.srr1 = vcpu->arch.msr | flags;
+	vcpu->arch.srr1 = vcpu->arch.shared->msr | flags;
 	kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
 	vcpu->arch.mmu.reset_msr(vcpu);
 }
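One non-mechanical detail in this file: kvmppc_recalc_shadow_msr() above
now assembles the shadow MSR in a local smsr and publishes it with a
single store at the end, where it previously mutated vcpu->arch.shadow_msr
in several steps. A plausible reason is to keep half-masked intermediate
values from ever being visible in the live field; in any case the
build-then-publish shape is the one to imitate:

	/* sketch of the pattern: compute locally, store once
	 * (GUEST_CONTROLLED_BITS/HOST_ENFORCED_BITS are placeholder names) */
	ulong smsr = vcpu->arch.shared->msr;
	smsr &= GUEST_CONTROLLED_BITS;
	smsr |= HOST_ENFORCED_BITS;
	vcpu->arch.shadow_msr = smsr;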
@@ -254,11 +254,11 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 
 	switch (priority) {
 	case BOOK3S_IRQPRIO_DECREMENTER:
-		deliver = vcpu->arch.msr & MSR_EE;
+		deliver = vcpu->arch.shared->msr & MSR_EE;
 		vec = BOOK3S_INTERRUPT_DECREMENTER;
 		break;
 	case BOOK3S_IRQPRIO_EXTERNAL:
-		deliver = vcpu->arch.msr & MSR_EE;
+		deliver = vcpu->arch.shared->msr & MSR_EE;
 		vec = BOOK3S_INTERRUPT_EXTERNAL;
 		break;
 	case BOOK3S_IRQPRIO_SYSTEM_RESET:
@@ -437,7 +437,7 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
 			 struct kvmppc_pte *pte)
 {
-	int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
+	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
 	int r;
 
 	if (relocated) {
@@ -545,8 +545,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	int page_found = 0;
 	struct kvmppc_pte pte;
 	bool is_mmio = false;
-	bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
-	bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
+	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
 	u64 vsid;
 
 	relocated = data ? dr : ir;
@@ -563,7 +563,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte.vpage = eaddr >> 12;
 	}
 
-	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 	case 0:
 		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
 		break;
@@ -571,7 +571,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case MSR_IR:
 		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
 
-		if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
 			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
 		else
 			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
@@ -596,14 +596,16 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		/* Page not found in guest PTE entries */
 		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
 		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
-		vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+		vcpu->arch.shared->msr |=
+			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
 		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
 		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
 		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-		vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+		vcpu->arch.shared->msr |=
+			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
@@ -695,9 +697,11 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
 
 	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
 	if (ret == -ENOENT) {
-		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
-		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
-		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+		ulong msr = vcpu->arch.shared->msr;
+
+		msr = kvmppc_set_field(msr, 33, 33, 1);
+		msr = kvmppc_set_field(msr, 34, 36, 0);
+		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
 		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
 		return EMULATE_AGAIN;
 	}
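The 33/34-36/42-47 arguments to kvmppc_set_field() are MSR/SRR1 bit
positions in IBM (big-endian) numbering, where bit 0 is the most
significant bit of the 64-bit register; the call overwrites that bit range
with the given value. A sketch of the semantics under that assumption (not
a copy of the kernel's definition):

	/* replace IBM-numbered bits msb..lsb of 'val' with 'field' */
	static inline u64 set_field(u64 val, int msb, int lsb, u64 field)
	{
		int shift = 63 - lsb;	/* lsb's distance from the LSB end */
		u64 mask = ((1ULL << (lsb - msb + 1)) - 1) << shift;

		return (val & ~mask) | ((field << shift) & mask);
	}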
@@ -736,7 +740,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
 		return RESUME_GUEST;
 
-	if (!(vcpu->arch.msr & msr)) {
+	if (!(vcpu->arch.shared->msr & msr)) {
 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		return RESUME_GUEST;
 	}
@@ -804,7 +808,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	if ((exit_nr != 0x900) && (exit_nr != 0x500))
 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
 			exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
-			vcpu->arch.msr);
+			vcpu->arch.shared->msr);
 #endif
 	kvm_resched(vcpu);
 	switch (exit_nr) {
@@ -836,7 +840,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
 		} else {
-			vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+			vcpu->arch.shared->msr |=
+				to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
@@ -904,7 +909,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 program_interrupt:
 		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
 
-		if (vcpu->arch.msr & MSR_PR) {
+		if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
 			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 #endif
@@ -1052,7 +1057,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->ctr = kvmppc_get_ctr(vcpu);
 	regs->lr = kvmppc_get_lr(vcpu);
 	regs->xer = kvmppc_get_xer(vcpu);
-	regs->msr = vcpu->arch.msr;
+	regs->msr = vcpu->arch.shared->msr;
 	regs->srr0 = vcpu->arch.srr0;
 	regs->srr1 = vcpu->arch.srr1;
 	regs->pid = vcpu->arch.pid;
@@ -1353,7 +1358,7 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	local_irq_enable();
 
 	/* Preload FPU if it's enabled */
-	if (vcpu->arch.msr & MSR_FP)
+	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
 	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 3292d76101d2..449bce5f021a 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -133,7 +133,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 		else
 			bat = &vcpu_book3s->ibat[i];
 
-		if (vcpu->arch.msr & MSR_PR) {
+		if (vcpu->arch.shared->msr & MSR_PR) {
 			if (!bat->vp)
 				continue;
 		} else {
@@ -214,8 +214,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 			pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF);
 			pp = pteg[i+1] & 3;
 
-			if ((sre->Kp && (vcpu->arch.msr & MSR_PR)) ||
-			    (sre->Ks && !(vcpu->arch.msr & MSR_PR)))
+			if ((sre->Kp && (vcpu->arch.shared->msr & MSR_PR)) ||
+			    (sre->Ks && !(vcpu->arch.shared->msr & MSR_PR)))
 				pp |= 4;
 
 			pte->may_write = false;
@@ -334,7 +334,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	struct kvmppc_sr *sr;
 	u64 gvsid = esid;
 
-	if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 		sr = find_sr(to_book3s(vcpu), ea);
 		if (sr->valid)
 			gvsid = sr->vsid;
@@ -343,7 +343,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	/* In case we only have one of MSR_IR or MSR_DR set, let's put
 	   that in the real-mode context (and hope RM doesn't access
 	   high memory) */
-	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 	case 0:
 		*vsid = VSID_REAL | esid;
 		break;
@@ -363,7 +363,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		BUG();
 	}
 
-	if (vcpu->arch.msr & MSR_PR)
+	if (vcpu->arch.shared->msr & MSR_PR)
 		*vsid |= VSID_PR;
 
 	return 0;
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 0b51ef872c1e..67b8c38d932f 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -86,7 +86,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
 	struct kvmppc_sid_map *map;
 	u16 sid_map_mask;
 
-	if (vcpu->arch.msr & MSR_PR)
+	if (vcpu->arch.shared->msr & MSR_PR)
 		gvsid |= VSID_PR;
 
 	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -253,7 +253,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 	u16 sid_map_mask;
 	static int backwards_map = 0;
 
-	if (vcpu->arch.msr & MSR_PR)
+	if (vcpu->arch.shared->msr & MSR_PR)
 		gvsid |= VSID_PR;
 
 	/* We might get collisions that trap in preceding order, so let's
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 4025ea26b3c1..58aa8409dae0 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -180,9 +180,9 @@ do_second:
 		goto no_page_found;
 	}
 
-	if ((vcpu->arch.msr & MSR_PR) && slbe->Kp)
+	if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
 		key = 4;
-	else if (!(vcpu->arch.msr & MSR_PR) && slbe->Ks)
+	else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
 		key = 4;
 
 	for (i=0; i<16; i+=2) {
@@ -381,7 +381,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
 	for (i = 1; i < vcpu_book3s->slb_nr; i++)
 		vcpu_book3s->slb[i].valid = false;
 
-	if (vcpu->arch.msr & MSR_IR) {
+	if (vcpu->arch.shared->msr & MSR_IR) {
 		kvmppc_mmu_flush_segments(vcpu);
 		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 	}
@@ -446,13 +446,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	struct kvmppc_slb *slb;
 	u64 gvsid = esid;
 
-	if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 		slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
 		if (slb)
 			gvsid = slb->vsid;
 	}
 
-	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 	case 0:
 		*vsid = VSID_REAL | esid;
 		break;
@@ -473,7 +473,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		break;
 	}
 
-	if (vcpu->arch.msr & MSR_PR)
+	if (vcpu->arch.shared->msr & MSR_PR)
 		*vsid |= VSID_PR;
 
 	return 0;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 384179a5002b..71c1f9027abb 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -66,7 +66,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
 	struct kvmppc_sid_map *map;
 	u16 sid_map_mask;
 
-	if (vcpu->arch.msr & MSR_PR)
+	if (vcpu->arch.shared->msr & MSR_PR)
 		gvsid |= VSID_PR;
 
 	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -191,7 +191,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 	u16 sid_map_mask;
 	static int backwards_map = 0;
 
-	if (vcpu->arch.msr & MSR_PR)
+	if (vcpu->arch.shared->msr & MSR_PR)
 		gvsid |= VSID_PR;
 
 	/* We might get collisions that trap in preceding order, so let's
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index c85f906038ce..35d3c16b2938 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -86,14 +86,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case 31:
 		switch (get_xop(inst)) {
 		case OP_31_XOP_MFMSR:
-			kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
+			kvmppc_set_gpr(vcpu, get_rt(inst),
+				       vcpu->arch.shared->msr);
 			break;
 		case OP_31_XOP_MTMSRD:
 		{
 			ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
 			if (inst & 0x10000) {
-				vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
-				vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
+				vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
+				vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
 			} else
 				kvmppc_set_msr(vcpu, rs);
 			break;
@@ -204,7 +205,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			ra = kvmppc_get_gpr(vcpu, get_ra(inst));
 
 			addr = (ra + rb) & ~31ULL;
-			if (!(vcpu->arch.msr & MSR_SF))
+			if (!(vcpu->arch.shared->msr & MSR_SF))
 				addr &= 0xffffffff;
 			vaddr = addr;
 
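In the MTMSRD case above, (inst & 0x10000) tests the instruction's L field
(bit 16 of the instruction image). Per the Power ISA, mtmsrd with L=1
updates only MSR[EE] and MSR[RI], which is why that branch pokes the two
bits in the shared MSR directly instead of going through the full
kvmppc_set_msr() path. A standalone decode sketch:

	/* extract the L field that the code above tests via 0x10000 */
	static inline int mtmsrd_l(u32 inst)
	{
		return (inst >> 16) & 1;
	}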
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index 474f2e24050a..626e6efaa79f 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -165,9 +165,10 @@ static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
 {
 	u64 dsisr;
+	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
 
-	vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 36, 0);
-	vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+	shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0);
+	shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0);
 	vcpu->arch.dear = eaddr;
 	/* Page Fault */
 	dsisr = kvmppc_set_field(0, 33, 33, 1);
@@ -658,7 +659,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	if (!kvmppc_inst_is_paired_single(vcpu, inst))
 		return EMULATE_FAIL;
 
-	if (!(vcpu->arch.msr & MSR_FP)) {
+	if (!(vcpu->arch.shared->msr & MSR_FP)) {
 		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
 		return EMULATE_AGAIN;
 	}
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 8d4e35f5372c..4ec9d49a1cb9 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -62,7 +62,7 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 {
 	int i;
 
-	printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
+	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
 	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
 	printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);
 
@@ -169,34 +169,34 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 		break;
 	case BOOKE_IRQPRIO_CRITICAL:
 	case BOOKE_IRQPRIO_WATCHDOG:
-		allowed = vcpu->arch.msr & MSR_CE;
+		allowed = vcpu->arch.shared->msr & MSR_CE;
 		msr_mask = MSR_ME;
 		break;
 	case BOOKE_IRQPRIO_MACHINE_CHECK:
-		allowed = vcpu->arch.msr & MSR_ME;
+		allowed = vcpu->arch.shared->msr & MSR_ME;
 		msr_mask = 0;
 		break;
 	case BOOKE_IRQPRIO_EXTERNAL:
 	case BOOKE_IRQPRIO_DECREMENTER:
 	case BOOKE_IRQPRIO_FIT:
-		allowed = vcpu->arch.msr & MSR_EE;
+		allowed = vcpu->arch.shared->msr & MSR_EE;
 		msr_mask = MSR_CE|MSR_ME|MSR_DE;
 		break;
 	case BOOKE_IRQPRIO_DEBUG:
-		allowed = vcpu->arch.msr & MSR_DE;
+		allowed = vcpu->arch.shared->msr & MSR_DE;
 		msr_mask = MSR_ME;
 		break;
 	}
 
 	if (allowed) {
 		vcpu->arch.srr0 = vcpu->arch.pc;
-		vcpu->arch.srr1 = vcpu->arch.msr;
+		vcpu->arch.srr1 = vcpu->arch.shared->msr;
 		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
 		if (update_esr == true)
 			vcpu->arch.esr = vcpu->arch.queued_esr;
 		if (update_dear == true)
 			vcpu->arch.dear = vcpu->arch.queued_dear;
-		kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);
+		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
 
 		clear_bit(priority, &vcpu->arch.pending_exceptions);
 	}
@@ -265,7 +265,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;
 
 	case BOOKE_INTERRUPT_PROGRAM:
-		if (vcpu->arch.msr & MSR_PR) {
+		if (vcpu->arch.shared->msr & MSR_PR) {
 			/* Program traps generated by user-level software must be handled
 			 * by the guest kernel. */
 			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
@@ -467,7 +467,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.pc = 0;
-	vcpu->arch.msr = 0;
+	vcpu->arch.shared->msr = 0;
 	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
 	vcpu->arch.shadow_pid = 1;
@@ -490,7 +490,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->ctr = vcpu->arch.ctr;
 	regs->lr = vcpu->arch.lr;
 	regs->xer = kvmppc_get_xer(vcpu);
-	regs->msr = vcpu->arch.msr;
+	regs->msr = vcpu->arch.shared->msr;
 	regs->srr0 = vcpu->arch.srr0;
 	regs->srr1 = vcpu->arch.srr1;
 	regs->pid = vcpu->arch.pid;
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index d59bcca1f9d8..88258acc98fa 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -54,12 +54,12 @@ extern unsigned long kvmppc_booke_handlers;
  * changing. */
 static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 {
-	if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
+	if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
 		kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
 
-	vcpu->arch.msr = new_msr;
+	vcpu->arch.shared->msr = new_msr;
 
-	if (vcpu->arch.msr & MSR_WE) {
+	if (vcpu->arch.shared->msr & MSR_WE) {
 		kvm_vcpu_block(vcpu);
 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
 	};
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index cbc790ee1928..b115203ac118 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -62,7 +62,7 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	case OP_31_XOP_MFMSR:
 		rt = get_rt(inst);
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr);
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
 		kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
 		break;
 
@@ -74,13 +74,13 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	case OP_31_XOP_WRTEE:
 		rs = get_rs(inst);
-		vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+		vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
 					| (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
 		kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
 		break;
 
 	case OP_31_XOP_WRTEEI:
-		vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+		vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
 					| (inst & MSR_EE);
 		kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
 		break;
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 380a78cf484d..049846911ce4 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -415,7 +415,8 @@ lightweight_exit:
 	lwz	r8, VCPU_GPR(r8)(r4)
 	lwz	r3, VCPU_PC(r4)
 	mtsrr0	r3
-	lwz	r3, VCPU_MSR(r4)
+	lwz	r3, VCPU_SHARED(r4)
+	lwz	r3, VCPU_SHARED_MSR(r3)
 	oris	r3, r3, KVMPPC_MSR_MASK@h
 	ori	r3, r3, KVMPPC_MSR_MASK@l
 	mtsrr1	r3
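The single VCPU_MSR load becomes two loads because the MSR no longer lives
at a fixed offset inside struct kvm_vcpu: the first lwz fetches the
arch.shared pointer (offset VCPU_SHARED), the second fetches the msr field
inside the pointed-to structure (offset VCPU_SHARED_MSR, defined against
struct kvm_vcpu_arch_shared in asm-offsets.c above). In C terms (a sketch;
r4 holds the vcpu pointer):

	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	ulong guest_msr = shared->msr;	/* then OR'ed with KVMPPC_MSR_MASK */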
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 21011e12caeb..092a390876f3 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -314,10 +314,10 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		| MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
 	stlbe->mas2 = (gvaddr & MAS2_EPN)
 		| e500_shadow_mas2_attrib(gtlbe->mas2,
-				vcpu_e500->vcpu.arch.msr & MSR_PR);
+				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
 	stlbe->mas3 = (hpaddr & MAS3_RPN)
 		| e500_shadow_mas3_attrib(gtlbe->mas3,
-				vcpu_e500->vcpu.arch.msr & MSR_PR);
+				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
 	stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;
 
 	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
@@ -576,28 +576,28 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 
 int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-	unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 
 	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
 }
 
 int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-	unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
 
 	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
 }
 
 void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
 {
-	unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 
 	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
 }
 
 void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
 {
-	unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
 
 	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
 }
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index d28e3010a5e2..458946b4775d 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -171,7 +171,7 @@ static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
 
 	/* Does it match current guest AS? */
 	/* XXX what about IS != DS? */
-	if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
 		return 0;
 
 	gpa = get_tlb_raddr(tlbe);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 72a4ad86ee91..22f6fa2982f2 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -38,7 +38,8 @@
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-	return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
+	return !(v->arch.shared->msr & MSR_WE) ||
+	       !!(v->arch.pending_exceptions);
 }
 
 
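The runnable test reads: a vcpu can run unless the guest has executed a
wait (MSR[WE] set, on Book E) and nothing is pending that would wake it.
Spelled out as a standalone predicate (a sketch with the same logic):

	static int vcpu_is_runnable(struct kvm_vcpu *v)
	{
		int waiting = v->arch.shared->msr & MSR_WE;

		return !waiting || v->arch.pending_exceptions != 0;
	}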