author	Alexander Graf <agraf@suse.de>	2010-01-07 20:58:01 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2010-03-01 10:35:47 -0500
commit	8e5b26b55a8b6aee2c789b1d20ec715f9e4bea5c (patch)
tree	4e2d003852ce327a47153b6c100239c6d8e1418f /arch
parent	0d178975d0a5afe5e0fd3211bd1397905b225be5 (diff)
KVM: PPC: Use accessor functions for GPR access
All code in PPC KVM currently accesses gprs in the vcpu struct directly.

While there's nothing wrong with that wrt the current way gprs are stored and loaded, it doesn't suffice for the PACA acceleration that will follow in this patchset.

So let's just create little wrapper inline functions that we call whenever a GPR needs to be read from or written to. The compiled code shouldn't really change at all for now.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
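The conversion applied throughout the diff below is mechanical: every direct read or write of vcpu->arch.gpr[...] is routed through one of two new inline accessors, so a later patch can change where GPRs actually live (e.g. the PACA) without touching the callers again. A minimal sketch of the idiom follows; the accessors are taken verbatim from the kvm_ppc.h hunk below, while the emulate_mr() caller is a hypothetical illustration (not part of this commit) and assumes the usual kernel-internal KVM headers:

	/* Accessors as introduced in kvm_ppc.h. Both #ifdef branches are
	 * identical for now, so generated code should not change yet;
	 * the split merely reserves a hook for PACA-backed storage. */
	static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
	{
		vcpu->arch.gpr[num] = val;
	}

	static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
	{
		return vcpu->arch.gpr[num];
	}

	/* Hypothetical caller showing the conversion idiom:
	 *   before: vcpu->arch.gpr[rt] = vcpu->arch.gpr[rs];
	 *   after:  both the read and the write go through the accessors.
	 */
	static void emulate_mr(struct kvm_vcpu *vcpu, int rt, int rs)
	{
		kvmppc_set_gpr(vcpu, rt, kvmppc_get_gpr(vcpu, rs));
	}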
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/kvm_ppc.h	26
-rw-r--r--	arch/powerpc/kvm/44x_emulate.c	25
-rw-r--r--	arch/powerpc/kvm/44x_tlb.c	14
-rw-r--r--	arch/powerpc/kvm/book3s.c	8
-rw-r--r--	arch/powerpc/kvm/book3s_64_emulate.c	77
-rw-r--r--	arch/powerpc/kvm/booke.c	16
-rw-r--r--	arch/powerpc/kvm/booke_emulate.c	107
-rw-r--r--	arch/powerpc/kvm/e500_emulate.c	95
-rw-r--r--	arch/powerpc/kvm/e500_tlb.c	4
-rw-r--r--	arch/powerpc/kvm/emulate.c	106
-rw-r--r--	arch/powerpc/kvm/powerpc.c	21
11 files changed, 274 insertions(+), 225 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index abfd0c4d567b..ba01b9c1d388 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -96,4 +96,30 @@ extern void kvmppc_booke_exit(void);
 
 extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
 
+#ifdef CONFIG_PPC_BOOK3S
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+	vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+	return vcpu->arch.gpr[num];
+}
+
+#else
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+	vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+	return vcpu->arch.gpr[num];
+}
+
+#endif
+
 #endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 61af58fcecee..65ea083a5b27 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -65,13 +65,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 */
 	switch (dcrn) {
 	case DCRN_CPR0_CONFIG_ADDR:
-		vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
 		break;
 	case DCRN_CPR0_CONFIG_DATA:
 		local_irq_disable();
 		mtdcr(DCRN_CPR0_CONFIG_ADDR,
 		      vcpu->arch.cpr0_cfgaddr);
-		vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
+		kvmppc_set_gpr(vcpu, rt,
+			       mfdcr(DCRN_CPR0_CONFIG_DATA));
 		local_irq_enable();
 		break;
 	default:
@@ -93,11 +94,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		/* emulate some access in kernel */
 		switch (dcrn) {
 		case DCRN_CPR0_CONFIG_ADDR:
-			vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
+			vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
 			break;
 		default:
 			run->dcr.dcrn = dcrn;
-			run->dcr.data = vcpu->arch.gpr[rs];
+			run->dcr.data = kvmppc_get_gpr(vcpu, rs);
 			run->dcr.is_write = 1;
 			vcpu->arch.dcr_needed = 1;
 			kvmppc_account_exit(vcpu, DCR_EXITS);
@@ -146,13 +147,13 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 
 	switch (sprn) {
 	case SPRN_PID:
-		kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
+		kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
 	case SPRN_MMUCR:
-		vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
+		vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
 	case SPRN_CCR0:
-		vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
+		vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
 	case SPRN_CCR1:
-		vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
+		vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
 	default:
 		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
 	}
@@ -167,13 +168,13 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 
 	switch (sprn) {
 	case SPRN_PID:
-		vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
 	case SPRN_MMUCR:
-		vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
 	case SPRN_CCR0:
-		vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
 	case SPRN_CCR1:
-		vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
 	default:
 		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
 	}
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ff3cb63b8117..8b3773669c97 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -439,7 +439,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	struct kvmppc_44x_tlbe *tlbe;
 	unsigned int gtlb_index;
 
-	gtlb_index = vcpu->arch.gpr[ra];
+	gtlb_index = kvmppc_get_gpr(vcpu, ra);
 	if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
 		printk("%s: index %d\n", __func__, gtlb_index);
 		kvmppc_dump_vcpu(vcpu);
@@ -455,15 +455,15 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	switch (ws) {
 	case PPC44x_TLB_PAGEID:
 		tlbe->tid = get_mmucr_stid(vcpu);
-		tlbe->word0 = vcpu->arch.gpr[rs];
+		tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
 		break;
 
 	case PPC44x_TLB_XLAT:
-		tlbe->word1 = vcpu->arch.gpr[rs];
+		tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
 		break;
 
 	case PPC44x_TLB_ATTRIB:
-		tlbe->word2 = vcpu->arch.gpr[rs];
+		tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
 		break;
 
 	default:
@@ -500,9 +500,9 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
 	unsigned int as = get_mmucr_sts(vcpu);
 	unsigned int pid = get_mmucr_stid(vcpu);
 
-	ea = vcpu->arch.gpr[rb];
+	ea = kvmppc_get_gpr(vcpu, rb);
 	if (ra)
-		ea += vcpu->arch.gpr[ra];
+		ea += kvmppc_get_gpr(vcpu, ra);
 
 	gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
 	if (rc) {
@@ -511,7 +511,7 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
 	else
 		vcpu->arch.cr |= 0x20000000;
 	}
-	vcpu->arch.gpr[rt] = gtlb_index;
+	kvmppc_set_gpr(vcpu, rt, gtlb_index);
 
 	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
 	return EMULATE_DONE;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index fd2a4d531582..574b24fb6e0e 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -658,7 +658,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	}
 	case BOOK3S_INTERRUPT_SYSCALL:
 #ifdef EXIT_DEBUG
-		printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]);
+		printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
 #endif
 		vcpu->stat.syscall_exits++;
 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
@@ -734,7 +734,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->sprg7 = vcpu->arch.sprg6;
 
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
-		regs->gpr[i] = vcpu->arch.gpr[i];
+		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
 	return 0;
 }
@@ -759,8 +759,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	vcpu->arch.sprg6 = regs->sprg5;
 	vcpu->arch.sprg7 = regs->sprg6;
 
-	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
-		vcpu->arch.gpr[i] = regs->gpr[i];
+	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
 	return 0;
 }
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
index 1027eac6d474..2b0ee7e040c9 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_64_emulate.c
@@ -65,11 +65,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case 31:
 		switch (get_xop(inst)) {
 		case OP_31_XOP_MFMSR:
-			vcpu->arch.gpr[get_rt(inst)] = vcpu->arch.msr;
+			kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
 			break;
 		case OP_31_XOP_MTMSRD:
 		{
-			ulong rs = vcpu->arch.gpr[get_rs(inst)];
+			ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
 			if (inst & 0x10000) {
 				vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
 				vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
@@ -78,30 +78,30 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 		}
 		case OP_31_XOP_MTMSR:
-			kvmppc_set_msr(vcpu, vcpu->arch.gpr[get_rs(inst)]);
+			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
 			break;
 		case OP_31_XOP_MFSRIN:
 		{
 			int srnum;
 
-			srnum = (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf;
+			srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
 			if (vcpu->arch.mmu.mfsrin) {
 				u32 sr;
 				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-				vcpu->arch.gpr[get_rt(inst)] = sr;
+				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
 			}
 			break;
 		}
 		case OP_31_XOP_MTSRIN:
 			vcpu->arch.mmu.mtsrin(vcpu,
-				(vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf,
-				vcpu->arch.gpr[get_rs(inst)]);
+				(kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
+				kvmppc_get_gpr(vcpu, get_rs(inst)));
 			break;
 		case OP_31_XOP_TLBIE:
 		case OP_31_XOP_TLBIEL:
 		{
 			bool large = (inst & 0x00200000) ? true : false;
-			ulong addr = vcpu->arch.gpr[get_rb(inst)];
+			ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
 			vcpu->arch.mmu.tlbie(vcpu, addr, large);
 			break;
 		}
@@ -111,14 +111,16 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			if (!vcpu->arch.mmu.slbmte)
 				return EMULATE_FAIL;
 
-			vcpu->arch.mmu.slbmte(vcpu, vcpu->arch.gpr[get_rs(inst)],
-					      vcpu->arch.gpr[get_rb(inst)]);
+			vcpu->arch.mmu.slbmte(vcpu,
+					kvmppc_get_gpr(vcpu, get_rs(inst)),
+					kvmppc_get_gpr(vcpu, get_rb(inst)));
 			break;
 		case OP_31_XOP_SLBIE:
 			if (!vcpu->arch.mmu.slbie)
 				return EMULATE_FAIL;
 
-			vcpu->arch.mmu.slbie(vcpu, vcpu->arch.gpr[get_rb(inst)]);
+			vcpu->arch.mmu.slbie(vcpu,
+					kvmppc_get_gpr(vcpu, get_rb(inst)));
 			break;
 		case OP_31_XOP_SLBIA:
 			if (!vcpu->arch.mmu.slbia)
@@ -132,9 +134,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			} else {
 				ulong t, rb;
 
-				rb = vcpu->arch.gpr[get_rb(inst)];
+				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
 				t = vcpu->arch.mmu.slbmfee(vcpu, rb);
-				vcpu->arch.gpr[get_rt(inst)] = t;
+				kvmppc_set_gpr(vcpu, get_rt(inst), t);
 			}
 			break;
 		case OP_31_XOP_SLBMFEV:
@@ -143,20 +145,20 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			} else {
 				ulong t, rb;
 
-				rb = vcpu->arch.gpr[get_rb(inst)];
+				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
 				t = vcpu->arch.mmu.slbmfev(vcpu, rb);
-				vcpu->arch.gpr[get_rt(inst)] = t;
+				kvmppc_set_gpr(vcpu, get_rt(inst), t);
 			}
 			break;
 		case OP_31_XOP_DCBZ:
 		{
-			ulong rb = vcpu->arch.gpr[get_rb(inst)];
+			ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
 			ulong ra = 0;
 			ulong addr;
 			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
 
 			if (get_ra(inst))
-				ra = vcpu->arch.gpr[get_ra(inst)];
+				ra = kvmppc_get_gpr(vcpu, get_ra(inst));
 
 			addr = (ra + rb) & ~31ULL;
 			if (!(vcpu->arch.msr & MSR_SF))
@@ -233,43 +235,44 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
 int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 {
 	int emulated = EMULATE_DONE;
+	ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
 	switch (sprn) {
 	case SPRN_SDR1:
-		to_book3s(vcpu)->sdr1 = vcpu->arch.gpr[rs];
+		to_book3s(vcpu)->sdr1 = spr_val;
 		break;
 	case SPRN_DSISR:
-		to_book3s(vcpu)->dsisr = vcpu->arch.gpr[rs];
+		to_book3s(vcpu)->dsisr = spr_val;
 		break;
 	case SPRN_DAR:
-		vcpu->arch.dear = vcpu->arch.gpr[rs];
+		vcpu->arch.dear = spr_val;
 		break;
 	case SPRN_HIOR:
-		to_book3s(vcpu)->hior = vcpu->arch.gpr[rs];
+		to_book3s(vcpu)->hior = spr_val;
 		break;
 	case SPRN_IBAT0U ... SPRN_IBAT3L:
 	case SPRN_IBAT4U ... SPRN_IBAT7L:
 	case SPRN_DBAT0U ... SPRN_DBAT3L:
 	case SPRN_DBAT4U ... SPRN_DBAT7L:
-		kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]);
+		kvmppc_write_bat(vcpu, sprn, (u32)spr_val);
 		/* BAT writes happen so rarely that we're ok to flush
 		 * everything here */
 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
 		break;
 	case SPRN_HID0:
-		to_book3s(vcpu)->hid[0] = vcpu->arch.gpr[rs];
+		to_book3s(vcpu)->hid[0] = spr_val;
 		break;
 	case SPRN_HID1:
-		to_book3s(vcpu)->hid[1] = vcpu->arch.gpr[rs];
+		to_book3s(vcpu)->hid[1] = spr_val;
 		break;
 	case SPRN_HID2:
-		to_book3s(vcpu)->hid[2] = vcpu->arch.gpr[rs];
+		to_book3s(vcpu)->hid[2] = spr_val;
 		break;
 	case SPRN_HID4:
-		to_book3s(vcpu)->hid[4] = vcpu->arch.gpr[rs];
+		to_book3s(vcpu)->hid[4] = spr_val;
 		break;
 	case SPRN_HID5:
-		to_book3s(vcpu)->hid[5] = vcpu->arch.gpr[rs];
+		to_book3s(vcpu)->hid[5] = spr_val;
 		/* guest HID5 set can change is_dcbz32 */
 		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
 		    (mfmsr() & MSR_HV))
@@ -299,38 +302,38 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 
 	switch (sprn) {
 	case SPRN_SDR1:
-		vcpu->arch.gpr[rt] = to_book3s(vcpu)->sdr1;
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
 		break;
 	case SPRN_DSISR:
-		vcpu->arch.gpr[rt] = to_book3s(vcpu)->dsisr;
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr);
 		break;
 	case SPRN_DAR:
-		vcpu->arch.gpr[rt] = vcpu->arch.dear;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear);
 		break;
 	case SPRN_HIOR:
-		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hior;
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
 		break;
 	case SPRN_HID0:
-		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[0];
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
 		break;
 	case SPRN_HID1:
-		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[1];
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
 		break;
 	case SPRN_HID2:
-		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[2];
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
 		break;
 	case SPRN_HID4:
-		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[4];
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
 		break;
 	case SPRN_HID5:
-		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[5];
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
 		break;
 	case SPRN_THRM1:
 	case SPRN_THRM2:
 	case SPRN_THRM3:
 	case SPRN_CTRLF:
 	case SPRN_CTRLT:
-		vcpu->arch.gpr[rt] = 0;
+		kvmppc_set_gpr(vcpu, rt, 0);
 		break;
 	default:
 		printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index d8b63420acf8..49af80e4a6e1 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -69,10 +69,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 
 	for (i = 0; i < 32; i += 4) {
 		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
-		       vcpu->arch.gpr[i],
-		       vcpu->arch.gpr[i+1],
-		       vcpu->arch.gpr[i+2],
-		       vcpu->arch.gpr[i+3]);
+		       kvmppc_get_gpr(vcpu, i),
+		       kvmppc_get_gpr(vcpu, i+1),
+		       kvmppc_get_gpr(vcpu, i+2),
+		       kvmppc_get_gpr(vcpu, i+3));
 	}
 }
 
@@ -431,7 +431,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.pc = 0;
 	vcpu->arch.msr = 0;
-	vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
+	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
 	vcpu->arch.shadow_pid = 1;
 
@@ -466,7 +466,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->sprg7 = vcpu->arch.sprg6;
 
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
-		regs->gpr[i] = vcpu->arch.gpr[i];
+		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
 	return 0;
 }
@@ -491,8 +491,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	vcpu->arch.sprg6 = regs->sprg5;
 	vcpu->arch.sprg7 = regs->sprg6;
 
-	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
-		vcpu->arch.gpr[i] = regs->gpr[i];
+	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
 	return 0;
 }
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index aebc65e93f4b..cbc790ee1928 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -62,20 +62,20 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	case OP_31_XOP_MFMSR:
 		rt = get_rt(inst);
-		vcpu->arch.gpr[rt] = vcpu->arch.msr;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr);
 		kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
 		break;
 
 	case OP_31_XOP_MTMSR:
 		rs = get_rs(inst);
 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
-		kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
+		kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
 		break;
 
 	case OP_31_XOP_WRTEE:
 		rs = get_rs(inst);
 		vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
-				 | (vcpu->arch.gpr[rs] & MSR_EE);
+				 | (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
 		kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
 		break;
 
@@ -101,22 +101,23 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 {
 	int emulated = EMULATE_DONE;
+	ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
 	switch (sprn) {
 	case SPRN_DEAR:
-		vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
+		vcpu->arch.dear = spr_val; break;
 	case SPRN_ESR:
-		vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
+		vcpu->arch.esr = spr_val; break;
 	case SPRN_DBCR0:
-		vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
+		vcpu->arch.dbcr0 = spr_val; break;
 	case SPRN_DBCR1:
-		vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
+		vcpu->arch.dbcr1 = spr_val; break;
 	case SPRN_DBSR:
-		vcpu->arch.dbsr &= ~vcpu->arch.gpr[rs]; break;
+		vcpu->arch.dbsr &= ~spr_val; break;
 	case SPRN_TSR:
-		vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
+		vcpu->arch.tsr &= ~spr_val; break;
 	case SPRN_TCR:
-		vcpu->arch.tcr = vcpu->arch.gpr[rs];
+		vcpu->arch.tcr = spr_val;
 		kvmppc_emulate_dec(vcpu);
 		break;
 
@@ -124,64 +125,64 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	 * loaded into the real SPRGs when resuming the
 	 * guest. */
 	case SPRN_SPRG4:
-		vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
+		vcpu->arch.sprg4 = spr_val; break;
 	case SPRN_SPRG5:
-		vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
+		vcpu->arch.sprg5 = spr_val; break;
 	case SPRN_SPRG6:
-		vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
+		vcpu->arch.sprg6 = spr_val; break;
 	case SPRN_SPRG7:
-		vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
+		vcpu->arch.sprg7 = spr_val; break;
 
 	case SPRN_IVPR:
-		vcpu->arch.ivpr = vcpu->arch.gpr[rs];
+		vcpu->arch.ivpr = spr_val;
 		break;
 	case SPRN_IVOR0:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
 		break;
 	case SPRN_IVOR1:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val;
 		break;
 	case SPRN_IVOR2:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
 		break;
 	case SPRN_IVOR3:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
 		break;
 	case SPRN_IVOR4:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val;
 		break;
 	case SPRN_IVOR5:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val;
 		break;
 	case SPRN_IVOR6:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val;
 		break;
 	case SPRN_IVOR7:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val;
 		break;
 	case SPRN_IVOR8:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
 		break;
 	case SPRN_IVOR9:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
 		break;
 	case SPRN_IVOR10:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val;
 		break;
 	case SPRN_IVOR11:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val;
 		break;
 	case SPRN_IVOR12:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val;
 		break;
 	case SPRN_IVOR13:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val;
 		break;
 	case SPRN_IVOR14:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val;
 		break;
 	case SPRN_IVOR15:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val;
 		break;
 
 	default:
@@ -197,65 +198,65 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 
 	switch (sprn) {
 	case SPRN_IVPR:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
 	case SPRN_DEAR:
-		vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break;
 	case SPRN_ESR:
-		vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break;
 	case SPRN_DBCR0:
-		vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
 	case SPRN_DBCR1:
-		vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
 	case SPRN_DBSR:
-		vcpu->arch.gpr[rt] = vcpu->arch.dbsr; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
 
 	case SPRN_IVOR0:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
 		break;
 	case SPRN_IVOR1:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
 		break;
 	case SPRN_IVOR2:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
 		break;
 	case SPRN_IVOR3:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
 		break;
 	case SPRN_IVOR4:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
 		break;
 	case SPRN_IVOR5:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
 		break;
 	case SPRN_IVOR6:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
 		break;
 	case SPRN_IVOR7:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
 		break;
 	case SPRN_IVOR8:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
 		break;
 	case SPRN_IVOR9:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
 		break;
 	case SPRN_IVOR10:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
 		break;
 	case SPRN_IVOR11:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]);
 		break;
 	case SPRN_IVOR12:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
 		break;
 	case SPRN_IVOR13:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]);
 		break;
 	case SPRN_IVOR14:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]);
 		break;
 	case SPRN_IVOR15:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]);
 		break;
 
 	default:
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index be95b8d8e3b7..7644f7a9bac3 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -74,54 +74,55 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int emulated = EMULATE_DONE;
+	ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
 	switch (sprn) {
 	case SPRN_PID:
 		vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
-			vcpu->arch.pid = vcpu->arch.gpr[rs];
+			vcpu->arch.pid = spr_val;
 		break;
 	case SPRN_PID1:
-		vcpu_e500->pid[1] = vcpu->arch.gpr[rs]; break;
+		vcpu_e500->pid[1] = spr_val; break;
 	case SPRN_PID2:
-		vcpu_e500->pid[2] = vcpu->arch.gpr[rs]; break;
+		vcpu_e500->pid[2] = spr_val; break;
 	case SPRN_MAS0:
-		vcpu_e500->mas0 = vcpu->arch.gpr[rs]; break;
+		vcpu_e500->mas0 = spr_val; break;
 	case SPRN_MAS1:
-		vcpu_e500->mas1 = vcpu->arch.gpr[rs]; break;
+		vcpu_e500->mas1 = spr_val; break;
 	case SPRN_MAS2:
-		vcpu_e500->mas2 = vcpu->arch.gpr[rs]; break;
+		vcpu_e500->mas2 = spr_val; break;
 	case SPRN_MAS3:
-		vcpu_e500->mas3 = vcpu->arch.gpr[rs]; break;
+		vcpu_e500->mas3 = spr_val; break;
 	case SPRN_MAS4:
-		vcpu_e500->mas4 = vcpu->arch.gpr[rs]; break;
+		vcpu_e500->mas4 = spr_val; break;
 	case SPRN_MAS6:
-		vcpu_e500->mas6 = vcpu->arch.gpr[rs]; break;
+		vcpu_e500->mas6 = spr_val; break;
 	case SPRN_MAS7:
-		vcpu_e500->mas7 = vcpu->arch.gpr[rs]; break;
+		vcpu_e500->mas7 = spr_val; break;
 	case SPRN_L1CSR1:
-		vcpu_e500->l1csr1 = vcpu->arch.gpr[rs]; break;
+		vcpu_e500->l1csr1 = spr_val; break;
 	case SPRN_HID0:
-		vcpu_e500->hid0 = vcpu->arch.gpr[rs]; break;
+		vcpu_e500->hid0 = spr_val; break;
 	case SPRN_HID1:
-		vcpu_e500->hid1 = vcpu->arch.gpr[rs]; break;
+		vcpu_e500->hid1 = spr_val; break;
 
 	case SPRN_MMUCSR0:
 		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
-				vcpu->arch.gpr[rs]);
+				spr_val);
 		break;
 
 	/* extra exceptions */
 	case SPRN_IVOR32:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
 		break;
 	case SPRN_IVOR33:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
 		break;
 	case SPRN_IVOR34:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
 		break;
 	case SPRN_IVOR35:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = vcpu->arch.gpr[rs];
+		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
 		break;
 
 	default:
@@ -138,63 +139,71 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 
 	switch (sprn) {
 	case SPRN_PID:
-		vcpu->arch.gpr[rt] = vcpu_e500->pid[0]; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
 	case SPRN_PID1:
-		vcpu->arch.gpr[rt] = vcpu_e500->pid[1]; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
 	case SPRN_PID2:
-		vcpu->arch.gpr[rt] = vcpu_e500->pid[2]; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
 	case SPRN_MAS0:
-		vcpu->arch.gpr[rt] = vcpu_e500->mas0; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break;
 	case SPRN_MAS1:
-		vcpu->arch.gpr[rt] = vcpu_e500->mas1; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break;
 	case SPRN_MAS2:
-		vcpu->arch.gpr[rt] = vcpu_e500->mas2; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
 	case SPRN_MAS3:
-		vcpu->arch.gpr[rt] = vcpu_e500->mas3; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break;
 	case SPRN_MAS4:
-		vcpu->arch.gpr[rt] = vcpu_e500->mas4; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
 	case SPRN_MAS6:
-		vcpu->arch.gpr[rt] = vcpu_e500->mas6; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
 	case SPRN_MAS7:
-		vcpu->arch.gpr[rt] = vcpu_e500->mas7; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break;
 
 	case SPRN_TLB0CFG:
-		vcpu->arch.gpr[rt] = mfspr(SPRN_TLB0CFG);
-		vcpu->arch.gpr[rt] &= ~0xfffUL;
-		vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[0];
+	{
+		ulong tmp = SPRN_TLB0CFG;
+
+		tmp &= ~0xfffUL;
+		tmp |= vcpu_e500->guest_tlb_size[0];
+		kvmppc_set_gpr(vcpu, rt, tmp);
 		break;
+	}
 
 	case SPRN_TLB1CFG:
-		vcpu->arch.gpr[rt] = mfspr(SPRN_TLB1CFG);
-		vcpu->arch.gpr[rt] &= ~0xfffUL;
-		vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[1];
+	{
+		ulong tmp = SPRN_TLB1CFG;
+
+		tmp &= ~0xfffUL;
+		tmp |= vcpu_e500->guest_tlb_size[1];
+		kvmppc_set_gpr(vcpu, rt, tmp);
 		break;
+	}
 
 	case SPRN_L1CSR1:
-		vcpu->arch.gpr[rt] = vcpu_e500->l1csr1; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
 	case SPRN_HID0:
-		vcpu->arch.gpr[rt] = vcpu_e500->hid0; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
 	case SPRN_HID1:
-		vcpu->arch.gpr[rt] = vcpu_e500->hid1; break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
 
 	case SPRN_MMUCSR0:
-		vcpu->arch.gpr[rt] = 0; break;
+		kvmppc_set_gpr(vcpu, rt, 0); break;
 
 	case SPRN_MMUCFG:
-		vcpu->arch.gpr[rt] = mfspr(SPRN_MMUCFG); break;
+		kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break;
 
 	/* extra exceptions */
 	case SPRN_IVOR32:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
 		break;
 	case SPRN_IVOR33:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
 		break;
 	case SPRN_IVOR34:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
 		break;
 	case SPRN_IVOR35:
-		vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
 		break;
 	default:
 		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index fb1e1dc11ba5..6a7fc012b93f 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -417,7 +417,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
 	int esel, tlbsel;
 	gva_t ea;
 
-	ea = ((ra) ? vcpu->arch.gpr[ra] : 0) + vcpu->arch.gpr[rb];
+	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);
 
 	ia = (ea >> 2) & 0x1;
 
@@ -470,7 +470,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 	struct tlbe *gtlbe = NULL;
 	gva_t ea;
 
-	ea = vcpu->arch.gpr[rb];
+	ea = kvmppc_get_gpr(vcpu, rb);
 
 	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
 		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 303457b2f52a..38219af0cd0e 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -170,14 +170,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		case OP_31_XOP_STWX:
 			rs = get_rs(inst);
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       4, 1);
 			break;
 
 		case OP_31_XOP_STBX:
 			rs = get_rs(inst);
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       1, 1);
 			break;
 
@@ -186,14 +186,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			ra = get_ra(inst);
 			rb = get_rb(inst);
 
-			ea = vcpu->arch.gpr[rb];
+			ea = kvmppc_get_gpr(vcpu, rb);
 			if (ra)
-				ea += vcpu->arch.gpr[ra];
+				ea += kvmppc_get_gpr(vcpu, ra);
 
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       1, 1);
-			vcpu->arch.gpr[rs] = ea;
+			kvmppc_set_gpr(vcpu, rs, ea);
 			break;
 
 		case OP_31_XOP_LHZX:
@@ -206,12 +206,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			ra = get_ra(inst);
 			rb = get_rb(inst);
 
-			ea = vcpu->arch.gpr[rb];
+			ea = kvmppc_get_gpr(vcpu, rb);
 			if (ra)
-				ea += vcpu->arch.gpr[ra];
+				ea += kvmppc_get_gpr(vcpu, ra);
 
 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-			vcpu->arch.gpr[ra] = ea;
+			kvmppc_set_gpr(vcpu, ra, ea);
 			break;
 
 		case OP_31_XOP_MFSPR:
@@ -220,47 +220,49 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 			switch (sprn) {
 			case SPRN_SRR0:
-				vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
 			case SPRN_SRR1:
-				vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
 			case SPRN_PVR:
-				vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
 			case SPRN_PIR:
-				vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
 			case SPRN_MSSSR0:
-				vcpu->arch.gpr[rt] = 0; break;
+				kvmppc_set_gpr(vcpu, rt, 0); break;
 
 			/* Note: mftb and TBRL/TBWL are user-accessible, so
 			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
 			case SPRN_TBWL:
-				vcpu->arch.gpr[rt] = get_tb() >> 32; break;
+				kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
 			case SPRN_TBWU:
-				vcpu->arch.gpr[rt] = get_tb(); break;
+				kvmppc_set_gpr(vcpu, rt, get_tb()); break;
 
 			case SPRN_SPRG0:
-				vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
 			case SPRN_SPRG1:
-				vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
 			case SPRN_SPRG2:
-				vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
 			case SPRN_SPRG3:
-				vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
 			/* Note: SPRG4-7 are user-readable, so we don't get
 			 * a trap. */
 
 			case SPRN_DEC:
 			{
 				u64 jd = get_tb() - vcpu->arch.dec_jiffies;
-				vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
-				pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
+				pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n",
+					 vcpu->arch.dec, jd,
+					 kvmppc_get_gpr(vcpu, rt));
 				break;
 			}
 			default:
 				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
 				if (emulated == EMULATE_FAIL) {
 					printk("mfspr: unknown spr %x\n", sprn);
-					vcpu->arch.gpr[rt] = 0;
+					kvmppc_set_gpr(vcpu, rt, 0);
 				}
 				break;
 			}
@@ -272,7 +274,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			rb = get_rb(inst);
 
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       2, 1);
 			break;
 
@@ -281,14 +283,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			ra = get_ra(inst);
 			rb = get_rb(inst);
 
-			ea = vcpu->arch.gpr[rb];
+			ea = kvmppc_get_gpr(vcpu, rb);
 			if (ra)
-				ea += vcpu->arch.gpr[ra];
+				ea += kvmppc_get_gpr(vcpu, ra);
 
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       2, 1);
-			vcpu->arch.gpr[ra] = ea;
+			kvmppc_set_gpr(vcpu, ra, ea);
 			break;
 
 		case OP_31_XOP_MTSPR:
@@ -296,9 +298,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			rs = get_rs(inst);
 			switch (sprn) {
 			case SPRN_SRR0:
-				vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
+				vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
 			case SPRN_SRR1:
-				vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
+				vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;
 
 			/* XXX We need to context-switch the timebase for
 			 * watchdog and FIT. */
@@ -308,18 +310,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			case SPRN_MSSSR0: break;
 
 			case SPRN_DEC:
-				vcpu->arch.dec = vcpu->arch.gpr[rs];
+				vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
 				kvmppc_emulate_dec(vcpu);
 				break;
 
 			case SPRN_SPRG0:
-				vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
+				vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
 			case SPRN_SPRG1:
-				vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
+				vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
 			case SPRN_SPRG2:
-				vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
+				vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
 			case SPRN_SPRG3:
-				vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
+				vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;
 
 			default:
 				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
@@ -351,7 +353,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			rb = get_rb(inst);
 
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       4, 0);
 			break;
 
@@ -366,7 +368,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			rb = get_rb(inst);
 
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       2, 0);
 			break;
 
@@ -385,7 +387,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ra = get_ra(inst);
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_LBZ:
@@ -397,35 +399,39 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ra = get_ra(inst);
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_STW:
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       4, 1);
 		break;
 
 	case OP_STWU:
 		ra = get_ra(inst);
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       4, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_STB:
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
 		break;
 
 	case OP_STBU:
 		ra = get_ra(inst);
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_LHZ:
@@ -437,21 +443,23 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ra = get_ra(inst);
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_STH:
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);
 		break;
 
 	case OP_STHU:
 		ra = get_ra(inst);
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	default:
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 4633e7850dd2..2c291161df89 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -270,34 +270,35 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
 {
-	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
-	*gpr = run->dcr.data;
+	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
 }
 
 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                       struct kvm_run *run)
 {
-	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+	ulong gpr;
 
-	if (run->mmio.len > sizeof(*gpr)) {
+	if (run->mmio.len > sizeof(gpr)) {
 		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
 		return;
 	}
 
 	if (vcpu->arch.mmio_is_bigendian) {
 		switch (run->mmio.len) {
-		case 4: *gpr = *(u32 *)run->mmio.data; break;
-		case 2: *gpr = *(u16 *)run->mmio.data; break;
-		case 1: *gpr = *(u8 *)run->mmio.data; break;
+		case 4: gpr = *(u32 *)run->mmio.data; break;
+		case 2: gpr = *(u16 *)run->mmio.data; break;
+		case 1: gpr = *(u8 *)run->mmio.data; break;
 		}
 	} else {
 		/* Convert BE data from userland back to LE. */
 		switch (run->mmio.len) {
-		case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
-		case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
-		case 1: *gpr = *(u8 *)run->mmio.data; break;
+		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
+		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
+		case 1: gpr = *(u8 *)run->mmio.data; break;
 		}
 	}
+
+	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
 }
 
 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,