author	Alexander Graf <agraf@suse.de>	2012-05-04 08:55:12 -0400
committer	Alexander Graf <agraf@suse.de>	2012-05-06 10:19:13 -0400
commit	54771e6217ce05a474827d9b23ff03de9d2ef2a0 (patch)
tree	4555f93d29863b6c0bbd4be61c60bfe7b80ce6c9
parent	c46dc9a86148bc37c31d67a22a3887144ba7aa81 (diff)
KVM: PPC: Emulator: clean up SPR reads and writes
When reading and writing SPRs, every SPR emulation piece had to read or write the respective GPR the value was read from or stored in itself. This approach is pretty prone to failure. What if we accidentally implement mfspr emulation where we just do "break" and nothing else? Suddenly we would get a random value in the return register - which is always a bad idea.

So let's consolidate the generic code paths and only give the core-specific SPR handling code readily made variables to read/write from/to.

Functionally, this patch doesn't change anything, but it increases the readability of the code and makes it less prone to bugs.

Signed-off-by: Alexander Graf <agraf@suse.de>
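To make the new split of responsibilities concrete, here is a minimal, self-contained C sketch of the pattern this patch moves to. It is not the kernel code: struct vcpu, SPRN_IVPR, core_emulate_mfspr() and emulate_mfspr() are simplified stand-ins for the real kvm_vcpu, SPR numbers and kvmppc_core_emulate_mfspr()/kvmppc_emulate_instruction() paths; only the division of work (the generic dispatcher owns the GPR access, the core-specific handler only sees spr_val) mirrors the change.

#include <stdio.h>

/* Simplified stand-ins for the kernel types used by the patch. */
typedef unsigned long ulong;
enum { EMULATE_DONE, EMULATE_FAIL };

struct vcpu {
	ulong gpr[32];	/* general purpose registers */
	ulong ivpr;	/* one example SPR backing field */
};

#define SPRN_IVPR 63	/* placeholder SPR number for the sketch */

/* Core-specific handler: only fills in *spr_val, never touches a GPR. */
static int core_emulate_mfspr(struct vcpu *vcpu, int sprn, ulong *spr_val)
{
	switch (sprn) {
	case SPRN_IVPR:
		*spr_val = vcpu->ivpr;
		return EMULATE_DONE;
	default:
		return EMULATE_FAIL;	/* *spr_val deliberately left alone */
	}
}

/* Generic dispatcher: owns the single GPR write, as emulate.c does after the patch. */
static int emulate_mfspr(struct vcpu *vcpu, int sprn, int rt)
{
	ulong spr_val = 0;	/* a forgotten case now yields 0, not garbage */
	int emulated = core_emulate_mfspr(vcpu, sprn, &spr_val);

	if (emulated == EMULATE_FAIL)
		printf("mfspr: unknown spr 0x%x\n", sprn);
	vcpu->gpr[rt] = spr_val;	/* centralized GPR write */
	return emulated;
}

int main(void)
{
	struct vcpu v = { .ivpr = 0xdead0000 };

	emulate_mfspr(&v, SPRN_IVPR, 3);
	printf("r3 = 0x%lx\n", v.gpr[3]);
	return 0;
}

With the value centralized this way, a core handler that forgets to fill in *spr_val leaves the initialized 0 in place instead of whatever happened to be in the destination GPR, which is exactly the failure mode the commit message warns about.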
-rw-r--r--	arch/powerpc/include/asm/kvm_ppc.h	6
-rw-r--r--	arch/powerpc/kvm/44x_emulate.c	24
-rw-r--r--	arch/powerpc/kvm/book3s_emulate.c	34
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	4
-rw-r--r--	arch/powerpc/kvm/booke.h	4
-rw-r--r--	arch/powerpc/kvm/booke_emulate.c	88
-rw-r--r--	arch/powerpc/kvm/e500_emulate.c	110
-rw-r--r--	arch/powerpc/kvm/emulate.c	64
8 files changed, 190 insertions(+), 144 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index c87e3b503fd..f68c22fa2fc 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -107,8 +107,10 @@ extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
 
 extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
+extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
+                                     ulong val);
+extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
+                                     ulong *val);
 
 extern int kvmppc_booke_init(void);
 extern void kvmppc_booke_exit(void);
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index da81a2d9238..c8c61578fdf 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -128,41 +128,41 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return emulated;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	int emulated = EMULATE_DONE;
 
 	switch (sprn) {
 	case SPRN_PID:
-		kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
+		kvmppc_set_pid(vcpu, spr_val); break;
 	case SPRN_MMUCR:
-		vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
+		vcpu->arch.mmucr = spr_val; break;
 	case SPRN_CCR0:
-		vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
+		vcpu->arch.ccr0 = spr_val; break;
 	case SPRN_CCR1:
-		vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
+		vcpu->arch.ccr1 = spr_val; break;
 	default:
-		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
+		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
 	}
 
 	return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	int emulated = EMULATE_DONE;
 
 	switch (sprn) {
 	case SPRN_PID:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
+		*spr_val = vcpu->arch.pid; break;
 	case SPRN_MMUCR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
+		*spr_val = vcpu->arch.mmucr; break;
 	case SPRN_CCR0:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
+		*spr_val = vcpu->arch.ccr0; break;
 	case SPRN_CCR1:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
+		*spr_val = vcpu->arch.ccr1; break;
 	default:
-		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
+		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
 	}
 
 	return emulated;
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index c023bcd253f..b9a989dc76c 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -318,10 +318,9 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
 	return bat;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	int emulated = EMULATE_DONE;
-	ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
 	switch (sprn) {
 	case SPRN_SDR1:
@@ -433,7 +432,7 @@ unprivileged:
 	return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	int emulated = EMULATE_DONE;
 
@@ -446,46 +445,46 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
 
 		if (sprn % 2)
-			kvmppc_set_gpr(vcpu, rt, bat->raw >> 32);
+			*spr_val = bat->raw >> 32;
 		else
-			kvmppc_set_gpr(vcpu, rt, bat->raw);
+			*spr_val = bat->raw;
 
 		break;
 	}
 	case SPRN_SDR1:
 		if (!spr_allowed(vcpu, PRIV_HYPER))
 			goto unprivileged;
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
+		*spr_val = to_book3s(vcpu)->sdr1;
 		break;
 	case SPRN_DSISR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr);
+		*spr_val = vcpu->arch.shared->dsisr;
 		break;
 	case SPRN_DAR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar);
+		*spr_val = vcpu->arch.shared->dar;
 		break;
 	case SPRN_HIOR:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
+		*spr_val = to_book3s(vcpu)->hior;
 		break;
 	case SPRN_HID0:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
+		*spr_val = to_book3s(vcpu)->hid[0];
 		break;
 	case SPRN_HID1:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
+		*spr_val = to_book3s(vcpu)->hid[1];
 		break;
 	case SPRN_HID2:
 	case SPRN_HID2_GEKKO:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
+		*spr_val = to_book3s(vcpu)->hid[2];
 		break;
 	case SPRN_HID4:
 	case SPRN_HID4_GEKKO:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
+		*spr_val = to_book3s(vcpu)->hid[4];
 		break;
 	case SPRN_HID5:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
+		*spr_val = to_book3s(vcpu)->hid[5];
 		break;
 	case SPRN_CFAR:
 	case SPRN_PURR:
-		kvmppc_set_gpr(vcpu, rt, 0);
+		*spr_val = 0;
 		break;
 	case SPRN_GQR0:
 	case SPRN_GQR1:
@@ -495,8 +494,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 	case SPRN_GQR5:
 	case SPRN_GQR6:
 	case SPRN_GQR7:
-		kvmppc_set_gpr(vcpu, rt,
-			       to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
+		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
 		break;
 	case SPRN_THRM1:
 	case SPRN_THRM2:
@@ -511,7 +509,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 	case SPRN_PMC3_GEKKO:
 	case SPRN_PMC4_GEKKO:
 	case SPRN_WPAR_GEKKO:
-		kvmppc_set_gpr(vcpu, rt, 0);
+		*spr_val = 0;
 		break;
 	default:
 unprivileged:
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index bb5a0f4b4bb..db36598a90d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1505,12 +1505,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	return EMULATE_FAIL;
 }
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 62c4fe55d19..ba61974c1e2 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -75,8 +75,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
 
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance);
-int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
-int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
+int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
 
 /* low-level asm code to transfer guest state */
 void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index e14f7b23fd3..6c76397f2af 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -102,22 +102,26 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
  * will return the wrong result if called for them in another context
  * (such as debugging).
  */
-int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	int emulated = EMULATE_DONE;
-	ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
 	switch (sprn) {
 	case SPRN_DEAR:
-		vcpu->arch.shared->dar = spr_val; break;
+		vcpu->arch.shared->dar = spr_val;
+		break;
 	case SPRN_ESR:
-		vcpu->arch.shared->esr = spr_val; break;
+		vcpu->arch.shared->esr = spr_val;
+		break;
 	case SPRN_DBCR0:
-		vcpu->arch.dbcr0 = spr_val; break;
+		vcpu->arch.dbcr0 = spr_val;
+		break;
 	case SPRN_DBCR1:
-		vcpu->arch.dbcr1 = spr_val; break;
+		vcpu->arch.dbcr1 = spr_val;
+		break;
 	case SPRN_DBSR:
-		vcpu->arch.dbsr &= ~spr_val; break;
+		vcpu->arch.dbsr &= ~spr_val;
+		break;
 	case SPRN_TSR:
 		kvmppc_clr_tsr_bits(vcpu, spr_val);
 		break;
@@ -131,13 +135,17 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	 * guest (PR-mode only).
 	 */
 	case SPRN_SPRG4:
-		vcpu->arch.shared->sprg4 = spr_val; break;
+		vcpu->arch.shared->sprg4 = spr_val;
+		break;
 	case SPRN_SPRG5:
-		vcpu->arch.shared->sprg5 = spr_val; break;
+		vcpu->arch.shared->sprg5 = spr_val;
+		break;
 	case SPRN_SPRG6:
-		vcpu->arch.shared->sprg6 = spr_val; break;
+		vcpu->arch.shared->sprg6 = spr_val;
+		break;
 	case SPRN_SPRG7:
-		vcpu->arch.shared->sprg7 = spr_val; break;
+		vcpu->arch.shared->sprg7 = spr_val;
+		break;
 
 	case SPRN_IVPR:
 		vcpu->arch.ivpr = spr_val;
@@ -207,75 +215,83 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	return emulated;
 }
 
-int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	int emulated = EMULATE_DONE;
 
 	switch (sprn) {
 	case SPRN_IVPR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
+		*spr_val = vcpu->arch.ivpr;
+		break;
 	case SPRN_DEAR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
+		*spr_val = vcpu->arch.shared->dar;
+		break;
 	case SPRN_ESR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break;
+		*spr_val = vcpu->arch.shared->esr;
+		break;
 	case SPRN_DBCR0:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
+		*spr_val = vcpu->arch.dbcr0;
+		break;
 	case SPRN_DBCR1:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
+		*spr_val = vcpu->arch.dbcr1;
+		break;
 	case SPRN_DBSR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
+		*spr_val = vcpu->arch.dbsr;
+		break;
 	case SPRN_TSR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break;
+		*spr_val = vcpu->arch.tsr;
+		break;
 	case SPRN_TCR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break;
+		*spr_val = vcpu->arch.tcr;
+		break;
 
 	case SPRN_IVOR0:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
 		break;
 	case SPRN_IVOR1:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
 		break;
 	case SPRN_IVOR2:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
 		break;
 	case SPRN_IVOR3:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
 		break;
 	case SPRN_IVOR4:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
 		break;
 	case SPRN_IVOR5:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
 		break;
 	case SPRN_IVOR6:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
 		break;
 	case SPRN_IVOR7:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
 		break;
 	case SPRN_IVOR8:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
 		break;
 	case SPRN_IVOR9:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
 		break;
 	case SPRN_IVOR10:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
 		break;
 	case SPRN_IVOR11:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
 		break;
 	case SPRN_IVOR12:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
 		break;
 	case SPRN_IVOR13:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
 		break;
 	case SPRN_IVOR14:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
 		break;
 	case SPRN_IVOR15:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
 		break;
 
 	default:
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 9b2dcda7195..8b99e076dc8 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -140,11 +140,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return emulated;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int emulated = EMULATE_DONE;
-	ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
 	switch (sprn) {
 #ifndef CONFIG_KVM_BOOKE_HV
@@ -154,25 +153,32 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	case SPRN_PID1:
 		if (spr_val != 0)
 			return EMULATE_FAIL;
-		vcpu_e500->pid[1] = spr_val; break;
+		vcpu_e500->pid[1] = spr_val;
+		break;
 	case SPRN_PID2:
 		if (spr_val != 0)
 			return EMULATE_FAIL;
-		vcpu_e500->pid[2] = spr_val; break;
+		vcpu_e500->pid[2] = spr_val;
+		break;
 	case SPRN_MAS0:
-		vcpu->arch.shared->mas0 = spr_val; break;
+		vcpu->arch.shared->mas0 = spr_val;
+		break;
 	case SPRN_MAS1:
-		vcpu->arch.shared->mas1 = spr_val; break;
+		vcpu->arch.shared->mas1 = spr_val;
+		break;
 	case SPRN_MAS2:
-		vcpu->arch.shared->mas2 = spr_val; break;
+		vcpu->arch.shared->mas2 = spr_val;
+		break;
 	case SPRN_MAS3:
 		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
 		vcpu->arch.shared->mas7_3 |= spr_val;
 		break;
 	case SPRN_MAS4:
-		vcpu->arch.shared->mas4 = spr_val; break;
+		vcpu->arch.shared->mas4 = spr_val;
+		break;
 	case SPRN_MAS6:
-		vcpu->arch.shared->mas6 = spr_val; break;
+		vcpu->arch.shared->mas6 = spr_val;
+		break;
 	case SPRN_MAS7:
 		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
 		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
@@ -183,11 +189,14 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
 		break;
 	case SPRN_L1CSR1:
-		vcpu_e500->l1csr1 = spr_val; break;
+		vcpu_e500->l1csr1 = spr_val;
+		break;
 	case SPRN_HID0:
-		vcpu_e500->hid0 = spr_val; break;
+		vcpu_e500->hid0 = spr_val;
+		break;
 	case SPRN_HID1:
-		vcpu_e500->hid1 = spr_val; break;
+		vcpu_e500->hid1 = spr_val;
+		break;
 
 	case SPRN_MMUCSR0:
 		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
@@ -216,90 +225,103 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 		break;
 #endif
 	default:
-		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
+		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
 	}
 
 	return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int emulated = EMULATE_DONE;
 
 	switch (sprn) {
 #ifndef CONFIG_KVM_BOOKE_HV
-		unsigned long val;
-
 	case SPRN_PID:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
+		*spr_val = vcpu_e500->pid[0];
+		break;
 	case SPRN_PID1:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
+		*spr_val = vcpu_e500->pid[1];
+		break;
 	case SPRN_PID2:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
+		*spr_val = vcpu_e500->pid[2];
+		break;
 	case SPRN_MAS0:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break;
+		*spr_val = vcpu->arch.shared->mas0;
+		break;
 	case SPRN_MAS1:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break;
+		*spr_val = vcpu->arch.shared->mas1;
+		break;
 	case SPRN_MAS2:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break;
+		*spr_val = vcpu->arch.shared->mas2;
+		break;
 	case SPRN_MAS3:
-		val = (u32)vcpu->arch.shared->mas7_3;
-		kvmppc_set_gpr(vcpu, rt, val);
+		*spr_val = (u32)vcpu->arch.shared->mas7_3;
 		break;
 	case SPRN_MAS4:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break;
+		*spr_val = vcpu->arch.shared->mas4;
+		break;
 	case SPRN_MAS6:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break;
+		*spr_val = vcpu->arch.shared->mas6;
+		break;
 	case SPRN_MAS7:
-		val = vcpu->arch.shared->mas7_3 >> 32;
-		kvmppc_set_gpr(vcpu, rt, val);
+		*spr_val = vcpu->arch.shared->mas7_3 >> 32;
 		break;
 #endif
 	case SPRN_TLB0CFG:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[0]); break;
+		*spr_val = vcpu->arch.tlbcfg[0];
+		break;
 	case SPRN_TLB1CFG:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[1]); break;
+		*spr_val = vcpu->arch.tlbcfg[1];
+		break;
 	case SPRN_L1CSR0:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break;
+		*spr_val = vcpu_e500->l1csr0;
+		break;
 	case SPRN_L1CSR1:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
+		*spr_val = vcpu_e500->l1csr1;
+		break;
 	case SPRN_HID0:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
+		*spr_val = vcpu_e500->hid0;
+		break;
 	case SPRN_HID1:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
+		*spr_val = vcpu_e500->hid1;
+		break;
 	case SPRN_SVR:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break;
+		*spr_val = vcpu_e500->svr;
+		break;
 
 	case SPRN_MMUCSR0:
-		kvmppc_set_gpr(vcpu, rt, 0); break;
+		*spr_val = 0;
+		break;
 
 	case SPRN_MMUCFG:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucfg); break;
+		*spr_val = vcpu->arch.mmucfg;
+		break;
 
 	/* extra exceptions */
 	case SPRN_IVOR32:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
 		break;
 	case SPRN_IVOR33:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
 		break;
 	case SPRN_IVOR34:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
 		break;
 	case SPRN_IVOR35:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
 		break;
 #ifdef CONFIG_KVM_BOOKE_HV
 	case SPRN_IVOR36:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
 		break;
 	case SPRN_IVOR37:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
 		break;
 #endif
 	default:
-		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
+		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
 	}
 
 	return emulated;
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index f63b5cbd822..f90e86dea7a 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -154,6 +154,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	int sprn = get_sprn(inst);
 	enum emulation_result emulated = EMULATE_DONE;
 	int advance = 1;
+	ulong spr_val = 0;
 
 	/* this default type might be overwritten by subcategories */
 	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
@@ -235,55 +236,59 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		case OP_31_XOP_MFSPR:
 			switch (sprn) {
 			case SPRN_SRR0:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
+				spr_val = vcpu->arch.shared->srr0;
 				break;
 			case SPRN_SRR1:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
+				spr_val = vcpu->arch.shared->srr1;
 				break;
 			case SPRN_PVR:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
+				spr_val = vcpu->arch.pvr;
+				break;
 			case SPRN_PIR:
-				kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
+				spr_val = vcpu->vcpu_id;
+				break;
 			case SPRN_MSSSR0:
-				kvmppc_set_gpr(vcpu, rt, 0); break;
+				spr_val = 0;
+				break;
 
 			/* Note: mftb and TBRL/TBWL are user-accessible, so
 			 * the guest can always access the real TB anyways.
 			 * In fact, we probably will never see these traps. */
 			case SPRN_TBWL:
-				kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
+				spr_val = get_tb() >> 32;
+				break;
 			case SPRN_TBWU:
-				kvmppc_set_gpr(vcpu, rt, get_tb()); break;
+				spr_val = get_tb();
+				break;
 
 			case SPRN_SPRG0:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
+				spr_val = vcpu->arch.shared->sprg0;
 				break;
 			case SPRN_SPRG1:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
+				spr_val = vcpu->arch.shared->sprg1;
 				break;
 			case SPRN_SPRG2:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
+				spr_val = vcpu->arch.shared->sprg2;
 				break;
 			case SPRN_SPRG3:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
+				spr_val = vcpu->arch.shared->sprg3;
 				break;
 			/* Note: SPRG4-7 are user-readable, so we don't get
 			 * a trap. */
 
 			case SPRN_DEC:
-			{
-				kvmppc_set_gpr(vcpu, rt,
-					       kvmppc_get_dec(vcpu, get_tb()));
+				spr_val = kvmppc_get_dec(vcpu, get_tb());
 				break;
-			}
 			default:
-				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
-				if (emulated == EMULATE_FAIL) {
-					printk("mfspr: unknown spr %x\n", sprn);
-					kvmppc_set_gpr(vcpu, rt, 0);
+				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
+								     &spr_val);
+				if (unlikely(emulated == EMULATE_FAIL)) {
+					printk(KERN_INFO "mfspr: unknown spr "
+						"0x%x\n", sprn);
 				}
 				break;
 			}
+			kvmppc_set_gpr(vcpu, rt, spr_val);
 			kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
 			break;
 
@@ -301,12 +306,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 
 		case OP_31_XOP_MTSPR:
+			spr_val = kvmppc_get_gpr(vcpu, rs);
 			switch (sprn) {
 			case SPRN_SRR0:
-				vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.shared->srr0 = spr_val;
 				break;
 			case SPRN_SRR1:
-				vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.shared->srr1 = spr_val;
 				break;
 
 			/* XXX We need to context-switch the timebase for
@@ -317,27 +323,29 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			case SPRN_MSSSR0: break;
 
 			case SPRN_DEC:
-				vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.dec = spr_val;
 				kvmppc_emulate_dec(vcpu);
 				break;
 
 			case SPRN_SPRG0:
-				vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.shared->sprg0 = spr_val;
 				break;
 			case SPRN_SPRG1:
-				vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.shared->sprg1 = spr_val;
 				break;
 			case SPRN_SPRG2:
-				vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.shared->sprg2 = spr_val;
 				break;
 			case SPRN_SPRG3:
-				vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.shared->sprg3 = spr_val;
 				break;
 
 			default:
-				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
+				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
+								     spr_val);
 				if (emulated == EMULATE_FAIL)
-					printk("mtspr: unknown spr %x\n", sprn);
+					printk(KERN_INFO "mtspr: unknown spr "
+						"0x%x\n", sprn);
 				break;
 			}
 			kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);