author    Alexander Graf <agraf@suse.de>  2012-05-04 08:55:12 -0400
committer Alexander Graf <agraf@suse.de>  2012-05-06 10:19:13 -0400
commit    54771e6217ce05a474827d9b23ff03de9d2ef2a0 (patch)
tree      4555f93d29863b6c0bbd4be61c60bfe7b80ce6c9 /arch/powerpc/kvm/emulate.c
parent    c46dc9a86148bc37c31d67a22a3887144ba7aa81 (diff)
KVM: PPC: Emulator: clean up SPR reads and writes
When reading and writing SPRs, every SPR emulation case had to fetch or store the respective GPR itself. This approach is prone to failure: if we accidentally implement mfspr emulation that just does "break" and nothing else, the return register ends up with a random value - which is always a bad idea.

So let's consolidate the generic code paths and hand the core-specific SPR handling code ready-made variables to read from and write to.

Functionally, this patch doesn't change anything, but it makes the code more readable and less prone to bugs.

Signed-off-by: Alexander Graf <agraf@suse.de>
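In short, the generic emulator now performs a single GPR write for every mfspr (and a single GPR read for every mtspr), so an SPR case only has to fill in or consume spr_val. A minimal sketch of the resulting shape, abridged from the diff below (most SPRs and error handling omitted):

	ulong spr_val = 0;	/* safe default if a case forgets to set it */

	switch (sprn) {
	case SPRN_SRR0:
		spr_val = vcpu->arch.shared->srr0;
		break;
	default:
		/* core-specific backends now fill spr_val through a pointer */
		emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, &spr_val);
		break;
	}
	kvmppc_set_gpr(vcpu, rt, spr_val);	/* the one centralized GPR write */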
Diffstat (limited to 'arch/powerpc/kvm/emulate.c')
-rw-r--r--  arch/powerpc/kvm/emulate.c | 64
1 file changed, 36 insertions(+), 28 deletions(-)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index f63b5cbd8221..f90e86dea7a2 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -154,6 +154,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	int sprn = get_sprn(inst);
 	enum emulation_result emulated = EMULATE_DONE;
 	int advance = 1;
+	ulong spr_val = 0;
 
 	/* this default type might be overwritten by subcategories */
 	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
@@ -235,55 +236,59 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		case OP_31_XOP_MFSPR:
 			switch (sprn) {
 			case SPRN_SRR0:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
+				spr_val = vcpu->arch.shared->srr0;
 				break;
 			case SPRN_SRR1:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
+				spr_val = vcpu->arch.shared->srr1;
 				break;
 			case SPRN_PVR:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
+				spr_val = vcpu->arch.pvr;
+				break;
 			case SPRN_PIR:
-				kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
+				spr_val = vcpu->vcpu_id;
+				break;
 			case SPRN_MSSSR0:
-				kvmppc_set_gpr(vcpu, rt, 0); break;
+				spr_val = 0;
+				break;
 
 			/* Note: mftb and TBRL/TBWL are user-accessible, so
 			 * the guest can always access the real TB anyways.
 			 * In fact, we probably will never see these traps. */
 			case SPRN_TBWL:
-				kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
+				spr_val = get_tb() >> 32;
+				break;
 			case SPRN_TBWU:
-				kvmppc_set_gpr(vcpu, rt, get_tb()); break;
+				spr_val = get_tb();
+				break;
 
 			case SPRN_SPRG0:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
+				spr_val = vcpu->arch.shared->sprg0;
 				break;
 			case SPRN_SPRG1:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
+				spr_val = vcpu->arch.shared->sprg1;
 				break;
 			case SPRN_SPRG2:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
+				spr_val = vcpu->arch.shared->sprg2;
 				break;
 			case SPRN_SPRG3:
-				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
+				spr_val = vcpu->arch.shared->sprg3;
 				break;
 			/* Note: SPRG4-7 are user-readable, so we don't get
 			 * a trap. */
 
 			case SPRN_DEC:
-			{
-				kvmppc_set_gpr(vcpu, rt,
-				               kvmppc_get_dec(vcpu, get_tb()));
+				spr_val = kvmppc_get_dec(vcpu, get_tb());
 				break;
-			}
 			default:
-				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
-				if (emulated == EMULATE_FAIL) {
-					printk("mfspr: unknown spr %x\n", sprn);
-					kvmppc_set_gpr(vcpu, rt, 0);
+				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
+								     &spr_val);
+				if (unlikely(emulated == EMULATE_FAIL)) {
+					printk(KERN_INFO "mfspr: unknown spr "
+						"0x%x\n", sprn);
 				}
 				break;
 			}
+			kvmppc_set_gpr(vcpu, rt, spr_val);
 			kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
 			break;
 
@@ -301,12 +306,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 
 		case OP_31_XOP_MTSPR:
+			spr_val = kvmppc_get_gpr(vcpu, rs);
 			switch (sprn) {
 			case SPRN_SRR0:
-				vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.shared->srr0 = spr_val;
 				break;
 			case SPRN_SRR1:
-				vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.shared->srr1 = spr_val;
 				break;
 
 			/* XXX We need to context-switch the timebase for
@@ -317,27 +323,29 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			case SPRN_MSSSR0: break;
 
 			case SPRN_DEC:
-				vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.dec = spr_val;
 				kvmppc_emulate_dec(vcpu);
 				break;
 
 			case SPRN_SPRG0:
-				vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.shared->sprg0 = spr_val;
 				break;
 			case SPRN_SPRG1:
-				vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.shared->sprg1 = spr_val;
 				break;
 			case SPRN_SPRG2:
-				vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.shared->sprg2 = spr_val;
 				break;
 			case SPRN_SPRG3:
-				vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
+				vcpu->arch.shared->sprg3 = spr_val;
 				break;
 
 			default:
-				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
+				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
+								     spr_val);
 				if (emulated == EMULATE_FAIL)
-					printk("mtspr: unknown spr %x\n", sprn);
+					printk(KERN_INFO "mtspr: unknown spr "
+						"0x%x\n", sprn);
 				break;
 			}
 			kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
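For reference, a core-specific backend written against the new calling convention only touches the value it is handed, never the GPR file directly. The following is a hypothetical sketch, not code from this patch: the argument shapes mirror the calls visible in the diff above, while the SPR chosen and the vcpu field are purely illustrative.

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	switch (sprn) {
	case SPRN_PID:				/* example SPR, for illustration only */
		*spr_val = vcpu->arch.pid;	/* hypothetical field */
		return EMULATE_DONE;
	default:
		return EMULATE_FAIL;		/* generic code prints the warning */
	}
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	switch (sprn) {
	case SPRN_PID:
		vcpu->arch.pid = spr_val;	/* hypothetical field */
		return EMULATE_DONE;
	default:
		return EMULATE_FAIL;
	}
}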