aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kvm/emulate.c
diff options
context:
space:
mode:
authorHollis Blanchard <hollisb@us.ibm.com>2008-11-05 10:36:16 -0500
committerAvi Kivity <avi@redhat.com>2008-12-31 09:52:21 -0500
commit75f74f0dbe086c239b4b0cc5ed75b903ea3e663f (patch)
treec6774128934667d1c82a6e458d9a4233574a95a4 /arch/powerpc/kvm/emulate.c
parentc381a04313e7c0fb04246b1ff711e0b5726de6c0 (diff)
KVM: ppc: refactor instruction emulation into generic and core-specific pieces
Cores provide 3 emulation hooks, implemented for example in the new 4xx_emulate.c: kvmppc_core_emulate_op kvmppc_core_emulate_mtspr kvmppc_core_emulate_mfspr Strictly speaking the last two aren't necessary, but provide for more informative error reporting ("unknown SPR"). Long term I'd like to have instruction decoding autogenerated from tables of opcodes, and that way we could aggregate universal, Book E, and core-specific instructions more easily and without redundant switch statements. Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm/emulate.c')
-rw-r--r--arch/powerpc/kvm/emulate.c272
1 file changed, 22 insertions, 250 deletions
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 5fd9cf779be5..30a49f8c49b2 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -23,14 +23,13 @@
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/kvm_host.h> 24#include <linux/kvm_host.h>
25 25
26#include <asm/dcr.h> 26#include <asm/reg.h>
27#include <asm/dcr-regs.h>
28#include <asm/time.h> 27#include <asm/time.h>
29#include <asm/byteorder.h> 28#include <asm/byteorder.h>
30#include <asm/kvm_ppc.h> 29#include <asm/kvm_ppc.h>
31#include <asm/disassemble.h> 30#include <asm/disassemble.h>
32 31
33static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) 32void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
34{ 33{
35 if (vcpu->arch.tcr & TCR_DIE) { 34 if (vcpu->arch.tcr & TCR_DIE) {
36 /* The decrementer ticks at the same rate as the timebase, so 35 /* The decrementer ticks at the same rate as the timebase, so
@@ -46,12 +45,6 @@ static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
46 } 45 }
47} 46}
48 47
49static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
50{
51 vcpu->arch.pc = vcpu->arch.srr0;
52 kvmppc_set_msr(vcpu, vcpu->arch.srr1);
53}
54
55/* XXX to do: 48/* XXX to do:
56 * lhax 49 * lhax
57 * lhaux 50 * lhaux
@@ -66,18 +59,17 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
66 * 59 *
67 * XXX is_bigendian should depend on MMU mapping or MSR[LE] 60 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
68 */ 61 */
62/* XXX Should probably auto-generate instruction decoding for a particular core
63 * from opcode tables in the future. */
69int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 64int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
70{ 65{
71 u32 inst = vcpu->arch.last_inst; 66 u32 inst = vcpu->arch.last_inst;
72 u32 ea; 67 u32 ea;
73 int ra; 68 int ra;
74 int rb; 69 int rb;
75 int rc;
76 int rs; 70 int rs;
77 int rt; 71 int rt;
78 int ws;
79 int sprn; 72 int sprn;
80 int dcrn;
81 enum emulation_result emulated = EMULATE_DONE; 73 enum emulation_result emulated = EMULATE_DONE;
82 int advance = 1; 74 int advance = 1;
83 75
@@ -88,19 +80,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
88 advance = 0; 80 advance = 0;
89 break; 81 break;
90 82
91 case 19:
92 switch (get_xop(inst)) {
93 case 50: /* rfi */
94 kvmppc_emul_rfi(vcpu);
95 advance = 0;
96 break;
97
98 default:
99 emulated = EMULATE_FAIL;
100 break;
101 }
102 break;
103
104 case 31: 83 case 31:
105 switch (get_xop(inst)) { 84 switch (get_xop(inst)) {
106 85
@@ -109,27 +88,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
109 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 88 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
110 break; 89 break;
111 90
112 case 83: /* mfmsr */
113 rt = get_rt(inst);
114 vcpu->arch.gpr[rt] = vcpu->arch.msr;
115 break;
116
117 case 87: /* lbzx */ 91 case 87: /* lbzx */
118 rt = get_rt(inst); 92 rt = get_rt(inst);
119 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 93 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
120 break; 94 break;
121 95
122 case 131: /* wrtee */
123 rs = get_rs(inst);
124 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
125 | (vcpu->arch.gpr[rs] & MSR_EE);
126 break;
127
128 case 146: /* mtmsr */
129 rs = get_rs(inst);
130 kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
131 break;
132
133 case 151: /* stwx */ 96 case 151: /* stwx */
134 rs = get_rs(inst); 97 rs = get_rs(inst);
135 emulated = kvmppc_handle_store(run, vcpu, 98 emulated = kvmppc_handle_store(run, vcpu,
@@ -137,11 +100,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
137 4, 1); 100 4, 1);
138 break; 101 break;
139 102
140 case 163: /* wrteei */
141 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
142 | (inst & MSR_EE);
143 break;
144
145 case 215: /* stbx */ 103 case 215: /* stbx */
146 rs = get_rs(inst); 104 rs = get_rs(inst);
147 emulated = kvmppc_handle_store(run, vcpu, 105 emulated = kvmppc_handle_store(run, vcpu,
@@ -182,42 +140,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
182 vcpu->arch.gpr[ra] = ea; 140 vcpu->arch.gpr[ra] = ea;
183 break; 141 break;
184 142
185 case 323: /* mfdcr */
186 dcrn = get_dcrn(inst);
187 rt = get_rt(inst);
188
189 /* The guest may access CPR0 registers to determine the timebase
190 * frequency, and it must know the real host frequency because it
191 * can directly access the timebase registers.
192 *
193 * It would be possible to emulate those accesses in userspace,
194 * but userspace can really only figure out the end frequency.
195 * We could decompose that into the factors that compute it, but
196 * that's tricky math, and it's easier to just report the real
197 * CPR0 values.
198 */
199 switch (dcrn) {
200 case DCRN_CPR0_CONFIG_ADDR:
201 vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
202 break;
203 case DCRN_CPR0_CONFIG_DATA:
204 local_irq_disable();
205 mtdcr(DCRN_CPR0_CONFIG_ADDR,
206 vcpu->arch.cpr0_cfgaddr);
207 vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
208 local_irq_enable();
209 break;
210 default:
211 run->dcr.dcrn = dcrn;
212 run->dcr.data = 0;
213 run->dcr.is_write = 0;
214 vcpu->arch.io_gpr = rt;
215 vcpu->arch.dcr_needed = 1;
216 emulated = EMULATE_DO_DCR;
217 }
218
219 break;
220
221 case 339: /* mfspr */ 143 case 339: /* mfspr */
222 sprn = get_sprn(inst); 144 sprn = get_sprn(inst);
223 rt = get_rt(inst); 145 rt = get_rt(inst);
@@ -227,26 +149,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
227 vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; 149 vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
228 case SPRN_SRR1: 150 case SPRN_SRR1:
229 vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; 151 vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
230 case SPRN_MMUCR:
231 vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
232 case SPRN_PID:
233 vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
234 case SPRN_IVPR:
235 vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
236 case SPRN_CCR0:
237 vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
238 case SPRN_CCR1:
239 vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
240 case SPRN_PVR: 152 case SPRN_PVR:
241 vcpu->arch.gpr[rt] = vcpu->arch.pvr; break; 153 vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
242 case SPRN_DEAR:
243 vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
244 case SPRN_ESR:
245 vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
246 case SPRN_DBCR0:
247 vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
248 case SPRN_DBCR1:
249 vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
250 154
251 /* Note: mftb and TBRL/TBWL are user-accessible, so 155 /* Note: mftb and TBRL/TBWL are user-accessible, so
252 * the guest can always access the real TB anyways. 156 * the guest can always access the real TB anyways.
@@ -267,42 +171,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
267 /* Note: SPRG4-7 are user-readable, so we don't get 171 /* Note: SPRG4-7 are user-readable, so we don't get
268 * a trap. */ 172 * a trap. */
269 173
270 case SPRN_IVOR0:
271 vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break;
272 case SPRN_IVOR1:
273 vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break;
274 case SPRN_IVOR2:
275 vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break;
276 case SPRN_IVOR3:
277 vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break;
278 case SPRN_IVOR4:
279 vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break;
280 case SPRN_IVOR5:
281 vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break;
282 case SPRN_IVOR6:
283 vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break;
284 case SPRN_IVOR7:
285 vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break;
286 case SPRN_IVOR8:
287 vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break;
288 case SPRN_IVOR9:
289 vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break;
290 case SPRN_IVOR10:
291 vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break;
292 case SPRN_IVOR11:
293 vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break;
294 case SPRN_IVOR12:
295 vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break;
296 case SPRN_IVOR13:
297 vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break;
298 case SPRN_IVOR14:
299 vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break;
300 case SPRN_IVOR15:
301 vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break;
302
303 default: 174 default:
304 printk("mfspr: unknown spr %x\n", sprn); 175 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
305 vcpu->arch.gpr[rt] = 0; 176 if (emulated == EMULATE_FAIL) {
177 printk("mfspr: unknown spr %x\n", sprn);
178 vcpu->arch.gpr[rt] = 0;
179 }
306 break; 180 break;
307 } 181 }
308 break; 182 break;
@@ -332,25 +206,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
332 vcpu->arch.gpr[ra] = ea; 206 vcpu->arch.gpr[ra] = ea;
333 break; 207 break;
334 208
335 case 451: /* mtdcr */
336 dcrn = get_dcrn(inst);
337 rs = get_rs(inst);
338
339 /* emulate some access in kernel */
340 switch (dcrn) {
341 case DCRN_CPR0_CONFIG_ADDR:
342 vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
343 break;
344 default:
345 run->dcr.dcrn = dcrn;
346 run->dcr.data = vcpu->arch.gpr[rs];
347 run->dcr.is_write = 1;
348 vcpu->arch.dcr_needed = 1;
349 emulated = EMULATE_DO_DCR;
350 }
351
352 break;
353
354 case 467: /* mtspr */ 209 case 467: /* mtspr */
355 sprn = get_sprn(inst); 210 sprn = get_sprn(inst);
356 rs = get_rs(inst); 211 rs = get_rs(inst);
@@ -359,22 +214,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
359 vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; 214 vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
360 case SPRN_SRR1: 215 case SPRN_SRR1:
361 vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; 216 vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
362 case SPRN_MMUCR:
363 vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
364 case SPRN_PID:
365 kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
366 case SPRN_CCR0:
367 vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
368 case SPRN_CCR1:
369 vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
370 case SPRN_DEAR:
371 vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
372 case SPRN_ESR:
373 vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
374 case SPRN_DBCR0:
375 vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
376 case SPRN_DBCR1:
377 vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
378 217
379 /* XXX We need to context-switch the timebase for 218 /* XXX We need to context-switch the timebase for
380 * watchdog and FIT. */ 219 * watchdog and FIT. */
@@ -386,14 +225,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
386 kvmppc_emulate_dec(vcpu); 225 kvmppc_emulate_dec(vcpu);
387 break; 226 break;
388 227
389 case SPRN_TSR:
390 vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
391
392 case SPRN_TCR:
393 vcpu->arch.tcr = vcpu->arch.gpr[rs];
394 kvmppc_emulate_dec(vcpu);
395 break;
396
397 case SPRN_SPRG0: 228 case SPRN_SPRG0:
398 vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; 229 vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
399 case SPRN_SPRG1: 230 case SPRN_SPRG1:
@@ -403,56 +234,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
403 case SPRN_SPRG3: 234 case SPRN_SPRG3:
404 vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; 235 vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
405 236
406 /* Note: SPRG4-7 are user-readable. These values are
407 * loaded into the real SPRGs when resuming the
408 * guest. */
409 case SPRN_SPRG4:
410 vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
411 case SPRN_SPRG5:
412 vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
413 case SPRN_SPRG6:
414 vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
415 case SPRN_SPRG7:
416 vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
417
418 case SPRN_IVPR:
419 vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break;
420 case SPRN_IVOR0:
421 vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break;
422 case SPRN_IVOR1:
423 vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break;
424 case SPRN_IVOR2:
425 vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break;
426 case SPRN_IVOR3:
427 vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break;
428 case SPRN_IVOR4:
429 vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break;
430 case SPRN_IVOR5:
431 vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break;
432 case SPRN_IVOR6:
433 vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break;
434 case SPRN_IVOR7:
435 vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break;
436 case SPRN_IVOR8:
437 vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break;
438 case SPRN_IVOR9:
439 vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break;
440 case SPRN_IVOR10:
441 vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break;
442 case SPRN_IVOR11:
443 vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break;
444 case SPRN_IVOR12:
445 vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break;
446 case SPRN_IVOR13:
447 vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break;
448 case SPRN_IVOR14:
449 vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break;
450 case SPRN_IVOR15:
451 vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break;
452
453 default: 237 default:
454 printk("mtspr: unknown spr %x\n", sprn); 238 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
455 emulated = EMULATE_FAIL; 239 if (emulated == EMULATE_FAIL)
240 printk("mtspr: unknown spr %x\n", sprn);
456 break; 241 break;
457 } 242 }
458 break; 243 break;
@@ -483,21 +268,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
483 4, 0); 268 4, 0);
484 break; 269 break;
485 270
486 case 978: /* tlbwe */
487 ra = get_ra(inst);
488 rs = get_rs(inst);
489 ws = get_ws(inst);
490 emulated = kvmppc_emul_tlbwe(vcpu, ra, rs, ws);
491 break;
492
493 case 914: /* tlbsx */
494 rt = get_rt(inst);
495 ra = get_ra(inst);
496 rb = get_rb(inst);
497 rc = get_rc(inst);
498 emulated = kvmppc_emul_tlbsx(vcpu, rt, ra, rb, rc);
499 break;
500
501 case 790: /* lhbrx */ 271 case 790: /* lhbrx */
502 rt = get_rt(inst); 272 rt = get_rt(inst);
503 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); 273 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
@@ -513,14 +283,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
513 2, 0); 283 2, 0);
514 break; 284 break;
515 285
516 case 966: /* iccci */
517 break;
518
519 default: 286 default:
520 printk("unknown: op %d xop %d\n", get_op(inst), 287 /* Attempt core-specific emulation below. */
521 get_xop(inst));
522 emulated = EMULATE_FAIL; 288 emulated = EMULATE_FAIL;
523 break;
524 } 289 }
525 break; 290 break;
526 291
@@ -603,9 +368,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
603 break; 368 break;
604 369
605 default: 370 default:
606 printk("unknown op %d\n", get_op(inst));
607 emulated = EMULATE_FAIL; 371 emulated = EMULATE_FAIL;
608 break; 372 }
373
374 if (emulated == EMULATE_FAIL) {
375 emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
376 if (emulated == EMULATE_FAIL) {
377 advance = 0;
378 printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
379 "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
380 }
609 } 381 }
610 382
611 KVMTRACE_3D(PPC_INSTR, vcpu, inst, vcpu->arch.pc, emulated, entryexit); 383 KVMTRACE_3D(PPC_INSTR, vcpu, inst, vcpu->arch.pc, emulated, entryexit);