Diffstat (limited to 'arch/powerpc/kvm/emulate.c')
 arch/powerpc/kvm/emulate.c | 55 ++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 44 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index cb72a65f4ecc..4568ec386c2a 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -38,10 +38,12 @@
 #define OP_31_XOP_LBZX 87
 #define OP_31_XOP_STWX 151
 #define OP_31_XOP_STBX 215
+#define OP_31_XOP_LBZUX 119
 #define OP_31_XOP_STBUX 247
 #define OP_31_XOP_LHZX 279
 #define OP_31_XOP_LHZUX 311
 #define OP_31_XOP_MFSPR 339
+#define OP_31_XOP_LHAX 343
 #define OP_31_XOP_STHX 407
 #define OP_31_XOP_STHUX 439
 #define OP_31_XOP_MTSPR 467
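Note: the OP_31_XOP_* values above are the extended opcodes of X-form instructions under primary opcode 31; the new entries cover lbzux (119) and lhax (343). For reference, here is a standalone sketch (not kernel code) of how the get_op()/get_xop()/get_rt()/get_ra()/get_rb() helpers used later in this file pull those fields out of an instruction word, as I read their semantics:

#include <stdint.h>
#include <stdio.h>

/* X-form PowerPC layout (big-endian bit numbering): bits 0-5 primary
 * opcode, 6-10 RT, 11-15 RA, 16-20 RB, 21-30 extended opcode, 31 Rc.
 * These mirror the kernel helpers of the same names; they are a
 * standalone illustration, not the kernel's definitions.
 */
static unsigned int get_op(uint32_t inst)  { return inst >> 26; }
static unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }
static unsigned int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }
static unsigned int get_ra(uint32_t inst)  { return (inst >> 16) & 0x1f; }
static unsigned int get_rb(uint32_t inst)  { return (inst >> 11) & 0x1f; }

int main(void)
{
	uint32_t inst = 0x7ca418ee;	/* lbzux r5,r4,r3 */

	printf("op=%u xop=%u rt=%u ra=%u rb=%u\n",
	       get_op(inst), get_xop(inst), get_rt(inst), get_ra(inst), get_rb(inst));
	return 0;
}

Compiled on its own this prints op=31 xop=119 rt=5 ra=4 rb=3, i.e. exactly the OP_31_XOP_LBZUX case added below.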
@@ -62,10 +64,12 @@
 #define OP_STBU 39
 #define OP_LHZ 40
 #define OP_LHZU 41
+#define OP_LHA 42
+#define OP_LHAU 43
 #define OP_STH 44
 #define OP_STHU 45
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
 static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
 {
 	return 1;
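Note: OP_LHA (42) and OP_LHAU (43) are primary opcodes of D-form loads. The emulation code below never recomputes the effective address for them: the guest already took an MMIO fault on the access, and the update form simply writes vcpu->arch.paddr_accessed back into RA. As a reminder of what the hardware computes, here is a hedged standalone sketch of the D-form EA calculation (dform_ea and the register array are made-up names for illustration):

#include <stdint.h>

/* D-form load (e.g. lha rt,d(ra)): bits 0-5 primary opcode, 6-10 RT,
 * 11-15 RA, 16-31 signed 16-bit displacement.  EA = (RA|0) + EXTS(D).
 */
static uint64_t dform_ea(uint32_t inst, const uint64_t *gpr)
{
	int16_t d = (int16_t)(inst & 0xffff);	/* sign-extended displacement */
	unsigned int ra = (inst >> 16) & 0x1f;

	return (ra ? gpr[ra] : 0) + (int64_t)d;
}

int main(void)
{
	uint64_t gpr[32] = { 0 };

	gpr[4] = 0x1000;
	/* lha r3,-8(r4): primary opcode 42, RT=3, RA=4, D=-8 */
	return dform_ea(0xa864fff8, gpr) == 0xff8 ? 0 : 1;
}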
@@ -82,7 +86,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 	unsigned long dec_nsec;
 
 	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
 	/* mtdec lowers the interrupt line when positive. */
 	kvmppc_core_dequeue_dec(vcpu);
 
@@ -128,7 +132,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
  * from opcode tables in the future. */
 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
-	u32 inst = vcpu->arch.last_inst;
+	u32 inst = kvmppc_get_last_inst(vcpu);
 	u32 ea;
 	int ra;
 	int rb;
@@ -143,13 +147,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
 
-	/* Try again next time */
-	if (inst == KVM_INST_FETCH_FAILED)
-		return EMULATE_DONE;
-
 	switch (get_op(inst)) {
 	case OP_TRAP:
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
 	case OP_TRAP_64:
 		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
 #else
@@ -171,6 +171,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 		break;
 
+	case OP_31_XOP_LBZUX:
+		rt = get_rt(inst);
+		ra = get_ra(inst);
+		rb = get_rb(inst);
+
+		ea = kvmppc_get_gpr(vcpu, rb);
+		if (ra)
+			ea += kvmppc_get_gpr(vcpu, ra);
+
+		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+		kvmppc_set_gpr(vcpu, ra, ea);
+		break;
+
 	case OP_31_XOP_STWX:
 		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
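Note: the new OP_31_XOP_LBZUX case is an update-form indexed load: EA = (RA|0) + (RB), the byte is loaded into RT, and the EA is written back into RA. A standalone sketch of that logic follows; emulate_lbzux, load_byte_fn and demo_load are made-up names, and the byte load is stubbed out as a callback because the real path goes through kvmppc_handle_load() and the MMIO machinery:

#include <stdint.h>

typedef uint8_t (*load_byte_fn)(uint64_t ea);

/* Mirrors the OP_31_XOP_LBZUX hunk above: compute the indexed EA,
 * perform the zero-extended byte load into RT, then update RA.
 */
static void emulate_lbzux(uint64_t *gpr, unsigned int rt, unsigned int ra,
			  unsigned int rb, load_byte_fn load_byte)
{
	uint64_t ea = gpr[rb];

	if (ra)
		ea += gpr[ra];

	gpr[rt] = load_byte(ea);	/* zero-extended byte load */
	gpr[ra] = ea;			/* update form: write EA back to RA */
}

static uint8_t demo_load(uint64_t ea) { return (uint8_t)(ea & 0xff); }

int main(void)
{
	uint64_t gpr[32] = { 0 };

	gpr[4] = 0x100;
	gpr[3] = 0x20;
	emulate_lbzux(gpr, 5, 4, 3, demo_load);	/* lbzux r5,r4,r3 */
	return (gpr[5] == 0x20 && gpr[4] == 0x120) ? 0 : 1;
}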
@@ -200,6 +213,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		kvmppc_set_gpr(vcpu, rs, ea);
 		break;
 
+	case OP_31_XOP_LHAX:
+		rt = get_rt(inst);
+		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+		break;
+
 	case OP_31_XOP_LHZX:
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
@@ -450,6 +468,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
+	case OP_LHA:
+		rt = get_rt(inst);
+		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+		break;
+
+	case OP_LHAU:
+		ra = get_ra(inst);
+		rt = get_rt(inst);
+		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+		break;
+
 	case OP_STH:
 		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
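Note: lha, lhau and lhax differ from the lhz family only in sign-extending the loaded halfword, which is why the new cases call kvmppc_handle_loads() (the sign-extending counterpart of kvmppc_handle_load()). A tiny standalone illustration of the difference; lhz_result/lha_result and the test value are made up:

#include <stdint.h>
#include <stdio.h>

static uint64_t lhz_result(uint16_t half) { return half; }			/* zero-extend */
static uint64_t lha_result(uint16_t half) { return (uint64_t)(int64_t)(int16_t)half; }	/* sign-extend */

int main(void)
{
	uint16_t half = 0x8001;

	printf("lhz -> 0x%016llx\n", (unsigned long long)lhz_result(half));
	printf("lha -> 0x%016llx\n", (unsigned long long)lha_result(half));
	return 0;
}

With half = 0x8001 the zero-extended result is 0x0000000000008001 and the sign-extended one is 0xffffffffffff8001.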
@@ -472,7 +502,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	if (emulated == EMULATE_FAIL) {
 		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
-		if (emulated == EMULATE_FAIL) {
+		if (emulated == EMULATE_AGAIN) {
+			advance = 0;
+		} else if (emulated == EMULATE_FAIL) {
 			advance = 0;
 			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
 			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
@@ -480,10 +512,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		}
 	}
 
-	trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);
+	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
 
+	/* Advance past emulated instruction. */
 	if (advance)
-		vcpu->arch.pc += 4; /* Advance past emulated instruction. */
+		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
 
 	return emulated;
 }