diff options
author | Mihai Caraman <mihai.caraman@freescale.com> | 2014-07-23 12:06:21 -0400 |
---|---|---|
committer | Alexander Graf <agraf@suse.de> | 2014-07-28 09:23:14 -0400 |
commit | 51f047261e717b74b226f837a16455994b61ae30 (patch) | |
tree | e94a57703fed176a72acef425cea442a8395746b /arch | |
parent | 9a26af64d6bba72c9dfd62cc0cab0e79f8a66d7b (diff) |
KVM: PPC: Allow kvmppc_get_last_inst() to fail
On book3e, guest last instruction is read on the exit path using load
external pid (lwepx) dedicated instruction. This load operation may fail
due to TLB eviction and execute-but-not-read entries.
This patch lays down the path for an alternative solution to read the guest
last instruction, by allowing the kvmppc_get_last_inst() function to fail.
Architecture specific implementations of kvmppc_load_last_inst() may read
last guest instruction and instruct the emulation layer to re-execute the
guest in case of failure.
Make kvmppc_get_last_inst() definition common between architectures.
Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/include/asm/kvm_book3s.h | 26 | ||||
-rw-r--r-- | arch/powerpc/include/asm/kvm_booke.h | 5 | ||||
-rw-r--r-- | arch/powerpc/include/asm/kvm_ppc.h | 31 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s.c | 17 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu_hv.c | 17 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s_paired_singles.c | 38 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s_pr.c | 45 | ||||
-rw-r--r-- | arch/powerpc/kvm/booke.c | 3 | ||||
-rw-r--r-- | arch/powerpc/kvm/e500_mmu_host.c | 6 | ||||
-rw-r--r-- | arch/powerpc/kvm/emulate.c | 18 | ||||
-rw-r--r-- | arch/powerpc/kvm/powerpc.c | 11 |
11 files changed, 140 insertions, 77 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 20fb6f2890a0..a86ca652028c 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h | |||
@@ -276,32 +276,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) | |||
276 | return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE); | 276 | return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE); |
277 | } | 277 | } |
278 | 278 | ||
279 | static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc) | ||
280 | { | ||
281 | /* Load the instruction manually if it failed to do so in the | ||
282 | * exit path */ | ||
283 | if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) | ||
284 | kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false); | ||
285 | |||
286 | return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) : | ||
287 | vcpu->arch.last_inst; | ||
288 | } | ||
289 | |||
290 | static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) | ||
291 | { | ||
292 | return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu)); | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * Like kvmppc_get_last_inst(), but for fetching a sc instruction. | ||
297 | * Because the sc instruction sets SRR0 to point to the following | ||
298 | * instruction, we have to fetch from pc - 4. | ||
299 | */ | ||
300 | static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu) | ||
301 | { | ||
302 | return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4); | ||
303 | } | ||
304 | |||
305 | static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) | 279 | static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) |
306 | { | 280 | { |
307 | return vcpu->arch.fault_dar; | 281 | return vcpu->arch.fault_dar; |
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h index c7aed6105ff9..cbb19906ca36 100644 --- a/arch/powerpc/include/asm/kvm_booke.h +++ b/arch/powerpc/include/asm/kvm_booke.h | |||
@@ -69,11 +69,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) | |||
69 | return false; | 69 | return false; |
70 | } | 70 | } |
71 | 71 | ||
72 | static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) | ||
73 | { | ||
74 | return vcpu->arch.last_inst; | ||
75 | } | ||
76 | |||
77 | static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) | 72 | static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) |
78 | { | 73 | { |
79 | vcpu->arch.ctr = val; | 74 | vcpu->arch.ctr = val; |
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 246fb9a7df33..e38136304c1f 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h | |||
@@ -47,6 +47,11 @@ enum emulation_result { | |||
47 | EMULATE_EXIT_USER, /* emulation requires exit to user-space */ | 47 | EMULATE_EXIT_USER, /* emulation requires exit to user-space */ |
48 | }; | 48 | }; |
49 | 49 | ||
50 | enum instruction_type { | ||
51 | INST_GENERIC, | ||
52 | INST_SC, /* system call */ | ||
53 | }; | ||
54 | |||
50 | extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); | 55 | extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); |
51 | extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); | 56 | extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); |
52 | extern void kvmppc_handler_highmem(void); | 57 | extern void kvmppc_handler_highmem(void); |
@@ -62,6 +67,9 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
62 | u64 val, unsigned int bytes, | 67 | u64 val, unsigned int bytes, |
63 | int is_default_endian); | 68 | int is_default_endian); |
64 | 69 | ||
70 | extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, | ||
71 | enum instruction_type type, u32 *inst); | ||
72 | |||
65 | extern int kvmppc_emulate_instruction(struct kvm_run *run, | 73 | extern int kvmppc_emulate_instruction(struct kvm_run *run, |
66 | struct kvm_vcpu *vcpu); | 74 | struct kvm_vcpu *vcpu); |
67 | extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); | 75 | extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); |
@@ -234,6 +242,29 @@ struct kvmppc_ops { | |||
234 | extern struct kvmppc_ops *kvmppc_hv_ops; | 242 | extern struct kvmppc_ops *kvmppc_hv_ops; |
235 | extern struct kvmppc_ops *kvmppc_pr_ops; | 243 | extern struct kvmppc_ops *kvmppc_pr_ops; |
236 | 244 | ||
245 | static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu, | ||
246 | enum instruction_type type, u32 *inst) | ||
247 | { | ||
248 | int ret = EMULATE_DONE; | ||
249 | u32 fetched_inst; | ||
250 | |||
251 | /* Load the instruction manually if it failed to do so in the | ||
252 | * exit path */ | ||
253 | if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) | ||
254 | ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst); | ||
255 | |||
256 | /* Write fetch_failed unswapped if the fetch failed */ | ||
257 | if (ret == EMULATE_DONE) | ||
258 | fetched_inst = kvmppc_need_byteswap(vcpu) ? | ||
259 | swab32(vcpu->arch.last_inst) : | ||
260 | vcpu->arch.last_inst; | ||
261 | else | ||
262 | fetched_inst = vcpu->arch.last_inst; | ||
263 | |||
264 | *inst = fetched_inst; | ||
265 | return ret; | ||
266 | } | ||
267 | |||
237 | static inline bool is_kvmppc_hv_enabled(struct kvm *kvm) | 268 | static inline bool is_kvmppc_hv_enabled(struct kvm *kvm) |
238 | { | 269 | { |
239 | return kvm->arch.kvm_ops == kvmppc_hv_ops; | 270 | return kvm->arch.kvm_ops == kvmppc_hv_ops; |
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 31facfc1314b..37ca8a0897c3 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c | |||
@@ -488,6 +488,23 @@ mmio: | |||
488 | } | 488 | } |
489 | EXPORT_SYMBOL_GPL(kvmppc_ld); | 489 | EXPORT_SYMBOL_GPL(kvmppc_ld); |
490 | 490 | ||
491 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | ||
492 | u32 *inst) | ||
493 | { | ||
494 | ulong pc = kvmppc_get_pc(vcpu); | ||
495 | int r; | ||
496 | |||
497 | if (type == INST_SC) | ||
498 | pc -= 4; | ||
499 | |||
500 | r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false); | ||
501 | if (r == EMULATE_DONE) | ||
502 | return r; | ||
503 | else | ||
504 | return EMULATE_AGAIN; | ||
505 | } | ||
506 | EXPORT_SYMBOL_GPL(kvmppc_load_last_inst); | ||
507 | |||
491 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 508 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
492 | { | 509 | { |
493 | return 0; | 510 | return 0; |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 2d154d9319b3..fa944a3abdf0 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
@@ -530,21 +530,14 @@ static int instruction_is_store(unsigned int instr) | |||
530 | static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, | 530 | static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, |
531 | unsigned long gpa, gva_t ea, int is_store) | 531 | unsigned long gpa, gva_t ea, int is_store) |
532 | { | 532 | { |
533 | int ret; | ||
534 | u32 last_inst; | 533 | u32 last_inst; |
535 | unsigned long srr0 = kvmppc_get_pc(vcpu); | ||
536 | 534 | ||
537 | /* We try to load the last instruction. We don't let | 535 | /* |
538 | * emulate_instruction do it as it doesn't check what | ||
539 | * kvmppc_ld returns. | ||
540 | * If we fail, we just return to the guest and try executing it again. | 536 | * If we fail, we just return to the guest and try executing it again. |
541 | */ | 537 | */ |
542 | if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) { | 538 | if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != |
543 | ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); | 539 | EMULATE_DONE) |
544 | if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED) | 540 | return RESUME_GUEST; |
545 | return RESUME_GUEST; | ||
546 | vcpu->arch.last_inst = last_inst; | ||
547 | } | ||
548 | 541 | ||
549 | /* | 542 | /* |
550 | * WARNING: We do not know for sure whether the instruction we just | 543 | * WARNING: We do not know for sure whether the instruction we just |
@@ -558,7 +551,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
558 | * we just return and retry the instruction. | 551 | * we just return and retry the instruction. |
559 | */ | 552 | */ |
560 | 553 | ||
561 | if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store) | 554 | if (instruction_is_store(last_inst) != !!is_store) |
562 | return RESUME_GUEST; | 555 | return RESUME_GUEST; |
563 | 556 | ||
564 | /* | 557 | /* |
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c index 6c8011fd57e6..bfb8035314e3 100644 --- a/arch/powerpc/kvm/book3s_paired_singles.c +++ b/arch/powerpc/kvm/book3s_paired_singles.c | |||
@@ -639,26 +639,36 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc, | |||
639 | 639 | ||
640 | int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | 640 | int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) |
641 | { | 641 | { |
642 | u32 inst = kvmppc_get_last_inst(vcpu); | 642 | u32 inst; |
643 | enum emulation_result emulated = EMULATE_DONE; | 643 | enum emulation_result emulated = EMULATE_DONE; |
644 | int ax_rd, ax_ra, ax_rb, ax_rc; | ||
645 | short full_d; | ||
646 | u64 *fpr_d, *fpr_a, *fpr_b, *fpr_c; | ||
644 | 647 | ||
645 | int ax_rd = inst_get_field(inst, 6, 10); | 648 | bool rcomp; |
646 | int ax_ra = inst_get_field(inst, 11, 15); | 649 | u32 cr; |
647 | int ax_rb = inst_get_field(inst, 16, 20); | ||
648 | int ax_rc = inst_get_field(inst, 21, 25); | ||
649 | short full_d = inst_get_field(inst, 16, 31); | ||
650 | |||
651 | u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd); | ||
652 | u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra); | ||
653 | u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb); | ||
654 | u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc); | ||
655 | |||
656 | bool rcomp = (inst & 1) ? true : false; | ||
657 | u32 cr = kvmppc_get_cr(vcpu); | ||
658 | #ifdef DEBUG | 650 | #ifdef DEBUG |
659 | int i; | 651 | int i; |
660 | #endif | 652 | #endif |
661 | 653 | ||
654 | emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst); | ||
655 | if (emulated != EMULATE_DONE) | ||
656 | return emulated; | ||
657 | |||
658 | ax_rd = inst_get_field(inst, 6, 10); | ||
659 | ax_ra = inst_get_field(inst, 11, 15); | ||
660 | ax_rb = inst_get_field(inst, 16, 20); | ||
661 | ax_rc = inst_get_field(inst, 21, 25); | ||
662 | full_d = inst_get_field(inst, 16, 31); | ||
663 | |||
664 | fpr_d = &VCPU_FPR(vcpu, ax_rd); | ||
665 | fpr_a = &VCPU_FPR(vcpu, ax_ra); | ||
666 | fpr_b = &VCPU_FPR(vcpu, ax_rb); | ||
667 | fpr_c = &VCPU_FPR(vcpu, ax_rc); | ||
668 | |||
669 | rcomp = (inst & 1) ? true : false; | ||
670 | cr = kvmppc_get_cr(vcpu); | ||
671 | |||
662 | if (!kvmppc_inst_is_paired_single(vcpu, inst)) | 672 | if (!kvmppc_inst_is_paired_single(vcpu, inst)) |
663 | return EMULATE_FAIL; | 673 | return EMULATE_FAIL; |
664 | 674 | ||
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index e76aec38ec21..b18f2d4fac59 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -1018,15 +1018,24 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
1018 | { | 1018 | { |
1019 | enum emulation_result er; | 1019 | enum emulation_result er; |
1020 | ulong flags; | 1020 | ulong flags; |
1021 | u32 last_inst; | ||
1022 | int emul; | ||
1021 | 1023 | ||
1022 | program_interrupt: | 1024 | program_interrupt: |
1023 | flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; | 1025 | flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; |
1024 | 1026 | ||
1027 | emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); | ||
1028 | if (emul != EMULATE_DONE) { | ||
1029 | r = RESUME_GUEST; | ||
1030 | break; | ||
1031 | } | ||
1032 | |||
1025 | if (kvmppc_get_msr(vcpu) & MSR_PR) { | 1033 | if (kvmppc_get_msr(vcpu) & MSR_PR) { |
1026 | #ifdef EXIT_DEBUG | 1034 | #ifdef EXIT_DEBUG |
1027 | printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); | 1035 | pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n", |
1036 | kvmppc_get_pc(vcpu), last_inst); | ||
1028 | #endif | 1037 | #endif |
1029 | if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) != | 1038 | if ((last_inst & 0xff0007ff) != |
1030 | (INS_DCBZ & 0xfffffff7)) { | 1039 | (INS_DCBZ & 0xfffffff7)) { |
1031 | kvmppc_core_queue_program(vcpu, flags); | 1040 | kvmppc_core_queue_program(vcpu, flags); |
1032 | r = RESUME_GUEST; | 1041 | r = RESUME_GUEST; |
@@ -1045,7 +1054,7 @@ program_interrupt: | |||
1045 | break; | 1054 | break; |
1046 | case EMULATE_FAIL: | 1055 | case EMULATE_FAIL: |
1047 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | 1056 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", |
1048 | __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); | 1057 | __func__, kvmppc_get_pc(vcpu), last_inst); |
1049 | kvmppc_core_queue_program(vcpu, flags); | 1058 | kvmppc_core_queue_program(vcpu, flags); |
1050 | r = RESUME_GUEST; | 1059 | r = RESUME_GUEST; |
1051 | break; | 1060 | break; |
@@ -1062,8 +1071,23 @@ program_interrupt: | |||
1062 | break; | 1071 | break; |
1063 | } | 1072 | } |
1064 | case BOOK3S_INTERRUPT_SYSCALL: | 1073 | case BOOK3S_INTERRUPT_SYSCALL: |
1074 | { | ||
1075 | u32 last_sc; | ||
1076 | int emul; | ||
1077 | |||
1078 | /* Get last sc for papr */ | ||
1079 | if (vcpu->arch.papr_enabled) { | ||
1080 | /* The sc instuction points SRR0 to the next inst */ | ||
1081 | emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc); | ||
1082 | if (emul != EMULATE_DONE) { | ||
1083 | kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4); | ||
1084 | r = RESUME_GUEST; | ||
1085 | break; | ||
1086 | } | ||
1087 | } | ||
1088 | |||
1065 | if (vcpu->arch.papr_enabled && | 1089 | if (vcpu->arch.papr_enabled && |
1066 | (kvmppc_get_last_sc(vcpu) == 0x44000022) && | 1090 | (last_sc == 0x44000022) && |
1067 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { | 1091 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { |
1068 | /* SC 1 papr hypercalls */ | 1092 | /* SC 1 papr hypercalls */ |
1069 | ulong cmd = kvmppc_get_gpr(vcpu, 3); | 1093 | ulong cmd = kvmppc_get_gpr(vcpu, 3); |
@@ -1108,21 +1132,19 @@ program_interrupt: | |||
1108 | r = RESUME_GUEST; | 1132 | r = RESUME_GUEST; |
1109 | } | 1133 | } |
1110 | break; | 1134 | break; |
1135 | } | ||
1111 | case BOOK3S_INTERRUPT_FP_UNAVAIL: | 1136 | case BOOK3S_INTERRUPT_FP_UNAVAIL: |
1112 | case BOOK3S_INTERRUPT_ALTIVEC: | 1137 | case BOOK3S_INTERRUPT_ALTIVEC: |
1113 | case BOOK3S_INTERRUPT_VSX: | 1138 | case BOOK3S_INTERRUPT_VSX: |
1114 | { | 1139 | { |
1115 | int ext_msr = 0; | 1140 | int ext_msr = 0; |
1116 | int emul; | 1141 | int emul; |
1117 | ulong pc; | ||
1118 | u32 last_inst; | 1142 | u32 last_inst; |
1119 | 1143 | ||
1120 | if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) { | 1144 | if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) { |
1121 | /* Do paired single instruction emulation */ | 1145 | /* Do paired single instruction emulation */ |
1122 | pc = kvmppc_get_pc(vcpu); | 1146 | emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, |
1123 | last_inst = kvmppc_get_last_inst(vcpu); | 1147 | &last_inst); |
1124 | emul = kvmppc_ld(vcpu, &pc, sizeof(u32), &last_inst, | ||
1125 | false); | ||
1126 | if (emul == EMULATE_DONE) | 1148 | if (emul == EMULATE_DONE) |
1127 | goto program_interrupt; | 1149 | goto program_interrupt; |
1128 | else | 1150 | else |
@@ -1151,9 +1173,8 @@ program_interrupt: | |||
1151 | } | 1173 | } |
1152 | case BOOK3S_INTERRUPT_ALIGNMENT: | 1174 | case BOOK3S_INTERRUPT_ALIGNMENT: |
1153 | { | 1175 | { |
1154 | ulong pc = kvmppc_get_pc(vcpu); | 1176 | u32 last_inst; |
1155 | u32 last_inst = kvmppc_get_last_inst(vcpu); | 1177 | int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); |
1156 | int emul = kvmppc_ld(vcpu, &pc, sizeof(u32), &last_inst, false); | ||
1157 | 1178 | ||
1158 | if (emul == EMULATE_DONE) { | 1179 | if (emul == EMULATE_DONE) { |
1159 | u32 dsisr; | 1180 | u32 dsisr; |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index a06ef6b30258..50df5e3072cc 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -702,6 +702,9 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
702 | * they were actually modified by emulation. */ | 702 | * they were actually modified by emulation. */ |
703 | return RESUME_GUEST_NV; | 703 | return RESUME_GUEST_NV; |
704 | 704 | ||
705 | case EMULATE_AGAIN: | ||
706 | return RESUME_GUEST; | ||
707 | |||
705 | case EMULATE_DO_DCR: | 708 | case EMULATE_DO_DCR: |
706 | run->exit_reason = KVM_EXIT_DCR; | 709 | run->exit_reason = KVM_EXIT_DCR; |
707 | return RESUME_HOST; | 710 | return RESUME_HOST; |
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 79677d76d1a4..4385c14fca84 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c | |||
@@ -610,6 +610,12 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, | |||
610 | } | 610 | } |
611 | } | 611 | } |
612 | 612 | ||
613 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | ||
614 | u32 *instr) | ||
615 | { | ||
616 | return EMULATE_AGAIN; | ||
617 | } | ||
618 | |||
613 | /************* MMU Notifiers *************/ | 619 | /************* MMU Notifiers *************/ |
614 | 620 | ||
615 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | 621 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) |
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index da86d9ba3476..c5c64b6e7eb2 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -224,19 +224,25 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
224 | * from opcode tables in the future. */ | 224 | * from opcode tables in the future. */ |
225 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | 225 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) |
226 | { | 226 | { |
227 | u32 inst = kvmppc_get_last_inst(vcpu); | 227 | u32 inst; |
228 | int ra = get_ra(inst); | 228 | int ra, rs, rt, sprn; |
229 | int rs = get_rs(inst); | 229 | enum emulation_result emulated; |
230 | int rt = get_rt(inst); | ||
231 | int sprn = get_sprn(inst); | ||
232 | enum emulation_result emulated = EMULATE_DONE; | ||
233 | int advance = 1; | 230 | int advance = 1; |
234 | 231 | ||
235 | /* this default type might be overwritten by subcategories */ | 232 | /* this default type might be overwritten by subcategories */ |
236 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | 233 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); |
237 | 234 | ||
235 | emulated = kvmppc_get_last_inst(vcpu, false, &inst); | ||
236 | if (emulated != EMULATE_DONE) | ||
237 | return emulated; | ||
238 | |||
238 | pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); | 239 | pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); |
239 | 240 | ||
241 | ra = get_ra(inst); | ||
242 | rs = get_rs(inst); | ||
243 | rt = get_rt(inst); | ||
244 | sprn = get_sprn(inst); | ||
245 | |||
240 | switch (get_op(inst)) { | 246 | switch (get_op(inst)) { |
241 | case OP_TRAP: | 247 | case OP_TRAP: |
242 | #ifdef CONFIG_PPC_BOOK3S | 248 | #ifdef CONFIG_PPC_BOOK3S |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index fe0257a8e335..cfa6cfabf4a3 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -280,6 +280,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
280 | * actually modified. */ | 280 | * actually modified. */ |
281 | r = RESUME_GUEST_NV; | 281 | r = RESUME_GUEST_NV; |
282 | break; | 282 | break; |
283 | case EMULATE_AGAIN: | ||
284 | r = RESUME_GUEST; | ||
285 | break; | ||
283 | case EMULATE_DO_MMIO: | 286 | case EMULATE_DO_MMIO: |
284 | run->exit_reason = KVM_EXIT_MMIO; | 287 | run->exit_reason = KVM_EXIT_MMIO; |
285 | /* We must reload nonvolatiles because "update" load/store | 288 | /* We must reload nonvolatiles because "update" load/store |
@@ -289,11 +292,15 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
289 | r = RESUME_HOST_NV; | 292 | r = RESUME_HOST_NV; |
290 | break; | 293 | break; |
291 | case EMULATE_FAIL: | 294 | case EMULATE_FAIL: |
295 | { | ||
296 | u32 last_inst; | ||
297 | |||
298 | kvmppc_get_last_inst(vcpu, false, &last_inst); | ||
292 | /* XXX Deliver Program interrupt to guest. */ | 299 | /* XXX Deliver Program interrupt to guest. */ |
293 | printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, | 300 | pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst); |
294 | kvmppc_get_last_inst(vcpu)); | ||
295 | r = RESUME_HOST; | 301 | r = RESUME_HOST; |
296 | break; | 302 | break; |
303 | } | ||
297 | default: | 304 | default: |
298 | WARN_ON(1); | 305 | WARN_ON(1); |
299 | r = RESUME_GUEST; | 306 | r = RESUME_GUEST; |