-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h       |  3
-rw-r--r--  arch/powerpc/include/asm/mce.h           |  2
-rw-r--r--  arch/powerpc/kernel/mce.c                |  8
-rw-r--r--  arch/powerpc/kvm/book3s.c                |  7
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c             | 25
-rw-r--r--  arch/powerpc/kvm/book3s_hv_ras.c         | 58
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  | 66
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c    |  2
8 files changed, 71 insertions, 100 deletions
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b3bf4f61b30c..d283d3179fbc 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -143,6 +143,7 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
@@ -646,7 +647,7 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                             unsigned int yield_count);
 long kvmppc_h_random(struct kvm_vcpu *vcpu);
 void kvmhv_commence_exit(int trap);
-long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
+void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
 void kvmppc_subcore_enter_guest(void);
 void kvmppc_subcore_exit_guest(void);
 long kvmppc_realmode_hmi_handler(void);
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index a8b8903e1844..17996bc9382b 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -209,7 +209,7 @@ extern int get_mce_event(struct machine_check_event *mce, bool release);
 extern void release_mce_event(void);
 extern void machine_check_queue_event(void);
 extern void machine_check_print_event_info(struct machine_check_event *evt,
-                                           bool user_mode);
+                                           bool user_mode, bool in_guest);
 #ifdef CONFIG_PPC_BOOK3S_64
 void flush_and_reload_slb(void);
 #endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index bd933a75f0bc..d501b48f287e 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -301,13 +301,13 @@ static void machine_check_process_queued_event(struct irq_work *work)
         while (__this_cpu_read(mce_queue_count) > 0) {
                 index = __this_cpu_read(mce_queue_count) - 1;
                 evt = this_cpu_ptr(&mce_event_queue[index]);
-                machine_check_print_event_info(evt, false);
+                machine_check_print_event_info(evt, false, false);
                 __this_cpu_dec(mce_queue_count);
         }
 }
 
 void machine_check_print_event_info(struct machine_check_event *evt,
-                                    bool user_mode)
+                                    bool user_mode, bool in_guest)
 {
         const char *level, *sevstr, *subtype;
         static const char *mc_ue_types[] = {
@@ -387,7 +387,9 @@ void machine_check_print_event_info(struct machine_check_event *evt,
                evt->disposition == MCE_DISPOSITION_RECOVERED ?
                "Recovered" : "Not recovered");
 
-        if (user_mode) {
+        if (in_guest) {
+                printk("%s Guest NIP: %016llx\n", level, evt->srr0);
+        } else if (user_mode) {
                 printk("%s NIP: [%016llx] PID: %d Comm: %s\n", level,
                        evt->srr0, current->pid, current->comm);
         } else {
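
The mce.h/mce.c change above threads a third flag, in_guest, through machine_check_print_event_info() so that a guest NIP is labeled as such instead of being reported like a host address. A caller's-eye sketch of the resulting branch priority, as plain user-space C (a standalone model, not kernel code; the addresses are made up):

    #include <stdbool.h>
    #include <stdio.h>

    /* Models the reporting branch added to machine_check_print_event_info():
     * in_guest takes priority over user_mode, because a guest NIP must not
     * be resolved against host symbols or the current task. */
    static void print_mce_nip(unsigned long long srr0, bool user_mode, bool in_guest)
    {
            if (in_guest)
                    printf("Guest NIP: %016llx\n", srr0);
            else if (user_mode)
                    printf("NIP: [%016llx] plus PID/comm of current\n", srr0);
            else
                    printf("NIP: [%016llx] (host kernel)\n", srr0);
    }

    int main(void)
    {
            print_mce_nip(0xc000000000e71234ULL, false, true);  /* guest exit */
            print_mce_nip(0x0000000010002f00ULL, true, false);  /* host user */
            print_mce_nip(0xc000000000123456ULL, false, false); /* host kernel */
            return 0;
    }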
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 22a46c64536b..10c5579d20ce 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -195,6 +195,13 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 }
 EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
 
+void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
+{
+        /* might as well deliver this straight away */
+        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
+}
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
+
 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
         /* might as well deliver this straight away */
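
The new helper follows kvmppc_core_queue_program(): despite the "queue" in its name, the interrupt is injected immediately and is taken on the next guest entry. A rough standalone model of that calling convention (the struct and helpers here are illustrative stand-ins, not kernel types):

    #include <stdio.h>

    enum intr { BOOK3S_INTERRUPT_MACHINE_CHECK = 0x200 };

    struct vcpu {
            unsigned long pending_vec;
            unsigned long srr1_flags;
    };

    /* Stand-in for kvmppc_inject_interrupt(): record the vector and the
     * SRR1 flag bits the guest will observe when the interrupt is taken. */
    static void inject_interrupt(struct vcpu *v, enum intr vec, unsigned long flags)
    {
            v->pending_vec = vec;
            v->srr1_flags = flags;
    }

    static void queue_machine_check(struct vcpu *v, unsigned long flags)
    {
            /* might as well deliver this straight away */
            inject_interrupt(v, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
    }

    int main(void)
    {
            struct vcpu v = { 0 };

            queue_machine_check(&v, 0x00200000UL); /* made-up SRR1 flags */
            printf("pending vector %#lx, srr1 flags %#lx\n",
                   v.pending_vec, v.srr1_flags);
            return 0;
    }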
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index ba8db3881ef9..60458947b99e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1217,6 +1217,22 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 r = RESUME_GUEST;
                 break;
         case BOOK3S_INTERRUPT_MACHINE_CHECK:
+                /* Print the MCE event to host console. */
+                machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
+
+                /*
+                 * If the guest can do FWNMI, exit to userspace so it can
+                 * deliver a FWNMI to the guest.
+                 * Otherwise we synthesize a machine check for the guest
+                 * so that it knows that the machine check occurred.
+                 */
+                if (!vcpu->kvm->arch.fwnmi_enabled) {
+                        ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
+                        kvmppc_core_queue_machine_check(vcpu, flags);
+                        r = RESUME_GUEST;
+                        break;
+                }
+
                 /* Exit to guest with KVM_EXIT_NMI as exit reason */
                 run->exit_reason = KVM_EXIT_NMI;
                 run->hw.hardware_exit_reason = vcpu->arch.trap;
@@ -1229,8 +1245,6 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;
 
                 r = RESUME_HOST;
-                /* Print the MCE event to host console. */
-                machine_check_print_event_info(&vcpu->arch.mce_evt, false);
                 break;
         case BOOK3S_INTERRUPT_PROGRAM:
         {
@@ -1394,7 +1408,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 /* Pass the machine check to the L1 guest */
                 r = RESUME_HOST;
                 /* Print the MCE event to host console. */
-                machine_check_print_event_info(&vcpu->arch.mce_evt, false);
+                machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
                 break;
         /*
          * We get these next two if the guest accesses a page which it thinks
@@ -3457,6 +3471,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
         unsigned long host_dscr = mfspr(SPRN_DSCR);
         unsigned long host_tidr = mfspr(SPRN_TIDR);
         unsigned long host_iamr = mfspr(SPRN_IAMR);
+        unsigned long host_amr = mfspr(SPRN_AMR);
         s64 dec;
         u64 tb;
         int trap, save_pmu;
@@ -3573,13 +3588,15 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
         mtspr(SPRN_PSPB, 0);
         mtspr(SPRN_WORT, 0);
-        mtspr(SPRN_AMR, 0);
         mtspr(SPRN_UAMOR, 0);
         mtspr(SPRN_DSCR, host_dscr);
         mtspr(SPRN_TIDR, host_tidr);
         mtspr(SPRN_IAMR, host_iamr);
         mtspr(SPRN_PSPB, 0);
 
+        if (host_amr != vcpu->arch.amr)
+                mtspr(SPRN_AMR, host_amr);
+
         msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
         store_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
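
The rework of the exit handler boils down to: always log the event first, then either synthesize a 0x200 machine check for a legacy (non-FWNMI) guest or exit to userspace with KVM_EXIT_NMI so it can deliver an FWNMI. A minimal standalone model of that decision (the RESUME_* values and the 0x083c0000 SRR1 cause mask are taken from the patch; everything else is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define RESUME_GUEST 1
    #define RESUME_HOST  0

    /* fwnmi_enabled stands for vcpu->kvm->arch.fwnmi_enabled,
     * guest_msr for vcpu->arch.shregs.msr. */
    static int handle_guest_mce(bool fwnmi_enabled, unsigned long guest_msr)
    {
            /* Always log to the host console first (in_guest = true). */
            printf("MCE in guest, MSR %#lx\n", guest_msr);

            if (!fwnmi_enabled) {
                    /* Old QEMU: synthesize a machine check for the guest. */
                    unsigned long flags = guest_msr & 0x083c0000UL;

                    printf("queueing guest 0x200, SRR1 flags %#lx\n", flags);
                    return RESUME_GUEST;
            }

            /* FWNMI-capable guest: let userspace deliver an FWNMI. */
            printf("exiting to userspace with KVM_EXIT_NMI\n");
            return RESUME_HOST;
    }

    int main(void)
    {
            handle_guest_mce(false, 0x9000000000201000UL); /* legacy guest */
            handle_guest_mce(true, 0x9000000000201000UL);  /* FWNMI guest */
            return 0;
    }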
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 0787f12c1a1b..8c24c3bea0bf 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -66,10 +66,8 @@ static void reload_slb(struct kvm_vcpu *vcpu)
 /*
  * On POWER7, see if we can handle a machine check that occurred inside
  * the guest in real mode, without switching to the host partition.
- *
- * Returns: 0 => exit guest, 1 => deliver machine check to guest
  */
-static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
+static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 {
         unsigned long srr1 = vcpu->arch.shregs.msr;
         struct machine_check_event mce_evt;
@@ -111,52 +109,24 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
         }
 
         /*
-         * See if we have already handled the condition in the linux host.
-         * We assume that if the condition is recovered then linux host
-         * will have generated an error log event that we will pick
-         * up and log later.
-         * Don't release mce event now. We will queue up the event so that
-         * we can log the MCE event info on host console.
+         * Now get the event and stash it in the vcpu struct so it can
+         * be handled by the primary thread in virtual mode. We can't
+         * call machine_check_queue_event() here if we are running on
+         * an offline secondary thread.
          */
-        if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE))
-                goto out;
-
-        if (mce_evt.version == MCE_V1 &&
-            (mce_evt.severity == MCE_SEV_NO_ERROR ||
-             mce_evt.disposition == MCE_DISPOSITION_RECOVERED))
-                handled = 1;
-
-out:
-        /*
-         * For guest that supports FWNMI capability, hook the MCE event into
-         * vcpu structure. We are going to exit the guest with KVM_EXIT_NMI
-         * exit reason. On our way to exit we will pull this event from vcpu
-         * structure and print it from thread 0 of the core/subcore.
-         *
-         * For guest that does not support FWNMI capability (old QEMU):
-         * We are now going enter guest either through machine check
-         * interrupt (for unhandled errors) or will continue from
-         * current HSRR0 (for handled errors) in guest. Hence
-         * queue up the event so that we can log it from host console later.
-         */
-        if (vcpu->kvm->arch.fwnmi_enabled) {
-                /*
-                 * Hook up the mce event on to vcpu structure.
-                 * First clear the old event.
-                 */
-                memset(&vcpu->arch.mce_evt, 0, sizeof(vcpu->arch.mce_evt));
-                if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
-                        vcpu->arch.mce_evt = mce_evt;
-                }
-        } else
-                machine_check_queue_event();
+        if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
+                if (handled && mce_evt.version == MCE_V1)
+                        mce_evt.disposition = MCE_DISPOSITION_RECOVERED;
+        } else {
+                memset(&mce_evt, 0, sizeof(mce_evt));
+        }
 
-        return handled;
+        vcpu->arch.mce_evt = mce_evt;
 }
 
-long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
+void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
 {
-        return kvmppc_realmode_mc_power7(vcpu);
+        kvmppc_realmode_mc_power7(vcpu);
 }
 
 /* Check if dynamic split is in force and return subcore size accordingly. */
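
After this change the real-mode handler has a single job: attempt recovery, then stash the (possibly empty) event in the vcpu struct for the primary thread to log and deliver from virtual mode. A compilable sketch of that tail, with get_event() standing in for get_mce_event(..., MCE_EVENT_RELEASE) and deliberately simplified types:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct mce_event { int version; int disposition; };
    enum { MCE_V1 = 1, MCE_DISPOSITION_RECOVERED = 1 };

    /* Stand-in for get_mce_event(&evt, MCE_EVENT_RELEASE): pretend the
     * low-level MCE handler queued exactly one event for us. */
    static bool get_event(struct mce_event *evt)
    {
            evt->version = MCE_V1;
            evt->disposition = 0;
            return true;
    }

    /* Mirrors the new tail of kvmppc_realmode_mc_power7(). */
    static void stash_mce(struct mce_event *vcpu_evt, bool handled)
    {
            struct mce_event evt;

            if (get_event(&evt)) {
                    if (handled && evt.version == MCE_V1)
                            evt.disposition = MCE_DISPOSITION_RECOVERED;
            } else {
                    memset(&evt, 0, sizeof(evt));
            }
            *vcpu_evt = evt;        /* vcpu->arch.mce_evt = mce_evt; */
    }

    int main(void)
    {
            struct mce_event stored;

            stash_mce(&stored, true);
            printf("stashed disposition: %d\n", stored.disposition);
            return 0;
    }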
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 541b121477e4..3a5e719ef032 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -58,6 +58,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 #define STACK_SLOT_DAWR         (SFS-56)
 #define STACK_SLOT_DAWRX        (SFS-64)
 #define STACK_SLOT_HFSCR        (SFS-72)
+#define STACK_SLOT_AMR          (SFS-80)
+#define STACK_SLOT_UAMOR        (SFS-88)
 /* the following is used by the P9 short path */
 #define STACK_SLOT_NVGPRS       (SFS-152)       /* 18 gprs */
 
@@ -726,11 +728,9 @@ BEGIN_FTR_SECTION
         mfspr   r5, SPRN_TIDR
         mfspr   r6, SPRN_PSSCR
         mfspr   r7, SPRN_PID
-        mfspr   r8, SPRN_IAMR
         std     r5, STACK_SLOT_TID(r1)
         std     r6, STACK_SLOT_PSSCR(r1)
         std     r7, STACK_SLOT_PID(r1)
-        std     r8, STACK_SLOT_IAMR(r1)
         mfspr   r5, SPRN_HFSCR
         std     r5, STACK_SLOT_HFSCR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
@@ -738,11 +738,18 @@ BEGIN_FTR_SECTION
         mfspr   r5, SPRN_CIABR
         mfspr   r6, SPRN_DAWR
         mfspr   r7, SPRN_DAWRX
+        mfspr   r8, SPRN_IAMR
         std     r5, STACK_SLOT_CIABR(r1)
         std     r6, STACK_SLOT_DAWR(r1)
         std     r7, STACK_SLOT_DAWRX(r1)
+        std     r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
+        mfspr   r5, SPRN_AMR
+        std     r5, STACK_SLOT_AMR(r1)
+        mfspr   r6, SPRN_UAMOR
+        std     r6, STACK_SLOT_UAMOR(r1)
+
 BEGIN_FTR_SECTION
         /* Set partition DABR */
         /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -1631,22 +1638,25 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
         mtspr   SPRN_PSPB, r0
         mtspr   SPRN_WORT, r0
 BEGIN_FTR_SECTION
-        mtspr   SPRN_IAMR, r0
         mtspr   SPRN_TCSCR, r0
         /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
         li      r0, 1
         sldi    r0, r0, 31
         mtspr   SPRN_MMCRS, r0
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
-8:
 
-        /* Save and reset AMR and UAMOR before turning on the MMU */
+        /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
+        ld      r8, STACK_SLOT_IAMR(r1)
+        mtspr   SPRN_IAMR, r8
+
+8:      /* Power7 jumps back in here */
         mfspr   r5,SPRN_AMR
         mfspr   r6,SPRN_UAMOR
         std     r5,VCPU_AMR(r9)
         std     r6,VCPU_UAMOR(r9)
-        li      r6,0
-        mtspr   SPRN_AMR,r6
+        ld      r5,STACK_SLOT_AMR(r1)
+        ld      r6,STACK_SLOT_UAMOR(r1)
+        mtspr   SPRN_AMR, r5
         mtspr   SPRN_UAMOR, r6
 
         /* Switch DSCR back to host value */
@@ -1746,11 +1756,9 @@ BEGIN_FTR_SECTION
         ld      r5, STACK_SLOT_TID(r1)
         ld      r6, STACK_SLOT_PSSCR(r1)
         ld      r7, STACK_SLOT_PID(r1)
-        ld      r8, STACK_SLOT_IAMR(r1)
         mtspr   SPRN_TIDR, r5
         mtspr   SPRN_PSSCR, r6
         mtspr   SPRN_PID, r7
-        mtspr   SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
 #ifdef CONFIG_PPC_RADIX_MMU
@@ -2836,49 +2844,15 @@ kvm_cede_exit:
 #endif /* CONFIG_KVM_XICS */
 3:      b       guest_exit_cont
 
-        /* Try to handle a machine check in real mode */
+        /* Try to do machine check recovery in real mode */
 machine_check_realmode:
         mr      r3, r9          /* get vcpu pointer */
         bl      kvmppc_realmode_machine_check
         nop
+        /* all machine checks go to virtual mode for further handling */
         ld      r9, HSTATE_KVM_VCPU(r13)
         li      r12, BOOK3S_INTERRUPT_MACHINE_CHECK
-        /*
-         * For the guest that is FWNMI capable, deliver all the MCE errors
-         * (handled/unhandled) by exiting the guest with KVM_EXIT_NMI exit
-         * reason. This new approach injects machine check errors in guest
-         * address space to guest with additional information in the form
-         * of RTAS event, thus enabling guest kernel to suitably handle
-         * such errors.
-         *
-         * For the guest that is not FWNMI capable (old QEMU) fallback
-         * to old behaviour for backward compatibility:
-         * Deliver unhandled/fatal (e.g. UE) MCE errors to guest either
-         * through machine check interrupt (set HSRR0 to 0x200).
-         * For handled errors (no-fatal), just go back to guest execution
-         * with current HSRR0.
-         * if we receive machine check with MSR(RI=0) then deliver it to
-         * guest as machine check causing guest to crash.
-         */
-        ld      r11, VCPU_MSR(r9)
-        rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
-        bne     guest_exit_cont        /* if so, exit to host */
-        /* Check if guest is capable of handling NMI exit */
-        ld      r10, VCPU_KVM(r9)
-        lbz     r10, KVM_FWNMI(r10)
-        cmpdi   r10, 1                 /* FWNMI capable? */
-        beq     guest_exit_cont        /* if so, exit with KVM_EXIT_NMI. */
-
-        /* if not, fall through for backward compatibility. */
-        andi.   r10, r11, MSR_RI       /* check for unrecoverable exception */
-        beq     1f                     /* Deliver a machine check to guest */
-        ld      r10, VCPU_PC(r9)
-        cmpdi   r3, 0                  /* Did we handle MCE ? */
-        bne     2f                     /* Continue guest execution. */
-        /* If not, deliver a machine check. SRR0/1 are already set */
-1:      li      r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-        bl      kvmppc_msr_interrupt
-2:      b       fast_interrupt_c_return
+        b       guest_exit_cont
 
 /*
  * Call C code to handle a HMI in real mode.
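
The assembly changes mirror the C changes to kvmhv_p9_guest_entry() above: host AMR and UAMOR (and IAMR, on both feature paths) are now snapshotted into stack slots at guest entry and restored at exit, rather than being zeroed. A toy model of that discipline, including the skip-if-unchanged refinement from the P9 C path (mfspr_amr/mtspr_amr are stand-ins for the real SPR accessors; the values are made up):

    #include <stdio.h>

    static unsigned long hw_amr;    /* stands in for the AMR SPR */

    static unsigned long mfspr_amr(void) { return hw_amr; }

    static void mtspr_amr(unsigned long v)
    {
            hw_amr = v;
            printf("mtspr AMR = %#lx\n", v);
    }

    static void run_guest(unsigned long guest_amr)
    {
            /* Entry: snapshot the host value (std r5, STACK_SLOT_AMR(r1)). */
            unsigned long host_amr = mfspr_amr();

            mtspr_amr(guest_amr);
            /* ... guest runs, may modify AMR ... */

            /* Exit: restore the snapshot instead of zeroing, skipping the
             * (slow) mtspr when the register already holds the host value. */
            if (mfspr_amr() != host_amr)
                    mtspr_amr(host_amr);
    }

    int main(void)
    {
            hw_amr = 0xf00dUL;      /* made-up host AMR */
            run_guest(0x0UL);       /* differs: restore happens */
            run_guest(0xf00dUL);    /* matches: restore skipped */
            return 0;
    }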
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 79586f127521..05c85be0370f 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -587,7 +587,7 @@ int opal_machine_check(struct pt_regs *regs)
                         evt.version);
                 return 0;
         }
-        machine_check_print_event_info(&evt, user_mode(regs));
+        machine_check_print_event_info(&evt, user_mode(regs), false);
 
         if (opal_recover_mce(regs, &evt))
                 return 1;