author	Suresh E. Warrier <warrier@linux.vnet.ibm.com>	2014-11-02 23:51:59 -0500
committer	Alexander Graf <agraf@suse.de>	2014-12-15 07:27:24 -0500
commit	5b88cda665bc6ae92c9bd12060c9fd0840211eb7 (patch)
tree	ffe52c106f5784a902b0b4df3510f2e439cd6c59 /arch
parent	b4a839009a0842759c0405662637b8f1f35ff460 (diff)
KVM: PPC: Book3S HV: Fix inaccuracies in ICP emulation for H_IPI
This fixes some inaccuracies in the state machine for the virtualized
ICP when implementing the H_IPI hcall (Set_MFRR and related states):

1. The old code wipes out any pending interrupts when the new MFRR is
   more favored than the CPPR but less favored than a pending
   interrupt (by always modifying xisr and the pending_pri). This can
   cause us to lose a pending external interrupt.

   The correct behavior is to modify the pending_pri and xisr in the
   ICP only if the MFRR is equal to or more favored than the current
   pending pri, since in that case it is guaranteed that there cannot
   be a pending external interrupt. The code changes are required in
   both kvmppc_rm_h_ipi and kvmppc_h_ipi.

2. Again, in both kvmppc_rm_h_ipi and kvmppc_h_ipi, there is a check
   for whether the MFRR is being made less favored AND, further,
   whether the new MFRR is also less favored than the current CPPR;
   only then do we check for any resends pending in the ICP. These
   checks look like they are designed to cover the case where, if the
   MFRR is being made less favored, we opportunistically trigger a
   resend of any interrupts that had previously been rejected.
   Although this is not a state described by PAPR, it is an action we
   actually need to take, especially when the CPPR is already at
   0xFF: the resend bit would otherwise stay on until another ICP
   state change, which may be a long time coming, and the interrupt
   stays pending until then. The current code, which only resends
   when the new MFRR is less favored than the CPPR, is broken when
   the CPPR is 0xFF, since that condition can never be true.

   Ideally, we would want to do a resend only if

       prio(pending_interrupt) < mfrr &&
       prio(pending_interrupt) < cppr

   where the pending interrupt is the one that was rejected. But we
   don't have the priority of the rejected interrupt saved, so we
   simply trigger a resend whenever the MFRR is made less favored.

3. In kvmppc_rm_h_ipi, where we save state to pass resends to virtual
   mode, we also need to save the ICP whose need_resend we reset,
   since this need not be our own ICP (vcpu->arch.icp) as the current
   code incorrectly assumes. A new field, rm_resend_icp, is added to
   the kvmppc_icp structure for this purpose.

Signed-off-by: Suresh Warrier <warrier@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
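[Editorial note: to make the corrected transitions in points 1 and 2
concrete, here is a self-contained C sketch of the H_IPI update. It is
illustrative only, not the kernel code: the icp_state struct and the
icp_set_mfrr helper are simplified stand-ins, and the atomic cmpxchg
retry loop and interrupt delivery of the real implementation are
omitted.]

	/* Sketch only -- simplified stand-in for the kernel's ICP state.
	 * In XICS, numerically *lower* priority values are *more* favored. */
	#include <stdbool.h>
	#include <stdint.h>

	#define XICS_IPI 2	/* source number the ICP uses for IPIs */

	struct icp_state {
		uint8_t  cppr;		/* current processor priority */
		uint8_t  mfrr;		/* most favored request register */
		uint8_t  pending_pri;	/* priority of the pending interrupt */
		uint32_t xisr;		/* pending source number, 0 if none */
		bool     need_resend;
	};

	/* Returns the XISR to reject (0 if none); sets *resend if previously
	 * rejected interrupts should be retried. */
	static uint32_t icp_set_mfrr(struct icp_state *s, uint8_t mfrr,
				     bool *resend)
	{
		uint8_t old_mfrr = s->mfrr;
		uint32_t reject = 0;

		s->mfrr = mfrr;
		*resend = false;

		if (mfrr < s->cppr) {
			/*
			 * Fix 1: displace the pending interrupt only when the
			 * IPI is equally or more favored (mfrr <= pending_pri);
			 * otherwise leave xisr/pending_pri untouched so a less
			 * favored IPI cannot wipe out a pending external
			 * interrupt.
			 */
			if (mfrr <= s->pending_pri) {
				reject = s->xisr;
				s->pending_pri = mfrr;
				s->xisr = XICS_IPI;
			}
		}

		/*
		 * Fix 2: trigger a resend whenever the MFRR is made less
		 * favored, without the old "&& mfrr > cppr" condition that
		 * could never be true while the CPPR sat at 0xFF.
		 */
		if (mfrr > old_mfrr) {
			*resend = s->need_resend;
			s->need_resend = false;
		}
		return reject;
	}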
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rm_xics.c	36
-rw-r--r--	arch/powerpc/kvm/book3s_xics.c	30
-rw-r--r--	arch/powerpc/kvm/book3s_xics.h	1
3 files changed, 52 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 3ee38e6e884f..7b066f6b02ad 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -183,8 +183,10 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 	 * state update in HW (ie bus transactions) so we can handle them
 	 * separately here as well.
 	 */
-	if (resend)
+	if (resend) {
 		icp->rm_action |= XICS_RM_CHECK_RESEND;
+		icp->rm_resend_icp = icp;
+	}
 }
 
 
@@ -254,10 +256,25 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 	 * nothing needs to be done as there can be no XISR to
 	 * reject.
 	 *
+	 * ICP state: Check_IPI
+	 *
 	 * If the CPPR is less favored, then we might be replacing
-	 * an interrupt, and thus need to possibly reject it as in
+	 * an interrupt, and thus need to possibly reject it.
 	 *
-	 * ICP state: Check_IPI
+	 * ICP State: IPI
+	 *
+	 * Besides rejecting any pending interrupts, we also
+	 * update XISR and pending_pri to mark IPI as pending.
+	 *
+	 * PAPR does not describe this state, but if the MFRR is being
+	 * made less favored than its earlier value, there might be
+	 * a previously-rejected interrupt needing to be resent.
+	 * Ideally, we would want to resend only if
+	 *	prio(pending_interrupt) < mfrr &&
+	 *	prio(pending_interrupt) < cppr
+	 * where pending interrupt is the one that was rejected. But
+	 * we don't have that state, so we simply trigger a resend
+	 * whenever the MFRR is made less favored.
 	 */
 	do {
 		old_state = new_state = ACCESS_ONCE(icp->state);
@@ -270,13 +287,14 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 		resend = false;
 		if (mfrr < new_state.cppr) {
 			/* Reject a pending interrupt if not an IPI */
-			if (mfrr <= new_state.pending_pri)
+			if (mfrr <= new_state.pending_pri) {
 				reject = new_state.xisr;
-			new_state.pending_pri = mfrr;
-			new_state.xisr = XICS_IPI;
+				new_state.pending_pri = mfrr;
+				new_state.xisr = XICS_IPI;
+			}
 		}
 
-		if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
+		if (mfrr > old_state.mfrr) {
 			resend = new_state.need_resend;
 			new_state.need_resend = 0;
 		}
@@ -289,8 +307,10 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 	}
 
 	/* Pass resends to virtual mode */
-	if (resend)
+	if (resend) {
 		this_icp->rm_action |= XICS_RM_CHECK_RESEND;
+		this_icp->rm_resend_icp = icp;
+	}
 
 	return check_too_hard(xics, this_icp);
 }
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index eaeb78047fb8..807351f76f84 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -613,10 +613,25 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 	 * there might be a previously-rejected interrupt needing
 	 * to be resent.
 	 *
+	 * ICP state: Check_IPI
+	 *
 	 * If the CPPR is less favored, then we might be replacing
-	 * an interrupt, and thus need to possibly reject it as in
+	 * an interrupt, and thus need to possibly reject it.
 	 *
-	 * ICP state: Check_IPI
+	 * ICP State: IPI
+	 *
+	 * Besides rejecting any pending interrupts, we also
+	 * update XISR and pending_pri to mark IPI as pending.
+	 *
+	 * PAPR does not describe this state, but if the MFRR is being
+	 * made less favored than its earlier value, there might be
+	 * a previously-rejected interrupt needing to be resent.
+	 * Ideally, we would want to resend only if
+	 *	prio(pending_interrupt) < mfrr &&
+	 *	prio(pending_interrupt) < cppr
+	 * where pending interrupt is the one that was rejected. But
+	 * we don't have that state, so we simply trigger a resend
+	 * whenever the MFRR is made less favored.
 	 */
 	do {
 		old_state = new_state = ACCESS_ONCE(icp->state);
@@ -629,13 +644,14 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 		resend = false;
 		if (mfrr < new_state.cppr) {
 			/* Reject a pending interrupt if not an IPI */
-			if (mfrr <= new_state.pending_pri)
+			if (mfrr <= new_state.pending_pri) {
 				reject = new_state.xisr;
-			new_state.pending_pri = mfrr;
-			new_state.xisr = XICS_IPI;
+				new_state.pending_pri = mfrr;
+				new_state.xisr = XICS_IPI;
+			}
 		}
 
-		if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
+		if (mfrr > old_state.mfrr) {
 			resend = new_state.need_resend;
 			new_state.need_resend = 0;
 		}
@@ -789,7 +805,7 @@ static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
 	if (icp->rm_action & XICS_RM_KICK_VCPU)
 		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
 	if (icp->rm_action & XICS_RM_CHECK_RESEND)
-		icp_check_resend(xics, icp);
+		icp_check_resend(xics, icp->rm_resend_icp);
 	if (icp->rm_action & XICS_RM_REJECT)
 		icp_deliver_irq(xics, icp, icp->rm_reject);
 	if (icp->rm_action & XICS_RM_NOTIFY_EOI)
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index e8aaa7a3f209..73f0f2723c07 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -74,6 +74,7 @@ struct kvmppc_icp {
 #define XICS_RM_NOTIFY_EOI	0x8
 	u32 rm_action;
 	struct kvm_vcpu *rm_kick_target;
+	struct kvmppc_icp *rm_resend_icp;
 	u32 rm_reject;
 	u32 rm_eoied_irq;
 
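[Editorial note: for point 3, here is a minimal sketch of the
real-mode to virtual-mode handoff that the new rm_resend_icp field
enables. All types and helpers below (icp, rm_flag_resend,
rm_complete, check_resend) are hypothetical stand-ins for the kernel
structures, kept only to show why the recorded ICP, rather than the
caller's own, must be passed to the resend check.]

	#include <stdint.h>
	#include <stdio.h>

	#define XICS_RM_CHECK_RESEND	0x2

	struct icp {
		int id;
		uint32_t rm_action;
		struct icp *rm_resend_icp;	/* the field this patch adds */
	};

	/* Stand-in for icp_check_resend(): retry rejected irqs of @icp. */
	static void check_resend(struct icp *icp)
	{
		printf("checking resends for ICP %d\n", icp->id);
	}

	/* Real-mode side: flag the action on the caller's ICP, but record
	 * the *target* ICP whose need_resend was reset -- they are not
	 * necessarily the same. */
	static void rm_flag_resend(struct icp *this_icp, struct icp *target)
	{
		this_icp->rm_action |= XICS_RM_CHECK_RESEND;
		this_icp->rm_resend_icp = target;
	}

	/* Virtual-mode completion: consume the recorded ICP rather than
	 * assuming it was the caller's own (the old code's bug). */
	static void rm_complete(struct icp *this_icp)
	{
		if (this_icp->rm_action & XICS_RM_CHECK_RESEND)
			check_resend(this_icp->rm_resend_icp);
	}

	int main(void)
	{
		struct icp caller = { .id = 0 }, target = { .id = 1 };

		rm_flag_resend(&caller, &target);
		rm_complete(&caller);	/* resends for ICP 1, not ICP 0 */
		return 0;
	}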