path: root/arch/powerpc/kvm/book3s_hv_rm_xics.c
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rm_xics.c')
-rw-r--r--   arch/powerpc/kvm/book3s_hv_rm_xics.c | 238
1 file changed, 217 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 7c22997de906..00e45b6d4f24 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -23,17 +23,37 @@
 
 #define DEBUG_PASSUP
 
-static inline void rm_writeb(unsigned long paddr, u8 val)
+static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+                               u32 new_irq);
+
+/* -- ICS routines -- */
+static void ics_rm_check_resend(struct kvmppc_xics *xics,
+                                struct kvmppc_ics *ics, struct kvmppc_icp *icp)
 {
-        __asm__ __volatile__("sync; stbcix %0,0,%1"
-                : : "r" (val), "r" (paddr) : "memory");
+        int i;
+
+        arch_spin_lock(&ics->lock);
+
+        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+                struct ics_irq_state *state = &ics->irq_state[i];
+
+                if (!state->resend)
+                        continue;
+
+                arch_spin_unlock(&ics->lock);
+                icp_rm_deliver_irq(xics, icp, state->number);
+                arch_spin_lock(&ics->lock);
+        }
+
+        arch_spin_unlock(&ics->lock);
 }
 
+/* -- ICP routines -- */
+
 static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
                                 struct kvm_vcpu *this_vcpu)
 {
         struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
-        unsigned long xics_phys;
         int cpu;
 
         /* Mark the target VCPU as having an interrupt pending */
@@ -56,9 +76,8 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
         /* In SMT cpu will always point to thread 0, we adjust it */
         cpu += vcpu->arch.ptid;
 
-        /* Not too hard, then poke the target */
-        xics_phys = paca[cpu].kvm_hstate.xics_phys;
-        rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+        smp_mb();
+        kvmhv_rm_send_ipi(cpu);
 }
 
 static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
@@ -116,6 +135,180 @@ static inline int check_too_hard(struct kvmppc_xics *xics,
         return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
 }
 
+static void icp_rm_check_resend(struct kvmppc_xics *xics,
+                                struct kvmppc_icp *icp)
+{
+        u32 icsid;
+
+        /* Order this load with the test for need_resend in the caller */
+        smp_rmb();
+        for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
+                struct kvmppc_ics *ics = xics->ics[icsid];
+
+                if (!test_and_clear_bit(icsid, icp->resend_map))
+                        continue;
+                if (!ics)
+                        continue;
+                ics_rm_check_resend(xics, ics, icp);
+        }
+}
+
+static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
+                                  u32 *reject)
+{
+        union kvmppc_icp_state old_state, new_state;
+        bool success;
+
+        do {
+                old_state = new_state = READ_ONCE(icp->state);
+
+                *reject = 0;
+
+                /* See if we can deliver */
+                success = new_state.cppr > priority &&
+                        new_state.mfrr > priority &&
+                        new_state.pending_pri > priority;
+
+                /*
+                 * If we can, check for a rejection and perform the
+                 * delivery
+                 */
+                if (success) {
+                        *reject = new_state.xisr;
+                        new_state.xisr = irq;
+                        new_state.pending_pri = priority;
+                } else {
+                        /*
+                         * If we failed to deliver we set need_resend
+                         * so a subsequent CPPR state change causes us
+                         * to try a new delivery.
+                         */
+                        new_state.need_resend = true;
+                }
+
+        } while (!icp_rm_try_update(icp, old_state, new_state));
+
+        return success;
+}
+
+static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+                               u32 new_irq)
+{
+        struct ics_irq_state *state;
+        struct kvmppc_ics *ics;
+        u32 reject;
+        u16 src;
+
+        /*
+         * This is used both for initial delivery of an interrupt and
+         * for subsequent rejection.
+         *
+         * Rejection can be racy vs. resends. We have evaluated the
+         * rejection in an atomic ICP transaction which is now complete,
+         * so potentially the ICP can already accept the interrupt again.
+         *
+         * So we need to retry the delivery. Essentially the reject path
+         * boils down to a failed delivery. Always.
+         *
+         * Now the interrupt could also have moved to a different target,
+         * thus we may need to re-do the ICP lookup as well
+         */
+
+ again:
+        /* Get the ICS state and lock it */
+        ics = kvmppc_xics_find_ics(xics, new_irq, &src);
+        if (!ics) {
+                /* Unsafe increment, but this does not need to be accurate */
+                xics->err_noics++;
+                return;
+        }
+        state = &ics->irq_state[src];
+
+        /* Get a lock on the ICS */
+        arch_spin_lock(&ics->lock);
+
+        /* Get our server */
+        if (!icp || state->server != icp->server_num) {
+                icp = kvmppc_xics_find_server(xics->kvm, state->server);
+                if (!icp) {
+                        /* Unsafe increment again*/
+                        xics->err_noicp++;
+                        goto out;
+                }
+        }
+
+        /* Clear the resend bit of that interrupt */
+        state->resend = 0;
+
+        /*
+         * If masked, bail out
+         *
+         * Note: PAPR doesn't mention anything about masked pending
+         * when doing a resend, only when doing a delivery.
+         *
+         * However that would have the effect of losing a masked
+         * interrupt that was rejected and isn't consistent with
+         * the whole masked_pending business which is about not
+         * losing interrupts that occur while masked.
+         *
+         * I don't differentiate normal deliveries and resends, this
+         * implementation will differ from PAPR and not lose such
+         * interrupts.
+         */
+        if (state->priority == MASKED) {
+                state->masked_pending = 1;
+                goto out;
+        }
+
+        /*
+         * Try the delivery, this will set the need_resend flag
+         * in the ICP as part of the atomic transaction if the
+         * delivery is not possible.
+         *
+         * Note that if successful, the new delivery might have itself
+         * rejected an interrupt that was "delivered" before we took the
+         * ics spin lock.
+         *
+         * In this case we do the whole sequence all over again for the
+         * new guy. We cannot assume that the rejected interrupt is less
+         * favored than the new one, and thus doesn't need to be delivered,
+         * because by the time we exit icp_rm_try_to_deliver() the target
+         * processor may well have already consumed & completed it, and thus
+         * the rejected interrupt might actually be already acceptable.
+         */
+        if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
+                /*
+                 * Delivery was successful, did we reject somebody else ?
+                 */
+                if (reject && reject != XICS_IPI) {
+                        arch_spin_unlock(&ics->lock);
+                        new_irq = reject;
+                        goto again;
+                }
+        } else {
+                /*
+                 * We failed to deliver the interrupt we need to set the
+                 * resend map bit and mark the ICS state as needing a resend
+                 */
+                set_bit(ics->icsid, icp->resend_map);
+                state->resend = 1;
+
+                /*
+                 * If the need_resend flag got cleared in the ICP some time
+                 * between icp_rm_try_to_deliver() atomic update and now, then
+                 * we know it might have missed the resend_map bit. So we
+                 * retry
+                 */
+                smp_mb();
+                if (!icp->state.need_resend) {
+                        arch_spin_unlock(&ics->lock);
+                        goto again;
+                }
+        }
+ out:
+        arch_spin_unlock(&ics->lock);
+}
+
 static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                              u8 new_cppr)
 {
@@ -184,8 +377,8 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
          * separately here as well.
          */
         if (resend) {
-                icp->rm_action |= XICS_RM_CHECK_RESEND;
-                icp->rm_resend_icp = icp;
+                icp->n_check_resend++;
+                icp_rm_check_resend(xics, icp);
         }
 }
 
@@ -300,16 +493,16 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                 }
         } while (!icp_rm_try_update(icp, old_state, new_state));
 
-        /* Pass rejects to virtual mode */
+        /* Handle reject in real mode */
         if (reject && reject != XICS_IPI) {
-                this_icp->rm_action |= XICS_RM_REJECT;
-                this_icp->rm_reject = reject;
+                this_icp->n_reject++;
+                icp_rm_deliver_irq(xics, icp, reject);
         }
 
-        /* Pass resends to virtual mode */
+        /* Handle resends in real mode */
         if (resend) {
-                this_icp->rm_action |= XICS_RM_CHECK_RESEND;
-                this_icp->rm_resend_icp = icp;
+                this_icp->n_check_resend++;
+                icp_rm_check_resend(xics, icp);
         }
 
         return check_too_hard(xics, this_icp);
@@ -365,10 +558,13 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 
         } while (!icp_rm_try_update(icp, old_state, new_state));
 
-        /* Pass rejects to virtual mode */
+        /*
+         * Check for rejects. They are handled by doing a new delivery
+         * attempt (see comments in icp_rm_deliver_irq).
+         */
         if (reject && reject != XICS_IPI) {
-                icp->rm_action |= XICS_RM_REJECT;
-                icp->rm_reject = reject;
+                icp->n_reject++;
+                icp_rm_deliver_irq(xics, icp, reject);
         }
  bail:
         return check_too_hard(xics, icp);
@@ -416,10 +612,10 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
                 goto bail;
         state = &ics->irq_state[src];
 
-        /* Still asserted, resend it, we make it look like a reject */
+        /* Still asserted, resend it */
         if (state->asserted) {
-                icp->rm_action |= XICS_RM_REJECT;
-                icp->rm_reject = irq;
+                icp->n_reject++;
+                icp_rm_deliver_irq(xics, icp, irq);
         }
 
         if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
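
Illustration (not part of the patch above): the icp_rm_try_to_deliver() path added in this diff follows the usual lock-free ICP update pattern, i.e. snapshot icp->state, compute a candidate state, and retry whenever icp_rm_try_update() observes a concurrent change. Below is a minimal user-space sketch of that pattern; the packed state layout, the try_update() helper and the C11 atomics are assumed stand-ins for the kernel's union kvmppc_icp_state and icp_rm_try_update(), which also carry CPU-wakeup side effects omitted here.

/*
 * Stand-alone sketch of the lock-free ICP delivery attempt.
 * Everything below is illustrative, not the kernel implementation.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

union icp_state {
        uint64_t raw;
        struct {
                uint8_t  cppr;        /* current processor priority */
                uint8_t  mfrr;        /* priority of a pending IPI */
                uint8_t  pending_pri; /* priority of xisr below */
                uint8_t  need_resend; /* ask sources to resend later */
                uint32_t xisr;        /* interrupt currently presented */
        };
};

static _Atomic uint64_t icp_state_raw;

/* Commit new_state only if the ICP still looks like old_state. */
static bool try_update(union icp_state old_state, union icp_state new_state)
{
        return atomic_compare_exchange_strong(&icp_state_raw,
                                              &old_state.raw, new_state.raw);
}

/*
 * Attempt a delivery the way the patch does: succeed only if the new
 * interrupt is more favored than CPPR, MFRR and any pending interrupt,
 * and report whatever interrupt it displaced through *reject.
 */
static bool try_to_deliver(uint32_t irq, uint8_t priority, uint32_t *reject)
{
        union icp_state old_state, new_state;
        bool success;

        do {
                old_state.raw = new_state.raw = atomic_load(&icp_state_raw);
                *reject = 0;

                success = new_state.cppr > priority &&
                          new_state.mfrr > priority &&
                          new_state.pending_pri > priority;
                if (success) {
                        *reject = new_state.xisr;   /* may bump a pending irq */
                        new_state.xisr = irq;
                        new_state.pending_pri = priority;
                } else {
                        new_state.need_resend = 1;  /* retry on a CPPR change */
                }
        } while (!try_update(old_state, new_state));

        return success;
}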