author		Suresh Warrier <warrier@linux.vnet.ibm.com>	2015-03-20 05:39:47 -0400
committer	Alexander Graf <agraf@suse.de>			2015-04-21 09:21:30 -0400
commit		b0221556dbd3c31c47f37703f856aeeffc78abd3 (patch)
tree		1bc8e3c6ea9e03e1c1310d9798348c9cfa3d1e10 /arch/powerpc
parent		34cb7954c0aa7c8ad1591cb6cceae36432f55bb5 (diff)
KVM: PPC: Book3S HV: Move virtual mode ICP functions to real-mode
Interrupt-based hypercalls return H_TOO_HARD to inform KVM that it needs to
switch to the host to complete the rest of the hypercall function in virtual
mode.

This patch ports the virtual mode ICS/ICP reject and resend functions to be
runnable in hypervisor real mode, thus avoiding the need to switch to the host
to execute these functions in virtual mode. However, the hypercalls continue
to return H_TOO_HARD for vcpu_wakeup and notify events, since these events
cannot be handled in real mode and still require a switch to host virtual mode.

There are sufficient differences between the real mode code and the virtual
mode code for the ICS/ICP resend and reject functions that, for now, the code
has been duplicated instead of sharing common code. In the future, we can look
at creating common functions.

Signed-off-by: Suresh Warrier <warrier@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rm_xics.c	225
1 file changed, 211 insertions(+), 14 deletions(-)
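
As background for the diff below: the real-mode/virtual-mode split described in the commit message centres on check_too_hard(), which returns H_TOO_HARD whenever an rm_action flag has been queued for the host. The sketch below shows the overall handler shape after this patch; the function name and parameters are illustrative only, not part of the patch.

/*
 * Illustrative sketch only (not part of the patch): the shape of a
 * real-mode XICS hypercall handler after this change.  Reject and
 * resend work is completed inline in real mode; only actions that
 * still need the host (vcpu wakeup, EOI notify) set icp->rm_action,
 * which makes the final check return H_TOO_HARD.
 */
static int example_rm_hcall(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 reject, bool resend)
{
	if (reject && reject != XICS_IPI)
		icp_rm_deliver_irq(xics, icp, reject);	/* now done in real mode */

	if (resend)
		icp_rm_check_resend(xics, icp);		/* now done in real mode */

	/* Same test as check_too_hard() in the hunks below */
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

With reject and resend handled by icp_rm_deliver_irq()/icp_rm_check_resend() in real mode, rm_action is only set for the remaining vcpu-wakeup and notify cases, so far fewer hypercalls have to bounce out to host virtual mode.
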
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 7c22997de906..73bbe9246512 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -23,12 +23,39 @@
 
 #define DEBUG_PASSUP
 
+static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+			u32 new_irq);
+
 static inline void rm_writeb(unsigned long paddr, u8 val)
 {
 	__asm__ __volatile__("sync; stbcix %0,0,%1"
 		: : "r" (val), "r" (paddr) : "memory");
 }
 
+/* -- ICS routines -- */
+static void ics_rm_check_resend(struct kvmppc_xics *xics,
+			struct kvmppc_ics *ics, struct kvmppc_icp *icp)
+{
+	int i;
+
+	arch_spin_lock(&ics->lock);
+
+	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+		struct ics_irq_state *state = &ics->irq_state[i];
+
+		if (!state->resend)
+			continue;
+
+		arch_spin_unlock(&ics->lock);
+		icp_rm_deliver_irq(xics, icp, state->number);
+		arch_spin_lock(&ics->lock);
+	}
+
+	arch_spin_unlock(&ics->lock);
+}
+
+/* -- ICP routines -- */
+
 static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
 				struct kvm_vcpu *this_vcpu)
 {
@@ -116,6 +143,178 @@ static inline int check_too_hard(struct kvmppc_xics *xics,
 	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
 }
 
+static void icp_rm_check_resend(struct kvmppc_xics *xics,
+				struct kvmppc_icp *icp)
+{
+	u32 icsid;
+
+	/* Order this load with the test for need_resend in the caller */
+	smp_rmb();
+	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
+		struct kvmppc_ics *ics = xics->ics[icsid];
+
+		if (!test_and_clear_bit(icsid, icp->resend_map))
+			continue;
+		if (!ics)
+			continue;
+		ics_rm_check_resend(xics, ics, icp);
+	}
+}
+
+static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
+				  u32 *reject)
+{
+	union kvmppc_icp_state old_state, new_state;
+	bool success;
+
+	do {
+		old_state = new_state = READ_ONCE(icp->state);
+
+		*reject = 0;
+
+		/* See if we can deliver */
+		success = new_state.cppr > priority &&
+			new_state.mfrr > priority &&
+			new_state.pending_pri > priority;
+
+		/*
+		 * If we can, check for a rejection and perform the
+		 * delivery
+		 */
+		if (success) {
+			*reject = new_state.xisr;
+			new_state.xisr = irq;
+			new_state.pending_pri = priority;
+		} else {
+			/*
+			 * If we failed to deliver we set need_resend
+			 * so a subsequent CPPR state change causes us
+			 * to try a new delivery.
+			 */
+			new_state.need_resend = true;
+		}
+
+	} while (!icp_rm_try_update(icp, old_state, new_state));
+
+	return success;
+}
+
+static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+			    u32 new_irq)
+{
+	struct ics_irq_state *state;
+	struct kvmppc_ics *ics;
+	u32 reject;
+	u16 src;
+
+	/*
+	 * This is used both for initial delivery of an interrupt and
+	 * for subsequent rejection.
+	 *
+	 * Rejection can be racy vs. resends. We have evaluated the
+	 * rejection in an atomic ICP transaction which is now complete,
+	 * so potentially the ICP can already accept the interrupt again.
+	 *
+	 * So we need to retry the delivery. Essentially the reject path
+	 * boils down to a failed delivery. Always.
+	 *
+	 * Now the interrupt could also have moved to a different target,
+	 * thus we may need to re-do the ICP lookup as well
+	 */
+
+ again:
+	/* Get the ICS state and lock it */
+	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
+	if (!ics) {
+		/* Unsafe increment, but this does not need to be accurate */
+		return;
+	}
+	state = &ics->irq_state[src];
+
+	/* Get a lock on the ICS */
+	arch_spin_lock(&ics->lock);
+
+	/* Get our server */
+	if (!icp || state->server != icp->server_num) {
+		icp = kvmppc_xics_find_server(xics->kvm, state->server);
+		if (!icp) {
+			/* Unsafe increment again*/
+			goto out;
+		}
+	}
+
+	/* Clear the resend bit of that interrupt */
+	state->resend = 0;
+
+	/*
+	 * If masked, bail out
+	 *
+	 * Note: PAPR doesn't mention anything about masked pending
+	 * when doing a resend, only when doing a delivery.
+	 *
+	 * However that would have the effect of losing a masked
+	 * interrupt that was rejected and isn't consistent with
+	 * the whole masked_pending business which is about not
+	 * losing interrupts that occur while masked.
+	 *
+	 * I don't differentiate normal deliveries and resends, this
+	 * implementation will differ from PAPR and not lose such
+	 * interrupts.
+	 */
+	if (state->priority == MASKED) {
+		state->masked_pending = 1;
+		goto out;
+	}
+
+	/*
+	 * Try the delivery, this will set the need_resend flag
+	 * in the ICP as part of the atomic transaction if the
+	 * delivery is not possible.
+	 *
+	 * Note that if successful, the new delivery might have itself
+	 * rejected an interrupt that was "delivered" before we took the
+	 * ics spin lock.
+	 *
+	 * In this case we do the whole sequence all over again for the
+	 * new guy. We cannot assume that the rejected interrupt is less
+	 * favored than the new one, and thus doesn't need to be delivered,
+	 * because by the time we exit icp_rm_try_to_deliver() the target
+	 * processor may well have already consumed & completed it, and thus
+	 * the rejected interrupt might actually be already acceptable.
+	 */
+	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
+		/*
+		 * Delivery was successful, did we reject somebody else ?
+		 */
+		if (reject && reject != XICS_IPI) {
+			arch_spin_unlock(&ics->lock);
+			new_irq = reject;
+			goto again;
+		}
+	} else {
+		/*
+		 * We failed to deliver the interrupt we need to set the
+		 * resend map bit and mark the ICS state as needing a resend
+		 */
+		set_bit(ics->icsid, icp->resend_map);
+		state->resend = 1;
+
+		/*
+		 * If the need_resend flag got cleared in the ICP some time
+		 * between icp_rm_try_to_deliver() atomic update and now, then
+		 * we know it might have missed the resend_map bit. So we
+		 * retry
+		 */
+		smp_mb();
+		if (!icp->state.need_resend) {
+			arch_spin_unlock(&ics->lock);
+			goto again;
+		}
+	}
+ out:
+	arch_spin_unlock(&ics->lock);
+}
+
 static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 			     u8 new_cppr)
 {
@@ -184,8 +383,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 	 * separately here as well.
 	 */
 	if (resend) {
-		icp->rm_action |= XICS_RM_CHECK_RESEND;
-		icp->rm_resend_icp = icp;
+		icp_rm_check_resend(xics, icp);
 	}
 }
 
@@ -300,16 +498,14 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 		}
 	} while (!icp_rm_try_update(icp, old_state, new_state));
 
-	/* Pass rejects to virtual mode */
+	/* Handle reject in real mode */
 	if (reject && reject != XICS_IPI) {
-		this_icp->rm_action |= XICS_RM_REJECT;
-		this_icp->rm_reject = reject;
+		icp_rm_deliver_irq(xics, icp, reject);
 	}
 
-	/* Pass resends to virtual mode */
+	/* Handle resends in real mode */
 	if (resend) {
-		this_icp->rm_action |= XICS_RM_CHECK_RESEND;
-		this_icp->rm_resend_icp = icp;
+		icp_rm_check_resend(xics, icp);
 	}
 
 	return check_too_hard(xics, this_icp);
@@ -365,10 +561,12 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 
 	} while (!icp_rm_try_update(icp, old_state, new_state));
 
-	/* Pass rejects to virtual mode */
+	/*
+	 * Check for rejects. They are handled by doing a new delivery
+	 * attempt (see comments in icp_rm_deliver_irq).
+	 */
 	if (reject && reject != XICS_IPI) {
-		icp->rm_action |= XICS_RM_REJECT;
-		icp->rm_reject = reject;
+		icp_rm_deliver_irq(xics, icp, reject);
 	}
  bail:
 	return check_too_hard(xics, icp);
@@ -416,10 +614,9 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 		goto bail;
 	state = &ics->irq_state[src];
 
-	/* Still asserted, resend it, we make it look like a reject */
+	/* Still asserted, resend it */
 	if (state->asserted) {
-		icp->rm_action |= XICS_RM_REJECT;
-		icp->rm_reject = irq;
+		icp_rm_deliver_irq(xics, icp, irq);
 	}
 
 	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
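
For readers unfamiliar with the lock-free ICP update used throughout the new real-mode code: icp_rm_try_to_deliver() and the hypercall handlers all follow the same read/modify/compare-and-swap loop on the packed ICP state word, so no lock is needed in real mode. The following is only a rough sketch of what the try-update step amounts to; the in-tree icp_rm_try_update() in this file also pushes a newly pending interrupt up to the target vcpu, which is omitted here.

/*
 * Rough sketch, not the in-tree implementation: the ICP state is a single
 * word (union kvmppc_icp_state), so an update can be attempted with a
 * compare-and-swap and simply retried by the caller on failure.
 */
static bool example_try_update(struct kvmppc_icp *icp,
			       union kvmppc_icp_state old,
			       union kvmppc_icp_state new)
{
	/* Succeeds only if nobody changed the state since we read 'old' */
	return cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
}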