aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLi Zhong <zhong@linux.vnet.ibm.com>2016-11-10 23:57:36 -0500
committerPaul Mackerras <paulus@ozlabs.org>2017-01-26 18:27:21 -0500
commit21acd0e4df04f02176e773468658c3cebff096bb (patch)
tree88a95b3fe1edda599b48c4a4fb6c14b54133e5fe
parent17d48610ae0fa218aa386b16a538c792991a3652 (diff)
KVM: PPC: Book 3S: XICS: Don't lock twice when checking for resend
This patch improves the code that takes the lock twice to check the resend flag and do the actual resending: it checks the resend flag locklessly, and adds a boolean parameter check_resend to icp_[rm_]deliver_irq(), so the resend flag can be checked under the lock when doing the delivery. We need to make sure that when we clear the ics's bit in the icp's resend_map, we don't miss the resend flag of the irqs that set the bit. This is ordered through the barrier in test_and_clear_bit(), and a newly added wmb between setting an irq's resend flag and the icp's resend_map. Signed-off-by: Li Zhong <zhong@linux.vnet.ibm.com> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c40
-rw-r--r--arch/powerpc/kvm/book3s_xics.c59
2 files changed, 48 insertions, 51 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 30f82c79de5d..44cfdd281fa1 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -35,7 +35,7 @@ int kvm_irq_bypass = 1;
35EXPORT_SYMBOL(kvm_irq_bypass); 35EXPORT_SYMBOL(kvm_irq_bypass);
36 36
37static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, 37static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
38 u32 new_irq); 38 u32 new_irq, bool check_resend);
39static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu); 39static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu);
40 40
41/* -- ICS routines -- */ 41/* -- ICS routines -- */
@@ -44,22 +44,12 @@ static void ics_rm_check_resend(struct kvmppc_xics *xics,
44{ 44{
45 int i; 45 int i;
46 46
47 arch_spin_lock(&ics->lock);
48
49 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { 47 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
50 struct ics_irq_state *state = &ics->irq_state[i]; 48 struct ics_irq_state *state = &ics->irq_state[i];
51 49 if (state->resend)
52 if (!state->resend) 50 icp_rm_deliver_irq(xics, icp, state->number, true);
53 continue;
54
55 state->resend = 0;
56
57 arch_spin_unlock(&ics->lock);
58 icp_rm_deliver_irq(xics, icp, state->number);
59 arch_spin_lock(&ics->lock);
60 } 51 }
61 52
62 arch_spin_unlock(&ics->lock);
63} 53}
64 54
65/* -- ICP routines -- */ 55/* -- ICP routines -- */
@@ -292,7 +282,7 @@ static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
292} 282}
293 283
294static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, 284static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
295 u32 new_irq) 285 u32 new_irq, bool check_resend)
296{ 286{
297 struct ics_irq_state *state; 287 struct ics_irq_state *state;
298 struct kvmppc_ics *ics; 288 struct kvmppc_ics *ics;
@@ -337,6 +327,10 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
337 } 327 }
338 } 328 }
339 329
330 if (check_resend)
331 if (!state->resend)
332 goto out;
333
340 /* Clear the resend bit of that interrupt */ 334 /* Clear the resend bit of that interrupt */
341 state->resend = 0; 335 state->resend = 0;
342 336
@@ -384,6 +378,7 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
384 arch_spin_unlock(&ics->lock); 378 arch_spin_unlock(&ics->lock);
385 icp->n_reject++; 379 icp->n_reject++;
386 new_irq = reject; 380 new_irq = reject;
381 check_resend = 0;
387 goto again; 382 goto again;
388 } 383 }
389 } else { 384 } else {
@@ -391,10 +386,16 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
391 * We failed to deliver the interrupt we need to set the 386 * We failed to deliver the interrupt we need to set the
392 * resend map bit and mark the ICS state as needing a resend 387 * resend map bit and mark the ICS state as needing a resend
393 */ 388 */
394 set_bit(ics->icsid, icp->resend_map);
395 state->resend = 1; 389 state->resend = 1;
396 390
397 /* 391 /*
392 * Make sure when checking resend, we don't miss the resend
393 * if resend_map bit is seen and cleared.
394 */
395 smp_wmb();
396 set_bit(ics->icsid, icp->resend_map);
397
398 /*
398 * If the need_resend flag got cleared in the ICP some time 399 * If the need_resend flag got cleared in the ICP some time
399 * between icp_rm_try_to_deliver() atomic update and now, then 400 * between icp_rm_try_to_deliver() atomic update and now, then
400 * we know it might have missed the resend_map bit. So we 401 * we know it might have missed the resend_map bit. So we
@@ -404,6 +405,7 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
404 if (!icp->state.need_resend) { 405 if (!icp->state.need_resend) {
405 state->resend = 0; 406 state->resend = 0;
406 arch_spin_unlock(&ics->lock); 407 arch_spin_unlock(&ics->lock);
408 check_resend = 0;
407 goto again; 409 goto again;
408 } 410 }
409 } 411 }
@@ -598,7 +600,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
598 /* Handle reject in real mode */ 600 /* Handle reject in real mode */
599 if (reject && reject != XICS_IPI) { 601 if (reject && reject != XICS_IPI) {
600 this_icp->n_reject++; 602 this_icp->n_reject++;
601 icp_rm_deliver_irq(xics, icp, reject); 603 icp_rm_deliver_irq(xics, icp, reject, false);
602 } 604 }
603 605
604 /* Handle resends in real mode */ 606 /* Handle resends in real mode */
@@ -666,7 +668,7 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
666 */ 668 */
667 if (reject && reject != XICS_IPI) { 669 if (reject && reject != XICS_IPI) {
668 icp->n_reject++; 670 icp->n_reject++;
669 icp_rm_deliver_irq(xics, icp, reject); 671 icp_rm_deliver_irq(xics, icp, reject, false);
670 } 672 }
671 bail: 673 bail:
672 return check_too_hard(xics, icp); 674 return check_too_hard(xics, icp);
@@ -704,7 +706,7 @@ static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq)
704 } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old); 706 } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
705 707
706 if (pq_new & PQ_PRESENTED) 708 if (pq_new & PQ_PRESENTED)
707 icp_rm_deliver_irq(xics, NULL, irq); 709 icp_rm_deliver_irq(xics, NULL, irq, false);
708 710
709 if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) { 711 if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
710 icp->rm_action |= XICS_RM_NOTIFY_EOI; 712 icp->rm_action |= XICS_RM_NOTIFY_EOI;
@@ -874,7 +876,7 @@ long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
874 876
875 /* Test P=1, Q=0, this is the only case where we present */ 877 /* Test P=1, Q=0, this is the only case where we present */
876 if (pq_new == PQ_PRESENTED) 878 if (pq_new == PQ_PRESENTED)
877 icp_rm_deliver_irq(xics, icp, irq); 879 icp_rm_deliver_irq(xics, icp, irq, false);
878 880
879 /* EOI the interrupt */ 881 /* EOI the interrupt */
880 icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr, 882 icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr,
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index c7620622c846..e48803e2918d 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -63,7 +63,7 @@
63/* -- ICS routines -- */ 63/* -- ICS routines -- */
64 64
65static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, 65static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
66 u32 new_irq); 66 u32 new_irq, bool check_resend);
67 67
68/* 68/*
69 * Return value ideally indicates how the interrupt was handled, but no 69 * Return value ideally indicates how the interrupt was handled, but no
@@ -117,7 +117,7 @@ static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
117 117
118 /* Test P=1, Q=0, this is the only case where we present */ 118 /* Test P=1, Q=0, this is the only case where we present */
119 if (pq_new == PQ_PRESENTED) 119 if (pq_new == PQ_PRESENTED)
120 icp_deliver_irq(xics, NULL, irq); 120 icp_deliver_irq(xics, NULL, irq, false);
121 121
122 /* Record which CPU this arrived on for passed-through interrupts */ 122 /* Record which CPU this arrived on for passed-through interrupts */
123 if (state->host_irq) 123 if (state->host_irq)
@@ -131,31 +131,14 @@ static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
131{ 131{
132 int i; 132 int i;
133 133
134 unsigned long flags;
135
136 local_irq_save(flags);
137 arch_spin_lock(&ics->lock);
138
139 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { 134 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
140 struct ics_irq_state *state = &ics->irq_state[i]; 135 struct ics_irq_state *state = &ics->irq_state[i];
141 136 if (state->resend) {
142 if (!state->resend) 137 XICS_DBG("resend %#x prio %#x\n", state->number,
143 continue; 138 state->priority);
144 139 icp_deliver_irq(xics, icp, state->number, true);
145 state->resend = 0; 140 }
146
147 XICS_DBG("resend %#x prio %#x\n", state->number,
148 state->priority);
149
150 arch_spin_unlock(&ics->lock);
151 local_irq_restore(flags);
152 icp_deliver_irq(xics, icp, state->number);
153 local_irq_save(flags);
154 arch_spin_lock(&ics->lock);
155 } 141 }
156
157 arch_spin_unlock(&ics->lock);
158 local_irq_restore(flags);
159} 142}
160 143
161static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics, 144static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
@@ -209,7 +192,7 @@ int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
209 state->masked_pending, state->resend); 192 state->masked_pending, state->resend);
210 193
211 if (write_xive(xics, ics, state, server, priority, priority)) 194 if (write_xive(xics, ics, state, server, priority, priority))
212 icp_deliver_irq(xics, icp, irq); 195 icp_deliver_irq(xics, icp, irq, false);
213 196
214 return 0; 197 return 0;
215} 198}
@@ -262,7 +245,7 @@ int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
262 245
263 if (write_xive(xics, ics, state, state->server, state->saved_priority, 246 if (write_xive(xics, ics, state, state->server, state->saved_priority,
264 state->saved_priority)) 247 state->saved_priority))
265 icp_deliver_irq(xics, icp, irq); 248 icp_deliver_irq(xics, icp, irq, false);
266 249
267 return 0; 250 return 0;
268} 251}
@@ -396,7 +379,7 @@ static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
396} 379}
397 380
398static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, 381static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
399 u32 new_irq) 382 u32 new_irq, bool check_resend)
400{ 383{
401 struct ics_irq_state *state; 384 struct ics_irq_state *state;
402 struct kvmppc_ics *ics; 385 struct kvmppc_ics *ics;
@@ -442,6 +425,10 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
442 } 425 }
443 } 426 }
444 427
428 if (check_resend)
429 if (!state->resend)
430 goto out;
431
445 /* Clear the resend bit of that interrupt */ 432 /* Clear the resend bit of that interrupt */
446 state->resend = 0; 433 state->resend = 0;
447 434
@@ -490,6 +477,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
490 arch_spin_unlock(&ics->lock); 477 arch_spin_unlock(&ics->lock);
491 local_irq_restore(flags); 478 local_irq_restore(flags);
492 new_irq = reject; 479 new_irq = reject;
480 check_resend = 0;
493 goto again; 481 goto again;
494 } 482 }
495 } else { 483 } else {
@@ -497,10 +485,16 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
497 * We failed to deliver the interrupt we need to set the 485 * We failed to deliver the interrupt we need to set the
498 * resend map bit and mark the ICS state as needing a resend 486 * resend map bit and mark the ICS state as needing a resend
499 */ 487 */
500 set_bit(ics->icsid, icp->resend_map);
501 state->resend = 1; 488 state->resend = 1;
502 489
503 /* 490 /*
491 * Make sure when checking resend, we don't miss the resend
492 * if resend_map bit is seen and cleared.
493 */
494 smp_wmb();
495 set_bit(ics->icsid, icp->resend_map);
496
497 /*
504 * If the need_resend flag got cleared in the ICP some time 498 * If the need_resend flag got cleared in the ICP some time
505 * between icp_try_to_deliver() atomic update and now, then 499 * between icp_try_to_deliver() atomic update and now, then
506 * we know it might have missed the resend_map bit. So we 500 * we know it might have missed the resend_map bit. So we
@@ -511,6 +505,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
511 state->resend = 0; 505 state->resend = 0;
512 arch_spin_unlock(&ics->lock); 506 arch_spin_unlock(&ics->lock);
513 local_irq_restore(flags); 507 local_irq_restore(flags);
508 check_resend = 0;
514 goto again; 509 goto again;
515 } 510 }
516 } 511 }
@@ -702,7 +697,7 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
702 697
703 /* Handle reject */ 698 /* Handle reject */
704 if (reject && reject != XICS_IPI) 699 if (reject && reject != XICS_IPI)
705 icp_deliver_irq(xics, icp, reject); 700 icp_deliver_irq(xics, icp, reject, false);
706 701
707 /* Handle resend */ 702 /* Handle resend */
708 if (resend) 703 if (resend)
@@ -782,7 +777,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
782 * attempt (see comments in icp_deliver_irq). 777 * attempt (see comments in icp_deliver_irq).
783 */ 778 */
784 if (reject && reject != XICS_IPI) 779 if (reject && reject != XICS_IPI)
785 icp_deliver_irq(xics, icp, reject); 780 icp_deliver_irq(xics, icp, reject, false);
786} 781}
787 782
788static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq) 783static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
@@ -818,7 +813,7 @@ static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
818 } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old); 813 } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
819 814
820 if (pq_new & PQ_PRESENTED) 815 if (pq_new & PQ_PRESENTED)
821 icp_deliver_irq(xics, icp, irq); 816 icp_deliver_irq(xics, icp, irq, false);
822 817
823 kvm_notify_acked_irq(vcpu->kvm, 0, irq); 818 kvm_notify_acked_irq(vcpu->kvm, 0, irq);
824 819
@@ -1307,7 +1302,7 @@ static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
1307 local_irq_restore(flags); 1302 local_irq_restore(flags);
1308 1303
1309 if (val & KVM_XICS_PENDING) 1304 if (val & KVM_XICS_PENDING)
1310 icp_deliver_irq(xics, NULL, irqp->number); 1305 icp_deliver_irq(xics, NULL, irqp->number, false);
1311 1306
1312 return 0; 1307 return 0;
1313} 1308}