author	Ralph Campbell <ralph.campbell@qlogic.com>	2007-07-25 14:08:28 -0400
committer	Roland Dreier <rolandd@cisco.com>	2007-10-09 23:05:49 -0400
commit	4ee97180ac76deb5a715ac45b7d7516e6ee82ae7 (patch)
tree	6683d1c34d3f36271a9d8275a645ce67222ffc56	/drivers/infiniband/hw/ipath/ipath_rc.c
parent	210d6ca3db058cd1d6e6fd235ee3e25d6ac221cd (diff)
IB/ipath: Change UD to queue work requests like RC & UC
The code to post UD sends tried to process work requests at the time
ib_post_send() is called without using a WQE queue.  This was fine as long
as HW resources were available for sending a packet.  This patch changes UD
to be handled more like RC and UC and shares more code.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
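As a rough illustration of the queuing model described above (this is not code from the ipath driver; the toy_* names and structures are invented for this sketch), the post routine only appends a software work-queue entry and returns, and a separate drain step consumes queued entries whenever send resources are available, which is how RC and UC already behave:

/*
 * Minimal userspace sketch of the "queue at post time, send when
 * resources allow" pattern.  All identifiers are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_QUEUE_DEPTH 8

struct toy_swqe {                  /* software work-queue entry */
	uint64_t wr_id;
	uint32_t length;
};

struct toy_qp {
	struct toy_swqe wq[TOY_QUEUE_DEPTH];
	unsigned int head;         /* next free slot (producer) */
	unsigned int tail;         /* next entry to send (consumer) */
};

/* Queue the request instead of sending it immediately. */
static int toy_post_send(struct toy_qp *qp, uint64_t wr_id, uint32_t length)
{
	unsigned int next = (qp->head + 1) % TOY_QUEUE_DEPTH;

	if (next == qp->tail)
		return -1;         /* queue full */
	qp->wq[qp->head].wr_id = wr_id;
	qp->wq[qp->head].length = length;
	qp->head = next;
	return 0;
}

/* Drain queued entries while "send resources" (credits) remain. */
static void toy_drain(struct toy_qp *qp, int credits)
{
	while (credits-- > 0 && qp->tail != qp->head) {
		struct toy_swqe *wqe = &qp->wq[qp->tail];

		printf("sending wr_id=%llu len=%u\n",
		       (unsigned long long)wqe->wr_id,
		       (unsigned)wqe->length);
		qp->tail = (qp->tail + 1) % TOY_QUEUE_DEPTH;
	}
}

int main(void)
{
	struct toy_qp qp = { .head = 0, .tail = 0 };

	toy_post_send(&qp, 1, 256);   /* posting never waits on HW */
	toy_post_send(&qp, 2, 512);
	toy_drain(&qp, 1);            /* only one send resource free now */
	toy_drain(&qp, 4);            /* remaining entries go out later */
	return 0;
}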
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_rc.c')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_rc.c	61
1 file changed, 41 insertions(+), 20 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 46744ea2bab..53259daeb4f 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -81,9 +81,8 @@ static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
  * Note that we are in the responder's side of the QP context.
  * Note the QP s_lock must be held.
  */
-static int ipath_make_rc_ack(struct ipath_qp *qp,
-			     struct ipath_other_headers *ohdr,
-			     u32 pmtu, u32 *bth0p, u32 *bth2p)
+static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,
+			     struct ipath_other_headers *ohdr, u32 pmtu)
 {
 	struct ipath_ack_entry *e;
 	u32 hwords;
@@ -192,8 +191,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 	}
 	qp->s_hdrwords = hwords;
 	qp->s_cur_size = len;
-	*bth0p = bth0 | (1 << 22); /* Set M bit */
-	*bth2p = bth2;
+	ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
 	return 1;
 
 bail:
@@ -203,32 +201,39 @@ bail:
 /**
  * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
  * @qp: a pointer to the QP
- * @ohdr: a pointer to the IB header being constructed
- * @pmtu: the path MTU
- * @bth0p: pointer to the BTH opcode word
- * @bth2p: pointer to the BTH PSN word
  *
  * Return 1 if constructed; otherwise, return 0.
- * Note the QP s_lock must be held and interrupts disabled.
  */
-int ipath_make_rc_req(struct ipath_qp *qp,
-		      struct ipath_other_headers *ohdr,
-		      u32 pmtu, u32 *bth0p, u32 *bth2p)
+int ipath_make_rc_req(struct ipath_qp *qp)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+	struct ipath_other_headers *ohdr;
 	struct ipath_sge_state *ss;
 	struct ipath_swqe *wqe;
 	u32 hwords;
 	u32 len;
 	u32 bth0;
 	u32 bth2;
+	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
 	char newreq;
+	unsigned long flags;
+	int ret = 0;
+
+	ohdr = &qp->s_hdr.u.oth;
+	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+		ohdr = &qp->s_hdr.u.l.oth;
+
+	/*
+	 * The lock is needed to synchronize between the sending tasklet,
+	 * the receive interrupt handler, and timeout resends.
+	 */
+	spin_lock_irqsave(&qp->s_lock, flags);
 
 	/* Sending responses has higher priority over sending requests. */
 	if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
 	     (qp->s_flags & IPATH_S_ACK_PENDING) ||
 	     qp->s_ack_state != OP(ACKNOWLEDGE)) &&
-	    ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p))
+	    ipath_make_rc_ack(dev, qp, ohdr, pmtu))
 		goto done;
 
 	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
@@ -560,13 +565,12 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 	qp->s_hdrwords = hwords;
 	qp->s_cur_sge = ss;
 	qp->s_cur_size = len;
-	*bth0p = bth0 | (qp->s_state << 24);
-	*bth2p = bth2;
+	ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2);
 done:
-	return 1;
-
+	ret = 1;
 bail:
-	return 0;
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+	return ret;
 }
 
 /**
@@ -627,7 +631,7 @@ static void send_rc_ack(struct ipath_qp *qp)
 	/*
 	 * If we can send the ACK, clear the ACK state.
 	 */
-	if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
+	if (ipath_verbs_send(qp, &hdr, hwords, NULL, 0) == 0) {
 		dev->n_unicast_xmit++;
 		goto done;
 	}
@@ -757,7 +761,9 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 	wc->vendor_err = 0;
 	wc->byte_len = 0;
 	wc->qp = &qp->ibqp;
+	wc->imm_data = 0;
 	wc->src_qp = qp->remote_qpn;
+	wc->wc_flags = 0;
 	wc->pkey_index = 0;
 	wc->slid = qp->remote_ah_attr.dlid;
 	wc->sl = qp->remote_ah_attr.sl;
@@ -1041,7 +1047,9 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
 	wc.vendor_err = 0;
 	wc.byte_len = 0;
 	wc.qp = &qp->ibqp;
+	wc.imm_data = 0;
 	wc.src_qp = qp->remote_qpn;
+	wc.wc_flags = 0;
 	wc.pkey_index = 0;
 	wc.slid = qp->remote_ah_attr.dlid;
 	wc.sl = qp->remote_ah_attr.sl;
@@ -1454,6 +1462,19 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 		goto send_ack;
 	}
 	/*
+	 * Try to send a simple ACK to work around a Mellanox bug
+	 * which doesn't accept a RDMA read response or atomic
+	 * response as an ACK for earlier SENDs or RDMA writes.
+	 */
+	if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
+	    !(qp->s_flags & IPATH_S_ACK_PENDING) &&
+	    qp->s_ack_state == OP(ACKNOWLEDGE)) {
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+		qp->r_nak_state = 0;
+		qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
+		goto send_ack;
+	}
+	/*
 	 * Resend the RDMA read or atomic op which
 	 * ACKs this duplicate request.
 	 */