Diffstat (limited to 'drivers/infiniband/hw/qib/qib_rc.c')
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c  34
1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index a0931119bd78..eca0c41f1226 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
 	 * there are still requests that haven't been acked.
 	 */
 	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
-	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
+	    (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
 		start_timer(qp);
 
 	while (qp->s_last != qp->s_acked) {
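The added condition consults ib_qib_state_ops, a per-state bitmask table, so the retransmit timer is only re-armed while the QP is still in a state that can process receives; a QP that has dropped into error no longer restarts it from the completion path. A standalone sketch of that table-lookup pattern, with hypothetical state names and flag values rather than the driver's real table:

#include <stdio.h>

/* Miniature of the ib_qib_state_ops idea: one bitmask of permitted
 * operations per QP state, tested with a single AND. Values here are
 * placeholders for illustration. */
enum { QPS_RESET, QPS_RTS, QPS_ERR, QPS_MAX };
#define PROCESS_RECV_OK 0x4	/* hypothetical flag bit */

static const int state_ops[QPS_MAX] = {
	[QPS_RESET] = 0,
	[QPS_RTS]   = PROCESS_RECV_OK,
	[QPS_ERR]   = 0,
};

int main(void)
{
	int state = QPS_ERR;

	/* Mirrors the added guard: only re-arm the retry timer while
	 * the QP state still permits receive processing. */
	if (state_ops[state] & PROCESS_RECV_OK)
		printf("start_timer()\n");
	else
		printf("timer left idle: QP cannot process receives\n");
	return 0;
}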
@@ -1407,6 +1408,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 			    struct qib_ctxtdata *rcd)
 {
 	struct qib_swqe *wqe;
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	enum ib_wc_status status;
 	unsigned long flags;
 	int diff;
@@ -1414,7 +1416,32 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 	u32 aeth;
 	u64 val;
 
+	if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
+		/*
+		 * If ACK'd PSN on SDMA busy list try to make progress to
+		 * reclaim SDMA credits.
+		 */
+		if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
+		    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
+
+			/*
+			 * If send tasklet not running attempt to progress
+			 * SDMA queue.
+			 */
+			if (!(qp->s_flags & QIB_S_BUSY)) {
+				/* Acquire SDMA Lock */
+				spin_lock_irqsave(&ppd->sdma_lock, flags);
+				/* Invoke sdma make progress */
+				qib_sdma_make_progress(ppd);
+				/* Release SDMA Lock */
+				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+			}
+		}
+	}
+
 	spin_lock_irqsave(&qp->s_lock, flags);
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+		goto ack_done;
 
 	/* Ignore invalid responses. */
 	if (qib_cmp24(psn, qp->s_next_psn) >= 0)
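Two things happen in this hunk: the SDMA queue is nudged under ppd->sdma_lock before qp->s_lock is taken (and only when the send tasklet is idle, to avoid duplicating its work), and the QP state is re-validated once s_lock is held so stale responses are dropped via ack_done. The PSN window test relies on qib_cmp24(), which orders 24-bit PSNs circularly so the comparison stays correct across wrap-around. A standalone illustration of how such a comparison can work (same spirit as, but not a copy of, the driver's helper):

#include <stdio.h>
#include <stdint.h>

/* Circular compare of 24-bit PSNs: shift the difference so bit 23
 * lands in the sign bit, then a signed test against zero says which
 * PSN is ahead modulo 2^24. */
static int cmp24(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8);
}

int main(void)
{
	/* 0x000002 is "after" 0xfffffe once the 24-bit counter wraps. */
	printf("%d\n", cmp24(0x000002, 0xfffffe) > 0);	/* prints 1 */
	printf("%d\n", cmp24(0x000001, 0x000005) < 0);	/* prints 1 */
	return 0;
}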
@@ -2068,7 +2095,10 @@ send_last:
 			goto nack_op_err;
 		if (!ret)
 			goto rnr_nak;
-		goto send_last_imm;
+		wc.ex.imm_data = ohdr->u.rc.imm_data;
+		hdrsize += 4;
+		wc.wc_flags = IB_WC_WITH_IMM;
+		goto send_last;
 
 	case OP(RDMA_READ_REQUEST): {
 		struct qib_ack_entry *e;
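Replacing the send_last_imm jump with the expanded sequence makes the immediate-data bookkeeping explicit: the 32-bit immediate is carried in the header, not the payload, so hdrsize grows by 4 before the payload length is derived, and the completion is flagged IB_WC_WITH_IMM. A standalone sketch of that length arithmetic (the constants and the tlen breakdown are illustrative assumptions, not values from the driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t payload = 1024;	/* example message size */
	uint32_t hdrsize = 20;		/* assumed base header bytes */
	uint32_t pad = 0;
	int has_imm = 1;

	/* Wire total: headers + payload + pad + 4-byte ICRC, plus 4
	 * more header bytes when immediate data rides along. */
	uint32_t tlen = hdrsize + (has_imm ? 4 : 0) + payload + pad + 4;

	if (has_imm)
		hdrsize += 4;	/* count the immediate as header, not data */

	/* The receiver recovers the payload size from the total. */
	printf("payload: %u\n", tlen - hdrsize - pad - 4);	/* 1024 */
	return 0;
}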