author		Ralph Campbell <ralph.campbell@qlogic.com>	2006-09-29 17:37:51 -0400
committer	Roland Dreier <rolandd@cisco.com>	2006-10-02 17:52:17 -0400
commit		13b18c86176cab34ef30ef0a5962fcb0305f7269 (patch)
tree		1fb6d0d0af9307256e1409cb030e8ed889b211eb /drivers/infiniband/hw
parent		3f168d2b66d2314fea40614a3b966c1a0b6241a9 (diff)
IB/ipath: Fix RDMA reads
The PSN used to generate the request following an RDMA read was incorrect,
and some state bookkeeping wasn't maintained correctly. This patch fixes
that.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
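In outline, the rule the patch enforces on the send side: an RDMA read
reserves one PSN per expected response packet (its first PSN through
wqe->lpsn), so the request that follows it must start at lpsn + 1, while
every other opcode consumes a single PSN. A minimal sketch of that rule
(hypothetical helper, not driver code; the 24-bit mask matches
IPATH_PSN_MASK):

	#include <stdint.h>

	/*
	 * Hypothetical illustration of the PSN-advance rule: an RDMA
	 * read occupies PSNs first..lpsn, one per response packet, so
	 * the next request begins at lpsn + 1; other opcodes advance
	 * the PSN by exactly one.  PSNs are 24 bits and wrap.
	 */
	static uint32_t next_request_psn(uint32_t cur_psn, uint32_t lpsn,
					 int is_rdma_read)
	{
		return (is_rdma_read ? lpsn + 1 : cur_psn + 1) & 0xffffff;
	}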
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_rc.c | 59
1 file changed, 32 insertions(+), 27 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index a504cf67f272..ce6038743c5c 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -241,10 +241,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 			 * original work request since we may need to resend
 			 * it.
 			 */
-			qp->s_sge.sge = wqe->sg_list[0];
-			qp->s_sge.sg_list = wqe->sg_list + 1;
-			qp->s_sge.num_sge = wqe->wr.num_sge;
-			qp->s_len = len = wqe->length;
+			len = wqe->length;
 			ss = &qp->s_sge;
 			bth2 = 0;
 			switch (wqe->wr.opcode) {
@@ -368,14 +365,23 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 			default:
 				goto done;
 			}
+			qp->s_sge.sge = wqe->sg_list[0];
+			qp->s_sge.sg_list = wqe->sg_list + 1;
+			qp->s_sge.num_sge = wqe->wr.num_sge;
+			qp->s_len = wqe->length;
 			if (newreq) {
 				qp->s_tail++;
 				if (qp->s_tail >= qp->s_size)
 					qp->s_tail = 0;
 			}
-			bth2 |= qp->s_psn++ & IPATH_PSN_MASK;
-			if ((int)(qp->s_psn - qp->s_next_psn) > 0)
-				qp->s_next_psn = qp->s_psn;
+			bth2 |= qp->s_psn & IPATH_PSN_MASK;
+			if (wqe->wr.opcode == IB_WR_RDMA_READ)
+				qp->s_psn = wqe->lpsn + 1;
+			else {
+				qp->s_psn++;
+				if ((int)(qp->s_psn - qp->s_next_psn) > 0)
+					qp->s_next_psn = qp->s_psn;
+			}
 			/*
 			 * Put the QP on the pending list so lost ACKs will cause
 			 * a retry.  More than one request can be pending so the
@@ -690,13 +696,6 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
 	struct ipath_ibdev *dev;
 
-	/*
-	 * If there are no requests pending, we are done.
-	 */
-	if (ipath_cmp24(psn, qp->s_next_psn) >= 0 ||
-	    qp->s_last == qp->s_tail)
-		goto done;
-
 	if (qp->s_retry == 0) {
 		wc->wr_id = wqe->wr.wr_id;
 		wc->status = IB_WC_RETRY_EXC_ERR;
@@ -731,8 +730,6 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 	dev->n_rc_resends += (int)qp->s_psn - (int)psn;
 
 	reset_psn(qp, psn);
-
-done:
 	tasklet_hi_schedule(&qp->s_task);
 
 bail:
@@ -765,6 +762,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 	struct ib_wc wc;
 	struct ipath_swqe *wqe;
 	int ret = 0;
+	u32 ack_psn;
 
 	/*
 	 * Remove the QP from the timeout queue (or RNR timeout queue).
@@ -777,26 +775,26 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 		list_del_init(&qp->timerwait);
 	spin_unlock(&dev->pending_lock);
 
+	/* Nothing is pending to ACK/NAK. */
+	if (unlikely(qp->s_last == qp->s_tail))
+		goto bail;
+
 	/*
 	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
 	 * requests and implicitly NAK RDMA read and atomic requests issued
 	 * before the NAK'ed request.  The MSN won't include the NAK'ed
 	 * request but will include an ACK'ed request(s).
 	 */
+	ack_psn = psn;
+	if (aeth >> 29)
+		ack_psn--;
 	wqe = get_swqe_ptr(qp, qp->s_last);
 
-	/* Nothing is pending to ACK/NAK. */
-	if (qp->s_last == qp->s_tail)
-		goto bail;
-
 	/*
 	 * The MSN might be for a later WQE than the PSN indicates so
 	 * only complete WQEs that the PSN finishes.
 	 */
-	while (ipath_cmp24(psn, wqe->lpsn) >= 0) {
-		/* If we are ACKing a WQE, the MSN should be >= the SSN. */
-		if (ipath_cmp24(aeth, wqe->ssn) < 0)
-			break;
+	while (ipath_cmp24(ack_psn, wqe->lpsn) >= 0) {
 		/*
 		 * If this request is a RDMA read or atomic, and the ACK is
 		 * for a later operation, this ACK NAKs the RDMA read or
@@ -807,7 +805,8 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 		 * is sent but before the response is received.
 		 */
 		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
-		     opcode != OP(RDMA_READ_RESPONSE_LAST)) ||
+		     (opcode != OP(RDMA_READ_RESPONSE_LAST) ||
+		      ipath_cmp24(ack_psn, wqe->lpsn) != 0)) ||
 		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
 		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
 		     (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
@@ -825,6 +824,10 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 			 */
 			goto bail;
 		}
+		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+			tasklet_hi_schedule(&qp->s_task);
 		/* Post a send completion queue entry if requested. */
 		if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
 		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
@@ -1055,7 +1058,8 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 		/* no AETH, no ACK */
 		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
 			dev->n_rdma_seq++;
-			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+			if (qp->s_last != qp->s_tail)
+				ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
 			goto ack_done;
 		}
 	rdma_read:
@@ -1091,7 +1095,8 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 		/* ACKs READ req. */
 		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
 			dev->n_rdma_seq++;
-			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+			if (qp->s_last != qp->s_tail)
+				ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
 			goto ack_done;
 		}
 		/* FALLTHROUGH */
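
A note on the ack_psn adjustment in do_rc_ack() above: the top three AETH
bits are zero only for a plain ACK (a nonzero value indicates a NAK or RNR
NAK), and a NAK names the PSN of the packet being rejected, which is
therefore not yet acknowledged. A minimal sketch of that adjustment
(hypothetical helper, assuming the InfiniBand AETH layout):

	#include <stdint.h>

	/*
	 * Hypothetical illustration: for a plain ACK the reported PSN
	 * is the last one acknowledged; for a NAK/RNR NAK (aeth >> 29
	 * nonzero) only PSNs strictly before the reported one are
	 * complete, so back up by one before retiring WQEs.
	 */
	static uint32_t last_completed_psn(uint32_t aeth, uint32_t psn)
	{
		return ((aeth >> 29) ? psn - 1 : psn) & 0xffffff;
	}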