author     Ralph Campbell <ralph.campbell@qlogic.com>    2007-03-15 17:44:54 -0400
committer  Roland Dreier <rolandd@cisco.com>              2007-04-18 23:20:55 -0400
commit     6f5c407460bba332d6bee52e19f2305539395511 (patch)
tree       fa50a9a8d5809e32a8071bd9937cfa3e409047b1 /drivers/infiniband
parent     0434d271fddaabd65aaa4dbd0145112d6e8aa388 (diff)
IB/ipath: Fix PSN update for RC retries
This patch fixes a number of bugs with updating the PSN for retries of RC requests.

Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
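Background for the hunks below: InfiniBand packet sequence numbers are 24-bit values that wrap, so the old signed 32-bit test (int)(qp->s_psn - qp->s_next_psn) > 0 can report the wrong ordering once a PSN wraps past 2^24. The fix switches those comparisons to the driver's ipath_cmp24() helper. As a rough, self-contained sketch of what such a wrap-aware comparison has to do (the name cmp24 and the exact definition here are illustrative, not the driver's code):

#include <stdint.h>

#define PSN_MASK 0xFFFFFF                       /* PSNs are 24 bits wide */

/*
 * Compare two 24-bit PSNs modulo 2^24.  Returns >0 if a is ahead of b,
 * 0 if they are equal, and <0 if a is behind b, treating differences of
 * less than half the PSN space (2^23) as forward progress.
 */
static int cmp24(uint32_t a, uint32_t b)
{
        uint32_t d = (a - b) & PSN_MASK;        /* difference modulo 2^24 */

        if (d == 0)
                return 0;
        return d < 0x800000 ? 1 : -1;           /* ahead vs. behind */
}

For example, with a = 0x000001 and b = 0xFFFFFE the plain signed subtraction is negative and claims a is behind b, even though a is the next PSN after the wrap; the modulo-2^24 form above reports a as ahead of b, which is what the retry logic needs.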
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c | 65
1 file changed, 38 insertions(+), 27 deletions(-)
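One more piece of context for the do_rc_ack() hunk further down: when an ACK arrives for a PSN at or beyond the PSN the sender was about to (re)send, the sender can jump ahead and resume from the next packet the receiver actually wants instead of replaying packets that are already acknowledged. A minimal sketch of that idea, reusing the illustrative cmp24()/PSN_MASK definitions above (advance_on_ack() is a made-up name, not a driver function):

/*
 * Partial-ACK handling, sketched: if the acknowledged PSN has caught up
 * with or passed the PSN we were about to resend, skip forward to the
 * packet after the acknowledged one; otherwise keep the current PSN.
 */
static uint32_t advance_on_ack(uint32_t send_psn, uint32_t acked_psn)
{
        if (cmp24(send_psn, acked_psn) <= 0)
                return (acked_psn + 1) & PSN_MASK;      /* skip ahead */
        return send_psn;                                /* stale ACK; keep going */
}

In the patch itself this shows up as the ipath_cmp24(qp->s_psn, psn) <= 0 checks followed by reset_psn(qp, psn + 1).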
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 2e4d544957af..d6aa14afa268 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -444,7 +444,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
                         qp->s_psn = wqe->lpsn + 1;
                 else {
                         qp->s_psn++;
-                        if ((int)(qp->s_psn - qp->s_next_psn) > 0)
+                        if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
                                 qp->s_next_psn = qp->s_psn;
                 }
                 /*
@@ -471,7 +471,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
                 /* FALLTHROUGH */
         case OP(SEND_MIDDLE):
                 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
-                if ((int)(qp->s_psn - qp->s_next_psn) > 0)
+                if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
                         qp->s_next_psn = qp->s_psn;
                 ss = &qp->s_sge;
                 len = qp->s_len;
@@ -507,7 +507,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
                 /* FALLTHROUGH */
         case OP(RDMA_WRITE_MIDDLE):
                 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
-                if ((int)(qp->s_psn - qp->s_next_psn) > 0)
+                if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
                         qp->s_next_psn = qp->s_psn;
                 ss = &qp->s_sge;
                 len = qp->s_len;
@@ -546,7 +546,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
                 qp->s_state = OP(RDMA_READ_REQUEST);
                 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
                 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
-                if ((int)(qp->s_psn - qp->s_next_psn) > 0)
+                if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
                         qp->s_next_psn = qp->s_psn;
                 ss = NULL;
                 len = 0;
@@ -779,7 +779,7 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
         if (wqe->wr.opcode == IB_WR_RDMA_READ)
                 dev->n_rc_resends++;
         else
-                dev->n_rc_resends += (int)qp->s_psn - (int)psn;
+                dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;
 
         reset_psn(qp, psn);
         tasklet_hi_schedule(&qp->s_task);
@@ -915,15 +915,19 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
                 if (qp->s_last == qp->s_cur) {
                         if (++qp->s_cur >= qp->s_size)
                                 qp->s_cur = 0;
+                        qp->s_last = qp->s_cur;
+                        if (qp->s_last == qp->s_tail)
+                                break;
                         wqe = get_swqe_ptr(qp, qp->s_cur);
                         qp->s_state = OP(SEND_LAST);
                         qp->s_psn = wqe->psn;
+                } else {
+                        if (++qp->s_last >= qp->s_size)
+                                qp->s_last = 0;
+                        if (qp->s_last == qp->s_tail)
+                                break;
+                        wqe = get_swqe_ptr(qp, qp->s_last);
                 }
-                if (++qp->s_last >= qp->s_size)
-                        qp->s_last = 0;
-                wqe = get_swqe_ptr(qp, qp->s_last);
-                if (qp->s_last == qp->s_tail)
-                        break;
         }
 
         switch (aeth >> 29) {
@@ -935,6 +939,18 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
                         list_add_tail(&qp->timerwait,
                                       &dev->pending[dev->pending_index]);
                         spin_unlock(&dev->pending_lock);
+                        /*
+                         * If we get a partial ACK for a resent operation,
+                         * we can stop resending the earlier packets and
+                         * continue with the next packet the receiver wants.
+                         */
+                        if (ipath_cmp24(qp->s_psn, psn) <= 0) {
+                                reset_psn(qp, psn + 1);
+                                tasklet_hi_schedule(&qp->s_task);
+                        }
+                } else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
+                        qp->s_state = OP(SEND_LAST);
+                        qp->s_psn = psn + 1;
                 }
                 ipath_get_credit(qp, aeth);
                 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
@@ -945,22 +961,23 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 
         case 1:         /* RNR NAK */
                 dev->n_rnr_naks++;
+                if (qp->s_last == qp->s_tail)
+                        goto bail;
                 if (qp->s_rnr_retry == 0) {
-                        if (qp->s_last == qp->s_tail)
-                                goto bail;
-
                         wc.status = IB_WC_RNR_RETRY_EXC_ERR;
                         goto class_b;
                 }
                 if (qp->s_rnr_retry_cnt < 7)
                         qp->s_rnr_retry--;
-                if (qp->s_last == qp->s_tail)
-                        goto bail;
 
                 /* The last valid PSN is the previous PSN. */
                 update_last_psn(qp, psn - 1);
 
-                dev->n_rc_resends += (int)qp->s_psn - (int)psn;
+                if (wqe->wr.opcode == IB_WR_RDMA_READ)
+                        dev->n_rc_resends++;
+                else
+                        dev->n_rc_resends +=
+                                (qp->s_psn - psn) & IPATH_PSN_MASK;
 
                 reset_psn(qp, psn);
 
@@ -971,26 +988,20 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
                 goto bail;
 
         case 3:         /* NAK */
-                /* The last valid PSN seen is the previous request's. */
-                if (qp->s_last != qp->s_tail)
-                        update_last_psn(qp, wqe->psn - 1);
+                if (qp->s_last == qp->s_tail)
+                        goto bail;
+                /* The last valid PSN is the previous PSN. */
+                update_last_psn(qp, psn - 1);
                 switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
                         IPATH_AETH_CREDIT_MASK) {
                 case 0: /* PSN sequence error */
                         dev->n_seq_naks++;
                         /*
-                         * Back up to the responder's expected PSN.  XXX
+                         * Back up to the responder's expected PSN.
                          * Note that we might get a NAK in the middle of an
                          * RDMA READ response which terminates the RDMA
                          * READ.
                          */
-                        if (qp->s_last == qp->s_tail)
-                                break;
-
-                        if (ipath_cmp24(psn, wqe->psn) < 0)
-                                break;
-
-                        /* Retry the request. */
                         ipath_restart_rc(qp, psn, &wc);
                         break;
 