Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c    |  54
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c    | 127
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c   | 165
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h |   6
5 files changed, 132 insertions(+), 224 deletions(-)
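This patch removes ipath_sqerror_qp() and routes all send-side failures through ipath_send_complete() followed by ipath_error_qp(), which now returns early if the QP is already in the error state. A rough, self-contained C model of the resulting control flow follows; the names below are illustrative stand-ins, not the driver's actual types or API:

#include <stdio.h>

/* Illustrative stand-ins for the driver's enums; not the kernel API. */
enum wc_status { WC_SUCCESS, WC_RETRY_EXC_ERR, WC_WR_FLUSH_ERR };
enum qp_state { QPS_RTS, QPS_ERR };

struct qp {
	enum qp_state state;
};

/* Model of ipath_send_complete(): retire one WQE with a status. */
static void send_complete(struct qp *qp, enum wc_status status)
{
	printf("wqe retired, status=%d (qp state=%d)\n", status, qp->state);
}

/* Model of ipath_error_qp(): transition to the error state exactly once. */
static int error_qp(struct qp *qp, enum wc_status err)
{
	if (qp->state == QPS_ERR)
		return 0;	/* already in error state: just return */
	qp->state = QPS_ERR;
	printf("queues flushed with status=%d\n", err);
	return 1;		/* caller may generate a last-WQE event */
}

int main(void)
{
	struct qp qp = { QPS_RTS };

	/* Retry count exhausted: complete the WQE, then flush the QP. */
	send_complete(&qp, WC_RETRY_EXC_ERR);
	error_qp(&qp, WC_WR_FLUSH_ERR);

	/* A later failure on the same QP no longer re-flushes it. */
	error_qp(&qp, WC_WR_FLUSH_ERR);
	return 0;
}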
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index dd5b6e9d57c2..6f98632877eb 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -374,13 +374,14 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 }
 
 /**
- * ipath_error_qp - put a QP into an error state
- * @qp: the QP to put into an error state
+ * ipath_error_qp - put a QP into the error state
+ * @qp: the QP to put into the error state
  * @err: the receive completion error to signal if a RWQE is active
  *
  * Flushes both send and receive work queues.
  * Returns true if last WQE event should be generated.
  * The QP s_lock should be held and interrupts disabled.
+ * If we are already in error state, just return.
  */
 
 int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
@@ -389,8 +390,10 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 	struct ib_wc wc;
 	int ret = 0;
 
-	ipath_dbg("QP%d/%d in error state (%d)\n",
-		  qp->ibqp.qp_num, qp->remote_qpn, err);
+	if (qp->state == IB_QPS_ERR)
+		goto bail;
+
+	qp->state = IB_QPS_ERR;
 
 	spin_lock(&dev->pending_lock);
 	if (!list_empty(&qp->timerwait))
@@ -460,6 +463,7 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 	} else if (qp->ibqp.event_handler)
 		ret = 1;
 
+bail:
 	return ret;
 }
 
@@ -1026,48 +1030,6 @@ bail:
 }
 
 /**
- * ipath_sqerror_qp - put a QP's send queue into an error state
- * @qp: QP who's send queue will be put into an error state
- * @wc: the WC responsible for putting the QP in this state
- *
- * Flushes the send work queue.
- * The QP s_lock should be held and interrupts disabled.
- */
-
-void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-
-	ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
-		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);
-
-	spin_lock(&dev->pending_lock);
-	if (!list_empty(&qp->timerwait))
-		list_del_init(&qp->timerwait);
-	if (!list_empty(&qp->piowait))
-		list_del_init(&qp->piowait);
-	spin_unlock(&dev->pending_lock);
-
-	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
-	if (++qp->s_last >= qp->s_size)
-		qp->s_last = 0;
-
-	wc->status = IB_WC_WR_FLUSH_ERR;
-
-	while (qp->s_last != qp->s_head) {
-		wqe = get_swqe_ptr(qp, qp->s_last);
-		wc->wr_id = wqe->wr.wr_id;
-		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
-	}
-	qp->s_cur = qp->s_tail = qp->s_head;
-	qp->state = IB_QPS_SQE;
-}
-
-/**
  * ipath_get_credit - flush the send work queue of a QP
  * @qp: the qp who's send work queue to flush
  * @aeth: the Acknowledge Extended Transport Header
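For reference, the deleted helper's core loop retired every pending send WQE with a flush status and left the QP in the SQE state; the replacement path reaches the same queues through ipath_error_qp() and ends in IB_QPS_ERR instead. A self-contained model of that ring-buffer flush (plain C, illustrative names only, not the driver's ipath_swqe layout):

#include <stdio.h>

#define QSIZE 8

/* Illustrative send-queue ring. */
struct send_queue {
	unsigned long wr_id[QSIZE];
	unsigned int s_last;	/* oldest un-retired entry */
	unsigned int s_head;	/* one past the newest entry */
};

/* Retire every posted WQE from s_last to s_head with a flush status. */
static void flush_sends(struct send_queue *sq)
{
	while (sq->s_last != sq->s_head) {
		printf("flush wr_id=%lu\n", sq->wr_id[sq->s_last]);
		if (++sq->s_last >= QSIZE)
			sq->s_last = 0;	/* wrap around the ring */
	}
}

int main(void)
{
	struct send_queue sq = { { 10, 11, 12 }, 0, 3 };

	flush_sends(&sq);
	return 0;
}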
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 08b11b567614..b4b26c3aa613 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -771,27 +771,14 @@ done:
  *
  * The QP s_lock should be held and interrupts disabled.
  */
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
+void ipath_restart_rc(struct ipath_qp *qp, u32 psn)
 {
 	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
 	struct ipath_ibdev *dev;
 
 	if (qp->s_retry == 0) {
-		wc->wr_id = wqe->wr.wr_id;
-		wc->status = IB_WC_RETRY_EXC_ERR;
-		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		wc->vendor_err = 0;
-		wc->byte_len = 0;
-		wc->qp = &qp->ibqp;
-		wc->imm_data = 0;
-		wc->src_qp = qp->remote_qpn;
-		wc->wc_flags = 0;
-		wc->pkey_index = 0;
-		wc->slid = qp->remote_ah_attr.dlid;
-		wc->sl = qp->remote_ah_attr.sl;
-		wc->dlid_path_bits = 0;
-		wc->port_num = 0;
-		ipath_sqerror_qp(qp, wc);
+		ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+		ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 		goto bail;
 	}
 	qp->s_retry--;
@@ -804,6 +791,8 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 	spin_lock(&dev->pending_lock);
 	if (!list_empty(&qp->timerwait))
 		list_del_init(&qp->timerwait);
+	if (!list_empty(&qp->piowait))
+		list_del_init(&qp->piowait);
 	spin_unlock(&dev->pending_lock);
 
 	if (wqe->wr.opcode == IB_WR_RDMA_READ)
@@ -845,6 +834,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ib_wc wc;
+	enum ib_wc_status status;
 	struct ipath_swqe *wqe;
 	int ret = 0;
 	u32 ack_psn;
@@ -909,7 +899,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
 			 */
 			update_last_psn(qp, wqe->psn - 1);
 			/* Retry this request. */
-			ipath_restart_rc(qp, wqe->psn, &wc);
+			ipath_restart_rc(qp, wqe->psn);
 			/*
 			 * No need to process the ACK/NAK since we are
 			 * restarting an earlier request.
@@ -937,20 +927,15 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
 		/* Post a send completion queue entry if requested. */
 		if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
 		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+			memset(&wc, 0, sizeof wc);
 			wc.wr_id = wqe->wr.wr_id;
 			wc.status = IB_WC_SUCCESS;
 			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-			wc.vendor_err = 0;
 			wc.byte_len = wqe->length;
-			wc.imm_data = 0;
 			wc.qp = &qp->ibqp;
 			wc.src_qp = qp->remote_qpn;
-			wc.wc_flags = 0;
-			wc.pkey_index = 0;
 			wc.slid = qp->remote_ah_attr.dlid;
 			wc.sl = qp->remote_ah_attr.sl;
-			wc.dlid_path_bits = 0;
-			wc.port_num = 0;
 			ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
 		}
 		qp->s_retry = qp->s_retry_cnt;
@@ -1012,7 +997,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
 		if (qp->s_last == qp->s_tail)
 			goto bail;
 		if (qp->s_rnr_retry == 0) {
-			wc.status = IB_WC_RNR_RETRY_EXC_ERR;
+			status = IB_WC_RNR_RETRY_EXC_ERR;
 			goto class_b;
 		}
 		if (qp->s_rnr_retry_cnt < 7)
@@ -1050,37 +1035,25 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
 			 * RDMA READ response which terminates the RDMA
 			 * READ.
 			 */
-			ipath_restart_rc(qp, psn, &wc);
+			ipath_restart_rc(qp, psn);
 			break;
 
 		case 1:	/* Invalid Request */
-			wc.status = IB_WC_REM_INV_REQ_ERR;
+			status = IB_WC_REM_INV_REQ_ERR;
 			dev->n_other_naks++;
 			goto class_b;
 
 		case 2:	/* Remote Access Error */
-			wc.status = IB_WC_REM_ACCESS_ERR;
+			status = IB_WC_REM_ACCESS_ERR;
 			dev->n_other_naks++;
 			goto class_b;
 
 		case 3:	/* Remote Operation Error */
-			wc.status = IB_WC_REM_OP_ERR;
+			status = IB_WC_REM_OP_ERR;
 			dev->n_other_naks++;
 		class_b:
-			wc.wr_id = wqe->wr.wr_id;
-			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-			wc.vendor_err = 0;
-			wc.byte_len = 0;
-			wc.qp = &qp->ibqp;
-			wc.imm_data = 0;
-			wc.src_qp = qp->remote_qpn;
-			wc.wc_flags = 0;
-			wc.pkey_index = 0;
-			wc.slid = qp->remote_ah_attr.dlid;
-			wc.sl = qp->remote_ah_attr.sl;
-			wc.dlid_path_bits = 0;
-			wc.port_num = 0;
-			ipath_sqerror_qp(qp, &wc);
+			ipath_send_complete(qp, wqe, status);
+			ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 			break;
 
 		default:
@@ -1126,8 +1099,8 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 			      int header_in_data)
 {
 	struct ipath_swqe *wqe;
+	enum ib_wc_status status;
 	unsigned long flags;
-	struct ib_wc wc;
 	int diff;
 	u32 pad;
 	u32 aeth;
@@ -1159,6 +1132,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 	if (unlikely(qp->s_last == qp->s_tail))
 		goto ack_done;
 	wqe = get_swqe_ptr(qp, qp->s_last);
+	status = IB_WC_SUCCESS;
 
 	switch (opcode) {
 	case OP(ACKNOWLEDGE):
@@ -1200,7 +1174,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 		/* no AETH, no ACK */
 		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
 			dev->n_rdma_seq++;
-			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+			ipath_restart_rc(qp, qp->s_last_psn + 1);
 			goto ack_done;
 		}
 		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
@@ -1261,7 +1235,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 		/* ACKs READ req. */
 		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
 			dev->n_rdma_seq++;
-			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+			ipath_restart_rc(qp, qp->s_last_psn + 1);
 			goto ack_done;
 		}
 		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
@@ -1291,31 +1265,16 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 		goto ack_done;
 	}
 
-ack_done:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	goto bail;
-
 ack_op_err:
-	wc.status = IB_WC_LOC_QP_OP_ERR;
+	status = IB_WC_LOC_QP_OP_ERR;
 	goto ack_err;
 
 ack_len_err:
-	wc.status = IB_WC_LOC_LEN_ERR;
+	status = IB_WC_LOC_LEN_ERR;
 ack_err:
-	wc.wr_id = wqe->wr.wr_id;
-	wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-	wc.vendor_err = 0;
-	wc.byte_len = 0;
-	wc.imm_data = 0;
-	wc.qp = &qp->ibqp;
-	wc.src_qp = qp->remote_qpn;
-	wc.wc_flags = 0;
-	wc.pkey_index = 0;
-	wc.slid = qp->remote_ah_attr.dlid;
-	wc.sl = qp->remote_ah_attr.sl;
-	wc.dlid_path_bits = 0;
-	wc.port_num = 0;
-	ipath_sqerror_qp(qp, &wc);
+	ipath_send_complete(qp, wqe, status);
+	ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ack_done:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 bail:
 	return;
@@ -1523,13 +1482,12 @@ send_ack:
 	return 0;
 }
 
-static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
+void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
 {
 	unsigned long flags;
 	int lastwqe;
 
 	spin_lock_irqsave(&qp->s_lock, flags);
-	qp->state = IB_QPS_ERR;
 	lastwqe = ipath_error_qp(qp, err);
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 
@@ -1643,11 +1601,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		    opcode == OP(SEND_LAST) ||
 		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
 			break;
-	nack_inv:
-		ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR);
-		qp->r_nak_state = IB_NAK_INVALID_REQUEST;
-		qp->r_ack_psn = qp->r_psn;
-		goto send_ack;
+		goto nack_inv;
 
 	case OP(RDMA_WRITE_FIRST):
 	case OP(RDMA_WRITE_MIDDLE):
@@ -1673,18 +1627,13 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		break;
 	}
 
-	wc.imm_data = 0;
-	wc.wc_flags = 0;
+	memset(&wc, 0, sizeof wc);
 
 	/* OK, process the packet. */
 	switch (opcode) {
 	case OP(SEND_FIRST):
-		if (!ipath_get_rwqe(qp, 0)) {
-		rnr_nak:
-			qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
-			qp->r_ack_psn = qp->r_psn;
-			goto send_ack;
-		}
+		if (!ipath_get_rwqe(qp, 0))
+			goto rnr_nak;
 		qp->r_rcv_len = 0;
 		/* FALLTHROUGH */
 	case OP(SEND_MIDDLE):
@@ -1751,14 +1700,10 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
 		else
 			wc.opcode = IB_WC_RECV;
-		wc.vendor_err = 0;
 		wc.qp = &qp->ibqp;
 		wc.src_qp = qp->remote_qpn;
-		wc.pkey_index = 0;
 		wc.slid = qp->remote_ah_attr.dlid;
 		wc.sl = qp->remote_ah_attr.sl;
-		wc.dlid_path_bits = 0;
-		wc.port_num = 0;
 		/* Signal completion event if the solicited bit is set. */
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 			       (ohdr->bth[0] &
@@ -1951,11 +1896,21 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 			goto send_ack;
 		goto done;
 
+rnr_nak:
+	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
+	qp->r_ack_psn = qp->r_psn;
+	goto send_ack;
+
+nack_inv:
+	ipath_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+	qp->r_ack_psn = qp->r_psn;
+	goto send_ack;
+
 nack_acc:
-	ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR);
+	ipath_rc_error(qp, IB_WC_LOC_PROT_ERR);
 	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
 	qp->r_ack_psn = qp->r_psn;
-
 send_ack:
 	send_rc_ack(qp);
 
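A pattern repeated through these hunks: the long runs of per-field `wc.<field> = 0;` assignments are collapsed into a single `memset(&wc, 0, sizeof wc)` before only the meaningful fields are set. A minimal standalone illustration of the idiom; the struct below is a stand-in, not the real `struct ib_wc`:

#include <stdio.h>
#include <string.h>

/* A stand-in with a few ib_wc-like fields; not the real layout. */
struct wc {
	unsigned long wr_id;
	int status;
	unsigned int byte_len;
	unsigned int slid;
	unsigned char sl;
};

/* One memset zeroes every field, so only meaningful values need stores. */
static void fill_wc(struct wc *wc, unsigned long wr_id, unsigned int len)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->byte_len = len;
}

int main(void)
{
	struct wc wc;

	fill_wc(&wc, 42, 4096);
	printf("wr_id=%lu len=%u slid=%u\n", wc.wr_id, wc.byte_len, wc.slid);
	return 0;
}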
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 9e3fe61cbd08..c716a03dd399 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -140,20 +140,11 @@ int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
 	goto bail;
 
 bad_lkey:
+	memset(&wc, 0, sizeof(wc));
 	wc.wr_id = wqe->wr_id;
 	wc.status = IB_WC_LOC_PROT_ERR;
 	wc.opcode = IB_WC_RECV;
-	wc.vendor_err = 0;
-	wc.byte_len = 0;
-	wc.imm_data = 0;
 	wc.qp = &qp->ibqp;
-	wc.src_qp = 0;
-	wc.wc_flags = 0;
-	wc.pkey_index = 0;
-	wc.slid = 0;
-	wc.sl = 0;
-	wc.dlid_path_bits = 0;
-	wc.port_num = 0;
 	/* Signal solicited completion event. */
 	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
 	ret = 0;
@@ -270,6 +261,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
 	struct ib_wc wc;
 	u64 sdata;
 	atomic64_t *maddr;
+	enum ib_wc_status send_status;
 
 	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
 	if (!qp) {
@@ -300,8 +292,8 @@ again:
 	wqe = get_swqe_ptr(sqp, sqp->s_last);
 	spin_unlock_irqrestore(&sqp->s_lock, flags);
 
-	wc.wc_flags = 0;
-	wc.imm_data = 0;
+	memset(&wc, 0, sizeof wc);
+	send_status = IB_WC_SUCCESS;
 
 	sqp->s_sge.sge = wqe->sg_list[0];
 	sqp->s_sge.sg_list = wqe->sg_list + 1;
@@ -313,75 +305,33 @@ again:
 		wc.imm_data = wqe->wr.ex.imm_data;
 		/* FALLTHROUGH */
 	case IB_WR_SEND:
-		if (!ipath_get_rwqe(qp, 0)) {
-		rnr_nak:
-			/* Handle RNR NAK */
-			if (qp->ibqp.qp_type == IB_QPT_UC)
-				goto send_comp;
-			if (sqp->s_rnr_retry == 0) {
-				wc.status = IB_WC_RNR_RETRY_EXC_ERR;
-				goto err;
-			}
-			if (sqp->s_rnr_retry_cnt < 7)
-				sqp->s_rnr_retry--;
-			dev->n_rnr_naks++;
-			sqp->s_rnr_timeout =
-				ib_ipath_rnr_table[qp->r_min_rnr_timer];
-			ipath_insert_rnr_queue(sqp);
-			goto done;
-		}
+		if (!ipath_get_rwqe(qp, 0))
+			goto rnr_nak;
 		break;
 
 	case IB_WR_RDMA_WRITE_WITH_IMM:
-		if (unlikely(!(qp->qp_access_flags &
-			       IB_ACCESS_REMOTE_WRITE))) {
-			wc.status = IB_WC_REM_INV_REQ_ERR;
-			goto err;
-		}
+		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+			goto inv_err;
 		wc.wc_flags = IB_WC_WITH_IMM;
 		wc.imm_data = wqe->wr.ex.imm_data;
 		if (!ipath_get_rwqe(qp, 1))
 			goto rnr_nak;
 		/* FALLTHROUGH */
 	case IB_WR_RDMA_WRITE:
-		if (unlikely(!(qp->qp_access_flags &
-			       IB_ACCESS_REMOTE_WRITE))) {
-			wc.status = IB_WC_REM_INV_REQ_ERR;
-			goto err;
-		}
+		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+			goto inv_err;
 		if (wqe->length == 0)
 			break;
 		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
 					    wqe->wr.wr.rdma.remote_addr,
 					    wqe->wr.wr.rdma.rkey,
-					    IB_ACCESS_REMOTE_WRITE))) {
-		acc_err:
-			wc.status = IB_WC_REM_ACCESS_ERR;
-		err:
-			wc.wr_id = wqe->wr.wr_id;
-			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-			wc.vendor_err = 0;
-			wc.byte_len = 0;
-			wc.qp = &sqp->ibqp;
-			wc.src_qp = sqp->remote_qpn;
-			wc.pkey_index = 0;
-			wc.slid = sqp->remote_ah_attr.dlid;
-			wc.sl = sqp->remote_ah_attr.sl;
-			wc.dlid_path_bits = 0;
-			wc.port_num = 0;
-			spin_lock_irqsave(&sqp->s_lock, flags);
-			ipath_sqerror_qp(sqp, &wc);
-			spin_unlock_irqrestore(&sqp->s_lock, flags);
-			goto done;
-		}
+					    IB_ACCESS_REMOTE_WRITE)))
+			goto acc_err;
 		break;
 
 	case IB_WR_RDMA_READ:
-		if (unlikely(!(qp->qp_access_flags &
-			       IB_ACCESS_REMOTE_READ))) {
-			wc.status = IB_WC_REM_INV_REQ_ERR;
-			goto err;
-		}
+		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+			goto inv_err;
 		if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
 					    wqe->wr.wr.rdma.remote_addr,
 					    wqe->wr.wr.rdma.rkey,
@@ -394,11 +344,8 @@ again:
 
 	case IB_WR_ATOMIC_CMP_AND_SWP:
 	case IB_WR_ATOMIC_FETCH_AND_ADD:
-		if (unlikely(!(qp->qp_access_flags &
-			       IB_ACCESS_REMOTE_ATOMIC))) {
-			wc.status = IB_WC_REM_INV_REQ_ERR;
-			goto err;
-		}
+		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+			goto inv_err;
 		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
 					    wqe->wr.wr.atomic.remote_addr,
 					    wqe->wr.wr.atomic.rkey,
@@ -415,7 +362,8 @@ again:
 		goto send_comp;
 
 	default:
-		goto done;
+		send_status = IB_WC_LOC_QP_OP_ERR;
+		goto serr;
 	}
 
 	sge = &sqp->s_sge.sge;
@@ -458,14 +406,11 @@ again:
 	wc.opcode = IB_WC_RECV;
 	wc.wr_id = qp->r_wr_id;
 	wc.status = IB_WC_SUCCESS;
-	wc.vendor_err = 0;
 	wc.byte_len = wqe->length;
 	wc.qp = &qp->ibqp;
 	wc.src_qp = qp->remote_qpn;
-	wc.pkey_index = 0;
 	wc.slid = qp->remote_ah_attr.dlid;
 	wc.sl = qp->remote_ah_attr.sl;
-	wc.dlid_path_bits = 0;
 	wc.port_num = 1;
 	/* Signal completion event if the solicited bit is set. */
 	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
@@ -473,9 +418,63 @@ again:
 
 send_comp:
 	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
-	ipath_send_complete(sqp, wqe, IB_WC_SUCCESS);
+	ipath_send_complete(sqp, wqe, send_status);
 	goto again;
 
+rnr_nak:
+	/* Handle RNR NAK */
+	if (qp->ibqp.qp_type == IB_QPT_UC)
+		goto send_comp;
+	/*
+	 * Note: we don't need the s_lock held since the BUSY flag
+	 * makes this single threaded.
+	 */
+	if (sqp->s_rnr_retry == 0) {
+		send_status = IB_WC_RNR_RETRY_EXC_ERR;
+		goto serr;
+	}
+	if (sqp->s_rnr_retry_cnt < 7)
+		sqp->s_rnr_retry--;
+	spin_lock_irqsave(&sqp->s_lock, flags);
+	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK))
+		goto unlock;
+	dev->n_rnr_naks++;
+	sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
+	ipath_insert_rnr_queue(sqp);
+	goto unlock;
+
+inv_err:
+	send_status = IB_WC_REM_INV_REQ_ERR;
+	wc.status = IB_WC_LOC_QP_OP_ERR;
+	goto err;
+
+acc_err:
+	send_status = IB_WC_REM_ACCESS_ERR;
+	wc.status = IB_WC_LOC_PROT_ERR;
+err:
+	/* responder goes to error state */
+	ipath_rc_error(qp, wc.status);
+
+serr:
+	spin_lock_irqsave(&sqp->s_lock, flags);
+	ipath_send_complete(sqp, wqe, send_status);
+	if (sqp->ibqp.qp_type == IB_QPT_RC) {
+		int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+
+		sqp->s_flags &= ~IPATH_S_BUSY;
+		spin_unlock_irqrestore(&sqp->s_lock, flags);
+		if (lastwqe) {
+			struct ib_event ev;
+
+			ev.device = sqp->ibqp.device;
+			ev.element.qp = &sqp->ibqp;
+			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
+		}
+		goto done;
+	}
+unlock:
+	spin_unlock_irqrestore(&sqp->s_lock, flags);
 done:
 	if (atomic_dec_and_test(&qp->refcount))
 		wake_up(&qp->wait);
@@ -651,21 +650,15 @@ void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
 	    status != IB_WC_SUCCESS) {
 		struct ib_wc wc;
 
+		memset(&wc, 0, sizeof wc);
 		wc.wr_id = wqe->wr.wr_id;
 		wc.status = status;
 		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		wc.vendor_err = 0;
-		wc.byte_len = wqe->length;
-		wc.imm_data = 0;
 		wc.qp = &qp->ibqp;
-		wc.src_qp = 0;
-		wc.wc_flags = 0;
-		wc.pkey_index = 0;
-		wc.slid = 0;
-		wc.sl = 0;
-		wc.dlid_path_bits = 0;
-		wc.port_num = 0;
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+		if (status == IB_WC_SUCCESS)
+			wc.byte_len = wqe->length;
+		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
+			       status != IB_WC_SUCCESS);
 	}
 
 	spin_lock_irqsave(&qp->s_lock, flags);
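The ipath_send_complete() hunk above also changes when a completion event fires: the last argument to ipath_cq_enter() becomes `status != IB_WC_SUCCESS`, so error completions always generate an event, and byte_len is reported only for successful sends. A small userspace sketch of that logic; cq_enter() below is a hypothetical stand-in, not the driver function:

#include <stdbool.h>
#include <stdio.h>

enum wc_status { WC_SUCCESS, WC_WR_FLUSH_ERR };

/* Hypothetical stand-in: 'solicited' forces an immediate event. */
static void cq_enter(int status, unsigned int byte_len, bool solicited)
{
	printf("wc: status=%d len=%u event=%s\n",
	       status, byte_len, solicited ? "yes" : "no");
}

static void send_complete(enum wc_status status, unsigned int wqe_len)
{
	unsigned int byte_len = 0;

	/* byte_len is only meaningful for successful sends. */
	if (status == WC_SUCCESS)
		byte_len = wqe_len;
	/* Error completions always generate a completion event. */
	cq_enter(status, byte_len, status != WC_SUCCESS);
}

int main(void)
{
	send_complete(WC_SUCCESS, 4096);
	send_complete(WC_WR_FLUSH_ERR, 4096);
	return 0;
}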
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 5015cd2e57bd..22bb42dc8f73 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -744,12 +744,10 @@ static void ipath_ib_timer(struct ipath_ibdev *dev)
 
 	/* XXX What if timer fires again while this is running? */
 	for (qp = resend; qp != NULL; qp = qp->timer_next) {
-		struct ib_wc wc;
-
 		spin_lock_irqsave(&qp->s_lock, flags);
 		if (qp->s_last != qp->s_tail && qp->state == IB_QPS_RTS) {
 			dev->n_timeouts++;
-			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+			ipath_restart_rc(qp, qp->s_last_psn + 1);
 		}
 		spin_unlock_irqrestore(&qp->s_lock, flags);
 
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 6514aa8306cd..4c7c2aa8e19d 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -710,8 +710,6 @@ void ipath_free_all_qps(struct ipath_qp_table *qpt);
 
 int ipath_init_qp_table(struct ipath_ibdev *idev, int size);
 
-void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
-
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
 
 unsigned ipath_ib_rate_to_mult(enum ib_rate rate);
@@ -729,7 +727,9 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
 
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc);
+void ipath_restart_rc(struct ipath_qp *qp, u32 psn);
+
+void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);
 
 int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
 