Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_verbs.c')
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_verbs.c | 176
1 file changed, 117 insertions(+), 59 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 5015cd2e57bd..e0ec540042bf 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -111,16 +111,24 @@ static unsigned int ib_ipath_disable_sma;
 module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
 
+/*
+ * Note that it is OK to post send work requests in the SQE and ERR
+ * states; ipath_do_send() will process them and generate error
+ * completions as per IB 1.2 C10-96.
+ */
 const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = 0,
 	[IB_QPS_INIT] = IPATH_POST_RECV_OK,
 	[IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
 	[IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
-	    IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
+	    IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK |
+	    IPATH_PROCESS_NEXT_SEND_OK,
 	[IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
-	    IPATH_POST_SEND_OK,
-	[IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
-	[IB_QPS_ERR] = 0,
+	    IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
+	[IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
+	    IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
+	[IB_QPS_ERR] = IPATH_POST_RECV_OK | IPATH_FLUSH_RECV |
+	    IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
 };
 
 struct ipath_ucontext {
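The hunk above replaces per-state special cases with a capability table indexed by QP state: posting is governed by IPATH_POST_SEND_OK, while the new IPATH_FLUSH_SEND/IPATH_FLUSH_RECV bits tell the send path to flush rather than transmit, which is how the IB 1.2 C10-96 completions for the SQE and ERR states get generated. The standalone sketch below models how such a table is consulted; the flag names, values, and states are illustrative stand-ins, not the driver's definitions.

/*
 * Standalone userspace sketch of consulting a per-state capability table.
 * Flag names and values here are made up for illustration.
 */
#include <stdio.h>

enum qp_state { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_SQD, QPS_SQE, QPS_ERR, QPS_MAX };

#define POST_SEND_OK     0x01	/* posting a send WR is allowed */
#define PROCESS_SEND_OK  0x02	/* the WR may actually be transmitted */
#define FLUSH_SEND       0x04	/* the WR is completed with a flush error */

static const int state_ops[QPS_MAX] = {
	[QPS_RTS] = POST_SEND_OK | PROCESS_SEND_OK,
	[QPS_SQD] = POST_SEND_OK | PROCESS_SEND_OK,
	[QPS_SQE] = POST_SEND_OK | FLUSH_SEND,
	[QPS_ERR] = POST_SEND_OK | FLUSH_SEND,
};

int main(void)
{
	/* Posting is legal in SQE/ERR, but the WR gets flushed, not sent. */
	printf("RTS: post=%d send=%d flush=%d\n",
	       !!(state_ops[QPS_RTS] & POST_SEND_OK),
	       !!(state_ops[QPS_RTS] & PROCESS_SEND_OK),
	       !!(state_ops[QPS_RTS] & FLUSH_SEND));
	printf("ERR: post=%d send=%d flush=%d\n",
	       !!(state_ops[QPS_ERR] & POST_SEND_OK),
	       !!(state_ops[QPS_ERR] & PROCESS_SEND_OK),
	       !!(state_ops[QPS_ERR] & FLUSH_SEND));
	return 0;
}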
@@ -230,18 +238,6 @@ void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
 	}
 }
 
-static void ipath_flush_wqe(struct ipath_qp *qp, struct ib_send_wr *wr)
-{
-	struct ib_wc wc;
-
-	memset(&wc, 0, sizeof(wc));
-	wc.wr_id = wr->wr_id;
-	wc.status = IB_WC_WR_FLUSH_ERR;
-	wc.opcode = ib_ipath_wc_opcode[wr->opcode];
-	wc.qp = &qp->ibqp;
-	ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
-}
-
 /*
  * Count the number of DMA descriptors needed to send length bytes of data.
  * Don't modify the ipath_sge_state to get the count.
@@ -347,14 +343,8 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 	spin_lock_irqsave(&qp->s_lock, flags);
 
 	/* Check that state is OK to post send. */
-	if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) {
-		if (qp->state != IB_QPS_SQE && qp->state != IB_QPS_ERR)
-			goto bail_inval;
-		/* C10-96 says generate a flushed completion entry. */
-		ipath_flush_wqe(qp, wr);
-		ret = 0;
-		goto bail;
-	}
+	if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))
+		goto bail_inval;
 
 	/* IB spec says that num_sge == 0 is OK. */
 	if (wr->num_sge > qp->s_max_sge)
@@ -677,6 +667,7 @@ bail:;
 static void ipath_ib_timer(struct ipath_ibdev *dev)
 {
 	struct ipath_qp *resend = NULL;
+	struct ipath_qp *rnr = NULL;
 	struct list_head *last;
 	struct ipath_qp *qp;
 	unsigned long flags;
@@ -703,7 +694,9 @@ static void ipath_ib_timer(struct ipath_ibdev *dev)
 		if (--qp->s_rnr_timeout == 0) {
 			do {
 				list_del_init(&qp->timerwait);
-				tasklet_hi_schedule(&qp->s_task);
+				qp->timer_next = rnr;
+				rnr = qp;
+				atomic_inc(&qp->refcount);
 				if (list_empty(last))
 					break;
 				qp = list_entry(last->next, struct ipath_qp,
@@ -743,13 +736,15 @@ static void ipath_ib_timer(struct ipath_ibdev *dev)
 	spin_unlock_irqrestore(&dev->pending_lock, flags);
 
 	/* XXX What if timer fires again while this is running? */
-	for (qp = resend; qp != NULL; qp = qp->timer_next) {
-		struct ib_wc wc;
+	while (resend != NULL) {
+		qp = resend;
+		resend = qp->timer_next;
 
 		spin_lock_irqsave(&qp->s_lock, flags);
-		if (qp->s_last != qp->s_tail && qp->state == IB_QPS_RTS) {
+		if (qp->s_last != qp->s_tail &&
+		    ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
 			dev->n_timeouts++;
-			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+			ipath_restart_rc(qp, qp->s_last_psn + 1);
 		}
 		spin_unlock_irqrestore(&qp->s_lock, flags);
 
@@ -757,6 +752,19 @@ static void ipath_ib_timer(struct ipath_ibdev *dev)
 		if (atomic_dec_and_test(&qp->refcount))
 			wake_up(&qp->wait);
 	}
+	while (rnr != NULL) {
+		qp = rnr;
+		rnr = qp->timer_next;
+
+		spin_lock_irqsave(&qp->s_lock, flags);
+		if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
+			ipath_schedule_send(qp);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+
+		/* Notify ipath_destroy_qp() if it is waiting. */
+		if (atomic_dec_and_test(&qp->refcount))
+			wake_up(&qp->wait);
+	}
 }
 
 static void update_sge(struct ipath_sge_state *ss, u32 length)
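The RNR-timeout path above (and the ipath_ib_piobufavail() hunk later in this patch) follows one pattern: while holding the device-wide pending_lock, unlink each QP, chain it onto a private list through timer_next/pio_next and take a reference; then, with that lock dropped, walk the private list and reschedule each QP under its own s_lock, dropping the reference afterwards. The standalone sketch below models that two-phase drain in userspace; the types, names, and pthread/stdatomic primitives are stand-ins, not the kernel's APIs.

/*
 * Userspace model of "collect under one lock, process under each
 * object's own lock".  Illustrative only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct qp {
	int id;
	atomic_int refcount;
	pthread_mutex_t s_lock;	/* per-QP lock (plays the role of qp->s_lock) */
	struct qp *pend_next;	/* link on the device-wide pending list */
	struct qp *next;	/* link on the private drain list */
};

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qp *pending_head;

static void drain_and_schedule(void)
{
	struct qp *list = NULL, *qp;

	/* Phase 1: unlink everything while holding only the global lock. */
	pthread_mutex_lock(&pending_lock);
	while ((qp = pending_head) != NULL) {
		pending_head = qp->pend_next;
		qp->next = list;
		list = qp;
		atomic_fetch_add(&qp->refcount, 1);	/* keep qp alive */
	}
	pthread_mutex_unlock(&pending_lock);

	/* Phase 2: per-QP work under the per-QP lock, global lock dropped. */
	while ((qp = list) != NULL) {
		list = qp->next;
		pthread_mutex_lock(&qp->s_lock);
		printf("schedule send on qp %d\n", qp->id);
		pthread_mutex_unlock(&qp->s_lock);
		/* mirrors atomic_dec_and_test() + wake_up() */
		if (atomic_fetch_sub(&qp->refcount, 1) == 1)
			printf("qp %d: last reference dropped\n", qp->id);
	}
}

int main(void)
{
	static struct qp a = { .id = 1, .s_lock = PTHREAD_MUTEX_INITIALIZER };
	static struct qp b = { .id = 2, .s_lock = PTHREAD_MUTEX_INITIALIZER };

	b.pend_next = &a;
	pending_head = &b;
	drain_and_schedule();
	return 0;
}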
@@ -1012,13 +1020,24 @@ static void sdma_complete(void *cookie, int status)
 	struct ipath_verbs_txreq *tx = cookie;
 	struct ipath_qp *qp = tx->qp;
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+	unsigned int flags;
+	enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
+		IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
 
-	/* Generate a completion queue entry if needed */
-	if (qp->ibqp.qp_type != IB_QPT_RC && tx->wqe) {
-		enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
-			IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
-
+	if (atomic_dec_and_test(&qp->s_dma_busy)) {
+		spin_lock_irqsave(&qp->s_lock, flags);
+		if (tx->wqe)
+			ipath_send_complete(qp, tx->wqe, ibs);
+		if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
+		     qp->s_last != qp->s_head) ||
+		    (qp->s_flags & IPATH_S_WAIT_DMA))
+			ipath_schedule_send(qp);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+		wake_up(&qp->wait_dma);
+	} else if (tx->wqe) {
+		spin_lock_irqsave(&qp->s_lock, flags);
 		ipath_send_complete(qp, tx->wqe, ibs);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
 	}
 
 	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
@@ -1029,6 +1048,21 @@ static void sdma_complete(void *cookie, int status)
 		wake_up(&qp->wait);
 }
 
+static void decrement_dma_busy(struct ipath_qp *qp)
+{
+	unsigned int flags;
+
+	if (atomic_dec_and_test(&qp->s_dma_busy)) {
+		spin_lock_irqsave(&qp->s_lock, flags);
+		if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
+		     qp->s_last != qp->s_head) ||
+		    (qp->s_flags & IPATH_S_WAIT_DMA))
+			ipath_schedule_send(qp);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+		wake_up(&qp->wait_dma);
+	}
+}
+
 /*
  * Compute the number of clock cycles of delay before sending the next packet.
  * The multipliers reflect the number of clocks for the fastest rate so
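decrement_dma_busy() above pairs with the atomic_inc(&qp->s_dma_busy) calls added around each ipath_sdma_verbs_send() below: whichever side drops the in-flight count to zero re-arms the send engine and wakes anyone sleeping on qp->wait_dma. The standalone sketch below models that accounting with C11 atomics; the names and the printf stand-ins are illustrative, not the driver's.

/*
 * Userspace model of the s_dma_busy accounting: the side that drops the
 * in-flight count to zero does the wakeup.  Illustrative only; the real
 * driver uses atomic_inc()/atomic_dec_and_test().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int dma_busy;

static void dma_idle_wakeup(void)
{
	/* Stands in for ipath_schedule_send() + wake_up(&qp->wait_dma). */
	printf("DMA idle: wake waiters, restart send engine\n");
}

static void decrement_dma_busy(void)
{
	if (atomic_fetch_sub(&dma_busy, 1) == 1)
		dma_idle_wakeup();
}

/*
 * submit() may fail; on failure the submitter must undo its own
 * increment, exactly as the error paths below call decrement_dma_busy().
 */
static bool submit(bool will_fail)
{
	atomic_fetch_add(&dma_busy, 1);
	if (will_fail) {
		decrement_dma_busy();
		return false;
	}
	return true;
}

/* On success, the completion callback owns the matching decrement. */
static void completion(void)
{
	decrement_dma_busy();
}

int main(void)
{
	if (submit(false))
		completion();	/* normal path: callback drops the count */
	submit(true);		/* error path: submitter drops it */
	printf("in flight: %d\n", atomic_load(&dma_busy));
	return 0;
}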
@@ -1067,9 +1101,12 @@ static int ipath_verbs_send_dma(struct ipath_qp *qp,
 	if (tx) {
 		qp->s_tx = NULL;
 		/* resend previously constructed packet */
+		atomic_inc(&qp->s_dma_busy);
 		ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
-		if (ret)
+		if (ret) {
 			qp->s_tx = tx;
+			decrement_dma_busy(qp);
+		}
 		goto bail;
 	}
 
@@ -1120,12 +1157,14 @@ static int ipath_verbs_send_dma(struct ipath_qp *qp,
 		tx->txreq.sg_count = ndesc;
 		tx->map_len = (hdrwords + 2) << 2;
 		tx->txreq.map_addr = &tx->hdr;
+		atomic_inc(&qp->s_dma_busy);
 		ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
 		if (ret) {
 			/* save ss and length in dwords */
 			tx->ss = ss;
 			tx->len = dwords;
 			qp->s_tx = tx;
+			decrement_dma_busy(qp);
 		}
 		goto bail;
 	}
@@ -1146,6 +1185,7 @@ static int ipath_verbs_send_dma(struct ipath_qp *qp,
 		memcpy(piobuf, hdr, hdrwords << 2);
 		ipath_copy_from_sge(piobuf + hdrwords, ss, len);
 
+		atomic_inc(&qp->s_dma_busy);
 		ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
 		/*
 		 * If we couldn't queue the DMA request, save the info
@@ -1156,6 +1196,7 @@ static int ipath_verbs_send_dma(struct ipath_qp *qp,
 			tx->ss = NULL;
 			tx->len = 0;
 			qp->s_tx = tx;
+			decrement_dma_busy(qp);
 		}
 		dev->n_unaligned++;
 		goto bail;
@@ -1179,6 +1220,7 @@ static int ipath_verbs_send_pio(struct ipath_qp *qp,
 	unsigned flush_wc;
 	u32 control;
 	int ret;
+	unsigned int flags;
 
 	piobuf = ipath_getpiobuf(dd, plen, NULL);
 	if (unlikely(piobuf == NULL)) {
@@ -1249,8 +1291,11 @@ static int ipath_verbs_send_pio(struct ipath_qp *qp,
 	}
 	copy_io(piobuf, ss, len, flush_wc);
 done:
-	if (qp->s_wqe)
+	if (qp->s_wqe) {
+		spin_lock_irqsave(&qp->s_lock, flags);
 		ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+	}
 	ret = 0;
 bail:
 	return ret;
@@ -1283,19 +1328,12 @@ int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
 	 * can defer SDMA restart until link goes ACTIVE without
 	 * worrying about just how we got there.
 	 */
-	if (qp->ibqp.qp_type == IB_QPT_SMI)
+	if (qp->ibqp.qp_type == IB_QPT_SMI ||
+	    !(dd->ipath_flags & IPATH_HAS_SEND_DMA))
 		ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
 					   plen, dwords);
-	/* All non-VL15 packets are dropped if link is not ACTIVE */
-	else if (!(dd->ipath_flags & IPATH_LINKACTIVE)) {
-		if (qp->s_wqe)
-			ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
-		ret = 0;
-	} else if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-		ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
-					   plen, dwords);
 	else
-		ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
+		ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
 					   plen, dwords);
 
 	return ret;
@@ -1403,27 +1441,46 @@ bail:
 * This is called from ipath_intr() at interrupt level when a PIO buffer is
 * available after ipath_verbs_send() returned an error that no buffers were
 * available. Return 1 if we consumed all the PIO buffers and we still have
- * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
+ * QPs waiting for buffers (for now, just restart the send tasklet and
 * return zero).
 */
 int ipath_ib_piobufavail(struct ipath_ibdev *dev)
 {
+	struct list_head *list;
+	struct ipath_qp *qplist;
 	struct ipath_qp *qp;
 	unsigned long flags;
 
 	if (dev == NULL)
 		goto bail;
 
+	list = &dev->piowait;
+	qplist = NULL;
+
 	spin_lock_irqsave(&dev->pending_lock, flags);
-	while (!list_empty(&dev->piowait)) {
-		qp = list_entry(dev->piowait.next, struct ipath_qp,
-				piowait);
+	while (!list_empty(list)) {
+		qp = list_entry(list->next, struct ipath_qp, piowait);
 		list_del_init(&qp->piowait);
-		clear_bit(IPATH_S_BUSY, &qp->s_busy);
-		tasklet_hi_schedule(&qp->s_task);
+		qp->pio_next = qplist;
+		qplist = qp;
+		atomic_inc(&qp->refcount);
 	}
 	spin_unlock_irqrestore(&dev->pending_lock, flags);
 
+	while (qplist != NULL) {
+		qp = qplist;
+		qplist = qp->pio_next;
+
+		spin_lock_irqsave(&qp->s_lock, flags);
+		if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
+			ipath_schedule_send(qp);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+
+		/* Notify ipath_destroy_qp() if it is waiting. */
+		if (atomic_dec_and_test(&qp->refcount))
+			wake_up(&qp->wait);
+	}
+
 bail:
 	return 0;
 }
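Both drain loops in this patch end with atomic_dec_and_test(&qp->refcount) followed by wake_up(&qp->wait), so that ipath_destroy_qp(), which may already be sleeping, only proceeds once no other context still holds the QP. The standalone sketch below models that last-reference handshake with a condition variable; the kernel side uses wait_event()/wake_up(), and everything here is illustrative.

/*
 * Userspace model of the refcount/wake_up handshake: destroy blocks
 * until every borrowed reference has been returned.  Illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

struct qp {
	int refcount;			/* protected by lock */
	pthread_mutex_t lock;
	pthread_cond_t zero;		/* signalled when refcount hits 0 */
};

static void qp_get(struct qp *qp)
{
	pthread_mutex_lock(&qp->lock);
	qp->refcount++;
	pthread_mutex_unlock(&qp->lock);
}

static void qp_put(struct qp *qp)
{
	pthread_mutex_lock(&qp->lock);
	if (--qp->refcount == 0)
		pthread_cond_broadcast(&qp->zero);	/* like wake_up(&qp->wait) */
	pthread_mutex_unlock(&qp->lock);
}

static void qp_destroy(struct qp *qp)
{
	pthread_mutex_lock(&qp->lock);
	while (qp->refcount != 0)			/* like wait_event() */
		pthread_cond_wait(&qp->zero, &qp->lock);
	pthread_mutex_unlock(&qp->lock);
	printf("safe to free the QP now\n");
}

int main(void)
{
	struct qp qp = { .lock = PTHREAD_MUTEX_INITIALIZER,
			 .zero = PTHREAD_COND_INITIALIZER };

	qp_get(&qp);		/* e.g. while the QP sits on a private work list */
	qp_put(&qp);		/* work done: last reference returned */
	qp_destroy(&qp);	/* returns at once since refcount is already 0 */
	return 0;
}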
@@ -2145,11 +2202,12 @@ bail:
 void ipath_unregister_ib_device(struct ipath_ibdev *dev)
 {
 	struct ib_device *ibdev = &dev->ibdev;
-
-	disable_timer(dev->dd);
+	u32 qps_inuse;
 
 	ib_unregister_device(ibdev);
 
+	disable_timer(dev->dd);
+
 	if (!list_empty(&dev->pending[0]) ||
 	    !list_empty(&dev->pending[1]) ||
 	    !list_empty(&dev->pending[2]))
@@ -2164,7 +2222,10 @@ void ipath_unregister_ib_device(struct ipath_ibdev *dev)
 	 * Note that ipath_unregister_ib_device() can be called before all
 	 * the QPs are destroyed!
 	 */
-	ipath_free_all_qps(&dev->qp_table);
+	qps_inuse = ipath_free_all_qps(&dev->qp_table);
+	if (qps_inuse)
+		ipath_dev_err(dev->dd, "QP memory leak! %u still in use\n",
+			      qps_inuse);
 	kfree(dev->qp_table.table);
 	kfree(dev->lk_table.table);
 	kfree(dev->txreq_bufs);
@@ -2215,17 +2276,14 @@ static ssize_t show_stats(struct device *device, struct device_attribute *attr,
 		      "RC OTH NAKs %d\n"
 		      "RC timeouts %d\n"
 		      "RC RDMA dup %d\n"
-		      "RC stalls   %d\n"
 		      "piobuf wait %d\n"
-		      "no piobuf   %d\n"
 		      "unaligned   %d\n"
 		      "PKT drops   %d\n"
 		      "WQE errs    %d\n",
 		      dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
 		      dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
 		      dev->n_other_naks, dev->n_timeouts,
-		      dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait,
-		      dev->n_no_piobuf, dev->n_unaligned,
+		      dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned,
 		      dev->n_pkt_drops, dev->n_wqe_errs);
 	for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
 		const struct ipath_opcode_stats *si = &dev->opstats[i];