Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cm.c')
 -rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 251
 1 file changed, 119 insertions(+), 132 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 32d352a88d50..31fb44085c9b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -61,9 +61,9 @@ static char *states[] = {
 	NULL,
 };
 
-static int dack_mode;
+static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
-MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
 
 int c4iw_max_read_depth = 8;
 module_param(c4iw_max_read_depth, int, 0644);
@@ -117,9 +117,9 @@ static int rcv_win = 256 * 1024;
 module_param(rcv_win, int, 0644);
 MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
 
-static int snd_win = 32 * 1024;
+static int snd_win = 128 * 1024;
 module_param(snd_win, int, 0644);
-MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
+MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
 
 static struct workqueue_struct *workq;
 
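
Both knobs above remain 0644 module parameters, so the previous defaults can be restored without rebuilding. A load-time sketch (assuming the driver module is named iw_cxgb4, as built from this directory):

	/*
	 * modprobe iw_cxgb4 dack_mode=0 snd_win=32768
	 *
	 * or, after load, via sysfs:
	 * echo 0 > /sys/module/iw_cxgb4/parameters/dack_mode
	 */
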
@@ -172,7 +172,7 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
 	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
 	if (error < 0)
 		kfree_skb(skb);
-	return error;
+	return error < 0 ? error : 0;
 }
 
 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
@@ -187,7 +187,7 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
 	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
 	if (error < 0)
 		kfree_skb(skb);
-	return error;
+	return error < 0 ? error : 0;
 }
 
 static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
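
The return-value change in the two send wrappers above reflects that cxgb4_l2t_send() and cxgb4_ofld_send() can return positive NET_XMIT_* values (packet queued despite congestion) as well as negative errnos. Callers treat any non-zero result as failure, so positive soft statuses are now squashed to 0. A minimal caller sketch under that assumption:

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)		/* non-zero now always means a hard failure */
		return ret;	/* the wrapper already freed the skb */
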
@@ -219,12 +219,11 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
 
 static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
 {
-	unsigned long flags;
 	enum c4iw_ep_state state;
 
-	spin_lock_irqsave(&epc->lock, flags);
+	mutex_lock(&epc->mutex);
 	state = epc->state;
-	spin_unlock_irqrestore(&epc->lock, flags);
+	mutex_unlock(&epc->mutex);
 	return state;
 }
 
@@ -235,12 +234,10 @@ static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
 
 static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&epc->lock, flags);
+	mutex_lock(&epc->mutex);
 	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
 	__state_set(epc, new);
-	spin_unlock_irqrestore(&epc->lock, flags);
+	mutex_unlock(&epc->mutex);
 	return;
 }
 
@@ -251,8 +248,8 @@ static void *alloc_ep(int size, gfp_t gfp)
 	epc = kzalloc(size, gfp);
 	if (epc) {
 		kref_init(&epc->kref);
-		spin_lock_init(&epc->lock);
-		init_waitqueue_head(&epc->waitq);
+		mutex_init(&epc->mutex);
+		c4iw_init_wr_wait(&epc->wr_wait);
 	}
 	PDBG("%s alloc ep %p\n", __func__, epc);
 	return epc;
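
The three hunks above belong to one conversion: the CPL handlers run from a workqueue in process context, so the per-endpoint spinlock can become a sleepable mutex, and the open-coded waitq/rpl_done/rpl_err triple collapses into a c4iw_wr_wait. The corresponding fields of struct c4iw_ep_common change roughly as sketched below (assumed shape; the real definition lives in iw_cxgb4.h):

	struct c4iw_ep_common {
		struct iw_cm_id *cm_id;
		struct c4iw_qp *qp;
		struct c4iw_dev *dev;
		enum c4iw_ep_state state;
		struct kref kref;
		struct mutex mutex;		/* was: spinlock_t lock */
		struct c4iw_wr_wait wr_wait;	/* was: wait_queue_head_t waitq
						 * plus int rpl_done, rpl_err */
		/* ... */
	};
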
@@ -318,23 +315,12 @@ static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
 				 __be16 peer_port, u8 tos)
 {
 	struct rtable *rt;
-	struct flowi fl = {
-		.oif = 0,
-		.nl_u = {
-			 .ip4_u = {
-				   .daddr = peer_ip,
-				   .saddr = local_ip,
-				   .tos = tos}
-		},
-		.proto = IPPROTO_TCP,
-		.uli_u = {
-			  .ports = {
-				    .sport = local_port,
-				    .dport = peer_port}
-		}
-	};
-
-	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+	struct flowi4 fl4;
+
+	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
+				   peer_port, local_port, IPPROTO_TCP,
+				   tos, 0);
+	if (IS_ERR(rt))
 		return NULL;
 	return rt;
 }
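
find_route() tracks the 2.6.39 routing API rework: struct flowi was split into flowi4/flowi6, and ip_route_output_flow()'s out-parameter style gave way to helpers that return the rtable directly, using ERR_PTR() rather than a NULL-plus-errno convention; hence the IS_ERR() test. The assumed helper shape (include/net/route.h):

	/*
	 * struct rtable *ip_route_output_ports(struct net *net,
	 *			struct flowi4 *fl4, struct sock *sk,
	 *			__be32 daddr, __be32 saddr,
	 *			__be16 dport, __be16 sport,
	 *			__u8 proto, __u8 tos, int oif);
	 *
	 * Never returns NULL: on failure the result is ERR_PTR(-errno),
	 * so callers must check with IS_ERR().
	 */
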
@@ -383,7 +369,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
 					  16)) | FW_WR_FLOWID(ep->hwtid));
 
 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
-	flowc->mnemval[0].val = cpu_to_be32(0);
+	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
 	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
 	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
 	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
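
The FLOWC work request used to report PF/VF 0 to the firmware. It now carries the device's actual PCI function; the shift by 8 suggests a PFNVFN layout with the PF number above the VF number (an assumption drawn from the shift itself; no VF is encoded here):

	/* Assumed encoding: PFNVFN = (PF << 8) | VF, with VF == 0 here. */
	u8 pf = PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn);

	flowc->mnemval[0].val = cpu_to_be32(pf << 8);
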
@@ -485,6 +471,7 @@ static int send_connect(struct c4iw_ep *ep)
 	       TX_CHAN(ep->tx_chan) |
 	       SMAC_SEL(ep->smac_idx) |
 	       DSCP(ep->tos) |
+	       ULP_MODE(ULP_MODE_TCPDDP) |
 	       RCV_BUFSIZ(rcv_win>>10);
 	opt2 = RX_CHANNEL(0) |
 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
@@ -1131,7 +1118,6 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct c4iw_ep *ep;
 	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
-	unsigned long flags;
 	int release = 0;
 	unsigned int tid = GET_TID(rpl);
 	struct tid_info *t = dev->rdev.lldi.tids;
@@ -1139,7 +1125,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	BUG_ON(!ep);
-	spin_lock_irqsave(&ep->com.lock, flags);
+	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case ABORTING:
 		__state_set(&ep->com, DEAD);
@@ -1150,7 +1136,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		       __func__, ep, ep->com.state);
 		break;
 	}
-	spin_unlock_irqrestore(&ep->com.lock, flags);
+	mutex_unlock(&ep->com.mutex);
 
 	if (release)
 		release_ep_resources(ep);
@@ -1213,9 +1199,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	PDBG("%s ep %p status %d error %d\n", __func__, ep,
 	     rpl->status, status2errno(rpl->status));
-	ep->com.rpl_err = status2errno(rpl->status);
-	ep->com.rpl_done = 1;
-	wake_up(&ep->com.waitq);
+	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 
 	return 0;
 }
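
pass_open_rpl() above and close_listsrv_rpl() below now signal completion through c4iw_wake_up(). A sketch of the helper pair this diff assumes, consistent with its call sites (the actual definitions belong in iw_cxgb4.h):

	struct c4iw_wr_wait {
		wait_queue_head_t wait;
		int done;
		int ret;
	};

	static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
	{
		wr_waitp->ret = 0;
		wr_waitp->done = 0;
		init_waitqueue_head(&wr_waitp->wait);
	}

	static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
	{
		wr_waitp->ret = ret;
		wr_waitp->done = 1;
		wake_up(&wr_waitp->wait);
	}
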
@@ -1249,9 +1233,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
 
 	PDBG("%s ep %p\n", __func__, ep);
-	ep->com.rpl_err = status2errno(rpl->status);
-	ep->com.rpl_done = 1;
-	wake_up(&ep->com.waitq);
+	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 	return 0;
 }
 
@@ -1278,6 +1260,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
 	       TX_CHAN(ep->tx_chan) |
 	       SMAC_SEL(ep->smac_idx) |
 	       DSCP(ep->tos) |
+	       ULP_MODE(ULP_MODE_TCPDDP) |
 	       RCV_BUFSIZ(rcv_win>>10);
 	opt2 = RX_CHANNEL(0) |
 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
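
Both open paths now set the same opt0 bit: send_connect() for active opens (earlier hunk) and accept_cr() here for passive ones. ULP_MODE selects the offload personality of the hardware TID, and ULP_MODE_TCPDDP requests TCP direct data placement for the connection. Sketch of the resulting option word (the fields above the hunk context are assumed):

	opt0 = KEEP_ALIVE(1) |			/* assumed, from context */
	       WND_SCALE(wscale) |		/* assumed, from context */
	       MSS_IDX(mtu_idx) |		/* assumed, from context */
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       ULP_MODE(ULP_MODE_TCPDDP) |	/* new: enable DDP */
	       RCV_BUFSIZ(rcv_win >> 10);
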
@@ -1478,18 +1461,17 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct cpl_peer_close *hdr = cplhdr(skb);
 	struct c4iw_ep *ep;
 	struct c4iw_qp_attributes attrs;
-	unsigned long flags;
 	int disconnect = 1;
 	int release = 0;
-	int closing = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(hdr);
+	int ret;
 
 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	dst_confirm(ep->dst);
 
-	spin_lock_irqsave(&ep->com.lock, flags);
+	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case MPA_REQ_WAIT:
 		__state_set(&ep->com, CLOSING);
@@ -1507,23 +1489,24 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		 * in rdma connection migration (see c4iw_accept_cr()).
 		 */
 		__state_set(&ep->com, CLOSING);
-		ep->com.rpl_done = 1;
-		ep->com.rpl_err = -ECONNRESET;
 		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-		wake_up(&ep->com.waitq);
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 		break;
 	case MPA_REP_SENT:
 		__state_set(&ep->com, CLOSING);
-		ep->com.rpl_done = 1;
-		ep->com.rpl_err = -ECONNRESET;
 		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-		wake_up(&ep->com.waitq);
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 		break;
 	case FPDU_MODE:
 		start_ep_timer(ep);
 		__state_set(&ep->com, CLOSING);
-		closing = 1;
-		peer_close_upcall(ep);
+		attrs.next_state = C4IW_QP_STATE_CLOSING;
+		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+		if (ret != -ECONNRESET) {
+			peer_close_upcall(ep);
+			disconnect = 1;
+		}
 		break;
 	case ABORTING:
 		disconnect = 0;
@@ -1550,12 +1533,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	default:
 		BUG_ON(1);
 	}
-	spin_unlock_irqrestore(&ep->com.lock, flags);
-	if (closing) {
-		attrs.next_state = C4IW_QP_STATE_CLOSING;
-		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-	}
+	mutex_unlock(&ep->com.mutex);
 	if (disconnect)
 		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	if (release)
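
The peer_close() restructuring is the payoff of the mutex conversion: the QP transition no longer has to be deferred past the unlock, so the closing flag disappears and the FPDU_MODE arm calls c4iw_modify_qp() inline, skipping the upcall when the modify reports -ECONNRESET (the connection was already reset underneath us). The before/after pattern:

	/*
	 * Old (cannot sleep under a spinlock):
	 *	spin_lock_irqsave(&ep->com.lock, flags);
	 *	...
	 *	closing = 1;
	 *	spin_unlock_irqrestore(&ep->com.lock, flags);
	 *	if (closing)
	 *		c4iw_modify_qp(...);	(may sleep)
	 *
	 * New (a mutex permits sleeping inside the critical section):
	 *	mutex_lock(&ep->com.mutex);
	 *	...
	 *	ret = c4iw_modify_qp(...);	(inline)
	 *	mutex_unlock(&ep->com.mutex);
	 */
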
@@ -1581,7 +1559,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	int ret;
 	int release = 0;
-	unsigned long flags;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(req);
 
@@ -1591,9 +1568,15 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 			ep->hwtid);
 		return 0;
 	}
-	spin_lock_irqsave(&ep->com.lock, flags);
 	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
 	     ep->com.state);
+
+	/*
+	 * Wake up any threads in rdma_init() or rdma_fini().
+	 */
+	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+
+	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case CONNECTING:
 		break;
@@ -1605,23 +1588,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 		connect_reply_upcall(ep, -ECONNRESET);
 		break;
 	case MPA_REP_SENT:
-		ep->com.rpl_done = 1;
-		ep->com.rpl_err = -ECONNRESET;
-		PDBG("waking up ep %p\n", ep);
-		wake_up(&ep->com.waitq);
 		break;
 	case MPA_REQ_RCVD:
-
-		/*
-		 * We're gonna mark this puppy DEAD, but keep
-		 * the reference on it until the ULP accepts or
-		 * rejects the CR. Also wake up anyone waiting
-		 * in rdma connection migration (see c4iw_accept_cr()).
-		 */
-		ep->com.rpl_done = 1;
-		ep->com.rpl_err = -ECONNRESET;
-		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-		wake_up(&ep->com.waitq);
 		break;
 	case MORIBUND:
 	case CLOSING:
@@ -1644,7 +1612,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 		break;
 	case DEAD:
 		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
-		spin_unlock_irqrestore(&ep->com.lock, flags);
+		mutex_unlock(&ep->com.mutex);
 		return 0;
 	default:
 		BUG_ON(1);
@@ -1655,7 +1623,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 		__state_set(&ep->com, DEAD);
 		release = 1;
 	}
-	spin_unlock_irqrestore(&ep->com.lock, flags);
+	mutex_unlock(&ep->com.mutex);
 
 	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
 	if (!rpl_skb) {
@@ -1681,7 +1649,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_ep *ep;
 	struct c4iw_qp_attributes attrs;
 	struct cpl_close_con_rpl *rpl = cplhdr(skb);
-	unsigned long flags;
 	int release = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(rpl);
@@ -1692,7 +1659,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	BUG_ON(!ep);
 
 	/* The cm_id may be null if we failed to connect */
-	spin_lock_irqsave(&ep->com.lock, flags);
+	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case CLOSING:
 		__state_set(&ep->com, MORIBUND);
@@ -1717,7 +1684,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		BUG_ON(1);
 		break;
 	}
-	spin_unlock_irqrestore(&ep->com.lock, flags);
+	mutex_unlock(&ep->com.mutex);
 	if (release)
 		release_ep_resources(ep);
 	return 0;
@@ -1725,23 +1692,24 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 
 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
 {
-	struct c4iw_ep *ep;
-	struct cpl_rdma_terminate *term = cplhdr(skb);
+	struct cpl_rdma_terminate *rpl = cplhdr(skb);
 	struct tid_info *t = dev->rdev.lldi.tids;
-	unsigned int tid = GET_TID(term);
+	unsigned int tid = GET_TID(rpl);
+	struct c4iw_ep *ep;
+	struct c4iw_qp_attributes attrs;
 
 	ep = lookup_tid(t, tid);
+	BUG_ON(!ep);
 
-	if (state_read(&ep->com) != FPDU_MODE)
-		return 0;
+	if (ep && ep->com.qp) {
+		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
+		       ep->com.qp->wq.sq.qid);
+		attrs.next_state = C4IW_QP_STATE_TERMINATE;
+		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+	} else
+		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
 
-	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	skb_pull(skb, sizeof *term);
-	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
-	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
-				  skb->len);
-	ep->com.qp->attr.terminate_msg_len = skb->len;
-	ep->com.qp->attr.is_terminate_local = 0;
 	return 0;
 }
 
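terminate() changes semantics: rather than copying the TERMINATE payload into qp->attr for later delivery (terminate_buffer, terminate_msg_len and is_terminate_local go away), the handler immediately drives the QP into TERMINATE state. The transition request, isolated as a sketch (the final argument of c4iw_modify_qp() marks a driver-internal transition):

	struct c4iw_qp_attributes attrs = {
		.next_state = C4IW_QP_STATE_TERMINATE,
	};

	c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
		       1 /* internal */);
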
@@ -1762,8 +1730,8 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
 	if (credits == 0) {
-		PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
-		     __func__, ep, ep->hwtid, state_read(&ep->com));
+		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
+		     __func__, ep, ep->hwtid, state_read(&ep->com));
 		return 0;
 	}
 
@@ -2042,6 +2010,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	}
 
 	state_set(&ep->com, LISTEN);
+	c4iw_init_wr_wait(&ep->com.wr_wait);
 	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
 				  ep->com.local_addr.sin_addr.s_addr,
 				  ep->com.local_addr.sin_port,
@@ -2050,15 +2019,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 		goto fail3;
 
 	/* wait for pass_open_rpl */
-	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
-	if (ep->com.rpl_done)
-		err = ep->com.rpl_err;
-	else {
-		printk(KERN_ERR MOD "Device %s not responding!\n",
-		       pci_name(ep->com.dev->rdev.lldi.pdev));
-		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
-		err = -EIO;
-	}
+	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
+				  __func__);
 	if (!err) {
 		cm_id->provider_data = ep;
 		goto out;
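
The open-coded timeout/fatal-error block is folded into a shared helper here and in c4iw_destroy_listen() below. Assumed behaviour, matching the code it replaces: c4iw_wait_for_reply() sleeps on the wr_wait until c4iw_wake_up() posts a status, and on timeout logs the unresponsive device, sets T4_FATAL_ERROR, and returns -EIO. The two zero arguments are presumably a hwtid/qpid used only for logging:

	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
				  0 /* hwtid */, 0 /* qpid */, __func__);
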
@@ -2082,20 +2044,12 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 
 	might_sleep();
 	state_set(&ep->com, DEAD);
-	ep->com.rpl_done = 0;
-	ep->com.rpl_err = 0;
+	c4iw_init_wr_wait(&ep->com.wr_wait);
 	err = listen_stop(ep);
 	if (err)
 		goto done;
-	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
-	if (ep->com.rpl_done)
-		err = ep->com.rpl_err;
-	else {
-		printk(KERN_ERR MOD "Device %s not responding!\n",
-		       pci_name(ep->com.dev->rdev.lldi.pdev));
-		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
-		err = -EIO;
-	}
+	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
+				  __func__);
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
 done:
 	cm_id->rem_ref(cm_id);
@@ -2106,12 +2060,11 @@ done:
 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 {
 	int ret = 0;
-	unsigned long flags;
 	int close = 0;
 	int fatal = 0;
 	struct c4iw_rdev *rdev;
 
-	spin_lock_irqsave(&ep->com.lock, flags);
+	mutex_lock(&ep->com.mutex);
 
 	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
 	     states[ep->com.state], abrupt);
@@ -2158,20 +2111,28 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 		break;
 	}
 
-	spin_unlock_irqrestore(&ep->com.lock, flags);
 	if (close) {
-		if (abrupt)
-			ret = abort_connection(ep, NULL, gfp);
-		else
+		if (abrupt) {
+			close_complete_upcall(ep);
+			ret = send_abort(ep, NULL, gfp);
+		} else
 			ret = send_halfclose(ep, gfp);
 		if (ret)
 			fatal = 1;
 	}
+	mutex_unlock(&ep->com.mutex);
 	if (fatal)
 		release_ep_resources(ep);
 	return ret;
 }
 
+static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+	struct cpl_fw6_msg *rpl = cplhdr(skb);
+	c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+	return 0;
+}
+
 /*
  * These are the real handlers that are called from a
  * work queue.
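
Two things happen in this hunk. First, c4iw_ep_disconnect() now holds the mutex across the send, and abort_connection() is replaced by its open-coded body, close_complete_upcall() plus send_abort(); a plausible reason (inferred, not stated in the diff) is that abort_connection() sets the state via state_set(), which would re-take ep->com.mutex:

	/*
	 *	mutex_lock(&ep->com.mutex);
	 *	...
	 *	abort_connection(ep, NULL, gfp);
	 *	  -> state_set(&ep->com, ABORTING)
	 *	       -> mutex_lock(&ep->com.mutex);	self-deadlock
	 */

Second, the new async_event() simply unwraps a CPL_FW6_MSG and hands the embedded CQE to c4iw_ev_dispatch() from process context; see the dispatch-table note after the next hunk.
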
@@ -2190,7 +2151,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
 	[CPL_ABORT_REQ_RSS] = peer_abort,
 	[CPL_CLOSE_CON_RPL] = close_con_rpl,
 	[CPL_RDMA_TERMINATE] = terminate,
-	[CPL_FW4_ACK] = fw4_ack
+	[CPL_FW4_ACK] = fw4_ack,
+	[CPL_FW6_MSG] = async_event
 };
 
 static void process_timeout(struct c4iw_ep *ep)
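
With [CPL_FW6_MSG] = async_event added here, the two dispatch tables diverge: the interrupt-level table (last hunk of this diff) keeps fw6_msg(), which completes cheap type-1 wake-ups inline and sched()s type-2 async events over to this workqueue table, where async_event() finally runs c4iw_ev_dispatch() in a context that may sleep:

	/*
	 * CPL_FW6_MSG routing:
	 *   IRQ:   c4iw_handlers[CPL_FW6_MSG] = fw6_msg
	 *            type 1 -> c4iw_wake_up()	(atomic-safe)
	 *            type 2 -> sched(dev, skb)	(defer to workq)
	 *   workq: work_handlers[CPL_FW6_MSG] = async_event
	 *            -> c4iw_ev_dispatch()	(may sleep)
	 */
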
@@ -2198,7 +2160,7 @@ static void process_timeout(struct c4iw_ep *ep)
 	struct c4iw_qp_attributes attrs;
 	int abort = 1;
 
-	spin_lock_irq(&ep->com.lock);
+	mutex_lock(&ep->com.mutex);
 	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
 	     ep->com.state);
 	switch (ep->com.state) {
@@ -2225,7 +2187,7 @@ static void process_timeout(struct c4iw_ep *ep)
 		WARN_ON(1);
 		abort = 0;
 	}
-	spin_unlock_irq(&ep->com.lock);
+	mutex_unlock(&ep->com.mutex);
 	if (abort)
 		abort_connection(ep, NULL, GFP_KERNEL);
 	c4iw_put_ep(&ep->com);
@@ -2309,6 +2271,7 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
 		       "for tid %u\n", rpl->status, GET_TID(rpl));
 	}
+	kfree_skb(skb);
 	return 0;
 }
 
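The added kfree_skb() plugs a leak: set_tcb_rpl() is wired directly into c4iw_handlers[] and never passes through sched(), so nothing downstream would free the skb. fw6_msg() below follows the same convention, freeing the skb on every path that does not hand it off via sched():

	/* Ownership rule implied by this diff: a handler that does not
	 * pass the skb to sched() must consume it before returning. */
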
@@ -2323,25 +2286,49 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 	switch (rpl->type) {
 	case 1:
 		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
-		wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
+		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
 		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
-		if (wr_waitp) {
-			wr_waitp->ret = ret;
-			wr_waitp->done = 1;
-			wake_up(&wr_waitp->wait);
-		}
+		if (wr_waitp)
+			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
+		kfree_skb(skb);
 		break;
 	case 2:
-		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+		sched(dev, skb);
 		break;
 	default:
 		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
 		       rpl->type);
+		kfree_skb(skb);
 		break;
 	}
 	return 0;
 }
 
+static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+	struct cpl_abort_req_rss *req = cplhdr(skb);
+	struct c4iw_ep *ep;
+	struct tid_info *t = dev->rdev.lldi.tids;
+	unsigned int tid = GET_TID(req);
+
+	ep = lookup_tid(t, tid);
+	if (is_neg_adv_abort(req->status)) {
+		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
+		     ep->hwtid);
+		kfree_skb(skb);
+		return 0;
+	}
+	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+	     ep->com.state);
+
+	/*
+	 * Wake up any threads in rdma_init() or rdma_fini().
+	 */
+	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+	sched(dev, skb);
+	return 0;
+}
+
 /*
  * Most upcalls from the T4 Core go to sched() to
  * schedule the processing on a work queue.
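
peer_abort_intr() splits abort handling into two stages. Per the comment it carries, the wake-up must happen at interrupt level: a thread can be blocked in rdma_init()/rdma_fini() waiting on this wr_wait from inside another handler on the same workqueue, and deferring the wake-up to peer_abort() would stall it until the reply timeout:

	/*
	 * Two-stage abort handling:
	 *   IRQ:   peer_abort_intr()  - wake wr_wait waiters immediately,
	 *                               then sched(dev, skb)
	 *   workq: peer_abort()       - full state-machine processing
	 *                               under ep->com.mutex
	 */
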
@@ -2358,7 +2345,7 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
 	[CPL_PASS_ESTABLISH] = sched,
 	[CPL_PEER_CLOSE] = sched,
 	[CPL_CLOSE_CON_RPL] = sched,
-	[CPL_ABORT_REQ_RSS] = sched,
+	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
 	[CPL_RDMA_TERMINATE] = sched,
 	[CPL_FW4_ACK] = sched,
 	[CPL_SET_TCB_RPL] = set_tcb_rpl,