Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/hw/cxgb4/Kconfig | 6
-rw-r--r-- | drivers/infiniband/hw/cxgb4/cm.c | 128
-rw-r--r-- | drivers/infiniband/hw/cxgb4/cq.c | 24
-rw-r--r-- | drivers/infiniband/hw/cxgb4/device.c | 41
-rw-r--r-- | drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 3
-rw-r--r-- | drivers/infiniband/hw/cxgb4/mem.c | 6
-rw-r--r-- | drivers/infiniband/hw/cxgb4/provider.c | 2
-rw-r--r-- | drivers/infiniband/hw/cxgb4/qp.c | 81
-rw-r--r-- | drivers/infiniband/hw/cxgb4/resource.c | 10
-rw-r--r-- | drivers/infiniband/hw/cxgb4/t4.h | 72
-rw-r--r-- | drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | 14
-rw-r--r-- | drivers/infiniband/hw/mlx4/main.c | 67
-rw-r--r-- | drivers/infiniband/hw/mlx4/mlx4_ib.h | 3
-rw-r--r-- | drivers/infiniband/hw/mlx4/qp.c | 8
-rw-r--r-- | drivers/infiniband/hw/mlx5/main.c | 2
-rw-r--r-- | drivers/infiniband/hw/mlx5/qp.c | 12
-rw-r--r-- | drivers/infiniband/hw/mthca/mthca_main.c | 8
-rw-r--r-- | drivers/infiniband/hw/qib/qib_pcie.c | 55
-rw-r--r-- | drivers/infiniband/ulp/isert/ib_isert.c | 38
-rw-r--r-- | drivers/infiniband/ulp/isert/ib_isert.h | 2
20 files changed, 415 insertions(+), 167 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index d4e8983fba53..23f38cf2c5cd 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,10 +1,10 @@ | |||
1 | config INFINIBAND_CXGB4 | 1 | config INFINIBAND_CXGB4 |
2 | tristate "Chelsio T4 RDMA Driver" | 2 | tristate "Chelsio T4/T5 RDMA Driver" |
3 | depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n) | 3 | depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n) |
4 | select GENERIC_ALLOCATOR | 4 | select GENERIC_ALLOCATOR |
5 | ---help--- | 5 | ---help--- |
6 | This is an iWARP/RDMA driver for the Chelsio T4 1GbE and | 6 | This is an iWARP/RDMA driver for the Chelsio T4 and T5 |
7 | 10GbE adapters. | 7 | 1GbE, 10GbE adapters and T5 40GbE adapter. |
8 | 8 | ||
9 | For general information about Chelsio and our products, visit | 9 | For general information about Chelsio and our products, visit |
10 | our website at <http://www.chelsio.com>. | 10 | our website at <http://www.chelsio.com>. |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 02436d5d0dab..1f863a96a480 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -173,12 +173,15 @@ static void start_ep_timer(struct c4iw_ep *ep) | |||
173 | add_timer(&ep->timer); | 173 | add_timer(&ep->timer); |
174 | } | 174 | } |
175 | 175 | ||
176 | static void stop_ep_timer(struct c4iw_ep *ep) | 176 | static int stop_ep_timer(struct c4iw_ep *ep) |
177 | { | 177 | { |
178 | PDBG("%s ep %p stopping\n", __func__, ep); | 178 | PDBG("%s ep %p stopping\n", __func__, ep); |
179 | del_timer_sync(&ep->timer); | 179 | del_timer_sync(&ep->timer); |
180 | if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) | 180 | if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { |
181 | c4iw_put_ep(&ep->com); | 181 | c4iw_put_ep(&ep->com); |
182 | return 0; | ||
183 | } | ||
184 | return 1; | ||
182 | } | 185 | } |
183 | 186 | ||
184 | static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, | 187 | static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, |
@@ -584,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep) | |||
584 | opt2 |= SACK_EN(1); | 587 | opt2 |= SACK_EN(1); |
585 | if (wscale && enable_tcp_window_scaling) | 588 | if (wscale && enable_tcp_window_scaling) |
586 | opt2 |= WND_SCALE_EN(1); | 589 | opt2 |= WND_SCALE_EN(1); |
590 | if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { | ||
591 | opt2 |= T5_OPT_2_VALID; | ||
592 | opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); | ||
593 | } | ||
587 | t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); | 594 | t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); |
588 | 595 | ||
589 | if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { | 596 | if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { |
@@ -993,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status) | |||
993 | static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) | 1000 | static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) |
994 | { | 1001 | { |
995 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 1002 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
996 | state_set(&ep->com, ABORTING); | 1003 | __state_set(&ep->com, ABORTING); |
997 | set_bit(ABORT_CONN, &ep->com.history); | 1004 | set_bit(ABORT_CONN, &ep->com.history); |
998 | return send_abort(ep, skb, gfp); | 1005 | return send_abort(ep, skb, gfp); |
999 | } | 1006 | } |
@@ -1151,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits) | |||
1151 | return credits; | 1158 | return credits; |
1152 | } | 1159 | } |
1153 | 1160 | ||
1154 | static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | 1161 | static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) |
1155 | { | 1162 | { |
1156 | struct mpa_message *mpa; | 1163 | struct mpa_message *mpa; |
1157 | struct mpa_v2_conn_params *mpa_v2_params; | 1164 | struct mpa_v2_conn_params *mpa_v2_params; |
@@ -1161,17 +1168,17 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1161 | struct c4iw_qp_attributes attrs; | 1168 | struct c4iw_qp_attributes attrs; |
1162 | enum c4iw_qp_attr_mask mask; | 1169 | enum c4iw_qp_attr_mask mask; |
1163 | int err; | 1170 | int err; |
1171 | int disconnect = 0; | ||
1164 | 1172 | ||
1165 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 1173 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
1166 | 1174 | ||
1167 | /* | 1175 | /* |
1168 | * Stop mpa timer. If it expired, then the state has | 1176 | * Stop mpa timer. If it expired, then |
1169 | * changed and we bail since ep_timeout already aborted | 1177 | * we ignore the MPA reply. process_timeout() |
1170 | * the connection. | 1178 | * will abort the connection. |
1171 | */ | 1179 | */ |
1172 | stop_ep_timer(ep); | 1180 | if (stop_ep_timer(ep)) |
1173 | if (ep->com.state != MPA_REQ_SENT) | 1181 | return 0; |
1174 | return; | ||
1175 | 1182 | ||
1176 | /* | 1183 | /* |
1177 | * If we get more than the supported amount of private data | 1184 | * If we get more than the supported amount of private data |
@@ -1193,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1193 | * if we don't even have the mpa message, then bail. | 1200 | * if we don't even have the mpa message, then bail. |
1194 | */ | 1201 | */ |
1195 | if (ep->mpa_pkt_len < sizeof(*mpa)) | 1202 | if (ep->mpa_pkt_len < sizeof(*mpa)) |
1196 | return; | 1203 | return 0; |
1197 | mpa = (struct mpa_message *) ep->mpa_pkt; | 1204 | mpa = (struct mpa_message *) ep->mpa_pkt; |
1198 | 1205 | ||
1199 | /* Validate MPA header. */ | 1206 | /* Validate MPA header. */ |
@@ -1233,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1233 | * We'll continue process when more data arrives. | 1240 | * We'll continue process when more data arrives. |
1234 | */ | 1241 | */ |
1235 | if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) | 1242 | if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) |
1236 | return; | 1243 | return 0; |
1237 | 1244 | ||
1238 | if (mpa->flags & MPA_REJECT) { | 1245 | if (mpa->flags & MPA_REJECT) { |
1239 | err = -ECONNREFUSED; | 1246 | err = -ECONNREFUSED; |
@@ -1335,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1335 | attrs.layer_etype = LAYER_MPA | DDP_LLP; | 1342 | attrs.layer_etype = LAYER_MPA | DDP_LLP; |
1336 | attrs.ecode = MPA_NOMATCH_RTR; | 1343 | attrs.ecode = MPA_NOMATCH_RTR; |
1337 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | 1344 | attrs.next_state = C4IW_QP_STATE_TERMINATE; |
1345 | attrs.send_term = 1; | ||
1338 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | 1346 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
1339 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | 1347 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
1340 | err = -ENOMEM; | 1348 | err = -ENOMEM; |
1349 | disconnect = 1; | ||
1341 | goto out; | 1350 | goto out; |
1342 | } | 1351 | } |
1343 | 1352 | ||
@@ -1353,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1353 | attrs.layer_etype = LAYER_MPA | DDP_LLP; | 1362 | attrs.layer_etype = LAYER_MPA | DDP_LLP; |
1354 | attrs.ecode = MPA_INSUFF_IRD; | 1363 | attrs.ecode = MPA_INSUFF_IRD; |
1355 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | 1364 | attrs.next_state = C4IW_QP_STATE_TERMINATE; |
1365 | attrs.send_term = 1; | ||
1356 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | 1366 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
1357 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | 1367 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
1358 | err = -ENOMEM; | 1368 | err = -ENOMEM; |
1369 | disconnect = 1; | ||
1359 | goto out; | 1370 | goto out; |
1360 | } | 1371 | } |
1361 | goto out; | 1372 | goto out; |
@@ -1364,7 +1375,7 @@ err: | |||
1364 | send_abort(ep, skb, GFP_KERNEL); | 1375 | send_abort(ep, skb, GFP_KERNEL); |
1365 | out: | 1376 | out: |
1366 | connect_reply_upcall(ep, err); | 1377 | connect_reply_upcall(ep, err); |
1367 | return; | 1378 | return disconnect; |
1368 | } | 1379 | } |
1369 | 1380 | ||
1370 | static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | 1381 | static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) |
@@ -1375,15 +1386,12 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1375 | 1386 | ||
1376 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 1387 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
1377 | 1388 | ||
1378 | if (ep->com.state != MPA_REQ_WAIT) | ||
1379 | return; | ||
1380 | |||
1381 | /* | 1389 | /* |
1382 | * If we get more than the supported amount of private data | 1390 | * If we get more than the supported amount of private data |
1383 | * then we must fail this connection. | 1391 | * then we must fail this connection. |
1384 | */ | 1392 | */ |
1385 | if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { | 1393 | if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { |
1386 | stop_ep_timer(ep); | 1394 | (void)stop_ep_timer(ep); |
1387 | abort_connection(ep, skb, GFP_KERNEL); | 1395 | abort_connection(ep, skb, GFP_KERNEL); |
1388 | return; | 1396 | return; |
1389 | } | 1397 | } |
@@ -1413,13 +1421,13 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1413 | if (mpa->revision > mpa_rev) { | 1421 | if (mpa->revision > mpa_rev) { |
1414 | printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," | 1422 | printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," |
1415 | " Received = %d\n", __func__, mpa_rev, mpa->revision); | 1423 | " Received = %d\n", __func__, mpa_rev, mpa->revision); |
1416 | stop_ep_timer(ep); | 1424 | (void)stop_ep_timer(ep); |
1417 | abort_connection(ep, skb, GFP_KERNEL); | 1425 | abort_connection(ep, skb, GFP_KERNEL); |
1418 | return; | 1426 | return; |
1419 | } | 1427 | } |
1420 | 1428 | ||
1421 | if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { | 1429 | if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { |
1422 | stop_ep_timer(ep); | 1430 | (void)stop_ep_timer(ep); |
1423 | abort_connection(ep, skb, GFP_KERNEL); | 1431 | abort_connection(ep, skb, GFP_KERNEL); |
1424 | return; | 1432 | return; |
1425 | } | 1433 | } |
@@ -1430,7 +1438,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1430 | * Fail if there's too much private data. | 1438 | * Fail if there's too much private data. |
1431 | */ | 1439 | */ |
1432 | if (plen > MPA_MAX_PRIVATE_DATA) { | 1440 | if (plen > MPA_MAX_PRIVATE_DATA) { |
1433 | stop_ep_timer(ep); | 1441 | (void)stop_ep_timer(ep); |
1434 | abort_connection(ep, skb, GFP_KERNEL); | 1442 | abort_connection(ep, skb, GFP_KERNEL); |
1435 | return; | 1443 | return; |
1436 | } | 1444 | } |
@@ -1439,7 +1447,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1439 | * If plen does not account for pkt size | 1447 | * If plen does not account for pkt size |
1440 | */ | 1448 | */ |
1441 | if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { | 1449 | if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { |
1442 | stop_ep_timer(ep); | 1450 | (void)stop_ep_timer(ep); |
1443 | abort_connection(ep, skb, GFP_KERNEL); | 1451 | abort_connection(ep, skb, GFP_KERNEL); |
1444 | return; | 1452 | return; |
1445 | } | 1453 | } |
@@ -1496,18 +1504,24 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1496 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, | 1504 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, |
1497 | ep->mpa_attr.p2p_type); | 1505 | ep->mpa_attr.p2p_type); |
1498 | 1506 | ||
1499 | __state_set(&ep->com, MPA_REQ_RCVD); | 1507 | /* |
1500 | stop_ep_timer(ep); | 1508 | * If the endpoint timer already expired, then we ignore |
1501 | 1509 | * the start request. process_timeout() will abort | |
1502 | /* drive upcall */ | 1510 | * the connection. |
1503 | mutex_lock(&ep->parent_ep->com.mutex); | 1511 | */ |
1504 | if (ep->parent_ep->com.state != DEAD) { | 1512 | if (!stop_ep_timer(ep)) { |
1505 | if (connect_request_upcall(ep)) | 1513 | __state_set(&ep->com, MPA_REQ_RCVD); |
1514 | |||
1515 | /* drive upcall */ | ||
1516 | mutex_lock(&ep->parent_ep->com.mutex); | ||
1517 | if (ep->parent_ep->com.state != DEAD) { | ||
1518 | if (connect_request_upcall(ep)) | ||
1519 | abort_connection(ep, skb, GFP_KERNEL); | ||
1520 | } else { | ||
1506 | abort_connection(ep, skb, GFP_KERNEL); | 1521 | abort_connection(ep, skb, GFP_KERNEL); |
1507 | } else { | 1522 | } |
1508 | abort_connection(ep, skb, GFP_KERNEL); | 1523 | mutex_unlock(&ep->parent_ep->com.mutex); |
1509 | } | 1524 | } |
1510 | mutex_unlock(&ep->parent_ep->com.mutex); | ||
1511 | return; | 1525 | return; |
1512 | } | 1526 | } |
1513 | 1527 | ||
@@ -1519,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1519 | unsigned int tid = GET_TID(hdr); | 1533 | unsigned int tid = GET_TID(hdr); |
1520 | struct tid_info *t = dev->rdev.lldi.tids; | 1534 | struct tid_info *t = dev->rdev.lldi.tids; |
1521 | __u8 status = hdr->status; | 1535 | __u8 status = hdr->status; |
1536 | int disconnect = 0; | ||
1522 | 1537 | ||
1523 | ep = lookup_tid(t, tid); | 1538 | ep = lookup_tid(t, tid); |
1524 | if (!ep) | 1539 | if (!ep) |
@@ -1534,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1534 | switch (ep->com.state) { | 1549 | switch (ep->com.state) { |
1535 | case MPA_REQ_SENT: | 1550 | case MPA_REQ_SENT: |
1536 | ep->rcv_seq += dlen; | 1551 | ep->rcv_seq += dlen; |
1537 | process_mpa_reply(ep, skb); | 1552 | disconnect = process_mpa_reply(ep, skb); |
1538 | break; | 1553 | break; |
1539 | case MPA_REQ_WAIT: | 1554 | case MPA_REQ_WAIT: |
1540 | ep->rcv_seq += dlen; | 1555 | ep->rcv_seq += dlen; |
@@ -1550,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1550 | ep->com.state, ep->hwtid, status); | 1565 | ep->com.state, ep->hwtid, status); |
1551 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | 1566 | attrs.next_state = C4IW_QP_STATE_TERMINATE; |
1552 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | 1567 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
1553 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | 1568 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
1569 | disconnect = 1; | ||
1554 | break; | 1570 | break; |
1555 | } | 1571 | } |
1556 | default: | 1572 | default: |
1557 | break; | 1573 | break; |
1558 | } | 1574 | } |
1559 | mutex_unlock(&ep->com.mutex); | 1575 | mutex_unlock(&ep->com.mutex); |
1576 | if (disconnect) | ||
1577 | c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | ||
1560 | return 0; | 1578 | return 0; |
1561 | } | 1579 | } |
1562 | 1580 | ||
@@ -2004,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, | |||
2004 | if (tcph->ece && tcph->cwr) | 2022 | if (tcph->ece && tcph->cwr) |
2005 | opt2 |= CCTRL_ECN(1); | 2023 | opt2 |= CCTRL_ECN(1); |
2006 | } | 2024 | } |
2025 | if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { | ||
2026 | opt2 |= T5_OPT_2_VALID; | ||
2027 | opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); | ||
2028 | } | ||
2007 | 2029 | ||
2008 | rpl = cplhdr(skb); | 2030 | rpl = cplhdr(skb); |
2009 | INIT_TP_WR(rpl, ep->hwtid); | 2031 | INIT_TP_WR(rpl, ep->hwtid); |
@@ -2265,7 +2287,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |||
2265 | disconnect = 0; | 2287 | disconnect = 0; |
2266 | break; | 2288 | break; |
2267 | case MORIBUND: | 2289 | case MORIBUND: |
2268 | stop_ep_timer(ep); | 2290 | (void)stop_ep_timer(ep); |
2269 | if (ep->com.cm_id && ep->com.qp) { | 2291 | if (ep->com.cm_id && ep->com.qp) { |
2270 | attrs.next_state = C4IW_QP_STATE_IDLE; | 2292 | attrs.next_state = C4IW_QP_STATE_IDLE; |
2271 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | 2293 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
@@ -2325,10 +2347,10 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
2325 | case CONNECTING: | 2347 | case CONNECTING: |
2326 | break; | 2348 | break; |
2327 | case MPA_REQ_WAIT: | 2349 | case MPA_REQ_WAIT: |
2328 | stop_ep_timer(ep); | 2350 | (void)stop_ep_timer(ep); |
2329 | break; | 2351 | break; |
2330 | case MPA_REQ_SENT: | 2352 | case MPA_REQ_SENT: |
2331 | stop_ep_timer(ep); | 2353 | (void)stop_ep_timer(ep); |
2332 | if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1)) | 2354 | if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1)) |
2333 | connect_reply_upcall(ep, -ECONNRESET); | 2355 | connect_reply_upcall(ep, -ECONNRESET); |
2334 | else { | 2356 | else { |
@@ -2433,7 +2455,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
2433 | __state_set(&ep->com, MORIBUND); | 2455 | __state_set(&ep->com, MORIBUND); |
2434 | break; | 2456 | break; |
2435 | case MORIBUND: | 2457 | case MORIBUND: |
2436 | stop_ep_timer(ep); | 2458 | (void)stop_ep_timer(ep); |
2437 | if ((ep->com.cm_id) && (ep->com.qp)) { | 2459 | if ((ep->com.cm_id) && (ep->com.qp)) { |
2438 | attrs.next_state = C4IW_QP_STATE_IDLE; | 2460 | attrs.next_state = C4IW_QP_STATE_IDLE; |
2439 | c4iw_modify_qp(ep->com.qp->rhp, | 2461 | c4iw_modify_qp(ep->com.qp->rhp, |
@@ -3028,7 +3050,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) | |||
3028 | if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { | 3050 | if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { |
3029 | close = 1; | 3051 | close = 1; |
3030 | if (abrupt) { | 3052 | if (abrupt) { |
3031 | stop_ep_timer(ep); | 3053 | (void)stop_ep_timer(ep); |
3032 | ep->com.state = ABORTING; | 3054 | ep->com.state = ABORTING; |
3033 | } else | 3055 | } else |
3034 | ep->com.state = MORIBUND; | 3056 | ep->com.state = MORIBUND; |
@@ -3462,14 +3484,24 @@ static void process_timeout(struct c4iw_ep *ep) | |||
3462 | __state_set(&ep->com, ABORTING); | 3484 | __state_set(&ep->com, ABORTING); |
3463 | close_complete_upcall(ep, -ETIMEDOUT); | 3485 | close_complete_upcall(ep, -ETIMEDOUT); |
3464 | break; | 3486 | break; |
3487 | case ABORTING: | ||
3488 | case DEAD: | ||
3489 | |||
3490 | /* | ||
3491 | * These states are expected if the ep timed out at the same | ||
3492 | * time as another thread was calling stop_ep_timer(). | ||
3493 | * So we silently do nothing for these states. | ||
3494 | */ | ||
3495 | abort = 0; | ||
3496 | break; | ||
3465 | default: | 3497 | default: |
3466 | WARN(1, "%s unexpected state ep %p tid %u state %u\n", | 3498 | WARN(1, "%s unexpected state ep %p tid %u state %u\n", |
3467 | __func__, ep, ep->hwtid, ep->com.state); | 3499 | __func__, ep, ep->hwtid, ep->com.state); |
3468 | abort = 0; | 3500 | abort = 0; |
3469 | } | 3501 | } |
3470 | mutex_unlock(&ep->com.mutex); | ||
3471 | if (abort) | 3502 | if (abort) |
3472 | abort_connection(ep, NULL, GFP_KERNEL); | 3503 | abort_connection(ep, NULL, GFP_KERNEL); |
3504 | mutex_unlock(&ep->com.mutex); | ||
3473 | c4iw_put_ep(&ep->com); | 3505 | c4iw_put_ep(&ep->com); |
3474 | } | 3506 | } |
3475 | 3507 | ||
@@ -3483,6 +3515,8 @@ static void process_timedout_eps(void) | |||
3483 | 3515 | ||
3484 | tmp = timeout_list.next; | 3516 | tmp = timeout_list.next; |
3485 | list_del(tmp); | 3517 | list_del(tmp); |
3518 | tmp->next = NULL; | ||
3519 | tmp->prev = NULL; | ||
3486 | spin_unlock_irq(&timeout_lock); | 3520 | spin_unlock_irq(&timeout_lock); |
3487 | ep = list_entry(tmp, struct c4iw_ep, entry); | 3521 | ep = list_entry(tmp, struct c4iw_ep, entry); |
3488 | process_timeout(ep); | 3522 | process_timeout(ep); |
@@ -3499,6 +3533,7 @@ static void process_work(struct work_struct *work) | |||
3499 | unsigned int opcode; | 3533 | unsigned int opcode; |
3500 | int ret; | 3534 | int ret; |
3501 | 3535 | ||
3536 | process_timedout_eps(); | ||
3502 | while ((skb = skb_dequeue(&rxq))) { | 3537 | while ((skb = skb_dequeue(&rxq))) { |
3503 | rpl = cplhdr(skb); | 3538 | rpl = cplhdr(skb); |
3504 | dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); | 3539 | dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); |
@@ -3508,8 +3543,8 @@ static void process_work(struct work_struct *work) | |||
3508 | ret = work_handlers[opcode](dev, skb); | 3543 | ret = work_handlers[opcode](dev, skb); |
3509 | if (!ret) | 3544 | if (!ret) |
3510 | kfree_skb(skb); | 3545 | kfree_skb(skb); |
3546 | process_timedout_eps(); | ||
3511 | } | 3547 | } |
3512 | process_timedout_eps(); | ||
3513 | } | 3548 | } |
3514 | 3549 | ||
3515 | static DECLARE_WORK(skb_work, process_work); | 3550 | static DECLARE_WORK(skb_work, process_work); |
@@ -3521,8 +3556,13 @@ static void ep_timeout(unsigned long arg) | |||
3521 | 3556 | ||
3522 | spin_lock(&timeout_lock); | 3557 | spin_lock(&timeout_lock); |
3523 | if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { | 3558 | if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { |
3524 | list_add_tail(&ep->entry, &timeout_list); | 3559 | /* |
3525 | kickit = 1; | 3560 | * Only insert if it is not already on the list. |
3561 | */ | ||
3562 | if (!ep->entry.next) { | ||
3563 | list_add_tail(&ep->entry, &timeout_list); | ||
3564 | kickit = 1; | ||
3565 | } | ||
3526 | } | 3566 | } |
3527 | spin_unlock(&timeout_lock); | 3567 | spin_unlock(&timeout_lock); |
3528 | if (kickit) | 3568 | if (kickit) |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ce468e542428..cfaa56ada189 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -235,27 +235,21 @@ int c4iw_flush_sq(struct c4iw_qp *qhp) | |||
235 | struct t4_cq *cq = &chp->cq; | 235 | struct t4_cq *cq = &chp->cq; |
236 | int idx; | 236 | int idx; |
237 | struct t4_swsqe *swsqe; | 237 | struct t4_swsqe *swsqe; |
238 | int error = (qhp->attr.state != C4IW_QP_STATE_CLOSING && | ||
239 | qhp->attr.state != C4IW_QP_STATE_IDLE); | ||
240 | 238 | ||
241 | if (wq->sq.flush_cidx == -1) | 239 | if (wq->sq.flush_cidx == -1) |
242 | wq->sq.flush_cidx = wq->sq.cidx; | 240 | wq->sq.flush_cidx = wq->sq.cidx; |
243 | idx = wq->sq.flush_cidx; | 241 | idx = wq->sq.flush_cidx; |
244 | BUG_ON(idx >= wq->sq.size); | 242 | BUG_ON(idx >= wq->sq.size); |
245 | while (idx != wq->sq.pidx) { | 243 | while (idx != wq->sq.pidx) { |
246 | if (error) { | 244 | swsqe = &wq->sq.sw_sq[idx]; |
247 | swsqe = &wq->sq.sw_sq[idx]; | 245 | BUG_ON(swsqe->flushed); |
248 | BUG_ON(swsqe->flushed); | 246 | swsqe->flushed = 1; |
249 | swsqe->flushed = 1; | 247 | insert_sq_cqe(wq, cq, swsqe); |
250 | insert_sq_cqe(wq, cq, swsqe); | 248 | if (wq->sq.oldest_read == swsqe) { |
251 | if (wq->sq.oldest_read == swsqe) { | 249 | BUG_ON(swsqe->opcode != FW_RI_READ_REQ); |
252 | BUG_ON(swsqe->opcode != FW_RI_READ_REQ); | 250 | advance_oldest_read(wq); |
253 | advance_oldest_read(wq); | ||
254 | } | ||
255 | flushed++; | ||
256 | } else { | ||
257 | t4_sq_consume(wq); | ||
258 | } | 251 | } |
252 | flushed++; | ||
259 | if (++idx == wq->sq.size) | 253 | if (++idx == wq->sq.size) |
260 | idx = 0; | 254 | idx = 0; |
261 | } | 255 | } |
@@ -678,7 +672,7 @@ skip_cqe: | |||
678 | static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) | 672 | static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) |
679 | { | 673 | { |
680 | struct c4iw_qp *qhp = NULL; | 674 | struct c4iw_qp *qhp = NULL; |
681 | struct t4_cqe cqe = {0, 0}, *rd_cqe; | 675 | struct t4_cqe uninitialized_var(cqe), *rd_cqe; |
682 | struct t4_wq *wq; | 676 | struct t4_wq *wq; |
683 | u32 credit = 0; | 677 | u32 credit = 0; |
684 | u8 cqe_flushed; | 678 | u8 cqe_flushed; |
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 9489a388376c..f4fa50a609e2 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -682,7 +682,10 @@ static void c4iw_dealloc(struct uld_ctx *ctx) | |||
682 | idr_destroy(&ctx->dev->hwtid_idr); | 682 | idr_destroy(&ctx->dev->hwtid_idr); |
683 | idr_destroy(&ctx->dev->stid_idr); | 683 | idr_destroy(&ctx->dev->stid_idr); |
684 | idr_destroy(&ctx->dev->atid_idr); | 684 | idr_destroy(&ctx->dev->atid_idr); |
685 | iounmap(ctx->dev->rdev.oc_mw_kva); | 685 | if (ctx->dev->rdev.bar2_kva) |
686 | iounmap(ctx->dev->rdev.bar2_kva); | ||
687 | if (ctx->dev->rdev.oc_mw_kva) | ||
688 | iounmap(ctx->dev->rdev.oc_mw_kva); | ||
686 | ib_dealloc_device(&ctx->dev->ibdev); | 689 | ib_dealloc_device(&ctx->dev->ibdev); |
687 | ctx->dev = NULL; | 690 | ctx->dev = NULL; |
688 | } | 691 | } |
@@ -722,11 +725,31 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
722 | } | 725 | } |
723 | devp->rdev.lldi = *infop; | 726 | devp->rdev.lldi = *infop; |
724 | 727 | ||
725 | devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) + | 728 | /* |
726 | (pci_resource_len(devp->rdev.lldi.pdev, 2) - | 729 | * For T5 devices, we map all of BAR2 with WC. |
727 | roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size)); | 730 | * For T4 devices with onchip qp mem, we map only that part |
728 | devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, | 731 | * of BAR2 with WC. |
729 | devp->rdev.lldi.vr->ocq.size); | 732 | */ |
733 | devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2); | ||
734 | if (is_t5(devp->rdev.lldi.adapter_type)) { | ||
735 | devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa, | ||
736 | pci_resource_len(devp->rdev.lldi.pdev, 2)); | ||
737 | if (!devp->rdev.bar2_kva) { | ||
738 | pr_err(MOD "Unable to ioremap BAR2\n"); | ||
739 | return ERR_PTR(-EINVAL); | ||
740 | } | ||
741 | } else if (ocqp_supported(infop)) { | ||
742 | devp->rdev.oc_mw_pa = | ||
743 | pci_resource_start(devp->rdev.lldi.pdev, 2) + | ||
744 | pci_resource_len(devp->rdev.lldi.pdev, 2) - | ||
745 | roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size); | ||
746 | devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, | ||
747 | devp->rdev.lldi.vr->ocq.size); | ||
748 | if (!devp->rdev.oc_mw_kva) { | ||
749 | pr_err(MOD "Unable to ioremap onchip mem\n"); | ||
750 | return ERR_PTR(-EINVAL); | ||
751 | } | ||
752 | } | ||
730 | 753 | ||
731 | PDBG(KERN_INFO MOD "ocq memory: " | 754 | PDBG(KERN_INFO MOD "ocq memory: " |
732 | "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", | 755 | "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", |
@@ -1003,9 +1026,11 @@ static int enable_qp_db(int id, void *p, void *data) | |||
1003 | static void resume_rc_qp(struct c4iw_qp *qp) | 1026 | static void resume_rc_qp(struct c4iw_qp *qp) |
1004 | { | 1027 | { |
1005 | spin_lock(&qp->lock); | 1028 | spin_lock(&qp->lock); |
1006 | t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc); | 1029 | t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, |
1030 | is_t5(qp->rhp->rdev.lldi.adapter_type), NULL); | ||
1007 | qp->wq.sq.wq_pidx_inc = 0; | 1031 | qp->wq.sq.wq_pidx_inc = 0; |
1008 | t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc); | 1032 | t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, |
1033 | is_t5(qp->rhp->rdev.lldi.adapter_type), NULL); | ||
1009 | qp->wq.rq.wq_pidx_inc = 0; | 1034 | qp->wq.rq.wq_pidx_inc = 0; |
1010 | spin_unlock(&qp->lock); | 1035 | spin_unlock(&qp->lock); |
1011 | } | 1036 | } |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index e872203c5424..7474b490760a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -149,6 +149,8 @@ struct c4iw_rdev { | |||
149 | struct gen_pool *ocqp_pool; | 149 | struct gen_pool *ocqp_pool; |
150 | u32 flags; | 150 | u32 flags; |
151 | struct cxgb4_lld_info lldi; | 151 | struct cxgb4_lld_info lldi; |
152 | unsigned long bar2_pa; | ||
153 | void __iomem *bar2_kva; | ||
152 | unsigned long oc_mw_pa; | 154 | unsigned long oc_mw_pa; |
153 | void __iomem *oc_mw_kva; | 155 | void __iomem *oc_mw_kva; |
154 | struct c4iw_stats stats; | 156 | struct c4iw_stats stats; |
@@ -433,6 +435,7 @@ struct c4iw_qp_attributes { | |||
433 | u8 ecode; | 435 | u8 ecode; |
434 | u16 sq_db_inc; | 436 | u16 sq_db_inc; |
435 | u16 rq_db_inc; | 437 | u16 rq_db_inc; |
438 | u8 send_term; | ||
436 | }; | 439 | }; |
437 | 440 | ||
438 | struct c4iw_qp { | 441 | struct c4iw_qp { |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index f9ca072a99ed..ec7a2988a703 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -259,8 +259,12 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, | |||
259 | 259 | ||
260 | if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) { | 260 | if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) { |
261 | stag_idx = c4iw_get_resource(&rdev->resource.tpt_table); | 261 | stag_idx = c4iw_get_resource(&rdev->resource.tpt_table); |
262 | if (!stag_idx) | 262 | if (!stag_idx) { |
263 | mutex_lock(&rdev->stats.lock); | ||
264 | rdev->stats.stag.fail++; | ||
265 | mutex_unlock(&rdev->stats.lock); | ||
263 | return -ENOMEM; | 266 | return -ENOMEM; |
267 | } | ||
264 | mutex_lock(&rdev->stats.lock); | 268 | mutex_lock(&rdev->stats.lock); |
265 | rdev->stats.stag.cur += 32; | 269 | rdev->stats.stag.cur += 32; |
266 | if (rdev->stats.stag.cur > rdev->stats.stag.max) | 270 | if (rdev->stats.stag.cur > rdev->stats.stag.max) |
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 79429256023a..a94a3e12c349 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -328,7 +328,7 @@ static int c4iw_query_device(struct ib_device *ibdev, | |||
328 | props->max_mr = c4iw_num_stags(&dev->rdev); | 328 | props->max_mr = c4iw_num_stags(&dev->rdev); |
329 | props->max_pd = T4_MAX_NUM_PD; | 329 | props->max_pd = T4_MAX_NUM_PD; |
330 | props->local_ca_ack_delay = 0; | 330 | props->local_ca_ack_delay = 0; |
331 | props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH; | 331 | props->max_fast_reg_page_list_len = t4_max_fr_depth(use_dsgl); |
332 | 332 | ||
333 | return 0; | 333 | return 0; |
334 | } | 334 | } |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cb76eb5eee1f..086f62f5dc9e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -212,13 +212,23 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | |||
212 | 212 | ||
213 | wq->db = rdev->lldi.db_reg; | 213 | wq->db = rdev->lldi.db_reg; |
214 | wq->gts = rdev->lldi.gts_reg; | 214 | wq->gts = rdev->lldi.gts_reg; |
215 | if (user) { | 215 | if (user || is_t5(rdev->lldi.adapter_type)) { |
216 | wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) + | 216 | u32 off; |
217 | (wq->sq.qid << rdev->qpshift); | 217 | |
218 | wq->sq.udb &= PAGE_MASK; | 218 | off = (wq->sq.qid << rdev->qpshift) & PAGE_MASK; |
219 | wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) + | 219 | if (user) { |
220 | (wq->rq.qid << rdev->qpshift); | 220 | wq->sq.udb = (u64 __iomem *)(rdev->bar2_pa + off); |
221 | wq->rq.udb &= PAGE_MASK; | 221 | } else { |
222 | off += 128 * (wq->sq.qid & rdev->qpmask) + 8; | ||
223 | wq->sq.udb = (u64 __iomem *)(rdev->bar2_kva + off); | ||
224 | } | ||
225 | off = (wq->rq.qid << rdev->qpshift) & PAGE_MASK; | ||
226 | if (user) { | ||
227 | wq->rq.udb = (u64 __iomem *)(rdev->bar2_pa + off); | ||
228 | } else { | ||
229 | off += 128 * (wq->rq.qid & rdev->qpmask) + 8; | ||
230 | wq->rq.udb = (u64 __iomem *)(rdev->bar2_kva + off); | ||
231 | } | ||
222 | } | 232 | } |
223 | wq->rdev = rdev; | 233 | wq->rdev = rdev; |
224 | wq->rq.msn = 1; | 234 | wq->rq.msn = 1; |
@@ -299,9 +309,10 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | |||
299 | if (ret) | 309 | if (ret) |
300 | goto free_dma; | 310 | goto free_dma; |
301 | 311 | ||
302 | PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n", | 312 | PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%lx rqudb 0x%lx\n", |
303 | __func__, wq->sq.qid, wq->rq.qid, wq->db, | 313 | __func__, wq->sq.qid, wq->rq.qid, wq->db, |
304 | (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb); | 314 | (__force unsigned long) wq->sq.udb, |
315 | (__force unsigned long) wq->rq.udb); | ||
305 | 316 | ||
306 | return 0; | 317 | return 0; |
307 | free_dma: | 318 | free_dma: |
@@ -425,6 +436,8 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, | |||
425 | default: | 436 | default: |
426 | return -EINVAL; | 437 | return -EINVAL; |
427 | } | 438 | } |
439 | wqe->send.r3 = 0; | ||
440 | wqe->send.r4 = 0; | ||
428 | 441 | ||
429 | plen = 0; | 442 | plen = 0; |
430 | if (wr->num_sge) { | 443 | if (wr->num_sge) { |
@@ -555,7 +568,8 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe, | |||
555 | int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32); | 568 | int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32); |
556 | int rem; | 569 | int rem; |
557 | 570 | ||
558 | if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH) | 571 | if (wr->wr.fast_reg.page_list_len > |
572 | t4_max_fr_depth(use_dsgl)) | ||
559 | return -EINVAL; | 573 | return -EINVAL; |
560 | 574 | ||
561 | wqe->fr.qpbinde_to_dcacpu = 0; | 575 | wqe->fr.qpbinde_to_dcacpu = 0; |
@@ -650,9 +664,10 @@ static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc) | |||
650 | 664 | ||
651 | spin_lock_irqsave(&qhp->rhp->lock, flags); | 665 | spin_lock_irqsave(&qhp->rhp->lock, flags); |
652 | spin_lock(&qhp->lock); | 666 | spin_lock(&qhp->lock); |
653 | if (qhp->rhp->db_state == NORMAL) { | 667 | if (qhp->rhp->db_state == NORMAL) |
654 | t4_ring_sq_db(&qhp->wq, inc); | 668 | t4_ring_sq_db(&qhp->wq, inc, |
655 | } else { | 669 | is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL); |
670 | else { | ||
656 | add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); | 671 | add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); |
657 | qhp->wq.sq.wq_pidx_inc += inc; | 672 | qhp->wq.sq.wq_pidx_inc += inc; |
658 | } | 673 | } |
@@ -667,9 +682,10 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) | |||
667 | 682 | ||
668 | spin_lock_irqsave(&qhp->rhp->lock, flags); | 683 | spin_lock_irqsave(&qhp->rhp->lock, flags); |
669 | spin_lock(&qhp->lock); | 684 | spin_lock(&qhp->lock); |
670 | if (qhp->rhp->db_state == NORMAL) { | 685 | if (qhp->rhp->db_state == NORMAL) |
671 | t4_ring_rq_db(&qhp->wq, inc); | 686 | t4_ring_rq_db(&qhp->wq, inc, |
672 | } else { | 687 | is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL); |
688 | else { | ||
673 | add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); | 689 | add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); |
674 | qhp->wq.rq.wq_pidx_inc += inc; | 690 | qhp->wq.rq.wq_pidx_inc += inc; |
675 | } | 691 | } |
@@ -686,7 +702,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
686 | enum fw_wr_opcodes fw_opcode = 0; | 702 | enum fw_wr_opcodes fw_opcode = 0; |
687 | enum fw_ri_wr_flags fw_flags; | 703 | enum fw_ri_wr_flags fw_flags; |
688 | struct c4iw_qp *qhp; | 704 | struct c4iw_qp *qhp; |
689 | union t4_wr *wqe; | 705 | union t4_wr *wqe = NULL; |
690 | u32 num_wrs; | 706 | u32 num_wrs; |
691 | struct t4_swsqe *swsqe; | 707 | struct t4_swsqe *swsqe; |
692 | unsigned long flag; | 708 | unsigned long flag; |
@@ -792,7 +808,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
792 | idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); | 808 | idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); |
793 | } | 809 | } |
794 | if (!qhp->rhp->rdev.status_page->db_off) { | 810 | if (!qhp->rhp->rdev.status_page->db_off) { |
795 | t4_ring_sq_db(&qhp->wq, idx); | 811 | t4_ring_sq_db(&qhp->wq, idx, |
812 | is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe); | ||
796 | spin_unlock_irqrestore(&qhp->lock, flag); | 813 | spin_unlock_irqrestore(&qhp->lock, flag); |
797 | } else { | 814 | } else { |
798 | spin_unlock_irqrestore(&qhp->lock, flag); | 815 | spin_unlock_irqrestore(&qhp->lock, flag); |
@@ -806,7 +823,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
806 | { | 823 | { |
807 | int err = 0; | 824 | int err = 0; |
808 | struct c4iw_qp *qhp; | 825 | struct c4iw_qp *qhp; |
809 | union t4_recv_wr *wqe; | 826 | union t4_recv_wr *wqe = NULL; |
810 | u32 num_wrs; | 827 | u32 num_wrs; |
811 | u8 len16 = 0; | 828 | u8 len16 = 0; |
812 | unsigned long flag; | 829 | unsigned long flag; |
@@ -858,7 +875,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
858 | num_wrs--; | 875 | num_wrs--; |
859 | } | 876 | } |
860 | if (!qhp->rhp->rdev.status_page->db_off) { | 877 | if (!qhp->rhp->rdev.status_page->db_off) { |
861 | t4_ring_rq_db(&qhp->wq, idx); | 878 | t4_ring_rq_db(&qhp->wq, idx, |
879 | is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe); | ||
862 | spin_unlock_irqrestore(&qhp->lock, flag); | 880 | spin_unlock_irqrestore(&qhp->lock, flag); |
863 | } else { | 881 | } else { |
864 | spin_unlock_irqrestore(&qhp->lock, flag); | 882 | spin_unlock_irqrestore(&qhp->lock, flag); |
@@ -1352,6 +1370,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
1352 | switch (attrs->next_state) { | 1370 | switch (attrs->next_state) { |
1353 | case C4IW_QP_STATE_CLOSING: | 1371 | case C4IW_QP_STATE_CLOSING: |
1354 | BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); | 1372 | BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); |
1373 | t4_set_wq_in_error(&qhp->wq); | ||
1355 | set_state(qhp, C4IW_QP_STATE_CLOSING); | 1374 | set_state(qhp, C4IW_QP_STATE_CLOSING); |
1356 | ep = qhp->ep; | 1375 | ep = qhp->ep; |
1357 | if (!internal) { | 1376 | if (!internal) { |
@@ -1359,30 +1378,30 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
1359 | disconnect = 1; | 1378 | disconnect = 1; |
1360 | c4iw_get_ep(&qhp->ep->com); | 1379 | c4iw_get_ep(&qhp->ep->com); |
1361 | } | 1380 | } |
1362 | t4_set_wq_in_error(&qhp->wq); | ||
1363 | ret = rdma_fini(rhp, qhp, ep); | 1381 | ret = rdma_fini(rhp, qhp, ep); |
1364 | if (ret) | 1382 | if (ret) |
1365 | goto err; | 1383 | goto err; |
1366 | break; | 1384 | break; |
1367 | case C4IW_QP_STATE_TERMINATE: | 1385 | case C4IW_QP_STATE_TERMINATE: |
1386 | t4_set_wq_in_error(&qhp->wq); | ||
1368 | set_state(qhp, C4IW_QP_STATE_TERMINATE); | 1387 | set_state(qhp, C4IW_QP_STATE_TERMINATE); |
1369 | qhp->attr.layer_etype = attrs->layer_etype; | 1388 | qhp->attr.layer_etype = attrs->layer_etype; |
1370 | qhp->attr.ecode = attrs->ecode; | 1389 | qhp->attr.ecode = attrs->ecode; |
1371 | t4_set_wq_in_error(&qhp->wq); | ||
1372 | ep = qhp->ep; | 1390 | ep = qhp->ep; |
1373 | disconnect = 1; | 1391 | if (!internal) { |
1374 | if (!internal) | 1392 | c4iw_get_ep(&qhp->ep->com); |
1375 | terminate = 1; | 1393 | terminate = 1; |
1376 | else { | 1394 | disconnect = 1; |
1395 | } else { | ||
1396 | terminate = qhp->attr.send_term; | ||
1377 | ret = rdma_fini(rhp, qhp, ep); | 1397 | ret = rdma_fini(rhp, qhp, ep); |
1378 | if (ret) | 1398 | if (ret) |
1379 | goto err; | 1399 | goto err; |
1380 | } | 1400 | } |
1381 | c4iw_get_ep(&qhp->ep->com); | ||
1382 | break; | 1401 | break; |
1383 | case C4IW_QP_STATE_ERROR: | 1402 | case C4IW_QP_STATE_ERROR: |
1384 | set_state(qhp, C4IW_QP_STATE_ERROR); | ||
1385 | t4_set_wq_in_error(&qhp->wq); | 1403 | t4_set_wq_in_error(&qhp->wq); |
1404 | set_state(qhp, C4IW_QP_STATE_ERROR); | ||
1386 | if (!internal) { | 1405 | if (!internal) { |
1387 | abort = 1; | 1406 | abort = 1; |
1388 | disconnect = 1; | 1407 | disconnect = 1; |
@@ -1677,11 +1696,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
1677 | mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); | 1696 | mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); |
1678 | insert_mmap(ucontext, mm2); | 1697 | insert_mmap(ucontext, mm2); |
1679 | mm3->key = uresp.sq_db_gts_key; | 1698 | mm3->key = uresp.sq_db_gts_key; |
1680 | mm3->addr = qhp->wq.sq.udb; | 1699 | mm3->addr = (__force unsigned long) qhp->wq.sq.udb; |
1681 | mm3->len = PAGE_SIZE; | 1700 | mm3->len = PAGE_SIZE; |
1682 | insert_mmap(ucontext, mm3); | 1701 | insert_mmap(ucontext, mm3); |
1683 | mm4->key = uresp.rq_db_gts_key; | 1702 | mm4->key = uresp.rq_db_gts_key; |
1684 | mm4->addr = qhp->wq.rq.udb; | 1703 | mm4->addr = (__force unsigned long) qhp->wq.rq.udb; |
1685 | mm4->len = PAGE_SIZE; | 1704 | mm4->len = PAGE_SIZE; |
1686 | insert_mmap(ucontext, mm4); | 1705 | insert_mmap(ucontext, mm4); |
1687 | if (mm5) { | 1706 | if (mm5) { |
@@ -1758,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
1758 | /* | 1777 | /* |
1759 | * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for | 1778 | * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for |
1760 | * ringing the queue db when we're in DB_FULL mode. | 1779 | * ringing the queue db when we're in DB_FULL mode. |
1780 | * Only allow this on T4 devices. | ||
1761 | */ | 1781 | */ |
1762 | attrs.sq_db_inc = attr->sq_psn; | 1782 | attrs.sq_db_inc = attr->sq_psn; |
1763 | attrs.rq_db_inc = attr->rq_psn; | 1783 | attrs.rq_db_inc = attr->rq_psn; |
1764 | mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0; | 1784 | mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0; |
1765 | mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0; | 1785 | mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0; |
1786 | if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) && | ||
1787 | (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB))) | ||
1788 | return -EINVAL; | ||
1766 | 1789 | ||
1767 | return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); | 1790 | return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); |
1768 | } | 1791 | } |
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index cdef4d7fb6d8..67df71a7012e 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -179,8 +179,12 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) | |||
179 | kfree(entry); | 179 | kfree(entry); |
180 | } else { | 180 | } else { |
181 | qid = c4iw_get_resource(&rdev->resource.qid_table); | 181 | qid = c4iw_get_resource(&rdev->resource.qid_table); |
182 | if (!qid) | 182 | if (!qid) { |
183 | mutex_lock(&rdev->stats.lock); | ||
184 | rdev->stats.qid.fail++; | ||
185 | mutex_unlock(&rdev->stats.lock); | ||
183 | goto out; | 186 | goto out; |
187 | } | ||
184 | mutex_lock(&rdev->stats.lock); | 188 | mutex_lock(&rdev->stats.lock); |
185 | rdev->stats.qid.cur += rdev->qpmask + 1; | 189 | rdev->stats.qid.cur += rdev->qpmask + 1; |
186 | mutex_unlock(&rdev->stats.lock); | 190 | mutex_unlock(&rdev->stats.lock); |
@@ -322,8 +326,8 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) | |||
322 | unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); | 326 | unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); |
323 | PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6); | 327 | PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6); |
324 | if (!addr) | 328 | if (!addr) |
325 | printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n", | 329 | pr_warn_ratelimited(MOD "%s: Out of RQT memory\n", |
326 | pci_name(rdev->lldi.pdev)); | 330 | pci_name(rdev->lldi.pdev)); |
327 | mutex_lock(&rdev->stats.lock); | 331 | mutex_lock(&rdev->stats.lock); |
328 | if (addr) { | 332 | if (addr) { |
329 | rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); | 333 | rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); |
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index eeca8b1e6376..2178f3198410 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -84,7 +84,14 @@ struct t4_status_page { | |||
84 | sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) | 84 | sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) |
85 | #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \ | 85 | #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \ |
86 | sizeof(struct fw_ri_immd)) & ~31UL) | 86 | sizeof(struct fw_ri_immd)) & ~31UL) |
87 | #define T4_MAX_FR_DEPTH (1024 / sizeof(u64)) | 87 | #define T4_MAX_FR_IMMD_DEPTH (T4_MAX_FR_IMMD / sizeof(u64)) |
88 | #define T4_MAX_FR_DSGL 1024 | ||
89 | #define T4_MAX_FR_DSGL_DEPTH (T4_MAX_FR_DSGL / sizeof(u64)) | ||
90 | |||
91 | static inline int t4_max_fr_depth(int use_dsgl) | ||
92 | { | ||
93 | return use_dsgl ? T4_MAX_FR_DSGL_DEPTH : T4_MAX_FR_IMMD_DEPTH; | ||
94 | } | ||
88 | 95 | ||
89 | #define T4_RQ_NUM_SLOTS 2 | 96 | #define T4_RQ_NUM_SLOTS 2 |
90 | #define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS) | 97 | #define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS) |
@@ -292,7 +299,7 @@ struct t4_sq { | |||
292 | unsigned long phys_addr; | 299 | unsigned long phys_addr; |
293 | struct t4_swsqe *sw_sq; | 300 | struct t4_swsqe *sw_sq; |
294 | struct t4_swsqe *oldest_read; | 301 | struct t4_swsqe *oldest_read; |
295 | u64 udb; | 302 | u64 __iomem *udb; |
296 | size_t memsize; | 303 | size_t memsize; |
297 | u32 qid; | 304 | u32 qid; |
298 | u16 in_use; | 305 | u16 in_use; |
@@ -314,7 +321,7 @@ struct t4_rq { | |||
314 | dma_addr_t dma_addr; | 321 | dma_addr_t dma_addr; |
315 | DEFINE_DMA_UNMAP_ADDR(mapping); | 322 | DEFINE_DMA_UNMAP_ADDR(mapping); |
316 | struct t4_swrqe *sw_rq; | 323 | struct t4_swrqe *sw_rq; |
317 | u64 udb; | 324 | u64 __iomem *udb; |
318 | size_t memsize; | 325 | size_t memsize; |
319 | u32 qid; | 326 | u32 qid; |
320 | u32 msn; | 327 | u32 msn; |
@@ -435,15 +442,67 @@ static inline u16 t4_sq_wq_size(struct t4_wq *wq) | |||
435 | return wq->sq.size * T4_SQ_NUM_SLOTS; | 442 | return wq->sq.size * T4_SQ_NUM_SLOTS; |
436 | } | 443 | } |
437 | 444 | ||
438 | static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc) | 445 | /* This function copies 64 byte coalesced work request to memory |
446 | * mapped BAR2 space. For coalesced WRs, the SGE fetches data | ||
447 | * from the FIFO instead of from Host. | ||
448 | */ | ||
449 | static inline void pio_copy(u64 __iomem *dst, u64 *src) | ||
450 | { | ||
451 | int count = 8; | ||
452 | |||
453 | while (count) { | ||
454 | writeq(*src, dst); | ||
455 | src++; | ||
456 | dst++; | ||
457 | count--; | ||
458 | } | ||
459 | } | ||
460 | |||
461 | static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5, | ||
462 | union t4_wr *wqe) | ||
439 | { | 463 | { |
464 | |||
465 | /* Flush host queue memory writes. */ | ||
440 | wmb(); | 466 | wmb(); |
467 | if (t5) { | ||
468 | if (inc == 1 && wqe) { | ||
469 | PDBG("%s: WC wq->sq.pidx = %d\n", | ||
470 | __func__, wq->sq.pidx); | ||
471 | pio_copy(wq->sq.udb + 7, (void *)wqe); | ||
472 | } else { | ||
473 | PDBG("%s: DB wq->sq.pidx = %d\n", | ||
474 | __func__, wq->sq.pidx); | ||
475 | writel(PIDX_T5(inc), wq->sq.udb); | ||
476 | } | ||
477 | |||
478 | /* Flush user doorbell area writes. */ | ||
479 | wmb(); | ||
480 | return; | ||
481 | } | ||
441 | writel(QID(wq->sq.qid) | PIDX(inc), wq->db); | 482 | writel(QID(wq->sq.qid) | PIDX(inc), wq->db); |
442 | } | 483 | } |
443 | 484 | ||
444 | static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc) | 485 | static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, |
486 | union t4_recv_wr *wqe) | ||
445 | { | 487 | { |
488 | |||
489 | /* Flush host queue memory writes. */ | ||
446 | wmb(); | 490 | wmb(); |
491 | if (t5) { | ||
492 | if (inc == 1 && wqe) { | ||
493 | PDBG("%s: WC wq->rq.pidx = %d\n", | ||
494 | __func__, wq->rq.pidx); | ||
495 | pio_copy(wq->rq.udb + 7, (void *)wqe); | ||
496 | } else { | ||
497 | PDBG("%s: DB wq->rq.pidx = %d\n", | ||
498 | __func__, wq->rq.pidx); | ||
499 | writel(PIDX_T5(inc), wq->rq.udb); | ||
500 | } | ||
501 | |||
502 | /* Flush user doorbell area writes. */ | ||
503 | wmb(); | ||
504 | return; | ||
505 | } | ||
447 | writel(QID(wq->rq.qid) | PIDX(inc), wq->db); | 506 | writel(QID(wq->rq.qid) | PIDX(inc), wq->db); |
448 | } | 507 | } |
449 | 508 | ||
@@ -568,6 +627,9 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) | |||
568 | printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid); | 627 | printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid); |
569 | BUG_ON(1); | 628 | BUG_ON(1); |
570 | } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) { | 629 | } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) { |
630 | |||
631 | /* Ensure CQE is flushed to memory */ | ||
632 | rmb(); | ||
571 | *cqe = &cq->queue[cq->cidx]; | 633 | *cqe = &cq->queue[cq->cidx]; |
572 | ret = 0; | 634 | ret = 0; |
573 | } else | 635 | } else |
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index dc193c292671..6121ca08fe58 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -836,4 +836,18 @@ struct ulptx_idata { | |||
836 | #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) | 836 | #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) |
837 | #define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) | 837 | #define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) |
838 | 838 | ||
839 | enum { /* TCP congestion control algorithms */ | ||
840 | CONG_ALG_RENO, | ||
841 | CONG_ALG_TAHOE, | ||
842 | CONG_ALG_NEWRENO, | ||
843 | CONG_ALG_HIGHSPEED | ||
844 | }; | ||
845 | |||
846 | #define S_CONG_CNTRL 14 | ||
847 | #define M_CONG_CNTRL 0x3 | ||
848 | #define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL) | ||
849 | #define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL) | ||
850 | |||
851 | #define T5_OPT_2_VALID (1 << 31) | ||
852 | |||
839 | #endif /* _T4FW_RI_API_H_ */ | 853 | #endif /* _T4FW_RI_API_H_ */ |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1b6dbe156a37..199c7896f081 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -48,6 +48,7 @@ | |||
48 | 48 | ||
49 | #include <linux/mlx4/driver.h> | 49 | #include <linux/mlx4/driver.h> |
50 | #include <linux/mlx4/cmd.h> | 50 | #include <linux/mlx4/cmd.h> |
51 | #include <linux/mlx4/qp.h> | ||
51 | 52 | ||
52 | #include "mlx4_ib.h" | 53 | #include "mlx4_ib.h" |
53 | #include "user.h" | 54 | #include "user.h" |
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event, | |||
1614 | } | 1615 | } |
1615 | #endif | 1616 | #endif |
1616 | 1617 | ||
1618 | #define MLX4_IB_INVALID_MAC ((u64)-1) | ||
1619 | static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, | ||
1620 | struct net_device *dev, | ||
1621 | int port) | ||
1622 | { | ||
1623 | u64 new_smac = 0; | ||
1624 | u64 release_mac = MLX4_IB_INVALID_MAC; | ||
1625 | struct mlx4_ib_qp *qp; | ||
1626 | |||
1627 | read_lock(&dev_base_lock); | ||
1628 | new_smac = mlx4_mac_to_u64(dev->dev_addr); | ||
1629 | read_unlock(&dev_base_lock); | ||
1630 | |||
1631 | mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); | ||
1632 | qp = ibdev->qp1_proxy[port - 1]; | ||
1633 | if (qp) { | ||
1634 | int new_smac_index; | ||
1635 | u64 old_smac = qp->pri.smac; | ||
1636 | struct mlx4_update_qp_params update_params; | ||
1637 | |||
1638 | if (new_smac == old_smac) | ||
1639 | goto unlock; | ||
1640 | |||
1641 | new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac); | ||
1642 | |||
1643 | if (new_smac_index < 0) | ||
1644 | goto unlock; | ||
1645 | |||
1646 | update_params.smac_index = new_smac_index; | ||
1647 | if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC, | ||
1648 | &update_params)) { | ||
1649 | release_mac = new_smac; | ||
1650 | goto unlock; | ||
1651 | } | ||
1652 | |||
1653 | qp->pri.smac = new_smac; | ||
1654 | qp->pri.smac_index = new_smac_index; | ||
1655 | |||
1656 | release_mac = old_smac; | ||
1657 | } | ||
1658 | |||
1659 | unlock: | ||
1660 | mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); | ||
1661 | if (release_mac != MLX4_IB_INVALID_MAC) | ||
1662 | mlx4_unregister_mac(ibdev->dev, port, release_mac); | ||
1663 | } | ||
1664 | |||
1617 | static void mlx4_ib_get_dev_addr(struct net_device *dev, | 1665 | static void mlx4_ib_get_dev_addr(struct net_device *dev, |
1618 | struct mlx4_ib_dev *ibdev, u8 port) | 1666 | struct mlx4_ib_dev *ibdev, u8 port) |
1619 | { | 1667 | { |
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) | |||
1689 | return 0; | 1737 | return 0; |
1690 | } | 1738 | } |
1691 | 1739 | ||
1692 | static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) | 1740 | static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, |
1741 | struct net_device *dev, | ||
1742 | unsigned long event) | ||
1743 | |||
1693 | { | 1744 | { |
1694 | struct mlx4_ib_iboe *iboe; | 1745 | struct mlx4_ib_iboe *iboe; |
1746 | int update_qps_port = -1; | ||
1695 | int port; | 1747 | int port; |
1696 | 1748 | ||
1697 | iboe = &ibdev->iboe; | 1749 | iboe = &ibdev->iboe; |
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) | |||
1719 | } | 1771 | } |
1720 | curr_master = iboe->masters[port - 1]; | 1772 | curr_master = iboe->masters[port - 1]; |
1721 | 1773 | ||
1774 | if (dev == iboe->netdevs[port - 1] && | ||
1775 | (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || | ||
1776 | event == NETDEV_UP || event == NETDEV_CHANGE)) | ||
1777 | update_qps_port = port; | ||
1778 | |||
1722 | if (curr_netdev) { | 1779 | if (curr_netdev) { |
1723 | port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? | 1780 | port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? |
1724 | IB_PORT_ACTIVE : IB_PORT_DOWN; | 1781 | IB_PORT_ACTIVE : IB_PORT_DOWN; |
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) | |||
1752 | } | 1809 | } |
1753 | 1810 | ||
1754 | spin_unlock(&iboe->lock); | 1811 | spin_unlock(&iboe->lock); |
1812 | |||
1813 | if (update_qps_port > 0) | ||
1814 | mlx4_ib_update_qps(ibdev, dev, update_qps_port); | ||
1755 | } | 1815 | } |
1756 | 1816 | ||
1757 | static int mlx4_ib_netdev_event(struct notifier_block *this, | 1817 | static int mlx4_ib_netdev_event(struct notifier_block *this, |
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, | |||
1764 | return NOTIFY_DONE; | 1824 | return NOTIFY_DONE; |
1765 | 1825 | ||
1766 | ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); | 1826 | ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); |
1767 | mlx4_ib_scan_netdevs(ibdev); | 1827 | mlx4_ib_scan_netdevs(ibdev, dev, event); |
1768 | 1828 | ||
1769 | return NOTIFY_DONE; | 1829 | return NOTIFY_DONE; |
1770 | } | 1830 | } |
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
2043 | goto err_map; | 2103 | goto err_map; |
2044 | 2104 | ||
2045 | for (i = 0; i < ibdev->num_ports; ++i) { | 2105 | for (i = 0; i < ibdev->num_ports; ++i) { |
2106 | mutex_init(&ibdev->qp1_proxy_lock[i]); | ||
2046 | if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == | 2107 | if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == |
2047 | IB_LINK_LAYER_ETHERNET) { | 2108 | IB_LINK_LAYER_ETHERNET) { |
2048 | err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]); | 2109 | err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]); |
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
2126 | for (i = 1 ; i <= ibdev->num_ports ; ++i) | 2187 | for (i = 1 ; i <= ibdev->num_ports ; ++i) |
2127 | reset_gid_table(ibdev, i); | 2188 | reset_gid_table(ibdev, i); |
2128 | rtnl_lock(); | 2189 | rtnl_lock(); |
2129 | mlx4_ib_scan_netdevs(ibdev); | 2190 | mlx4_ib_scan_netdevs(ibdev, NULL, 0); |
2130 | rtnl_unlock(); | 2191 | rtnl_unlock(); |
2131 | mlx4_ib_init_gid_table(ibdev); | 2192 | mlx4_ib_init_gid_table(ibdev); |
2132 | } | 2193 | } |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f589522fddfd..66b0b7dbd9f4 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -522,6 +522,9 @@ struct mlx4_ib_dev { | |||
522 | int steer_qpn_count; | 522 | int steer_qpn_count; |
523 | int steer_qpn_base; | 523 | int steer_qpn_base; |
524 | int steering_support; | 524 | int steering_support; |
525 | struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS]; | ||
526 | /* lock when destroying qp1_proxy and getting netdev events */ | ||
527 | struct mutex qp1_proxy_lock[MLX4_MAX_PORTS]; | ||
525 | }; | 528 | }; |
526 | 529 | ||
527 | struct ib_event_work { | 530 | struct ib_event_work { |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 41308af4163c..dc57482ae7af 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp) | |||
1132 | if (is_qp0(dev, mqp)) | 1132 | if (is_qp0(dev, mqp)) |
1133 | mlx4_CLOSE_PORT(dev->dev, mqp->port); | 1133 | mlx4_CLOSE_PORT(dev->dev, mqp->port); |
1134 | 1134 | ||
1135 | if (dev->qp1_proxy[mqp->port - 1] == mqp) { | ||
1136 | mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]); | ||
1137 | dev->qp1_proxy[mqp->port - 1] = NULL; | ||
1138 | mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]); | ||
1139 | } | ||
1140 | |||
1135 | pd = get_pd(mqp); | 1141 | pd = get_pd(mqp); |
1136 | destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); | 1142 | destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); |
1137 | 1143 | ||
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1646 | err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); | 1652 | err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); |
1647 | if (err) | 1653 | if (err) |
1648 | return -EINVAL; | 1654 | return -EINVAL; |
1655 | if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) | ||
1656 | dev->qp1_proxy[qp->port - 1] = qp; | ||
1649 | } | 1657 | } |
1650 | } | 1658 | } |
1651 | } | 1659 | } |
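
The qp1_proxy[] cache and its per-port mutex form a small ownership handshake: __mlx4_ib_modify_qp() publishes the proxy GSI QP, and mlx4_ib_destroy_qp() clears the slot under qp1_proxy_lock so that a concurrent user of the cached pointer (the netdev-event path hinted at by the mlx4_ib.h comment, not visible in this diff) cannot dereference a QP that is being destroyed. A hedged sketch of the same handshake with placeholder types (proxy_slot and toy_qp are illustrative):

#include <linux/mutex.h>

struct toy_qp;                          /* opaque stand-in for the QP */

struct proxy_slot {
        struct mutex lock;              /* taken by destroy and by users */
        struct toy_qp *qp;              /* cached proxy QP, or NULL */
};

static void proxy_slot_clear(struct proxy_slot *slot, struct toy_qp *qp)
{
        if (slot->qp == qp) {           /* cheap unlocked check, as above */
                mutex_lock(&slot->lock);
                slot->qp = NULL;
                mutex_unlock(&slot->lock);
        }
}

static void proxy_slot_use(struct proxy_slot *slot,
                           void (*fn)(struct toy_qp *qp))
{
        mutex_lock(&slot->lock);
        if (slot->qp)
                fn(slot->qp);           /* cannot be cleared while held */
        mutex_unlock(&slot->lock);
}
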
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index fa6dc870adae..364d4b6937f5 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -282,6 +282,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
282 | props->sig_guard_cap = IB_GUARD_T10DIF_CRC | | 282 | props->sig_guard_cap = IB_GUARD_T10DIF_CRC | |
283 | IB_GUARD_T10DIF_CSUM; | 283 | IB_GUARD_T10DIF_CSUM; |
284 | } | 284 | } |
285 | if (flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST) | ||
286 | props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; | ||
285 | 287 | ||
286 | props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & | 288 | props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & |
287 | 0xffffff; | 289 | 0xffffff; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index ae788d27b93f..dc930ed21eca 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -807,6 +807,15 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
807 | spin_lock_init(&qp->sq.lock); | 807 | spin_lock_init(&qp->sq.lock); |
808 | spin_lock_init(&qp->rq.lock); | 808 | spin_lock_init(&qp->rq.lock); |
809 | 809 | ||
810 | if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { | ||
811 | if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) { | ||
812 | mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n"); | ||
813 | return -EINVAL; | ||
814 | } else { | ||
815 | qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; | ||
816 | } | ||
817 | } | ||
818 | |||
810 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) | 819 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) |
811 | qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; | 820 | qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; |
812 | 821 | ||
@@ -878,6 +887,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
878 | if (qp->wq_sig) | 887 | if (qp->wq_sig) |
879 | in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG); | 888 | in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG); |
880 | 889 | ||
890 | if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) | ||
891 | in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST); | ||
892 | |||
881 | if (qp->scat_cqe && is_connected(init_attr->qp_type)) { | 893 | if (qp->scat_cqe && is_connected(init_attr->qp_type)) { |
882 | int rcqe_sz; | 894 | int rcqe_sz; |
883 | int scqe_sz; | 895 | int scqe_sz; |
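
Read together, the two mlx5 hunks are the usual capability-gating pattern: mlx5_ib_query_device() advertises IB_DEVICE_BLOCK_MULTICAST_LOOPBACK only when the firmware reports MLX5_DEV_CAP_FLAG_BLOCK_MCAST, and create_qp_common() rejects IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK when that capability is absent, otherwise recording it in qp->flags so the context-build step can set MLX5_QP_BLOCK_MCAST. A simplified stand-in of that flow (the CAP_/CREATE_/QPF_/HWCTX_ constants and toy_qp below are illustrative, not the mlx5 definitions):

#include <linux/errno.h>

#define CAP_BLOCK_MCAST         (1u << 0)  /* stand-in for the device cap bit */
#define CREATE_BLOCK_MCAST_LB   (1u << 0)  /* stand-in for the verbs create flag */
#define QPF_BLOCK_MCAST_LB      (1u << 0)  /* stand-in for the driver qp->flags bit */
#define HWCTX_BLOCK_MCAST       (1u << 30) /* stand-in for the context bit */

struct toy_qp {
        unsigned int flags;
};

static int toy_create_qp(unsigned int dev_caps, unsigned int create_flags,
                         struct toy_qp *qp)
{
        if (create_flags & CREATE_BLOCK_MCAST_LB) {
                if (!(dev_caps & CAP_BLOCK_MCAST))
                        return -EINVAL;         /* requested but unsupported */
                qp->flags |= QPF_BLOCK_MCAST_LB;
        }
        return 0;
}

static unsigned int toy_build_hw_ctx(const struct toy_qp *qp)
{
        unsigned int ctx = 0;

        if (qp->flags & QPF_BLOCK_MCAST_LB)
                ctx |= HWCTX_BLOCK_MCAST;       /* mirrors MLX5_QP_BLOCK_MCAST */
        return ctx;
}
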
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 87897b95666d..ded76c101dde 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c | |||
@@ -858,13 +858,9 @@ static int mthca_enable_msi_x(struct mthca_dev *mdev) | |||
858 | entries[1].entry = 1; | 858 | entries[1].entry = 1; |
859 | entries[2].entry = 2; | 859 | entries[2].entry = 2; |
860 | 860 | ||
861 | err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries)); | 861 | err = pci_enable_msix_exact(mdev->pdev, entries, ARRAY_SIZE(entries)); |
862 | if (err) { | 862 | if (err) |
863 | if (err > 0) | ||
864 | mthca_info(mdev, "Only %d MSI-X vectors available, " | ||
865 | "not using MSI-X\n", err); | ||
866 | return err; | 863 | return err; |
867 | } | ||
868 | 864 | ||
869 | mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector; | 865 | mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector; |
870 | mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector; | 866 | mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector; |
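
pci_enable_msix_exact() either grants exactly the requested number of vectors and returns 0, or fails with a negative errno; unlike the old pci_enable_msix() it never returns a positive "vectors available" count, which is why the informational retry branch above can simply be dropped. A minimal usage sketch (example_enable_three_vectors is an illustrative helper, not driver code):

#include <linux/pci.h>

static int example_enable_three_vectors(struct pci_dev *pdev,
                                        struct msix_entry *entries)
{
        int err;

        entries[0].entry = 0;
        entries[1].entry = 1;
        entries[2].entry = 2;

        err = pci_enable_msix_exact(pdev, entries, 3);
        if (err)                /* negative errno: fall back to MSI or INTx */
                return err;

        /* On success entries[i].vector holds the Linux IRQ numbers. */
        return 0;
}
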
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index c8d9c4ab142b..61a0046efb76 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c | |||
@@ -197,46 +197,47 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt, | |||
197 | struct qib_msix_entry *qib_msix_entry) | 197 | struct qib_msix_entry *qib_msix_entry) |
198 | { | 198 | { |
199 | int ret; | 199 | int ret; |
200 | u32 tabsize = 0; | 200 | int nvec = *msixcnt; |
201 | u16 msix_flags; | ||
202 | struct msix_entry *msix_entry; | 201 | struct msix_entry *msix_entry; |
203 | int i; | 202 | int i; |
204 | 203 | ||
204 | ret = pci_msix_vec_count(dd->pcidev); | ||
205 | if (ret < 0) | ||
206 | goto do_intx; | ||
207 | |||
208 | nvec = min(nvec, ret); | ||
209 | |||
205 | /* We can't pass qib_msix_entry array to qib_msix_setup | 210 | /* We can't pass qib_msix_entry array to qib_msix_setup |
206 | * so use a dummy msix_entry array and copy the allocated | 211 | * so use a dummy msix_entry array and copy the allocated |
207 | * irq back to the qib_msix_entry array. */ | 212 | * irq back to the qib_msix_entry array. */ |
208 | msix_entry = kmalloc(*msixcnt * sizeof(*msix_entry), GFP_KERNEL); | 213 | msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL); |
209 | if (!msix_entry) { | 214 | if (!msix_entry) |
210 | ret = -ENOMEM; | ||
211 | goto do_intx; | 215 | goto do_intx; |
212 | } | 216 | |
213 | for (i = 0; i < *msixcnt; i++) | 217 | for (i = 0; i < nvec; i++) |
214 | msix_entry[i] = qib_msix_entry[i].msix; | 218 | msix_entry[i] = qib_msix_entry[i].msix; |
215 | 219 | ||
216 | pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags); | 220 | ret = pci_enable_msix_range(dd->pcidev, msix_entry, 1, nvec); |
217 | tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE); | 221 | if (ret < 0) |
218 | if (tabsize > *msixcnt) | 222 | goto free_msix_entry; |
219 | tabsize = *msixcnt; | 223 | else |
220 | ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize); | 224 | nvec = ret; |
221 | if (ret > 0) { | 225 | |
222 | tabsize = ret; | 226 | for (i = 0; i < nvec; i++) |
223 | ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize); | ||
224 | } | ||
225 | do_intx: | ||
226 | if (ret) { | ||
227 | qib_dev_err(dd, | ||
228 | "pci_enable_msix %d vectors failed: %d, falling back to INTx\n", | ||
229 | tabsize, ret); | ||
230 | tabsize = 0; | ||
231 | } | ||
232 | for (i = 0; i < tabsize; i++) | ||
233 | qib_msix_entry[i].msix = msix_entry[i]; | 227 | qib_msix_entry[i].msix = msix_entry[i]; |
228 | |||
234 | kfree(msix_entry); | 229 | kfree(msix_entry); |
235 | *msixcnt = tabsize; | 230 | *msixcnt = nvec; |
231 | return; | ||
236 | 232 | ||
237 | if (ret) | 233 | free_msix_entry: |
238 | qib_enable_intx(dd->pcidev); | 234 | kfree(msix_entry); |
239 | 235 | ||
236 | do_intx: | ||
237 | qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, " | ||
238 | "falling back to INTx\n", nvec, ret); | ||
239 | *msixcnt = 0; | ||
240 | qib_enable_intx(dd->pcidev); | ||
240 | } | 241 | } |
241 | 242 | ||
242 | /** | 243 | /** |
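
The qib conversion leans on the range API to do the negotiation the old code did by hand with the PCI_MSIX_FLAGS_QSIZE probe and the second pci_enable_msix() call: pci_msix_vec_count() reports the MSI-X table size, and pci_enable_msix_range() returns how many vectors between minvec and maxvec were actually granted, or a negative errno. A condensed sketch of the same flow (example_alloc_msix is illustrative):

#include <linux/kernel.h>
#include <linux/pci.h>

static int example_alloc_msix(struct pci_dev *pdev,
                              struct msix_entry *entries, int wanted)
{
        int nvec;

        nvec = pci_msix_vec_count(pdev);        /* MSI-X table size, or -errno */
        if (nvec < 0)
                return nvec;

        nvec = min(nvec, wanted);

        /* Returns the number granted (1..nvec) or a negative errno;
         * no manual retry with a smaller count is needed. */
        return pci_enable_msix_range(pdev, entries, 1, nvec);
}
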
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index c98fdb185931..a1710465faaf 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <target/target_core_base.h> | 28 | #include <target/target_core_base.h> |
29 | #include <target/target_core_fabric.h> | 29 | #include <target/target_core_fabric.h> |
30 | #include <target/iscsi/iscsi_transport.h> | 30 | #include <target/iscsi/iscsi_transport.h> |
31 | #include <linux/semaphore.h> | ||
31 | 32 | ||
32 | #include "isert_proto.h" | 33 | #include "isert_proto.h" |
33 | #include "ib_isert.h" | 34 | #include "ib_isert.h" |
@@ -561,7 +562,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
561 | struct isert_device *device; | 562 | struct isert_device *device; |
562 | struct ib_device *ib_dev = cma_id->device; | 563 | struct ib_device *ib_dev = cma_id->device; |
563 | int ret = 0; | 564 | int ret = 0; |
564 | u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; | 565 | u8 pi_support; |
566 | |||
567 | spin_lock_bh(&np->np_thread_lock); | ||
568 | if (!np->enabled) { | ||
569 | spin_unlock_bh(&np->np_thread_lock); | ||
570 | pr_debug("iscsi_np is not enabled, reject connect request\n"); | ||
571 | return rdma_reject(cma_id, NULL, 0); | ||
572 | } | ||
573 | spin_unlock_bh(&np->np_thread_lock); | ||
565 | 574 | ||
566 | pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", | 575 | pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", |
567 | cma_id, cma_id->context); | 576 | cma_id, cma_id->context); |
@@ -652,6 +661,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
652 | goto out_mr; | 661 | goto out_mr; |
653 | } | 662 | } |
654 | 663 | ||
664 | pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; | ||
655 | if (pi_support && !device->pi_capable) { | 665 | if (pi_support && !device->pi_capable) { |
656 | pr_err("Protection information requested but not supported\n"); | 666 | pr_err("Protection information requested but not supported\n"); |
657 | ret = -EINVAL; | 667 | ret = -EINVAL; |
@@ -663,11 +673,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
663 | goto out_conn_dev; | 673 | goto out_conn_dev; |
664 | 674 | ||
665 | mutex_lock(&isert_np->np_accept_mutex); | 675 | mutex_lock(&isert_np->np_accept_mutex); |
666 | list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node); | 676 | list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list); |
667 | mutex_unlock(&isert_np->np_accept_mutex); | 677 | mutex_unlock(&isert_np->np_accept_mutex); |
668 | 678 | ||
669 | pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np); | 679 | pr_debug("isert_connect_request() up np_sem np: %p\n", np); |
670 | wake_up(&isert_np->np_accept_wq); | 680 | up(&isert_np->np_sem); |
671 | return 0; | 681 | return 0; |
672 | 682 | ||
673 | out_conn_dev: | 683 | out_conn_dev: |
@@ -2999,7 +3009,7 @@ isert_setup_np(struct iscsi_np *np, | |||
2999 | pr_err("Unable to allocate struct isert_np\n"); | 3009 | pr_err("Unable to allocate struct isert_np\n"); |
3000 | return -ENOMEM; | 3010 | return -ENOMEM; |
3001 | } | 3011 | } |
3002 | init_waitqueue_head(&isert_np->np_accept_wq); | 3012 | sema_init(&isert_np->np_sem, 0); |
3003 | mutex_init(&isert_np->np_accept_mutex); | 3013 | mutex_init(&isert_np->np_accept_mutex); |
3004 | INIT_LIST_HEAD(&isert_np->np_accept_list); | 3014 | INIT_LIST_HEAD(&isert_np->np_accept_list); |
3005 | init_completion(&isert_np->np_login_comp); | 3015 | init_completion(&isert_np->np_login_comp); |
@@ -3048,18 +3058,6 @@ out: | |||
3048 | } | 3058 | } |
3049 | 3059 | ||
3050 | static int | 3060 | static int |
3051 | isert_check_accept_queue(struct isert_np *isert_np) | ||
3052 | { | ||
3053 | int empty; | ||
3054 | |||
3055 | mutex_lock(&isert_np->np_accept_mutex); | ||
3056 | empty = list_empty(&isert_np->np_accept_list); | ||
3057 | mutex_unlock(&isert_np->np_accept_mutex); | ||
3058 | |||
3059 | return empty; | ||
3060 | } | ||
3061 | |||
3062 | static int | ||
3063 | isert_rdma_accept(struct isert_conn *isert_conn) | 3061 | isert_rdma_accept(struct isert_conn *isert_conn) |
3064 | { | 3062 | { |
3065 | struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; | 3063 | struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; |
@@ -3151,16 +3149,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) | |||
3151 | int max_accept = 0, ret; | 3149 | int max_accept = 0, ret; |
3152 | 3150 | ||
3153 | accept_wait: | 3151 | accept_wait: |
3154 | ret = wait_event_interruptible(isert_np->np_accept_wq, | 3152 | ret = down_interruptible(&isert_np->np_sem); |
3155 | !isert_check_accept_queue(isert_np) || | ||
3156 | np->np_thread_state == ISCSI_NP_THREAD_RESET); | ||
3157 | if (max_accept > 5) | 3153 | if (max_accept > 5) |
3158 | return -ENODEV; | 3154 | return -ENODEV; |
3159 | 3155 | ||
3160 | spin_lock_bh(&np->np_thread_lock); | 3156 | spin_lock_bh(&np->np_thread_lock); |
3161 | if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { | 3157 | if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { |
3162 | spin_unlock_bh(&np->np_thread_lock); | 3158 | spin_unlock_bh(&np->np_thread_lock); |
3163 | pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); | 3159 | pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); |
3164 | return -ENODEV; | 3160 | return -ENODEV; |
3165 | } | 3161 | } |
3166 | spin_unlock_bh(&np->np_thread_lock); | 3162 | spin_unlock_bh(&np->np_thread_lock); |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 4c072ae34c01..da6612e68000 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h | |||
@@ -182,7 +182,7 @@ struct isert_device { | |||
182 | }; | 182 | }; |
183 | 183 | ||
184 | struct isert_np { | 184 | struct isert_np { |
185 | wait_queue_head_t np_accept_wq; | 185 | struct semaphore np_sem; |
186 | struct rdma_cm_id *np_cm_id; | 186 | struct rdma_cm_id *np_cm_id; |
187 | struct mutex np_accept_mutex; | 187 | struct mutex np_accept_mutex; |
188 | struct list_head np_accept_list; | 188 | struct list_head np_accept_list; |
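
Switching isert_np from a wait queue to a counting semaphore ties wakeups to queued connections one-to-one: isert_connect_request() adds the connection to np_accept_list and calls up(), and isert_accept_np() consumes exactly one pending entry per successful down_interruptible(), which also removes the need for the isert_check_accept_queue() predicate. A self-contained sketch of that producer/consumer shape (struct accept_queue and its helpers are illustrative, not the isert structures):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/semaphore.h>

struct accept_queue {
        struct semaphore sem;   /* counts queued, not-yet-accepted entries */
        struct mutex lock;      /* protects list */
        struct list_head list;
};

static void accept_queue_init(struct accept_queue *q)
{
        sema_init(&q->sem, 0);
        mutex_init(&q->lock);
        INIT_LIST_HEAD(&q->list);
}

/* Producer side: queue a node, then signal exactly one waiter. */
static void accept_queue_add(struct accept_queue *q, struct list_head *node)
{
        mutex_lock(&q->lock);
        list_add_tail(node, &q->list);  /* new entry first, list head second */
        mutex_unlock(&q->lock);
        up(&q->sem);
}

/* Consumer side: sleep until an entry exists, then take it. */
static struct list_head *accept_queue_take(struct accept_queue *q)
{
        struct list_head *node = NULL;

        if (down_interruptible(&q->sem))
                return NULL;            /* interrupted by a signal */

        mutex_lock(&q->lock);
        if (!list_empty(&q->list)) {
                node = q->list.next;
                list_del_init(node);
        }
        mutex_unlock(&q->lock);
        return node;
}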