author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-10-09 03:02:35 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-10-09 03:02:35 -0400
commit	1236d6bb6e19fc72ffc6bbcdeb1bfefe450e54ee (patch)
tree	47da3feee8e263e8c9352c85cf518e624be3c211 /drivers/infiniband
parent	750b1a6894ecc9b178c6e3d0a1170122971b2036 (diff)
parent	8a5776a5f49812d29fe4b2d0a2d71675c3facf3f (diff)
Merge 4.14-rc4 into staging-next

We want the staging/iio fixes in here as well to handle merge issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/core/iwpm_msg.c	8
-rw-r--r--	drivers/infiniband/core/iwpm_util.c	5
-rw-r--r--	drivers/infiniband/core/security.c	4
-rw-r--r--	drivers/infiniband/core/uverbs_cmd.c	14
-rw-r--r--	drivers/infiniband/core/verbs.c	4
-rw-r--r--	drivers/infiniband/hw/bnxt_re/bnxt_re.h	14
-rw-r--r--	drivers/infiniband/hw/bnxt_re/ib_verbs.c	107
-rw-r--r--	drivers/infiniband/hw/bnxt_re/main.c	28
-rw-r--r--	drivers/infiniband/hw/bnxt_re/qplib_rcfw.c	4
-rw-r--r--	drivers/infiniband/hw/bnxt_re/qplib_rcfw.h	3
-rw-r--r--	drivers/infiniband/hw/cxgb4/cm.c	9
-rw-r--r--	drivers/infiniband/hw/hfi1/chip.c	101
-rw-r--r--	drivers/infiniband/hw/hfi1/chip.h	3
-rw-r--r--	drivers/infiniband/hw/hfi1/eprom.c	20
-rw-r--r--	drivers/infiniband/hw/hfi1/file_ops.c	41
-rw-r--r--	drivers/infiniband/hw/hfi1/pcie.c	50
-rw-r--r--	drivers/infiniband/hw/hfi1/platform.c	4
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw.h	1
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_cm.c	154
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_cm.h	5
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_ctrl.c	2
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_main.c	39
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_p.h	2
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_puda.c	11
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_utils.c	6
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_verbs.c	14
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c	20
-rw-r--r--	drivers/infiniband/hw/mlx5/mem.c	47
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c	27
-rw-r--r--	drivers/infiniband/hw/nes/nes_verbs.c	4
-rw-r--r--	drivers/infiniband/hw/ocrdma/ocrdma_hw.c	3
-rw-r--r--	drivers/infiniband/hw/qedr/qedr.h	2
-rw-r--r--	drivers/infiniband/hw/qedr/qedr_cm.c	12
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma.h	31
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_cm.c	16
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_ib.c	13
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_main.c	15
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_vlan.c	30
-rw-r--r--	drivers/infiniband/ulp/iser/iser_memory.c	2
39 files changed, 541 insertions(+), 334 deletions(-)
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 30825bb9b8e9..8861c052155a 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -100,6 +100,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
 	if (ret)
 		goto pid_query_error;
 
+	nlmsg_end(skb, nlh);
+
 	pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
 		 __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
 
@@ -170,6 +172,8 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 				&pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);
 	if (ret)
 		goto add_mapping_error;
+
+	nlmsg_end(skb, nlh);
 	nlmsg_request->req_buffer = pm_msg;
 
 	ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -246,6 +250,8 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 				&pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);
 	if (ret)
 		goto query_mapping_error;
+
+	nlmsg_end(skb, nlh);
 	nlmsg_request->req_buffer = pm_msg;
 
 	ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -308,6 +314,8 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
 	if (ret)
 		goto remove_mapping_error;
 
+	nlmsg_end(skb, nlh);
+
 	ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
 	if (ret) {
 		skb = NULL; /* skb is freed in the netlink send-op handling */
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index c81c55942626..3c4faadb8cdd 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -597,6 +597,9 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
 			   &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
 	if (ret)
 		goto mapinfo_num_error;
+
+	nlmsg_end(skb, nlh);
+
 	ret = rdma_nl_unicast(skb, iwpm_pid);
 	if (ret) {
 		skb = NULL;
@@ -678,6 +681,8 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
 		if (ret)
 			goto send_mapping_info_unlock;
 
+		nlmsg_end(skb, nlh);
+
 		iwpm_print_sockaddr(&map_info->local_sockaddr,
 				    "send_mapping_info: Local sockaddr:");
 		iwpm_print_sockaddr(&map_info->mapped_sockaddr,
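All five hunks above restore the same missing step: a netlink message built with nlmsg_put() must be finalized with nlmsg_end(), which writes the final payload length into the header's nlmsg_len before the skb is handed to the transport; without it the receiver sees a length field that does not cover the attributes. A minimal sketch of the pattern, assuming only the stock netlink helpers (the function name and attribute type 1 are illustrative, not from the patch):

	static int example_send(struct sk_buff *skb, u32 portid)
	{
		struct nlmsghdr *nlh;
		u32 val = 42;				/* example payload */

		nlh = nlmsg_put(skb, 0, 0, NLMSG_DONE, 0, 0);
		if (!nlh)
			return -EMSGSIZE;
		if (nla_put_u32(skb, 1 /* hypothetical attr type */, val))
			return -EMSGSIZE;
		nlmsg_end(skb, nlh);			/* fix up nlmsg_len */
		return rdma_nl_unicast(skb, portid);	/* skb now self-describing */
	}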
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 70ad19c4c73e..88bdafb297f5 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -432,8 +432,10 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
 	atomic_set(&qp->qp_sec->error_list_count, 0);
 	init_completion(&qp->qp_sec->error_complete);
 	ret = security_ib_alloc_security(&qp->qp_sec->security);
-	if (ret)
+	if (ret) {
 		kfree(qp->qp_sec);
+		qp->qp_sec = NULL;
+	}
 
 	return ret;
 }
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 4ab30d832ac5..52a2cf2d83aa 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3869,15 +3869,15 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
 	resp.raw_packet_caps = attr.raw_packet_caps;
 	resp.response_length += sizeof(resp.raw_packet_caps);
 
-	if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps))
+	if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
 		goto end;
 
-	resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size;
-	resp.xrq_caps.max_num_tags = attr.xrq_caps.max_num_tags;
-	resp.xrq_caps.max_ops = attr.xrq_caps.max_ops;
-	resp.xrq_caps.max_sge = attr.xrq_caps.max_sge;
-	resp.xrq_caps.flags = attr.xrq_caps.flags;
-	resp.response_length += sizeof(resp.xrq_caps);
+	resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
+	resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
+	resp.tm_caps.max_ops = attr.tm_caps.max_ops;
+	resp.tm_caps.max_sge = attr.tm_caps.max_sge;
+	resp.tm_caps.flags = attr.tm_caps.flags;
+	resp.response_length += sizeof(resp.tm_caps);
 end:
 	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
 	return err;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index ee9e27dc799b..de57d6c11a25 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1646,7 +1646,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
 	 */
 	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
 		if (attr.qp_state >= IB_QPS_INIT) {
-			if (qp->device->get_link_layer(qp->device, attr.port_num) !=
+			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
 			    IB_LINK_LAYER_INFINIBAND)
 				return true;
 			goto lid_check;
@@ -1655,7 +1655,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
 
 	/* Can't get a quick answer, iterate over all ports */
 	for (port = 0; port < qp->device->phys_port_cnt; port++)
-		if (qp->device->get_link_layer(qp->device, port) !=
+		if (rdma_port_get_link_layer(qp->device, port) !=
 		    IB_LINK_LAYER_INFINIBAND)
 			num_eth_ports++;
 
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index b3ad37fec578..ecbac91b2e14 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -93,11 +93,13 @@ struct bnxt_re_dev {
 	struct ib_device		ibdev;
 	struct list_head		list;
 	unsigned long			flags;
 #define BNXT_RE_FLAG_NETDEV_REGISTERED	0
 #define BNXT_RE_FLAG_IBDEV_REGISTERED	1
 #define BNXT_RE_FLAG_GOT_MSIX		2
-#define BNXT_RE_FLAG_RCFW_CHANNEL_EN	8
-#define BNXT_RE_FLAG_QOS_WORK_REG	16
+#define BNXT_RE_FLAG_HAVE_L2_REF	3
+#define BNXT_RE_FLAG_RCFW_CHANNEL_EN	4
+#define BNXT_RE_FLAG_QOS_WORK_REG	5
+#define BNXT_RE_FLAG_TASK_IN_PROG	6
 	struct net_device		*netdev;
 	unsigned int			version, major, minor;
 	struct bnxt_en_dev		*en_dev;
@@ -108,6 +110,8 @@ struct bnxt_re_dev {
 
 	struct delayed_work		worker;
 	u8				cur_prio_map;
+	u8				active_speed;
+	u8				active_width;
 
 	/* FP Notification Queue (CQ & SRQ) */
 	struct tasklet_struct		nq_task;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 01eee15bbd65..0d89621d9fe8 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -259,14 +259,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
 	port_attr->sm_sl = 0;
 	port_attr->subnet_timeout = 0;
 	port_attr->init_type_reply = 0;
-	/* call the underlying netdev's ethtool hooks to query speed settings
-	 * for which we acquire rtnl_lock _only_ if it's registered with
-	 * IB stack to avoid race in the NETDEV_UNREG path
-	 */
-	if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
-		if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
-				     &port_attr->active_width))
-			return -EINVAL;
+	port_attr->active_speed = rdev->active_speed;
+	port_attr->active_width = rdev->active_width;
+
 	return 0;
 }
 
@@ -319,6 +314,7 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+	struct bnxt_qplib_gid *gid_to_del;
 
 	/* Delete the entry from the hardware */
 	ctx = *context;
@@ -328,11 +324,25 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
 	if (sgid_tbl && sgid_tbl->active) {
 		if (ctx->idx >= sgid_tbl->max)
 			return -EINVAL;
+		gid_to_del = &sgid_tbl->tbl[ctx->idx];
+		/* DEL_GID is called in WQ context(netdevice_event_work_handler)
+		 * or via the ib_unregister_device path. In the former case QP1
+		 * may not be destroyed yet, in which case just return as FW
+		 * needs that entry to be present and will fail it's deletion.
+		 * We could get invoked again after QP1 is destroyed OR get an
+		 * ADD_GID call with a different GID value for the same index
+		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
+		 */
+		if (ctx->idx == 0 &&
+		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
+		    ctx->refcnt == 1 && rdev->qp1_sqp) {
+			dev_dbg(rdev_to_dev(rdev),
+				"Trying to delete GID0 while QP1 is alive\n");
+			return -EFAULT;
+		}
 		ctx->refcnt--;
 		if (!ctx->refcnt) {
-			rc = bnxt_qplib_del_sgid(sgid_tbl,
-						 &sgid_tbl->tbl[ctx->idx],
-						 true);
+			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
 			if (rc) {
 				dev_err(rdev_to_dev(rdev),
 					"Failed to remove GID: %#x", rc);
@@ -816,6 +826,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
 
 		kfree(rdev->sqp_ah);
 		kfree(rdev->qp1_sqp);
+		rdev->qp1_sqp = NULL;
+		rdev->sqp_ah = NULL;
 	}
 
 	if (!IS_ERR_OR_NULL(qp->rumem))
@@ -1436,11 +1448,14 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 		qp->qplib_qp.modify_flags |=
 				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
 		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
+		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
 	} else if (qp_attr->qp_state == IB_QPS_RTR) {
 		qp->qplib_qp.modify_flags |=
 			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
 		qp->qplib_qp.path_mtu =
 			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
+		qp->qplib_qp.mtu =
+			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
 	}
 
 	if (qp_attr_mask & IB_QP_TIMEOUT) {
@@ -1551,43 +1566,46 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 {
 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
 	struct bnxt_re_dev *rdev = qp->rdev;
-	struct bnxt_qplib_qp qplib_qp;
+	struct bnxt_qplib_qp *qplib_qp;
 	int rc;
 
-	memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp));
-	qplib_qp.id = qp->qplib_qp.id;
-	qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
+	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
+	if (!qplib_qp)
+		return -ENOMEM;
+
+	qplib_qp->id = qp->qplib_qp.id;
+	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
 
-	rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp);
+	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
 	if (rc) {
 		dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
-		return rc;
+		goto out;
 	}
-	qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state);
-	qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0;
-	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
-	qp_attr->pkey_index = qplib_qp.pkey_index;
-	qp_attr->qkey = qplib_qp.qkey;
-	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
-	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label,
-			qplib_qp.ah.host_sgid_index,
-			qplib_qp.ah.hop_limit,
-			qplib_qp.ah.traffic_class);
-	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data);
-	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl);
-	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac);
-	qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
-	qp_attr->timeout = qplib_qp.timeout;
-	qp_attr->retry_cnt = qplib_qp.retry_cnt;
-	qp_attr->rnr_retry = qplib_qp.rnr_retry;
-	qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer;
-	qp_attr->rq_psn = qplib_qp.rq.psn;
-	qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic;
-	qp_attr->sq_psn = qplib_qp.sq.psn;
-	qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic;
-	qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR :
-							IB_SIGNAL_REQ_WR;
-	qp_attr->dest_qp_num = qplib_qp.dest_qpn;
+	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
+	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
+	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
+	qp_attr->pkey_index = qplib_qp->pkey_index;
+	qp_attr->qkey = qplib_qp->qkey;
+	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
+			qplib_qp->ah.host_sgid_index,
+			qplib_qp->ah.hop_limit,
+			qplib_qp->ah.traffic_class);
+	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
+	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
+	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
+	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
+	qp_attr->timeout = qplib_qp->timeout;
+	qp_attr->retry_cnt = qplib_qp->retry_cnt;
+	qp_attr->rnr_retry = qplib_qp->rnr_retry;
+	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
+	qp_attr->rq_psn = qplib_qp->rq.psn;
+	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
+	qp_attr->sq_psn = qplib_qp->sq.psn;
+	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
+	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
+							 IB_SIGNAL_REQ_WR;
+	qp_attr->dest_qp_num = qplib_qp->dest_qpn;
 
 	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
 	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
@@ -1596,7 +1614,9 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
 	qp_init_attr->cap = qp_attr->cap;
 
-	return 0;
+out:
+	kfree(qplib_qp);
+	return rc;
 }
 
 /* Routine for sending QP1 packets for RoCE V1 an V2
@@ -1908,6 +1928,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
 	switch (wr->opcode) {
 	case IB_WR_ATOMIC_CMP_AND_SWP:
 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
+		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
 		wqe->atomic.swap_data = atomic_wr(wr)->swap;
 		break;
 	case IB_WR_ATOMIC_FETCH_AND_ADD:
@@ -3062,7 +3083,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
 		return rc;
 	}
 
-	if (mr->npages && mr->pages) {
+	if (mr->pages) {
 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
 							&mr->qplib_frpl);
 		kfree(mr->pages);
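Of the bnxt_re changes above, the bnxt_re_query_qp() rework is the structural one: struct bnxt_qplib_qp is large enough that keeping it on the kernel stack is risky, so it moves to a kzalloc() allocation with a single exit label that frees it on every path. A sketch of the idiom under assumed names (big_hw_ctx and hw_query are hypothetical, not driver API):

	static int query_example(struct some_dev *dev)
	{
		struct big_hw_ctx *ctx;	/* hypothetical, too big for the stack */
		int rc;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		rc = hw_query(dev, ctx);	/* hypothetical firmware call */
		if (rc)
			goto out;

		/* ... copy the fields the caller wants out of ctx ... */
	out:
		kfree(ctx);			/* one free covers all paths */
		return rc;
	}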
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 82d1cbc27aee..e7450ea92aa9 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1161,6 +1161,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 		}
 	}
 	set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
+	ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
+			 &rdev->active_width);
 	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
 	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
 
@@ -1255,10 +1257,14 @@ static void bnxt_re_task(struct work_struct *work)
 		else if (netif_carrier_ok(rdev->netdev))
 			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
 					       IB_EVENT_PORT_ACTIVE);
+		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
+				 &rdev->active_width);
 		break;
 	default:
 		break;
 	}
+	smp_mb__before_atomic();
+	clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
 	kfree(re_work);
 }
 
@@ -1317,6 +1323,11 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
 		break;
 
 	case NETDEV_UNREGISTER:
+		/* netdev notifier will call NETDEV_UNREGISTER again later since
+		 * we are still holding the reference to the netdev
+		 */
+		if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags))
+			goto exit;
 		bnxt_re_ib_unreg(rdev, false);
 		bnxt_re_remove_one(rdev);
 		bnxt_re_dev_unreg(rdev);
@@ -1335,6 +1346,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
 			re_work->vlan_dev = (real_dev == netdev ?
 					     NULL : netdev);
 			INIT_WORK(&re_work->work, bnxt_re_task);
+			set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
 			queue_work(bnxt_re_wq, &re_work->work);
 		}
 	}
@@ -1375,6 +1387,22 @@ err_netdev:
 
 static void __exit bnxt_re_mod_exit(void)
 {
+	struct bnxt_re_dev *rdev;
+	LIST_HEAD(to_be_deleted);
+
+	mutex_lock(&bnxt_re_dev_lock);
+	/* Free all adapter allocated resources */
+	if (!list_empty(&bnxt_re_dev_list))
+		list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
+	mutex_unlock(&bnxt_re_dev_lock);
+
+	list_for_each_entry(rdev, &to_be_deleted, list) {
+		dev_info(rdev_to_dev(rdev), "Unregistering Device");
+		bnxt_re_dev_stop(rdev);
+		bnxt_re_ib_unreg(rdev, true);
+		bnxt_re_remove_one(rdev);
+		bnxt_re_dev_unreg(rdev);
+	}
 	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
 	if (bnxt_re_wq)
 		destroy_workqueue(bnxt_re_wq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 391bb7006e8f..2bdb1562bd21 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -107,6 +107,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 		return -EINVAL;
 	}
 
+	if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
+		return -ETIMEDOUT;
+
 	/* Cmdq are in 16-byte units, each request can consume 1 or more
 	 * cmdqe
 	 */
@@ -226,6 +229,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 		/* timed out */
 		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
 			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
+		set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
 		return rc;
 	}
 
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 0ed312f17c8d..85b16da287f9 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -162,8 +162,9 @@ struct bnxt_qplib_rcfw {
 	unsigned long		*cmdq_bitmap;
 	u32			bmap_size;
 	unsigned long		flags;
-#define FIRMWARE_INITIALIZED_FLAG	1
+#define FIRMWARE_INITIALIZED_FLAG	BIT(0)
 #define FIRMWARE_FIRST_FLAG		BIT(31)
+#define FIRMWARE_TIMED_OUT		BIT(3)
 	wait_queue_head_t	waitq;
 	int			(*aeq_handler)(struct bnxt_qplib_rcfw *,
 					       struct creq_func_event *);
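One subtlety worth noting in the two bnxt_re hunks above (an observation, not part of the patch): set_bit() and test_bit() take a bit number, not a mask, so a flag defined as BIT(3) == 8 actually selects bit 8 of rcfw->flags. The scheme stays consistent as long as every caller passes the same constant, as the new code does:

	/* Sketch: fail fast once the firmware has ever timed out. */
	static int check_fw_alive(struct bnxt_qplib_rcfw *rcfw)
	{
		if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))	/* bit nr. 8 */
			return -ETIMEDOUT;
		return 0;
	}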
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index ceaa2fa54d32..daf7a56e5d7e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2333,9 +2333,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned int stid = GET_TID(rpl);
 	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
 
+	if (!ep) {
+		pr_debug("%s stid %d lookup failure!\n", __func__, stid);
+		goto out;
+	}
 	pr_debug("%s ep %p\n", __func__, ep);
 	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 	c4iw_put_ep(&ep->com);
+out:
 	return 0;
 }
 
@@ -2594,9 +2599,9 @@ fail:
 	c4iw_put_ep(&child_ep->com);
 reject:
 	reject_cr(dev, hwtid, skb);
+out:
 	if (parent_ep)
 		c4iw_put_ep(&parent_ep->com);
-out:
 	return 0;
 }
 
@@ -3457,7 +3462,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 		cm_id->provider_data = ep;
 		goto out;
 	}
-
+	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
 			ep->com.local_addr.ss_family);
 fail2:
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index b2ed4b9cda6e..0be42787759f 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1066,6 +1066,8 @@ static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
 static int thermal_init(struct hfi1_devdata *dd);
 
 static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
+static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
+					    int msecs);
 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
 				  int msecs);
 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
@@ -8238,6 +8240,7 @@ static irqreturn_t general_interrupt(int irq, void *data)
 	u64 regs[CCE_NUM_INT_CSRS];
 	u32 bit;
 	int i;
+	irqreturn_t handled = IRQ_NONE;
 
 	this_cpu_inc(*dd->int_counter);
 
@@ -8258,9 +8261,10 @@ static irqreturn_t general_interrupt(int irq, void *data)
 	for_each_set_bit(bit, (unsigned long *)&regs[0],
 			 CCE_NUM_INT_CSRS * 64) {
 		is_interrupt(dd, bit);
+		handled = IRQ_HANDLED;
 	}
 
-	return IRQ_HANDLED;
+	return handled;
 }
 
 static irqreturn_t sdma_interrupt(int irq, void *data)
@@ -9413,7 +9417,7 @@ static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
 	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
 }
 
-void reset_qsfp(struct hfi1_pportdata *ppd)
+int reset_qsfp(struct hfi1_pportdata *ppd)
 {
 	struct hfi1_devdata *dd = ppd->dd;
 	u64 mask, qsfp_mask;
@@ -9443,6 +9447,13 @@ void reset_qsfp(struct hfi1_pportdata *ppd)
 	 * for alarms and warnings
 	 */
 	set_qsfp_int_n(ppd, 1);
+
+	/*
+	 * After the reset, AOC transmitters are enabled by default. They need
+	 * to be turned off to complete the QSFP setup before they can be
+	 * enabled again.
+	 */
+	return set_qsfp_tx(ppd, 0);
 }
 
 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
@@ -10305,6 +10316,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
 {
 	struct hfi1_devdata *dd = ppd->dd;
 	u32 previous_state;
+	int offline_state_ret;
 	int ret;
 
 	update_lcb_cache(dd);
@@ -10326,28 +10338,11 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
 	ppd->offline_disabled_reason =
 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
 
-	/*
-	 * Wait for offline transition. It can take a while for
-	 * the link to go down.
-	 */
-	ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
-	if (ret < 0)
-		return ret;
-
-	/*
-	 * Now in charge of LCB - must be after the physical state is
-	 * offline.quiet and before host_link_state is changed.
-	 */
-	set_host_lcb_access(dd);
-	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
-
-	/* make sure the logical state is also down */
-	ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
-	if (ret)
-		force_logical_link_state_down(ppd);
-
-	ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+	offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
+	if (offline_state_ret < 0)
+		return offline_state_ret;
 
+	/* Disabling AOC transmitters */
 	if (ppd->port_type == PORT_TYPE_QSFP &&
 	    ppd->qsfp_info.limiting_active &&
 	    qsfp_mod_present(ppd)) {
@@ -10365,6 +10360,30 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
 	}
 
 	/*
+	 * Wait for the offline.Quiet transition if it hasn't happened yet. It
+	 * can take a while for the link to go down.
+	 */
+	if (offline_state_ret != PLS_OFFLINE_QUIET) {
+		ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
+		if (ret < 0)
+			return ret;
+	}
+
+	/*
+	 * Now in charge of LCB - must be after the physical state is
+	 * offline.quiet and before host_link_state is changed.
+	 */
+	set_host_lcb_access(dd);
+	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
+
+	/* make sure the logical state is also down */
+	ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
+	if (ret)
+		force_logical_link_state_down(ppd);
+
+	ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+
+	/*
 	 * The LNI has a mandatory wait time after the physical state
 	 * moves to Offline.Quiet. The wait time may be different
 	 * depending on how the link went down. The 8051 firmware
@@ -10396,6 +10415,9 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
 	    & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
 		/* went down while attempting link up */
 		check_lni_states(ppd);
+
+		/* The QSFP doesn't need to be reset on LNI failure */
+		ppd->qsfp_info.reset_needed = 0;
 	}
 
 	/* the active link width (downgrade) is 0 on link down */
@@ -12804,6 +12826,39 @@ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
 	return 0;
 }
 
+/*
+ * wait_phys_link_offline_quiet_substates - wait for any offline substate
+ * @ppd: port device
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for any offline physical link
+ * state change to occur.
+ * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
+ */
+static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
+					    int msecs)
+{
+	u32 read_state;
+	unsigned long timeout;
+
+	timeout = jiffies + msecs_to_jiffies(msecs);
+	while (1) {
+		read_state = read_physical_state(ppd->dd);
+		if ((read_state & 0xF0) == PLS_OFFLINE)
+			break;
+		if (time_after(jiffies, timeout)) {
+			dd_dev_err(ppd->dd,
+				   "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
+				   read_state, msecs);
+			return -ETIMEDOUT;
+		}
+		usleep_range(1950, 2050); /* sleep 2ms-ish */
+	}
+
+	log_state_transition(ppd, read_state);
+	return read_state;
+}
+
 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
 
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index b8345a60a0fb..50b8645d0b87 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -204,6 +204,7 @@
 #define PLS_OFFLINE_READY_TO_QUIET_LT	   0x92
 #define PLS_OFFLINE_REPORT_FAILURE	   0x93
 #define PLS_OFFLINE_READY_TO_QUIET_BCC	   0x94
+#define PLS_OFFLINE_QUIET_DURATION	   0x95
 #define PLS_POLLING			   0x20
 #define PLS_POLLING_QUIET		   0x20
 #define PLS_POLLING_ACTIVE		   0x21
@@ -722,7 +723,7 @@ void handle_link_downgrade(struct work_struct *work);
 void handle_link_bounce(struct work_struct *work);
 void handle_start_link(struct work_struct *work);
 void handle_sma_message(struct work_struct *work);
-void reset_qsfp(struct hfi1_pportdata *ppd);
+int reset_qsfp(struct hfi1_pportdata *ppd);
 void qsfp_event(struct work_struct *work);
 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags);
 int send_idle_sma(struct hfi1_devdata *dd, u64 message);
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
index d46b17107901..1613af1c58d9 100644
--- a/drivers/infiniband/hw/hfi1/eprom.c
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -204,7 +204,10 @@ done_asic:
 	return ret;
 }
 
-/* magic character sequence that trails an image */
+/* magic character sequence that begins an image */
+#define IMAGE_START_MAGIC "APO="
+
+/* magic character sequence that might trail an image */
 #define IMAGE_TRAIL_MAGIC "egamiAPO"
 
 /* EPROM file types */
@@ -250,6 +253,7 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
 {
 	void *buffer;
 	void *p;
+	u32 length;
 	int ret;
 
 	buffer = kmalloc(P1_SIZE, GFP_KERNEL);
@@ -262,15 +266,21 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
 		return ret;
 	}
 
-	/* scan for image magic that may trail the actual data */
-	p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
-	if (!p) {
+	/* config partition is valid only if it starts with IMAGE_START_MAGIC */
+	if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) {
 		kfree(buffer);
 		return -ENOENT;
 	}
 
+	/* scan for image magic that may trail the actual data */
+	p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
+	if (p)
+		length = p - buffer;
+	else
+		length = P1_SIZE;
+
 	*data = buffer;
-	*size = p - buffer;
+	*size = length;
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 2bc89260235a..d9a1e9893136 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -930,15 +930,8 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
 	switch (ret) {
 	case 0:
 		ret = setup_base_ctxt(fd, uctxt);
-		if (uctxt->subctxt_cnt) {
-			/*
-			 * Base context is done (successfully or not), notify
-			 * anybody using a sub-context that is waiting for
-			 * this completion.
-			 */
-			clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
-			wake_up(&uctxt->wait);
-		}
+		if (ret)
+			deallocate_ctxt(uctxt);
 		break;
 	case 1:
 		ret = complete_subctxt(fd);
@@ -1305,25 +1298,25 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
 	/* Now allocate the RcvHdr queue and eager buffers. */
 	ret = hfi1_create_rcvhdrq(dd, uctxt);
 	if (ret)
-		return ret;
+		goto done;
 
 	ret = hfi1_setup_eagerbufs(uctxt);
 	if (ret)
-		goto setup_failed;
+		goto done;
 
 	/* If sub-contexts are enabled, do the appropriate setup */
 	if (uctxt->subctxt_cnt)
 		ret = setup_subctxt(uctxt);
 	if (ret)
-		goto setup_failed;
+		goto done;
 
 	ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
 	if (ret)
-		goto setup_failed;
+		goto done;
 
 	ret = init_user_ctxt(fd, uctxt);
 	if (ret)
-		goto setup_failed;
+		goto done;
 
 	user_init(uctxt);
 
@@ -1331,12 +1324,22 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
 	fd->uctxt = uctxt;
 	hfi1_rcd_get(uctxt);
 
-	return 0;
-
-setup_failed:
-	/* Set the failed bit so sub-context init can do the right thing */
-	set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
-	deallocate_ctxt(uctxt);
+done:
+	if (uctxt->subctxt_cnt) {
+		/*
+		 * On error, set the failed bit so sub-contexts will clean up
+		 * correctly.
+		 */
+		if (ret)
+			set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
+
+		/*
+		 * Base context is done (successfully or not), notify anybody
+		 * using a sub-context that is waiting for this completion.
+		 */
+		clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
+		wake_up(&uctxt->wait);
+	}
 
 	return ret;
 }
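The file_ops.c rework funnels every setup_base_ctxt() outcome through one `done:` label so that sub-context openers sleeping on uctxt->wait are always woken, success or failure. A sketch of the waiter side this keeps working (an assumption based on the flags used above, not code from the patch; the errno choice is illustrative):

	static int wait_for_base(struct hfi1_ctxtdata *uctxt)
	{
		int ret = wait_event_interruptible(
			uctxt->wait,
			!test_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags));

		if (ret)
			return ret;	/* interrupted by a signal */
		if (test_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags))
			return -ENOMEM;	/* base context setup failed */
		return 0;
	}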
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 82447b7cdda1..09e50fd2a08f 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -68,7 +68,7 @@
 /*
  * Code to adjust PCIe capabilities.
  */
-static int tune_pcie_caps(struct hfi1_devdata *);
+static void tune_pcie_caps(struct hfi1_devdata *);
 
 /*
  * Do all the common PCIe setup and initialization.
@@ -351,7 +351,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
  */
 int request_msix(struct hfi1_devdata *dd, u32 msireq)
 {
-	int nvec, ret;
+	int nvec;
 
 	nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq,
 				     PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
@@ -360,12 +360,7 @@ int request_msix(struct hfi1_devdata *dd, u32 msireq)
 		return nvec;
 	}
 
-	ret = tune_pcie_caps(dd);
-	if (ret) {
-		dd_dev_err(dd, "tune_pcie_caps() failed: %d\n", ret);
-		pci_free_irq_vectors(dd->pcidev);
-		return ret;
-	}
+	tune_pcie_caps(dd);
 
 	/* check for legacy IRQ */
 	if (nvec == 1 && !dd->pcidev->msix_enabled)
@@ -502,7 +497,7 @@ uint aspm_mode = ASPM_MODE_DISABLED;
 module_param_named(aspm, aspm_mode, uint, S_IRUGO);
 MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
 
-static int tune_pcie_caps(struct hfi1_devdata *dd)
+static void tune_pcie_caps(struct hfi1_devdata *dd)
 {
 	struct pci_dev *parent;
 	u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
@@ -513,22 +508,14 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
 	 * Turn on extended tags in DevCtl in case the BIOS has turned it off
 	 * to improve WFR SDMA bandwidth
 	 */
-	ret = pcie_capability_read_word(dd->pcidev,
-					PCI_EXP_DEVCTL, &ectl);
-	if (ret) {
-		dd_dev_err(dd, "Unable to read from PCI config\n");
-		return ret;
-	}
-
-	if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
+	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
+	if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
 		dd_dev_info(dd, "Enabling PCIe extended tags\n");
 		ectl |= PCI_EXP_DEVCTL_EXT_TAG;
 		ret = pcie_capability_write_word(dd->pcidev,
 						 PCI_EXP_DEVCTL, ectl);
-		if (ret) {
-			dd_dev_err(dd, "Unable to write to PCI config\n");
-			return ret;
-		}
+		if (ret)
+			dd_dev_info(dd, "Unable to write to PCI config\n");
 	}
 	/* Find out supported and configured values for parent (root) */
 	parent = dd->pcidev->bus->self;
@@ -536,15 +523,22 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
 	 * The driver cannot perform the tuning if it does not have
 	 * access to the upstream component.
 	 */
-	if (!parent)
-		return -EINVAL;
+	if (!parent) {
+		dd_dev_info(dd, "Parent not found\n");
+		return;
+	}
 	if (!pci_is_root_bus(parent->bus)) {
 		dd_dev_info(dd, "Parent not root\n");
-		return -EINVAL;
+		return;
+	}
+	if (!pci_is_pcie(parent)) {
+		dd_dev_info(dd, "Parent is not PCI Express capable\n");
+		return;
+	}
+	if (!pci_is_pcie(dd->pcidev)) {
+		dd_dev_info(dd, "PCI device is not PCI Express capable\n");
+		return;
 	}
-
-	if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
-		return -EINVAL;
 	rc_mpss = parent->pcie_mpss;
 	rc_mps = ffs(pcie_get_mps(parent)) - 8;
 	/* Find out supported and configured values for endpoint (us) */
@@ -590,8 +584,6 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
 		ep_mrrs = max_mrrs;
 		pcie_set_readrq(dd->pcidev, ep_mrrs);
 	}
-
-	return 0;
 }
 
 /* End of PCIe capability tuning */
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index a8af96d2b1b0..d486355880cb 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -790,7 +790,9 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
 	 * reuse of stale settings established in our previous pass through.
 	 */
 	if (ppd->qsfp_info.reset_needed) {
-		reset_qsfp(ppd);
+		ret = reset_qsfp(ppd);
+		if (ret)
+			return ret;
 		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
 	} else {
 		ppd->qsfp_info.reset_needed = 1;
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 9b1566468744..a65e4cbdce2f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -201,7 +201,6 @@ enum init_completion_state {
 	CEQ_CREATED,
 	ILQ_CREATED,
 	IEQ_CREATED,
-	INET_NOTIFIER,
 	IP_ADDR_REGISTERED,
 	RDMA_DEV_REGISTERED
 };
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 14f36ba4e5be..5230dd3c938c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1504,23 +1504,40 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
 }
 
 /**
- * listen_port_in_use - determine if port is in use
- * @port: Listen port number
+ * i40iw_port_in_use - determine if port is in use
+ * @port: port number
+ * @active_side: flag for listener side vs active side
  */
-static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
+static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side)
 {
 	struct i40iw_cm_listener *listen_node;
+	struct i40iw_cm_node *cm_node;
 	unsigned long flags;
 	bool ret = false;
 
-	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
-	list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
-		if (listen_node->loc_port == port) {
-			ret = true;
-			break;
+	if (active_side) {
+		/* search connected node list */
+		spin_lock_irqsave(&cm_core->ht_lock, flags);
+		list_for_each_entry(cm_node, &cm_core->connected_nodes, list) {
+			if (cm_node->loc_port == port) {
+				ret = true;
+				break;
+			}
+		}
+		if (!ret)
+			clear_bit(port, cm_core->active_side_ports);
+		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+	} else {
+		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+		list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+			if (listen_node->loc_port == port) {
+				ret = true;
+				break;
+			}
 		}
+		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
 	}
-	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
 	return ret;
 }
 
@@ -1868,7 +1885,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
 	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
 
 	if (listener->iwdev) {
-		if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port))
+		if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false))
 			i40iw_manage_apbvt(listener->iwdev,
 					   listener->loc_port,
 					   I40IW_MANAGE_APBVT_DEL);
@@ -2247,21 +2264,21 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
 	if (cm_node->listener) {
 		i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
 	} else {
-		if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
-		    cm_node->apbvt_set) {
+		if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) {
 			i40iw_manage_apbvt(cm_node->iwdev,
 					   cm_node->loc_port,
 					   I40IW_MANAGE_APBVT_DEL);
-			i40iw_get_addr_info(cm_node, &nfo);
-			if (cm_node->qhash_set) {
-				i40iw_manage_qhash(cm_node->iwdev,
-						   &nfo,
-						   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
-						   I40IW_QHASH_MANAGE_TYPE_DELETE,
-						   NULL,
-						   false);
-				cm_node->qhash_set = 0;
-			}
+			cm_node->apbvt_set = 0;
+		}
+		i40iw_get_addr_info(cm_node, &nfo);
+		if (cm_node->qhash_set) {
+			i40iw_manage_qhash(cm_node->iwdev,
+					   &nfo,
+					   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+					   I40IW_QHASH_MANAGE_TYPE_DELETE,
+					   NULL,
+					   false);
+			cm_node->qhash_set = 0;
 		}
 	}
 
@@ -3255,7 +3272,8 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
 	tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
 	if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
 		tcp_info->insert_vlan_tag = true;
-		tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id);
+		tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) |
+						 cm_node->vlan_id);
 	}
 	if (cm_node->ipv4) {
 		tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
@@ -3737,10 +3755,8 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	struct sockaddr_in *raddr;
 	struct sockaddr_in6 *laddr6;
 	struct sockaddr_in6 *raddr6;
-	bool qhash_set = false;
-	int apbvt_set = 0;
-	int err = 0;
-	enum i40iw_status_code status;
+	int ret = 0;
+	unsigned long flags;
 
 	ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
 	if (!ibqp)
@@ -3789,32 +3805,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	cm_info.user_pri = rt_tos2priority(cm_id->tos);
 	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
 		    __func__, cm_id->tos, cm_info.user_pri);
-	if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
-	    (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
-				     raddr6->sin6_addr.in6_u.u6_addr32,
-				     sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
-		status = i40iw_manage_qhash(iwdev,
-					    &cm_info,
-					    I40IW_QHASH_TYPE_TCP_ESTABLISHED,
-					    I40IW_QHASH_MANAGE_TYPE_ADD,
-					    NULL,
-					    true);
-		if (status)
-			return -EINVAL;
-		qhash_set = true;
-	}
-	status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
-	if (status) {
-		i40iw_manage_qhash(iwdev,
-				   &cm_info,
-				   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
-				   I40IW_QHASH_MANAGE_TYPE_DELETE,
-				   NULL,
-				   false);
-		return -EINVAL;
-	}
-
-	apbvt_set = 1;
 	cm_id->add_ref(cm_id);
 	cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
 				       conn_param->private_data_len,
@@ -3822,17 +3812,40 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 				       &cm_info);
 
 	if (IS_ERR(cm_node)) {
-		err = PTR_ERR(cm_node);
-		goto err_out;
+		ret = PTR_ERR(cm_node);
+		cm_id->rem_ref(cm_id);
+		return ret;
+	}
+
+	if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
+	    (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
+				     raddr6->sin6_addr.in6_u.u6_addr32,
+				     sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
+		if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+				       I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
+			ret = -EINVAL;
+			goto err;
+		}
+		cm_node->qhash_set = true;
 	}
 
+	spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
+	if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) {
+		spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+		if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) {
+			ret = -EINVAL;
+			goto err;
+		}
+	} else {
+		spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+	}
+
+	cm_node->apbvt_set = true;
 	i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
 	if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
 	    !cm_node->ord_size)
 		cm_node->ord_size = 1;
 
-	cm_node->apbvt_set = apbvt_set;
-	cm_node->qhash_set = qhash_set;
 	iwqp->cm_node = cm_node;
 	cm_node->iwqp = iwqp;
 	iwqp->cm_id = cm_id;
@@ -3840,11 +3853,9 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3840 3853
3841 if (cm_node->state != I40IW_CM_STATE_OFFLOADED) { 3854 if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
3842 cm_node->state = I40IW_CM_STATE_SYN_SENT; 3855 cm_node->state = I40IW_CM_STATE_SYN_SENT;
3843 err = i40iw_send_syn(cm_node, 0); 3856 ret = i40iw_send_syn(cm_node, 0);
3844 if (err) { 3857 if (ret)
3845 i40iw_rem_ref_cm_node(cm_node); 3858 goto err;
3846 goto err_out;
3847 }
3848 } 3859 }
3849 3860
3850 i40iw_debug(cm_node->dev, 3861 i40iw_debug(cm_node->dev,
@@ -3853,9 +3864,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3853 cm_node->rem_port, 3864 cm_node->rem_port,
3854 cm_node, 3865 cm_node,
3855 cm_node->cm_id); 3866 cm_node->cm_id);
3867
3856 return 0; 3868 return 0;
3857 3869
3858err_out: 3870err:
3859 if (cm_info.ipv4) 3871 if (cm_info.ipv4)
3860 i40iw_debug(&iwdev->sc_dev, 3872 i40iw_debug(&iwdev->sc_dev,
3861 I40IW_DEBUG_CM, 3873 I40IW_DEBUG_CM,
@@ -3867,22 +3879,10 @@ err_out:
3867 "Api - connect() FAILED: dest addr=%pI6", 3879 "Api - connect() FAILED: dest addr=%pI6",
3868 cm_info.rem_addr); 3880 cm_info.rem_addr);
3869 3881
3870 if (qhash_set) 3882 i40iw_rem_ref_cm_node(cm_node);
3871 i40iw_manage_qhash(iwdev,
3872 &cm_info,
3873 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3874 I40IW_QHASH_MANAGE_TYPE_DELETE,
3875 NULL,
3876 false);
3877
3878 if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
3879 cm_info.loc_port))
3880 i40iw_manage_apbvt(iwdev,
3881 cm_info.loc_port,
3882 I40IW_MANAGE_APBVT_DEL);
3883 cm_id->rem_ref(cm_id); 3883 cm_id->rem_ref(cm_id);
3884 iwdev->cm_core.stats_connect_errs++; 3884 iwdev->cm_core.stats_connect_errs++;
3885 return err; 3885 return ret;
3886} 3886}
3887 3887
3888/** 3888/**
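The i40iw_connect()/tcp_info changes above fold the 3-bit 802.1Q priority into the VLAN tag word: PCP in bits 15:13 (I40IW_VLAN_PRIO_SHIFT, defined as 13 in the i40iw_cm.h hunk below) and the 12-bit VID in bits 11:0. A minimal standalone sketch of that packing; make_tci() is a hypothetical helper, not a driver function:

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT 13	/* matches I40IW_VLAN_PRIO_SHIFT */

static uint16_t make_tci(uint8_t prio, uint16_t vlan_id)
{
	/* PCP in bits 15:13, VID in bits 11:0; bit 12 (DEI) left clear */
	return (uint16_t)(((prio & 0x7) << VLAN_PRIO_SHIFT) | (vlan_id & 0xfff));
}

int main(void)
{
	printf("0x%04x\n", make_tci(5, 100));	/* prio 5, VID 100 -> 0xa064 */
	return 0;
}
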
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 2e52e38ffcf3..45abef76295b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -71,6 +71,9 @@
71#define I40IW_HW_IRD_SETTING_32 32 71#define I40IW_HW_IRD_SETTING_32 32
72#define I40IW_HW_IRD_SETTING_64 64 72#define I40IW_HW_IRD_SETTING_64 64
73 73
74#define MAX_PORTS 65536
75#define I40IW_VLAN_PRIO_SHIFT 13
76
74enum ietf_mpa_flags { 77enum ietf_mpa_flags {
75 IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ 78 IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
76 IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */ 79 IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */
@@ -411,6 +414,8 @@ struct i40iw_cm_core {
411 spinlock_t ht_lock; /* manage hash table */ 414 spinlock_t ht_lock; /* manage hash table */
412 spinlock_t listen_list_lock; /* listen list */ 415 spinlock_t listen_list_lock; /* listen list */
413 416
417 unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)];
418
414 u64 stats_nodes_created; 419 u64 stats_nodes_created;
415 u64 stats_nodes_destroyed; 420 u64 stats_nodes_destroyed;
416 u64 stats_listen_created; 421 u64 stats_listen_created;
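The active_side_ports array added above dedicates one bit to each possible TCP port; the connect path uses test_and_set_bit() on it so the APBVT add command is issued only for the first active-side user of a port. A userspace re-derivation of the idiom (the kernel helper is atomic, this sketch is not; BITS_TO_LONGS is re-derived here and lives in the kernel's bitops headers):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_PORTS	65536
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)];

/* returns the bit's previous value, like the kernel's test_and_set_bit() */
static bool test_and_set_port(unsigned int port)
{
	unsigned long mask = 1UL << (port % BITS_PER_LONG);
	unsigned long *word = &active_side_ports[port / BITS_PER_LONG];
	bool old = *word & mask;

	*word |= mask;
	return old;
}

int main(void)
{
	/* prints "0 1": only the first caller would trigger the APBVT add */
	printf("%d %d\n", test_and_set_port(8080), test_and_set_port(8080));
	return 0;
}
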
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index d1f5345f04f0..42ca5346777d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -48,7 +48,7 @@
48 * @wqe: cqp wqe for header 48 * @wqe: cqp wqe for header
49 * @header: header for the cqp wqe 49 * @header: header for the cqp wqe
50 */ 50 */
51static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header) 51void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
52{ 52{
53 wmb(); /* make sure WQE is populated before polarity is set */ 53 wmb(); /* make sure WQE is populated before polarity is set */
54 set_64bit_val(wqe, 24, header); 54 set_64bit_val(wqe, 24, header);
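i40iw_insert_wqe_hdr() is exported above (and adopted at the i40iw_puda.c call sites below) so every WQE writer shares one ordering rule: all payload words must be visible before the header word at byte offset 24 that carries the valid/polarity bit, hence the wmb() ahead of the final store. A C11 userspace analogue of that release ordering, with illustrative names; the kernel uses wmb() because the consumer is the device:

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

static void insert_wqe_hdr(uint64_t *wqe, uint64_t header)
{
	/* order every prior payload store before the header store */
	atomic_thread_fence(memory_order_release);
	wqe[3] = header;		/* byte offset 24, as in the driver */
}

static void post_wqe(uint64_t *wqe, const uint64_t payload[3], uint64_t hdr)
{
	memcpy(wqe, payload, 3 * sizeof(uint64_t));
	insert_wqe_hdr(wqe, hdr);	/* consumer may now read the WQE */
}

int main(void)
{
	uint64_t wqe[4] = {0};
	const uint64_t payload[3] = {1, 2, 3};

	post_wqe(wqe, payload, 1ULL << 63);
	return !(wqe[3] >> 63);		/* 0: valid bit was set last */
}
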
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index cc742c3132c6..27590ae21881 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -99,8 +99,6 @@ static struct notifier_block i40iw_net_notifier = {
99 .notifier_call = i40iw_net_event 99 .notifier_call = i40iw_net_event
100}; 100};
101 101
102static atomic_t i40iw_notifiers_registered;
103
104/** 102/**
105 * i40iw_find_i40e_handler - find a handler given a client info 103 * i40iw_find_i40e_handler - find a handler given a client info
106 * @ldev: pointer to a client info 104 * @ldev: pointer to a client info
@@ -1376,11 +1374,20 @@ error:
1376 */ 1374 */
1377static void i40iw_register_notifiers(void) 1375static void i40iw_register_notifiers(void)
1378{ 1376{
1379 if (atomic_inc_return(&i40iw_notifiers_registered) == 1) { 1377 register_inetaddr_notifier(&i40iw_inetaddr_notifier);
1380 register_inetaddr_notifier(&i40iw_inetaddr_notifier); 1378 register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
1381 register_inet6addr_notifier(&i40iw_inetaddr6_notifier); 1379 register_netevent_notifier(&i40iw_net_notifier);
1382 register_netevent_notifier(&i40iw_net_notifier); 1380}
1383 } 1381
1382/**
1383 * i40iw_unregister_notifiers - unregister tcp ip notifiers
1384 */
1385
1386static void i40iw_unregister_notifiers(void)
1387{
1388 unregister_netevent_notifier(&i40iw_net_notifier);
1389 unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
1390 unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
1384} 1391}
1385 1392
1386/** 1393/**
@@ -1400,6 +1407,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
1400 u32 i; 1407 u32 i;
1401 u32 size; 1408 u32 size;
1402 1409
1410 if (!ldev->msix_count) {
1411 i40iw_pr_err("No MSI-X vectors\n");
1412 return I40IW_ERR_CONFIG;
1413 }
1414
1403 iwdev->msix_count = ldev->msix_count; 1415 iwdev->msix_count = ldev->msix_count;
1404 1416
1405 size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count; 1417 size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
@@ -1462,12 +1474,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev)
1462 if (!iwdev->reset) 1474 if (!iwdev->reset)
1463 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); 1475 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
1464 /* fallthrough */ 1476 /* fallthrough */
1465 case INET_NOTIFIER:
1466 if (!atomic_dec_return(&i40iw_notifiers_registered)) {
1467 unregister_netevent_notifier(&i40iw_net_notifier);
1468 unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
1469 unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
1470 }
1471 /* fallthrough */ 1477 /* fallthrough */
1472 case PBLE_CHUNK_MEM: 1478 case PBLE_CHUNK_MEM:
1473 i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); 1479 i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
@@ -1550,7 +1556,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
1550 1556
1551 status = i40iw_save_msix_info(iwdev, ldev); 1557 status = i40iw_save_msix_info(iwdev, ldev);
1552 if (status) 1558 if (status)
1553 goto exit; 1559 return status;
1554 iwdev->hw.dev_context = (void *)ldev->pcidev; 1560 iwdev->hw.dev_context = (void *)ldev->pcidev;
1555 iwdev->hw.hw_addr = ldev->hw_addr; 1561 iwdev->hw.hw_addr = ldev->hw_addr;
1556 status = i40iw_allocate_dma_mem(&iwdev->hw, 1562 status = i40iw_allocate_dma_mem(&iwdev->hw,
@@ -1667,8 +1673,6 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
1667 break; 1673 break;
1668 iwdev->init_state = PBLE_CHUNK_MEM; 1674 iwdev->init_state = PBLE_CHUNK_MEM;
1669 iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM); 1675 iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
1670 i40iw_register_notifiers();
1671 iwdev->init_state = INET_NOTIFIER;
1672 status = i40iw_add_mac_ip(iwdev); 1676 status = i40iw_add_mac_ip(iwdev);
1673 if (status) 1677 if (status)
1674 break; 1678 break;
@@ -2018,6 +2022,8 @@ static int __init i40iw_init_module(void)
2018 i40iw_client.type = I40E_CLIENT_IWARP; 2022 i40iw_client.type = I40E_CLIENT_IWARP;
2019 spin_lock_init(&i40iw_handler_lock); 2023 spin_lock_init(&i40iw_handler_lock);
2020 ret = i40e_register_client(&i40iw_client); 2024 ret = i40e_register_client(&i40iw_client);
2025 i40iw_register_notifiers();
2026
2021 return ret; 2027 return ret;
2022} 2028}
2023 2029
@@ -2029,6 +2035,7 @@ static int __init i40iw_init_module(void)
2029 */ 2035 */
2030static void __exit i40iw_exit_module(void) 2036static void __exit i40iw_exit_module(void)
2031{ 2037{
2038 i40iw_unregister_notifiers();
2032 i40e_unregister_client(&i40iw_client); 2039 i40e_unregister_client(&i40iw_client);
2033} 2040}
2034 2041
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index e217a1259f57..5498ad01c280 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -59,6 +59,8 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
59 struct i40iw_fast_reg_stag_info *info, 59 struct i40iw_fast_reg_stag_info *info,
60 bool post_sq); 60 bool post_sq);
61 61
62void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);
63
62/* HMC/FPM functions */ 64/* HMC/FPM functions */
63enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, 65enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
64 u8 hmc_fn_id); 66 u8 hmc_fn_id);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index c2cab20c4bc5..59f70676f0e0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -123,12 +123,11 @@ static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
123 get_64bit_val(wqe, 24, &offset24); 123 get_64bit_val(wqe, 24, &offset24);
124 124
125 offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID); 125 offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
126 set_64bit_val(wqe, 24, offset24);
127 126
128 set_64bit_val(wqe, 0, buf->mem.pa); 127 set_64bit_val(wqe, 0, buf->mem.pa);
129 set_64bit_val(wqe, 8, 128 set_64bit_val(wqe, 8,
130 LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN)); 129 LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
131 set_64bit_val(wqe, 24, offset24); 130 i40iw_insert_wqe_hdr(wqe, offset24);
132} 131}
133 132
134/** 133/**
@@ -409,9 +408,7 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
409 set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN)); 408 set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
410 set_64bit_val(wqe, 16, header[0]); 409 set_64bit_val(wqe, 16, header[0]);
411 410
412 /* Ensure all data is written before writing valid bit */ 411 i40iw_insert_wqe_hdr(wqe, header[1]);
413 wmb();
414 set_64bit_val(wqe, 24, header[1]);
415 412
416 i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32); 413 i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
417 i40iw_qp_post_wr(&qp->qp_uk); 414 i40iw_qp_post_wr(&qp->qp_uk);
@@ -539,7 +536,7 @@ static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct
539 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) | 536 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
540 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); 537 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
541 538
542 set_64bit_val(wqe, 24, header); 539 i40iw_insert_wqe_hdr(wqe, header);
543 540
544 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32); 541 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
545 i40iw_sc_cqp_post_sq(cqp); 542 i40iw_sc_cqp_post_sq(cqp);
@@ -655,7 +652,7 @@ static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct
655 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) | 652 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
656 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) | 653 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
657 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); 654 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
658 set_64bit_val(wqe, 24, header); 655 i40iw_insert_wqe_hdr(wqe, header);
659 656
660 i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE", 657 i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
661 wqe, I40IW_CQP_WQE_SIZE * 8); 658 wqe, I40IW_CQP_WQE_SIZE * 8);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 62f1f45b8737..e52dbbb4165e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -160,7 +160,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
160 return NOTIFY_DONE; 160 return NOTIFY_DONE;
161 161
162 iwdev = &hdl->device; 162 iwdev = &hdl->device;
163 if (iwdev->init_state < INET_NOTIFIER) 163 if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
164 return NOTIFY_DONE; 164 return NOTIFY_DONE;
165 165
166 netdev = iwdev->ldev->netdev; 166 netdev = iwdev->ldev->netdev;
@@ -217,7 +217,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
217 return NOTIFY_DONE; 217 return NOTIFY_DONE;
218 218
219 iwdev = &hdl->device; 219 iwdev = &hdl->device;
220 if (iwdev->init_state < INET_NOTIFIER) 220 if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
221 return NOTIFY_DONE; 221 return NOTIFY_DONE;
222 222
223 netdev = iwdev->ldev->netdev; 223 netdev = iwdev->ldev->netdev;
@@ -266,7 +266,7 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
266 if (!iwhdl) 266 if (!iwhdl)
267 return NOTIFY_DONE; 267 return NOTIFY_DONE;
268 iwdev = &iwhdl->device; 268 iwdev = &iwhdl->device;
269 if (iwdev->init_state < INET_NOTIFIER) 269 if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
270 return NOTIFY_DONE; 270 return NOTIFY_DONE;
271 p = (__be32 *)neigh->primary_key; 271 p = (__be32 *)neigh->primary_key;
272 i40iw_copy_ip_ntohl(local_ipaddr, p); 272 i40iw_copy_ip_ntohl(local_ipaddr, p);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 1aa411034a27..62be0a41ad0b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -826,12 +826,14 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
826 attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE; 826 attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
827 attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; 827 attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
828 attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; 828 attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
829 attr->port_num = 1;
829 init_attr->event_handler = iwqp->ibqp.event_handler; 830 init_attr->event_handler = iwqp->ibqp.event_handler;
830 init_attr->qp_context = iwqp->ibqp.qp_context; 831 init_attr->qp_context = iwqp->ibqp.qp_context;
831 init_attr->send_cq = iwqp->ibqp.send_cq; 832 init_attr->send_cq = iwqp->ibqp.send_cq;
832 init_attr->recv_cq = iwqp->ibqp.recv_cq; 833 init_attr->recv_cq = iwqp->ibqp.recv_cq;
833 init_attr->srq = iwqp->ibqp.srq; 834 init_attr->srq = iwqp->ibqp.srq;
834 init_attr->cap = attr->cap; 835 init_attr->cap = attr->cap;
836 init_attr->port_num = 1;
835 return 0; 837 return 0;
836} 838}
837 839
@@ -1027,7 +1029,19 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1027 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED; 1029 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
1028 iwqp->last_aeq = I40IW_AE_RESET_SENT; 1030 iwqp->last_aeq = I40IW_AE_RESET_SENT;
1029 spin_unlock_irqrestore(&iwqp->lock, flags); 1031 spin_unlock_irqrestore(&iwqp->lock, flags);
1032 i40iw_cm_disconn(iwqp);
1030 } 1033 }
1034 } else {
1035 spin_lock_irqsave(&iwqp->lock, flags);
1036 if (iwqp->cm_id) {
1037 if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
1038 iwqp->cm_id->add_ref(iwqp->cm_id);
1039 i40iw_schedule_cm_timer(iwqp->cm_node,
1040 (struct i40iw_puda_buf *)iwqp,
1041 I40IW_TIMER_TYPE_CLOSE, 1, 0);
1042 }
1043 }
1044 spin_unlock_irqrestore(&iwqp->lock, flags);
1031 } 1045 }
1032 } 1046 }
1033 return 0; 1047 return 0;
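The new else-branch above arms the QP close timer at most once: atomic_inc_return(&iwqp->close_timer_started) == 1 holds for exactly one caller, so concurrent modify-QP paths cannot double-schedule it. Sketch of the same run-once guard with C11 atomics; atomic_fetch_add() returns the old value where the kernel helper returns the new one, and start_close_timer() stands in for i40iw_schedule_cm_timer():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int close_timer_started;

static void start_close_timer(void)
{
	puts("timer armed");
}

static void maybe_arm_close_timer(void)
{
	/* only the caller that saw 0 before its increment proceeds */
	if (atomic_fetch_add(&close_timer_started, 1) == 0)
		start_close_timer();
}

int main(void)
{
	maybe_arm_close_timer();
	maybe_arm_close_timer();	/* prints nothing the second time */
	return 0;
}
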
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ab3c562d5ba7..552f7bd4ecc3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -778,13 +778,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
778 } 778 }
779 779
780 if (MLX5_CAP_GEN(mdev, tag_matching)) { 780 if (MLX5_CAP_GEN(mdev, tag_matching)) {
781 props->xrq_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; 781 props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
782 props->xrq_caps.max_num_tags = 782 props->tm_caps.max_num_tags =
783 (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1; 783 (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
784 props->xrq_caps.flags = IB_TM_CAP_RC; 784 props->tm_caps.flags = IB_TM_CAP_RC;
785 props->xrq_caps.max_ops = 785 props->tm_caps.max_ops =
786 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); 786 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
787 props->xrq_caps.max_sge = MLX5_TM_MAX_SGE; 787 props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
788 } 788 }
789 789
790 if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) { 790 if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
@@ -3837,11 +3837,13 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
3837 if (!dbg) 3837 if (!dbg)
3838 return -ENOMEM; 3838 return -ENOMEM;
3839 3839
3840 dev->delay_drop.dbg = dbg;
3841
3840 dbg->dir_debugfs = 3842 dbg->dir_debugfs =
3841 debugfs_create_dir("delay_drop", 3843 debugfs_create_dir("delay_drop",
3842 dev->mdev->priv.dbg_root); 3844 dev->mdev->priv.dbg_root);
3843 if (!dbg->dir_debugfs) 3845 if (!dbg->dir_debugfs)
3844 return -ENOMEM; 3846 goto out_debugfs;
3845 3847
3846 dbg->events_cnt_debugfs = 3848 dbg->events_cnt_debugfs =
3847 debugfs_create_atomic_t("num_timeout_events", 0400, 3849 debugfs_create_atomic_t("num_timeout_events", 0400,
@@ -3865,8 +3867,6 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
3865 if (!dbg->timeout_debugfs) 3867 if (!dbg->timeout_debugfs)
3866 goto out_debugfs; 3868 goto out_debugfs;
3867 3869
3868 dev->delay_drop.dbg = dbg;
3869
3870 return 0; 3870 return 0;
3871 3871
3872out_debugfs: 3872out_debugfs:
@@ -4174,9 +4174,9 @@ err_bfreg:
4174err_uar_page: 4174err_uar_page:
4175 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); 4175 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
4176 4176
4177err_cnt:
4178 mlx5_ib_cleanup_cong_debugfs(dev);
4179err_cong: 4177err_cong:
4178 mlx5_ib_cleanup_cong_debugfs(dev);
4179err_cnt:
4180 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) 4180 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
4181 mlx5_ib_dealloc_counters(dev); 4181 mlx5_ib_dealloc_counters(dev);
4182 4182
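Two small fixes are folded into the mlx5/main.c hunks above: dev->delay_drop.dbg is now assigned before the debugfs entries are created, so the out_debugfs cleanup path can always reach them, and the err_cnt/err_cong labels are swapped back into reverse-of-setup order. A self-contained sketch of that unwinding convention, with illustrative setup_*/undo_* steps; a failure at step N must jump to the label that undoes steps N-1 down to 1:

#include <stdbool.h>
#include <stdio.h>

static bool setup_a(void) { puts("a up");   return true;  }
static void undo_a(void)  { puts("a down"); }
static bool setup_b(void) { puts("b up");   return true;  }
static void undo_b(void)  { puts("b down"); }
static bool setup_c(void) { puts("c up");   return false; }	/* forced failure */

static int init_all(void)
{
	if (!setup_a())
		goto err_a;
	if (!setup_b())
		goto err_b;
	if (!setup_c())
		goto err_c;
	return 0;

err_c:	/* c failed: tear down b, then a; mislabeling would skip a step */
	undo_b();
err_b:
	undo_a();
err_a:
	return -1;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
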
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 914f212e7ef6..f3dbd75a0a96 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -50,13 +50,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
50{ 50{
51 unsigned long tmp; 51 unsigned long tmp;
52 unsigned long m; 52 unsigned long m;
53 int i, k; 53 u64 base = ~0, p = 0;
54 u64 base = 0; 54 u64 len, pfn;
55 int p = 0; 55 int i = 0;
56 int skip;
57 int mask;
58 u64 len;
59 u64 pfn;
60 struct scatterlist *sg; 56 struct scatterlist *sg;
61 int entry; 57 int entry;
62 unsigned long page_shift = umem->page_shift; 58 unsigned long page_shift = umem->page_shift;
@@ -76,33 +72,24 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
76 m = find_first_bit(&tmp, BITS_PER_LONG); 72 m = find_first_bit(&tmp, BITS_PER_LONG);
77 if (max_page_shift) 73 if (max_page_shift)
78 m = min_t(unsigned long, max_page_shift - page_shift, m); 74 m = min_t(unsigned long, max_page_shift - page_shift, m);
79 skip = 1 << m; 75
80 mask = skip - 1;
81 i = 0;
82 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { 76 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
83 len = sg_dma_len(sg) >> page_shift; 77 len = sg_dma_len(sg) >> page_shift;
84 pfn = sg_dma_address(sg) >> page_shift; 78 pfn = sg_dma_address(sg) >> page_shift;
85 for (k = 0; k < len; k++) { 79 if (base + p != pfn) {
86 if (!(i & mask)) { 80 /* If either the offset or the new
87 tmp = (unsigned long)pfn; 81 * base are unaligned update m
88 m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG)); 82 */
89 skip = 1 << m; 83 tmp = (unsigned long)(pfn | p);
90 mask = skip - 1; 84 if (!IS_ALIGNED(tmp, 1 << m))
91 base = pfn; 85 m = find_first_bit(&tmp, BITS_PER_LONG);
92 p = 0; 86
93 } else { 87 base = pfn;
94 if (base + p != pfn) { 88 p = 0;
95 tmp = (unsigned long)p;
96 m = find_first_bit(&tmp, BITS_PER_LONG);
97 skip = 1 << m;
98 mask = skip - 1;
99 base = pfn;
100 p = 0;
101 }
102 }
103 p++;
104 i++;
105 } 89 }
90
91 p += len;
92 i += len;
106 } 93 }
107 94
108 if (i) { 95 if (i) {
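The rewritten mlx5_ib_cont_pages() loop above advances a whole scatterlist entry at a time instead of page by page: whenever a run is discontiguous (base + p != pfn), the candidate page order m shrinks to the alignment of pfn | p, so both the new base and the offset at which it appears stay aligned. A userspace re-derivation, assuming DMA runs arrive as (pfn, npages) pairs; __builtin_ctzl() stands in for find_first_bit() on a single word:

#include <stdint.h>
#include <stdio.h>

struct run {
	uint64_t pfn;
	uint64_t npages;
};

static unsigned int cont_order(const struct run *r, int n, unsigned int m)
{
	uint64_t base = ~0ULL, p = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (base + p != r[i].pfn) {
			/* discontinuity: shrink m if pfn|p is misaligned */
			unsigned long tmp = (unsigned long)(r[i].pfn | p);

			if (tmp & ((1UL << m) - 1))
				m = (unsigned int)__builtin_ctzl(tmp);
			base = r[i].pfn;
			p = 0;
		}
		p += r[i].npages;
	}
	return m;
}

int main(void)
{
	/* two contiguous runs from pfn 0x1000, then a jump to 0x2000 */
	const struct run runs[] = { {0x1000, 4}, {0x1004, 4}, {0x2000, 8} };

	printf("order %u\n", cont_order(runs, 3, 12));	/* order 3 */
	return 0;
}
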
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 0e2789d9bb4d..37bbc543847a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -47,7 +47,8 @@ enum {
47 47
48#define MLX5_UMR_ALIGN 2048 48#define MLX5_UMR_ALIGN 2048
49 49
50static int clean_mr(struct mlx5_ib_mr *mr); 50static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
51static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
51static int mr_cache_max_order(struct mlx5_ib_dev *dev); 52static int mr_cache_max_order(struct mlx5_ib_dev *dev);
52static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); 53static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
53 54
@@ -1270,8 +1271,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1270 1271
1271 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift, 1272 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
1272 update_xlt_flags); 1273 update_xlt_flags);
1274
1273 if (err) { 1275 if (err) {
1274 mlx5_ib_dereg_mr(&mr->ibmr); 1276 dereg_mr(dev, mr);
1275 return ERR_PTR(err); 1277 return ERR_PTR(err);
1276 } 1278 }
1277 } 1279 }
@@ -1356,7 +1358,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1356 err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, 1358 err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
1357 &npages, &page_shift, &ncont, &order); 1359 &npages, &page_shift, &ncont, &order);
1358 if (err < 0) { 1360 if (err < 0) {
1359 clean_mr(mr); 1361 clean_mr(dev, mr);
1360 return err; 1362 return err;
1361 } 1363 }
1362 } 1364 }
@@ -1410,7 +1412,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1410 if (err) { 1412 if (err) {
1411 mlx5_ib_warn(dev, "Failed to rereg UMR\n"); 1413 mlx5_ib_warn(dev, "Failed to rereg UMR\n");
1412 ib_umem_release(mr->umem); 1414 ib_umem_release(mr->umem);
1413 clean_mr(mr); 1415 clean_mr(dev, mr);
1414 return err; 1416 return err;
1415 } 1417 }
1416 } 1418 }
@@ -1469,9 +1471,8 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1469 } 1471 }
1470} 1472}
1471 1473
1472static int clean_mr(struct mlx5_ib_mr *mr) 1474static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1473{ 1475{
1474 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1475 int allocated_from_cache = mr->allocated_from_cache; 1476 int allocated_from_cache = mr->allocated_from_cache;
1476 int err; 1477 int err;
1477 1478
@@ -1507,10 +1508,8 @@ static int clean_mr(struct mlx5_ib_mr *mr)
1507 return 0; 1508 return 0;
1508} 1509}
1509 1510
1510int mlx5_ib_dereg_mr(struct ib_mr *ibmr) 1511static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1511{ 1512{
1512 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1513 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1514 int npages = mr->npages; 1513 int npages = mr->npages;
1515 struct ib_umem *umem = mr->umem; 1514 struct ib_umem *umem = mr->umem;
1516 1515
@@ -1539,7 +1538,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1539 } 1538 }
1540#endif 1539#endif
1541 1540
1542 clean_mr(mr); 1541 clean_mr(dev, mr);
1543 1542
1544 if (umem) { 1543 if (umem) {
1545 ib_umem_release(umem); 1544 ib_umem_release(umem);
@@ -1549,6 +1548,14 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1549 return 0; 1548 return 0;
1550} 1549}
1551 1550
1551int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1552{
1553 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1554 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1555
1556 return dereg_mr(dev, mr);
1557}
1558
1552struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, 1559struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
1553 enum ib_mr_type mr_type, 1560 enum ib_mr_type mr_type,
1554 u32 max_num_sg) 1561 u32 max_num_sg)
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index f0dc5f4aa177..442b9bdc0f03 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3232,7 +3232,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3232 mr->ibmr.iova); 3232 mr->ibmr.iova);
3233 set_wqe_32bit_value(wqe->wqe_words, 3233 set_wqe_32bit_value(wqe->wqe_words,
3234 NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, 3234 NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
3235 mr->ibmr.length); 3235 lower_32_bits(mr->ibmr.length));
3236 set_wqe_32bit_value(wqe->wqe_words, 3236 set_wqe_32bit_value(wqe->wqe_words,
3237 NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0); 3237 NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
3238 set_wqe_32bit_value(wqe->wqe_words, 3238 set_wqe_32bit_value(wqe->wqe_words,
@@ -3274,7 +3274,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3274 mr->npages * 8); 3274 mr->npages * 8);
3275 3275
3276 nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, " 3276 nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, "
3277 "length: %d, rkey: %0x, pgl_paddr: %llx, " 3277 "length: %lld, rkey: %0x, pgl_paddr: %llx, "
3278 "page_list_len: %u, wqe_misc: %x\n", 3278 "page_list_len: %u, wqe_misc: %x\n",
3279 (unsigned long long) mr->ibmr.iova, 3279 (unsigned long long) mr->ibmr.iova,
3280 mr->ibmr.length, 3280 mr->ibmr.length,
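The nes changes above make the 64-bit-to-32-bit narrowing explicit: mr->ibmr.length is u64, the FMR WQE stores its low word (the high word is still written as zero), and the debug format becomes %lld. Userspace sketches of the kernel helpers involved:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t len = 0x123456789ULL;

	printf("low 0x%08" PRIx32 " high 0x%08" PRIx32 "\n",
	       lower_32_bits(len), upper_32_bits(len));
	return 0;
}
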
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index dcb5942f9fb5..65b166cc7437 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -252,7 +252,10 @@ static int ocrdma_get_mbx_errno(u32 status)
252 case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES: 252 case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
253 err_num = -EAGAIN; 253 err_num = -EAGAIN;
254 break; 254 break;
255 default:
256 err_num = -EFAULT;
255 } 257 }
258 break;
256 default: 259 default:
257 err_num = -EFAULT; 260 err_num = -EFAULT;
258 } 261 }
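The ocrdma fix above closes a fallthrough hole: the inner switch had no default, and without a break after its closing brace control fell straight into the outer default, overwriting whatever err_num the inner cases had just chosen with -EFAULT. Minimal reproduction of the shape, with illustrative status and errno values:

#include <stdio.h>

static int classify(int outer, int inner)
{
	int err = 0;

	switch (outer) {
	case 1:
		switch (inner) {
		case 7:
			err = -11;	/* the -EAGAIN-style mapping */
			break;
		default:
			err = -14;	/* inner default, added by the fix */
		}
		break;			/* without this, err is clobbered below */
	default:
		err = -14;		/* outer -EFAULT fallback */
	}
	return err;
}

int main(void)
{
	printf("%d\n", classify(1, 7));	/* -11; was -14 before the fix */
	return 0;
}
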
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index b2bb42e2805d..254083b524bd 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -387,7 +387,7 @@ struct qedr_qp {
387 u8 wqe_size; 387 u8 wqe_size;
388 388
389 u8 smac[ETH_ALEN]; 389 u8 smac[ETH_ALEN];
390 u16 vlan_id; 390 u16 vlan;
391 int rc; 391 int rc;
392 } *rqe_wr_id; 392 } *rqe_wr_id;
393 393
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 4689e802b332..ad8965397cf7 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -105,7 +105,7 @@ void qedr_ll2_complete_rx_packet(void *cxt,
105 105
106 qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ? 106 qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
107 -EINVAL : 0; 107 -EINVAL : 0;
108 qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan; 108 qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
109 /* note: length stands for data length i.e. GRH is excluded */ 109 /* note: length stands for data length i.e. GRH is excluded */
110 qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = 110 qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
111 data->length.data_length; 111 data->length.data_length;
@@ -694,6 +694,7 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
694 struct qedr_cq *cq = get_qedr_cq(ibcq); 694 struct qedr_cq *cq = get_qedr_cq(ibcq);
695 struct qedr_qp *qp = dev->gsi_qp; 695 struct qedr_qp *qp = dev->gsi_qp;
696 unsigned long flags; 696 unsigned long flags;
697 u16 vlan_id;
697 int i = 0; 698 int i = 0;
698 699
699 spin_lock_irqsave(&cq->cq_lock, flags); 700 spin_lock_irqsave(&cq->cq_lock, flags);
@@ -712,9 +713,14 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
712 wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK; 713 wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
713 ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac); 714 ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
714 wc[i].wc_flags |= IB_WC_WITH_SMAC; 715 wc[i].wc_flags |= IB_WC_WITH_SMAC;
715 if (qp->rqe_wr_id[qp->rq.cons].vlan_id) { 716
717 vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
718 VLAN_VID_MASK;
719 if (vlan_id) {
716 wc[i].wc_flags |= IB_WC_WITH_VLAN; 720 wc[i].wc_flags |= IB_WC_WITH_VLAN;
717 wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id; 721 wc[i].vlan_id = vlan_id;
722 wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
723 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
718 } 724 }
719 725
720 qedr_inc_sw_cons(&qp->rq); 726 qedr_inc_sw_cons(&qp->rq);
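qedr_gsi_poll_cq() above now treats the renamed vlan field as a raw TCI and splits it on the fly: VID from bits 11:0 into wc->vlan_id, priority from bits 15:13 into wc->sl, the mirror image of the i40iw packing earlier in this diff. Sketch using the mask values from <linux/if_vlan.h>:

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK	0x0fff
#define VLAN_PRIO_MASK	0xe000
#define VLAN_PRIO_SHIFT	13

int main(void)
{
	uint16_t tci = 0xa064;				/* prio 5, VID 100 */
	uint16_t vid = tci & VLAN_VID_MASK;
	uint8_t sl = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

	printf("vlan_id=%u sl=%u\n", vid, sl);		/* vlan_id=100 sl=5 */
	return 0;
}
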
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 663a0c301c43..984aa3484928 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -416,9 +416,34 @@ static inline enum ib_wc_status pvrdma_wc_status_to_ib(
416 return (enum ib_wc_status)status; 416 return (enum ib_wc_status)status;
417} 417}
418 418
419static inline int pvrdma_wc_opcode_to_ib(int opcode) 419static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
420{ 420{
421 return opcode; 421 switch (opcode) {
422 case PVRDMA_WC_SEND:
423 return IB_WC_SEND;
424 case PVRDMA_WC_RDMA_WRITE:
425 return IB_WC_RDMA_WRITE;
426 case PVRDMA_WC_RDMA_READ:
427 return IB_WC_RDMA_READ;
428 case PVRDMA_WC_COMP_SWAP:
429 return IB_WC_COMP_SWAP;
430 case PVRDMA_WC_FETCH_ADD:
431 return IB_WC_FETCH_ADD;
432 case PVRDMA_WC_LOCAL_INV:
433 return IB_WC_LOCAL_INV;
434 case PVRDMA_WC_FAST_REG_MR:
435 return IB_WC_REG_MR;
436 case PVRDMA_WC_MASKED_COMP_SWAP:
437 return IB_WC_MASKED_COMP_SWAP;
438 case PVRDMA_WC_MASKED_FETCH_ADD:
439 return IB_WC_MASKED_FETCH_ADD;
440 case PVRDMA_WC_RECV:
441 return IB_WC_RECV;
442 case PVRDMA_WC_RECV_RDMA_WITH_IMM:
443 return IB_WC_RECV_RDMA_WITH_IMM;
444 default:
445 return IB_WC_SEND;
446 }
422} 447}
423 448
424static inline int pvrdma_wc_flags_to_ib(int flags) 449static inline int pvrdma_wc_flags_to_ib(int flags)
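pvrdma_wc_opcode_to_ib() above replaces an identity cast with an explicit mapping: the device's wire opcodes and the ib_core opcodes are independent ABIs (note PVRDMA_WC_FAST_REG_MR mapping to IB_WC_REG_MR), so a cast only works while the two enums happen to line up. A toy illustration of the failure mode; the enum values here are made up:

#include <stdio.h>

enum wire_op { WIRE_SEND = 0, WIRE_WRITE = 1, WIRE_REG_MR = 6 };
enum ib_op   { IB_SEND = 0, IB_WRITE = 1, IB_REG_MR = 7 };

static enum ib_op wire_to_ib(enum wire_op op)
{
	switch (op) {
	case WIRE_SEND:
		return IB_SEND;
	case WIRE_WRITE:
		return IB_WRITE;
	case WIRE_REG_MR:
		return IB_REG_MR;	/* an identity cast would yield 6 */
	default:
		return IB_SEND;		/* safe fallback, as in the patch */
	}
}

int main(void)
{
	printf("%d\n", wire_to_ib(WIRE_REG_MR));	/* 7, not 6 */
	return 0;
}
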
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 14b62f7472b4..7774654c2ccb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -823,12 +823,18 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
823 wc->status != IB_WC_WR_FLUSH_ERR) { 823 wc->status != IB_WC_WR_FLUSH_ERR) {
824 struct ipoib_neigh *neigh; 824 struct ipoib_neigh *neigh;
825 825
826 if (wc->status != IB_WC_RNR_RETRY_EXC_ERR) 826 /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle,
827 ipoib_warn(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", 827 * so don't make waves.
828 wc->status, wr_id, wc->vendor_err); 828 */
829 if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
830 wc->status == IB_WC_RETRY_EXC_ERR)
831 ipoib_dbg(priv,
832 "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
833 __func__, wc->status, wr_id, wc->vendor_err);
829 else 834 else
830 ipoib_dbg(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", 835 ipoib_warn(priv,
831 wc->status, wr_id, wc->vendor_err); 836 "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
837 __func__, wc->status, wr_id, wc->vendor_err);
832 838
833 spin_lock_irqsave(&priv->lock, flags); 839 spin_lock_irqsave(&priv->lock, flags);
834 neigh = tx->neigh; 840 neigh = tx->neigh;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 2e075377242e..6cd61638b441 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1000,19 +1000,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
1000 */ 1000 */
1001 priv->dev->broadcast[8] = priv->pkey >> 8; 1001 priv->dev->broadcast[8] = priv->pkey >> 8;
1002 priv->dev->broadcast[9] = priv->pkey & 0xff; 1002 priv->dev->broadcast[9] = priv->pkey & 0xff;
1003
1004 /*
1005 * Update the broadcast address in the priv->broadcast object,
1006 * in case it already exists, otherwise no one will do that.
1007 */
1008 if (priv->broadcast) {
1009 spin_lock_irq(&priv->lock);
1010 memcpy(priv->broadcast->mcmember.mgid.raw,
1011 priv->dev->broadcast + 4,
1012 sizeof(union ib_gid));
1013 spin_unlock_irq(&priv->lock);
1014 }
1015
1016 return 0; 1003 return 0;
1017 } 1004 }
1018 1005
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bac95b509a9b..dcc77014018d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2180,6 +2180,7 @@ static struct net_device *ipoib_add_port(const char *format,
2180{ 2180{
2181 struct ipoib_dev_priv *priv; 2181 struct ipoib_dev_priv *priv;
2182 struct ib_port_attr attr; 2182 struct ib_port_attr attr;
2183 struct rdma_netdev *rn;
2183 int result = -ENOMEM; 2184 int result = -ENOMEM;
2184 2185
2185 priv = ipoib_intf_alloc(hca, port, format); 2186 priv = ipoib_intf_alloc(hca, port, format);
@@ -2279,7 +2280,8 @@ register_failed:
2279 ipoib_dev_cleanup(priv->dev); 2280 ipoib_dev_cleanup(priv->dev);
2280 2281
2281device_init_failed: 2282device_init_failed:
2282 free_netdev(priv->dev); 2283 rn = netdev_priv(priv->dev);
2284 rn->free_rdma_netdev(priv->dev);
2283 kfree(priv); 2285 kfree(priv);
2284 2286
2285alloc_mem_failed: 2287alloc_mem_failed:
@@ -2328,7 +2330,7 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
2328 return; 2330 return;
2329 2331
2330 list_for_each_entry_safe(priv, tmp, dev_list, list) { 2332 list_for_each_entry_safe(priv, tmp, dev_list, list) {
2331 struct rdma_netdev *rn = netdev_priv(priv->dev); 2333 struct rdma_netdev *parent_rn = netdev_priv(priv->dev);
2332 2334
2333 ib_unregister_event_handler(&priv->event_handler); 2335 ib_unregister_event_handler(&priv->event_handler);
2334 flush_workqueue(ipoib_workqueue); 2336 flush_workqueue(ipoib_workqueue);
@@ -2350,10 +2352,15 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
2350 unregister_netdev(priv->dev); 2352 unregister_netdev(priv->dev);
2351 mutex_unlock(&priv->sysfs_mutex); 2353 mutex_unlock(&priv->sysfs_mutex);
2352 2354
2353 rn->free_rdma_netdev(priv->dev); 2355 parent_rn->free_rdma_netdev(priv->dev);
2356
2357 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
2358 struct rdma_netdev *child_rn;
2354 2359
2355 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) 2360 child_rn = netdev_priv(cpriv->dev);
2361 child_rn->free_rdma_netdev(cpriv->dev);
2356 kfree(cpriv); 2362 kfree(cpriv);
2363 }
2357 2364
2358 kfree(priv); 2365 kfree(priv);
2359 } 2366 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 9927cd6b7082..55a9b71ed05a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -141,14 +141,17 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
141 return restart_syscall(); 141 return restart_syscall();
142 } 142 }
143 143
144 priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); 144 if (!down_write_trylock(&ppriv->vlan_rwsem)) {
145 if (!priv) {
146 rtnl_unlock(); 145 rtnl_unlock();
147 mutex_unlock(&ppriv->sysfs_mutex); 146 mutex_unlock(&ppriv->sysfs_mutex);
148 return -ENOMEM; 147 return restart_syscall();
149 } 148 }
150 149
151 down_write(&ppriv->vlan_rwsem); 150 priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
151 if (!priv) {
152 result = -ENOMEM;
153 goto out;
154 }
152 155
153 /* 156 /*
154 * First ensure this isn't a duplicate. We check the parent device and 157 * First ensure this isn't a duplicate. We check the parent device and
@@ -175,8 +178,11 @@ out:
175 rtnl_unlock(); 178 rtnl_unlock();
176 mutex_unlock(&ppriv->sysfs_mutex); 179 mutex_unlock(&ppriv->sysfs_mutex);
177 180
178 if (result) { 181 if (result && priv) {
179 free_netdev(priv->dev); 182 struct rdma_netdev *rn;
183
184 rn = netdev_priv(priv->dev);
185 rn->free_rdma_netdev(priv->dev);
180 kfree(priv); 186 kfree(priv);
181 } 187 }
182 188
@@ -204,7 +210,12 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
204 return restart_syscall(); 210 return restart_syscall();
205 } 211 }
206 212
207 down_write(&ppriv->vlan_rwsem); 213 if (!down_write_trylock(&ppriv->vlan_rwsem)) {
214 rtnl_unlock();
215 mutex_unlock(&ppriv->sysfs_mutex);
216 return restart_syscall();
217 }
218
208 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { 219 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
209 if (priv->pkey == pkey && 220 if (priv->pkey == pkey &&
210 priv->child_type == IPOIB_LEGACY_CHILD) { 221 priv->child_type == IPOIB_LEGACY_CHILD) {
@@ -224,7 +235,10 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
224 mutex_unlock(&ppriv->sysfs_mutex); 235 mutex_unlock(&ppriv->sysfs_mutex);
225 236
226 if (dev) { 237 if (dev) {
227 free_netdev(dev); 238 struct rdma_netdev *rn;
239
240 rn = netdev_priv(dev);
241 rn->free_rdma_netdev(priv->dev);
228 kfree(priv); 242 kfree(priv);
229 return 0; 243 return 0;
230 } 244 }
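Both ipoib_vlan_add() and ipoib_vlan_delete() above trade down_write() for down_write_trylock() plus restart_syscall(): the callers already hold rtnl and the sysfs mutex, so sleeping on vlan_rwsem could deadlock against paths that acquire the locks in the other order. A pthread sketch of the trylock-and-retry shape; restart_syscall() is modeled as returning -EAGAIN to the caller:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER;	/* rtnl stand-in */
static pthread_rwlock_t vlan_sem = PTHREAD_RWLOCK_INITIALIZER;

static int vlan_add(void)
{
	pthread_mutex_lock(&outer_lock);
	if (pthread_rwlock_trywrlock(&vlan_sem)) {
		/* contended: back out fully and let the caller retry */
		pthread_mutex_unlock(&outer_lock);
		return -EAGAIN;
	}
	/* ... create the child interface here ... */
	pthread_rwlock_unlock(&vlan_sem);
	pthread_mutex_unlock(&outer_lock);
	return 0;
}

int main(void)
{
	printf("%d\n", vlan_add());	/* 0 on the uncontended path */
	return 0;
}
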
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 9c3e9ab53a41..322209d5ff58 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -154,7 +154,7 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
154{ 154{
155 int i; 155 int i;
156 156
157 iser_err("page vec npages %d data length %d\n", 157 iser_err("page vec npages %d data length %lld\n",
158 page_vec->npages, page_vec->fake_mr.length); 158 page_vec->npages, page_vec->fake_mr.length);
159 for (i = 0; i < page_vec->npages; i++) 159 for (i = 0; i < page_vec->npages; i++)
160 iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]); 160 iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);