author	David S. Miller <davem@davemloft.net>	2018-03-23 11:24:57 -0400
committer	David S. Miller <davem@davemloft.net>	2018-03-23 11:31:58 -0400
commit	03fe2debbb2771fb90881e4ce8109b09cf772a5c (patch)
tree	fbaf8738296b2e9dcba81c6daef2d515b6c4948c /drivers/infiniband
parent	6686c459e1449a3ee5f3fd313b0a559ace7a700e (diff)
parent	f36b7534b83357cf52e747905de6d65b4f7c2512 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here...

For the mac80211 stuff, these were fortunately just parallel adds.
Trivially resolved.

In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call from this function was removed.

In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member "rt_mtu_locked" which needs to be copied
over here.

The mlxsw driver conflict consisted of net-next separating the span
code and definitions into separate files, whilst a 'net' bug fix made
some changes to that moved code.

The mlx5 infiniband conflict resolution was quite non-trivial, the
RDMA tree's merge commit was used as a guide here, and here are
their notes:

====================

Due to bug fixes found by the syzkaller bot and taken into the for-rc
branch after development for the 4.17 merge window had already started
being taken into the for-next branch, there were fairly non-trivial
merge issues that would need to be resolved between the for-rc branch
and the for-next branch. This merge resolves those conflicts and
provides a unified base upon which ongoing development for 4.17 can
be based.

Conflicts:
	drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524
	(IB/mlx5: Fix cleanup order on unload) added to for-rc and
	commit b5ca15ad7e61 (IB/mlx5: Add proper representors support)
	added as part of the devel cycle both needed to modify the
	init/de-init functions used by mlx5. To support the new
	representors, the new functions added by the cleanup patch
	needed to be made non-static, and the init/de-init list added
	by the representors patch needed to be modified to match the
	init/de-init list changes made by the cleanup patch.

Updates:
	drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function
	prototypes added by representors patch to reflect new function
	names as changed by cleanup patch
	drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init
	stage list to match new order from cleanup patch

====================

Signed-off-by: David S. Miller <davem@davemloft.net>
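[Editor's note] The stage-list resolution described above is easier to follow with the driver's init/de-init table pattern in mind. Below is a minimal userspace analogue of that idea, not the actual mlx5 code: stages run front to back on bring-up and unwind back to front on teardown, and a stage may supply only one of the two hooks. That is how MLX5_IB_STAGE_PRE_IB_REG_UMR (cleanup only) and MLX5_IB_STAGE_POST_IB_REG_UMR (init only) bracket MLX5_IB_STAGE_IB_REG in the diffs below, so UMR resources are created after the device registers and destroyed only after it unregisters.

#include <stdio.h>

struct stage {
	const char *name;
	int  (*init)(void);
	void (*cleanup)(void);
};

static int  reg_init(void)    { printf("register device\n"); return 0; }
static void reg_cleanup(void) { printf("unregister device\n"); }
static int  umr_init(void)    { printf("create UMR resources\n"); return 0; }
static void umr_cleanup(void) { printf("destroy UMR resources\n"); }

static const struct stage profile[] = {
	{ "PRE_IB_REG_UMR",  NULL,     umr_cleanup }, /* cleanup only */
	{ "IB_REG",          reg_init, reg_cleanup },
	{ "POST_IB_REG_UMR", umr_init, NULL },        /* init only */
};

int main(void)
{
	int i, n = sizeof(profile) / sizeof(profile[0]);

	for (i = 0; i < n; i++)		/* bring-up, front to back */
		if (profile[i].init && profile[i].init())
			break;
	while (i-- > 0)			/* teardown, back to front */
		if (profile[i].cleanup)
			profile[i].cleanup();
	return 0;
}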
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/core/addr.c	15
-rw-r--r--	drivers/infiniband/core/cma.c	15
-rw-r--r--	drivers/infiniband/core/cq.c	21
-rw-r--r--	drivers/infiniband/core/device.c	6
-rw-r--r--	drivers/infiniband/core/sa_query.c	7
-rw-r--r--	drivers/infiniband/core/ucma.c	42
-rw-r--r--	drivers/infiniband/hw/bnxt_re/bnxt_re.h	4
-rw-r--r--	drivers/infiniband/hw/bnxt_re/ib_verbs.c	28
-rw-r--r--	drivers/infiniband/hw/bnxt_re/ib_verbs.h	3
-rw-r--r--	drivers/infiniband/hw/bnxt_re/main.c	12
-rw-r--r--	drivers/infiniband/hw/bnxt_re/qplib_fp.c	113
-rw-r--r--	drivers/infiniband/hw/bnxt_re/qplib_fp.h	12
-rw-r--r--	drivers/infiniband/hw/bnxt_re/qplib_rcfw.c	9
-rw-r--r--	drivers/infiniband/hw/bnxt_re/qplib_rcfw.h	1
-rw-r--r--	drivers/infiniband/hw/bnxt_re/qplib_sp.c	3
-rw-r--r--	drivers/infiniband/hw/bnxt_re/roce_hsi.h	25
-rw-r--r--	drivers/infiniband/hw/mlx4/cq.c	4
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	11
-rw-r--r--	drivers/infiniband/hw/mlx5/cq.c	10
-rw-r--r--	drivers/infiniband/hw/mlx5/ib_rep.c	9
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c	51
-rw-r--r--	drivers/infiniband/hw/mlx5/mlx5_ib.h	7
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c	14
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c	34
-rw-r--r--	drivers/infiniband/hw/mlx5/srq.c	15
-rw-r--r--	drivers/infiniband/hw/qedr/qedr_iw_cm.c	19
-rw-r--r--	drivers/infiniband/hw/qedr/verbs.c	13
-rw-r--r--	drivers/infiniband/sw/rdmavt/mr.c	10
28 files changed, 299 insertions, 214 deletions
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a5b4cf030c11..9183d148d644 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in,
 		dst_release(dst);
 	}
 
-	if (ndev->flags & IFF_LOOPBACK) {
-		ret = rdma_translate_ip(dst_in, addr);
-		/*
-		 * Put the loopback device and get the translated
-		 * device instead.
-		 */
+	if (ndev) {
+		if (ndev->flags & IFF_LOOPBACK)
+			ret = rdma_translate_ip(dst_in, addr);
+		else
+			addr->bound_dev_if = ndev->ifindex;
 		dev_put(ndev);
-		ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
-	} else {
-		addr->bound_dev_if = ndev->ifindex;
 	}
-	dev_put(ndev);
 
 	return ret;
 }
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 915bbd867b61..66f203730e80 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3069,7 +3069,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
 			continue;
 
 		/* different dest port -> unique */
-		if (!cma_any_port(cur_daddr) &&
+		if (!cma_any_port(daddr) &&
+		    !cma_any_port(cur_daddr) &&
 		    (dport != cur_dport))
 			continue;
 
@@ -3080,7 +3081,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
 			continue;
 
 		/* different dst address -> unique */
-		if (!cma_any_addr(cur_daddr) &&
+		if (!cma_any_addr(daddr) &&
+		    !cma_any_addr(cur_daddr) &&
 		    cma_addr_cmp(daddr, cur_daddr))
 			continue;
 
@@ -3378,13 +3380,13 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 	}
 #endif
 	}
+	daddr = cma_dst_addr(id_priv);
+	daddr->sa_family = addr->sa_family;
+
 	ret = cma_get_port(id_priv);
 	if (ret)
 		goto err2;
 
-	daddr = cma_dst_addr(id_priv);
-	daddr->sa_family = addr->sa_family;
-
 	return 0;
 err2:
 	if (id_priv->cma_dev)
@@ -4173,6 +4175,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	struct cma_multicast *mc;
 	int ret;
 
+	if (!id->device)
+		return -EINVAL;
+
 	id_priv = container_of(id, struct rdma_id_private, id);
 	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
 	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
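[Editor's note] The two cma_port_is_unique() hunks above add a missing wildcard test on the request's own destination port and address: a zero port or a wildcard address proves nothing about uniqueness. A small standalone sketch of the corrected port predicate (illustrative names, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool any_port(unsigned short port) { return port == 0; }

/* A differing destination port only proves the pair cannot conflict
 * when both ports are actually bound (non-zero).
 */
static bool ports_prove_unique(unsigned short dport, unsigned short cur_dport)
{
	return !any_port(dport) && !any_port(cur_dport) && dport != cur_dport;
}

int main(void)
{
	printf("%d\n", ports_prove_unique(0, 4791));	/* 0: wildcard daddr */
	printf("%d\n", ports_prove_unique(1024, 4791));	/* 1: both bound */
	return 0;
}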
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index bc79ca8215d7..af5ad6a56ae4 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -17,6 +17,7 @@
 
 /* # of WCs to poll for with a single call to ib_poll_cq */
 #define IB_POLL_BATCH			16
+#define IB_POLL_BATCH_DIRECT		8
 
 /* # of WCs to iterate over before yielding */
 #define IB_POLL_BUDGET_IRQ		256
@@ -25,18 +26,18 @@
 #define IB_POLL_FLAGS \
 	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
 
-static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
+static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
+			   int batch)
 {
 	int i, n, completed = 0;
-	struct ib_wc *wcs = poll_wc ? : cq->wc;
 
 	/*
 	 * budget might be (-1) if the caller does not
 	 * want to bound this call, thus we need unsigned
 	 * minimum here.
 	 */
-	while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
+	while ((n = ib_poll_cq(cq, min_t(u32, batch,
 			budget - completed), wcs)) > 0) {
 		for (i = 0; i < n; i++) {
 			struct ib_wc *wc = &wcs[i];
 
@@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
 
 		completed += n;
 
-		if (n != IB_POLL_BATCH ||
-		    (budget != -1 && completed >= budget))
+		if (n != batch || (budget != -1 && completed >= budget))
 			break;
 	}
 
@@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
  */
 int ib_process_cq_direct(struct ib_cq *cq, int budget)
 {
-	struct ib_wc wcs[IB_POLL_BATCH];
+	struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
 
-	return __ib_process_cq(cq, budget, wcs);
+	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
 }
 EXPORT_SYMBOL(ib_process_cq_direct);
 
@@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
 	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
 	int completed;
 
-	completed = __ib_process_cq(cq, budget, NULL);
+	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
 	if (completed < budget) {
 		irq_poll_complete(&cq->iop);
 		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
@@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work)
 	struct ib_cq *cq = container_of(work, struct ib_cq, work);
 	int completed;
 
-	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL);
+	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
+				    IB_POLL_BATCH);
 	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
 	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
 		queue_work(ib_comp_wq, &cq->work);
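[Editor's note] The cq.c hunks above split the polling batch in two: the IRQ and workqueue paths keep the preallocated cq->wc buffer with IB_POLL_BATCH, while ib_process_cq_direct() now keeps a smaller IB_POLL_BATCH_DIRECT array on its own stack, shrinking that function's stack frame. A runnable userspace sketch of the budget/batch loop, including the unsigned minimum that makes budget == -1 mean "unbounded" (all names here are illustrative):

#include <stdio.h>

#define MIN_U(a, b) ((unsigned int)(a) < (unsigned int)(b) ? (a) : (b))

static int queued = 37;			/* pretend outstanding completions */

static int poll_cq(int max, int *wcs)	/* stand-in for ib_poll_cq() */
{
	int i, n = queued < max ? queued : max;

	for (i = 0; i < n; i++)
		wcs[i] = i;		/* fake work completions */
	queued -= n;
	return n;
}

static int process_cq(int budget, int batch, int *wcs)
{
	int n, completed = 0;

	/* unsigned minimum, so budget == -1 behaves as "unbounded" */
	while ((n = poll_cq(MIN_U(batch, budget - completed), wcs)) > 0) {
		completed += n;
		if (n != batch || (budget != -1 && completed >= budget))
			break;
	}
	return completed;
}

int main(void)
{
	int wcs[8];	/* small on-stack batch, like IB_POLL_BATCH_DIRECT */

	printf("%d\n", process_cq(-1, 8, wcs));	/* prints 37 */
	return 0;
}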
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index e8010e73a1cf..bb065c9449be 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device,
 	ret = device->query_device(device, &device->attrs, &uhw);
 	if (ret) {
 		pr_warn("Couldn't query the device attributes\n");
-		goto cache_cleanup;
+		goto cg_cleanup;
 	}
 
 	ret = ib_device_register_sysfs(device, port_callback);
 	if (ret) {
 		pr_warn("Couldn't register device %s with driver model\n",
 			device->name);
-		goto cache_cleanup;
+		goto cg_cleanup;
 	}
 
 	device->reg_state = IB_DEV_REGISTERED;
@@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device,
 	mutex_unlock(&device_mutex);
 	return 0;
 
+cg_cleanup:
+	ib_device_unregister_rdmacg(device);
 cache_cleanup:
 	ib_cache_cleanup_one(device);
 	ib_cache_release_one(device);
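[Editor's note] The cg_cleanup label above follows the kernel's stacked-label unwind idiom: a later failure jumps to the label that releases everything acquired so far, in reverse order. A minimal self-contained sketch of the pattern (hypothetical resources, not the RDMA code):

#include <stdio.h>

static int setup(void)
{
	int ret;

	ret = 0;			/* acquire A (pretend success) */
	if (ret)
		goto out;
	ret = 0;			/* acquire B (pretend success) */
	if (ret)
		goto free_a;
	ret = -1;			/* acquire C fails */
	if (ret)
		goto free_b;
	return 0;

free_b:
	printf("release B\n");		/* unwind in reverse order */
free_a:
	printf("release A\n");
out:
	return ret;
}

int main(void) { return setup() ? 1 : 0; }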
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8cf15d4a8ac4..9f029a1ca5ea 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
 
 	resolved_dev = dev_get_by_index(dev_addr.net,
 					dev_addr.bound_dev_if);
-	if (resolved_dev->flags & IFF_LOOPBACK) {
-		dev_put(resolved_dev);
-		resolved_dev = idev;
-		dev_hold(resolved_dev);
+	if (!resolved_dev) {
+		dev_put(idev);
+		return -ENODEV;
 	}
 	ndev = ib_get_ndev_from_path(rec);
 	rcu_read_lock();
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index f015f1bf88c9..e5a1e7d81326 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -132,7 +132,7 @@ static inline struct ucma_context *_ucma_find_context(int id,
 	ctx = idr_find(&ctx_idr, id);
 	if (!ctx)
 		ctx = ERR_PTR(-ENOENT);
-	else if (ctx->file != file)
+	else if (ctx->file != file || !ctx->cm_id)
 		ctx = ERR_PTR(-EINVAL);
 	return ctx;
 }
@@ -456,6 +456,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
 	struct rdma_ucm_create_id cmd;
 	struct rdma_ucm_create_id_resp resp;
 	struct ucma_context *ctx;
+	struct rdma_cm_id *cm_id;
 	enum ib_qp_type qp_type;
 	int ret;
 
@@ -476,10 +477,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
 		return -ENOMEM;
 
 	ctx->uid = cmd.uid;
-	ctx->cm_id = rdma_create_id(current->nsproxy->net_ns,
-				    ucma_event_handler, ctx, cmd.ps, qp_type);
-	if (IS_ERR(ctx->cm_id)) {
-		ret = PTR_ERR(ctx->cm_id);
+	cm_id = rdma_create_id(current->nsproxy->net_ns,
+			       ucma_event_handler, ctx, cmd.ps, qp_type);
+	if (IS_ERR(cm_id)) {
+		ret = PTR_ERR(cm_id);
 		goto err1;
 	}
 
@@ -489,14 +490,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
 		ret = -EFAULT;
 		goto err2;
 	}
+
+	ctx->cm_id = cm_id;
 	return 0;
 
 err2:
-	rdma_destroy_id(ctx->cm_id);
+	rdma_destroy_id(cm_id);
 err1:
 	mutex_lock(&mut);
 	idr_remove(&ctx_idr, ctx->id);
 	mutex_unlock(&mut);
+	mutex_lock(&file->mut);
+	list_del(&ctx->list);
+	mutex_unlock(&file->mut);
 	kfree(ctx);
 	return ret;
 }
@@ -664,19 +670,23 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
 			    int in_len, int out_len)
 {
 	struct rdma_ucm_resolve_ip cmd;
+	struct sockaddr *src, *dst;
 	struct ucma_context *ctx;
 	int ret;
 
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
+	src = (struct sockaddr *) &cmd.src_addr;
+	dst = (struct sockaddr *) &cmd.dst_addr;
+	if (!rdma_addr_size(src) || !rdma_addr_size(dst))
+		return -EINVAL;
+
 	ctx = ucma_get_ctx(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
-				(struct sockaddr *) &cmd.dst_addr,
-				cmd.timeout_ms);
+	ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1149,6 +1159,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
+	if (cmd.qp_state > IB_QPS_ERR)
+		return -EINVAL;
+
 	ctx = ucma_get_ctx(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -1294,6 +1307,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
+		return -EINVAL;
+
 	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
 			     cmd.optlen);
 	if (IS_ERR(optval)) {
@@ -1343,7 +1359,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
 		return -ENOSPC;
 
 	addr = (struct sockaddr *) &cmd->addr;
-	if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+	if (cmd->addr_size != rdma_addr_size(addr))
 		return -EINVAL;
 
 	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
@@ -1411,6 +1427,9 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
 	join_cmd.uid = cmd.uid;
 	join_cmd.id = cmd.id;
 	join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
+	if (!join_cmd.addr_size)
+		return -EINVAL;
+
 	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
 	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
 
@@ -1426,6 +1445,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
+	if (!rdma_addr_size((struct sockaddr *)&cmd.addr))
+		return -EINVAL;
+
 	return ucma_process_join(file, &cmd, out_len);
 }
 
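[Editor's note] Several of the ucma.c hunks above reject user-supplied sockaddrs whose rdma_addr_size() is zero before the address is ever used. A userspace sketch of that style of check, covering only the IPv4/IPv6 cases (the kernel helper also knows about AF_IB):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

static size_t addr_size(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return sizeof(struct sockaddr_in);
	case AF_INET6:
		return sizeof(struct sockaddr_in6);
	default:
		return 0;	/* unknown family -> caller rejects with -EINVAL */
	}
}

int main(void)
{
	struct sockaddr_in sin = { .sin_family = AF_INET };

	printf("%zu\n", addr_size((struct sockaddr *)&sin));
	return 0;
}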
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 3eb7a8387116..96f76896488d 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -57,8 +57,8 @@
 #define BNXT_RE_PAGE_SIZE_8M		BIT(BNXT_RE_PAGE_SHIFT_8M)
 #define BNXT_RE_PAGE_SIZE_1G		BIT(BNXT_RE_PAGE_SHIFT_1G)
 
-#define BNXT_RE_MAX_MR_SIZE_LOW		BIT(BNXT_RE_PAGE_SHIFT_1G)
-#define BNXT_RE_MAX_MR_SIZE_HIGH	BIT(39)
+#define BNXT_RE_MAX_MR_SIZE_LOW		BIT_ULL(BNXT_RE_PAGE_SHIFT_1G)
+#define BNXT_RE_MAX_MR_SIZE_HIGH	BIT_ULL(39)
 #define BNXT_RE_MAX_MR_SIZE		BNXT_RE_MAX_MR_SIZE_HIGH
 
 #define BNXT_RE_MAX_QPC_COUNT		(64 * 1024)
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 643174d949a8..8301d7e5fa8c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -785,7 +785,7 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
 	return 0;
 }
 
-static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
 	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
 {
 	unsigned long flags;
@@ -799,8 +799,8 @@ static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
 	return flags;
 }
 
-static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
-			       unsigned long flags)
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+			unsigned long flags)
 	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
 {
 	if (qp->rcq != qp->scq)
@@ -1606,6 +1606,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	int status;
 	union ib_gid sgid;
 	struct ib_gid_attr sgid_attr;
+	unsigned int flags;
 	u8 nw_type;
 
 	qp->qplib_qp.modify_flags = 0;
@@ -1634,14 +1635,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 			dev_dbg(rdev_to_dev(rdev),
 				"Move QP = %p to flush list\n",
 				qp);
+			flags = bnxt_re_lock_cqs(qp);
 			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+			bnxt_re_unlock_cqs(qp, flags);
 		}
 		if (!qp->sumem &&
 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
 			dev_dbg(rdev_to_dev(rdev),
 				"Move QP = %p out of flush list\n",
 				qp);
+			flags = bnxt_re_lock_cqs(qp);
 			bnxt_qplib_clean_qp(&qp->qplib_qp);
+			bnxt_re_unlock_cqs(qp, flags);
 		}
 	}
 	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
@@ -2227,10 +2232,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
 	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
 	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
 
+	/* Need unconditional fence for local invalidate
+	 * opcode to work as expected.
+	 */
+	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
 	if (wr->send_flags & IB_SEND_SIGNALED)
 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
-	if (wr->send_flags & IB_SEND_FENCE)
-		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
 	if (wr->send_flags & IB_SEND_SOLICITED)
 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
 
@@ -2251,8 +2259,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
 	wqe->frmr.levels = qplib_frpl->hwq.level + 1;
 	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
 
-	if (wr->wr.send_flags & IB_SEND_FENCE)
-		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+	/* Need unconditional fence for reg_mr
+	 * opcode to function as expected.
+	 */
+
+	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
 	if (wr->wr.send_flags & IB_SEND_SIGNALED)
 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
 
@@ -3586,7 +3598,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	int umem_pgs, page_shift, rc;
 
 	if (length > BNXT_RE_MAX_MR_SIZE) {
-		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
+		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
 			length, BNXT_RE_MAX_MR_SIZE);
 		return ERR_PTR(-ENOMEM);
 	}
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index b88a48d43a9d..e62b7c2c7da6 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -222,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
 				   struct ib_udata *udata);
 int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
 #endif /* __BNXT_RE_IB_VERBS_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 33a448036c2e..f6e361750466 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -730,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
 				       struct bnxt_re_qp *qp)
 {
 	struct ib_event event;
+	unsigned int flags;
+
+	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+		flags = bnxt_re_lock_cqs(qp);
+		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+		bnxt_re_unlock_cqs(qp, flags);
+	}
 
 	memset(&event, 0, sizeof(event));
 	if (qp->qplib_qp.srq) {
@@ -1416,9 +1423,12 @@ static void bnxt_re_task(struct work_struct *work)
 	switch (re_work->event) {
 	case NETDEV_REGISTER:
 		rc = bnxt_re_ib_reg(rdev);
-		if (rc)
+		if (rc) {
 			dev_err(rdev_to_dev(rdev),
 				"Failed to register with IB: %#x", rc);
+			bnxt_re_remove_one(rdev);
+			bnxt_re_dev_unreg(rdev);
+		}
 		break;
 	case NETDEV_UP:
 		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 3ea5b9624f6b..3a78faba8d91 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
 	}
 }
 
-void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
-				 unsigned long *flags)
-	__acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
+static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
+					      unsigned long *flags)
+	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
 {
-	spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
+	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
 	if (qp->scq == qp->rcq)
-		__acquire(&qp->rcq->hwq.lock);
+		__acquire(&qp->rcq->flush_lock);
 	else
-		spin_lock(&qp->rcq->hwq.lock);
+		spin_lock(&qp->rcq->flush_lock);
 }
 
-void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
-				 unsigned long *flags)
-	__releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
+static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
+					      unsigned long *flags)
+	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
 {
 	if (qp->scq == qp->rcq)
-		__release(&qp->rcq->hwq.lock);
+		__release(&qp->rcq->flush_lock);
 	else
-		spin_unlock(&qp->rcq->hwq.lock);
-	spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
-}
-
-static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
-						      struct bnxt_qplib_cq *cq)
-{
-	struct bnxt_qplib_cq *buddy_cq = NULL;
-
-	if (qp->scq == qp->rcq)
-		buddy_cq = NULL;
-	else if (qp->scq == cq)
-		buddy_cq = qp->rcq;
-	else
-		buddy_cq = qp->scq;
-	return buddy_cq;
-}
-
-static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
-				     struct bnxt_qplib_cq *cq)
-	__acquires(&buddy_cq->hwq.lock)
-{
-	struct bnxt_qplib_cq *buddy_cq = NULL;
-
-	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
-	if (!buddy_cq)
-		__acquire(&cq->hwq.lock);
-	else
-		spin_lock(&buddy_cq->hwq.lock);
-}
-
-static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
-				       struct bnxt_qplib_cq *cq)
-	__releases(&buddy_cq->hwq.lock)
-{
-	struct bnxt_qplib_cq *buddy_cq = NULL;
-
-	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
-	if (!buddy_cq)
-		__release(&cq->hwq.lock);
-	else
-		spin_unlock(&buddy_cq->hwq.lock);
+		spin_unlock(&qp->rcq->flush_lock);
+	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
 }
 
 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
 {
 	unsigned long flags;
 
-	bnxt_qplib_acquire_cq_locks(qp, &flags);
+	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
 	__bnxt_qplib_add_flush_qp(qp);
-	bnxt_qplib_release_cq_locks(qp, &flags);
+	bnxt_qplib_release_cq_flush_locks(qp, &flags);
 }
 
 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
@@ -177,7 +137,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
 {
 	unsigned long flags;
 
-	bnxt_qplib_acquire_cq_locks(qp, &flags);
+	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
 	__clean_cq(qp->scq, (u64)(unsigned long)qp);
 	qp->sq.hwq.prod = 0;
 	qp->sq.hwq.cons = 0;
@@ -186,7 +146,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
 	qp->rq.hwq.cons = 0;
 
 	__bnxt_qplib_del_flush_qp(qp);
-	bnxt_qplib_release_cq_locks(qp, &flags);
+	bnxt_qplib_release_cq_flush_locks(qp, &flags);
 }
 
 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
@@ -283,7 +243,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
 	u32 sw_cons, raw_cons;
 	u16 type;
 	int budget = nq->budget;
-	u64 q_handle;
+	uintptr_t q_handle;
 
 	/* Service the NQ until empty */
 	raw_cons = hwq->cons;
@@ -566,7 +526,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 
 	/* Configure the request */
 	req.dpi = cpu_to_le32(srq->dpi->dpi);
-	req.srq_handle = cpu_to_le64(srq);
+	req.srq_handle = cpu_to_le64((uintptr_t)srq);
 
 	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
 	pbl = &srq->hwq.pbl[PBL_LVL_0];
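[Editor's note] Two qplib_fp.c hunks above route pointers through uintptr_t before storing them in 64-bit fields. A plausible reading of why, shown as a tiny host-side sketch: passing a pointer where a 64-bit integer is expected draws an integer-from-pointer diagnostic on some builds, while uintptr_t makes the widening explicit and round-trips the pointer portably.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int x = 42;
	uint64_t handle = (uint64_t)(uintptr_t)&x;	/* explicit, portable widening */

	printf("%d\n", *(int *)(uintptr_t)handle);	/* round-trips: prints 42 */
	return 0;
}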
@@ -2107,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
 	/* Must block new posting of SQ and RQ */
 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
 	bnxt_qplib_cancel_phantom_processing(qp);
-
-	/* Add qp to flush list of the CQ */
-	__bnxt_qplib_add_flush_qp(qp);
 }
 
 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
@@ -2285,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 				   sw_sq_cons, cqe->wr_id, cqe->status);
 			cqe++;
 			(*budget)--;
-			bnxt_qplib_lock_buddy_cq(qp, cq);
 			bnxt_qplib_mark_qp_error(qp);
-			bnxt_qplib_unlock_buddy_cq(qp, cq);
+			/* Add qp to flush list of the CQ */
+			bnxt_qplib_add_flush_qp(qp);
 		} else {
 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
 				/* Before we complete, do WA 9060 */
@@ -2403,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
 			/* Add qp to flush list of the CQ */
-			bnxt_qplib_lock_buddy_cq(qp, cq);
-			__bnxt_qplib_add_flush_qp(qp);
-			bnxt_qplib_unlock_buddy_cq(qp, cq);
+			bnxt_qplib_add_flush_qp(qp);
 		}
 	}
 
@@ -2489,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
 			/* Add qp to flush list of the CQ */
-			bnxt_qplib_lock_buddy_cq(qp, cq);
-			__bnxt_qplib_add_flush_qp(qp);
-			bnxt_qplib_unlock_buddy_cq(qp, cq);
+			bnxt_qplib_add_flush_qp(qp);
 		}
 	}
 done:
@@ -2501,11 +2454,9 @@ done:
 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
 {
 	struct cq_base *hw_cqe, **hw_cqe_ptr;
-	unsigned long flags;
 	u32 sw_cons, raw_cons;
 	bool rc = true;
 
-	spin_lock_irqsave(&cq->hwq.lock, flags);
 	raw_cons = cq->hwq.cons;
 	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
 	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
@@ -2513,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
 
 	/* Check for Valid bit. If the CQE is valid, return false */
 	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
-	spin_unlock_irqrestore(&cq->hwq.lock, flags);
 	return rc;
 }
 
@@ -2602,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
 			/* Add qp to flush list of the CQ */
-			bnxt_qplib_lock_buddy_cq(qp, cq);
-			__bnxt_qplib_add_flush_qp(qp);
-			bnxt_qplib_unlock_buddy_cq(qp, cq);
+			bnxt_qplib_add_flush_qp(qp);
 		}
 	}
 
@@ -2719,9 +2667,7 @@ do_rq:
 	 */
 
 	/* Add qp to flush list of the CQ */
-	bnxt_qplib_lock_buddy_cq(qp, cq);
-	__bnxt_qplib_add_flush_qp(qp);
-	bnxt_qplib_unlock_buddy_cq(qp, cq);
+	bnxt_qplib_add_flush_qp(qp);
 done:
 	return rc;
 }
@@ -2750,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
 	u32 budget = num_cqes;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cq->hwq.lock, flags);
+	spin_lock_irqsave(&cq->flush_lock, flags);
 	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
 		dev_dbg(&cq->hwq.pdev->dev,
 			"QPLIB: FP: Flushing SQ QP= %p",
@@ -2764,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
 			qp);
 		__flush_rq(&qp->rq, qp, &cqe, &budget);
 	}
-	spin_unlock_irqrestore(&cq->hwq.lock, flags);
+	spin_unlock_irqrestore(&cq->flush_lock, flags);
 
 	return num_cqes - budget;
 }
@@ -2773,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
 		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
 {
 	struct cq_base *hw_cqe, **hw_cqe_ptr;
-	unsigned long flags;
 	u32 sw_cons, raw_cons;
 	int budget, rc = 0;
 
-	spin_lock_irqsave(&cq->hwq.lock, flags);
 	raw_cons = cq->hwq.cons;
 	budget = num_cqes;
 
@@ -2853,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
 		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
 	}
 exit:
-	spin_unlock_irqrestore(&cq->hwq.lock, flags);
 	return num_cqes - budget;
 }
 
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cq->hwq.lock, flags);
 	if (arm_type)
 		bnxt_qplib_arm_cq(cq, arm_type);
 	/* Using cq->arm_state variable to track whether to issue cq handler */
 	atomic_set(&cq->arm_state, 1);
-	spin_unlock_irqrestore(&cq->hwq.lock, flags);
 }
 
 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ca0a2ffa3509..ade9f13c0fd1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -389,6 +389,18 @@ struct bnxt_qplib_cq {
 	struct list_head sqf_head, rqf_head;
 	atomic_t arm_state;
 	spinlock_t compl_lock; /* synch CQ handlers */
+/* Locking Notes:
+ * QP can move to error state from modify_qp, async error event or error
+ * CQE as part of poll_cq. When QP is moved to error state, it gets added
+ * to two flush lists, one each for SQ and RQ.
+ * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
+ * flush_locks should be acquired when QP is moved to error. The control path
+ * operations(modify_qp and async error events) are synchronized with poll_cq
+ * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
+ * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
+ * of the same QP while manipulating the flush list.
+ */
+	spinlock_t flush_lock; /* QP flush management */
 };
 
 #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE	sizeof(struct xrrq_irrq)
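[Editor's note] A compact illustration of the dual-lock discipline described in the locking notes above, sketched with pthreads rather than kernel spinlocks (build with -pthread): take the send-CQ lock first, and take the receive-CQ lock only when it is a distinct CQ, so a QP whose SQ and RQ share one CQ never deadlocks on itself.

#include <pthread.h>

struct cq { pthread_mutex_t flush_lock; };
struct qp { struct cq *scq, *rcq; };

static void acquire_cq_flush_locks(struct qp *qp)
{
	pthread_mutex_lock(&qp->scq->flush_lock);
	if (qp->scq != qp->rcq)		/* skip the second lock if shared */
		pthread_mutex_lock(&qp->rcq->flush_lock);
}

static void release_cq_flush_locks(struct qp *qp)
{
	if (qp->scq != qp->rcq)
		pthread_mutex_unlock(&qp->rcq->flush_lock);
	pthread_mutex_unlock(&qp->scq->flush_lock);
}

int main(void)
{
	struct cq c = { PTHREAD_MUTEX_INITIALIZER };
	struct qp q = { &c, &c };	/* shared CQ: second lock skipped */

	acquire_cq_flush_locks(&q);
	release_cq_flush_locks(&q);
	return 0;
}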
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 8329ec6a7946..80027a494730 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 				  err_event->res_err_state_reason);
 		if (!qp)
 			break;
-		bnxt_qplib_acquire_cq_locks(qp, &flags);
 		bnxt_qplib_mark_qp_error(qp);
-		bnxt_qplib_release_cq_locks(qp, &flags);
+		rcfw->aeq_handler(rcfw, qp_event, qp);
 		break;
 	default:
 		/* Command Response */
@@ -460,7 +459,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
 	int rc;
 
 	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
-
+	/* Supply (log-base-2-of-host-page-size - base-page-shift)
+	 * to bono to adjust the doorbell page sizes.
+	 */
+	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
+					   RCFW_DBR_BASE_PAGE_SHIFT);
 	/*
 	 * VFs need not setup the HW context area, PF
 	 * shall setup this area for VF. Skipping the
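[Editor's note] The value programmed into log2_dbr_pg_size above is just the difference between the host page shift and the 4 KiB base shift. A worked example, assuming RCFW_DBR_BASE_PAGE_SHIFT is 12 as the qplib_rcfw.h hunk below defines:

#include <stdio.h>

#define RCFW_DBR_BASE_PAGE_SHIFT 12

int main(void)
{
	int page_shift;

	for (page_shift = 12; page_shift <= 16; page_shift += 4)
		printf("PAGE_SHIFT=%d -> log2_dbr_pg_size=%d (%d KiB pages)\n",
		       page_shift,
		       page_shift - RCFW_DBR_BASE_PAGE_SHIFT,
		       1 << (page_shift - 10));
	return 0;	/* prints 0 for 4 KiB hosts, 4 for 64 KiB hosts */
}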
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 6bee6e3636ea..c7cce2e4185e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -49,6 +49,7 @@
 #define RCFW_COMM_SIZE			0x104
 
 #define RCFW_DBR_PCI_BAR_REGION		2
+#define RCFW_DBR_BASE_PAGE_SHIFT	12
 
 #define RCFW_CMD_PREP(req, CMD, cmd_flags)	\
 	do {					\
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 03057983341f..ee98e5efef84 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -139,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	attr->max_pkey = le32_to_cpu(sb->max_pkeys);
 
 	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
-	attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
+	attr->l2_db_size = (sb->l2_db_space_size + 1) *
+			    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
 	attr->max_sgid = le32_to_cpu(sb->max_gid);
 
 	bnxt_qplib_query_version(rcfw, attr->fw_ver);
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 2d7ea096a247..3e5a4f760d0e 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw {
 	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M	(0x3UL << 4)
 	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M	(0x4UL << 4)
 	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G	(0x5UL << 4)
-	__le16	reserved16;
+	/* This value is (log-base-2-of-DBR-page-size - 12).
+	 * 0 for 4KB. HW supported values are enumerated below.
+	 */
+	__le16	log2_dbr_pg_size;
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK	0xfUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT		0
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K	0x0UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K	0x1UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K	0x2UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K	0x3UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K	0x4UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K	0x5UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K	0x6UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K	0x7UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M	0x8UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M	0x9UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M	0xaUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M	0xbUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M	0xcUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M	0xdUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M	0xeUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M	0xfUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \
+		CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
 	__le64	qpc_page_dir;
 	__le64	mrw_page_dir;
 	__le64	srq_page_dir;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 9a566ee3ceff..82adc0d1d30e 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
 	wc->dlid_path_bits = 0;
 
 	if (is_eth) {
+		wc->slid = 0;
 		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
 		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
 		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
@@ -851,7 +852,6 @@ repoll:
 		}
 	}
 
-	wc->slid = be16_to_cpu(cqe->rlid);
 	g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
 	wc->src_qp = g_mlpath_rqpn & 0xffffff;
 	wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
@@ -860,6 +860,7 @@ repoll:
 	wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
 			cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
 	if (is_eth) {
+		wc->slid = 0;
 		wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
 		if (be32_to_cpu(cqe->vlan_my_qpn) &
 		    MLX4_CQE_CVLAN_PRESENT_MASK) {
@@ -871,6 +872,7 @@ repoll:
 		memcpy(wc->smac, cqe->smac, ETH_ALEN);
 		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
 	} else {
+		wc->slid = be16_to_cpu(cqe->rlid);
 		wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
 		wc->vlan_id = 0xffff;
 	}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8d2ee9322f2e..5a0e4fc4785a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
 			gid_tbl[i].version = 2;
 			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
 				gid_tbl[i].type = 1;
-			else
-				memset(&gid_tbl[i].gid, 0, 12);
 		}
 	}
 
@@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device,
 		if (!gids) {
 			ret = -ENOMEM;
 		} else {
-			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
-				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
+				memcpy(&gids[i].gid,
+				       &port_gid_table->gids[i].gid,
+				       sizeof(union ib_gid));
+				gids[i].gid_type =
+				    port_gid_table->gids[i].gid_type;
+			}
 		}
 	}
 	spin_unlock_bh(&iboe->lock);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index c4c7b82f4ac1..94a27d89a303 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -221,7 +221,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
 		break;
 	}
-	wc->slid = be16_to_cpu(cqe->slid);
 	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
 	wc->dlid_path_bits = cqe->ml_path;
 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
@@ -236,10 +235,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 	}
 
 	if (ll != IB_LINK_LAYER_ETHERNET) {
+		wc->slid = be16_to_cpu(cqe->slid);
 		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
 		return;
 	}
 
+	wc->slid = 0;
 	vlan_present = cqe->l4_l3_hdr_type & 0x1;
 	roce_packet_type   = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
 	if (vlan_present) {
@@ -1188,7 +1189,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	if (ucmd.reserved0 || ucmd.reserved1)
 		return -EINVAL;
 
-	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+	/* check multiplication overflow */
+	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+		return -EINVAL;
+
+	umem = ib_umem_get(context, ucmd.buf_addr,
+			   (size_t)ucmd.cqe_size * entries,
 			   IB_ACCESS_LOCAL_WRITE, 1);
 	if (IS_ERR(umem)) {
 		err = PTR_ERR(umem);
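[Editor's note] The resize_user() hunk above guards the entries * cqe_size multiplication before handing the byte count to ib_umem_get(). The same guard as a runnable userspace sketch (the check mirrors the hunk exactly):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int checked_mul(size_t entries, size_t cqe_size, size_t *out)
{
	if (cqe_size && SIZE_MAX / cqe_size <= entries - 1)
		return -1;		/* product would overflow size_t */
	*out = entries * cqe_size;
	return 0;
}

int main(void)
{
	size_t len;

	printf("%d\n", checked_mul(4096, 64, &len));	/* 0: fits */
	printf("%d\n", checked_mul(SIZE_MAX, 2, &len));	/* -1: wraps */
	return 0;
}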
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 61cc3d7db257..0e04fdddf670 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -30,12 +30,15 @@ static const struct mlx5_ib_profile rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
 		     mlx5_ib_stage_bfrag_init,
 		     mlx5_ib_stage_bfrag_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+		     NULL,
+		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-		     mlx5_ib_stage_umr_res_init,
-		     mlx5_ib_stage_umr_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+		     mlx5_ib_stage_post_ib_reg_umr_init,
+		     NULL),
 	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
 		     mlx5_ib_stage_class_attr_init,
 		     NULL),
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d9474b95d8e5..390e4375647e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -256,12 +256,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
 	struct mlx5_ib_multiport_info *mpi;
 	struct mlx5_ib_port *port;
 
+	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
+	    ll != IB_LINK_LAYER_ETHERNET) {
+		if (native_port_num)
+			*native_port_num = ib_port_num;
+		return ibdev->mdev;
+	}
+
 	if (native_port_num)
 		*native_port_num = 1;
 
-	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
-		return ibdev->mdev;
-
 	port = &ibdev->port[ib_port_num - 1];
 	if (!port)
 		return NULL;
@@ -3297,7 +3301,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	struct mlx5_ib_dev *ibdev;
 	struct ib_event ibev;
 	bool fatal = false;
-	u8 port = 0;
+	u8 port = (u8)work->param;
 
 	if (mlx5_core_is_mp_slave(work->dev)) {
 		ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
@@ -3317,8 +3321,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	case MLX5_DEV_EVENT_PORT_UP:
 	case MLX5_DEV_EVENT_PORT_DOWN:
 	case MLX5_DEV_EVENT_PORT_INITIALIZED:
-		port = (u8)work->param;
-
 		/* In RoCE, port up/down events are handled in
 		 * mlx5_netdev_event().
 		 */
@@ -3332,24 +3334,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 
 	case MLX5_DEV_EVENT_LID_CHANGE:
 		ibev.event = IB_EVENT_LID_CHANGE;
-		port = (u8)work->param;
 		break;
 
 	case MLX5_DEV_EVENT_PKEY_CHANGE:
 		ibev.event = IB_EVENT_PKEY_CHANGE;
-		port = (u8)work->param;
-
 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
 		break;
 
 	case MLX5_DEV_EVENT_GUID_CHANGE:
 		ibev.event = IB_EVENT_GID_CHANGE;
-		port = (u8)work->param;
 		break;
 
 	case MLX5_DEV_EVENT_CLIENT_REREG:
 		ibev.event = IB_EVENT_CLIENT_REREGISTER;
-		port = (u8)work->param;
 		break;
 	case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
 		schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3361,7 +3358,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	ibev.device	      = &ibdev->ib_dev;
 	ibev.element.port_num = port;
 
-	if (port < 1 || port > ibdev->num_ports) {
+	if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
 		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
 		goto out;
 	}
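[Editor's note] The hunk above swaps an open-coded range test for rdma_is_port_valid(). For a device with 1-based port numbering the two are equivalent; a sketch of the helper's core check, simplified (the real helper derives the start port from the device, which differs for switch devices):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for rdma_is_port_valid(): IB ports are 1-based. */
static bool port_valid(unsigned int port, unsigned int num_ports)
{
	return port >= 1 && port <= num_ports;
}

int main(void)
{
	printf("%d %d %d\n", port_valid(0, 2), port_valid(1, 2),
	       port_valid(3, 2));	/* prints: 0 1 0 */
	return 0;
}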
@@ -4999,19 +4996,19 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
 	return ib_register_device(&dev->ib_dev, NULL);
 }
 
-void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
 {
-	ib_unregister_device(&dev->ib_dev);
+	destroy_umrc_res(dev);
 }
 
-int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
 {
-	return create_umr_res(dev);
+	ib_unregister_device(&dev->ib_dev);
 }
 
-void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
+int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 {
-	destroy_umrc_res(dev);
+	return create_umr_res(dev);
 }
 
 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
@@ -5130,12 +5127,15 @@ static const struct mlx5_ib_profile pf_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
 		     mlx5_ib_stage_bfrag_init,
 		     mlx5_ib_stage_bfrag_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+		     NULL,
+		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-		     mlx5_ib_stage_umr_res_init,
-		     mlx5_ib_stage_umr_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+		     mlx5_ib_stage_post_ib_reg_umr_init,
+		     NULL),
 	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
 		     mlx5_ib_stage_delay_drop_init,
 		     mlx5_ib_stage_delay_drop_cleanup),
@@ -5172,12 +5172,15 @@ static const struct mlx5_ib_profile nic_rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
 		     mlx5_ib_stage_bfrag_init,
 		     mlx5_ib_stage_bfrag_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+		     NULL,
+		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-		     mlx5_ib_stage_umr_res_init,
-		     mlx5_ib_stage_umr_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+		     mlx5_ib_stage_post_ib_reg_umr_init,
+		     NULL),
 	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
 		     mlx5_ib_stage_class_attr_init,
 		     NULL),
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e0bad28e0f09..c33bf1523d67 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -742,8 +742,9 @@ enum mlx5_ib_stages {
 	MLX5_IB_STAGE_CONG_DEBUGFS,
 	MLX5_IB_STAGE_UAR,
 	MLX5_IB_STAGE_BFREG,
+	MLX5_IB_STAGE_PRE_IB_REG_UMR,
 	MLX5_IB_STAGE_IB_REG,
-	MLX5_IB_STAGE_UMR_RESOURCES,
+	MLX5_IB_STAGE_POST_IB_REG_UMR,
 	MLX5_IB_STAGE_DELAY_DROP,
 	MLX5_IB_STAGE_CLASS_ATTR,
 	MLX5_IB_STAGE_REP_REG,
@@ -1068,10 +1069,10 @@ int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
 void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
 int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
 void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
+void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
 int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
 void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
 int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 		      const struct mlx5_ib_profile *profile,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index a5fad3e87ff7..95a36e9ea552 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -839,7 +839,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 	*umem = ib_umem_get(pd->uobject->context, start, length,
 			   access_flags, 0);
 	err = PTR_ERR_OR_ZERO(*umem);
-	if (err < 0) {
+	if (err) {
+		*umem = NULL;
 		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
 		return err;
 	}
@@ -1416,6 +1417,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 	if (err) {
 		mlx5_ib_warn(dev, "Failed to rereg UMR\n");
 		ib_umem_release(mr->umem);
+		mr->umem = NULL;
 		clean_mr(dev, mr);
 		return err;
 	}
@@ -1499,14 +1501,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		u32 key = mr->mmkey.key;
 
 		err = destroy_mkey(dev, mr);
-		kfree(mr);
 		if (err) {
 			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
 				     key, err);
 			return err;
 		}
-	} else {
-		mlx5_mr_cache_free(dev, mr);
 	}
 
 	return 0;
@@ -1549,6 +1548,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		atomic_sub(npages, &dev->mdev->priv.reg_pages);
 	}
 
+	if (!mr->allocated_from_cache)
+		kfree(mr);
+	else
+		mlx5_mr_cache_free(dev, mr);
+
 	return 0;
 }
 
@@ -1817,7 +1821,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 
 	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
 	mr->ibmr.length = 0;
-	mr->ndescs = sg_nents;
 
 	for_each_sg(sgl, sg, sg_nents, i) {
 		if (unlikely(i >= mr->max_descs))
@@ -1829,6 +1832,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 
 		sg_offset = 0;
 	}
+	mr->ndescs = i;
 
 	if (sg_offset_p)
 		*sg_offset_p = sg_offset;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0e67e3682bca..85c612ac547a 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1177,7 +1177,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 	ib_umem_release(sq->ubuffer.umem);
 }
 
-static int get_rq_pas_size(void *qpc)
+static size_t get_rq_pas_size(void *qpc)
 {
 	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
 	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
@@ -1193,7 +1193,8 @@ static int get_rq_pas_size(void *qpc)
 }
 
 static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
-				   struct mlx5_ib_rq *rq, void *qpin)
+				   struct mlx5_ib_rq *rq, void *qpin,
+				   size_t qpinlen)
 {
 	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
 	__be64 *pas;
@@ -1202,9 +1203,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	void *rqc;
 	void *wq;
 	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
-	int inlen;
+	size_t rq_pas_size = get_rq_pas_size(qpc);
+	size_t inlen;
 	int err;
-	u32 rq_pas_size = get_rq_pas_size(qpc);
+
+	if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
+		return -EINVAL;
 
 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
 	in = kvzalloc(inlen, GFP_KERNEL);
@@ -1297,7 +1301,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 }
 
 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
-				u32 *in,
+				u32 *in, size_t inlen,
 				struct ib_pd *pd)
 {
 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
@@ -1329,7 +1333,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
 		if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
 			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
-		err = create_raw_packet_qp_rq(dev, rq, in);
+		err = create_raw_packet_qp_rq(dev, rq, in, inlen);
 		if (err)
 			goto err_destroy_sq;
 
@@ -1608,6 +1612,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	u32 uidx = MLX5_IB_DEFAULT_UIDX;
 	struct mlx5_ib_create_qp ucmd;
 	struct mlx5_ib_qp_base *base;
+	int mlx5_st;
 	void *qpc;
 	u32 *in;
 	int err;
@@ -1616,6 +1621,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 
+	mlx5_st = to_mlx5_st(init_attr->qp_type);
+	if (mlx5_st < 0)
+		return -EINVAL;
+
 	if (init_attr->rwq_ind_tbl) {
 		if (!udata)
 			return -ENOSYS;
@@ -1777,7 +1786,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
-	MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+	MLX5_SET(qpc, qpc, st, mlx5_st);
 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 
 	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
@@ -1891,11 +1900,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		}
 	}
 
+	if (inlen < 0) {
+		err = -EINVAL;
+		goto err;
+	}
+
 	if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
 		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
 		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
-		err = create_raw_packet_qp(dev, qp, in, pd);
+		err = create_raw_packet_qp(dev, qp, in, inlen, pd);
 	} else {
 		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
 	}
@@ -3116,8 +3130,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		goto out;
 
 	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
-	    !optab[mlx5_cur][mlx5_new])
+	    !optab[mlx5_cur][mlx5_new]) {
+		err = -EINVAL;
 		goto out;
+	}
 
 	op = optab[mlx5_cur][mlx5_new];
 	optpar = ib_mask_to_mlx5_opt(attr_mask);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 6d5fadad9090..3c7522d025f2 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -241,8 +241,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_srq *srq;
-	int desc_size;
-	int buf_size;
+	size_t desc_size;
+	size_t buf_size;
 	int err;
 	struct mlx5_srq_attr in = {0};
 	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
@@ -266,15 +266,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
 	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
 		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
+	if (desc_size == 0 || srq->msrq.max_gs > desc_size)
+		return ERR_PTR(-EINVAL);
 	desc_size = roundup_pow_of_two(desc_size);
-	desc_size = max_t(int, 32, desc_size);
+	desc_size = max_t(size_t, 32, desc_size);
+	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
+		return ERR_PTR(-EINVAL);
 	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
 		sizeof(struct mlx5_wqe_data_seg);
 	srq->msrq.wqe_shift = ilog2(desc_size);
 	buf_size = srq->msrq.max * desc_size;
-	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
-		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
-		    srq->msrq.max_avail_gather);
+	if (buf_size < desc_size)
+		return ERR_PTR(-EINVAL);
 	in.type = init_attr->srq_type;
 
 	if (pd->uobject)
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 478b7317b80a..26dc374787f7 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -458,8 +458,7 @@ qedr_addr6_resolve(struct qedr_dev *dev,
 		}
 		return -EINVAL;
 	}
-	neigh = dst_neigh_lookup(dst, &dst_in);
-
+	neigh = dst_neigh_lookup(dst, &fl6.daddr);
 	if (neigh) {
 		rcu_read_lock();
 		if (neigh->nud_state & NUD_VALID) {
@@ -494,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
 	qp = idr_find(&dev->qpidr, conn_param->qpn);
 
-	laddr = (struct sockaddr_in *)&cm_id->local_addr;
-	raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-	laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
-	raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+	raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+	raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+	DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
+		 ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
+		 ntohs(raddr->sin_port));
 
 	DP_DEBUG(dev, QEDR_MSG_IWARP,
 		 "Connect source address: %pISpc, remote address: %pISpc\n",
@@ -599,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	int rc;
 	int i;
 
-	laddr = (struct sockaddr_in *)&cm_id->local_addr;
-	laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
 
 	DP_DEBUG(dev, QEDR_MSG_IWARP,
 		 "Create Listener address: %pISpc\n", &cm_id->local_addr);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 53f00dbf313f..875b17272d65 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 	switch (wr->opcode) {
 	case IB_WR_SEND_WITH_IMM:
+		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+			rc = -EINVAL;
+			*bad_wr = wr;
+			break;
+		}
 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
 		swqe->wqe_size = 2;
@@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		break;
 
 	case IB_WR_RDMA_WRITE_WITH_IMM:
+		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+			rc = -EINVAL;
+			*bad_wr = wr;
+			break;
+		}
 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
 
@@ -3724,7 +3734,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
 	struct qedr_cq *cq = get_qedr_cq(ibcq);
-	union rdma_cqe *cqe = cq->latest_cqe;
+	union rdma_cqe *cqe;
 	u32 old_cons, new_cons;
 	unsigned long flags;
 	int update = 0;
@@ -3741,6 +3751,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 		return qedr_gsi_poll_cq(ibcq, num_entries, wc);
 
 	spin_lock_irqsave(&cq->cq_lock, flags);
+	cqe = cq->latest_cqe;
 	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
 	while (num_entries && is_valid_cqe(cq, cqe)) {
 		struct qedr_qp *qp;
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 1b2e5362a3ff..cc429b567d0a 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -489,11 +489,13 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
 	unsigned long timeout;
 	struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
 
-	if (percpu_ref_is_zero(&mr->refcount))
-		return 0;
-	/* avoid dma mr */
-	if (mr->lkey)
+	if (mr->lkey) {
+		/* avoid dma mr */
 		rvt_dereg_clean_qps(mr);
+		/* @mr was indexed on rcu protected @lkey_table */
+		synchronize_rcu();
+	}
+
 	timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
 	if (!timeout) {
 		rvt_pr_err(rdi,