aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-03-30 01:23:24 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-03-30 01:23:24 -0400
commitd89b9f50291a214f9d670594c9fc35483b6d87a4 (patch)
tree914d2691a3c91008fd06b0326428ab15dde198ff
parentab12762b19ad38d8f4611bbbba16e8562b6a0a98 (diff)
parent84652aefb347297aa08e91e283adf7b18f77c2d5 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe:
 "It has been fairly silent lately on our -rc front. Big queue of
  patches on the mailing list going to for-next though.

  Bug fixes:

   - qedr driver bugfixes causing application hangs, wrong uapi errnos,
     and a race condition

   - three syzkaller found bugfixes in the ucma uapi

  Regression fixes for things introduced in 4.16:

   - Crash on error introduced in mlx5 UMR flow

   - Crash on module unload/etc introduced by bad interaction of
     restrack and mlx5 patches this cycle

   - Typo in a two line syzkaller bugfix causing a bad regression

   - Coverity report of nonsense code in hns driver"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/ucma: Introduce safer rdma_addr_size() variants
  RDMA/hns: ensure for-loop actually iterates and free's buffers
  RDMA/ucma: Check that device exists prior to accessing it
  RDMA/ucma: Check that device is connected prior to access it
  RDMA/rdma_cm: Fix use after free race with process_one_req
  RDMA/qedr: Fix QP state initialization race
  RDMA/qedr: Fix rc initialization on CNQ allocation failure
  RDMA/qedr: fix QP's ack timeout configuration
  RDMA/ucma: Correct option size check using optlen
  RDMA/restrack: Move restrack_clean to be symmetrical to restrack_init
  IB/mlx5: Don't clean uninitialized UMR resources
-rw-r--r--drivers/infiniband/core/addr.c25
-rw-r--r--drivers/infiniband/core/device.c3
-rw-r--r--drivers/infiniband/core/ucma.c47
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c12
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c3
-rw-r--r--drivers/infiniband/hw/qedr/main.c3
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c45
-rw-r--r--include/rdma/ib_addr.h2
9 files changed, 102 insertions, 40 deletions
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 9183d148d644..cb1d2ab13c66 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -207,6 +207,22 @@ int rdma_addr_size(struct sockaddr *addr)
207} 207}
208EXPORT_SYMBOL(rdma_addr_size); 208EXPORT_SYMBOL(rdma_addr_size);
209 209
210int rdma_addr_size_in6(struct sockaddr_in6 *addr)
211{
212 int ret = rdma_addr_size((struct sockaddr *) addr);
213
214 return ret <= sizeof(*addr) ? ret : 0;
215}
216EXPORT_SYMBOL(rdma_addr_size_in6);
217
218int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
219{
220 int ret = rdma_addr_size((struct sockaddr *) addr);
221
222 return ret <= sizeof(*addr) ? ret : 0;
223}
224EXPORT_SYMBOL(rdma_addr_size_kss);
225
210static struct rdma_addr_client self; 226static struct rdma_addr_client self;
211 227
212void rdma_addr_register_client(struct rdma_addr_client *client) 228void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -586,6 +602,15 @@ static void process_one_req(struct work_struct *_work)
586 list_del(&req->list); 602 list_del(&req->list);
587 mutex_unlock(&lock); 603 mutex_unlock(&lock);
588 604
605 /*
606 * Although the work will normally have been canceled by the
607 * workqueue, it can still be requeued as long as it is on the
608 * req_list, so it could have been requeued before we grabbed &lock.
609 * We need to cancel it after it is removed from req_list to really be
610 * sure it is safe to free.
611 */
612 cancel_delayed_work(&req->work);
613
589 req->callback(req->status, (struct sockaddr *)&req->src_addr, 614 req->callback(req->status, (struct sockaddr *)&req->src_addr,
590 req->addr, req->context); 615 req->addr, req->context);
591 put_client(req->client); 616 put_client(req->client);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index bb065c9449be..b7459cf524e4 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -290,6 +290,7 @@ void ib_dealloc_device(struct ib_device *device)
290{ 290{
291 WARN_ON(device->reg_state != IB_DEV_UNREGISTERED && 291 WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
292 device->reg_state != IB_DEV_UNINITIALIZED); 292 device->reg_state != IB_DEV_UNINITIALIZED);
293 rdma_restrack_clean(&device->res);
293 put_device(&device->dev); 294 put_device(&device->dev);
294} 295}
295EXPORT_SYMBOL(ib_dealloc_device); 296EXPORT_SYMBOL(ib_dealloc_device);
@@ -600,8 +601,6 @@ void ib_unregister_device(struct ib_device *device)
600 } 601 }
601 up_read(&lists_rwsem); 602 up_read(&lists_rwsem);
602 603
603 rdma_restrack_clean(&device->res);
604
605 ib_device_unregister_rdmacg(device); 604 ib_device_unregister_rdmacg(device);
606 ib_device_unregister_sysfs(device); 605 ib_device_unregister_sysfs(device);
607 606
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index e5a1e7d81326..d933336d7e01 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -632,6 +632,9 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
632 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 632 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
633 return -EFAULT; 633 return -EFAULT;
634 634
635 if (!rdma_addr_size_in6(&cmd.addr))
636 return -EINVAL;
637
635 ctx = ucma_get_ctx(file, cmd.id); 638 ctx = ucma_get_ctx(file, cmd.id);
636 if (IS_ERR(ctx)) 639 if (IS_ERR(ctx))
637 return PTR_ERR(ctx); 640 return PTR_ERR(ctx);
@@ -645,22 +648,21 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
645 int in_len, int out_len) 648 int in_len, int out_len)
646{ 649{
647 struct rdma_ucm_bind cmd; 650 struct rdma_ucm_bind cmd;
648 struct sockaddr *addr;
649 struct ucma_context *ctx; 651 struct ucma_context *ctx;
650 int ret; 652 int ret;
651 653
652 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 654 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
653 return -EFAULT; 655 return -EFAULT;
654 656
655 addr = (struct sockaddr *) &cmd.addr; 657 if (cmd.reserved || !cmd.addr_size ||
656 if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr))) 658 cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
657 return -EINVAL; 659 return -EINVAL;
658 660
659 ctx = ucma_get_ctx(file, cmd.id); 661 ctx = ucma_get_ctx(file, cmd.id);
660 if (IS_ERR(ctx)) 662 if (IS_ERR(ctx))
661 return PTR_ERR(ctx); 663 return PTR_ERR(ctx);
662 664
663 ret = rdma_bind_addr(ctx->cm_id, addr); 665 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
664 ucma_put_ctx(ctx); 666 ucma_put_ctx(ctx);
665 return ret; 667 return ret;
666} 668}
@@ -670,23 +672,22 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
670 int in_len, int out_len) 672 int in_len, int out_len)
671{ 673{
672 struct rdma_ucm_resolve_ip cmd; 674 struct rdma_ucm_resolve_ip cmd;
673 struct sockaddr *src, *dst;
674 struct ucma_context *ctx; 675 struct ucma_context *ctx;
675 int ret; 676 int ret;
676 677
677 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 678 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
678 return -EFAULT; 679 return -EFAULT;
679 680
680 src = (struct sockaddr *) &cmd.src_addr; 681 if (!rdma_addr_size_in6(&cmd.src_addr) ||
681 dst = (struct sockaddr *) &cmd.dst_addr; 682 !rdma_addr_size_in6(&cmd.dst_addr))
682 if (!rdma_addr_size(src) || !rdma_addr_size(dst))
683 return -EINVAL; 683 return -EINVAL;
684 684
685 ctx = ucma_get_ctx(file, cmd.id); 685 ctx = ucma_get_ctx(file, cmd.id);
686 if (IS_ERR(ctx)) 686 if (IS_ERR(ctx))
687 return PTR_ERR(ctx); 687 return PTR_ERR(ctx);
688 688
689 ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); 689 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
690 (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
690 ucma_put_ctx(ctx); 691 ucma_put_ctx(ctx);
691 return ret; 692 return ret;
692} 693}
@@ -696,24 +697,23 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
696 int in_len, int out_len) 697 int in_len, int out_len)
697{ 698{
698 struct rdma_ucm_resolve_addr cmd; 699 struct rdma_ucm_resolve_addr cmd;
699 struct sockaddr *src, *dst;
700 struct ucma_context *ctx; 700 struct ucma_context *ctx;
701 int ret; 701 int ret;
702 702
703 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 703 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
704 return -EFAULT; 704 return -EFAULT;
705 705
706 src = (struct sockaddr *) &cmd.src_addr; 706 if (cmd.reserved ||
707 dst = (struct sockaddr *) &cmd.dst_addr; 707 (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
708 if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) || 708 !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
709 !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
710 return -EINVAL; 709 return -EINVAL;
711 710
712 ctx = ucma_get_ctx(file, cmd.id); 711 ctx = ucma_get_ctx(file, cmd.id);
713 if (IS_ERR(ctx)) 712 if (IS_ERR(ctx))
714 return PTR_ERR(ctx); 713 return PTR_ERR(ctx);
715 714
716 ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); 715 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
716 (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
717 ucma_put_ctx(ctx); 717 ucma_put_ctx(ctx);
718 return ret; 718 return ret;
719} 719}
@@ -1166,6 +1166,11 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
1166 if (IS_ERR(ctx)) 1166 if (IS_ERR(ctx))
1167 return PTR_ERR(ctx); 1167 return PTR_ERR(ctx);
1168 1168
1169 if (!ctx->cm_id->device) {
1170 ret = -EINVAL;
1171 goto out;
1172 }
1173
1169 resp.qp_attr_mask = 0; 1174 resp.qp_attr_mask = 0;
1170 memset(&qp_attr, 0, sizeof qp_attr); 1175 memset(&qp_attr, 0, sizeof qp_attr);
1171 qp_attr.qp_state = cmd.qp_state; 1176 qp_attr.qp_state = cmd.qp_state;
@@ -1307,7 +1312,7 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
1307 if (IS_ERR(ctx)) 1312 if (IS_ERR(ctx))
1308 return PTR_ERR(ctx); 1313 return PTR_ERR(ctx);
1309 1314
1310 if (unlikely(cmd.optval > KMALLOC_MAX_SIZE)) 1315 if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
1311 return -EINVAL; 1316 return -EINVAL;
1312 1317
1313 optval = memdup_user((void __user *) (unsigned long) cmd.optval, 1318 optval = memdup_user((void __user *) (unsigned long) cmd.optval,
@@ -1331,7 +1336,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
1331{ 1336{
1332 struct rdma_ucm_notify cmd; 1337 struct rdma_ucm_notify cmd;
1333 struct ucma_context *ctx; 1338 struct ucma_context *ctx;
1334 int ret; 1339 int ret = -EINVAL;
1335 1340
1336 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1341 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1337 return -EFAULT; 1342 return -EFAULT;
@@ -1340,7 +1345,9 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
1340 if (IS_ERR(ctx)) 1345 if (IS_ERR(ctx))
1341 return PTR_ERR(ctx); 1346 return PTR_ERR(ctx);
1342 1347
1343 ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event); 1348 if (ctx->cm_id->device)
1349 ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
1350
1344 ucma_put_ctx(ctx); 1351 ucma_put_ctx(ctx);
1345 return ret; 1352 return ret;
1346} 1353}
@@ -1426,7 +1433,7 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
1426 join_cmd.response = cmd.response; 1433 join_cmd.response = cmd.response;
1427 join_cmd.uid = cmd.uid; 1434 join_cmd.uid = cmd.uid;
1428 join_cmd.id = cmd.id; 1435 join_cmd.id = cmd.id;
1429 join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); 1436 join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
1430 if (!join_cmd.addr_size) 1437 if (!join_cmd.addr_size)
1431 return -EINVAL; 1438 return -EINVAL;
1432 1439
@@ -1445,7 +1452,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
1445 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1452 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1446 return -EFAULT; 1453 return -EFAULT;
1447 1454
1448 if (!rdma_addr_size((struct sockaddr *)&cmd.addr)) 1455 if (!rdma_addr_size_kss(&cmd.addr))
1449 return -EINVAL; 1456 return -EINVAL;
1450 1457
1451 return ucma_process_join(file, &cmd, out_len); 1458 return ucma_process_join(file, &cmd, out_len);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index db2ff352d75f..ec638778661c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -4383,7 +4383,7 @@ err_dma_alloc_buf:
4383 eq->l0_dma = 0; 4383 eq->l0_dma = 0;
4384 4384
4385 if (mhop_num == 1) 4385 if (mhop_num == 1)
4386 for (i -= i; i >= 0; i--) 4386 for (i -= 1; i >= 0; i--)
4387 dma_free_coherent(dev, buf_chk_sz, eq->buf[i], 4387 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
4388 eq->buf_dma[i]); 4388 eq->buf_dma[i]);
4389 else if (mhop_num == 2) { 4389 else if (mhop_num == 2) {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index da091de4e69d..7f8bda3a2005 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3448,9 +3448,12 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev)
3448 if (err) 3448 if (err)
3449 mlx5_ib_warn(dev, "mr cache cleanup failed\n"); 3449 mlx5_ib_warn(dev, "mr cache cleanup failed\n");
3450 3450
3451 mlx5_ib_destroy_qp(dev->umrc.qp); 3451 if (dev->umrc.qp)
3452 ib_free_cq(dev->umrc.cq); 3452 mlx5_ib_destroy_qp(dev->umrc.qp);
3453 ib_dealloc_pd(dev->umrc.pd); 3453 if (dev->umrc.cq)
3454 ib_free_cq(dev->umrc.cq);
3455 if (dev->umrc.pd)
3456 ib_dealloc_pd(dev->umrc.pd);
3454} 3457}
3455 3458
3456enum { 3459enum {
@@ -3552,12 +3555,15 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
3552 3555
3553error_4: 3556error_4:
3554 mlx5_ib_destroy_qp(qp); 3557 mlx5_ib_destroy_qp(qp);
3558 dev->umrc.qp = NULL;
3555 3559
3556error_3: 3560error_3:
3557 ib_free_cq(cq); 3561 ib_free_cq(cq);
3562 dev->umrc.cq = NULL;
3558 3563
3559error_2: 3564error_2:
3560 ib_dealloc_pd(pd); 3565 ib_dealloc_pd(pd);
3566 dev->umrc.pd = NULL;
3561 3567
3562error_0: 3568error_0:
3563 kfree(attr); 3569 kfree(attr);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index c51c602f06d6..3e0b3f0238d6 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -739,6 +739,9 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
739{ 739{
740 int i; 740 int i;
741 741
742 if (!dev->cache.wq)
743 return 0;
744
742 dev->cache.stopped = 1; 745 dev->cache.stopped = 1;
743 flush_workqueue(dev->cache.wq); 746 flush_workqueue(dev->cache.wq);
744 747
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index db4bf97c0e15..0ffb9b93e22d 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -833,7 +833,8 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
833 833
834 dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev); 834 dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
835 if (!dev->num_cnq) { 835 if (!dev->num_cnq) {
836 DP_ERR(dev, "not enough CNQ resources.\n"); 836 DP_ERR(dev, "Failed. At least one CNQ is required.\n");
837 rc = -ENOMEM;
837 goto init_err; 838 goto init_err;
838 } 839 }
839 840
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 875b17272d65..419a158e8fca 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1841,14 +1841,15 @@ static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1841 1841
1842static int qedr_update_qp_state(struct qedr_dev *dev, 1842static int qedr_update_qp_state(struct qedr_dev *dev,
1843 struct qedr_qp *qp, 1843 struct qedr_qp *qp,
1844 enum qed_roce_qp_state cur_state,
1844 enum qed_roce_qp_state new_state) 1845 enum qed_roce_qp_state new_state)
1845{ 1846{
1846 int status = 0; 1847 int status = 0;
1847 1848
1848 if (new_state == qp->state) 1849 if (new_state == cur_state)
1849 return 0; 1850 return 0;
1850 1851
1851 switch (qp->state) { 1852 switch (cur_state) {
1852 case QED_ROCE_QP_STATE_RESET: 1853 case QED_ROCE_QP_STATE_RESET:
1853 switch (new_state) { 1854 switch (new_state) {
1854 case QED_ROCE_QP_STATE_INIT: 1855 case QED_ROCE_QP_STATE_INIT:
@@ -1955,6 +1956,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1955 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev); 1956 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
1956 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); 1957 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1957 enum ib_qp_state old_qp_state, new_qp_state; 1958 enum ib_qp_state old_qp_state, new_qp_state;
1959 enum qed_roce_qp_state cur_state;
1958 int rc = 0; 1960 int rc = 0;
1959 1961
1960 DP_DEBUG(dev, QEDR_MSG_QP, 1962 DP_DEBUG(dev, QEDR_MSG_QP,
@@ -2086,18 +2088,23 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2086 SET_FIELD(qp_params.modify_flags, 2088 SET_FIELD(qp_params.modify_flags,
2087 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1); 2089 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2088 2090
2089 qp_params.ack_timeout = attr->timeout; 2091 /* The received timeout value is an exponent used like this:
2090 if (attr->timeout) { 2092 * "12.7.34 LOCAL ACK TIMEOUT
2091 u32 temp; 2093 * Value representing the transport (ACK) timeout for use by
2092 2094 * the remote, expressed as: 4.096 * 2^timeout [usec]"
2093 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000; 2095 * The FW expects timeout in msec so we need to divide the usec
2094 /* FW requires [msec] */ 2096 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2095 qp_params.ack_timeout = temp; 2097 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2096 } else { 2098 * The value of zero means infinite so we use a 'max_t' to make
2097 /* Infinite */ 2099 * sure that sub 1 msec values will be configured as 1 msec.
2100 */
2101 if (attr->timeout)
2102 qp_params.ack_timeout =
2103 1 << max_t(int, attr->timeout - 8, 0);
2104 else
2098 qp_params.ack_timeout = 0; 2105 qp_params.ack_timeout = 0;
2099 }
2100 } 2106 }
2107
2101 if (attr_mask & IB_QP_RETRY_CNT) { 2108 if (attr_mask & IB_QP_RETRY_CNT) {
2102 SET_FIELD(qp_params.modify_flags, 2109 SET_FIELD(qp_params.modify_flags,
2103 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1); 2110 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
@@ -2170,13 +2177,25 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2170 qp->dest_qp_num = attr->dest_qp_num; 2177 qp->dest_qp_num = attr->dest_qp_num;
2171 } 2178 }
2172 2179
2180 cur_state = qp->state;
2181
2182 /* Update the QP state before the actual ramrod to prevent a race with
2183 * fast path. Modifying the QP state to error will cause the device to
2184 * flush the CQEs and while polling the flushed CQEs will considered as
2185 * a potential issue if the QP isn't in error state.
2186 */
2187 if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2188 !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2189 qp->state = QED_ROCE_QP_STATE_ERR;
2190
2173 if (qp->qp_type != IB_QPT_GSI) 2191 if (qp->qp_type != IB_QPT_GSI)
2174 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx, 2192 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2175 qp->qed_qp, &qp_params); 2193 qp->qed_qp, &qp_params);
2176 2194
2177 if (attr_mask & IB_QP_STATE) { 2195 if (attr_mask & IB_QP_STATE) {
2178 if ((qp->qp_type != IB_QPT_GSI) && (!udata)) 2196 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2179 rc = qedr_update_qp_state(dev, qp, qp_params.new_state); 2197 rc = qedr_update_qp_state(dev, qp, cur_state,
2198 qp_params.new_state);
2180 qp->state = qp_params.new_state; 2199 qp->state = qp_params.new_state;
2181 } 2200 }
2182 2201
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index d656809f1217..415e09960017 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -130,6 +130,8 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
130 const unsigned char *dst_dev_addr); 130 const unsigned char *dst_dev_addr);
131 131
132int rdma_addr_size(struct sockaddr *addr); 132int rdma_addr_size(struct sockaddr *addr);
133int rdma_addr_size_in6(struct sockaddr_in6 *addr);
134int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
133 135
134int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, 136int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
135 const union ib_gid *dgid, 137 const union ib_gid *dgid,