author	Linus Torvalds <torvalds@linux-foundation.org>	2018-05-24 17:12:05 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-05-24 17:12:05 -0400
commit	34b48b8789adf344d1012fda5d796994d4ddfc14 (patch)
tree	7f4f7304e2e7af0e66a30e8a03c016cb65ca4f0f
parent	d7b66b4ab0344dcc4bf169e0bbfda6234cdf6966 (diff)
parent	55ba49cbcef37053d973f9a45bc58818c333fe13 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe:
 "This is pretty much just the usual array of smallish driver bugs.

   - remove bouncing addresses from the MAINTAINERS file

   - kernel oops and bad error handling fixes for hfi, i40iw, cxgb4,
     and hns drivers

   - various small LOC behavioral/operational bugs in mlx5, hns, qedr
     and i40iw drivers

   - two fixes for patches already sent during the merge window

   - a long-standing bug related to not decreasing the pinned pages
     count in the right MM was found and fixed"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (28 commits)
  RDMA/hns: Move the location for initializing tmp_len
  RDMA/hns: Bugfix for cq record db for kernel
  IB/uverbs: Fix uverbs_attr_get_obj
  RDMA/qedr: Fix doorbell bar mapping for dpi > 1
  IB/umem: Use the correct mm during ib_umem_release
  iw_cxgb4: Fix an error handling path in 'c4iw_get_dma_mr()'
  RDMA/i40iw: Avoid panic when reading back the IRQ affinity hint
  RDMA/i40iw: Avoid reference leaks when processing the AEQ
  RDMA/i40iw: Avoid panic when objects are being created and destroyed
  RDMA/hns: Fix the bug with NULL pointer
  RDMA/hns: Set NULL for __internal_mr
  RDMA/hns: Enable inner_pa_vld filed of mpt
  RDMA/hns: Set desc_dma_addr for zero when free cmq desc
  RDMA/hns: Fix the bug with rq sge
  RDMA/hns: Not support qp transition from reset to reset for hip06
  RDMA/hns: Add return operation when configured global param fail
  RDMA/hns: Update convert function of endian format
  RDMA/hns: Load the RoCE dirver automatically
  RDMA/hns: Bugfix for rq record db for kernel
  RDMA/hns: Add rq inline flags judgement
  ...
-rw-r--r--	MAINTAINERS	6
-rw-r--r--	drivers/infiniband/core/umem.c	7
-rw-r--r--	drivers/infiniband/hw/cxgb4/mem.c	4
-rw-r--r--	drivers/infiniband/hw/hfi1/chip.c	4
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_cq.c	1
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_hw_v1.c	3
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_hw_v2.c	32
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_main.c	2
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_qp.c	10
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw.h	1
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_cm.c	2
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_hw.c	4
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_main.c	7
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_verbs.c	13
-rw-r--r--	drivers/infiniband/hw/i40iw/i40iw_verbs.h	1
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c	2
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c	18
-rw-r--r--	drivers/infiniband/hw/qedr/verbs.c	60
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_verbs.c	10
-rw-r--r--	drivers/infiniband/ulp/srpt/Kconfig	2
-rw-r--r--	drivers/nvme/host/Kconfig	2
-rw-r--r--	drivers/nvme/target/Kconfig	2
-rw-r--r--	drivers/staging/lustre/lnet/Kconfig	2
-rw-r--r--	fs/cifs/Kconfig	2
-rw-r--r--	include/rdma/ib_umem.h	1
-rw-r--r--	include/rdma/uverbs_ioctl.h	10
-rw-r--r--	net/9p/Kconfig	2
-rw-r--r--	net/rds/Kconfig	2
-rw-r--r--	net/sunrpc/Kconfig	2
29 files changed, 116 insertions, 98 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 078fd80f664f..a9ca122957e9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5388,7 +5388,6 @@ S: Maintained
 F:	drivers/iommu/exynos-iommu.c
 
 EZchip NPS platform support
-M:	Elad Kanfi <eladkan@mellanox.com>
 M:	Vineet Gupta <vgupta@synopsys.com>
 S:	Supported
 F:	arch/arc/plat-eznps
@@ -9021,7 +9020,6 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
 F:	drivers/net/ethernet/mellanox/mlx5/core/en_*
 
 MELLANOX ETHERNET INNOVA DRIVER
-M:	Ilan Tayari <ilant@mellanox.com>
 R:	Boris Pismenny <borisp@mellanox.com>
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -9031,7 +9029,6 @@ F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
 F:	include/linux/mlx5/mlx5_ifc_fpga.h
 
 MELLANOX ETHERNET INNOVA IPSEC DRIVER
-M:	Ilan Tayari <ilant@mellanox.com>
 R:	Boris Pismenny <borisp@mellanox.com>
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -9087,7 +9084,6 @@ F: include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX5 core VPI driver
 M:	Saeed Mahameed <saeedm@mellanox.com>
-M:	Matan Barak <matanb@mellanox.com>
 M:	Leon Romanovsky <leonro@mellanox.com>
 L:	netdev@vger.kernel.org
 L:	linux-rdma@vger.kernel.org
@@ -9098,7 +9094,6 @@ F: drivers/net/ethernet/mellanox/mlx5/core/
 F:	include/linux/mlx5/
 
 MELLANOX MLX5 IB driver
-M:	Matan Barak <matanb@mellanox.com>
 M:	Leon Romanovsky <leonro@mellanox.com>
 L:	linux-rdma@vger.kernel.org
 W:	http://www.mellanox.com
@@ -9832,7 +9827,6 @@ F: net/netfilter/xt_CONNSECMARK.c
 F:	net/netfilter/xt_SECMARK.c
 
 NETWORKING [TLS]
-M:	Ilya Lesokhin <ilyal@mellanox.com>
 M:	Aviad Yehezkel <aviadye@mellanox.com>
 M:	Dave Watson <davejwatson@fb.com>
 L:	netdev@vger.kernel.org
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 9a4e899d94b3..2b6c9b516070 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -119,7 +119,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	umem->length = size;
 	umem->address = addr;
 	umem->page_shift = PAGE_SHIFT;
-	umem->pid = get_task_pid(current, PIDTYPE_PID);
 	/*
 	 * We ask for writable memory if any of the following
 	 * access flags are set. "Local write" and "remote write"
@@ -132,7 +131,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		    IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
 
 	if (access & IB_ACCESS_ON_DEMAND) {
-		put_pid(umem->pid);
 		ret = ib_umem_odp_get(context, umem, access);
 		if (ret) {
 			kfree(umem);
@@ -148,7 +146,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list) {
-		put_pid(umem->pid);
 		kfree(umem);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -231,7 +228,6 @@ out:
 	if (ret < 0) {
 		if (need_release)
 			__ib_umem_release(context->device, umem, 0);
-		put_pid(umem->pid);
 		kfree(umem);
 	} else
 		current->mm->pinned_vm = locked;
@@ -274,8 +270,7 @@ void ib_umem_release(struct ib_umem *umem)
 
 	__ib_umem_release(umem->context->device, umem, 1);
 
-	task = get_pid_task(umem->pid, PIDTYPE_PID);
-	put_pid(umem->pid);
+	task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
 	if (!task)
 		goto out;
 	mm = get_task_mm(task);
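
The point of the umem hunks above: pinned-page accounting must be undone in
the same mm that was charged when ib_umem_get() pinned the pages, and the
ucontext's tgid identifies that process even when release runs from another
task or a workqueue. A minimal sketch of the release-side pattern, with
simplified locking and error handling rather than the kernel's exact code:

	/* Sketch: uncharge pinned pages against the mm that owns the ucontext. */
	static void sketch_uncharge_pinned(struct ib_umem *umem, unsigned long npages)
	{
		struct task_struct *task;
		struct mm_struct *mm;

		task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
		if (!task)
			return;
		mm = get_task_mm(task);		/* reference on the charged mm */
		put_task_struct(task);
		if (!mm)
			return;

		down_write(&mm->mmap_sem);
		mm->pinned_vm -= npages;	/* the counter ib_umem_get() raised */
		up_write(&mm->mmap_sem);
		mmput(mm);
	}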
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index e90f2fd8dc16..1445918e3239 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -489,10 +489,10 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 err_dereg_mem:
 	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
 		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
-err_free_wr_wait:
-	c4iw_put_wr_wait(mhp->wr_waitp);
 err_free_skb:
 	kfree_skb(mhp->dereg_skb);
+err_free_wr_wait:
+	c4iw_put_wr_wait(mhp->wr_waitp);
 err_free_mhp:
 	kfree(mhp);
 	return ERR_PTR(ret);
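
The cxgb4 hunk swaps the two unwind labels so cleanup runs in reverse order
of allocation: a goto taken after step N must fall through the cleanups for
steps N down to 1 and nothing else. A sketch of the rule with hypothetical
helpers (alloc_a/alloc_b/use are illustrative, not driver functions):

	static int sketch_setup(void)
	{
		void *a, *b;
		int ret;

		a = alloc_a();			/* step 1 */
		if (!a)
			return -ENOMEM;

		b = alloc_b();			/* step 2 */
		if (!b) {
			ret = -ENOMEM;
			goto err_free_a;	/* only step 1 to undo */
		}

		ret = use(a, b);
		if (ret)
			goto err_free_b;	/* undo step 2, then fall into step 1 */
		return 0;

	err_free_b:
		free_b(b);
	err_free_a:
		free_a(a);
		return ret;
	}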
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index e6a60fa59f2b..e6bdd0c1e80a 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -5944,6 +5944,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
 	u64 status;
 	u32 sw_index;
 	int i = 0;
+	unsigned long irq_flags;
 
 	sw_index = dd->hw_to_sw[hw_context];
 	if (sw_index >= dd->num_send_contexts) {
@@ -5953,10 +5954,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
 		return;
 	}
 	sci = &dd->send_contexts[sw_index];
+	spin_lock_irqsave(&dd->sc_lock, irq_flags);
 	sc = sci->sc;
 	if (!sc) {
 		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
 			   sw_index, hw_context);
+		spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
 		return;
 	}
 
@@ -5978,6 +5981,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
 	 */
 	if (sc->type != SC_USER)
 		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
+	spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
 
 	/*
 	 * Update the counters for the corresponding status bits.
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 14734d0d0b76..3a485f50fede 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -377,6 +377,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 
 		hr_cq->set_ci_db = hr_cq->db.db_record;
 		*hr_cq->set_ci_db = 0;
+		hr_cq->db_en = 1;
 	}
 
 	/* Init mmt table and write buff address to mtt table */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 47e1b6ac1e1a..8013d69c5ac4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -722,6 +722,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	free_mr->mr_free_pd = to_hr_pd(pd);
 	free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
 	free_mr->mr_free_pd->ibpd.uobject = NULL;
+	free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
 	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
 
 	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
@@ -1036,7 +1037,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 
 	do {
 		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
-		if (ret < 0) {
+		if (ret < 0 && hr_qp) {
 			dev_err(dev,
 				"(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
 				hr_qp->qpn, ret, hr_mr->key, ne);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 25916e8522ed..1f0965bb64ee 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -142,8 +142,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned long flags;
 	unsigned int ind;
 	void *wqe = NULL;
-	u32 tmp_len = 0;
 	bool loopback;
+	u32 tmp_len;
 	int ret = 0;
 	u8 *smac;
 	int nreq;
@@ -189,6 +189,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 		owner_bit =
 			~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+		tmp_len = 0;
 
 		/* Corresponding to the QP type, wqe process separately */
 		if (ibqp->qp_type == IB_QPT_GSI) {
@@ -547,16 +548,20 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		}
 
 		if (i < hr_qp->rq.max_gs) {
-			dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
-			dseg[i].addr = 0;
+			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+			dseg->addr = 0;
 		}
 
 		/* rq support inline data */
-		sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
-		hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
-		for (i = 0; i < wr->num_sge; i++) {
-			sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
-			sge_list[i].len = wr->sg_list[i].length;
+		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+			sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
+			hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
+				(u32)wr->num_sge;
+			for (i = 0; i < wr->num_sge; i++) {
+				sge_list[i].addr =
+					(void *)(u64)wr->sg_list[i].addr;
+				sge_list[i].len = wr->sg_list[i].length;
+			}
 		}
 
 		hr_qp->rq.wrid[ind] = wr->wr_id;
@@ -613,6 +618,8 @@ static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
 	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
 			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
 			 DMA_BIDIRECTIONAL);
+
+	ring->desc_dma_addr = 0;
 	kfree(ring->desc);
 }
 
@@ -1081,6 +1088,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
 	if (ret) {
 		dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
 			ret);
+		return ret;
 	}
 
 	/* Get pf resource owned by every pf */
@@ -1372,6 +1380,8 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 
 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
 		     mr->type == MR_TYPE_MR ? 0 : 1);
+	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
+		     1);
 	mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
 
 	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
@@ -2169,6 +2179,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
 				    struct hns_roce_v2_qp_context *context,
 				    struct hns_roce_v2_qp_context *qpc_mask)
 {
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 
 	/*
@@ -2281,7 +2292,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
 	context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
 	qpc_mask->rq_db_record_addr = 0;
 
-	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
+	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
+		     (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
 	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
 
 	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
@@ -4703,6 +4715,8 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
 	{0, }
 };
 
+MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
+
 static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
 				  struct hnae3_handle *handle)
 {
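
The MODULE_DEVICE_TABLE() line added above is the whole of the "Load the
RoCE dirver automatically" fix: the macro exports the PCI ID table as
MODULE_ALIAS() strings, which udev/modprobe match against newly discovered
devices to autoload the module. A generic sketch, with an illustrative
device ID rather than the driver's real table:

	#include <linux/module.h>
	#include <linux/pci.h>

	static const struct pci_device_id sketch_pci_tbl[] = {
		{ PCI_VDEVICE(HUAWEI, 0xa222), 0 },	/* illustrative ID */
		{ 0, }
	};
	/* Emits pci:v...d... aliases so modprobe can autoload the module. */
	MODULE_DEVICE_TABLE(pci, sketch_pci_tbl);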
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 9d48bc07a9e6..96fb6a9ed93c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -199,7 +199,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
 
 	memset(props, 0, sizeof(*props));
 
-	props->sys_image_guid = cpu_to_be32(hr_dev->sys_image_guid);
+	props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
 	props->max_mr_size = (u64)(~(0ULL));
 	props->page_size_cap = hr_dev->caps.page_size_cap;
 	props->vendor_id = hr_dev->vendor_id;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index d4aad34c21e2..baaf906f7c2e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -660,6 +660,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 			goto err_rq_sge_list;
 		}
 		*hr_qp->rdb.db_record = 0;
+		hr_qp->rdb_en = 1;
 	}
 
 	/* Allocate QP buf */
@@ -955,7 +956,14 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	}
 
 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-		ret = 0;
+		if (hr_dev->caps.min_wqes) {
+			ret = -EPERM;
+			dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
+				new_state);
+		} else {
+			ret = 0;
+		}
+
 		goto out;
 	}
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index d5d8c1be345a..2f2b4426ded7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -207,6 +207,7 @@ struct i40iw_msix_vector {
 	u32 irq;
 	u32 cpu_affinity;
 	u32 ceq_id;
+	cpumask_t mask;
 };
 
 struct l2params_work {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 4cfa8f4647e2..f7c6fd9ff6e2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -2093,7 +2093,7 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
 	if (netif_is_bond_slave(netdev))
 		netdev = netdev_master_upper_dev_get(netdev);
 
-	neigh = dst_neigh_lookup(dst, &dst_addr);
+	neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
 
 	rcu_read_lock();
 	if (neigh) {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 6139836fb533..c9f62ca7643c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -331,7 +331,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
 		switch (info->ae_id) {
 		case I40IW_AE_LLP_FIN_RECEIVED:
 			if (qp->term_flags)
-				continue;
+				break;
 			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
 				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
 				if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
@@ -360,7 +360,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
 			break;
 		case I40IW_AE_LLP_CONNECTION_RESET:
 			if (atomic_read(&iwqp->close_timer_started))
-				continue;
+				break;
 			i40iw_cm_disconn(iwqp);
 			break;
 		case I40IW_AE_QP_SUSPEND_COMPLETE:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 9cd0d3ef9057..05001e6da1f8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -687,7 +687,6 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
 					       struct i40iw_msix_vector *msix_vec)
 {
 	enum i40iw_status_code status;
-	cpumask_t mask;
 
 	if (iwdev->msix_shared && !ceq_id) {
 		tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
@@ -697,9 +696,9 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
 		status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
 	}
 
-	cpumask_clear(&mask);
-	cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
-	irq_set_affinity_hint(msix_vec->irq, &mask);
+	cpumask_clear(&msix_vec->mask);
+	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
+	irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
 
 	if (status) {
 		i40iw_pr_err("ceq irq config fail\n");
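
Moving the cpumask off the stack of i40iw_configure_ceq_vector() and into
struct i40iw_msix_vector matters because irq_set_affinity_hint() stores the
pointer it is handed rather than copying the mask; a later read of
/proc/irq/*/affinity_hint dereferences that pointer, which for a stack mask
is long dead. A sketch of the lifetime rule, using a hypothetical vector
type:

	struct sketch_vector {
		u32 irq;
		u32 cpu;
		cpumask_t mask;		/* must outlive the registered hint */
	};

	static void sketch_set_affinity(struct sketch_vector *vec)
	{
		cpumask_clear(&vec->mask);
		cpumask_set_cpu(vec->cpu, &vec->mask);
		/* The kernel keeps this pointer; stack storage would dangle. */
		irq_set_affinity_hint(vec->irq, &vec->mask);
	}

	static void sketch_clear_affinity(struct sketch_vector *vec)
	{
		/* Clear the hint before the containing structure is freed. */
		irq_set_affinity_hint(vec->irq, NULL);
	}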
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 40e4f5ab2b46..68679ad4c6da 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -394,6 +394,7 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
 
 	list_for_each_entry(iwpbl, pbl_list, list) {
 		if (iwpbl->user_base == va) {
+			iwpbl->on_list = false;
 			list_del(&iwpbl->list);
 			return iwpbl;
 		}
@@ -614,6 +615,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 		return ERR_PTR(-ENOMEM);
 
 	iwqp = (struct i40iw_qp *)mem;
+	iwqp->allocated_buffer = mem;
 	qp = &iwqp->sc_qp;
 	qp->back_qp = (void *)iwqp;
 	qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
@@ -642,7 +644,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 		goto error;
 	}
 
-	iwqp->allocated_buffer = mem;
 	iwqp->iwdev = iwdev;
 	iwqp->iwpd = iwpd;
 	iwqp->ibqp.qp_num = qp_num;
@@ -1898,6 +1899,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 			goto error;
 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
 		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+		iwpbl->on_list = true;
 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
 		break;
 	case IW_MEMREG_TYPE_CQ:
@@ -1908,6 +1910,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 
 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
 		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+		iwpbl->on_list = true;
 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IW_MEMREG_TYPE_MEM:
@@ -2045,14 +2048,18 @@ static void i40iw_del_memlist(struct i40iw_mr *iwmr,
 	switch (iwmr->type) {
 	case IW_MEMREG_TYPE_CQ:
 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
-		if (!list_empty(&ucontext->cq_reg_mem_list))
+		if (iwpbl->on_list) {
+			iwpbl->on_list = false;
 			list_del(&iwpbl->list);
+		}
 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IW_MEMREG_TYPE_QP:
 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
-		if (!list_empty(&ucontext->qp_reg_mem_list))
+		if (iwpbl->on_list) {
+			iwpbl->on_list = false;
 			list_del(&iwpbl->list);
+		}
 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
 		break;
 	default:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 9067443cd311..76cf173377ab 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -78,6 +78,7 @@ struct i40iw_pbl {
 	};
 
 	bool pbl_allocated;
+	bool on_list;
 	u64 user_base;
 	struct i40iw_pble_alloc pble_alloc;
 	struct i40iw_mr *iwmr;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b4d8ff8ab807..69716a7ea993 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2416,7 +2416,7 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
 	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
 }
 
-static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
+static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
 			   bool inner)
 {
 	if (inner) {
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 87b7c1be2a11..2193dc1765fb 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -484,11 +484,6 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
 	return 1;
 }
 
-static int first_med_bfreg(void)
-{
-	return 1;
-}
-
 enum {
 	/* this is the first blue flame register in the array of bfregs assigned
 	 * to a processes. Since we do not use it for blue flame but rather
@@ -514,6 +509,12 @@ static int num_med_bfreg(struct mlx5_ib_dev *dev,
 	return n >= 0 ? n : 0;
 }
 
+static int first_med_bfreg(struct mlx5_ib_dev *dev,
+			   struct mlx5_bfreg_info *bfregi)
+{
+	return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
+}
+
 static int first_hi_bfreg(struct mlx5_ib_dev *dev,
 			  struct mlx5_bfreg_info *bfregi)
 {
@@ -541,10 +542,13 @@ static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
 static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
 				 struct mlx5_bfreg_info *bfregi)
 {
-	int minidx = first_med_bfreg();
+	int minidx = first_med_bfreg(dev, bfregi);
 	int i;
 
-	for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) {
+	if (minidx < 0)
+		return minidx;
+
+	for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
 		if (bfregi->count[i] < bfregi->count[minidx])
 			minidx = i;
 		if (!bfregi->count[minidx])
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 7d3763b2e01c..3f9afc02d166 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -401,49 +401,47 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
 	struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
 	struct qedr_dev *dev = get_qedr_dev(context->device);
-	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
-	u64 unmapped_db = dev->db_phys_addr;
-	unsigned long len = (vma->vm_end - vma->vm_start);
-	int rc = 0;
-	bool found;
-
-	DP_DEBUG(dev, QEDR_MSG_INIT,
-		 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
-		 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
-	if (vma->vm_start & (PAGE_SIZE - 1)) {
-		DP_ERR(dev, "Vma_start not page aligned = %ld\n",
-		       vma->vm_start);
-		return -EINVAL;
-	}
-
-	found = qedr_search_mmap(ucontext, vm_page, len);
-	if (!found) {
-		DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
-		       vma->vm_pgoff);
-		return -EINVAL;
-	}
-
-	DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
-
-	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
-						     dev->db_size))) {
-		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
-		if (vma->vm_flags & VM_READ) {
-			DP_ERR(dev, "Trying to map doorbell bar for read\n");
-			return -EPERM;
-		}
-
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-					PAGE_SIZE, vma->vm_page_prot);
-	} else {
-		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
-		rc = remap_pfn_range(vma, vma->vm_start,
-				     vma->vm_pgoff, len, vma->vm_page_prot);
-	}
-	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
-	return rc;
+	unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long len = (vma->vm_end - vma->vm_start);
+	unsigned long dpi_start;
+
+	dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
+
+	DP_DEBUG(dev, QEDR_MSG_INIT,
+		 "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
+		 (void *)vma->vm_start, (void *)vma->vm_end,
+		 (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
+
+	if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
+		DP_ERR(dev,
+		       "failed mmap, adrresses must be page aligned: start=0x%pK, end=0x%pK\n",
+		       (void *)vma->vm_start, (void *)vma->vm_end);
+		return -EINVAL;
+	}
+
+	if (!qedr_search_mmap(ucontext, phys_addr, len)) {
+		DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
+		       vma->vm_pgoff);
+		return -EINVAL;
+	}
+
+	if (phys_addr < dpi_start ||
+	    ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
+		DP_ERR(dev,
+		       "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
+		       (void *)phys_addr, (void *)dpi_start,
+		       ucontext->dpi_size);
+		return -EINVAL;
+	}
+
+	if (vma->vm_flags & VM_READ) {
+		DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
+		return -EINVAL;
+	}
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
+				  vma->vm_page_prot);
 }
 
 struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 2cb52fd48cf1..73a00a1c06f6 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -761,7 +761,6 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
 	unsigned int mask;
 	unsigned int length = 0;
 	int i;
-	int must_sched;
 
 	while (wr) {
 		mask = wr_opcode_mask(wr->opcode, qp);
@@ -791,14 +790,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
 		wr = wr->next;
 	}
 
-	/*
-	 * Must sched in case of GSI QP because ib_send_mad() hold irq lock,
-	 * and the requester call ip_local_out_sk() that takes spin_lock_bh.
-	 */
-	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
-			(queue_count(qp->sq.queue) > 1);
-
-	rxe_run_task(&qp->req.task, must_sched);
+	rxe_run_task(&qp->req.task, 1);
 	if (unlikely(qp->req.state == QP_STATE_ERROR))
 		rxe_run_task(&qp->comp.task, 1);
 
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
index fb8b7182f05e..25bf6955b6d0 100644
--- a/drivers/infiniband/ulp/srpt/Kconfig
+++ b/drivers/infiniband/ulp/srpt/Kconfig
@@ -1,6 +1,6 @@
 config INFINIBAND_SRPT
 	tristate "InfiniBand SCSI RDMA Protocol target support"
-	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
+	depends on INFINIBAND_ADDR_TRANS && TARGET_CORE
 	---help---
 
 	Support for the SCSI RDMA Protocol (SRP) Target driver. The
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 88a8b5916624..dbb7464c018c 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -27,7 +27,7 @@ config NVME_FABRICS
 
 config NVME_RDMA
 	tristate "NVM Express over Fabrics RDMA host driver"
-	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
+	depends on INFINIBAND_ADDR_TRANS && BLOCK
 	select NVME_CORE
 	select NVME_FABRICS
 	select SG_POOL
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 3c7b61ddb0d1..7595664ee753 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -27,7 +27,7 @@ config NVME_TARGET_LOOP
 
 config NVME_TARGET_RDMA
 	tristate "NVMe over Fabrics RDMA target support"
-	depends on INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on INFINIBAND_ADDR_TRANS
 	depends on NVME_TARGET
 	select SGL_ALLOC
 	help
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig
index ad049e6f24e4..f3b1ad4bd3dc 100644
--- a/drivers/staging/lustre/lnet/Kconfig
+++ b/drivers/staging/lustre/lnet/Kconfig
@@ -34,7 +34,7 @@ config LNET_SELFTEST
 
 config LNET_XPRT_IB
 	tristate "LNET infiniband support"
-	depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on LNET && PCI && INFINIBAND_ADDR_TRANS
 	default LNET && INFINIBAND
 	help
 	  This option allows the LNET users to use infiniband as an
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 5f132d59dfc2..d61e2de8d0eb 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -197,7 +197,7 @@ config CIFS_SMB311
 
 config CIFS_SMB_DIRECT
 	bool "SMB Direct support (Experimental)"
-	depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
+	depends on CIFS=m && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND_ADDR_TRANS=y
 	help
 	  Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1.
 	  SMB Direct allows transferring SMB packets over RDMA. If unsure,
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 23159dd5be18..a1fd63871d17 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -48,7 +48,6 @@ struct ib_umem {
 	int writable;
 	int hugetlb;
 	struct work_struct work;
-	struct pid *pid;
 	struct mm_struct *mm;
 	unsigned long diff;
 	struct ib_umem_odp *odp_data;
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 4a4201d997a7..095383a4bd1a 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -411,13 +411,13 @@ static inline int uverbs_attr_get_enum_id(const struct uverbs_attr_bundle *attrs
 static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_bundle,
 					u16 idx)
 {
-	struct ib_uobject *uobj =
-		uverbs_attr_get(attrs_bundle, idx)->obj_attr.uobject;
+	const struct uverbs_attr *attr;
 
-	if (IS_ERR(uobj))
-		return uobj;
+	attr = uverbs_attr_get(attrs_bundle, idx);
+	if (IS_ERR(attr))
+		return ERR_CAST(attr);
 
-	return uobj->object;
+	return attr->obj_attr.uobject->object;
 }
 
 static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
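
The uverbs_attr_get_obj() fix is an instance of the general ERR_PTR rule:
check the pointer you actually received before dereferencing anything
reached through it, and use ERR_CAST() to hand the encoded error back under
the new return type. The old code read ->obj_attr.uobject off a possibly
error-encoded attr pointer, which is an oops rather than an error return. A
sketch with hypothetical types:

	/* Hypothetical container and accessor, for illustration only. */
	struct sketch_attr {
		void *payload;
	};

	static inline void *sketch_get_payload(const struct sketch_attr *attr)
	{
		if (IS_ERR(attr))		/* test before any dereference */
			return ERR_CAST(attr);	/* re-type the encoded error */

		return attr->payload;
	}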
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index e6014e0e51f7..46c39f7da444 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -32,7 +32,7 @@ config NET_9P_XEN
 
 
 config NET_9P_RDMA
-	depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on INET && INFINIBAND_ADDR_TRANS
 	tristate "9P RDMA Transport (Experimental)"
 	help
 	  This builds support for an RDMA transport.
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index bffde4b46c5d..1a31502ee7db 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -8,7 +8,7 @@ config RDS
 
 config RDS_RDMA
 	tristate "RDS over Infiniband"
-	depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on RDS && INFINIBAND_ADDR_TRANS
 	---help---
 	  Allow RDS to use Infiniband as a transport.
 	  This transport supports RDMA operations.
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index ac09ca803296..6358e5271070 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -50,7 +50,7 @@ config SUNRPC_DEBUG
 
 config SUNRPC_XPRT_RDMA
 	tristate "RPC-over-RDMA transport"
-	depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on SUNRPC && INFINIBAND_ADDR_TRANS
 	default SUNRPC && INFINIBAND
 	select SG_POOL
 	help