aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2019-04-10 15:39:04 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2019-04-10 15:39:04 -0400
commit582549e3fbe137eb6ce9be591aca25c2222a36b4 (patch)
tree38edbb9c9e52d1a57a9a41bdea9473ec2424dda9
parented79cc87302bf7fbc87f05d655b998f866b4fed8 (diff)
parentd737b25b1ae0540ba13cbd45ebb9b58a1d6d7f0d (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe: "Several driver bug fixes posted in the last several weeks - Several bug fixes for the hfi1 driver 'TID RDMA' functionality merged into 5.1. Since TID RDMA is on by default these all seem to be regressions. - Wrong software permission checks on memory in mlx5 - Memory leak in vmw_pvrdma during driver remove - Several bug fixes for hns driver features merged into 5.1" * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: IB/hfi1: Do not flush send queue in the TID RDMA second leg RDMA/hns: Bugfix for SCC hem free RDMA/hns: Fix bug that caused srq creation to fail RDMA/vmw_pvrdma: Fix memory leak on pvrdma_pci_remove IB/mlx5: Reset access mask when looping inside page fault handler IB/hfi1: Fix the allocation of RSM table IB/hfi1: Eliminate opcode tests on mr deref IB/hfi1: Clear the IOWAIT pending bits when QP is put into error state IB/hfi1: Failed to drain send queue when QP is put into error state
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c26
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c4
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c4
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c31
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c3
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c2
9 files changed, 42 insertions, 41 deletions
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 612f04190ed8..9784c6c0d2ec 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -13232,7 +13232,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
13232 int total_contexts; 13232 int total_contexts;
13233 int ret; 13233 int ret;
13234 unsigned ngroups; 13234 unsigned ngroups;
13235 int qos_rmt_count; 13235 int rmt_count;
13236 int user_rmt_reduced; 13236 int user_rmt_reduced;
13237 u32 n_usr_ctxts; 13237 u32 n_usr_ctxts;
13238 u32 send_contexts = chip_send_contexts(dd); 13238 u32 send_contexts = chip_send_contexts(dd);
@@ -13294,10 +13294,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
13294 n_usr_ctxts = rcv_contexts - total_contexts; 13294 n_usr_ctxts = rcv_contexts - total_contexts;
13295 } 13295 }
13296 13296
13297 /* each user context requires an entry in the RMT */ 13297 /*
13298 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL); 13298 * The RMT entries are currently allocated as shown below:
13299 if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { 13299 * 1. QOS (0 to 128 entries);
13300 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count; 13300 * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
13301 * 3. VNIC (num_vnic_contexts).
13302 * It should be noted that PSM FECN oversubscribe num_vnic_contexts
13303 * entries of RMT because both VNIC and PSM could allocate any receive
13304 * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
13305 * and PSM FECN must reserve an RMT entry for each possible PSM receive
13306 * context.
13307 */
13308 rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
13309 if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
13310 user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
13301 dd_dev_err(dd, 13311 dd_dev_err(dd,
13302 "RMT size is reducing the number of user receive contexts from %u to %d\n", 13312 "RMT size is reducing the number of user receive contexts from %u to %d\n",
13303 n_usr_ctxts, 13313 n_usr_ctxts,
@@ -14285,9 +14295,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
14285 u64 reg; 14295 u64 reg;
14286 int i, idx, regoff, regidx; 14296 int i, idx, regoff, regidx;
14287 u8 offset; 14297 u8 offset;
14298 u32 total_cnt;
14288 14299
14289 /* there needs to be enough room in the map table */ 14300 /* there needs to be enough room in the map table */
14290 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) { 14301 total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
14302 if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
14291 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n"); 14303 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14292 return; 14304 return;
14293 } 14305 }
@@ -14341,7 +14353,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
14341 /* add rule 1 */ 14353 /* add rule 1 */
14342 add_rsm_rule(dd, RSM_INS_FECN, &rrd); 14354 add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14343 14355
14344 rmt->used += dd->num_user_contexts; 14356 rmt->used += total_cnt;
14345} 14357}
14346 14358
14347/* Initialize RSM for VNIC */ 14359/* Initialize RSM for VNIC */
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 9b643c2409cf..eba300330a02 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -898,7 +898,9 @@ void notify_error_qp(struct rvt_qp *qp)
898 if (!list_empty(&priv->s_iowait.list) && 898 if (!list_empty(&priv->s_iowait.list) &&
899 !(qp->s_flags & RVT_S_BUSY) && 899 !(qp->s_flags & RVT_S_BUSY) &&
900 !(priv->s_flags & RVT_S_BUSY)) { 900 !(priv->s_flags & RVT_S_BUSY)) {
901 qp->s_flags &= ~RVT_S_ANY_WAIT_IO; 901 qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
902 iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
903 iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
902 list_del_init(&priv->s_iowait.list); 904 list_del_init(&priv->s_iowait.list);
903 priv->s_iowait.lock = NULL; 905 priv->s_iowait.lock = NULL;
904 rvt_put_qp(qp); 906 rvt_put_qp(qp);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index e6726c1ab866..5991211d72bd 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -3088,7 +3088,7 @@ send_last:
3088 update_ack_queue(qp, next); 3088 update_ack_queue(qp, next);
3089 } 3089 }
3090 e = &qp->s_ack_queue[qp->r_head_ack_queue]; 3090 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3091 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { 3091 if (e->rdma_sge.mr) {
3092 rvt_put_mr(e->rdma_sge.mr); 3092 rvt_put_mr(e->rdma_sge.mr);
3093 e->rdma_sge.mr = NULL; 3093 e->rdma_sge.mr = NULL;
3094 } 3094 }
@@ -3166,7 +3166,7 @@ send_last:
3166 update_ack_queue(qp, next); 3166 update_ack_queue(qp, next);
3167 } 3167 }
3168 e = &qp->s_ack_queue[qp->r_head_ack_queue]; 3168 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3169 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { 3169 if (e->rdma_sge.mr) {
3170 rvt_put_mr(e->rdma_sge.mr); 3170 rvt_put_mr(e->rdma_sge.mr);
3171 e->rdma_sge.mr = NULL; 3171 e->rdma_sge.mr = NULL;
3172 } 3172 }
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index fdda33aca77f..43cbce7a19ea 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -5017,24 +5017,14 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
5017 make_tid_rdma_ack(qp, ohdr, ps)) 5017 make_tid_rdma_ack(qp, ohdr, ps))
5018 return 1; 5018 return 1;
5019 5019
5020 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { 5020 /*
5021 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) 5021 * Bail out if we can't send data.
5022 goto bail; 5022 * Be reminded that this check must been done after the call to
5023 /* We are in the error state, flush the work request. */ 5023 * make_tid_rdma_ack() because the responding QP could be in
5024 if (qp->s_last == READ_ONCE(qp->s_head)) 5024 * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
5025 goto bail; 5025 */
5026 /* If DMAs are in progress, we can't flush immediately. */ 5026 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
5027 if (iowait_sdma_pending(&priv->s_iowait)) { 5027 goto bail;
5028 qp->s_flags |= RVT_S_WAIT_DMA;
5029 goto bail;
5030 }
5031 clear_ahg(qp);
5032 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
5033 hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
5034 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
5035 /* will get called again */
5036 goto done_free_tx;
5037 }
5038 5028
5039 if (priv->s_flags & RVT_S_WAIT_ACK) 5029 if (priv->s_flags & RVT_S_WAIT_ACK)
5040 goto bail; 5030 goto bail;
@@ -5144,11 +5134,6 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
5144 hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2, 5134 hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
5145 middle, ps); 5135 middle, ps);
5146 return 1; 5136 return 1;
5147done_free_tx:
5148 hfi1_put_txreq(ps->s_txreq);
5149 ps->s_txreq = NULL;
5150 return 1;
5151
5152bail: 5137bail:
5153 hfi1_put_txreq(ps->s_txreq); 5138 hfi1_put_txreq(ps->s_txreq);
5154bail_no_tx: 5139bail_no_tx:
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index f1fec56f3ff4..8e29dbb5b5fb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -792,6 +792,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
792 idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk; 792 idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
793 dma_offset = offset = idx_offset * table->obj_size; 793 dma_offset = offset = idx_offset * table->obj_size;
794 } else { 794 } else {
795 u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
796
795 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); 797 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
796 /* mtt mhop */ 798 /* mtt mhop */
797 i = mhop.l0_idx; 799 i = mhop.l0_idx;
@@ -803,8 +805,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
803 hem_idx = i; 805 hem_idx = i;
804 806
805 hem = table->hem[hem_idx]; 807 hem = table->hem[hem_idx];
806 dma_offset = offset = (obj & (table->num_obj - 1)) * 808 dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
807 table->obj_size % mhop.bt_chunk_size; 809 mhop.bt_chunk_size;
808 if (mhop.hop_num == 2) 810 if (mhop.hop_num == 2)
809 dma_offset = offset = 0; 811 dma_offset = offset = 0;
810 } 812 }
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index b09f1cde2ff5..08be0e4eabcd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -746,7 +746,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
746 struct hns_roce_hem_table *table; 746 struct hns_roce_hem_table *table;
747 dma_addr_t dma_handle; 747 dma_addr_t dma_handle;
748 __le64 *mtts; 748 __le64 *mtts;
749 u32 s = start_index * sizeof(u64);
750 u32 bt_page_size; 749 u32 bt_page_size;
751 u32 i; 750 u32 i;
752 751
@@ -780,7 +779,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
780 return -EINVAL; 779 return -EINVAL;
781 780
782 mtts = hns_roce_table_find(hr_dev, table, 781 mtts = hns_roce_table_find(hr_dev, table,
783 mtt->first_seg + s / hr_dev->caps.mtt_entry_sz, 782 mtt->first_seg +
783 start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
784 &dma_handle); 784 &dma_handle);
785 if (!mtts) 785 if (!mtts)
786 return -ENOMEM; 786 return -ENOMEM;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 57c76eafef2f..66cdf625534f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -274,9 +274,6 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
274 wait_for_completion(&hr_qp->free); 274 wait_for_completion(&hr_qp->free);
275 275
276 if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) { 276 if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
277 if (hr_dev->caps.sccc_entry_sz)
278 hns_roce_table_put(hr_dev, &qp_table->sccc_table,
279 hr_qp->qpn);
280 if (hr_dev->caps.trrl_entry_sz) 277 if (hr_dev->caps.trrl_entry_sz)
281 hns_roce_table_put(hr_dev, &qp_table->trrl_table, 278 hns_roce_table_put(hr_dev, &qp_table->trrl_table,
282 hr_qp->qpn); 279 hr_qp->qpn);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index c20bfc41ecf1..0aa10ebda5d9 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -585,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
585 struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem); 585 struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
586 bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE; 586 bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
587 bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH; 587 bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
588 u64 access_mask = ODP_READ_ALLOWED_BIT; 588 u64 access_mask;
589 u64 start_idx, page_mask; 589 u64 start_idx, page_mask;
590 struct ib_umem_odp *odp; 590 struct ib_umem_odp *odp;
591 size_t size; 591 size_t size;
@@ -607,6 +607,7 @@ next_mr:
607 page_shift = mr->umem->page_shift; 607 page_shift = mr->umem->page_shift;
608 page_mask = ~(BIT(page_shift) - 1); 608 page_mask = ~(BIT(page_shift) - 1);
609 start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift; 609 start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
610 access_mask = ODP_READ_ALLOWED_BIT;
610 611
611 if (prefetch && !downgrade && !mr->umem->writable) { 612 if (prefetch && !downgrade && !mr->umem->writable) {
612 /* prefetch with write-access must 613 /* prefetch with write-access must
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 6d8b3e0de57a..ec41400fec0c 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -1131,6 +1131,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
1131 pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); 1131 pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
1132 pvrdma_page_dir_cleanup(dev, &dev->async_pdir); 1132 pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
1133 pvrdma_free_slots(dev); 1133 pvrdma_free_slots(dev);
1134 dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
1135 dev->dsrbase);
1134 1136
1135 iounmap(dev->regs); 1137 iounmap(dev->regs);
1136 kfree(dev->sgid_tbl); 1138 kfree(dev->sgid_tbl);