Diffstat (limited to 'drivers/infiniband/hw/mlx5/qp.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c  227
1 file changed, 115 insertions, 112 deletions
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 6f521a3418e8..307bdbca8938 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -64,7 +64,7 @@ static const u32 mlx5_ib_opcode[] = {
 	[IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
 	[IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
 	[IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
-	[IB_WR_FAST_REG_MR] = MLX5_OPCODE_UMR,
+	[IB_WR_REG_MR] = MLX5_OPCODE_UMR,
 	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
 	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
 	[MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
@@ -1838,9 +1838,9 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
 			     struct ib_send_wr *wr)
 {
-	memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
-	dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
-	dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+	memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
+	dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
+	dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
 }
 
 static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
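
Note (not part of the patch): the wr->wr.ud union accesses above become ud_wr() calls. As a rough sketch of the new-style work-request containers this patch assumes (per-type structs in include/rdma/ib_verbs.h), each WR type embeds struct ib_send_wr and is recovered with a container_of() helper; rdma_wr(), reg_wr() and sig_handover_wr() (and the driver-local umr_wr()) used later in this diff follow the same pattern:

	/* Sketch only -- abridged from the assumed ib_verbs.h definitions. */
	struct ib_ud_wr {
		struct ib_send_wr wr;	/* embedded generic WR */
		struct ib_ah *ah;
		u32 remote_qpn;
		u32 remote_qkey;
		/* remaining fields omitted */
	};

	static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
	{
		return container_of(wr, struct ib_ud_wr, wr);
	}
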
@@ -1896,22 +1896,24 @@ static __be64 sig_mkey_mask(void)
 	return cpu_to_be64(result);
 }
 
-static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
-				 struct ib_send_wr *wr, int li)
+static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
+			    struct mlx5_ib_mr *mr)
 {
-	memset(umr, 0, sizeof(*umr));
-
-	if (li) {
-		umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-		umr->flags = 1 << 7;
-		return;
-	}
+	int ndescs = mr->ndescs;
 
-	umr->flags = (1 << 5); /* fail if not free */
-	umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
+	memset(umr, 0, sizeof(*umr));
+	umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+	umr->klm_octowords = get_klm_octo(ndescs);
 	umr->mkey_mask = frwr_mkey_mask();
 }
 
+static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
+{
+	memset(umr, 0, sizeof(*umr));
+	umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+	umr->flags = 1 << 7;
+}
+
 static __be64 get_umr_reg_mr_mask(void)
 {
 	u64 result;
@@ -1952,7 +1954,7 @@ static __be64 get_umr_update_mtt_mask(void)
 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
 				struct ib_send_wr *wr)
 {
-	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+	struct mlx5_umr_wr *umrwr = umr_wr(wr);
 
 	memset(umr, 0, sizeof(*umr));
 
@@ -1987,29 +1989,31 @@ static u8 get_umr_flags(int acc)
 		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
 }
 
-static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
-			     int li, int *writ)
+static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
+			     struct mlx5_ib_mr *mr,
+			     u32 key, int access)
 {
-	memset(seg, 0, sizeof(*seg));
-	if (li) {
-		seg->status = MLX5_MKEY_STATUS_FREE;
-		return;
-	}
+	int ndescs = ALIGN(mr->ndescs, 8) >> 1;
 
-	seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
-		     MLX5_ACCESS_MODE_MTT;
-	*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
-	seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
+	memset(seg, 0, sizeof(*seg));
+	seg->flags = get_umr_flags(access) | MLX5_ACCESS_MODE_MTT;
+	seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
 	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
-	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
-	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
-	seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
-	seg->log2_page_size = wr->wr.fast_reg.page_shift;
+	seg->start_addr = cpu_to_be64(mr->ibmr.iova);
+	seg->len = cpu_to_be64(mr->ibmr.length);
+	seg->xlt_oct_size = cpu_to_be32(ndescs);
+	seg->log2_page_size = ilog2(mr->ibmr.page_size);
+}
+
+static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
+{
+	memset(seg, 0, sizeof(*seg));
+	seg->status = MLX5_MKEY_STATUS_FREE;
 }
 
 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
 {
-	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+	struct mlx5_umr_wr *umrwr = umr_wr(wr);
 
 	memset(seg, 0, sizeof(*seg));
 	if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
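
Note (not part of the patch): xlt_oct_size is given in 16-byte octowords and each MTT descriptor is 8 bytes, so two descriptors fit in one octoword. The old code therefore used (page_list_len + 1) / 2; the new ALIGN(mr->ndescs, 8) >> 1 keeps the same two-per-octoword arithmetic but also rounds the descriptor count up to a multiple of 8, presumably to satisfy a device alignment requirement. For example, ndescs = 5 yields ALIGN(5, 8) = 8 and xlt_oct_size = 4, where the old formula would have produced 3.
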
@@ -2028,21 +2032,14 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w
 			mlx5_mkey_variant(umrwr->mkey));
 }
 
-static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
-			   struct ib_send_wr *wr,
-			   struct mlx5_core_dev *mdev,
-			   struct mlx5_ib_pd *pd,
-			   int writ)
+static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
+			     struct mlx5_ib_mr *mr,
+			     struct mlx5_ib_pd *pd)
 {
-	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
-	u64 *page_list = wr->wr.fast_reg.page_list->page_list;
-	u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
-	int i;
+	int bcount = mr->desc_size * mr->ndescs;
 
-	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
-		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
-	dseg->addr = cpu_to_be64(mfrpl->map);
-	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
+	dseg->addr = cpu_to_be64(mr->desc_map);
+	dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
 	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
 }
 
@@ -2224,22 +2221,22 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
 	return 0;
 }
 
-static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
-				void **seg, int *size)
+static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
+				struct mlx5_ib_qp *qp, void **seg, int *size)
 {
-	struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
-	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
+	struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
+	struct ib_mr *sig_mr = wr->sig_mr;
 	struct mlx5_bsf *bsf;
-	u32 data_len = wr->sg_list->length;
-	u32 data_key = wr->sg_list->lkey;
-	u64 data_va = wr->sg_list->addr;
+	u32 data_len = wr->wr.sg_list->length;
+	u32 data_key = wr->wr.sg_list->lkey;
+	u64 data_va = wr->wr.sg_list->addr;
 	int ret;
 	int wqe_size;
 
-	if (!wr->wr.sig_handover.prot ||
-	    (data_key == wr->wr.sig_handover.prot->lkey &&
-	     data_va == wr->wr.sig_handover.prot->addr &&
-	     data_len == wr->wr.sig_handover.prot->length)) {
+	if (!wr->prot ||
+	    (data_key == wr->prot->lkey &&
+	     data_va == wr->prot->addr &&
+	     data_len == wr->prot->length)) {
 		/**
 		 * Source domain doesn't contain signature information
 		 * or data and protection are interleaved in memory.
@@ -2273,8 +2270,8 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
 		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
 		struct mlx5_stride_block_entry *data_sentry;
 		struct mlx5_stride_block_entry *prot_sentry;
-		u32 prot_key = wr->wr.sig_handover.prot->lkey;
-		u64 prot_va = wr->wr.sig_handover.prot->addr;
+		u32 prot_key = wr->prot->lkey;
+		u64 prot_va = wr->prot->addr;
 		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
 		int prot_size;
 
@@ -2326,16 +2323,16 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
 }
 
 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
-				 struct ib_send_wr *wr, u32 nelements,
+				 struct ib_sig_handover_wr *wr, u32 nelements,
 				 u32 length, u32 pdn)
 {
-	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
+	struct ib_mr *sig_mr = wr->sig_mr;
 	u32 sig_key = sig_mr->rkey;
 	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
 
 	memset(seg, 0, sizeof(*seg));
 
-	seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
+	seg->flags = get_umr_flags(wr->access_flags) |
 		     MLX5_ACCESS_MODE_KLM;
 	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
 	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
@@ -2346,7 +2343,7 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
 }
 
 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
-				struct ib_send_wr *wr, u32 nelements)
+				u32 nelements)
 {
 	memset(umr, 0, sizeof(*umr));
 
@@ -2357,37 +2354,37 @@ static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
 }
 
 
-static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
+static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
 			  void **seg, int *size)
 {
-	struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
+	struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
+	struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
 	u32 pdn = get_pd(qp)->pdn;
 	u32 klm_oct_size;
 	int region_len, ret;
 
-	if (unlikely(wr->num_sge != 1) ||
-	    unlikely(wr->wr.sig_handover.access_flags &
-		     IB_ACCESS_REMOTE_ATOMIC) ||
+	if (unlikely(wr->wr.num_sge != 1) ||
+	    unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
 	    unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
 	    unlikely(!sig_mr->sig->sig_status_checked))
 		return -EINVAL;
 
 	/* length of the protected region, data + protection */
-	region_len = wr->sg_list->length;
-	if (wr->wr.sig_handover.prot &&
-	    (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey ||
-	     wr->wr.sig_handover.prot->addr != wr->sg_list->addr ||
-	     wr->wr.sig_handover.prot->length != wr->sg_list->length))
-		region_len += wr->wr.sig_handover.prot->length;
+	region_len = wr->wr.sg_list->length;
+	if (wr->prot &&
+	    (wr->prot->lkey != wr->wr.sg_list->lkey ||
+	     wr->prot->addr != wr->wr.sg_list->addr ||
+	     wr->prot->length != wr->wr.sg_list->length))
+		region_len += wr->prot->length;
 
 	/**
 	 * KLM octoword size - if protection was provided
 	 * then we use strided block format (3 octowords),
 	 * else we use single KLM (1 octoword)
 	 **/
-	klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;
+	klm_oct_size = wr->prot ? 3 : 1;
 
-	set_sig_umr_segment(*seg, wr, klm_oct_size);
+	set_sig_umr_segment(*seg, klm_oct_size);
 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
 	if (unlikely((*seg == qp->sq.qend)))
@@ -2433,38 +2430,52 @@ static int set_psv_wr(struct ib_sig_domain *domain,
 	return 0;
 }
 
-static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
-			  struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
+static int set_reg_wr(struct mlx5_ib_qp *qp,
+		      struct ib_reg_wr *wr,
+		      void **seg, int *size)
 {
-	int writ = 0;
-	int li;
+	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
+	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
 
-	li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
-	if (unlikely(wr->send_flags & IB_SEND_INLINE))
+	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
+		mlx5_ib_warn(to_mdev(qp->ibqp.device),
+			     "Invalid IB_SEND_INLINE send flag\n");
 		return -EINVAL;
+	}
 
-	set_frwr_umr_segment(*seg, wr, li);
+	set_reg_umr_seg(*seg, mr);
 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
 	if (unlikely((*seg == qp->sq.qend)))
 		*seg = mlx5_get_send_wqe(qp, 0);
-	set_mkey_segment(*seg, wr, li, &writ);
+
+	set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
 	*seg += sizeof(struct mlx5_mkey_seg);
 	*size += sizeof(struct mlx5_mkey_seg) / 16;
 	if (unlikely((*seg == qp->sq.qend)))
 		*seg = mlx5_get_send_wqe(qp, 0);
-	if (!li) {
-		if (unlikely(wr->wr.fast_reg.page_list_len >
-			     wr->wr.fast_reg.page_list->max_page_list_len))
-			return -ENOMEM;
 
-		set_frwr_pages(*seg, wr, mdev, pd, writ);
-		*seg += sizeof(struct mlx5_wqe_data_seg);
-		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
-	}
+	set_reg_data_seg(*seg, mr, pd);
+	*seg += sizeof(struct mlx5_wqe_data_seg);
+	*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
+
 	return 0;
 }
 
+static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size)
+{
+	set_linv_umr_seg(*seg);
+	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+	if (unlikely((*seg == qp->sq.qend)))
+		*seg = mlx5_get_send_wqe(qp, 0);
+	set_linv_mkey_seg(*seg);
+	*seg += sizeof(struct mlx5_mkey_seg);
+	*size += sizeof(struct mlx5_mkey_seg) / 16;
+	if (unlikely((*seg == qp->sq.qend)))
+		*seg = mlx5_get_send_wqe(qp, 0);
+}
+
 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
 {
 	__be32 *p = NULL;
@@ -2578,7 +2589,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 {
 	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
-	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	struct mlx5_ib_mr *mr;
 	struct mlx5_wqe_data_seg *dpseg;
@@ -2627,7 +2637,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		switch (ibqp->qp_type) {
 		case IB_QPT_XRC_INI:
 			xrc = seg;
-			xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
 			seg += sizeof(*xrc);
 			size += sizeof(*xrc) / 16;
 			/* fall through */
@@ -2636,8 +2645,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			case IB_WR_RDMA_READ:
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
-					      wr->wr.rdma.rkey);
+				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
+					      rdma_wr(wr)->rkey);
 				seg += sizeof(struct mlx5_wqe_raddr_seg);
 				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
 				break;
@@ -2654,22 +2663,16 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
 				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
-				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
-				if (err) {
-					mlx5_ib_warn(dev, "\n");
-					*bad_wr = wr;
-					goto out;
-				}
+				set_linv_wr(qp, &seg, &size);
 				num_sge = 0;
 				break;
 
-			case IB_WR_FAST_REG_MR:
+			case IB_WR_REG_MR:
 				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
-				qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
-				ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
-				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
+				qp->sq.wr_data[idx] = IB_WR_REG_MR;
+				ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
+				err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
 				if (err) {
-					mlx5_ib_warn(dev, "\n");
 					*bad_wr = wr;
 					goto out;
 				}
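
Note (not part of the patch): with the new API the page list is no longer carried in the work request; the driver already laid out the descriptors at mr->desc_map when the MR was mapped, and set_reg_data_seg() simply points the WQE at that buffer. A minimal, hypothetical sketch of the consumer side (variable names are illustrative; the MR is assumed to have been mapped with ib_map_mr_sg() beforehand):

	struct ib_reg_wr rwr = {
		.wr.opcode = IB_WR_REG_MR,
		.wr.send_flags = IB_SEND_SIGNALED,
		.mr = mr,			/* ib_mr mapped via ib_map_mr_sg() */
		.key = mr->rkey,
		.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
	};
	struct ib_send_wr *bad_wr;
	int err;

	err = ib_post_send(qp, &rwr.wr, &bad_wr);

mlx5_ib_post_send() then recovers the container with reg_wr(wr), as in the IB_WR_REG_MR case above.
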
@@ -2678,7 +2681,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 			case IB_WR_REG_SIG_MR:
 				qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
-				mr = to_mmr(wr->wr.sig_handover.sig_mr);
+				mr = to_mmr(sig_handover_wr(wr)->sig_mr);
 
 				ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
 				err = set_sig_umr_wr(wr, qp, &seg, &size);
@@ -2706,7 +2709,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
+				err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
 						 mr->sig->psv_memory.psv_idx, &seg,
 						 &size);
 				if (err) {
@@ -2728,7 +2731,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				}
 
 				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
-				err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
+				err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
 						 mr->sig->psv_wire.psv_idx, &seg,
 						 &size);
 				if (err) {
@@ -2752,8 +2755,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			switch (wr->opcode) {
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
-					      wr->wr.rdma.rkey);
+				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
+					      rdma_wr(wr)->rkey);
 				seg += sizeof(struct mlx5_wqe_raddr_seg);
 				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
 				break;
@@ -2780,7 +2783,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				goto out;
 			}
 			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
-			ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
+			ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
 			set_reg_umr_segment(seg, wr);
 			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
 			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;