author		Sagi Grimberg <sagig@mellanox.com>	2014-02-23 07:19:11 -0500
committer	Roland Dreier <roland@purestorage.com>	2014-03-07 14:39:51 -0500
commit		e6631814fb3ac454fbbf47ea343c2b9508e4e1ba (patch)
tree		dfe554c9fbe5f3f4f0a37276a74180bf2f4e9fd2
parent		3bcdb17a5e88288ead90be3c107e754a6075a5b0 (diff)
IB/mlx5: Support IB_WR_REG_SIG_MR
This patch implements IB_WR_REG_SIG_MR posted by the user.

Basically this WR involves 3 WQEs in order to prepare and properly
register the signature layout:

1. Post a UMR WR to register the sig_mr in one of two possible ways:
   * In case the user registered a single MR for data, the UMR data
     segment consists of:
     - a single klm (data MR) passed by the user
     - a BSF with the signature attributes requested by the user.
   * In case the user registered 2 MRs, one for data and one for
     protection, the UMR consists of:
     - a strided block format which includes the data and protection
       MRs and their repetitive block format.
     - a BSF with the signature attributes requested by the user.

2. Post a SET_PSV WQE in order to set the memory domain initial
   signature parameters passed by the user.
   SET_PSV is posted unsignaled and solicited, so it only generates a
   CQE on error.

3. Post a SET_PSV WQE in order to set the wire domain initial
   signature parameters passed by the user.
   This SET_PSV is likewise posted unsignaled and solicited.

* After this compound WR we place a small fence for the next WR to
  come.

This patch also introduces some helper functions to set the BSF
correctly and to determine the signature format selectors.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
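For orientation, here is a minimal sketch (not part of the patch) of how a consumer such as a storage ULP might post this WR. The helper name post_reg_sig_mr and the DIF values are invented for illustration; the field names follow the signature verbs API exactly as the diff below uses them, and the usual rdma/ib_verbs.h environment is assumed:

	/*
	 * Illustrative only: post IB_WR_REG_SIG_MR on a QP created with
	 * signature enabled, using a signature-enabled MR (sig_mr).
	 * Pass prot_sge == NULL when the data buffer carries no PI, so
	 * the single-KLM path in the patch is taken.
	 */
	static int post_reg_sig_mr(struct ib_qp *qp, struct ib_mr *sig_mr,
				   struct ib_sge *data_sge, struct ib_sge *prot_sge)
	{
		struct ib_send_wr wr, *bad_wr;
		struct ib_sig_attrs sig_attrs;

		memset(&sig_attrs, 0, sizeof(sig_attrs));
		sig_attrs.mem.sig_type = IB_SIG_TYPE_T10_DIF;
		sig_attrs.mem.sig.dif.type = IB_T10DIF_TYPE1;
		sig_attrs.mem.sig.dif.bg_type = IB_T10DIF_CRC;
		sig_attrs.mem.sig.dif.pi_interval = 512;	/* 512B block + 8B DIF */
		sig_attrs.wire = sig_attrs.mem;			/* same layout on the wire */
		sig_attrs.check_mask = 0xff;			/* check all 8 DIF bytes */

		memset(&wr, 0, sizeof(wr));
		wr.opcode = IB_WR_REG_SIG_MR;
		wr.send_flags = IB_SEND_SIGNALED;
		wr.sg_list = data_sge;				/* exactly one data SGE */
		wr.num_sge = 1;
		wr.wr.sig_handover.sig_attrs = &sig_attrs;
		wr.wr.sig_handover.sig_mr = sig_mr;
		wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
		wr.wr.sig_handover.prot = prot_sge;		/* NULL: data-only source */

		return ib_post_send(qp, &wr, &bad_wr);
	}

The driver then expands this single posted WR into the UMR + two SET_PSV WQEs described above.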
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c	422
-rw-r--r--	include/linux/mlx5/qp.h	61
2 files changed, 483 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 1dbadbfc4474..67e79989b181 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1777,6 +1777,26 @@ static __be64 frwr_mkey_mask(void)
 	return cpu_to_be64(result);
 }
 
+static __be64 sig_mkey_mask(void)
+{
+	u64 result;
+
+	result = MLX5_MKEY_MASK_LEN		|
+		MLX5_MKEY_MASK_PAGE_SIZE	|
+		MLX5_MKEY_MASK_START_ADDR	|
+		MLX5_MKEY_MASK_EN_RINVAL	|
+		MLX5_MKEY_MASK_KEY		|
+		MLX5_MKEY_MASK_LR		|
+		MLX5_MKEY_MASK_LW		|
+		MLX5_MKEY_MASK_RR		|
+		MLX5_MKEY_MASK_RW		|
+		MLX5_MKEY_MASK_SMALL_FENCE	|
+		MLX5_MKEY_MASK_FREE		|
+		MLX5_MKEY_MASK_BSF_EN;
+
+	return cpu_to_be64(result);
+}
+
 static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
 				 struct ib_send_wr *wr, int li)
 {
@@ -1961,6 +1981,339 @@ static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
 	return 0;
 }
 
+static u16 prot_field_size(enum ib_signature_type type)
+{
+	switch (type) {
+	case IB_SIG_TYPE_T10_DIF:
+		return MLX5_DIF_SIZE;
+	default:
+		return 0;
+	}
+}
+
+static u8 bs_selector(int block_size)
+{
+	switch (block_size) {
+	case 512:	    return 0x1;
+	case 520:	    return 0x2;
+	case 4096:	    return 0x3;
+	case 4160:	    return 0x4;
+	case 1073741824:    return 0x5;
+	default:	    return 0;
+	}
+}
+
+static int format_selector(struct ib_sig_attrs *attr,
+			   struct ib_sig_domain *domain,
+			   int *selector)
+{
+
+#define FORMAT_DIF_NONE		0
+#define FORMAT_DIF_CRC_INC	8
+#define FORMAT_DIF_CRC_NO_INC	12
+#define FORMAT_DIF_CSUM_INC	13
+#define FORMAT_DIF_CSUM_NO_INC	14
+
+	switch (domain->sig.dif.type) {
+	case IB_T10DIF_NONE:
+		/* No DIF */
+		*selector = FORMAT_DIF_NONE;
+		break;
+	case IB_T10DIF_TYPE1: /* Fall through */
+	case IB_T10DIF_TYPE2:
+		switch (domain->sig.dif.bg_type) {
+		case IB_T10DIF_CRC:
+			*selector = FORMAT_DIF_CRC_INC;
+			break;
+		case IB_T10DIF_CSUM:
+			*selector = FORMAT_DIF_CSUM_INC;
+			break;
+		default:
+			return 1;
+		}
+		break;
+	case IB_T10DIF_TYPE3:
+		switch (domain->sig.dif.bg_type) {
+		case IB_T10DIF_CRC:
+			*selector = domain->sig.dif.type3_inc_reftag ?
+					   FORMAT_DIF_CRC_INC :
+					   FORMAT_DIF_CRC_NO_INC;
+			break;
+		case IB_T10DIF_CSUM:
+			*selector = domain->sig.dif.type3_inc_reftag ?
+					   FORMAT_DIF_CSUM_INC :
+					   FORMAT_DIF_CSUM_NO_INC;
+			break;
+		default:
+			return 1;
+		}
+		break;
+	default:
+		return 1;
+	}
+
+	return 0;
+}
+
+static int mlx5_set_bsf(struct ib_mr *sig_mr,
+			struct ib_sig_attrs *sig_attrs,
+			struct mlx5_bsf *bsf, u32 data_size)
+{
+	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
+	struct mlx5_bsf_basic *basic = &bsf->basic;
+	struct ib_sig_domain *mem = &sig_attrs->mem;
+	struct ib_sig_domain *wire = &sig_attrs->wire;
+	int ret, selector;
+
+	switch (sig_attrs->mem.sig_type) {
+	case IB_SIG_TYPE_T10_DIF:
+		if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF)
+			return -EINVAL;
+
+		/* Input domain check byte mask */
+		basic->check_byte_mask = sig_attrs->check_mask;
+		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
+		    mem->sig.dif.type == wire->sig.dif.type) {
+			/* Same block structure */
+			basic->bsf_size_sbs = 1 << 4;
+			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
+				basic->wire.copy_byte_mask = 0xff;
+			else
+				basic->wire.copy_byte_mask = 0x3f;
+		} else
+			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
+
+		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
+		basic->raw_data_size = cpu_to_be32(data_size);
+
+		ret = format_selector(sig_attrs, mem, &selector);
+		if (ret)
+			return -EINVAL;
+		basic->m_bfs_psv = cpu_to_be32(selector << 24 |
+					       msig->psv_memory.psv_idx);
+
+		ret = format_selector(sig_attrs, wire, &selector);
+		if (ret)
+			return -EINVAL;
+		basic->w_bfs_psv = cpu_to_be32(selector << 24 |
+					       msig->psv_wire.psv_idx);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
+				void **seg, int *size)
+{
+	struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
+	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
+	struct mlx5_bsf *bsf;
+	u32 data_len = wr->sg_list->length;
+	u32 data_key = wr->sg_list->lkey;
+	u64 data_va = wr->sg_list->addr;
+	int ret;
+	int wqe_size;
+
+	if (!wr->wr.sig_handover.prot) {
+		/**
+		 * Source domain doesn't contain signature information
+		 * So need construct:
+		 *                  ------------------
+		 *                 |     data_klm     |
+		 *                  ------------------
+		 *                 |       BSF        |
+		 *                  ------------------
+		 **/
+		struct mlx5_klm *data_klm = *seg;
+
+		data_klm->bcount = cpu_to_be32(data_len);
+		data_klm->key = cpu_to_be32(data_key);
+		data_klm->va = cpu_to_be64(data_va);
+		wqe_size = ALIGN(sizeof(*data_klm), 64);
+	} else {
+		/**
+		 * Source domain contains signature information
+		 * So need construct a strided block format:
+		 *               ---------------------------
+		 *              |     stride_block_ctrl     |
+		 *               ---------------------------
+		 *              |          data_klm         |
+		 *               ---------------------------
+		 *              |          prot_klm         |
+		 *               ---------------------------
+		 *              |             BSF           |
+		 *               ---------------------------
+		 **/
+		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
+		struct mlx5_stride_block_entry *data_sentry;
+		struct mlx5_stride_block_entry *prot_sentry;
+		u32 prot_key = wr->wr.sig_handover.prot->lkey;
+		u64 prot_va = wr->wr.sig_handover.prot->addr;
+		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
+		int prot_size;
+
+		sblock_ctrl = *seg;
+		data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
+		prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
+
+		prot_size = prot_field_size(sig_attrs->mem.sig_type);
+		if (!prot_size) {
+			pr_err("Bad block size given: %u\n", block_size);
+			return -EINVAL;
+		}
+		sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
+							    prot_size);
+		sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
+		sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
+		sblock_ctrl->num_entries = cpu_to_be16(2);
+
+		data_sentry->bcount = cpu_to_be16(block_size);
+		data_sentry->key = cpu_to_be32(data_key);
+		data_sentry->va = cpu_to_be64(data_va);
+		prot_sentry->bcount = cpu_to_be16(prot_size);
+		prot_sentry->key = cpu_to_be32(prot_key);
+
+		if (prot_key == data_key && prot_va == data_va) {
+			/**
+			 * The data and protection are interleaved
+			 * in a single memory region
+			 **/
+			prot_sentry->va = cpu_to_be64(data_va + block_size);
+			prot_sentry->stride = cpu_to_be16(block_size + prot_size);
+			data_sentry->stride = prot_sentry->stride;
+		} else {
+			/* The data and protection are two different buffers */
+			prot_sentry->va = cpu_to_be64(prot_va);
+			data_sentry->stride = cpu_to_be16(block_size);
+			prot_sentry->stride = cpu_to_be16(prot_size);
+		}
+		wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
+				 sizeof(*prot_sentry), 64);
+	}
+
+	*seg += wqe_size;
+	*size += wqe_size / 16;
+	if (unlikely((*seg == qp->sq.qend)))
+		*seg = mlx5_get_send_wqe(qp, 0);
+
+	bsf = *seg;
+	ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
+	if (ret)
+		return -EINVAL;
+
+	*seg += sizeof(*bsf);
+	*size += sizeof(*bsf) / 16;
+	if (unlikely((*seg == qp->sq.qend)))
+		*seg = mlx5_get_send_wqe(qp, 0);
+
+	return 0;
+}
+
+static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
+				 struct ib_send_wr *wr, u32 nelements,
+				 u32 length, u32 pdn)
+{
+	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
+	u32 sig_key = sig_mr->rkey;
+
+	memset(seg, 0, sizeof(*seg));
+
+	seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
+				   MLX5_ACCESS_MODE_KLM;
+	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
+	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL |
+				    MLX5_MKEY_BSF_EN | pdn);
+	seg->len = cpu_to_be64(length);
+	seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
+	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
+}
+
+static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
+				struct ib_send_wr *wr, u32 nelements)
+{
+	memset(umr, 0, sizeof(*umr));
+
+	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
+	umr->klm_octowords = get_klm_octo(nelements);
+	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
+	umr->mkey_mask = sig_mkey_mask();
+}
+
+
+static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
+			  void **seg, int *size)
+{
+	struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
+	u32 pdn = get_pd(qp)->pdn;
+	u32 klm_oct_size;
+	int region_len, ret;
+
+	if (unlikely(wr->num_sge != 1) ||
+	    unlikely(wr->wr.sig_handover.access_flags &
+		     IB_ACCESS_REMOTE_ATOMIC) ||
+	    unlikely(!sig_mr->sig) || unlikely(!qp->signature_en))
+		return -EINVAL;
+
+	/* length of the protected region, data + protection */
+	region_len = wr->sg_list->length;
+	if (wr->wr.sig_handover.prot)
+		region_len += wr->wr.sig_handover.prot->length;
+
+	/**
+	 * KLM octoword size - if protection was provided
+	 * then we use strided block format (3 octowords),
+	 * else we use single KLM (1 octoword)
+	 **/
+	klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;
+
+	set_sig_umr_segment(*seg, wr, klm_oct_size);
+	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+	if (unlikely((*seg == qp->sq.qend)))
+		*seg = mlx5_get_send_wqe(qp, 0);
+
+	set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
+	*seg += sizeof(struct mlx5_mkey_seg);
+	*size += sizeof(struct mlx5_mkey_seg) / 16;
+	if (unlikely((*seg == qp->sq.qend)))
+		*seg = mlx5_get_send_wqe(qp, 0);
+
+	ret = set_sig_data_segment(wr, qp, seg, size);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int set_psv_wr(struct ib_sig_domain *domain,
+		      u32 psv_idx, void **seg, int *size)
+{
+	struct mlx5_seg_set_psv *psv_seg = *seg;
+
+	memset(psv_seg, 0, sizeof(*psv_seg));
+	psv_seg->psv_num = cpu_to_be32(psv_idx);
+	switch (domain->sig_type) {
+	case IB_SIG_TYPE_T10_DIF:
+		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
+						     domain->sig.dif.app_tag);
+		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
+
+		*seg += sizeof(*psv_seg);
+		*size += sizeof(*psv_seg) / 16;
+		break;
+
+	default:
+		pr_err("Bad signature type given.\n");
+		return 1;
+	}
+
+	return 0;
+}
+
 static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
 			  struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
 {
@@ -2108,6 +2461,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx5_core_dev *mdev = &dev->mdev;
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
+	struct mlx5_ib_mr *mr;
 	struct mlx5_wqe_data_seg *dpseg;
 	struct mlx5_wqe_xrc_seg *xrc;
 	struct mlx5_bf *bf = qp->bf;
@@ -2203,6 +2557,73 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			num_sge = 0;
 			break;
 
+		case IB_WR_REG_SIG_MR:
+			qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
+			mr = to_mmr(wr->wr.sig_handover.sig_mr);
+
+			ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
+			err = set_sig_umr_wr(wr, qp, &seg, &size);
+			if (err) {
+				mlx5_ib_warn(dev, "\n");
+				*bad_wr = wr;
+				goto out;
+			}
+
+			finish_wqe(qp, ctrl, size, idx, wr->wr_id,
+				   nreq, get_fence(fence, wr),
+				   next_fence, MLX5_OPCODE_UMR);
+			/*
+			 * SET_PSV WQEs are not signaled and solicited
+			 * on error
+			 */
+			wr->send_flags &= ~IB_SEND_SIGNALED;
+			wr->send_flags |= IB_SEND_SOLICITED;
+			err = begin_wqe(qp, &seg, &ctrl, wr,
+					&idx, &size, nreq);
+			if (err) {
+				mlx5_ib_warn(dev, "\n");
+				err = -ENOMEM;
+				*bad_wr = wr;
+				goto out;
+			}
+
+			err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
+					 mr->sig->psv_memory.psv_idx, &seg,
+					 &size);
+			if (err) {
+				mlx5_ib_warn(dev, "\n");
+				*bad_wr = wr;
+				goto out;
+			}
+
+			finish_wqe(qp, ctrl, size, idx, wr->wr_id,
+				   nreq, get_fence(fence, wr),
+				   next_fence, MLX5_OPCODE_SET_PSV);
+			err = begin_wqe(qp, &seg, &ctrl, wr,
+					&idx, &size, nreq);
+			if (err) {
+				mlx5_ib_warn(dev, "\n");
+				err = -ENOMEM;
+				*bad_wr = wr;
+				goto out;
+			}
+
+			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+			err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
+					 mr->sig->psv_wire.psv_idx, &seg,
+					 &size);
+			if (err) {
+				mlx5_ib_warn(dev, "\n");
+				*bad_wr = wr;
+				goto out;
+			}
+
+			finish_wqe(qp, ctrl, size, idx, wr->wr_id,
+				   nreq, get_fence(fence, wr),
+				   next_fence, MLX5_OPCODE_SET_PSV);
+			num_sge = 0;
+			goto skip_psv;
+
 		default:
 			break;
 		}
@@ -2286,6 +2707,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
 			   get_fence(fence, wr), next_fence,
 			   mlx5_ib_opcode[wr->opcode]);
+skip_psv:
 		if (0)
 			dump_wqe(qp, idx, size);
 	}
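To make the strided-block arithmetic in set_sig_data_segment() concrete, here is a hedged worked example before the header changes below. The numbers are hypothetical; the relations are exactly the ones computed above, for a 4KB transfer with 512B sectors and 8B DIF:

	u32 data_len   = 4096;	/* wr->sg_list->length */
	u16 block_size = 512;	/* sig_attrs->mem.sig.dif.pi_interval */
	int prot_size  = 8;	/* prot_field_size() == MLX5_DIF_SIZE */

	/* sblock_ctrl->bcount_per_cycle = 512 + 8    = 520 bytes per cycle */
	/* sblock_ctrl->repeat_count     = 4096 / 512 = 8 cycles            */

	/* Separate data and PI buffers: data_sentry->stride = 512 and
	 * prot_sentry->stride = 8, each entry walking its own region.
	 * Interleaved (same lkey and address): both strides become 520
	 * and the protection entry starts at data_va + 512.              */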
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 152756eaa8a3..49af74f90ef9 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -38,6 +38,8 @@
 
 #define MLX5_INVALID_LKEY	0x100
 #define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
+#define MLX5_DIF_SIZE		8
+#define MLX5_STRIDE_BLOCK_OP	0x400
 
 enum mlx5_qp_optpar {
 	MLX5_QP_OPTPAR_ALT_ADDR_PATH	= 1 << 0,
@@ -152,6 +154,11 @@ enum {
 	MLX5_SND_DBR	= 1,
 };
 
+enum {
+	MLX5_FLAGS_INLINE	= 1<<7,
+	MLX5_FLAGS_CHECK_FREE   = 1<<5,
+};
+
 struct mlx5_wqe_fmr_seg {
 	__be32			flags;
 	__be32			mem_key;
@@ -279,6 +286,60 @@ struct mlx5_wqe_inline_seg {
 	__be32	byte_count;
 };
 
+struct mlx5_bsf {
+	struct mlx5_bsf_basic {
+		u8		bsf_size_sbs;
+		u8		check_byte_mask;
+		union {
+			u8	copy_byte_mask;
+			u8	bs_selector;
+			u8	rsvd_wflags;
+		} wire;
+		union {
+			u8	bs_selector;
+			u8	rsvd_mflags;
+		} mem;
+		__be32		raw_data_size;
+		__be32		w_bfs_psv;
+		__be32		m_bfs_psv;
+	} basic;
+	struct mlx5_bsf_ext {
+		__be32		t_init_gen_pro_size;
+		__be32		rsvd_epi_size;
+		__be32		w_tfs_psv;
+		__be32		m_tfs_psv;
+	} ext;
+	struct mlx5_bsf_inl {
+		__be32		w_inl_vld;
+		__be32		w_rsvd;
+		__be64		w_block_format;
+		__be32		m_inl_vld;
+		__be32		m_rsvd;
+		__be64		m_block_format;
+	} inl;
+};
+
+struct mlx5_klm {
+	__be32		bcount;
+	__be32		key;
+	__be64		va;
+};
+
+struct mlx5_stride_block_entry {
+	__be16		stride;
+	__be16		bcount;
+	__be32		key;
+	__be64		va;
+};
+
+struct mlx5_stride_block_ctrl_seg {
+	__be32		bcount_per_cycle;
+	__be32		op;
+	__be32		repeat_count;
+	u16		rsvd;
+	__be16		num_entries;
+};
+
 struct mlx5_core_qp {
 	void (*event)		(struct mlx5_core_qp *, int);
 	int			qpn;
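As a closing size check (byte counts derived from the struct definitions above; one UMR octoword is 16 bytes), the KLM octoword counts chosen in set_sig_umr_wr() fall out directly:

	/* struct mlx5_klm:                   4 + 4 + 8         = 16B -> 1 octoword */
	/* struct mlx5_stride_block_ctrl_seg: 4 + 4 + 4 + 2 + 2 = 16B               */
	/* struct mlx5_stride_block_entry:    2 + 2 + 4 + 8     = 16B               */
	/* ctrl + data entry + prot entry = 48B -> 3 octowords, matching            */
	/*     klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1                      */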