about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/mlx5/main.c
diff options
context:
space:
mode:
author	Saeed Mahameed <saeedm@mellanox.com>	2015-05-28 15:28:41 -0400
committer	David S. Miller <davem@davemloft.net>	2015-05-30 21:23:22 -0400
commit	938fe83c8dcbbf294d167e6163200a8540ae43c4 (patch)
tree	1867c23ac3d241f620a2b16eeca38d8ea8b2fe73 /drivers/infiniband/hw/mlx5/main.c
parent	e281682bf29438848daac11627216bceb1507b71 (diff)
net/mlx5_core: New device capabilities handling
- Query all supported types of dev caps on driver load.
- Store the Cap data outbox per cap type into driver private data.
- Introduce new Macros to access/dump stored caps (using the auto
  generated data types).
- Obsolete SW representation of dev caps (no need for SW copy for each
  cap).
- Modify IB driver to use new macros for checking caps.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband/hw/mlx5/main.c')
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c	113
1 file changed, 54 insertions(+), 59 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 57c9809e8b87..9075649f30fc 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -66,15 +66,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
66 struct ib_device_attr *props) 66 struct ib_device_attr *props)
67{ 67{
68 struct mlx5_ib_dev *dev = to_mdev(ibdev); 68 struct mlx5_ib_dev *dev = to_mdev(ibdev);
69 struct mlx5_core_dev *mdev = dev->mdev;
69 struct ib_smp *in_mad = NULL; 70 struct ib_smp *in_mad = NULL;
70 struct ib_smp *out_mad = NULL; 71 struct ib_smp *out_mad = NULL;
71 struct mlx5_general_caps *gen;
72 int err = -ENOMEM; 72 int err = -ENOMEM;
73 int max_rq_sg; 73 int max_rq_sg;
74 int max_sq_sg; 74 int max_sq_sg;
75 u64 flags;
76 75
77 gen = &dev->mdev->caps.gen;
78 in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); 76 in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
79 out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); 77 out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
80 if (!in_mad || !out_mad) 78 if (!in_mad || !out_mad)
@@ -96,18 +94,18 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
96 IB_DEVICE_PORT_ACTIVE_EVENT | 94 IB_DEVICE_PORT_ACTIVE_EVENT |
97 IB_DEVICE_SYS_IMAGE_GUID | 95 IB_DEVICE_SYS_IMAGE_GUID |
98 IB_DEVICE_RC_RNR_NAK_GEN; 96 IB_DEVICE_RC_RNR_NAK_GEN;
99 flags = gen->flags; 97
100 if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) 98 if (MLX5_CAP_GEN(mdev, pkv))
101 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; 99 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
102 if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR) 100 if (MLX5_CAP_GEN(mdev, qkv))
103 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; 101 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
104 if (flags & MLX5_DEV_CAP_FLAG_APM) 102 if (MLX5_CAP_GEN(mdev, apm))
105 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; 103 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
106 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; 104 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
107 if (flags & MLX5_DEV_CAP_FLAG_XRC) 105 if (MLX5_CAP_GEN(mdev, xrc))
108 props->device_cap_flags |= IB_DEVICE_XRC; 106 props->device_cap_flags |= IB_DEVICE_XRC;
109 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; 107 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
110 if (flags & MLX5_DEV_CAP_FLAG_SIG_HAND_OVER) { 108 if (MLX5_CAP_GEN(mdev, sho)) {
111 props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER; 109 props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
112 /* At this stage no support for signature handover */ 110 /* At this stage no support for signature handover */
113 props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 | 111 props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
@@ -116,7 +114,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
116 props->sig_guard_cap = IB_GUARD_T10DIF_CRC | 114 props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
117 IB_GUARD_T10DIF_CSUM; 115 IB_GUARD_T10DIF_CSUM;
118 } 116 }
119 if (flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST) 117 if (MLX5_CAP_GEN(mdev, block_lb_mc))
120 props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; 118 props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
121 119
122 props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 120 props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
@@ -126,37 +124,38 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
126 memcpy(&props->sys_image_guid, out_mad->data + 4, 8); 124 memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
127 125
128 props->max_mr_size = ~0ull; 126 props->max_mr_size = ~0ull;
129 props->page_size_cap = gen->min_page_sz; 127 props->page_size_cap = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
130 props->max_qp = 1 << gen->log_max_qp; 128 props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
131 props->max_qp_wr = gen->max_wqes; 129 props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
132 max_rq_sg = gen->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg); 130 max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
133 max_sq_sg = (gen->max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) / 131 sizeof(struct mlx5_wqe_data_seg);
134 sizeof(struct mlx5_wqe_data_seg); 132 max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
133 sizeof(struct mlx5_wqe_ctrl_seg)) /
134 sizeof(struct mlx5_wqe_data_seg);
135 props->max_sge = min(max_rq_sg, max_sq_sg); 135 props->max_sge = min(max_rq_sg, max_sq_sg);
136 props->max_cq = 1 << gen->log_max_cq; 136 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
137 props->max_cqe = gen->max_cqes - 1; 137 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
138 props->max_mr = 1 << gen->log_max_mkey; 138 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
139 props->max_pd = 1 << gen->log_max_pd; 139 props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
140 props->max_qp_rd_atom = 1 << gen->log_max_ra_req_qp; 140 props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
141 props->max_qp_init_rd_atom = 1 << gen->log_max_ra_res_qp; 141 props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
142 props->max_srq = 1 << gen->log_max_srq; 142 props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
143 props->max_srq_wr = gen->max_srq_wqes - 1; 143 props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
144 props->local_ca_ack_delay = gen->local_ca_ack_delay; 144 props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
145 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; 145 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
146 props->max_srq_sge = max_rq_sg - 1; 146 props->max_srq_sge = max_rq_sg - 1;
147 props->max_fast_reg_page_list_len = (unsigned int)-1; 147 props->max_fast_reg_page_list_len = (unsigned int)-1;
148 props->local_ca_ack_delay = gen->local_ca_ack_delay;
149 props->atomic_cap = IB_ATOMIC_NONE; 148 props->atomic_cap = IB_ATOMIC_NONE;
150 props->masked_atomic_cap = IB_ATOMIC_NONE; 149 props->masked_atomic_cap = IB_ATOMIC_NONE;
151 props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); 150 props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
152 props->max_mcast_grp = 1 << gen->log_max_mcg; 151 props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
153 props->max_mcast_qp_attach = gen->max_qp_mcg; 152 props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
154 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * 153 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
155 props->max_mcast_grp; 154 props->max_mcast_grp;
156 props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */ 155 props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
157 156
158#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 157#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
159 if (dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG) 158 if (MLX5_CAP_GEN(mdev, pg))
160 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; 159 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
161 props->odp_caps = dev->odp_caps; 160 props->odp_caps = dev->odp_caps;
162#endif 161#endif
@@ -172,14 +171,13 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
172 struct ib_port_attr *props) 171 struct ib_port_attr *props)
173{ 172{
174 struct mlx5_ib_dev *dev = to_mdev(ibdev); 173 struct mlx5_ib_dev *dev = to_mdev(ibdev);
174 struct mlx5_core_dev *mdev = dev->mdev;
175 struct ib_smp *in_mad = NULL; 175 struct ib_smp *in_mad = NULL;
176 struct ib_smp *out_mad = NULL; 176 struct ib_smp *out_mad = NULL;
177 struct mlx5_general_caps *gen;
178 int ext_active_speed; 177 int ext_active_speed;
179 int err = -ENOMEM; 178 int err = -ENOMEM;
180 179
181 gen = &dev->mdev->caps.gen; 180 if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
182 if (port < 1 || port > gen->num_ports) {
183 mlx5_ib_warn(dev, "invalid port number %d\n", port); 181 mlx5_ib_warn(dev, "invalid port number %d\n", port);
184 return -EINVAL; 182 return -EINVAL;
185 } 183 }
@@ -210,8 +208,8 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
210 props->phys_state = out_mad->data[33] >> 4; 208 props->phys_state = out_mad->data[33] >> 4;
211 props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20)); 209 props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
212 props->gid_tbl_len = out_mad->data[50]; 210 props->gid_tbl_len = out_mad->data[50];
213 props->max_msg_sz = 1 << gen->log_max_msg; 211 props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
214 props->pkey_tbl_len = gen->port[port - 1].pkey_table_len; 212 props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
215 props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46)); 213 props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
216 props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48)); 214 props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
217 props->active_width = out_mad->data[31] & 0xf; 215 props->active_width = out_mad->data[31] & 0xf;
@@ -238,7 +236,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
238 236
239 /* If reported active speed is QDR, check if is FDR-10 */ 237 /* If reported active speed is QDR, check if is FDR-10 */
240 if (props->active_speed == 4) { 238 if (props->active_speed == 4) {
241 if (gen->ext_port_cap[port - 1] & 239 if (mdev->port_caps[port - 1].ext_port_cap &
242 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { 240 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
243 init_query_mad(in_mad); 241 init_query_mad(in_mad);
244 in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; 242 in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
@@ -392,7 +390,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
392 struct mlx5_ib_alloc_ucontext_req_v2 req; 390 struct mlx5_ib_alloc_ucontext_req_v2 req;
393 struct mlx5_ib_alloc_ucontext_resp resp; 391 struct mlx5_ib_alloc_ucontext_resp resp;
394 struct mlx5_ib_ucontext *context; 392 struct mlx5_ib_ucontext *context;
395 struct mlx5_general_caps *gen;
396 struct mlx5_uuar_info *uuari; 393 struct mlx5_uuar_info *uuari;
397 struct mlx5_uar *uars; 394 struct mlx5_uar *uars;
398 int gross_uuars; 395 int gross_uuars;
@@ -403,7 +400,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
403 int i; 400 int i;
404 size_t reqlen; 401 size_t reqlen;
405 402
406 gen = &dev->mdev->caps.gen;
407 if (!dev->ib_active) 403 if (!dev->ib_active)
408 return ERR_PTR(-EAGAIN); 404 return ERR_PTR(-EAGAIN);
409 405
@@ -436,14 +432,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
436 432
437 num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; 433 num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
438 gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; 434 gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
439 resp.qp_tab_size = 1 << gen->log_max_qp; 435 resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
440 resp.bf_reg_size = gen->bf_reg_size; 436 resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
441 resp.cache_line_size = L1_CACHE_BYTES; 437 resp.cache_line_size = L1_CACHE_BYTES;
442 resp.max_sq_desc_sz = gen->max_sq_desc_sz; 438 resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
443 resp.max_rq_desc_sz = gen->max_rq_desc_sz; 439 resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
444 resp.max_send_wqebb = gen->max_wqes; 440 resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
445 resp.max_recv_wr = gen->max_wqes; 441 resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
446 resp.max_srq_recv_wr = gen->max_srq_wqes; 442 resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
447 443
448 context = kzalloc(sizeof(*context), GFP_KERNEL); 444 context = kzalloc(sizeof(*context), GFP_KERNEL);
449 if (!context) 445 if (!context)
@@ -493,7 +489,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
493 mutex_init(&context->db_page_mutex); 489 mutex_init(&context->db_page_mutex);
494 490
495 resp.tot_uuars = req.total_num_uuars; 491 resp.tot_uuars = req.total_num_uuars;
496 resp.num_ports = gen->num_ports; 492 resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
497 err = ib_copy_to_udata(udata, &resp, 493 err = ib_copy_to_udata(udata, &resp,
498 sizeof(resp) - sizeof(resp.reserved)); 494 sizeof(resp) - sizeof(resp.reserved));
499 if (err) 495 if (err)
@@ -895,11 +891,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
895 891
896static void get_ext_port_caps(struct mlx5_ib_dev *dev) 892static void get_ext_port_caps(struct mlx5_ib_dev *dev)
897{ 893{
898 struct mlx5_general_caps *gen;
899 int port; 894 int port;
900 895
901 gen = &dev->mdev->caps.gen; 896 for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
902 for (port = 1; port <= gen->num_ports; port++)
903 mlx5_query_ext_port_caps(dev, port); 897 mlx5_query_ext_port_caps(dev, port);
904} 898}
905 899
@@ -907,11 +901,9 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
907{ 901{
908 struct ib_device_attr *dprops = NULL; 902 struct ib_device_attr *dprops = NULL;
909 struct ib_port_attr *pprops = NULL; 903 struct ib_port_attr *pprops = NULL;
910 struct mlx5_general_caps *gen;
911 int err = -ENOMEM; 904 int err = -ENOMEM;
912 int port; 905 int port;
913 906
914 gen = &dev->mdev->caps.gen;
915 pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); 907 pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
916 if (!pprops) 908 if (!pprops)
917 goto out; 909 goto out;
@@ -926,14 +918,17 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
926 goto out; 918 goto out;
927 } 919 }
928 920
929 for (port = 1; port <= gen->num_ports; port++) { 921 for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
930 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); 922 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
931 if (err) { 923 if (err) {
932 mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err); 924 mlx5_ib_warn(dev, "query_port %d failed %d\n",
925 port, err);
933 break; 926 break;
934 } 927 }
935 gen->port[port - 1].pkey_table_len = dprops->max_pkeys; 928 dev->mdev->port_caps[port - 1].pkey_table_len =
936 gen->port[port - 1].gid_table_len = pprops->gid_tbl_len; 929 dprops->max_pkeys;
930 dev->mdev->port_caps[port - 1].gid_table_len =
931 pprops->gid_tbl_len;
937 mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", 932 mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
938 dprops->max_pkeys, pprops->gid_tbl_len); 933 dprops->max_pkeys, pprops->gid_tbl_len);
939 } 934 }
@@ -1207,8 +1202,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1207 strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); 1202 strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
1208 dev->ib_dev.owner = THIS_MODULE; 1203 dev->ib_dev.owner = THIS_MODULE;
1209 dev->ib_dev.node_type = RDMA_NODE_IB_CA; 1204 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
1210 dev->ib_dev.local_dma_lkey = mdev->caps.gen.reserved_lkey; 1205 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
1211 dev->num_ports = mdev->caps.gen.num_ports; 1206 dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
1212 dev->ib_dev.phys_port_cnt = dev->num_ports; 1207 dev->ib_dev.phys_port_cnt = dev->num_ports;
1213 dev->ib_dev.num_comp_vectors = 1208 dev->ib_dev.num_comp_vectors =
1214 dev->mdev->priv.eq_table.num_comp_vectors; 1209 dev->mdev->priv.eq_table.num_comp_vectors;
@@ -1286,9 +1281,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1286 dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list; 1281 dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
1287 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; 1282 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
1288 1283
1289 mlx5_ib_internal_query_odp_caps(dev); 1284 mlx5_ib_internal_fill_odp_caps(dev);
1290 1285
1291 if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) { 1286 if (MLX5_CAP_GEN(mdev, xrc)) {
1292 dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; 1287 dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
1293 dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd; 1288 dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
1294 dev->ib_dev.uverbs_cmd_mask |= 1289 dev->ib_dev.uverbs_cmd_mask |=