author		Kamal Heib <kamalheib1@gmail.com>	2018-12-10 14:09:48 -0500
committer	Jason Gunthorpe <jgg@mellanox.com>	2018-12-12 09:40:16 -0500
commit		3023a1e93656c02b8d6a3a46e712b815843fa514 (patch)
tree		6c1471a47bb6ee96cee6fcac950a116701d79f8d
parent		02a42f8e40caed53fd357c9c33912e1bfb6f0365 (diff)
RDMA: Start use ib_device_ops
Make all the required changes to start using the ib_device_ops structure.

Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
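The conversion is mechanical: every verbs callback that used to be a loose function pointer in struct ib_device is now reached through an embedded ops structure. A minimal sketch of the before/after pattern, assuming the abbreviated layout below (the full struct ib_device_ops lives in include/rdma/ib_verbs.h, whose 303-line diff appears in the stat list but is not shown here):

    /* Sketch only -- struct contents abbreviated. */
    struct ib_device_ops {
    	int (*query_port)(struct ib_device *device, u8 port_num,
    			  struct ib_port_attr *port_attr);
    	/* ~90 more callbacks ... */
    };

    struct ib_device {
    	/* ... */
    	struct ib_device_ops ops;	/* replaces the loose pointers */
    };

    /* Every call site gains one extra hop: */
    err = device->query_port(device, port_num, port_attr);      /* before */
    err = device->ops.query_port(device, port_num, port_attr);  /* after  */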
-rw-r--r--  drivers/infiniband/core/cache.c | 12
-rw-r--r--  drivers/infiniband/core/core_priv.h | 12
-rw-r--r--  drivers/infiniband/core/cq.c | 6
-rw-r--r--  drivers/infiniband/core/device.c | 211
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 4
-rw-r--r--  drivers/infiniband/core/mad.c | 22
-rw-r--r--  drivers/infiniband/core/nldev.c | 4
-rw-r--r--  drivers/infiniband/core/opa_smi.h | 4
-rw-r--r--  drivers/infiniband/core/rdma_core.c | 6
-rw-r--r--  drivers/infiniband/core/security.c | 8
-rw-r--r--  drivers/infiniband/core/smi.h | 4
-rw-r--r--  drivers/infiniband/core/sysfs.c | 28
-rw-r--r--  drivers/infiniband/core/ucm.c | 2
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 60
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 14
-rw-r--r--  drivers/infiniband/core/uverbs_std_types.c | 2
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_counters.c | 10
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_cq.c | 6
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_dm.c | 6
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_flow_action.c | 14
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_mr.c | 4
-rw-r--r--  drivers/infiniband/core/uverbs_uapi.c | 3
-rw-r--r--  drivers/infiniband/core/verbs.c | 159
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 2
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.c | 16
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 4
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c | 8
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 6
-rw-r--r--  fs/cifs/smbdirect.c | 2
-rw-r--r--  include/rdma/ib_verbs.h | 303
-rw-r--r--  include/rdma/uverbs_ioctl.h | 12
-rw-r--r--  net/rds/ib.c | 4
-rw-r--r--  net/sunrpc/xprtrdma/fmr_ops.c | 2
37 files changed, 353 insertions, 617 deletions
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 5b2fce4a7091..22e20ed5a393 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -217,7 +217,7 @@ static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
 
 	if (rdma_cap_roce_gid_table(device, port_num) &&
 	    entry->state != GID_TABLE_ENTRY_INVALID)
-		device->del_gid(&entry->attr, &entry->context);
+		device->ops.del_gid(&entry->attr, &entry->context);
 
 	write_lock_irq(&table->rwlock);
 
@@ -324,7 +324,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
 		return -EINVAL;
 	}
 	if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
-		ret = attr->device->add_gid(attr, &entry->context);
+		ret = attr->device->ops.add_gid(attr, &entry->context);
 		if (ret) {
 			dev_err(&attr->device->dev,
 				"%s GID add failed port=%d index=%d\n",
@@ -548,8 +548,8 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 	unsigned long mask;
 	int ret;
 
-	if (ib_dev->get_netdev) {
-		idev = ib_dev->get_netdev(ib_dev, port);
+	if (ib_dev->ops.get_netdev) {
+		idev = ib_dev->ops.get_netdev(ib_dev, port);
 		if (idev && attr->ndev != idev) {
 			union ib_gid default_gid;
 
@@ -1296,9 +1296,9 @@ static int config_non_roce_gid_cache(struct ib_device *device,
 
 	mutex_lock(&table->lock);
 	for (i = 0; i < gid_tbl_len; ++i) {
-		if (!device->query_gid)
+		if (!device->ops.query_gid)
 			continue;
-		ret = device->query_gid(device, port, i, &gid_attr.gid);
+		ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
 		if (ret) {
 			dev_warn(&device->dev,
 				 "query_gid failed (%d) for index %d\n", ret,
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index cc7535c5e192..cea92624f9d4 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -215,10 +215,10 @@ static inline int ib_security_modify_qp(struct ib_qp *qp,
 				    int qp_attr_mask,
 				    struct ib_udata *udata)
 {
-	return qp->device->modify_qp(qp->real_qp,
-				     qp_attr,
-				     qp_attr_mask,
-				     udata);
+	return qp->device->ops.modify_qp(qp->real_qp,
+					 qp_attr,
+					 qp_attr_mask,
+					 udata);
 }
 
 static inline int ib_create_qp_security(struct ib_qp *qp,
@@ -280,10 +280,10 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
 {
 	struct ib_qp *qp;
 
-	if (!dev->create_qp)
+	if (!dev->ops.create_qp)
 		return ERR_PTR(-EOPNOTSUPP);
 
-	qp = dev->create_qp(pd, attr, udata);
+	qp = dev->ops.create_qp(pd, attr, udata);
 	if (IS_ERR(qp))
 		return qp;
 
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index b1e5365ddafa..7fb4f64ae933 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -145,7 +145,7 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
 	struct ib_cq *cq;
 	int ret = -ENOMEM;
 
-	cq = dev->create_cq(dev, &cq_attr, NULL, NULL);
+	cq = dev->ops.create_cq(dev, &cq_attr, NULL, NULL);
 	if (IS_ERR(cq))
 		return cq;
 
@@ -193,7 +193,7 @@ out_free_wc:
 	kfree(cq->wc);
 	rdma_restrack_del(&cq->res);
 out_destroy_cq:
-	cq->device->destroy_cq(cq);
+	cq->device->ops.destroy_cq(cq);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(__ib_alloc_cq);
@@ -225,7 +225,7 @@ void ib_free_cq(struct ib_cq *cq)
 
 	kfree(cq->wc);
 	rdma_restrack_del(&cq->res);
-	ret = cq->device->destroy_cq(cq);
+	ret = cq->device->ops.destroy_cq(cq);
 	WARN_ON_ONCE(ret);
 }
 EXPORT_SYMBOL(ib_free_cq);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 108c62d875af..47ab34ee1a9d 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -96,7 +96,7 @@ static struct notifier_block ibdev_lsm_nb = {
 
 static int ib_device_check_mandatory(struct ib_device *device)
 {
-#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
+#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
 	static const struct {
 		size_t offset;
 		char *name;
@@ -122,7 +122,8 @@ static int ib_device_check_mandatory(struct ib_device *device)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
-		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
+		if (!*(void **) ((void *) &device->ops +
+				 mandatory_table[i].offset)) {
 			dev_warn(&device->dev,
 				 "Device is missing mandatory function %s\n",
 				 mandatory_table[i].name);
@@ -373,8 +374,8 @@ static int read_port_immutable(struct ib_device *device)
 		return -ENOMEM;
 
 	for (port = start_port; port <= end_port; ++port) {
-		ret = device->get_port_immutable(device, port,
-						 &device->port_immutable[port]);
+		ret = device->ops.get_port_immutable(
+			device, port, &device->port_immutable[port]);
 		if (ret)
 			return ret;
 
@@ -386,8 +387,8 @@ static int read_port_immutable(struct ib_device *device)
 
 void ib_get_device_fw_str(struct ib_device *dev, char *str)
 {
-	if (dev->get_dev_fw_str)
-		dev->get_dev_fw_str(dev, str);
+	if (dev->ops.get_dev_fw_str)
+		dev->ops.get_dev_fw_str(dev, str);
 	else
 		str[0] = '\0';
 }
@@ -536,7 +537,7 @@ static int setup_device(struct ib_device *device)
 	}
 
 	memset(&device->attrs, 0, sizeof(device->attrs));
-	ret = device->query_device(device, &device->attrs, &uhw);
+	ret = device->ops.query_device(device, &device->attrs, &uhw);
 	if (ret) {
 		dev_warn(&device->dev,
 			 "Couldn't query the device attributes\n");
@@ -923,14 +924,14 @@ int ib_query_port(struct ib_device *device,
 		return -EINVAL;
 
 	memset(port_attr, 0, sizeof(*port_attr));
-	err = device->query_port(device, port_num, port_attr);
+	err = device->ops.query_port(device, port_num, port_attr);
 	if (err || port_attr->subnet_prefix)
 		return err;
 
 	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
 		return 0;
 
-	err = device->query_gid(device, port_num, 0, &gid);
+	err = device->ops.query_gid(device, port_num, 0, &gid);
 	if (err)
 		return err;
 
@@ -964,8 +965,8 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev,
 		if (rdma_protocol_roce(ib_dev, port)) {
 			struct net_device *idev = NULL;
 
-			if (ib_dev->get_netdev)
-				idev = ib_dev->get_netdev(ib_dev, port);
+			if (ib_dev->ops.get_netdev)
+				idev = ib_dev->ops.get_netdev(ib_dev, port);
 
 			if (idev &&
 			    idev->reg_state >= NETREG_UNREGISTERED) {
@@ -1045,7 +1046,7 @@ int ib_query_pkey(struct ib_device *device,
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
 
-	return device->query_pkey(device, port_num, index, pkey);
+	return device->ops.query_pkey(device, port_num, index, pkey);
 }
 EXPORT_SYMBOL(ib_query_pkey);
 
@@ -1062,11 +1063,11 @@ int ib_modify_device(struct ib_device *device,
 		     int device_modify_mask,
 		     struct ib_device_modify *device_modify)
 {
-	if (!device->modify_device)
+	if (!device->ops.modify_device)
 		return -ENOSYS;
 
-	return device->modify_device(device, device_modify_mask,
-				     device_modify);
+	return device->ops.modify_device(device, device_modify_mask,
+					 device_modify);
 }
 EXPORT_SYMBOL(ib_modify_device);
 
@@ -1090,9 +1091,10 @@ int ib_modify_port(struct ib_device *device,
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
 
-	if (device->modify_port)
-		rc = device->modify_port(device, port_num, port_modify_mask,
-					 port_modify);
+	if (device->ops.modify_port)
+		rc = device->ops.modify_port(device, port_num,
+					     port_modify_mask,
+					     port_modify);
 	else
 		rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
 	return rc;
@@ -1221,6 +1223,7 @@ EXPORT_SYMBOL(ib_get_net_dev_by_params);
 
 void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 {
+	struct ib_device_ops *dev_ops = &dev->ops;
 #define SET_DEVICE_OP(ptr, name) \
 	do { \
 		if (ops->name) \
@@ -1228,92 +1231,92 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 			(ptr)->name = ops->name; \
 	} while (0)
 
-	SET_DEVICE_OP(dev, add_gid);
-	SET_DEVICE_OP(dev, alloc_dm);
-	SET_DEVICE_OP(dev, alloc_fmr);
-	SET_DEVICE_OP(dev, alloc_hw_stats);
-	SET_DEVICE_OP(dev, alloc_mr);
-	SET_DEVICE_OP(dev, alloc_mw);
-	SET_DEVICE_OP(dev, alloc_pd);
-	SET_DEVICE_OP(dev, alloc_rdma_netdev);
-	SET_DEVICE_OP(dev, alloc_ucontext);
-	SET_DEVICE_OP(dev, alloc_xrcd);
-	SET_DEVICE_OP(dev, attach_mcast);
-	SET_DEVICE_OP(dev, check_mr_status);
-	SET_DEVICE_OP(dev, create_ah);
-	SET_DEVICE_OP(dev, create_counters);
-	SET_DEVICE_OP(dev, create_cq);
-	SET_DEVICE_OP(dev, create_flow);
-	SET_DEVICE_OP(dev, create_flow_action_esp);
-	SET_DEVICE_OP(dev, create_qp);
-	SET_DEVICE_OP(dev, create_rwq_ind_table);
-	SET_DEVICE_OP(dev, create_srq);
-	SET_DEVICE_OP(dev, create_wq);
-	SET_DEVICE_OP(dev, dealloc_dm);
-	SET_DEVICE_OP(dev, dealloc_fmr);
-	SET_DEVICE_OP(dev, dealloc_mw);
-	SET_DEVICE_OP(dev, dealloc_pd);
-	SET_DEVICE_OP(dev, dealloc_ucontext);
-	SET_DEVICE_OP(dev, dealloc_xrcd);
-	SET_DEVICE_OP(dev, del_gid);
-	SET_DEVICE_OP(dev, dereg_mr);
-	SET_DEVICE_OP(dev, destroy_ah);
-	SET_DEVICE_OP(dev, destroy_counters);
-	SET_DEVICE_OP(dev, destroy_cq);
-	SET_DEVICE_OP(dev, destroy_flow);
-	SET_DEVICE_OP(dev, destroy_flow_action);
-	SET_DEVICE_OP(dev, destroy_qp);
-	SET_DEVICE_OP(dev, destroy_rwq_ind_table);
-	SET_DEVICE_OP(dev, destroy_srq);
-	SET_DEVICE_OP(dev, destroy_wq);
-	SET_DEVICE_OP(dev, detach_mcast);
-	SET_DEVICE_OP(dev, disassociate_ucontext);
-	SET_DEVICE_OP(dev, drain_rq);
-	SET_DEVICE_OP(dev, drain_sq);
-	SET_DEVICE_OP(dev, get_dev_fw_str);
-	SET_DEVICE_OP(dev, get_dma_mr);
-	SET_DEVICE_OP(dev, get_hw_stats);
-	SET_DEVICE_OP(dev, get_link_layer);
-	SET_DEVICE_OP(dev, get_netdev);
-	SET_DEVICE_OP(dev, get_port_immutable);
-	SET_DEVICE_OP(dev, get_vector_affinity);
-	SET_DEVICE_OP(dev, get_vf_config);
-	SET_DEVICE_OP(dev, get_vf_stats);
-	SET_DEVICE_OP(dev, map_mr_sg);
-	SET_DEVICE_OP(dev, map_phys_fmr);
-	SET_DEVICE_OP(dev, mmap);
-	SET_DEVICE_OP(dev, modify_ah);
-	SET_DEVICE_OP(dev, modify_cq);
-	SET_DEVICE_OP(dev, modify_device);
-	SET_DEVICE_OP(dev, modify_flow_action_esp);
-	SET_DEVICE_OP(dev, modify_port);
-	SET_DEVICE_OP(dev, modify_qp);
-	SET_DEVICE_OP(dev, modify_srq);
-	SET_DEVICE_OP(dev, modify_wq);
-	SET_DEVICE_OP(dev, peek_cq);
-	SET_DEVICE_OP(dev, poll_cq);
-	SET_DEVICE_OP(dev, post_recv);
-	SET_DEVICE_OP(dev, post_send);
-	SET_DEVICE_OP(dev, post_srq_recv);
-	SET_DEVICE_OP(dev, process_mad);
-	SET_DEVICE_OP(dev, query_ah);
-	SET_DEVICE_OP(dev, query_device);
-	SET_DEVICE_OP(dev, query_gid);
-	SET_DEVICE_OP(dev, query_pkey);
-	SET_DEVICE_OP(dev, query_port);
-	SET_DEVICE_OP(dev, query_qp);
-	SET_DEVICE_OP(dev, query_srq);
-	SET_DEVICE_OP(dev, rdma_netdev_get_params);
-	SET_DEVICE_OP(dev, read_counters);
-	SET_DEVICE_OP(dev, reg_dm_mr);
-	SET_DEVICE_OP(dev, reg_user_mr);
-	SET_DEVICE_OP(dev, req_ncomp_notif);
-	SET_DEVICE_OP(dev, req_notify_cq);
-	SET_DEVICE_OP(dev, rereg_user_mr);
-	SET_DEVICE_OP(dev, resize_cq);
-	SET_DEVICE_OP(dev, set_vf_guid);
-	SET_DEVICE_OP(dev, set_vf_link_state);
-	SET_DEVICE_OP(dev, unmap_fmr);
+	SET_DEVICE_OP(dev_ops, add_gid);
+	SET_DEVICE_OP(dev_ops, alloc_dm);
+	SET_DEVICE_OP(dev_ops, alloc_fmr);
+	SET_DEVICE_OP(dev_ops, alloc_hw_stats);
+	SET_DEVICE_OP(dev_ops, alloc_mr);
+	SET_DEVICE_OP(dev_ops, alloc_mw);
+	SET_DEVICE_OP(dev_ops, alloc_pd);
+	SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
+	SET_DEVICE_OP(dev_ops, alloc_ucontext);
+	SET_DEVICE_OP(dev_ops, alloc_xrcd);
+	SET_DEVICE_OP(dev_ops, attach_mcast);
+	SET_DEVICE_OP(dev_ops, check_mr_status);
+	SET_DEVICE_OP(dev_ops, create_ah);
+	SET_DEVICE_OP(dev_ops, create_counters);
+	SET_DEVICE_OP(dev_ops, create_cq);
+	SET_DEVICE_OP(dev_ops, create_flow);
+	SET_DEVICE_OP(dev_ops, create_flow_action_esp);
+	SET_DEVICE_OP(dev_ops, create_qp);
+	SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
+	SET_DEVICE_OP(dev_ops, create_srq);
+	SET_DEVICE_OP(dev_ops, create_wq);
+	SET_DEVICE_OP(dev_ops, dealloc_dm);
+	SET_DEVICE_OP(dev_ops, dealloc_fmr);
+	SET_DEVICE_OP(dev_ops, dealloc_mw);
+	SET_DEVICE_OP(dev_ops, dealloc_pd);
+	SET_DEVICE_OP(dev_ops, dealloc_ucontext);
+	SET_DEVICE_OP(dev_ops, dealloc_xrcd);
+	SET_DEVICE_OP(dev_ops, del_gid);
+	SET_DEVICE_OP(dev_ops, dereg_mr);
+	SET_DEVICE_OP(dev_ops, destroy_ah);
+	SET_DEVICE_OP(dev_ops, destroy_counters);
+	SET_DEVICE_OP(dev_ops, destroy_cq);
+	SET_DEVICE_OP(dev_ops, destroy_flow);
+	SET_DEVICE_OP(dev_ops, destroy_flow_action);
+	SET_DEVICE_OP(dev_ops, destroy_qp);
+	SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
+	SET_DEVICE_OP(dev_ops, destroy_srq);
+	SET_DEVICE_OP(dev_ops, destroy_wq);
+	SET_DEVICE_OP(dev_ops, detach_mcast);
+	SET_DEVICE_OP(dev_ops, disassociate_ucontext);
+	SET_DEVICE_OP(dev_ops, drain_rq);
+	SET_DEVICE_OP(dev_ops, drain_sq);
+	SET_DEVICE_OP(dev_ops, get_dev_fw_str);
+	SET_DEVICE_OP(dev_ops, get_dma_mr);
+	SET_DEVICE_OP(dev_ops, get_hw_stats);
+	SET_DEVICE_OP(dev_ops, get_link_layer);
+	SET_DEVICE_OP(dev_ops, get_netdev);
+	SET_DEVICE_OP(dev_ops, get_port_immutable);
+	SET_DEVICE_OP(dev_ops, get_vector_affinity);
+	SET_DEVICE_OP(dev_ops, get_vf_config);
+	SET_DEVICE_OP(dev_ops, get_vf_stats);
+	SET_DEVICE_OP(dev_ops, map_mr_sg);
+	SET_DEVICE_OP(dev_ops, map_phys_fmr);
+	SET_DEVICE_OP(dev_ops, mmap);
+	SET_DEVICE_OP(dev_ops, modify_ah);
+	SET_DEVICE_OP(dev_ops, modify_cq);
+	SET_DEVICE_OP(dev_ops, modify_device);
+	SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
+	SET_DEVICE_OP(dev_ops, modify_port);
+	SET_DEVICE_OP(dev_ops, modify_qp);
+	SET_DEVICE_OP(dev_ops, modify_srq);
+	SET_DEVICE_OP(dev_ops, modify_wq);
+	SET_DEVICE_OP(dev_ops, peek_cq);
+	SET_DEVICE_OP(dev_ops, poll_cq);
+	SET_DEVICE_OP(dev_ops, post_recv);
+	SET_DEVICE_OP(dev_ops, post_send);
+	SET_DEVICE_OP(dev_ops, post_srq_recv);
+	SET_DEVICE_OP(dev_ops, process_mad);
+	SET_DEVICE_OP(dev_ops, query_ah);
+	SET_DEVICE_OP(dev_ops, query_device);
+	SET_DEVICE_OP(dev_ops, query_gid);
+	SET_DEVICE_OP(dev_ops, query_pkey);
+	SET_DEVICE_OP(dev_ops, query_port);
+	SET_DEVICE_OP(dev_ops, query_qp);
+	SET_DEVICE_OP(dev_ops, query_srq);
+	SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
+	SET_DEVICE_OP(dev_ops, read_counters);
+	SET_DEVICE_OP(dev_ops, reg_dm_mr);
+	SET_DEVICE_OP(dev_ops, reg_user_mr);
+	SET_DEVICE_OP(dev_ops, req_ncomp_notif);
+	SET_DEVICE_OP(dev_ops, req_notify_cq);
+	SET_DEVICE_OP(dev_ops, rereg_user_mr);
+	SET_DEVICE_OP(dev_ops, resize_cq);
+	SET_DEVICE_OP(dev_ops, set_vf_guid);
+	SET_DEVICE_OP(dev_ops, set_vf_link_state);
+	SET_DEVICE_OP(dev_ops, unmap_fmr);
 }
 EXPORT_SYMBOL(ib_set_device_ops);
 
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index b00dfd2ad31e..7d841b689a1e 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -211,8 +211,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 		return ERR_PTR(-EINVAL);
 
 	device = pd->device;
-	if (!device->alloc_fmr || !device->dealloc_fmr ||
-	    !device->map_phys_fmr || !device->unmap_fmr) {
+	if (!device->ops.alloc_fmr || !device->ops.dealloc_fmr ||
+	    !device->ops.map_phys_fmr || !device->ops.unmap_fmr) {
 		dev_info(&device->dev, "Device does not support FMRs\n");
 		return ERR_PTR(-ENOSYS);
 	}
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index d7025cd5be28..7870823bac47 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -888,10 +888,10 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	}
 
 	/* No GRH for DR SMP */
-	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
-				  (const struct ib_mad_hdr *)smp, mad_size,
-				  (struct ib_mad_hdr *)mad_priv->mad,
-				  &mad_size, &out_mad_pkey_index);
+	ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
+				      (const struct ib_mad_hdr *)smp, mad_size,
+				      (struct ib_mad_hdr *)mad_priv->mad,
+				      &mad_size, &out_mad_pkey_index);
 	switch (ret)
 	{
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
@@ -2305,14 +2305,12 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	}
 
 	/* Give driver "right of first refusal" on incoming MAD */
-	if (port_priv->device->process_mad) {
-		ret = port_priv->device->process_mad(port_priv->device, 0,
-						     port_priv->port_num,
-						     wc, &recv->grh,
-						     (const struct ib_mad_hdr *)recv->mad,
-						     recv->mad_size,
-						     (struct ib_mad_hdr *)response->mad,
-						     &mad_size, &resp_mad_pkey_index);
+	if (port_priv->device->ops.process_mad) {
+		ret = port_priv->device->ops.process_mad(
+			port_priv->device, 0, port_priv->port_num, wc,
+			&recv->grh, (const struct ib_mad_hdr *)recv->mad,
+			recv->mad_size, (struct ib_mad_hdr *)response->mad,
+			&mad_size, &resp_mad_pkey_index);
 
 		if (opa)
 			wc->pkey_index = resp_mad_pkey_index;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 9abbadb9e366..093bbfcdc53b 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -259,8 +259,8 @@ static int fill_port_info(struct sk_buff *msg,
 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
 		return -EMSGSIZE;
 
-	if (device->get_netdev)
-		netdev = device->get_netdev(device, port);
+	if (device->ops.get_netdev)
+		netdev = device->ops.get_netdev(device, port);
 
 	if (netdev && net_eq(dev_net(netdev), net)) {
 		ret = nla_put_u32(msg,
diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h
index 3bfab3505a29..af4879bdf3d6 100644
--- a/drivers/infiniband/core/opa_smi.h
+++ b/drivers/infiniband/core/opa_smi.h
@@ -55,7 +55,7 @@ static inline enum smi_action opa_smi_check_local_smp(struct opa_smp *smp,
 {
 	/* C14-9:3 -- We're at the end of the DR segment of path */
 	/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
-	return (device->process_mad &&
+	return (device->ops.process_mad &&
 		!opa_get_smp_direction(smp) &&
 		(smp->hop_ptr == smp->hop_cnt + 1)) ?
 		IB_SMI_HANDLE : IB_SMI_DISCARD;
@@ -70,7 +70,7 @@ static inline enum smi_action opa_smi_check_local_returning_smp(struct opa_smp *
 {
 	/* C14-13:3 -- We're at the end of the DR segment of path */
 	/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
-	return (device->process_mad &&
+	return (device->ops.process_mad &&
 		opa_get_smp_direction(smp) &&
 		!smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD;
 }
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 7d2f1ef75025..6c4747e61d2b 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -820,8 +820,8 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
 	 */
 	if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
 		uverbs_user_mmap_disassociate(ufile);
-		if (ib_dev->disassociate_ucontext)
-			ib_dev->disassociate_ucontext(ucontext);
+		if (ib_dev->ops.disassociate_ucontext)
+			ib_dev->ops.disassociate_ucontext(ucontext);
 	}
 
 	ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
@@ -833,7 +833,7 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
 	 * FIXME: Drivers are not permitted to fail dealloc_ucontext, remove
 	 * the error return.
 	 */
-	ret = ib_dev->dealloc_ucontext(ucontext);
+	ret = ib_dev->ops.dealloc_ucontext(ucontext);
 	WARN_ON(ret);
 
 	ufile->ucontext = NULL;
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 1143c0448666..1efadbccf394 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -626,10 +626,10 @@ int ib_security_modify_qp(struct ib_qp *qp,
 	}
 
 	if (!ret)
-		ret = real_qp->device->modify_qp(real_qp,
-						 qp_attr,
-						 qp_attr_mask,
-						 udata);
+		ret = real_qp->device->ops.modify_qp(real_qp,
+						     qp_attr,
+						     qp_attr_mask,
+						     udata);
 
 	if (new_pps) {
 		/* Clean up the lists and free the appropriate
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
index 33c91c8a16e9..91d9b353ab85 100644
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -67,7 +67,7 @@ static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
 {
 	/* C14-9:3 -- We're at the end of the DR segment of path */
 	/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
-	return ((device->process_mad &&
+	return ((device->ops.process_mad &&
 		 !ib_get_smp_direction(smp) &&
 		 (smp->hop_ptr == smp->hop_cnt + 1)) ?
 		 IB_SMI_HANDLE : IB_SMI_DISCARD);
@@ -82,7 +82,7 @@ static inline enum smi_action smi_check_local_returning_smp(struct ib_smp *smp,
 {
 	/* C14-13:3 -- We're at the end of the DR segment of path */
 	/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
-	return ((device->process_mad &&
+	return ((device->ops.process_mad &&
 		 ib_get_smp_direction(smp) &&
 		 !smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD);
 }
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 6fcce2c206c6..80f68eb0ba5c 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -462,7 +462,7 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
 	u16 out_mad_pkey_index = 0;
 	ssize_t ret;
 
-	if (!dev->process_mad)
+	if (!dev->ops.process_mad)
 		return -ENOSYS;
 
 	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -481,11 +481,11 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
 	if (attr != IB_PMA_CLASS_PORT_INFO)
 		in_mad->data[41] = port_num;	/* PortSelect field */
 
-	if ((dev->process_mad(dev, IB_MAD_IGNORE_MKEY,
+	if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY,
 	     port_num, NULL, NULL,
 	     (const struct ib_mad_hdr *)in_mad, mad_size,
 	     (struct ib_mad_hdr *)out_mad, &mad_size,
 	     &out_mad_pkey_index) &
 	    (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
 	    (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
 		ret = -EINVAL;
@@ -786,7 +786,7 @@ static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
 
 	if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan))
 		return 0;
-	ret = dev->get_hw_stats(dev, stats, port_num, index);
+	ret = dev->ops.get_hw_stats(dev, stats, port_num, index);
 	if (ret < 0)
 		return ret;
 	if (ret == stats->num_counters)
@@ -946,7 +946,7 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
 	struct rdma_hw_stats *stats;
 	int i, ret;
 
-	stats = device->alloc_hw_stats(device, port_num);
+	stats = device->ops.alloc_hw_stats(device, port_num);
 
 	if (!stats)
 		return;
@@ -964,8 +964,8 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
 	if (!hsag)
 		goto err_free_stats;
 
-	ret = device->get_hw_stats(device, stats, port_num,
-				   stats->num_counters);
+	ret = device->ops.get_hw_stats(device, stats, port_num,
+				       stats->num_counters);
 	if (ret != stats->num_counters)
 		goto err_free_hsag;
 
@@ -1057,7 +1057,7 @@ static int add_port(struct ib_device *device, int port_num,
 		goto err_put;
 	}
 
-	if (device->process_mad) {
+	if (device->ops.process_mad) {
 		p->pma_table = get_counter_table(device, port_num);
 		ret = sysfs_create_group(&p->kobj, p->pma_table);
 		if (ret)
@@ -1124,7 +1124,7 @@ static int add_port(struct ib_device *device, int port_num,
 	 * port, so holder should be device. Therefore skip per port conunter
 	 * initialization.
 	 */
-	if (device->alloc_hw_stats && port_num)
+	if (device->ops.alloc_hw_stats && port_num)
 		setup_hw_stats(device, p, port_num);
 
 	list_add_tail(&p->kobj.entry, &device->port_list);
@@ -1245,7 +1245,7 @@ static ssize_t node_desc_store(struct device *device,
 	struct ib_device_modify desc = {};
 	int ret;
 
-	if (!dev->modify_device)
+	if (!dev->ops.modify_device)
 		return -EIO;
 
 	memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
@@ -1341,7 +1341,7 @@ int ib_device_register_sysfs(struct ib_device *device,
 		}
 	}
 
-	if (device->alloc_hw_stats)
+	if (device->ops.alloc_hw_stats)
 		setup_hw_stats(device, NULL, 0);
 
 	return 0;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 73332b9a25b5..7541fbaf58a3 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1242,7 +1242,7 @@ static void ib_ucm_add_one(struct ib_device *device)
 	dev_t base;
 	struct ib_ucm_device *ucm_dev;
 
-	if (!device->alloc_ucontext || !rdma_cap_ib_cm(device, 1))
+	if (!device->ops.alloc_ucontext || !rdma_cap_ib_cm(device, 1))
 		return;
 
 	ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 7f10eade7653..357d33120ca4 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -220,7 +220,7 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
 	if (ret)
 		goto err;
 
-	ucontext = ib_dev->alloc_ucontext(ib_dev, &attrs->driver_udata);
+	ucontext = ib_dev->ops.alloc_ucontext(ib_dev, &attrs->driver_udata);
 	if (IS_ERR(ucontext)) {
 		ret = PTR_ERR(ucontext);
 		goto err_alloc;
@@ -282,7 +282,7 @@ err_fd:
 	put_unused_fd(resp.async_fd);
 
 err_free:
-	ib_dev->dealloc_ucontext(ucontext);
+	ib_dev->ops.dealloc_ucontext(ucontext);
 
 err_alloc:
 	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
@@ -457,7 +457,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	pd = ib_dev->alloc_pd(ib_dev, uobj->context, &attrs->driver_udata);
+	pd = ib_dev->ops.alloc_pd(ib_dev, uobj->context, &attrs->driver_udata);
 	if (IS_ERR(pd)) {
 		ret = PTR_ERR(pd);
 		goto err;
@@ -634,8 +634,8 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
 	}
 
 	if (!xrcd) {
-		xrcd = ib_dev->alloc_xrcd(ib_dev, obj->uobject.context,
-					  &attrs->driver_udata);
+		xrcd = ib_dev->ops.alloc_xrcd(ib_dev, obj->uobject.context,
+					      &attrs->driver_udata);
 		if (IS_ERR(xrcd)) {
 			ret = PTR_ERR(xrcd);
 			goto err;
@@ -774,8 +774,9 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
 		}
 	}
 
-	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
-				     cmd.access_flags, &attrs->driver_udata);
+	mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
+					 cmd.access_flags,
+					 &attrs->driver_udata);
 	if (IS_ERR(mr)) {
 		ret = PTR_ERR(mr);
 		goto err_put;
@@ -864,9 +865,10 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
 	}
 
 	old_pd = mr->pd;
-	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start, cmd.length,
-					cmd.hca_va, cmd.access_flags, pd,
-					&attrs->driver_udata);
+	ret = mr->device->ops.rereg_user_mr(mr, cmd.flags, cmd.start,
+					    cmd.length, cmd.hca_va,
+					    cmd.access_flags, pd,
+					    &attrs->driver_udata);
 	if (!ret) {
 		if (cmd.flags & IB_MR_REREG_PD) {
 			atomic_inc(&pd->usecnt);
@@ -929,7 +931,7 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
 		goto err_free;
 	}
 
-	mw = pd->device->alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
+	mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
 	if (IS_ERR(mw)) {
 		ret = PTR_ERR(mw);
 		goto err_put;
@@ -1043,8 +1045,8 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
 	attr.comp_vector = cmd->comp_vector;
 	attr.flags = cmd->flags;
 
-	cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context,
-			       &attrs->driver_udata);
+	cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context,
+				   &attrs->driver_udata);
 	if (IS_ERR(cq)) {
 		ret = PTR_ERR(cq);
 		goto err_file;
@@ -1144,7 +1146,7 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
 	if (!cq)
 		return -EINVAL;
 
-	ret = cq->device->resize_cq(cq, cmd.cqe, &attrs->driver_udata);
+	ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
 	if (ret)
 		goto out;
 
@@ -2188,7 +2190,7 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
 	}
 
 	resp.bad_wr = 0;
-	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
+	ret = qp->device->ops.post_send(qp->real_qp, wr, &bad_wr);
 	if (ret)
 		for (next = wr; next; next = next->next) {
 			++resp.bad_wr;
@@ -2341,7 +2343,7 @@ static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
 	}
 
 	resp.bad_wr = 0;
-	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
+	ret = qp->device->ops.post_recv(qp->real_qp, wr, &bad_wr);
 
 	uobj_put_obj_read(qp);
 	if (ret) {
@@ -2391,7 +2393,7 @@ static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
 	}
 
 	resp.bad_wr = 0;
-	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
+	ret = srq->device->ops.post_srq_recv(srq, wr, &bad_wr);
 
 	uobj_put_obj_read(srq);
 
@@ -2961,7 +2963,7 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
 	obj->uevent.events_reported = 0;
 	INIT_LIST_HEAD(&obj->uevent.event_list);
 
-	wq = pd->device->create_wq(pd, &wq_init_attr, &attrs->driver_udata);
+	wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
 	if (IS_ERR(wq)) {
 		err = PTR_ERR(wq);
 		goto err_put_cq;
@@ -3061,8 +3063,8 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
 		wq_attr.flags = cmd.flags;
 		wq_attr.flags_mask = cmd.flags_mask;
 	}
-	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask,
-				    &attrs->driver_udata);
+	ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
+					&attrs->driver_udata);
 	uobj_put_obj_read(wq);
 	return ret;
 }
@@ -3135,8 +3137,8 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
 	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
 	init_attr.ind_tbl = wqs;
 
-	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr,
-						   &attrs->driver_udata);
+	rwq_ind_tbl = ib_dev->ops.create_rwq_ind_table(ib_dev, &init_attr,
+						       &attrs->driver_udata);
 
 	if (IS_ERR(rwq_ind_tbl)) {
 		err = PTR_ERR(rwq_ind_tbl);
@@ -3323,8 +3325,8 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
 		goto err_free;
 	}
 
-	flow_id = qp->device->create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER,
-					  &attrs->driver_udata);
+	flow_id = qp->device->ops.create_flow(
+		qp, flow_attr, IB_FLOW_DOMAIN_USER, &attrs->driver_udata);
 
 	if (IS_ERR(flow_id)) {
 		err = PTR_ERR(flow_id);
@@ -3346,7 +3348,7 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
 	kfree(kern_flow_attr);
 	return uobj_alloc_commit(uobj);
 err_copy:
-	if (!qp->device->destroy_flow(flow_id))
+	if (!qp->device->ops.destroy_flow(flow_id))
 		atomic_dec(&qp->usecnt);
 err_free:
 	ib_uverbs_flow_resources_free(uflow_res);
@@ -3441,7 +3443,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
 	obj->uevent.events_reported = 0;
 	INIT_LIST_HEAD(&obj->uevent.event_list);
 
-	srq = pd->device->create_srq(pd, &attr, udata);
+	srq = pd->device->ops.create_srq(pd, &attr, udata);
 	if (IS_ERR(srq)) {
 		ret = PTR_ERR(srq);
 		goto err_put;
@@ -3563,8 +3565,8 @@ static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
 	attr.max_wr = cmd.max_wr;
 	attr.srq_limit = cmd.srq_limit;
 
-	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask,
-				      &attrs->driver_udata);
+	ret = srq->device->ops.modify_srq(srq, &attr, cmd.attr_mask,
+					  &attrs->driver_udata);
 
 	uobj_put_obj_read(srq);
 
@@ -3652,7 +3654,7 @@ static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
 	if (cmd.reserved)
 		return -EINVAL;
 
-	err = ib_dev->query_device(ib_dev, &attr, &attrs->driver_udata);
+	err = ib_dev->ops.query_device(ib_dev, &attr, &attrs->driver_udata);
 	if (err)
 		return err;
 
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 96a5f89bbb75..9f9172eb1512 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -106,7 +106,7 @@ int uverbs_dealloc_mw(struct ib_mw *mw)
 	struct ib_pd *pd = mw->pd;
 	int ret;
 
-	ret = mw->device->dealloc_mw(mw);
+	ret = mw->device->ops.dealloc_mw(mw);
 	if (!ret)
 		atomic_dec(&pd->usecnt);
 	return ret;
@@ -197,7 +197,7 @@ void ib_uverbs_release_file(struct kref *ref)
 	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
 	ib_dev = srcu_dereference(file->device->ib_dev,
 				  &file->device->disassociate_srcu);
-	if (ib_dev && !ib_dev->disassociate_ucontext)
+	if (ib_dev && !ib_dev->ops.disassociate_ucontext)
 		module_put(ib_dev->owner);
 	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
 
@@ -774,7 +774,7 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
 		goto out;
 	}
 
-	ret = ucontext->device->mmap(ucontext, vma);
+	ret = ucontext->device->ops.mmap(ucontext, vma);
 out:
 	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
 	return ret;
@@ -1036,7 +1036,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
 	/* In case IB device supports disassociate ucontext, there is no hard
 	 * dependency between uverbs device and its low level device.
 	 */
-	module_dependent = !(ib_dev->disassociate_ucontext);
+	module_dependent = !(ib_dev->ops.disassociate_ucontext);
 
 	if (module_dependent) {
 		if (!try_module_get(ib_dev->owner)) {
@@ -1203,7 +1203,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
 	struct ib_uverbs_device *uverbs_dev;
 	int ret;
 
-	if (!device->alloc_ucontext)
+	if (!device->ops.alloc_ucontext)
 		return;
 
 	uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
@@ -1249,7 +1249,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
 	dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);
 
 	cdev_init(&uverbs_dev->cdev,
-		  device->mmap ? &uverbs_mmap_fops : &uverbs_fops);
+		  device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
 	uverbs_dev->cdev.owner = THIS_MODULE;
 
 	ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
@@ -1337,7 +1337,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
 	cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
 	ida_free(&uverbs_ida, uverbs_dev->devnum);
 
-	if (device->disassociate_ucontext) {
+	if (device->ops.disassociate_ucontext) {
 		/* We disassociate HW resources and immediately return.
 		 * Userspace will see a EIO errno for all future access.
 		 * Upon returning, ib_device may be freed internally and is not
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 063aff9e7a04..424f325f8cba 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -54,7 +54,7 @@ static int uverbs_free_flow(struct ib_uobject *uobject,
 	struct ib_qp *qp = flow->qp;
 	int ret;
 
-	ret = flow->device->destroy_flow(flow);
+	ret = flow->device->ops.destroy_flow(flow);
 	if (!ret) {
 		if (qp)
 			atomic_dec(&qp->usecnt);
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
index 8835bad5c6dd..309c5e80988d 100644
--- a/drivers/infiniband/core/uverbs_std_types_counters.c
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -44,7 +44,7 @@ static int uverbs_free_counters(struct ib_uobject *uobject,
 	if (ret)
 		return ret;
 
-	return counters->device->destroy_counters(counters);
+	return counters->device->ops.destroy_counters(counters);
 }
 
 static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
@@ -61,10 +61,10 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
 	 * have the ability to remove methods from parse tree once
 	 * such condition is met.
 	 */
-	if (!ib_dev->create_counters)
+	if (!ib_dev->ops.create_counters)
 		return -EOPNOTSUPP;
 
-	counters = ib_dev->create_counters(ib_dev, attrs);
+	counters = ib_dev->ops.create_counters(ib_dev, attrs);
 	if (IS_ERR(counters)) {
 		ret = PTR_ERR(counters);
 		goto err_create_counters;
@@ -90,7 +90,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
 		uverbs_attr_get_obj(attrs, UVERBS_ATTR_READ_COUNTERS_HANDLE);
 	int ret;
 
-	if (!counters->device->read_counters)
+	if (!counters->device->ops.read_counters)
 		return -EOPNOTSUPP;
 
 	if (!atomic_read(&counters->usecnt))
@@ -109,7 +109,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
 	if (IS_ERR(read_attr.counters_buff))
 		return PTR_ERR(read_attr.counters_buff);
 
-	ret = counters->device->read_counters(counters, &read_attr, attrs);
+	ret = counters->device->ops.read_counters(counters, &read_attr, attrs);
 	if (ret)
 		return ret;
 
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index 859518eab583..42df59635a3c 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -71,7 +71,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
 	struct ib_uverbs_completion_event_file *ev_file = NULL;
 	struct ib_uobject *ev_file_uobj;
 
-	if (!ib_dev->create_cq || !ib_dev->destroy_cq)
+	if (!ib_dev->ops.create_cq || !ib_dev->ops.destroy_cq)
 		return -EOPNOTSUPP;
 
 	ret = uverbs_copy_from(&attr.comp_vector, attrs,
@@ -110,8 +110,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
 	INIT_LIST_HEAD(&obj->comp_list);
 	INIT_LIST_HEAD(&obj->async_list);
 
-	cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context,
-			       &attrs->driver_udata);
+	cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context,
+				   &attrs->driver_udata);
 	if (IS_ERR(cq)) {
 		ret = PTR_ERR(cq);
 		goto err_event_file;
diff --git a/drivers/infiniband/core/uverbs_std_types_dm.c b/drivers/infiniband/core/uverbs_std_types_dm.c
index 658261b8f08e..2ef70637bee1 100644
--- a/drivers/infiniband/core/uverbs_std_types_dm.c
+++ b/drivers/infiniband/core/uverbs_std_types_dm.c
@@ -43,7 +43,7 @@ static int uverbs_free_dm(struct ib_uobject *uobject,
 	if (ret)
 		return ret;
 
-	return dm->device->dealloc_dm(dm);
+	return dm->device->ops.dealloc_dm(dm);
 }
 
 static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(
@@ -57,7 +57,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(
 	struct ib_dm *dm;
 	int ret;
 
-	if (!ib_dev->alloc_dm)
+	if (!ib_dev->ops.alloc_dm)
 		return -EOPNOTSUPP;
 
 	ret = uverbs_copy_from(&attr.length, attrs,
@@ -70,7 +70,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(
 	if (ret)
 		return ret;
 
-	dm = ib_dev->alloc_dm(ib_dev, uobj->context, &attr, attrs);
+	dm = ib_dev->ops.alloc_dm(ib_dev, uobj->context, &attr, attrs);
 	if (IS_ERR(dm))
 		return PTR_ERR(dm);
 
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index e4d01fb5335d..4962b87fa600 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -43,7 +43,7 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject,
 	if (ret)
 		return ret;
 
-	return action->device->destroy_flow_action(action);
+	return action->device->ops.destroy_flow_action(action);
 }
 
 static u64 esp_flags_uverbs_to_verbs(struct uverbs_attr_bundle *attrs,
@@ -313,7 +313,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
 	struct ib_flow_action *action;
 	struct ib_flow_action_esp_attr esp_attr = {};
 
-	if (!ib_dev->create_flow_action_esp)
+	if (!ib_dev->ops.create_flow_action_esp)
 		return -EOPNOTSUPP;
 
 	ret = parse_flow_action_esp(ib_dev, attrs, &esp_attr, false);
@@ -321,7 +321,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
 		return ret;
 
 	/* No need to check as this attribute is marked as MANDATORY */
-	action = ib_dev->create_flow_action_esp(ib_dev, &esp_attr.hdr, attrs);
+	action = ib_dev->ops.create_flow_action_esp(ib_dev, &esp_attr.hdr,
+						    attrs);
 	if (IS_ERR(action))
 		return PTR_ERR(action);
 
@@ -340,7 +341,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(
 	int ret;
 	struct ib_flow_action_esp_attr esp_attr = {};
 
-	if (!action->device->modify_flow_action_esp)
+	if (!action->device->ops.modify_flow_action_esp)
 		return -EOPNOTSUPP;
 
 	ret = parse_flow_action_esp(action->device, attrs, &esp_attr, true);
@@ -350,8 +351,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(
 	if (action->type != IB_FLOW_ACTION_ESP)
 		return -EINVAL;
 
-	return action->device->modify_flow_action_esp(action, &esp_attr.hdr,
-						      attrs);
+	return action->device->ops.modify_flow_action_esp(action,
+							  &esp_attr.hdr,
+							  attrs);
 }
 
 static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = {
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index 70ea48cfc047..cafb49a45515 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -54,7 +54,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
 	struct ib_mr *mr;
 	int ret;
 
-	if (!ib_dev->reg_dm_mr)
+	if (!ib_dev->ops.reg_dm_mr)
 		return -EOPNOTSUPP;
 
 	ret = uverbs_copy_from(&attr.offset, attrs, UVERBS_ATTR_REG_DM_MR_OFFSET);
@@ -83,7 +83,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
83 attr.length > dm->length - attr.offset) 83 attr.length > dm->length - attr.offset)
84 return -EINVAL; 84 return -EINVAL;
85 85
86 mr = pd->device->reg_dm_mr(pd, dm, &attr, attrs); 86 mr = pd->device->ops.reg_dm_mr(pd, dm, &attr, attrs);
87 if (IS_ERR(mr)) 87 if (IS_ERR(mr))
88 return PTR_ERR(mr); 88 return PTR_ERR(mr);
89 89
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index 0136c1d78a0f..830e48fe5e65 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -300,7 +300,8 @@ static int uapi_merge_def(struct uverbs_api *uapi, struct ib_device *ibdev,
300 return 0; 300 return 0;
301 301
302 case UAPI_DEF_IS_SUPPORTED_DEV_FN: { 302 case UAPI_DEF_IS_SUPPORTED_DEV_FN: {
303 void **ibdev_fn = (void *)ibdev + def->needs_fn_offset; 303 void **ibdev_fn =
304 (void *)(&ibdev->ops) + def->needs_fn_offset;
304 305
305 if (*ibdev_fn) 306 if (*ibdev_fn)
306 continue; 307 continue;
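The uverbs_uapi.c change above keeps the offset-based capability probe but rebases it: needs_fn_offset is now an offsetof() into struct ib_device_ops, so the core reads the function pointer back through (void *)(&ibdev->ops) + offset. A small userspace model of that lookup, with illustrative names; the void-pointer arithmetic is the same GNU C extension the kernel relies on.

    #include <stddef.h>
    #include <stdio.h>

    struct dev_ops {
            void (*alloc_dm)(void);
            void (*reg_dm_mr)(void);
    };

    struct device {
            int id;                 /* unrelated members come first, as in ib_device */
            struct dev_ops ops;
    };

    static void drv_alloc_dm(void) { }

    int main(void)
    {
            struct device dev = { .ops.alloc_dm = drv_alloc_dm };
            size_t off = offsetof(struct dev_ops, alloc_dm);   /* stored in the uapi def */

            /* mirrors uapi_merge_def(): read the pointer back through the offset */
            void **fn = (void *)(&dev.ops) + off;

            printf("alloc_dm supported:  %d\n", *fn != NULL);  /* 1 */
            printf("reg_dm_mr supported: %d\n",
                   *(void **)((void *)(&dev.ops) +
                              offsetof(struct dev_ops, reg_dm_mr)) != NULL);  /* 0 */
            return 0;
    }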
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index fb2fc0c7ecfb..92dbc758f6c9 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -226,8 +226,8 @@ EXPORT_SYMBOL(rdma_node_get_transport);
226enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num) 226enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
227{ 227{
228 enum rdma_transport_type lt; 228 enum rdma_transport_type lt;
229 if (device->get_link_layer) 229 if (device->ops.get_link_layer)
230 return device->get_link_layer(device, port_num); 230 return device->ops.get_link_layer(device, port_num);
231 231
232 lt = rdma_node_get_transport(device->node_type); 232 lt = rdma_node_get_transport(device->node_type);
233 if (lt == RDMA_TRANSPORT_IB) 233 if (lt == RDMA_TRANSPORT_IB)
@@ -255,7 +255,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
255 struct ib_pd *pd; 255 struct ib_pd *pd;
256 int mr_access_flags = 0; 256 int mr_access_flags = 0;
257 257
258 pd = device->alloc_pd(device, NULL, NULL); 258 pd = device->ops.alloc_pd(device, NULL, NULL);
259 if (IS_ERR(pd)) 259 if (IS_ERR(pd))
260 return pd; 260 return pd;
261 261
@@ -282,7 +282,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
282 if (mr_access_flags) { 282 if (mr_access_flags) {
283 struct ib_mr *mr; 283 struct ib_mr *mr;
284 284
285 mr = pd->device->get_dma_mr(pd, mr_access_flags); 285 mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
286 if (IS_ERR(mr)) { 286 if (IS_ERR(mr)) {
287 ib_dealloc_pd(pd); 287 ib_dealloc_pd(pd);
288 return ERR_CAST(mr); 288 return ERR_CAST(mr);
@@ -319,7 +319,7 @@ void ib_dealloc_pd(struct ib_pd *pd)
319 int ret; 319 int ret;
320 320
321 if (pd->__internal_mr) { 321 if (pd->__internal_mr) {
322 ret = pd->device->dereg_mr(pd->__internal_mr); 322 ret = pd->device->ops.dereg_mr(pd->__internal_mr);
323 WARN_ON(ret); 323 WARN_ON(ret);
324 pd->__internal_mr = NULL; 324 pd->__internal_mr = NULL;
325 } 325 }
@@ -331,7 +331,7 @@ void ib_dealloc_pd(struct ib_pd *pd)
331 rdma_restrack_del(&pd->res); 331 rdma_restrack_del(&pd->res);
332 /* Making delalloc_pd a void return is a WIP, no driver should return 332 /* Making delalloc_pd a void return is a WIP, no driver should return
333 an error here. */ 333 an error here. */
334 ret = pd->device->dealloc_pd(pd); 334 ret = pd->device->ops.dealloc_pd(pd);
335 WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd"); 335 WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
336} 336}
337EXPORT_SYMBOL(ib_dealloc_pd); 337EXPORT_SYMBOL(ib_dealloc_pd);
@@ -491,10 +491,10 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
491{ 491{
492 struct ib_ah *ah; 492 struct ib_ah *ah;
493 493
494 if (!pd->device->create_ah) 494 if (!pd->device->ops.create_ah)
495 return ERR_PTR(-EOPNOTSUPP); 495 return ERR_PTR(-EOPNOTSUPP);
496 496
497 ah = pd->device->create_ah(pd, ah_attr, udata); 497 ah = pd->device->ops.create_ah(pd, ah_attr, udata);
498 498
499 if (!IS_ERR(ah)) { 499 if (!IS_ERR(ah)) {
500 ah->device = pd->device; 500 ah->device = pd->device;
@@ -900,8 +900,8 @@ int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
900 if (ret) 900 if (ret)
901 return ret; 901 return ret;
902 902
903 ret = ah->device->modify_ah ? 903 ret = ah->device->ops.modify_ah ?
904 ah->device->modify_ah(ah, ah_attr) : 904 ah->device->ops.modify_ah(ah, ah_attr) :
905 -EOPNOTSUPP; 905 -EOPNOTSUPP;
906 906
907 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr); 907 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
@@ -914,8 +914,8 @@ int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
914{ 914{
915 ah_attr->grh.sgid_attr = NULL; 915 ah_attr->grh.sgid_attr = NULL;
916 916
917 return ah->device->query_ah ? 917 return ah->device->ops.query_ah ?
918 ah->device->query_ah(ah, ah_attr) : 918 ah->device->ops.query_ah(ah, ah_attr) :
919 -EOPNOTSUPP; 919 -EOPNOTSUPP;
920} 920}
921EXPORT_SYMBOL(rdma_query_ah); 921EXPORT_SYMBOL(rdma_query_ah);
@@ -927,7 +927,7 @@ int rdma_destroy_ah(struct ib_ah *ah)
927 int ret; 927 int ret;
928 928
929 pd = ah->pd; 929 pd = ah->pd;
930 ret = ah->device->destroy_ah(ah); 930 ret = ah->device->ops.destroy_ah(ah);
931 if (!ret) { 931 if (!ret) {
932 atomic_dec(&pd->usecnt); 932 atomic_dec(&pd->usecnt);
933 if (sgid_attr) 933 if (sgid_attr)
@@ -945,10 +945,10 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
945{ 945{
946 struct ib_srq *srq; 946 struct ib_srq *srq;
947 947
948 if (!pd->device->create_srq) 948 if (!pd->device->ops.create_srq)
949 return ERR_PTR(-EOPNOTSUPP); 949 return ERR_PTR(-EOPNOTSUPP);
950 950
951 srq = pd->device->create_srq(pd, srq_init_attr, NULL); 951 srq = pd->device->ops.create_srq(pd, srq_init_attr, NULL);
952 952
953 if (!IS_ERR(srq)) { 953 if (!IS_ERR(srq)) {
954 srq->device = pd->device; 954 srq->device = pd->device;
@@ -977,17 +977,17 @@ int ib_modify_srq(struct ib_srq *srq,
977 struct ib_srq_attr *srq_attr, 977 struct ib_srq_attr *srq_attr,
978 enum ib_srq_attr_mask srq_attr_mask) 978 enum ib_srq_attr_mask srq_attr_mask)
979{ 979{
980 return srq->device->modify_srq ? 980 return srq->device->ops.modify_srq ?
981 srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) : 981 srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
982 -EOPNOTSUPP; 982 NULL) : -EOPNOTSUPP;
983} 983}
984EXPORT_SYMBOL(ib_modify_srq); 984EXPORT_SYMBOL(ib_modify_srq);
985 985
986int ib_query_srq(struct ib_srq *srq, 986int ib_query_srq(struct ib_srq *srq,
987 struct ib_srq_attr *srq_attr) 987 struct ib_srq_attr *srq_attr)
988{ 988{
989 return srq->device->query_srq ? 989 return srq->device->ops.query_srq ?
990 srq->device->query_srq(srq, srq_attr) : -EOPNOTSUPP; 990 srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
991} 991}
992EXPORT_SYMBOL(ib_query_srq); 992EXPORT_SYMBOL(ib_query_srq);
993 993
@@ -1009,7 +1009,7 @@ int ib_destroy_srq(struct ib_srq *srq)
1009 if (srq_type == IB_SRQT_XRC) 1009 if (srq_type == IB_SRQT_XRC)
1010 xrcd = srq->ext.xrc.xrcd; 1010 xrcd = srq->ext.xrc.xrcd;
1011 1011
1012 ret = srq->device->destroy_srq(srq); 1012 ret = srq->device->ops.destroy_srq(srq);
1013 if (!ret) { 1013 if (!ret) {
1014 atomic_dec(&pd->usecnt); 1014 atomic_dec(&pd->usecnt);
1015 if (srq_type == IB_SRQT_XRC) 1015 if (srq_type == IB_SRQT_XRC)
@@ -1118,7 +1118,7 @@ static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
1118 if (!IS_ERR(qp)) 1118 if (!IS_ERR(qp))
1119 __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp); 1119 __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
1120 else 1120 else
1121 real_qp->device->destroy_qp(real_qp); 1121 real_qp->device->ops.destroy_qp(real_qp);
1122 return qp; 1122 return qp;
1123} 1123}
1124 1124
@@ -1704,10 +1704,10 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
1704 if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET) 1704 if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1705 return -EINVAL; 1705 return -EINVAL;
1706 1706
1707 if (!dev->get_netdev) 1707 if (!dev->ops.get_netdev)
1708 return -EOPNOTSUPP; 1708 return -EOPNOTSUPP;
1709 1709
1710 netdev = dev->get_netdev(dev, port_num); 1710 netdev = dev->ops.get_netdev(dev, port_num);
1711 if (!netdev) 1711 if (!netdev)
1712 return -ENODEV; 1712 return -ENODEV;
1713 1713
@@ -1765,9 +1765,9 @@ int ib_query_qp(struct ib_qp *qp,
1765 qp_attr->ah_attr.grh.sgid_attr = NULL; 1765 qp_attr->ah_attr.grh.sgid_attr = NULL;
1766 qp_attr->alt_ah_attr.grh.sgid_attr = NULL; 1766 qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
1767 1767
1768 return qp->device->query_qp ? 1768 return qp->device->ops.query_qp ?
1769 qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : 1769 qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
1770 -EOPNOTSUPP; 1770 qp_init_attr) : -EOPNOTSUPP;
1771} 1771}
1772EXPORT_SYMBOL(ib_query_qp); 1772EXPORT_SYMBOL(ib_query_qp);
1773 1773
@@ -1853,7 +1853,7 @@ int ib_destroy_qp(struct ib_qp *qp)
1853 rdma_rw_cleanup_mrs(qp); 1853 rdma_rw_cleanup_mrs(qp);
1854 1854
1855 rdma_restrack_del(&qp->res); 1855 rdma_restrack_del(&qp->res);
1856 ret = qp->device->destroy_qp(qp); 1856 ret = qp->device->ops.destroy_qp(qp);
1857 if (!ret) { 1857 if (!ret) {
1858 if (alt_path_sgid_attr) 1858 if (alt_path_sgid_attr)
1859 rdma_put_gid_attr(alt_path_sgid_attr); 1859 rdma_put_gid_attr(alt_path_sgid_attr);
@@ -1891,7 +1891,7 @@ struct ib_cq *__ib_create_cq(struct ib_device *device,
1891{ 1891{
1892 struct ib_cq *cq; 1892 struct ib_cq *cq;
1893 1893
1894 cq = device->create_cq(device, cq_attr, NULL, NULL); 1894 cq = device->ops.create_cq(device, cq_attr, NULL, NULL);
1895 1895
1896 if (!IS_ERR(cq)) { 1896 if (!IS_ERR(cq)) {
1897 cq->device = device; 1897 cq->device = device;
@@ -1911,8 +1911,9 @@ EXPORT_SYMBOL(__ib_create_cq);
1911 1911
1912int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period) 1912int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
1913{ 1913{
1914 return cq->device->modify_cq ? 1914 return cq->device->ops.modify_cq ?
1915 cq->device->modify_cq(cq, cq_count, cq_period) : -EOPNOTSUPP; 1915 cq->device->ops.modify_cq(cq, cq_count,
1916 cq_period) : -EOPNOTSUPP;
1916} 1917}
1917EXPORT_SYMBOL(rdma_set_cq_moderation); 1918EXPORT_SYMBOL(rdma_set_cq_moderation);
1918 1919
@@ -1922,14 +1923,14 @@ int ib_destroy_cq(struct ib_cq *cq)
1922 return -EBUSY; 1923 return -EBUSY;
1923 1924
1924 rdma_restrack_del(&cq->res); 1925 rdma_restrack_del(&cq->res);
1925 return cq->device->destroy_cq(cq); 1926 return cq->device->ops.destroy_cq(cq);
1926} 1927}
1927EXPORT_SYMBOL(ib_destroy_cq); 1928EXPORT_SYMBOL(ib_destroy_cq);
1928 1929
1929int ib_resize_cq(struct ib_cq *cq, int cqe) 1930int ib_resize_cq(struct ib_cq *cq, int cqe)
1930{ 1931{
1931 return cq->device->resize_cq ? 1932 return cq->device->ops.resize_cq ?
1932 cq->device->resize_cq(cq, cqe, NULL) : -EOPNOTSUPP; 1933 cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
1933} 1934}
1934EXPORT_SYMBOL(ib_resize_cq); 1935EXPORT_SYMBOL(ib_resize_cq);
1935 1936
@@ -1942,7 +1943,7 @@ int ib_dereg_mr(struct ib_mr *mr)
1942 int ret; 1943 int ret;
1943 1944
1944 rdma_restrack_del(&mr->res); 1945 rdma_restrack_del(&mr->res);
1945 ret = mr->device->dereg_mr(mr); 1946 ret = mr->device->ops.dereg_mr(mr);
1946 if (!ret) { 1947 if (!ret) {
1947 atomic_dec(&pd->usecnt); 1948 atomic_dec(&pd->usecnt);
1948 if (dm) 1949 if (dm)
@@ -1971,10 +1972,10 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
1971{ 1972{
1972 struct ib_mr *mr; 1973 struct ib_mr *mr;
1973 1974
1974 if (!pd->device->alloc_mr) 1975 if (!pd->device->ops.alloc_mr)
1975 return ERR_PTR(-EOPNOTSUPP); 1976 return ERR_PTR(-EOPNOTSUPP);
1976 1977
1977 mr = pd->device->alloc_mr(pd, mr_type, max_num_sg); 1978 mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
1978 if (!IS_ERR(mr)) { 1979 if (!IS_ERR(mr)) {
1979 mr->device = pd->device; 1980 mr->device = pd->device;
1980 mr->pd = pd; 1981 mr->pd = pd;
@@ -1998,10 +1999,10 @@ struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1998{ 1999{
1999 struct ib_fmr *fmr; 2000 struct ib_fmr *fmr;
2000 2001
2001 if (!pd->device->alloc_fmr) 2002 if (!pd->device->ops.alloc_fmr)
2002 return ERR_PTR(-EOPNOTSUPP); 2003 return ERR_PTR(-EOPNOTSUPP);
2003 2004
2004 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); 2005 fmr = pd->device->ops.alloc_fmr(pd, mr_access_flags, fmr_attr);
2005 if (!IS_ERR(fmr)) { 2006 if (!IS_ERR(fmr)) {
2006 fmr->device = pd->device; 2007 fmr->device = pd->device;
2007 fmr->pd = pd; 2008 fmr->pd = pd;
@@ -2020,7 +2021,7 @@ int ib_unmap_fmr(struct list_head *fmr_list)
2020 return 0; 2021 return 0;
2021 2022
2022 fmr = list_entry(fmr_list->next, struct ib_fmr, list); 2023 fmr = list_entry(fmr_list->next, struct ib_fmr, list);
2023 return fmr->device->unmap_fmr(fmr_list); 2024 return fmr->device->ops.unmap_fmr(fmr_list);
2024} 2025}
2025EXPORT_SYMBOL(ib_unmap_fmr); 2026EXPORT_SYMBOL(ib_unmap_fmr);
2026 2027
@@ -2030,7 +2031,7 @@ int ib_dealloc_fmr(struct ib_fmr *fmr)
2030 int ret; 2031 int ret;
2031 2032
2032 pd = fmr->pd; 2033 pd = fmr->pd;
2033 ret = fmr->device->dealloc_fmr(fmr); 2034 ret = fmr->device->ops.dealloc_fmr(fmr);
2034 if (!ret) 2035 if (!ret)
2035 atomic_dec(&pd->usecnt); 2036 atomic_dec(&pd->usecnt);
2036 2037
@@ -2082,14 +2083,14 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2082{ 2083{
2083 int ret; 2084 int ret;
2084 2085
2085 if (!qp->device->attach_mcast) 2086 if (!qp->device->ops.attach_mcast)
2086 return -EOPNOTSUPP; 2087 return -EOPNOTSUPP;
2087 2088
2088 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || 2089 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2089 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) 2090 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2090 return -EINVAL; 2091 return -EINVAL;
2091 2092
2092 ret = qp->device->attach_mcast(qp, gid, lid); 2093 ret = qp->device->ops.attach_mcast(qp, gid, lid);
2093 if (!ret) 2094 if (!ret)
2094 atomic_inc(&qp->usecnt); 2095 atomic_inc(&qp->usecnt);
2095 return ret; 2096 return ret;
@@ -2100,14 +2101,14 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2100{ 2101{
2101 int ret; 2102 int ret;
2102 2103
2103 if (!qp->device->detach_mcast) 2104 if (!qp->device->ops.detach_mcast)
2104 return -EOPNOTSUPP; 2105 return -EOPNOTSUPP;
2105 2106
2106 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || 2107 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2107 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) 2108 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2108 return -EINVAL; 2109 return -EINVAL;
2109 2110
2110 ret = qp->device->detach_mcast(qp, gid, lid); 2111 ret = qp->device->ops.detach_mcast(qp, gid, lid);
2111 if (!ret) 2112 if (!ret)
2112 atomic_dec(&qp->usecnt); 2113 atomic_dec(&qp->usecnt);
2113 return ret; 2114 return ret;
@@ -2118,10 +2119,10 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
2118{ 2119{
2119 struct ib_xrcd *xrcd; 2120 struct ib_xrcd *xrcd;
2120 2121
2121 if (!device->alloc_xrcd) 2122 if (!device->ops.alloc_xrcd)
2122 return ERR_PTR(-EOPNOTSUPP); 2123 return ERR_PTR(-EOPNOTSUPP);
2123 2124
2124 xrcd = device->alloc_xrcd(device, NULL, NULL); 2125 xrcd = device->ops.alloc_xrcd(device, NULL, NULL);
2125 if (!IS_ERR(xrcd)) { 2126 if (!IS_ERR(xrcd)) {
2126 xrcd->device = device; 2127 xrcd->device = device;
2127 xrcd->inode = NULL; 2128 xrcd->inode = NULL;
@@ -2149,7 +2150,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
2149 return ret; 2150 return ret;
2150 } 2151 }
2151 2152
2152 return xrcd->device->dealloc_xrcd(xrcd); 2153 return xrcd->device->ops.dealloc_xrcd(xrcd);
2153} 2154}
2154EXPORT_SYMBOL(ib_dealloc_xrcd); 2155EXPORT_SYMBOL(ib_dealloc_xrcd);
2155 2156
@@ -2172,10 +2173,10 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
2172{ 2173{
2173 struct ib_wq *wq; 2174 struct ib_wq *wq;
2174 2175
2175 if (!pd->device->create_wq) 2176 if (!pd->device->ops.create_wq)
2176 return ERR_PTR(-EOPNOTSUPP); 2177 return ERR_PTR(-EOPNOTSUPP);
2177 2178
2178 wq = pd->device->create_wq(pd, wq_attr, NULL); 2179 wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
2179 if (!IS_ERR(wq)) { 2180 if (!IS_ERR(wq)) {
2180 wq->event_handler = wq_attr->event_handler; 2181 wq->event_handler = wq_attr->event_handler;
2181 wq->wq_context = wq_attr->wq_context; 2182 wq->wq_context = wq_attr->wq_context;
@@ -2205,7 +2206,7 @@ int ib_destroy_wq(struct ib_wq *wq)
2205 if (atomic_read(&wq->usecnt)) 2206 if (atomic_read(&wq->usecnt))
2206 return -EBUSY; 2207 return -EBUSY;
2207 2208
2208 err = wq->device->destroy_wq(wq); 2209 err = wq->device->ops.destroy_wq(wq);
2209 if (!err) { 2210 if (!err) {
2210 atomic_dec(&pd->usecnt); 2211 atomic_dec(&pd->usecnt);
2211 atomic_dec(&cq->usecnt); 2212 atomic_dec(&cq->usecnt);
@@ -2227,10 +2228,10 @@ int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
2227{ 2228{
2228 int err; 2229 int err;
2229 2230
2230 if (!wq->device->modify_wq) 2231 if (!wq->device->ops.modify_wq)
2231 return -EOPNOTSUPP; 2232 return -EOPNOTSUPP;
2232 2233
2233 err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL); 2234 err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL);
2234 return err; 2235 return err;
2235} 2236}
2236EXPORT_SYMBOL(ib_modify_wq); 2237EXPORT_SYMBOL(ib_modify_wq);
@@ -2252,12 +2253,12 @@ struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
2252 int i; 2253 int i;
2253 u32 table_size; 2254 u32 table_size;
2254 2255
2255 if (!device->create_rwq_ind_table) 2256 if (!device->ops.create_rwq_ind_table)
2256 return ERR_PTR(-EOPNOTSUPP); 2257 return ERR_PTR(-EOPNOTSUPP);
2257 2258
2258 table_size = (1 << init_attr->log_ind_tbl_size); 2259 table_size = (1 << init_attr->log_ind_tbl_size);
2259 rwq_ind_table = device->create_rwq_ind_table(device, 2260 rwq_ind_table = device->ops.create_rwq_ind_table(device,
2260 init_attr, NULL); 2261 init_attr, NULL);
2261 if (IS_ERR(rwq_ind_table)) 2262 if (IS_ERR(rwq_ind_table))
2262 return rwq_ind_table; 2263 return rwq_ind_table;
2263 2264
@@ -2287,7 +2288,7 @@ int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
2287 if (atomic_read(&rwq_ind_table->usecnt)) 2288 if (atomic_read(&rwq_ind_table->usecnt))
2288 return -EBUSY; 2289 return -EBUSY;
2289 2290
2290 err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table); 2291 err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
2291 if (!err) { 2292 if (!err) {
2292 for (i = 0; i < table_size; i++) 2293 for (i = 0; i < table_size; i++)
2293 atomic_dec(&ind_tbl[i]->usecnt); 2294 atomic_dec(&ind_tbl[i]->usecnt);
@@ -2300,48 +2301,50 @@ EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
2300int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 2301int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
2301 struct ib_mr_status *mr_status) 2302 struct ib_mr_status *mr_status)
2302{ 2303{
2303 return mr->device->check_mr_status ? 2304 if (!mr->device->ops.check_mr_status)
2304 mr->device->check_mr_status(mr, check_mask, mr_status) : -EOPNOTSUPP; 2305 return -EOPNOTSUPP;
2306
2307 return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
2305} 2308}
2306EXPORT_SYMBOL(ib_check_mr_status); 2309EXPORT_SYMBOL(ib_check_mr_status);
2307 2310
2308int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 2311int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2309 int state) 2312 int state)
2310{ 2313{
2311 if (!device->set_vf_link_state) 2314 if (!device->ops.set_vf_link_state)
2312 return -EOPNOTSUPP; 2315 return -EOPNOTSUPP;
2313 2316
2314 return device->set_vf_link_state(device, vf, port, state); 2317 return device->ops.set_vf_link_state(device, vf, port, state);
2315} 2318}
2316EXPORT_SYMBOL(ib_set_vf_link_state); 2319EXPORT_SYMBOL(ib_set_vf_link_state);
2317 2320
2318int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 2321int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2319 struct ifla_vf_info *info) 2322 struct ifla_vf_info *info)
2320{ 2323{
2321 if (!device->get_vf_config) 2324 if (!device->ops.get_vf_config)
2322 return -EOPNOTSUPP; 2325 return -EOPNOTSUPP;
2323 2326
2324 return device->get_vf_config(device, vf, port, info); 2327 return device->ops.get_vf_config(device, vf, port, info);
2325} 2328}
2326EXPORT_SYMBOL(ib_get_vf_config); 2329EXPORT_SYMBOL(ib_get_vf_config);
2327 2330
2328int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 2331int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2329 struct ifla_vf_stats *stats) 2332 struct ifla_vf_stats *stats)
2330{ 2333{
2331 if (!device->get_vf_stats) 2334 if (!device->ops.get_vf_stats)
2332 return -EOPNOTSUPP; 2335 return -EOPNOTSUPP;
2333 2336
2334 return device->get_vf_stats(device, vf, port, stats); 2337 return device->ops.get_vf_stats(device, vf, port, stats);
2335} 2338}
2336EXPORT_SYMBOL(ib_get_vf_stats); 2339EXPORT_SYMBOL(ib_get_vf_stats);
2337 2340
2338int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 2341int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2339 int type) 2342 int type)
2340{ 2343{
2341 if (!device->set_vf_guid) 2344 if (!device->ops.set_vf_guid)
2342 return -EOPNOTSUPP; 2345 return -EOPNOTSUPP;
2343 2346
2344 return device->set_vf_guid(device, vf, port, guid, type); 2347 return device->ops.set_vf_guid(device, vf, port, guid, type);
2345} 2348}
2346EXPORT_SYMBOL(ib_set_vf_guid); 2349EXPORT_SYMBOL(ib_set_vf_guid);
2347 2350
@@ -2373,12 +2376,12 @@ EXPORT_SYMBOL(ib_set_vf_guid);
2373int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 2376int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2374 unsigned int *sg_offset, unsigned int page_size) 2377 unsigned int *sg_offset, unsigned int page_size)
2375{ 2378{
2376 if (unlikely(!mr->device->map_mr_sg)) 2379 if (unlikely(!mr->device->ops.map_mr_sg))
2377 return -EOPNOTSUPP; 2380 return -EOPNOTSUPP;
2378 2381
2379 mr->page_size = page_size; 2382 mr->page_size = page_size;
2380 2383
2381 return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset); 2384 return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
2382} 2385}
2383EXPORT_SYMBOL(ib_map_mr_sg); 2386EXPORT_SYMBOL(ib_map_mr_sg);
2384 2387
@@ -2577,8 +2580,8 @@ static void __ib_drain_rq(struct ib_qp *qp)
2577 */ 2580 */
2578void ib_drain_sq(struct ib_qp *qp) 2581void ib_drain_sq(struct ib_qp *qp)
2579{ 2582{
2580 if (qp->device->drain_sq) 2583 if (qp->device->ops.drain_sq)
2581 qp->device->drain_sq(qp); 2584 qp->device->ops.drain_sq(qp);
2582 else 2585 else
2583 __ib_drain_sq(qp); 2586 __ib_drain_sq(qp);
2584} 2587}
@@ -2605,8 +2608,8 @@ EXPORT_SYMBOL(ib_drain_sq);
2605 */ 2608 */
2606void ib_drain_rq(struct ib_qp *qp) 2609void ib_drain_rq(struct ib_qp *qp)
2607{ 2610{
2608 if (qp->device->drain_rq) 2611 if (qp->device->ops.drain_rq)
2609 qp->device->drain_rq(qp); 2612 qp->device->ops.drain_rq(qp);
2610 else 2613 else
2611 __ib_drain_rq(qp); 2614 __ib_drain_rq(qp);
2612} 2615}
@@ -2644,10 +2647,11 @@ struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
2644 struct net_device *netdev; 2647 struct net_device *netdev;
2645 int rc; 2648 int rc;
2646 2649
2647 if (!device->rdma_netdev_get_params) 2650 if (!device->ops.rdma_netdev_get_params)
2648 return ERR_PTR(-EOPNOTSUPP); 2651 return ERR_PTR(-EOPNOTSUPP);
2649 2652
2650 rc = device->rdma_netdev_get_params(device, port_num, type, &params); 2653 rc = device->ops.rdma_netdev_get_params(device, port_num, type,
2654 &params);
2651 if (rc) 2655 if (rc)
2652 return ERR_PTR(rc); 2656 return ERR_PTR(rc);
2653 2657
@@ -2669,10 +2673,11 @@ int rdma_init_netdev(struct ib_device *device, u8 port_num,
2669 struct rdma_netdev_alloc_params params; 2673 struct rdma_netdev_alloc_params params;
2670 int rc; 2674 int rc;
2671 2675
2672 if (!device->rdma_netdev_get_params) 2676 if (!device->ops.rdma_netdev_get_params)
2673 return -EOPNOTSUPP; 2677 return -EOPNOTSUPP;
2674 2678
2675 rc = device->rdma_netdev_get_params(device, port_num, type, &params); 2679 rc = device->ops.rdma_netdev_get_params(device, port_num, type,
2680 &params);
2676 if (rc) 2681 if (rc)
2677 return rc; 2682 return rc;
2678 2683
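ib_drain_sq()/ib_drain_rq() above illustrate the other idiom in verbs.c: an ops entry is an optional fast path, and the core supplies a generic fallback rather than failing. In miniature, with toy types rather than kernel API:

    #include <stdio.h>

    struct qp;

    struct dev_ops {
            void (*drain_sq)(struct qp *qp);        /* optional driver override */
    };

    struct qp {
            struct dev_ops ops;
    };

    static void __generic_drain_sq(struct qp *qp)
    {
            puts("generic drain");                  /* stands in for __ib_drain_sq() */
    }

    static void ib_drain_sq_model(struct qp *qp)
    {
            if (qp->ops.drain_sq)
                    qp->ops.drain_sq(qp);           /* driver knows a cheaper way */
            else
                    __generic_drain_sq(qp);         /* portable fallback */
    }

    int main(void)
    {
            struct qp qp = { .ops = { NULL } };

            ib_drain_sq_model(&qp);                 /* prints "generic drain" */
            return 0;
    }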
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 771eb6bd0785..ef137c40205c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -3478,7 +3478,7 @@ static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
3478 /* Need to free the Last Streaming Mode Message */ 3478 /* Need to free the Last Streaming Mode Message */
3479 if (iwqp->ietf_mem.va) { 3479 if (iwqp->ietf_mem.va) {
3480 if (iwqp->lsmm_mr) 3480 if (iwqp->lsmm_mr)
3481 iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr); 3481 iwibdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr);
3482 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem); 3482 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
3483 } 3483 }
3484 } 3484 }
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 155b4dfc0ae8..782499abcd98 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -849,7 +849,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
849 spin_lock_init(&dev->sriov.alias_guid.ag_work_lock); 849 spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);
850 850
851 for (i = 1; i <= dev->num_ports; ++i) { 851 for (i = 1; i <= dev->num_ports; ++i) {
852 if (dev->ib_dev.query_gid(&dev->ib_dev , i, 0, &gid)) { 852 if (dev->ib_dev.ops.query_gid(&dev->ib_dev, i, 0, &gid)) {
853 ret = -EFAULT; 853 ret = -EFAULT;
854 goto err_unregister; 854 goto err_unregister;
855 } 855 }
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 47d9cd260846..5a4e23105b0c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -150,7 +150,7 @@ static int get_port_state(struct ib_device *ibdev,
150 int ret; 150 int ret;
151 151
152 memset(&attr, 0, sizeof(attr)); 152 memset(&attr, 0, sizeof(attr));
153 ret = ibdev->query_port(ibdev, port_num, &attr); 153 ret = ibdev->ops.query_port(ibdev, port_num, &attr);
154 if (!ret) 154 if (!ret)
155 *state = attr.state; 155 *state = attr.state;
156 return ret; 156 return ret;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 2b67ace5b614..032883180f65 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -3033,7 +3033,7 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
3033 /* Need to free the Last Streaming Mode Message */ 3033 /* Need to free the Last Streaming Mode Message */
3034 if (nesqp->ietf_frame) { 3034 if (nesqp->ietf_frame) {
3035 if (nesqp->lsmm_mr) 3035 if (nesqp->lsmm_mr)
3036 nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr); 3036 nesibdev->ibdev.ops.dereg_mr(nesqp->lsmm_mr);
3037 pci_free_consistent(nesdev->pcidev, 3037 pci_free_consistent(nesdev->pcidev,
3038 nesqp->private_data_len + nesqp->ietf_frame_size, 3038 nesqp->private_data_len + nesqp->ietf_frame_size,
3039 nesqp->ietf_frame, nesqp->ietf_frame_pbase); 3039 nesqp->ietf_frame, nesqp->ietf_frame_pbase);
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index c52b38fe2416..aef3aa3fe667 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -456,31 +456,31 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
456 * rdmavt does not support modify device currently drivers must 456 * rdmavt does not support modify device currently drivers must
457 * provide. 457 * provide.
458 */ 458 */
459 if (!rdi->ibdev.modify_device) 459 if (!rdi->ibdev.ops.modify_device)
460 return -EOPNOTSUPP; 460 return -EOPNOTSUPP;
461 break; 461 break;
462 462
463 case QUERY_PORT: 463 case QUERY_PORT:
464 if (!rdi->ibdev.query_port) 464 if (!rdi->ibdev.ops.query_port)
465 if (!rdi->driver_f.query_port_state) 465 if (!rdi->driver_f.query_port_state)
466 return -EINVAL; 466 return -EINVAL;
467 break; 467 break;
468 468
469 case MODIFY_PORT: 469 case MODIFY_PORT:
470 if (!rdi->ibdev.modify_port) 470 if (!rdi->ibdev.ops.modify_port)
471 if (!rdi->driver_f.cap_mask_chg || 471 if (!rdi->driver_f.cap_mask_chg ||
472 !rdi->driver_f.shut_down_port) 472 !rdi->driver_f.shut_down_port)
473 return -EINVAL; 473 return -EINVAL;
474 break; 474 break;
475 475
476 case QUERY_GID: 476 case QUERY_GID:
477 if (!rdi->ibdev.query_gid) 477 if (!rdi->ibdev.ops.query_gid)
478 if (!rdi->driver_f.get_guid_be) 478 if (!rdi->driver_f.get_guid_be)
479 return -EINVAL; 479 return -EINVAL;
480 break; 480 break;
481 481
482 case CREATE_QP: 482 case CREATE_QP:
483 if (!rdi->ibdev.create_qp) 483 if (!rdi->ibdev.ops.create_qp)
484 if (!rdi->driver_f.qp_priv_alloc || 484 if (!rdi->driver_f.qp_priv_alloc ||
485 !rdi->driver_f.qp_priv_free || 485 !rdi->driver_f.qp_priv_free ||
486 !rdi->driver_f.notify_qp_reset || 486 !rdi->driver_f.notify_qp_reset ||
@@ -491,7 +491,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
491 break; 491 break;
492 492
493 case MODIFY_QP: 493 case MODIFY_QP:
494 if (!rdi->ibdev.modify_qp) 494 if (!rdi->ibdev.ops.modify_qp)
495 if (!rdi->driver_f.notify_qp_reset || 495 if (!rdi->driver_f.notify_qp_reset ||
496 !rdi->driver_f.schedule_send || 496 !rdi->driver_f.schedule_send ||
497 !rdi->driver_f.get_pmtu_from_attr || 497 !rdi->driver_f.get_pmtu_from_attr ||
@@ -505,7 +505,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
505 break; 505 break;
506 506
507 case DESTROY_QP: 507 case DESTROY_QP:
508 if (!rdi->ibdev.destroy_qp) 508 if (!rdi->ibdev.ops.destroy_qp)
509 if (!rdi->driver_f.qp_priv_free || 509 if (!rdi->driver_f.qp_priv_free ||
510 !rdi->driver_f.notify_qp_reset || 510 !rdi->driver_f.notify_qp_reset ||
511 !rdi->driver_f.flush_qp_waiters || 511 !rdi->driver_f.flush_qp_waiters ||
@@ -515,7 +515,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
515 break; 515 break;
516 516
517 case POST_SEND: 517 case POST_SEND:
518 if (!rdi->ibdev.post_send) 518 if (!rdi->ibdev.ops.post_send)
519 if (!rdi->driver_f.schedule_send || 519 if (!rdi->driver_f.schedule_send ||
520 !rdi->driver_f.do_send || 520 !rdi->driver_f.do_send ||
521 !rdi->post_parms) 521 !rdi->post_parms)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 8710214594d8..5224c42f9d08 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2453,8 +2453,8 @@ static struct net_device *ipoib_add_port(const char *format,
2453 return ERR_PTR(result); 2453 return ERR_PTR(result);
2454 } 2454 }
2455 2455
2456 if (hca->rdma_netdev_get_params) { 2456 if (hca->ops.rdma_netdev_get_params) {
2457 int rc = hca->rdma_netdev_get_params(hca, port, 2457 int rc = hca->ops.rdma_netdev_get_params(hca, port,
2458 RDMA_NETDEV_IPOIB, 2458 RDMA_NETDEV_IPOIB,
2459 &params); 2459 &params);
2460 2460
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index dbe97c02848c..e9b7efc302d0 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -77,8 +77,8 @@ int iser_assign_reg_ops(struct iser_device *device)
77 struct ib_device *ib_dev = device->ib_device; 77 struct ib_device *ib_dev = device->ib_device;
78 78
79 /* Assign function handles - based on FMR support */ 79 /* Assign function handles - based on FMR support */
80 if (ib_dev->alloc_fmr && ib_dev->dealloc_fmr && 80 if (ib_dev->ops.alloc_fmr && ib_dev->ops.dealloc_fmr &&
81 ib_dev->map_phys_fmr && ib_dev->unmap_fmr) { 81 ib_dev->ops.map_phys_fmr && ib_dev->ops.unmap_fmr) {
82 iser_info("FMR supported, using FMR for registration\n"); 82 iser_info("FMR supported, using FMR for registration\n");
83 device->reg_ops = &fmr_ops; 83 device->reg_ops = &fmr_ops;
84 } else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { 84 } else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index 61558788b3fa..ae70cd18903e 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -330,10 +330,10 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
330 struct rdma_netdev *rn; 330 struct rdma_netdev *rn;
331 int rc; 331 int rc;
332 332
333 netdev = ibdev->alloc_rdma_netdev(ibdev, port_num, 333 netdev = ibdev->ops.alloc_rdma_netdev(ibdev, port_num,
334 RDMA_NETDEV_OPA_VNIC, 334 RDMA_NETDEV_OPA_VNIC,
335 "veth%d", NET_NAME_UNKNOWN, 335 "veth%d", NET_NAME_UNKNOWN,
336 ether_setup); 336 ether_setup);
337 if (!netdev) 337 if (!netdev)
338 return ERR_PTR(-ENOMEM); 338 return ERR_PTR(-ENOMEM);
339 else if (IS_ERR(netdev)) 339 else if (IS_ERR(netdev))
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index eed0eb3bb04c..e58146d020bc 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -4063,8 +4063,10 @@ static void srp_add_one(struct ib_device *device)
4063 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR, 4063 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
4064 max_pages_per_mr); 4064 max_pages_per_mr);
4065 4065
4066 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr && 4066 srp_dev->has_fmr = (device->ops.alloc_fmr &&
4067 device->map_phys_fmr && device->unmap_fmr); 4067 device->ops.dealloc_fmr &&
4068 device->ops.map_phys_fmr &&
4069 device->ops.unmap_fmr);
4068 srp_dev->has_fr = (attr->device_cap_flags & 4070 srp_dev->has_fr = (attr->device_cap_flags &
4069 IB_DEVICE_MEM_MGT_EXTENSIONS); 4071 IB_DEVICE_MEM_MGT_EXTENSIONS);
4070 if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) { 4072 if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index e94a8d1d08a3..a568dac7b3a1 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -1724,7 +1724,7 @@ static struct smbd_connection *_smbd_get_connection(
1724 info->responder_resources); 1724 info->responder_resources);
1725 1725
1726 /* Need to send IRD/ORD in private data for iWARP */ 1726 /* Need to send IRD/ORD in private data for iWARP */
1727 info->id->device->get_port_immutable( 1727 info->id->device->ops.get_port_immutable(
1728 info->id->device, info->id->port_num, &port_immutable); 1728 info->id->device, info->id->port_num, &port_immutable);
1729 if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) { 1729 if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
1730 ird_ord_hdr[0] = info->responder_resources; 1730 ird_ord_hdr[0] = info->responder_resources;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 60315fd05411..5b3b51f00f48 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2507,7 +2507,7 @@ struct ib_device_ops {
2507struct ib_device { 2507struct ib_device {
2508 /* Do not access @dma_device directly from ULP nor from HW drivers. */ 2508 /* Do not access @dma_device directly from ULP nor from HW drivers. */
2509 struct device *dma_device; 2509 struct device *dma_device;
2510 2510 struct ib_device_ops ops;
2511 char name[IB_DEVICE_NAME_MAX]; 2511 char name[IB_DEVICE_NAME_MAX];
2512 2512
2513 struct list_head event_handler_list; 2513 struct list_head event_handler_list;
@@ -2532,273 +2532,6 @@ struct ib_device {
2532 2532
2533 struct iw_cm_verbs *iwcm; 2533 struct iw_cm_verbs *iwcm;
2534 2534
2535 /**
2536 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2537 * driver initialized data. The struct is kfree()'ed by the sysfs
2538 * core when the device is removed. A lifespan of -1 in the return
2539 * struct tells the core to set a default lifespan.
2540 */
2541 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
2542 u8 port_num);
2543 /**
2544 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2545 * @index - The index in the value array we wish to have updated, or
2546 * num_counters if we want all stats updated
2547 * Return codes -
2548 * < 0 - Error, no counters updated
2549 * index - Updated the single counter pointed to by index
2550 * num_counters - Updated all counters (will reset the timestamp
2551 * and prevent further calls for lifespan milliseconds)
2552 * Drivers are allowed to update all counters in lieu of just the
2553 * one given in index at their option
2554 */
2555 int (*get_hw_stats)(struct ib_device *device,
2556 struct rdma_hw_stats *stats,
2557 u8 port, int index);
2558 int (*query_device)(struct ib_device *device,
2559 struct ib_device_attr *device_attr,
2560 struct ib_udata *udata);
2561 int (*query_port)(struct ib_device *device,
2562 u8 port_num,
2563 struct ib_port_attr *port_attr);
2564 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2565 u8 port_num);
2566 /* When calling get_netdev, the HW vendor's driver should return the
2567 * net device of device @device at port @port_num or NULL if such
2568 * a net device doesn't exist. The vendor driver should call dev_hold
2569 * on this net device. The HW vendor's device driver must guarantee
2570 * that this function returns NULL before the net device has finished
2571 * NETDEV_UNREGISTER state.
2572 */
2573 struct net_device *(*get_netdev)(struct ib_device *device,
2574 u8 port_num);
2575 /* query_gid should be return GID value for @device, when @port_num
2576 * link layer is either IB or iWarp. It is no-op if @port_num port
2577 * is RoCE link layer.
2578 */
2579 int (*query_gid)(struct ib_device *device,
2580 u8 port_num, int index,
2581 union ib_gid *gid);
2582 /* When calling add_gid, the HW vendor's driver should add the gid
2583 * of device of port at gid index available at @attr. Meta-info of
2584 * that gid (for example, the network device related to this gid) is
2585 * available at @attr. @context allows the HW vendor driver to store
2586 * extra information together with a GID entry. The HW vendor driver may
2587 * allocate memory to contain this information and store it in @context
2588 * when a new GID entry is written to. Params are consistent until the
2589 * next call of add_gid or delete_gid. The function should return 0 on
2590 * success or error otherwise. The function could be called
2591 * concurrently for different ports. This function is only called when
2592 * roce_gid_table is used.
2593 */
2594 int (*add_gid)(const struct ib_gid_attr *attr,
2595 void **context);
2596 /* When calling del_gid, the HW vendor's driver should delete the
2597 * gid of device @device at gid index gid_index of port port_num
2598 * available in @attr.
2599 * Upon the deletion of a GID entry, the HW vendor must free any
2600 * allocated memory. The caller will clear @context afterwards.
2601 * This function is only called when roce_gid_table is used.
2602 */
2603 int (*del_gid)(const struct ib_gid_attr *attr,
2604 void **context);
2605 int (*query_pkey)(struct ib_device *device,
2606 u8 port_num, u16 index, u16 *pkey);
2607 int (*modify_device)(struct ib_device *device,
2608 int device_modify_mask,
2609 struct ib_device_modify *device_modify);
2610 int (*modify_port)(struct ib_device *device,
2611 u8 port_num, int port_modify_mask,
2612 struct ib_port_modify *port_modify);
2613 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
2614 struct ib_udata *udata);
2615 int (*dealloc_ucontext)(struct ib_ucontext *context);
2616 int (*mmap)(struct ib_ucontext *context,
2617 struct vm_area_struct *vma);
2618 struct ib_pd * (*alloc_pd)(struct ib_device *device,
2619 struct ib_ucontext *context,
2620 struct ib_udata *udata);
2621 int (*dealloc_pd)(struct ib_pd *pd);
2622 struct ib_ah * (*create_ah)(struct ib_pd *pd,
2623 struct rdma_ah_attr *ah_attr,
2624 struct ib_udata *udata);
2625 int (*modify_ah)(struct ib_ah *ah,
2626 struct rdma_ah_attr *ah_attr);
2627 int (*query_ah)(struct ib_ah *ah,
2628 struct rdma_ah_attr *ah_attr);
2629 int (*destroy_ah)(struct ib_ah *ah);
2630 struct ib_srq * (*create_srq)(struct ib_pd *pd,
2631 struct ib_srq_init_attr *srq_init_attr,
2632 struct ib_udata *udata);
2633 int (*modify_srq)(struct ib_srq *srq,
2634 struct ib_srq_attr *srq_attr,
2635 enum ib_srq_attr_mask srq_attr_mask,
2636 struct ib_udata *udata);
2637 int (*query_srq)(struct ib_srq *srq,
2638 struct ib_srq_attr *srq_attr);
2639 int (*destroy_srq)(struct ib_srq *srq);
2640 int (*post_srq_recv)(struct ib_srq *srq,
2641 const struct ib_recv_wr *recv_wr,
2642 const struct ib_recv_wr **bad_recv_wr);
2643 struct ib_qp * (*create_qp)(struct ib_pd *pd,
2644 struct ib_qp_init_attr *qp_init_attr,
2645 struct ib_udata *udata);
2646 int (*modify_qp)(struct ib_qp *qp,
2647 struct ib_qp_attr *qp_attr,
2648 int qp_attr_mask,
2649 struct ib_udata *udata);
2650 int (*query_qp)(struct ib_qp *qp,
2651 struct ib_qp_attr *qp_attr,
2652 int qp_attr_mask,
2653 struct ib_qp_init_attr *qp_init_attr);
2654 int (*destroy_qp)(struct ib_qp *qp);
2655 int (*post_send)(struct ib_qp *qp,
2656 const struct ib_send_wr *send_wr,
2657 const struct ib_send_wr **bad_send_wr);
2658 int (*post_recv)(struct ib_qp *qp,
2659 const struct ib_recv_wr *recv_wr,
2660 const struct ib_recv_wr **bad_recv_wr);
2661 struct ib_cq * (*create_cq)(struct ib_device *device,
2662 const struct ib_cq_init_attr *attr,
2663 struct ib_ucontext *context,
2664 struct ib_udata *udata);
2665 int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
2666 u16 cq_period);
2667 int (*destroy_cq)(struct ib_cq *cq);
2668 int (*resize_cq)(struct ib_cq *cq, int cqe,
2669 struct ib_udata *udata);
2670 int (*poll_cq)(struct ib_cq *cq, int num_entries,
2671 struct ib_wc *wc);
2672 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2673 int (*req_notify_cq)(struct ib_cq *cq,
2674 enum ib_cq_notify_flags flags);
2675 int (*req_ncomp_notif)(struct ib_cq *cq,
2676 int wc_cnt);
2677 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
2678 int mr_access_flags);
2679 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
2680 u64 start, u64 length,
2681 u64 virt_addr,
2682 int mr_access_flags,
2683 struct ib_udata *udata);
2684 int (*rereg_user_mr)(struct ib_mr *mr,
2685 int flags,
2686 u64 start, u64 length,
2687 u64 virt_addr,
2688 int mr_access_flags,
2689 struct ib_pd *pd,
2690 struct ib_udata *udata);
2691 int (*dereg_mr)(struct ib_mr *mr);
2692 struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
2693 enum ib_mr_type mr_type,
2694 u32 max_num_sg);
2695 int (*map_mr_sg)(struct ib_mr *mr,
2696 struct scatterlist *sg,
2697 int sg_nents,
2698 unsigned int *sg_offset);
2699 struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
2700 enum ib_mw_type type,
2701 struct ib_udata *udata);
2702 int (*dealloc_mw)(struct ib_mw *mw);
2703 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
2704 int mr_access_flags,
2705 struct ib_fmr_attr *fmr_attr);
2706 int (*map_phys_fmr)(struct ib_fmr *fmr,
2707 u64 *page_list, int list_len,
2708 u64 iova);
2709 int (*unmap_fmr)(struct list_head *fmr_list);
2710 int (*dealloc_fmr)(struct ib_fmr *fmr);
2711 int (*attach_mcast)(struct ib_qp *qp,
2712 union ib_gid *gid,
2713 u16 lid);
2714 int (*detach_mcast)(struct ib_qp *qp,
2715 union ib_gid *gid,
2716 u16 lid);
2717 int (*process_mad)(struct ib_device *device,
2718 int process_mad_flags,
2719 u8 port_num,
2720 const struct ib_wc *in_wc,
2721 const struct ib_grh *in_grh,
2722 const struct ib_mad_hdr *in_mad,
2723 size_t in_mad_size,
2724 struct ib_mad_hdr *out_mad,
2725 size_t *out_mad_size,
2726 u16 *out_mad_pkey_index);
2727 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
2728 struct ib_ucontext *ucontext,
2729 struct ib_udata *udata);
2730 int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
2731 struct ib_flow * (*create_flow)(struct ib_qp *qp,
2732 struct ib_flow_attr
2733 *flow_attr,
2734 int domain,
2735 struct ib_udata *udata);
2736 int (*destroy_flow)(struct ib_flow *flow_id);
2737 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2738 struct ib_mr_status *mr_status);
2739 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2740 void (*drain_rq)(struct ib_qp *qp);
2741 void (*drain_sq)(struct ib_qp *qp);
2742 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2743 int state);
2744 int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2745 struct ifla_vf_info *ivf);
2746 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2747 struct ifla_vf_stats *stats);
2748 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2749 int type);
2750 struct ib_wq * (*create_wq)(struct ib_pd *pd,
2751 struct ib_wq_init_attr *init_attr,
2752 struct ib_udata *udata);
2753 int (*destroy_wq)(struct ib_wq *wq);
2754 int (*modify_wq)(struct ib_wq *wq,
2755 struct ib_wq_attr *attr,
2756 u32 wq_attr_mask,
2757 struct ib_udata *udata);
2758 struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device,
2759 struct ib_rwq_ind_table_init_attr *init_attr,
2760 struct ib_udata *udata);
2761 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2762 struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *device,
2763 const struct ib_flow_action_attrs_esp *attr,
2764 struct uverbs_attr_bundle *attrs);
2765 int (*destroy_flow_action)(struct ib_flow_action *action);
2766 int (*modify_flow_action_esp)(struct ib_flow_action *action,
2767 const struct ib_flow_action_attrs_esp *attr,
2768 struct uverbs_attr_bundle *attrs);
2769 struct ib_dm * (*alloc_dm)(struct ib_device *device,
2770 struct ib_ucontext *context,
2771 struct ib_dm_alloc_attr *attr,
2772 struct uverbs_attr_bundle *attrs);
2773 int (*dealloc_dm)(struct ib_dm *dm);
2774 struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2775 struct ib_dm_mr_attr *attr,
2776 struct uverbs_attr_bundle *attrs);
2777 struct ib_counters * (*create_counters)(struct ib_device *device,
2778 struct uverbs_attr_bundle *attrs);
2779 int (*destroy_counters)(struct ib_counters *counters);
2780 int (*read_counters)(struct ib_counters *counters,
2781 struct ib_counters_read_attr *counters_read_attr,
2782 struct uverbs_attr_bundle *attrs);
2783
2784 /**
2785 * rdma netdev operation
2786 *
2787 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2788 * must return -EOPNOTSUPP if it doesn't support the specified type.
2789 */
2790 struct net_device *(*alloc_rdma_netdev)(
2791 struct ib_device *device,
2792 u8 port_num,
2793 enum rdma_netdev_t type,
2794 const char *name,
2795 unsigned char name_assign_type,
2796 void (*setup)(struct net_device *));
2797
2798 int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
2799 enum rdma_netdev_t type,
2800 struct rdma_netdev_alloc_params *params);
2801
2802 struct module *owner; 2535 struct module *owner;
2803 struct device dev; 2536 struct device dev;
2804 /* First group for device attributes, 2537 /* First group for device attributes,
@@ -2840,17 +2573,6 @@ struct ib_device {
2840 */ 2573 */
2841 struct rdma_restrack_root res; 2574 struct rdma_restrack_root res;
2842 2575
2843 /**
2844 * The following mandatory functions are used only at device
2845 * registration. Keep functions such as these at the end of this
2846 * structure to avoid cache line misses when accessing struct ib_device
2847 * in fast paths.
2848 */
2849 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
2850 void (*get_dev_fw_str)(struct ib_device *, char *str);
2851 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2852 int comp_vector);
2853
2854 const struct uapi_definition *driver_def; 2576 const struct uapi_definition *driver_def;
2855 enum rdma_driver_id driver_id; 2577 enum rdma_driver_id driver_id;
2856 /* 2578 /*
@@ -3365,7 +3087,7 @@ static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3365 u8 port_num) 3087 u8 port_num)
3366{ 3088{
3367 return rdma_protocol_roce(device, port_num) && 3089 return rdma_protocol_roce(device, port_num) &&
3368 device->add_gid && device->del_gid; 3090 device->ops.add_gid && device->ops.del_gid;
3369} 3091}
3370 3092
3371/* 3093/*
@@ -3589,7 +3311,8 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
3589{ 3311{
3590 const struct ib_recv_wr *dummy; 3312 const struct ib_recv_wr *dummy;
3591 3313
3592 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr ? : &dummy); 3314 return srq->device->ops.post_srq_recv(srq, recv_wr,
3315 bad_recv_wr ? : &dummy);
3593} 3316}
3594 3317
3595/** 3318/**
@@ -3692,7 +3415,7 @@ static inline int ib_post_send(struct ib_qp *qp,
3692{ 3415{
3693 const struct ib_send_wr *dummy; 3416 const struct ib_send_wr *dummy;
3694 3417
3695 return qp->device->post_send(qp, send_wr, bad_send_wr ? : &dummy); 3418 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3696} 3419}
3697 3420
3698/** 3421/**
@@ -3709,7 +3432,7 @@ static inline int ib_post_recv(struct ib_qp *qp,
3709{ 3432{
3710 const struct ib_recv_wr *dummy; 3433 const struct ib_recv_wr *dummy;
3711 3434
3712 return qp->device->post_recv(qp, recv_wr, bad_recv_wr ? : &dummy); 3435 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3713} 3436}
3714 3437
3715struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, 3438struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
@@ -3782,7 +3505,7 @@ int ib_destroy_cq(struct ib_cq *cq);
3782static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 3505static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3783 struct ib_wc *wc) 3506 struct ib_wc *wc)
3784{ 3507{
3785 return cq->device->poll_cq(cq, num_entries, wc); 3508 return cq->device->ops.poll_cq(cq, num_entries, wc);
3786} 3509}
3787 3510
3788/** 3511/**
@@ -3815,7 +3538,7 @@ static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3815static inline int ib_req_notify_cq(struct ib_cq *cq, 3538static inline int ib_req_notify_cq(struct ib_cq *cq,
3816 enum ib_cq_notify_flags flags) 3539 enum ib_cq_notify_flags flags)
3817{ 3540{
3818 return cq->device->req_notify_cq(cq, flags); 3541 return cq->device->ops.req_notify_cq(cq, flags);
3819} 3542}
3820 3543
3821/** 3544/**
@@ -3827,8 +3550,8 @@ static inline int ib_req_notify_cq(struct ib_cq *cq,
3827 */ 3550 */
3828static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 3551static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3829{ 3552{
3830 return cq->device->req_ncomp_notif ? 3553 return cq->device->ops.req_ncomp_notif ?
3831 cq->device->req_ncomp_notif(cq, wc_cnt) : 3554 cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
3832 -ENOSYS; 3555 -ENOSYS;
3833} 3556}
3834 3557
@@ -4092,7 +3815,7 @@ static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
4092 u64 *page_list, int list_len, 3815 u64 *page_list, int list_len,
4093 u64 iova) 3816 u64 iova)
4094{ 3817{
4095 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); 3818 return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
4096} 3819}
4097 3820
4098/** 3821/**
@@ -4445,10 +4168,10 @@ static inline const struct cpumask *
4445ib_get_vector_affinity(struct ib_device *device, int comp_vector) 4168ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4446{ 4169{
4447 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors || 4170 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4448 !device->get_vector_affinity) 4171 !device->ops.get_vector_affinity)
4449 return NULL; 4172 return NULL;
4450 4173
4451 return device->get_vector_affinity(device, comp_vector); 4174 return device->ops.get_vector_affinity(device, comp_vector);
4452 4175
4453} 4176}
4454 4177
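With the pointers gone from struct ib_device, a driver now describes itself once through a (typically const) struct ib_device_ops and hands it to the core. A minimal sketch of the driver side, assuming the ib_set_device_ops() helper introduced by the parent commit; the mydrv_* callbacks are placeholders whose definitions are omitted.

    #include <rdma/ib_verbs.h>

    static const struct ib_device_ops mydrv_dev_ops = {
            .query_port     = mydrv_query_port,
            .query_pkey     = mydrv_query_pkey,
            .alloc_pd       = mydrv_alloc_pd,
            .dealloc_pd     = mydrv_dealloc_pd,
            .create_qp      = mydrv_create_qp,
            .destroy_qp     = mydrv_destroy_qp,
            .post_send      = mydrv_post_send,
            .post_recv      = mydrv_post_recv,
            /* members left unset stay NULL, so the core returns -EOPNOTSUPP
             * (or falls back to a generic path) for those verbs */
    };

    static void mydrv_init_ibdev(struct ib_device *ibdev)
    {
            ib_set_device_ops(ibdev, &mydrv_dev_ops);
            /* followed by the usual ib_register_device() call */
    }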
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 7f83265f6298..839a857aa329 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -419,10 +419,10 @@ struct uapi_definition {
419 .kind = UAPI_DEF_IS_SUPPORTED_DEV_FN, \ 419 .kind = UAPI_DEF_IS_SUPPORTED_DEV_FN, \
420 .scope = UAPI_SCOPE_OBJECT, \ 420 .scope = UAPI_SCOPE_OBJECT, \
421 .needs_fn_offset = \ 421 .needs_fn_offset = \
422 offsetof(struct ib_device, ibdev_fn) + \ 422 offsetof(struct ib_device_ops, ibdev_fn) + \
423 BUILD_BUG_ON_ZERO( \ 423 BUILD_BUG_ON_ZERO( \
424 sizeof(((struct ib_device *)0)->ibdev_fn) != \ 424 sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
425 sizeof(void *)), \ 425 sizeof(void *)), \
426 } 426 }
427 427
428/* 428/*
@@ -434,10 +434,10 @@ struct uapi_definition {
434 .kind = UAPI_DEF_IS_SUPPORTED_DEV_FN, \ 434 .kind = UAPI_DEF_IS_SUPPORTED_DEV_FN, \
435 .scope = UAPI_SCOPE_METHOD, \ 435 .scope = UAPI_SCOPE_METHOD, \
436 .needs_fn_offset = \ 436 .needs_fn_offset = \
437 offsetof(struct ib_device, ibdev_fn) + \ 437 offsetof(struct ib_device_ops, ibdev_fn) + \
438 BUILD_BUG_ON_ZERO( \ 438 BUILD_BUG_ON_ZERO( \
439 sizeof(((struct ib_device *)0)->ibdev_fn) != \ 439 sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
440 sizeof(void *)), \ 440 sizeof(void *)), \
441 } 441 }
442 442
443/* Call a function to determine if the entire object is supported or not */ 443/* Call a function to determine if the entire object is supported or not */
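The ioctl-side macros keep the same call sites; only the offsetof() target moves into struct ib_device_ops. A usage sketch, mirroring how the DM object is gated in uverbs_std_types_dm.c; the surrounding macro names come from the uapi_definition series this patch builds on:

    /* Expose the DM object only when the driver provides ops.alloc_dm */
    const struct uapi_definition uverbs_def_obj_dm[] = {
            UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM,
                                          UAPI_DEF_OBJ_NEEDS_FN(alloc_dm)),
            {},
    };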
diff --git a/net/rds/ib.c b/net/rds/ib.c
index eba75c1ba359..9d7b7586f240 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -148,8 +148,8 @@ static void rds_ib_add_one(struct ib_device *device)
148 148
149 has_fr = (device->attrs.device_cap_flags & 149 has_fr = (device->attrs.device_cap_flags &
150 IB_DEVICE_MEM_MGT_EXTENSIONS); 150 IB_DEVICE_MEM_MGT_EXTENSIONS);
151 has_fmr = (device->alloc_fmr && device->dealloc_fmr && 151 has_fmr = (device->ops.alloc_fmr && device->ops.dealloc_fmr &&
152 device->map_phys_fmr && device->unmap_fmr); 152 device->ops.map_phys_fmr && device->ops.unmap_fmr);
153 rds_ibdev->use_fastreg = (has_fr && !has_fmr); 153 rds_ibdev->use_fastreg = (has_fr && !has_fmr);
154 154
155 rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr?: 32; 155 rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr?: 32;
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index 7f5632cd5a48..fd8fea59fe92 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -41,7 +41,7 @@ enum {
41bool 41bool
42fmr_is_supported(struct rpcrdma_ia *ia) 42fmr_is_supported(struct rpcrdma_ia *ia)
43{ 43{
44 if (!ia->ri_device->alloc_fmr) { 44 if (!ia->ri_device->ops.alloc_fmr) {
45 pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n", 45 pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
46 ia->ri_device->name); 46 ia->ri_device->name);
47 return false; 47 return false;