author     Linus Torvalds <torvalds@linux-foundation.org>   2019-09-21 13:26:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-09-21 13:26:24 -0400
commit     018c6837f3e63b45163d55a1668d9f8e6fdecf6e
tree       0d960bf956f07b2dc0302acd276061ac0f61102e
parent     84da111de0b4be15bd500deff773f5116f39f7be
parent     3eca7fc2d8d1275d9cf0c709f0937becbfcf6d96
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull RDMA subsystem updates from Jason Gunthorpe:
"This cycle mainly saw lots of bug fixes and clean up code across the
core code and several drivers, few new functional changes were made.
- Many cleanup and bug fixes for hns
- Various small bug fixes and cleanups in hfi1, mlx5, usnic, qed,
bnxt_re, efa
- Share the query_port code between all the iWarp drivers
- General rework and cleanup of the ODP MR umem code to fit better
with the mmu notifier get/put scheme
- Support rdma netlink in non init_net name spaces
- mlx5 support for XRC devx and DC ODP"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (99 commits)
RDMA: Fix double-free in srq creation error flow
RDMA/efa: Fix incorrect error print
IB/mlx5: Free mpi in mp_slave mode
IB/mlx5: Use the original address for the page during free_pages
RDMA/bnxt_re: Fix spelling mistake "missin_resp" -> "missing_resp"
RDMA/hns: Package operations of rq inline buffer into separate functions
RDMA/hns: Optimize cmd init and mode selection for hip08
IB/hfi1: Define variables as unsigned long to fix KASAN warning
IB/{rdmavt, hfi1, qib}: Add a counter for credit waits
IB/hfi1: Add traces for TID RDMA READ
RDMA/siw: Relax from kmap_atomic() use in TX path
IB/iser: Support up to 16MB data transfer in a single command
RDMA/siw: Fix page address mapping in TX path
RDMA: Fix goto target to release the allocated memory
RDMA/usnic: Avoid overly large buffers on stack
RDMA/odp: Add missing cast for 32 bit
RDMA/hns: Use devm_platform_ioremap_resource() to simplify code
Documentation/infiniband: update name of some functions
RDMA/cma: Fix false error message
RDMA/hns: Fix wrong assignment of qp_access_flags
...
99 files changed, 2710 insertions, 1891 deletions
diff --git a/Documentation/infiniband/core_locking.rst b/Documentation/infiniband/core_locking.rst
index f34669beb4fe..8f76a8a5a38f 100644
--- a/Documentation/infiniband/core_locking.rst
+++ b/Documentation/infiniband/core_locking.rst
@@ -29,10 +29,10 @@ Sleeping and interrupt context
 The corresponding functions exported to upper level protocol
 consumers:

-  - ib_create_ah
-  - ib_modify_ah
-  - ib_query_ah
-  - ib_destroy_ah
+  - rdma_create_ah
+  - rdma_modify_ah
+  - rdma_query_ah
+  - rdma_destroy_ah
   - ib_post_send
   - ib_post_recv
   - ib_req_notify_cq
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 9b76a8fcdd24..1dd467bed8fc 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -183,7 +183,7 @@ static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,

 /* Repair the nlmsg header length */
 nlmsg_end(skb, nlh);
-rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, GFP_KERNEL);
+rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, GFP_KERNEL);

 /* Make the request retry, so when we get the response from userspace
 * we will have something.
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 18e476b3ced0..00fb3eacda19 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -810,6 +810,7 @@ static void release_gid_table(struct ib_device *device,
 if (leak)
 return;

+mutex_destroy(&table->lock);
 kfree(table->data_vec);
 kfree(table);
 }
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index a68d0ccf67a4..0e3cf3461999 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3046,7 +3046,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 if (status)
 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
 status);
-} else {
+} else if (status) {
 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
 }

diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 3ec2c415bb70..8b0b5ae22e4c 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -342,12 +342,18 @@ static struct configfs_subsystem cma_subsys = {

 int __init cma_configfs_init(void)
 {
+int ret;
+
 config_group_init(&cma_subsys.su_group);
 mutex_init(&cma_subsys.su_mutex);
-return configfs_register_subsystem(&cma_subsys);
+ret = configfs_register_subsystem(&cma_subsys);
+if (ret)
+mutex_destroy(&cma_subsys.su_mutex);
+return ret;
 }

 void __exit cma_configfs_exit(void)
 {
 configfs_unregister_subsystem(&cma_subsys);
+mutex_destroy(&cma_subsys.su_mutex);
 }
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index beee7b7e0d9a..3a8b0911c3bc 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -36,6 +36,8 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/cgroup_rdma.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>

 #include <rdma/ib_verbs.h>
 #include <rdma/opa_addr.h>
@@ -54,8 +56,26 @@ struct pkey_index_qp_list {
 struct list_head qp_list;
 };

+/**
+* struct rdma_dev_net - rdma net namespace metadata for a net
+* @nl_sock: Pointer to netlink socket
+* @net: Pointer to owner net namespace
+* @id: xarray id to identify the net namespace.
+*/
+struct rdma_dev_net {
+struct sock *nl_sock;
+possible_net_t net;
+u32 id;
+};
+
 extern const struct attribute_group ib_dev_attr_group;
 extern bool ib_devices_shared_netns;
+extern unsigned int rdma_dev_net_id;
+
+static inline struct rdma_dev_net *rdma_net_to_dev_net(struct net *net)
+{
+return net_generic(net, rdma_dev_net_id);
+}

 int ib_device_register_sysfs(struct ib_device *device);
 void ib_device_unregister_sysfs(struct ib_device *device);
@@ -179,7 +199,6 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);

-int rdma_nl_init(void);
 void rdma_nl_exit(void);

 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
@@ -365,4 +384,7 @@ void ib_port_unregister_module_stat(struct kobject *kobj);

 int ib_device_set_netns_put(struct sk_buff *skb,
 struct ib_device *dev, u32 ns_fd);
+
+int rdma_nl_net_init(struct rdma_dev_net *rnet);
+void rdma_nl_net_exit(struct rdma_dev_net *rnet);
 #endif /* _CORE_PRIV_H */
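The new rdma_dev_net_id is the per-net generic-data key, so rdma_net_to_dev_net() can resolve the per-namespace state from any struct net. The glue that actually allocates one struct rdma_dev_net per namespace lives in device.c and is not part of the hunks shown on this page; it presumably looks roughly like the sketch below (an illustration of the pernet_operations pattern, not a quote of the tree):

    static struct pernet_operations rdma_dev_net_ops = {
            .init = rdma_dev_init_net,
            .exit = rdma_dev_exit_net,
            .id = &rdma_dev_net_id,
            .size = sizeof(struct rdma_dev_net),
    };

    /* in ib_core_init(): the core then gets one rdma_dev_net per namespace */
    ret = register_pernet_device(&rdma_dev_net_ops);

With that in place, rdma_nl_net_init()/rdma_nl_net_exit() declared above give each namespace its own NETLINK_RDMA socket, which is what the later device.c and netlink.c hunks wire up.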
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index af8c85d18e62..680ad27f497d 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -599,7 +599,7 @@ int rdma_counter_get_mode(struct ib_device *dev, u8 port,
 void rdma_counter_init(struct ib_device *dev)
 {
 struct rdma_port_counter *port_counter;
-u32 port;
+u32 port, i;

 if (!dev->port_data)
 return;
@@ -620,13 +620,12 @@ void rdma_counter_init(struct ib_device *dev)
 return;

 fail:
-rdma_for_each_port(dev, port) {
+for (i = port; i >= rdma_start_port(dev); i--) {
 port_counter = &dev->port_data[port].port_counter;
 kfree(port_counter->hstats);
 port_counter->hstats = NULL;
+mutex_destroy(&port_counter->lock);
 }
-
-return;
 }

 void rdma_counter_release(struct ib_device *dev)
@@ -637,5 +636,6 @@ void rdma_counter_release(struct ib_device *dev)
 rdma_for_each_port(dev, port) {
 port_counter = &dev->port_data[port].port_counter;
 kfree(port_counter->hstats);
+mutex_destroy(&port_counter->lock);
 }
 }
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 7c599878ccf7..bbfded6d5d3d 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -253,6 +253,34 @@ out_free_cq:
 EXPORT_SYMBOL(__ib_alloc_cq_user);

 /**
+* __ib_alloc_cq_any - allocate a completion queue
+* @dev: device to allocate the CQ for
+* @private: driver private data, accessible from cq->cq_context
+* @nr_cqe: number of CQEs to allocate
+* @poll_ctx: context to poll the CQ from
+* @caller: module owner name
+*
+* Attempt to spread ULP Completion Queues over each device's interrupt
+* vectors. A simple best-effort mechanism is used.
+*/
+struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
+int nr_cqe, enum ib_poll_context poll_ctx,
+const char *caller)
+{
+static atomic_t counter;
+int comp_vector = 0;
+
+if (dev->num_comp_vectors > 1)
+comp_vector =
+atomic_inc_return(&counter) %
+min_t(int, dev->num_comp_vectors, num_online_cpus());
+
+return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
+caller, NULL);
+}
+EXPORT_SYMBOL(__ib_alloc_cq_any);
+
+/**
 * ib_free_cq_user - free a completion queue
 * @cq: completion queue to free.
 * @udata: User data or NULL for kernel object
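A ULP that has no reason to pick a specific completion vector can let the core spread its CQs with this helper. Assuming the usual ib_alloc_cq_any() wrapper macro (which supplies KBUILD_MODNAME as @caller), a minimal caller-side sketch follows; my_ulp_setup_cq, my_private_ctx, the queue depth, and the poll context are hypothetical, illustrative choices:

    /* assumes #include <rdma/ib_verbs.h> */
    static int my_ulp_setup_cq(struct ib_device *ibdev, void *my_private_ctx)
    {
            struct ib_cq *cq;

            /* let the core pick a comp_vector; 128 CQEs, softirq polling */
            cq = ib_alloc_cq_any(ibdev, my_private_ctx, 128, IB_POLL_SOFTIRQ);
            if (IS_ERR(cq))
                    return PTR_ERR(cq);

            /* ... post work requests that complete on this CQ ... */

            ib_free_cq(cq);
            return 0;
    }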
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index b5631b8a0397..99c4a55545cf 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -39,7 +39,6 @@
 #include <linux/init.h>
 #include <linux/netdevice.h>
 #include <net/net_namespace.h>
-#include <net/netns/generic.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/hashtable.h>
@@ -111,17 +110,7 @@ static void ib_client_put(struct ib_client *client)
 */
 #define CLIENT_DATA_REGISTERED XA_MARK_1

-/**
-* struct rdma_dev_net - rdma net namespace metadata for a net
-* @net: Pointer to owner net namespace
-* @id: xarray id to identify the net namespace.
-*/
-struct rdma_dev_net {
-possible_net_t net;
-u32 id;
-};
-
-static unsigned int rdma_dev_net_id;
+unsigned int rdma_dev_net_id;

 /*
 * A list of net namespaces is maintained in an xarray. This is necessary
@@ -514,6 +503,9 @@ static void ib_device_release(struct device *device)
 rcu_head);
 }

+mutex_destroy(&dev->unregistration_lock);
+mutex_destroy(&dev->compat_devs_mutex);
+
 xa_destroy(&dev->compat_devs);
 xa_destroy(&dev->client_data);
 kfree_rcu(dev, rcu_head);
@@ -1060,7 +1052,7 @@ int rdma_compatdev_set(u8 enable)

 static void rdma_dev_exit_net(struct net *net)
 {
-struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
+struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
 struct ib_device *dev;
 unsigned long index;
 int ret;
@@ -1094,25 +1086,32 @@ static void rdma_dev_exit_net(struct net *net)
 }
 up_read(&devices_rwsem);

+rdma_nl_net_exit(rnet);
 xa_erase(&rdma_nets, rnet->id);
 }

 static __net_init int rdma_dev_init_net(struct net *net)
 {
-struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
+struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
 unsigned long index;
 struct ib_device *dev;
 int ret;

+write_pnet(&rnet->net, net);
+
+ret = rdma_nl_net_init(rnet);
+if (ret)
+return ret;
+
 /* No need to create any compat devices in default init_net. */
 if (net_eq(net, &init_net))
 return 0;

-write_pnet(&rnet->net, net);
-
 ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
-if (ret)
+if (ret) {
+rdma_nl_net_exit(rnet);
 return ret;
+}

 down_read(&devices_rwsem);
 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
@@ -1974,31 +1973,64 @@ void ib_dispatch_event(struct ib_event *event)
 }
 EXPORT_SYMBOL(ib_dispatch_event);

-/**
-* ib_query_port - Query IB port attributes
-* @device:Device to query
-* @port_num:Port number to query
-* @port_attr:Port attributes
-*
-* ib_query_port() returns the attributes of a port through the
-* @port_attr pointer.
-*/
-int ib_query_port(struct ib_device *device,
-u8 port_num,
-struct ib_port_attr *port_attr)
+static int iw_query_port(struct ib_device *device,
+u8 port_num,
+struct ib_port_attr *port_attr)
 {
-union ib_gid gid;
+struct in_device *inetdev;
+struct net_device *netdev;
 int err;

-if (!rdma_is_port_valid(device, port_num))
-return -EINVAL;
+memset(port_attr, 0, sizeof(*port_attr));
+
+netdev = ib_device_get_netdev(device, port_num);
+if (!netdev)
+return -ENODEV;
+
+dev_put(netdev);
+
+port_attr->max_mtu = IB_MTU_4096;
+port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
+
+if (!netif_carrier_ok(netdev)) {
+port_attr->state = IB_PORT_DOWN;
+port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+} else {
+inetdev = in_dev_get(netdev);
+
+if (inetdev && inetdev->ifa_list) {
+port_attr->state = IB_PORT_ACTIVE;
+port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+in_dev_put(inetdev);
+} else {
+port_attr->state = IB_PORT_INIT;
+port_attr->phys_state =
+IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
+}
+}
+
+err = device->ops.query_port(device, port_num, port_attr);
+if (err)
+return err;
+
+return 0;
+}
+
+static int __ib_query_port(struct ib_device *device,
+u8 port_num,
+struct ib_port_attr *port_attr)
+{
+union ib_gid gid = {};
+int err;

 memset(port_attr, 0, sizeof(*port_attr));
+
 err = device->ops.query_port(device, port_num, port_attr);
 if (err || port_attr->subnet_prefix)
 return err;

-if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
+if (rdma_port_get_link_layer(device, port_num) !=
+IB_LINK_LAYER_INFINIBAND)
 return 0;

 err = device->ops.query_gid(device, port_num, 0, &gid);
@@ -2008,6 +2040,28 @@ int ib_query_port(struct ib_device *device,
 port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
 return 0;
 }
+
+/**
+* ib_query_port - Query IB port attributes
+* @device:Device to query
+* @port_num:Port number to query
+* @port_attr:Port attributes
+*
+* ib_query_port() returns the attributes of a port through the
+* @port_attr pointer.
+*/
+int ib_query_port(struct ib_device *device,
+u8 port_num,
+struct ib_port_attr *port_attr)
+{
+if (!rdma_is_port_valid(device, port_num))
+return -EINVAL;
+
+if (rdma_protocol_iwarp(device, port_num))
+return iw_query_port(device, port_num, port_attr);
+else
+return __ib_query_port(device, port_num, port_attr);
+}
 EXPORT_SYMBOL(ib_query_port);

 static void add_ndev_hash(struct ib_port_data *pdata)
@@ -2661,12 +2715,6 @@ static int __init ib_core_init(void)
 goto err_comp_unbound;
 }

-ret = rdma_nl_init();
-if (ret) {
-pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
-goto err_sysfs;
-}
-
 ret = addr_init();
 if (ret) {
 pr_warn("Could't init IB address resolution\n");
@@ -2712,8 +2760,6 @@ err_mad:
 err_addr:
 addr_cleanup();
 err_ibnl:
-rdma_nl_exit();
-err_sysfs:
 class_unregister(&ib_class);
 err_comp_unbound:
 destroy_workqueue(ib_comp_unbound_wq);
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 7d841b689a1e..e08aec427027 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -148,13 +148,6 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 hlist_del_init(&fmr->cache_node);
 fmr->remap_count = 0;
 list_add_tail(&fmr->fmr->list, &fmr_list);
-
-#ifdef DEBUG
-if (fmr->ref_count !=0) {
-pr_warn(PFX "Unmapping FMR 0x%08x with ref count %d\n",
-fmr, fmr->ref_count);
-}
-#endif
 }

 list_splice_init(&pool->dirty_list, &unmap_list);
@@ -496,12 +489,6 @@ void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
 }
 }

-#ifdef DEBUG
-if (fmr->ref_count < 0)
-pr_warn(PFX "FMR %p has ref count %d < 0\n",
-fmr, fmr->ref_count);
-#endif
-
 spin_unlock_irqrestore(&pool->pool_lock, flags);
 }
 EXPORT_SYMBOL(ib_fmr_pool_unmap);
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 2452b0ddcf0d..46686990a827 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -112,7 +112,7 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
 pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
 __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);

-ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
+ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
 if (ret) {
 skb = NULL; /* skb is freed in the netlink send-op handling */
 iwpm_user_pid = IWPM_PID_UNAVAILABLE;
@@ -124,8 +124,7 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
 return ret;
 pid_query_error:
 pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
-if (skb)
-dev_kfree_skb(skb);
+dev_kfree_skb(skb);
 if (nlmsg_request)
 iwpm_free_nlmsg_request(&nlmsg_request->kref);
 return ret;
@@ -202,7 +201,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 nlmsg_end(skb, nlh);
 nlmsg_request->req_buffer = pm_msg;

-ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
+ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
 if (ret) {
 skb = NULL; /* skb is freed in the netlink send-op handling */
 iwpm_user_pid = IWPM_PID_UNDEFINED;
@@ -214,8 +213,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 add_mapping_error:
 pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
 add_mapping_error_nowarn:
-if (skb)
-dev_kfree_skb(skb);
+dev_kfree_skb(skb);
 if (nlmsg_request)
 iwpm_free_nlmsg_request(&nlmsg_request->kref);
 return ret;
@@ -297,7 +295,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 nlmsg_end(skb, nlh);
 nlmsg_request->req_buffer = pm_msg;

-ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
+ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
 if (ret) {
 skb = NULL; /* skb is freed in the netlink send-op handling */
 err_str = "Unable to send a nlmsg";
@@ -308,8 +306,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 query_mapping_error:
 pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
 query_mapping_error_nowarn:
-if (skb)
-dev_kfree_skb(skb);
+dev_kfree_skb(skb);
 if (nlmsg_request)
 iwpm_free_nlmsg_request(&nlmsg_request->kref);
 return ret;
@@ -364,7 +361,7 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)

 nlmsg_end(skb, nlh);

-ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
+ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
 if (ret) {
 skb = NULL; /* skb is freed in the netlink send-op handling */
 iwpm_user_pid = IWPM_PID_UNDEFINED;
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 41929bb83739..13495b43dbc1 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -645,7 +645,7 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)

 nlmsg_end(skb, nlh);

-ret = rdma_nl_unicast(skb, iwpm_pid);
+ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
 if (ret) {
 skb = NULL;
 err_str = "Unable to send a nlmsg";
@@ -655,8 +655,7 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
 return 0;
 mapinfo_num_error:
 pr_info("%s: %s\n", __func__, err_str);
-if (skb)
-dev_kfree_skb(skb);
+dev_kfree_skb(skb);
 return ret;
 }

@@ -674,7 +673,7 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
 return -ENOMEM;
 }
 nlh->nlmsg_type = NLMSG_DONE;
-ret = rdma_nl_unicast(skb, iwpm_pid);
+ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
 if (ret)
 pr_warn("%s Unable to send a nlmsg\n", __func__);
 return ret;
@@ -778,8 +777,7 @@ send_mapping_info_unlock:
 send_mapping_info_exit:
 if (ret) {
 pr_warn("%s: %s (ret = %d)\n", __func__, err_str, ret);
-if (skb)
-dev_kfree_skb(skb);
+dev_kfree_skb(skb);
 return ret;
 }
 send_nlmsg_done(skb, nl_client, iwpm_pid);
@@ -824,7 +822,7 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version)
 goto hello_num_error;
 nlmsg_end(skb, nlh);

-ret = rdma_nl_unicast(skb, iwpm_pid);
+ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
 if (ret) {
 skb = NULL;
 err_str = "Unable to send a nlmsg";
@@ -834,7 +832,6 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version)
 return 0;
 hello_num_error:
 pr_info("%s: %s\n", __func__, err_str);
-if (skb)
-dev_kfree_skb(skb);
+dev_kfree_skb(skb);
 return ret;
 }
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index eecfc0b377c9..81dbd5f41bed 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -36,20 +36,22 @@
 #include <linux/export.h>
 #include <net/netlink.h>
 #include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include <net/sock.h>
 #include <rdma/rdma_netlink.h>
 #include <linux/module.h>
 #include "core_priv.h"

 static DEFINE_MUTEX(rdma_nl_mutex);
-static struct sock *nls;
 static struct {
 const struct rdma_nl_cbs *cb_table;
 } rdma_nl_types[RDMA_NL_NUM_CLIENTS];

 bool rdma_nl_chk_listeners(unsigned int group)
 {
-return netlink_has_listeners(nls, group);
+struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net);
+
+return netlink_has_listeners(rnet->nl_sock, group);
 }
 EXPORT_SYMBOL(rdma_nl_chk_listeners);

@@ -73,13 +75,21 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
 return (op < max_num_ops[type]) ? true : false;
 }

-static bool is_nl_valid(unsigned int type, unsigned int op)
+static bool
+is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op)
 {
 const struct rdma_nl_cbs *cb_table;

 if (!is_nl_msg_valid(type, op))
 return false;

+/*
+* Currently only NLDEV client is supporting netlink commands in
+* non init_net net namespace.
+*/
+if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
+return false;
+
 if (!rdma_nl_types[type].cb_table) {
 mutex_unlock(&rdma_nl_mutex);
 request_module("rdma-netlink-subsys-%d", type);
@@ -161,7 +171,7 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 unsigned int op = RDMA_NL_GET_OP(type);
 const struct rdma_nl_cbs *cb_table;

-if (!is_nl_valid(index, op))
+if (!is_nl_valid(skb, index, op))
 return -EINVAL;

 cb_table = rdma_nl_types[index].cb_table;
@@ -185,7 +195,7 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 .dump = cb_table[op].dump,
 };
 if (c.dump)
-return netlink_dump_start(nls, skb, nlh, &c);
+return netlink_dump_start(skb->sk, skb, nlh, &c);
 return -EINVAL;
 }

@@ -258,52 +268,65 @@ static void rdma_nl_rcv(struct sk_buff *skb)
 mutex_unlock(&rdma_nl_mutex);
 }

-int rdma_nl_unicast(struct sk_buff *skb, u32 pid)
+int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
 {
+struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
 int err;

-err = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
+err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT);
 return (err < 0) ? err : 0;
 }
 EXPORT_SYMBOL(rdma_nl_unicast);

-int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid)
+int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid)
 {
+struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
 int err;

-err = netlink_unicast(nls, skb, pid, 0);
+err = netlink_unicast(rnet->nl_sock, skb, pid, 0);
 return (err < 0) ? err : 0;
 }
 EXPORT_SYMBOL(rdma_nl_unicast_wait);

-int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags)
+int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
+unsigned int group, gfp_t flags)
 {
-return nlmsg_multicast(nls, skb, 0, group, flags);
+struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
+
+return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags);
 }
 EXPORT_SYMBOL(rdma_nl_multicast);

-int __init rdma_nl_init(void)
+void rdma_nl_exit(void)
+{
+int idx;
+
+for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+WARN(rdma_nl_types[idx].cb_table,
+"Netlink client %d wasn't released prior to unloading %s\n",
+idx, KBUILD_MODNAME);
+}
+
+int rdma_nl_net_init(struct rdma_dev_net *rnet)
 {
+struct net *net = read_pnet(&rnet->net);
 struct netlink_kernel_cfg cfg = {
 .input = rdma_nl_rcv,
 };
+struct sock *nls;

-nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
+nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg);
 if (!nls)
 return -ENOMEM;

 nls->sk_sndtimeo = 10 * HZ;
+rnet->nl_sock = nls;
 return 0;
 }

-void rdma_nl_exit(void)
+void rdma_nl_net_exit(struct rdma_dev_net *rnet)
 {
-int idx;
-
-for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
-rdma_nl_unregister(idx);
-
-netlink_kernel_release(nls);
+netlink_kernel_release(rnet->nl_sock);
 }

 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 020c26976558..7a7474000100 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -831,7 +831,7 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 nlmsg_end(msg, nlh);

 ib_device_put(device);
-return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

 err_free:
 nlmsg_free(msg);
@@ -971,7 +971,7 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 nlmsg_end(msg, nlh);
 ib_device_put(device);

-return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

 err_free:
 nlmsg_free(msg);
@@ -1073,7 +1073,7 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,

 nlmsg_end(msg, nlh);
 ib_device_put(device);
-return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

 err_free:
 nlmsg_free(msg);
@@ -1250,7 +1250,7 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,

 nlmsg_end(msg, nlh);
 ib_device_put(device);
-return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

 err_free:
 nlmsg_free(msg);
@@ -1595,7 +1595,7 @@ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
 put_device(data.cdev);
 if (ibdev)
 ib_device_put(ibdev);
-return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

 out_data:
 put_device(data.cdev);
@@ -1635,7 +1635,7 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 return err;
 }
 nlmsg_end(msg, nlh);
-return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 }

 static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1733,7 +1733,7 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,

 nlmsg_end(msg, nlh);
 ib_device_put(device);
-return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

 err_fill:
 rdma_counter_unbind_qpn(device, port, qpn, cntn);
@@ -1801,7 +1801,7 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,

 nlmsg_end(msg, nlh);
 ib_device_put(device);
-return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

 err_fill:
 rdma_counter_bind_qpn(device, port, qpn, cntn);
@@ -1892,7 +1892,7 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
 mutex_unlock(&stats->lock);
 nlmsg_end(msg, nlh);
 ib_device_put(device);
-return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

 err_table:
 nla_nest_cancel(msg, table_attr);
@@ -1964,7 +1964,7 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,

 nlmsg_end(msg, nlh);
 ib_device_put(device);
-return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

 err_msg:
 nlmsg_free(msg);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7d8071c7e564..17fc2936c077 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -860,7 +860,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
 /* Repair the nlmsg header length */
 nlmsg_end(skb, nlh);

-return rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
+return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
 }

 static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index b477295a96c2..7a50cedcef1f 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -289,6 +289,24 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
 ib_width_enum_to_int(attr.active_width), speed);
 }

+static const char *phys_state_to_str(enum ib_port_phys_state phys_state)
+{
+static const char * phys_state_str[] = {
+"<unknown>",
+"Sleep",
+"Polling",
+"Disabled",
+"PortConfigurationTraining",
+"LinkUp",
+"LinkErrorRecovery",
+"Phy Test",
+};
+
+if (phys_state < ARRAY_SIZE(phys_state_str))
+return phys_state_str[phys_state];
+return "<unknown>";
+}
+
 static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
 char *buf)
 {
@@ -300,16 +318,8 @@ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
 if (ret)
 return ret;

-switch (attr.phys_state) {
-case 1: return sprintf(buf, "1: Sleep\n");
-case 2: return sprintf(buf, "2: Polling\n");
-case 3: return sprintf(buf, "3: Disabled\n");
-case 4: return sprintf(buf, "4: PortConfigurationTraining\n");
-case 5: return sprintf(buf, "5: LinkUp\n");
-case 6: return sprintf(buf, "6: LinkErrorRecovery\n");
-case 7: return sprintf(buf, "7: Phy Test\n");
-default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
-}
+return sprintf(buf, "%d: %s\n", attr.phys_state,
+phys_state_to_str(attr.phys_state));
 }

 static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
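The magic numbers 1..7 that the old switch printed (and that drivers such as bnxt_re later in this page stop hard-coding) correspond to the ib_port_phys_state enum introduced by this series; phys_state_str[] above is indexed by those same values. From the values visible in these hunks the enum presumably reads roughly as follows (a sketch, not a quote of ib_verbs.h):

    enum ib_port_phys_state {
            IB_PORT_PHYS_STATE_SLEEP = 1,
            IB_PORT_PHYS_STATE_POLLING = 2,
            IB_PORT_PHYS_STATE_DISABLED = 3,
            IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
            IB_PORT_PHYS_STATE_LINK_UP = 5,
            IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
            IB_PORT_PHYS_STATE_PHY_TEST = 7,
    };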
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 9aebe9ce8b07..f67a30fda1ed 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -218,7 +218,7 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp)
 umem_odp->interval_tree.start =
 ALIGN_DOWN(umem_odp->umem.address, page_size);
 if (check_add_overflow(umem_odp->umem.address,
-umem_odp->umem.length,
+(unsigned long)umem_odp->umem.length,
 &umem_odp->interval_tree.last))
 return -EOVERFLOW;
 umem_odp->interval_tree.last =
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index ffdeaf6e0b68..d1407fa378e8 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1042,7 +1042,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
 ib_unregister_mad_agent(file->agent[i]);

 mutex_unlock(&file->port->file_mutex);
-
+mutex_destroy(&file->mutex);
 kfree(file);
 return 0;
 }
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 7c10dfe417a4..14a80fd9f464 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3479,7 +3479,8 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,

 err_copy:
 ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
-
+/* It was released in ib_destroy_srq_user */
+srq = NULL;
 err_free:
 kfree(srq);
 err_put:
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e369ac0d6f51..db98111b47f4 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -120,6 +120,8 @@ static void ib_uverbs_release_dev(struct device *device)

 uverbs_destroy_api(dev->uapi);
 cleanup_srcu_struct(&dev->disassociate_srcu);
+mutex_destroy(&dev->lists_mutex);
+mutex_destroy(&dev->xrcd_tree_mutex);
 kfree(dev);
 }

@@ -212,6 +214,8 @@ void ib_uverbs_release_file(struct kref *ref)

 if (file->disassociate_page)
 __free_pages(file->disassociate_page, 0);
+mutex_destroy(&file->umap_lock);
+mutex_destroy(&file->ucontext_lock);
 kfree(file);
 }

diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 92349bf37589..f974b6854224 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2259,6 +2259,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
 if (ret)
 return ret;
 }
+mutex_destroy(&xrcd->tgt_qp_mutex);

 return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
 }
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 604b71875f5f..3421a0b15983 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -74,7 +74,7 @@ static const char * const bnxt_re_stat_name[] = {
 [BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd",
 [BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded",
 [BNXT_RE_RNR_NAKS_RCVD] = "rnr_naks_rcvd",
-[BNXT_RE_MISSING_RESP] = "missin_resp",
+[BNXT_RE_MISSING_RESP] = "missing_resp",
 [BNXT_RE_UNRECOVERABLE_ERR] = "unrecoverable_err",
 [BNXT_RE_BAD_RESP_ERR] = "bad_resp_err",
 [BNXT_RE_LOCAL_QP_OP_ERR] = "local_qp_op_err",
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 098ab883733e..b4149dc9e824 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -220,10 +220,10 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,

 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
 port_attr->state = IB_PORT_ACTIVE;
-port_attr->phys_state = 5;
+port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
 } else {
 port_attr->state = IB_PORT_DOWN;
-port_attr->phys_state = 3;
+port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
 }
 port_attr->max_mtu = IB_MTU_4096;
 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
@@ -1398,7 +1398,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
 dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
 bnxt_qplib_destroy_srq(&rdev->qplib_res,
 &srq->qplib_srq);
-goto exit;
+goto fail;
 }
 }
 if (nq)
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 029babe713f3..30a54f8aa42c 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1473,7 +1473,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 &rdev->active_width);
 set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
-bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);

 return 0;
 free_sctx:
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index e775c1a1a450..dcf02ec02810 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
@@ -991,33 +991,8 @@ static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *pro | |||
991 | static int iwch_query_port(struct ib_device *ibdev, | 991 | static int iwch_query_port(struct ib_device *ibdev, |
992 | u8 port, struct ib_port_attr *props) | 992 | u8 port, struct ib_port_attr *props) |
993 | { | 993 | { |
994 | struct iwch_dev *dev; | ||
995 | struct net_device *netdev; | ||
996 | struct in_device *inetdev; | ||
997 | |||
998 | pr_debug("%s ibdev %p\n", __func__, ibdev); | 994 | pr_debug("%s ibdev %p\n", __func__, ibdev); |
999 | 995 | ||
1000 | dev = to_iwch_dev(ibdev); | ||
1001 | netdev = dev->rdev.port_info.lldevs[port-1]; | ||
1002 | |||
1003 | /* props being zeroed by the caller, avoid zeroing it here */ | ||
1004 | props->max_mtu = IB_MTU_4096; | ||
1005 | props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); | ||
1006 | |||
1007 | if (!netif_carrier_ok(netdev)) | ||
1008 | props->state = IB_PORT_DOWN; | ||
1009 | else { | ||
1010 | inetdev = in_dev_get(netdev); | ||
1011 | if (inetdev) { | ||
1012 | if (inetdev->ifa_list) | ||
1013 | props->state = IB_PORT_ACTIVE; | ||
1014 | else | ||
1015 | props->state = IB_PORT_INIT; | ||
1016 | in_dev_put(inetdev); | ||
1017 | } else | ||
1018 | props->state = IB_PORT_INIT; | ||
1019 | } | ||
1020 | |||
1021 | props->port_cap_flags = | 996 | props->port_cap_flags = |
1022 | IB_PORT_CM_SUP | | 997 | IB_PORT_CM_SUP | |
1023 | IB_PORT_SNMP_TUNNEL_SUP | | 998 | IB_PORT_SNMP_TUNNEL_SUP | |
@@ -1273,8 +1248,24 @@ static const struct ib_device_ops iwch_dev_ops = { | |||
1273 | INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext), | 1248 | INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext), |
1274 | }; | 1249 | }; |
1275 | 1250 | ||
1251 | static int set_netdevs(struct ib_device *ib_dev, struct cxio_rdev *rdev) | ||
1252 | { | ||
1253 | int ret; | ||
1254 | int i; | ||
1255 | |||
1256 | for (i = 0; i < rdev->port_info.nports; i++) { | ||
1257 | ret = ib_device_set_netdev(ib_dev, rdev->port_info.lldevs[i], | ||
1258 | i + 1); | ||
1259 | if (ret) | ||
1260 | return ret; | ||
1261 | } | ||
1262 | return 0; | ||
1263 | } | ||
1264 | |||
1276 | int iwch_register_device(struct iwch_dev *dev) | 1265 | int iwch_register_device(struct iwch_dev *dev) |
1277 | { | 1266 | { |
1267 | int err; | ||
1268 | |||
1278 | pr_debug("%s iwch_dev %p\n", __func__, dev); | 1269 | pr_debug("%s iwch_dev %p\n", __func__, dev); |
1279 | memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); | 1270 | memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); |
1280 | memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6); | 1271 | memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6); |
@@ -1315,6 +1306,10 @@ int iwch_register_device(struct iwch_dev *dev) | |||
1315 | 1306 | ||
1316 | rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group); | 1307 | rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group); |
1317 | ib_set_device_ops(&dev->ibdev, &iwch_dev_ops); | 1308 | ib_set_device_ops(&dev->ibdev, &iwch_dev_ops); |
1309 | err = set_netdevs(&dev->ibdev, &dev->rdev); | ||
1310 | if (err) | ||
1311 | return err; | ||
1312 | |||
1318 | return ib_register_device(&dev->ibdev, "cxgb3_%d"); | 1313 | return ib_register_device(&dev->ibdev, "cxgb3_%d"); |
1319 | } | 1314 | } |
1320 | 1315 | ||
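With the per-driver port-state guessing deleted from iwch_query_port(), the driver's remaining duty is to tell the core which netdev backs each port via the new set_netdevs() helper; the shared iWarp query_port code in the core can then derive the state itself. A hedged sketch of that shared logic, reconstructed from the lines removed above (the helper name is illustrative, not the actual core function):

    static void iw_port_state_from_netdev(struct net_device *netdev,
                                          struct ib_port_attr *props)
    {
            struct in_device *inetdev;

            props->max_mtu = IB_MTU_4096;
            props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

            if (!netif_carrier_ok(netdev)) {
                    props->state = IB_PORT_DOWN;
                    return;
            }

            /* Carrier is up: report ACTIVE only if an IPv4 address is configured. */
            inetdev = in_dev_get(netdev);
            if (inetdev && inetdev->ifa_list)
                    props->state = IB_PORT_ACTIVE;
            else
                    props->state = IB_PORT_INIT;
            if (inetdev)
                    in_dev_put(inetdev);
    }

The cxgb4 hunk below drops the identical block from c4iw_query_port() for the same reason.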
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 5e59c5708729..d373ac0fe2cb 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c | |||
@@ -305,32 +305,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro | |||
305 | static int c4iw_query_port(struct ib_device *ibdev, u8 port, | 305 | static int c4iw_query_port(struct ib_device *ibdev, u8 port, |
306 | struct ib_port_attr *props) | 306 | struct ib_port_attr *props) |
307 | { | 307 | { |
308 | struct c4iw_dev *dev; | ||
309 | struct net_device *netdev; | ||
310 | struct in_device *inetdev; | ||
311 | |||
312 | pr_debug("ibdev %p\n", ibdev); | 308 | pr_debug("ibdev %p\n", ibdev); |
313 | 309 | ||
314 | dev = to_c4iw_dev(ibdev); | ||
315 | netdev = dev->rdev.lldi.ports[port-1]; | ||
316 | /* props being zeroed by the caller, avoid zeroing it here */ | ||
317 | props->max_mtu = IB_MTU_4096; | ||
318 | props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); | ||
319 | |||
320 | if (!netif_carrier_ok(netdev)) | ||
321 | props->state = IB_PORT_DOWN; | ||
322 | else { | ||
323 | inetdev = in_dev_get(netdev); | ||
324 | if (inetdev) { | ||
325 | if (inetdev->ifa_list) | ||
326 | props->state = IB_PORT_ACTIVE; | ||
327 | else | ||
328 | props->state = IB_PORT_INIT; | ||
329 | in_dev_put(inetdev); | ||
330 | } else | ||
331 | props->state = IB_PORT_INIT; | ||
332 | } | ||
333 | |||
334 | props->port_cap_flags = | 310 | props->port_cap_flags = |
335 | IB_PORT_CM_SUP | | 311 | IB_PORT_CM_SUP | |
336 | IB_PORT_SNMP_TUNNEL_SUP | | 312 | IB_PORT_SNMP_TUNNEL_SUP | |
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h index 119f8efec564..2283e432693e 100644 --- a/drivers/infiniband/hw/efa/efa.h +++ b/drivers/infiniband/hw/efa/efa.h | |||
@@ -156,5 +156,8 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, | |||
156 | int qp_attr_mask, struct ib_udata *udata); | 156 | int qp_attr_mask, struct ib_udata *udata); |
157 | enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev, | 157 | enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev, |
158 | u8 port_num); | 158 | u8 port_num); |
159 | struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num); | ||
160 | int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, | ||
161 | u8 port_num, int index); | ||
159 | 162 | ||
160 | #endif /* _EFA_H_ */ | 163 | #endif /* _EFA_H_ */ |
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c index 2cb42484b0f8..3c412bc5b94f 100644 --- a/drivers/infiniband/hw/efa/efa_com.c +++ b/drivers/infiniband/hw/efa/efa_com.c | |||
@@ -109,17 +109,19 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset) | |||
109 | } while (time_is_after_jiffies(exp_time)); | 109 | } while (time_is_after_jiffies(exp_time)); |
110 | 110 | ||
111 | if (read_resp->req_id != mmio_read->seq_num) { | 111 | if (read_resp->req_id != mmio_read->seq_num) { |
112 | ibdev_err(edev->efa_dev, | 112 | ibdev_err_ratelimited( |
113 | "Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n", | 113 | edev->efa_dev, |
114 | mmio_read->seq_num, offset, read_resp->req_id, | 114 | "Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n", |
115 | read_resp->reg_off); | 115 | mmio_read->seq_num, offset, read_resp->req_id, |
116 | read_resp->reg_off); | ||
116 | err = EFA_MMIO_READ_INVALID; | 117 | err = EFA_MMIO_READ_INVALID; |
117 | goto out; | 118 | goto out; |
118 | } | 119 | } |
119 | 120 | ||
120 | if (read_resp->reg_off != offset) { | 121 | if (read_resp->reg_off != offset) { |
121 | ibdev_err(edev->efa_dev, | 122 | ibdev_err_ratelimited( |
122 | "Reading register failed: wrong offset provided\n"); | 123 | edev->efa_dev, |
124 | "Reading register failed: wrong offset provided\n"); | ||
123 | err = EFA_MMIO_READ_INVALID; | 125 | err = EFA_MMIO_READ_INVALID; |
124 | goto out; | 126 | goto out; |
125 | } | 127 | } |
@@ -293,9 +295,10 @@ static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq, | |||
293 | u16 ctx_id = cmd_id & (aq->depth - 1); | 295 | u16 ctx_id = cmd_id & (aq->depth - 1); |
294 | 296 | ||
295 | if (aq->comp_ctx[ctx_id].occupied && capture) { | 297 | if (aq->comp_ctx[ctx_id].occupied && capture) { |
296 | ibdev_err(aq->efa_dev, | 298 | ibdev_err_ratelimited( |
297 | "Completion context for command_id %#x is occupied\n", | 299 | aq->efa_dev, |
298 | cmd_id); | 300 | "Completion context for command_id %#x is occupied\n", |
301 | cmd_id); | ||
299 | return NULL; | 302 | return NULL; |
300 | } | 303 | } |
301 | 304 | ||
@@ -401,7 +404,7 @@ static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue | |||
401 | 404 | ||
402 | spin_lock(&aq->sq.lock); | 405 | spin_lock(&aq->sq.lock); |
403 | if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) { | 406 | if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) { |
404 | ibdev_err(aq->efa_dev, "Admin queue is closed\n"); | 407 | ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n"); |
405 | spin_unlock(&aq->sq.lock); | 408 | spin_unlock(&aq->sq.lock); |
406 | return ERR_PTR(-ENODEV); | 409 | return ERR_PTR(-ENODEV); |
407 | } | 410 | } |
@@ -519,8 +522,9 @@ static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_c | |||
519 | break; | 522 | break; |
520 | 523 | ||
521 | if (time_is_before_jiffies(timeout)) { | 524 | if (time_is_before_jiffies(timeout)) { |
522 | ibdev_err(aq->efa_dev, | 525 | ibdev_err_ratelimited( |
523 | "Wait for completion (polling) timeout\n"); | 526 | aq->efa_dev, |
527 | "Wait for completion (polling) timeout\n"); | ||
524 | /* EFA didn't have any completion */ | 528 | /* EFA didn't have any completion */ |
525 | atomic64_inc(&aq->stats.no_completion); | 529 | atomic64_inc(&aq->stats.no_completion); |
526 | 530 | ||
@@ -561,17 +565,19 @@ static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *com | |||
561 | atomic64_inc(&aq->stats.no_completion); | 565 | atomic64_inc(&aq->stats.no_completion); |
562 | 566 | ||
563 | if (comp_ctx->status == EFA_CMD_COMPLETED) | 567 | if (comp_ctx->status == EFA_CMD_COMPLETED) |
564 | ibdev_err(aq->efa_dev, | 568 | ibdev_err_ratelimited( |
565 | "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", | 569 | aq->efa_dev, |
566 | efa_com_cmd_str(comp_ctx->cmd_opcode), | 570 | "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", |
567 | comp_ctx->cmd_opcode, comp_ctx->status, | 571 | efa_com_cmd_str(comp_ctx->cmd_opcode), |
568 | comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); | 572 | comp_ctx->cmd_opcode, comp_ctx->status, |
573 | comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); | ||
569 | else | 574 | else |
570 | ibdev_err(aq->efa_dev, | 575 | ibdev_err_ratelimited( |
571 | "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", | 576 | aq->efa_dev, |
572 | efa_com_cmd_str(comp_ctx->cmd_opcode), | 577 | "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", |
573 | comp_ctx->cmd_opcode, comp_ctx->status, | 578 | efa_com_cmd_str(comp_ctx->cmd_opcode), |
574 | comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); | 579 | comp_ctx->cmd_opcode, comp_ctx->status, |
580 | comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); | ||
575 | 581 | ||
576 | clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state); | 582 | clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state); |
577 | err = -ETIME; | 583 | err = -ETIME; |
@@ -633,10 +639,11 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq, | |||
633 | cmd->aq_common_descriptor.opcode); | 639 | cmd->aq_common_descriptor.opcode); |
634 | comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size); | 640 | comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size); |
635 | if (IS_ERR(comp_ctx)) { | 641 | if (IS_ERR(comp_ctx)) { |
636 | ibdev_err(aq->efa_dev, | 642 | ibdev_err_ratelimited( |
637 | "Failed to submit command %s (opcode %u) err %ld\n", | 643 | aq->efa_dev, |
638 | efa_com_cmd_str(cmd->aq_common_descriptor.opcode), | 644 | "Failed to submit command %s (opcode %u) err %ld\n", |
639 | cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx)); | 645 | efa_com_cmd_str(cmd->aq_common_descriptor.opcode), |
646 | cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx)); | ||
640 | 647 | ||
641 | up(&aq->avail_cmds); | 648 | up(&aq->avail_cmds); |
642 | return PTR_ERR(comp_ctx); | 649 | return PTR_ERR(comp_ctx); |
@@ -644,11 +651,12 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq, | |||
644 | 651 | ||
645 | err = efa_com_wait_and_process_admin_cq(comp_ctx, aq); | 652 | err = efa_com_wait_and_process_admin_cq(comp_ctx, aq); |
646 | if (err) | 653 | if (err) |
647 | ibdev_err(aq->efa_dev, | 654 | ibdev_err_ratelimited( |
648 | "Failed to process command %s (opcode %u) comp_status %d err %d\n", | 655 | aq->efa_dev, |
649 | efa_com_cmd_str(cmd->aq_common_descriptor.opcode), | 656 | "Failed to process command %s (opcode %u) comp_status %d err %d\n", |
650 | cmd->aq_common_descriptor.opcode, | 657 | efa_com_cmd_str(cmd->aq_common_descriptor.opcode), |
651 | comp_ctx->comp_status, err); | 658 | cmd->aq_common_descriptor.opcode, comp_ctx->comp_status, |
659 | err); | ||
652 | 660 | ||
653 | up(&aq->avail_cmds); | 661 | up(&aq->avail_cmds); |
654 | 662 | ||
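Every admin-queue error print in efa_com.c moves to ibdev_err_ratelimited(); a wedged or timing-out admin queue keeps retrying the same failing command, so unthrottled prints could flood the log. A hedged sketch of the pattern such a ratelimited wrapper presumably follows (mirroring dev_err_ratelimited(); the macro name here is illustrative only):

    #define my_ibdev_err_ratelimited(ibdev, fmt, ...)                          \
    do {                                                                       \
            /* one ratelimit state per call site */                            \
            static DEFINE_RATELIMIT_STATE(_rs,                                 \
                                          DEFAULT_RATELIMIT_INTERVAL,          \
                                          DEFAULT_RATELIMIT_BURST);            \
            if (__ratelimit(&_rs))                                             \
                    ibdev_err(ibdev, fmt, ##__VA_ARGS__);                      \
    } while (0)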
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c index 62345d8abf3c..c079f1332082 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.c +++ b/drivers/infiniband/hw/efa/efa_com_cmd.c | |||
@@ -44,7 +44,8 @@ int efa_com_create_qp(struct efa_com_dev *edev, | |||
44 | (struct efa_admin_acq_entry *)&cmd_completion, | 44 | (struct efa_admin_acq_entry *)&cmd_completion, |
45 | sizeof(cmd_completion)); | 45 | sizeof(cmd_completion)); |
46 | if (err) { | 46 | if (err) { |
47 | ibdev_err(edev->efa_dev, "Failed to create qp [%d]\n", err); | 47 | ibdev_err_ratelimited(edev->efa_dev, |
48 | "Failed to create qp [%d]\n", err); | ||
48 | return err; | 49 | return err; |
49 | } | 50 | } |
50 | 51 | ||
@@ -82,9 +83,10 @@ int efa_com_modify_qp(struct efa_com_dev *edev, | |||
82 | (struct efa_admin_acq_entry *)&resp, | 83 | (struct efa_admin_acq_entry *)&resp, |
83 | sizeof(resp)); | 84 | sizeof(resp)); |
84 | if (err) { | 85 | if (err) { |
85 | ibdev_err(edev->efa_dev, | 86 | ibdev_err_ratelimited( |
86 | "Failed to modify qp-%u modify_mask[%#x] [%d]\n", | 87 | edev->efa_dev, |
87 | cmd.qp_handle, cmd.modify_mask, err); | 88 | "Failed to modify qp-%u modify_mask[%#x] [%d]\n", |
89 | cmd.qp_handle, cmd.modify_mask, err); | ||
88 | return err; | 90 | return err; |
89 | } | 91 | } |
90 | 92 | ||
@@ -109,8 +111,9 @@ int efa_com_query_qp(struct efa_com_dev *edev, | |||
109 | (struct efa_admin_acq_entry *)&resp, | 111 | (struct efa_admin_acq_entry *)&resp, |
110 | sizeof(resp)); | 112 | sizeof(resp)); |
111 | if (err) { | 113 | if (err) { |
112 | ibdev_err(edev->efa_dev, "Failed to query qp-%u [%d]\n", | 114 | ibdev_err_ratelimited(edev->efa_dev, |
113 | cmd.qp_handle, err); | 115 | "Failed to query qp-%u [%d]\n", |
116 | cmd.qp_handle, err); | ||
114 | return err; | 117 | return err; |
115 | } | 118 | } |
116 | 119 | ||
@@ -139,8 +142,9 @@ int efa_com_destroy_qp(struct efa_com_dev *edev, | |||
139 | (struct efa_admin_acq_entry *)&cmd_completion, | 142 | (struct efa_admin_acq_entry *)&cmd_completion, |
140 | sizeof(cmd_completion)); | 143 | sizeof(cmd_completion)); |
141 | if (err) { | 144 | if (err) { |
142 | ibdev_err(edev->efa_dev, "Failed to destroy qp-%u [%d]\n", | 145 | ibdev_err_ratelimited(edev->efa_dev, |
143 | qp_cmd.qp_handle, err); | 146 | "Failed to destroy qp-%u [%d]\n", |
147 | qp_cmd.qp_handle, err); | ||
144 | return err; | 148 | return err; |
145 | } | 149 | } |
146 | 150 | ||
@@ -173,7 +177,8 @@ int efa_com_create_cq(struct efa_com_dev *edev, | |||
173 | (struct efa_admin_acq_entry *)&cmd_completion, | 177 | (struct efa_admin_acq_entry *)&cmd_completion, |
174 | sizeof(cmd_completion)); | 178 | sizeof(cmd_completion)); |
175 | if (err) { | 179 | if (err) { |
176 | ibdev_err(edev->efa_dev, "Failed to create cq[%d]\n", err); | 180 | ibdev_err_ratelimited(edev->efa_dev, |
181 | "Failed to create cq[%d]\n", err); | ||
177 | return err; | 182 | return err; |
178 | } | 183 | } |
179 | 184 | ||
@@ -201,8 +206,9 @@ int efa_com_destroy_cq(struct efa_com_dev *edev, | |||
201 | sizeof(destroy_resp)); | 206 | sizeof(destroy_resp)); |
202 | 207 | ||
203 | if (err) { | 208 | if (err) { |
204 | ibdev_err(edev->efa_dev, "Failed to destroy CQ-%u [%d]\n", | 209 | ibdev_err_ratelimited(edev->efa_dev, |
205 | params->cq_idx, err); | 210 | "Failed to destroy CQ-%u [%d]\n", |
211 | params->cq_idx, err); | ||
206 | return err; | 212 | return err; |
207 | } | 213 | } |
208 | 214 | ||
@@ -250,7 +256,8 @@ int efa_com_register_mr(struct efa_com_dev *edev, | |||
250 | (struct efa_admin_acq_entry *)&cmd_completion, | 256 | (struct efa_admin_acq_entry *)&cmd_completion, |
251 | sizeof(cmd_completion)); | 257 | sizeof(cmd_completion)); |
252 | if (err) { | 258 | if (err) { |
253 | ibdev_err(edev->efa_dev, "Failed to register mr [%d]\n", err); | 259 | ibdev_err_ratelimited(edev->efa_dev, |
260 | "Failed to register mr [%d]\n", err); | ||
254 | return err; | 261 | return err; |
255 | } | 262 | } |
256 | 263 | ||
@@ -277,9 +284,9 @@ int efa_com_dereg_mr(struct efa_com_dev *edev, | |||
277 | (struct efa_admin_acq_entry *)&cmd_completion, | 284 | (struct efa_admin_acq_entry *)&cmd_completion, |
278 | sizeof(cmd_completion)); | 285 | sizeof(cmd_completion)); |
279 | if (err) { | 286 | if (err) { |
280 | ibdev_err(edev->efa_dev, | 287 | ibdev_err_ratelimited(edev->efa_dev, |
281 | "Failed to de-register mr(lkey-%u) [%d]\n", | 288 | "Failed to de-register mr(lkey-%u) [%d]\n", |
282 | mr_cmd.l_key, err); | 289 | mr_cmd.l_key, err); |
283 | return err; | 290 | return err; |
284 | } | 291 | } |
285 | 292 | ||
@@ -306,8 +313,9 @@ int efa_com_create_ah(struct efa_com_dev *edev, | |||
306 | (struct efa_admin_acq_entry *)&cmd_completion, | 313 | (struct efa_admin_acq_entry *)&cmd_completion, |
307 | sizeof(cmd_completion)); | 314 | sizeof(cmd_completion)); |
308 | if (err) { | 315 | if (err) { |
309 | ibdev_err(edev->efa_dev, "Failed to create ah for %pI6 [%d]\n", | 316 | ibdev_err_ratelimited(edev->efa_dev, |
310 | ah_cmd.dest_addr, err); | 317 | "Failed to create ah for %pI6 [%d]\n", |
318 | ah_cmd.dest_addr, err); | ||
311 | return err; | 319 | return err; |
312 | } | 320 | } |
313 | 321 | ||
@@ -334,8 +342,9 @@ int efa_com_destroy_ah(struct efa_com_dev *edev, | |||
334 | (struct efa_admin_acq_entry *)&cmd_completion, | 342 | (struct efa_admin_acq_entry *)&cmd_completion, |
335 | sizeof(cmd_completion)); | 343 | sizeof(cmd_completion)); |
336 | if (err) { | 344 | if (err) { |
337 | ibdev_err(edev->efa_dev, "Failed to destroy ah-%d pd-%d [%d]\n", | 345 | ibdev_err_ratelimited(edev->efa_dev, |
338 | ah_cmd.ah, ah_cmd.pd, err); | 346 | "Failed to destroy ah-%d pd-%d [%d]\n", |
347 | ah_cmd.ah, ah_cmd.pd, err); | ||
339 | return err; | 348 | return err; |
340 | } | 349 | } |
341 | 350 | ||
@@ -367,8 +376,9 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev, | |||
367 | int err; | 376 | int err; |
368 | 377 | ||
369 | if (!efa_com_check_supported_feature_id(edev, feature_id)) { | 378 | if (!efa_com_check_supported_feature_id(edev, feature_id)) { |
370 | ibdev_err(edev->efa_dev, "Feature %d isn't supported\n", | 379 | ibdev_err_ratelimited(edev->efa_dev, |
371 | feature_id); | 380 | "Feature %d isn't supported\n", |
381 | feature_id); | ||
372 | return -EOPNOTSUPP; | 382 | return -EOPNOTSUPP; |
373 | } | 383 | } |
374 | 384 | ||
@@ -396,9 +406,10 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev, | |||
396 | sizeof(*get_resp)); | 406 | sizeof(*get_resp)); |
397 | 407 | ||
398 | if (err) { | 408 | if (err) { |
399 | ibdev_err(edev->efa_dev, | 409 | ibdev_err_ratelimited( |
400 | "Failed to submit get_feature command %d [%d]\n", | 410 | edev->efa_dev, |
401 | feature_id, err); | 411 | "Failed to submit get_feature command %d [%d]\n", |
412 | feature_id, err); | ||
402 | return err; | 413 | return err; |
403 | } | 414 | } |
404 | 415 | ||
@@ -421,8 +432,9 @@ int efa_com_get_network_attr(struct efa_com_dev *edev, | |||
421 | err = efa_com_get_feature(edev, &resp, | 432 | err = efa_com_get_feature(edev, &resp, |
422 | EFA_ADMIN_NETWORK_ATTR); | 433 | EFA_ADMIN_NETWORK_ATTR); |
423 | if (err) { | 434 | if (err) { |
424 | ibdev_err(edev->efa_dev, | 435 | ibdev_err_ratelimited(edev->efa_dev, |
425 | "Failed to get network attributes %d\n", err); | 436 | "Failed to get network attributes %d\n", |
437 | err); | ||
426 | return err; | 438 | return err; |
427 | } | 439 | } |
428 | 440 | ||
@@ -441,8 +453,9 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, | |||
441 | 453 | ||
442 | err = efa_com_get_feature(edev, &resp, EFA_ADMIN_DEVICE_ATTR); | 454 | err = efa_com_get_feature(edev, &resp, EFA_ADMIN_DEVICE_ATTR); |
443 | if (err) { | 455 | if (err) { |
444 | ibdev_err(edev->efa_dev, "Failed to get device attributes %d\n", | 456 | ibdev_err_ratelimited(edev->efa_dev, |
445 | err); | 457 | "Failed to get device attributes %d\n", |
458 | err); | ||
446 | return err; | 459 | return err; |
447 | } | 460 | } |
448 | 461 | ||
@@ -456,9 +469,10 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, | |||
456 | result->db_bar = resp.u.device_attr.db_bar; | 469 | result->db_bar = resp.u.device_attr.db_bar; |
457 | 470 | ||
458 | if (result->admin_api_version < 1) { | 471 | if (result->admin_api_version < 1) { |
459 | ibdev_err(edev->efa_dev, | 472 | ibdev_err_ratelimited( |
460 | "Failed to get device attr api version [%u < 1]\n", | 473 | edev->efa_dev, |
461 | result->admin_api_version); | 474 | "Failed to get device attr api version [%u < 1]\n", |
475 | result->admin_api_version); | ||
462 | return -EINVAL; | 476 | return -EINVAL; |
463 | } | 477 | } |
464 | 478 | ||
@@ -466,8 +480,9 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, | |||
466 | err = efa_com_get_feature(edev, &resp, | 480 | err = efa_com_get_feature(edev, &resp, |
467 | EFA_ADMIN_QUEUE_ATTR); | 481 | EFA_ADMIN_QUEUE_ATTR); |
468 | if (err) { | 482 | if (err) { |
469 | ibdev_err(edev->efa_dev, | 483 | ibdev_err_ratelimited(edev->efa_dev, |
470 | "Failed to get network attributes %d\n", err); | 484 | "Failed to get queue attributes %d\n", |
485 | err); | ||
471 | return err; | 486 | return err; |
472 | } | 487 | } |
473 | 488 | ||
@@ -497,7 +512,8 @@ int efa_com_get_hw_hints(struct efa_com_dev *edev, | |||
497 | 512 | ||
498 | err = efa_com_get_feature(edev, &resp, EFA_ADMIN_HW_HINTS); | 513 | err = efa_com_get_feature(edev, &resp, EFA_ADMIN_HW_HINTS); |
499 | if (err) { | 514 | if (err) { |
500 | ibdev_err(edev->efa_dev, "Failed to get hw hints %d\n", err); | 515 | ibdev_err_ratelimited(edev->efa_dev, |
516 | "Failed to get hw hints %d\n", err); | ||
501 | return err; | 517 | return err; |
502 | } | 518 | } |
503 | 519 | ||
@@ -520,8 +536,9 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev, | |||
520 | int err; | 536 | int err; |
521 | 537 | ||
522 | if (!efa_com_check_supported_feature_id(edev, feature_id)) { | 538 | if (!efa_com_check_supported_feature_id(edev, feature_id)) { |
523 | ibdev_err(edev->efa_dev, "Feature %d isn't supported\n", | 539 | ibdev_err_ratelimited(edev->efa_dev, |
524 | feature_id); | 540 | "Feature %d isn't supported\n", |
541 | feature_id); | ||
525 | return -EOPNOTSUPP; | 542 | return -EOPNOTSUPP; |
526 | } | 543 | } |
527 | 544 | ||
@@ -545,9 +562,10 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev, | |||
545 | sizeof(*set_resp)); | 562 | sizeof(*set_resp)); |
546 | 563 | ||
547 | if (err) { | 564 | if (err) { |
548 | ibdev_err(edev->efa_dev, | 565 | ibdev_err_ratelimited( |
549 | "Failed to submit set_feature command %d error: %d\n", | 566 | edev->efa_dev, |
550 | feature_id, err); | 567 | "Failed to submit set_feature command %d error: %d\n", |
568 | feature_id, err); | ||
551 | return err; | 569 | return err; |
552 | } | 570 | } |
553 | 571 | ||
@@ -574,8 +592,9 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups) | |||
574 | 592 | ||
575 | err = efa_com_get_feature(edev, &get_resp, EFA_ADMIN_AENQ_CONFIG); | 593 | err = efa_com_get_feature(edev, &get_resp, EFA_ADMIN_AENQ_CONFIG); |
576 | if (err) { | 594 | if (err) { |
577 | ibdev_err(edev->efa_dev, "Failed to get aenq attributes: %d\n", | 595 | ibdev_err_ratelimited(edev->efa_dev, |
578 | err); | 596 | "Failed to get aenq attributes: %d\n", |
597 | err); | ||
579 | return err; | 598 | return err; |
580 | } | 599 | } |
581 | 600 | ||
@@ -585,9 +604,10 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups) | |||
585 | get_resp.u.aenq.enabled_groups); | 604 | get_resp.u.aenq.enabled_groups); |
586 | 605 | ||
587 | if ((get_resp.u.aenq.supported_groups & groups) != groups) { | 606 | if ((get_resp.u.aenq.supported_groups & groups) != groups) { |
588 | ibdev_err(edev->efa_dev, | 607 | ibdev_err_ratelimited( |
589 | "Trying to set unsupported aenq groups[%#x] supported[%#x]\n", | 608 | edev->efa_dev, |
590 | groups, get_resp.u.aenq.supported_groups); | 609 | "Trying to set unsupported aenq groups[%#x] supported[%#x]\n", |
610 | groups, get_resp.u.aenq.supported_groups); | ||
591 | return -EOPNOTSUPP; | 611 | return -EOPNOTSUPP; |
592 | } | 612 | } |
593 | 613 | ||
@@ -595,8 +615,9 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups) | |||
595 | err = efa_com_set_feature(edev, &set_resp, &cmd, | 615 | err = efa_com_set_feature(edev, &set_resp, &cmd, |
596 | EFA_ADMIN_AENQ_CONFIG); | 616 | EFA_ADMIN_AENQ_CONFIG); |
597 | if (err) { | 617 | if (err) { |
598 | ibdev_err(edev->efa_dev, "Failed to set aenq attributes: %d\n", | 618 | ibdev_err_ratelimited(edev->efa_dev, |
599 | err); | 619 | "Failed to set aenq attributes: %d\n", |
620 | err); | ||
600 | return err; | 621 | return err; |
601 | } | 622 | } |
602 | 623 | ||
@@ -619,7 +640,8 @@ int efa_com_alloc_pd(struct efa_com_dev *edev, | |||
619 | (struct efa_admin_acq_entry *)&resp, | 640 | (struct efa_admin_acq_entry *)&resp, |
620 | sizeof(resp)); | 641 | sizeof(resp)); |
621 | if (err) { | 642 | if (err) { |
622 | ibdev_err(edev->efa_dev, "Failed to allocate pd[%d]\n", err); | 643 | ibdev_err_ratelimited(edev->efa_dev, |
644 | "Failed to allocate pd[%d]\n", err); | ||
623 | return err; | 645 | return err; |
624 | } | 646 | } |
625 | 647 | ||
@@ -645,8 +667,9 @@ int efa_com_dealloc_pd(struct efa_com_dev *edev, | |||
645 | (struct efa_admin_acq_entry *)&resp, | 667 | (struct efa_admin_acq_entry *)&resp, |
646 | sizeof(resp)); | 668 | sizeof(resp)); |
647 | if (err) { | 669 | if (err) { |
648 | ibdev_err(edev->efa_dev, "Failed to deallocate pd-%u [%d]\n", | 670 | ibdev_err_ratelimited(edev->efa_dev, |
649 | cmd.pd, err); | 671 | "Failed to deallocate pd-%u [%d]\n", |
672 | cmd.pd, err); | ||
650 | return err; | 673 | return err; |
651 | } | 674 | } |
652 | 675 | ||
@@ -669,7 +692,8 @@ int efa_com_alloc_uar(struct efa_com_dev *edev, | |||
669 | (struct efa_admin_acq_entry *)&resp, | 692 | (struct efa_admin_acq_entry *)&resp, |
670 | sizeof(resp)); | 693 | sizeof(resp)); |
671 | if (err) { | 694 | if (err) { |
672 | ibdev_err(edev->efa_dev, "Failed to allocate uar[%d]\n", err); | 695 | ibdev_err_ratelimited(edev->efa_dev, |
696 | "Failed to allocate uar[%d]\n", err); | ||
673 | return err; | 697 | return err; |
674 | } | 698 | } |
675 | 699 | ||
@@ -695,10 +719,47 @@ int efa_com_dealloc_uar(struct efa_com_dev *edev, | |||
695 | (struct efa_admin_acq_entry *)&resp, | 719 | (struct efa_admin_acq_entry *)&resp, |
696 | sizeof(resp)); | 720 | sizeof(resp)); |
697 | if (err) { | 721 | if (err) { |
698 | ibdev_err(edev->efa_dev, "Failed to deallocate uar-%u [%d]\n", | 722 | ibdev_err_ratelimited(edev->efa_dev, |
699 | cmd.uar, err); | 723 | "Failed to deallocate uar-%u [%d]\n", |
724 | cmd.uar, err); | ||
700 | return err; | 725 | return err; |
701 | } | 726 | } |
702 | 727 | ||
703 | return 0; | 728 | return 0; |
704 | } | 729 | } |
730 | |||
731 | int efa_com_get_stats(struct efa_com_dev *edev, | ||
732 | struct efa_com_get_stats_params *params, | ||
733 | union efa_com_get_stats_result *result) | ||
734 | { | ||
735 | struct efa_com_admin_queue *aq = &edev->aq; | ||
736 | struct efa_admin_aq_get_stats_cmd cmd = {}; | ||
737 | struct efa_admin_acq_get_stats_resp resp; | ||
738 | int err; | ||
739 | |||
740 | cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_STATS; | ||
741 | cmd.type = params->type; | ||
742 | cmd.scope = params->scope; | ||
743 | cmd.scope_modifier = params->scope_modifier; | ||
744 | |||
745 | err = efa_com_cmd_exec(aq, | ||
746 | (struct efa_admin_aq_entry *)&cmd, | ||
747 | sizeof(cmd), | ||
748 | (struct efa_admin_acq_entry *)&resp, | ||
749 | sizeof(resp)); | ||
750 | if (err) { | ||
751 | ibdev_err_ratelimited( | ||
752 | edev->efa_dev, | ||
753 | "Failed to get stats type-%u scope-%u.%u [%d]\n", | ||
754 | cmd.type, cmd.scope, cmd.scope_modifier, err); | ||
755 | return err; | ||
756 | } | ||
757 | |||
758 | result->basic_stats.tx_bytes = resp.basic_stats.tx_bytes; | ||
759 | result->basic_stats.tx_pkts = resp.basic_stats.tx_pkts; | ||
760 | result->basic_stats.rx_bytes = resp.basic_stats.rx_bytes; | ||
761 | result->basic_stats.rx_pkts = resp.basic_stats.rx_pkts; | ||
762 | result->basic_stats.rx_drops = resp.basic_stats.rx_drops; | ||
763 | |||
764 | return 0; | ||
765 | } | ||
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h index a1174380462c..7f6c13052f49 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.h +++ b/drivers/infiniband/hw/efa/efa_com_cmd.h | |||
@@ -225,6 +225,26 @@ struct efa_com_dealloc_uar_params { | |||
225 | u16 uarn; | 225 | u16 uarn; |
226 | }; | 226 | }; |
227 | 227 | ||
228 | struct efa_com_get_stats_params { | ||
229 | /* see enum efa_admin_get_stats_type */ | ||
230 | u8 type; | ||
231 | /* see enum efa_admin_get_stats_scope */ | ||
232 | u8 scope; | ||
233 | u16 scope_modifier; | ||
234 | }; | ||
235 | |||
236 | struct efa_com_basic_stats { | ||
237 | u64 tx_bytes; | ||
238 | u64 tx_pkts; | ||
239 | u64 rx_bytes; | ||
240 | u64 rx_pkts; | ||
241 | u64 rx_drops; | ||
242 | }; | ||
243 | |||
244 | union efa_com_get_stats_result { | ||
245 | struct efa_com_basic_stats basic_stats; | ||
246 | }; | ||
247 | |||
228 | void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low); | 248 | void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low); |
229 | int efa_com_create_qp(struct efa_com_dev *edev, | 249 | int efa_com_create_qp(struct efa_com_dev *edev, |
230 | struct efa_com_create_qp_params *params, | 250 | struct efa_com_create_qp_params *params, |
@@ -266,5 +286,8 @@ int efa_com_alloc_uar(struct efa_com_dev *edev, | |||
266 | struct efa_com_alloc_uar_result *result); | 286 | struct efa_com_alloc_uar_result *result); |
267 | int efa_com_dealloc_uar(struct efa_com_dev *edev, | 287 | int efa_com_dealloc_uar(struct efa_com_dev *edev, |
268 | struct efa_com_dealloc_uar_params *params); | 288 | struct efa_com_dealloc_uar_params *params); |
289 | int efa_com_get_stats(struct efa_com_dev *edev, | ||
290 | struct efa_com_get_stats_params *params, | ||
291 | union efa_com_get_stats_result *result); | ||
269 | 292 | ||
270 | #endif /* _EFA_COM_CMD_H_ */ | 293 | #endif /* _EFA_COM_CMD_H_ */ |
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c index dd1c6d49466f..83858f7e83d0 100644 --- a/drivers/infiniband/hw/efa/efa_main.c +++ b/drivers/infiniband/hw/efa/efa_main.c | |||
@@ -201,6 +201,7 @@ static const struct ib_device_ops efa_dev_ops = { | |||
201 | .driver_id = RDMA_DRIVER_EFA, | 201 | .driver_id = RDMA_DRIVER_EFA, |
202 | .uverbs_abi_ver = EFA_UVERBS_ABI_VERSION, | 202 | .uverbs_abi_ver = EFA_UVERBS_ABI_VERSION, |
203 | 203 | ||
204 | .alloc_hw_stats = efa_alloc_hw_stats, | ||
204 | .alloc_pd = efa_alloc_pd, | 205 | .alloc_pd = efa_alloc_pd, |
205 | .alloc_ucontext = efa_alloc_ucontext, | 206 | .alloc_ucontext = efa_alloc_ucontext, |
206 | .create_ah = efa_create_ah, | 207 | .create_ah = efa_create_ah, |
@@ -212,6 +213,7 @@ static const struct ib_device_ops efa_dev_ops = { | |||
212 | .destroy_ah = efa_destroy_ah, | 213 | .destroy_ah = efa_destroy_ah, |
213 | .destroy_cq = efa_destroy_cq, | 214 | .destroy_cq = efa_destroy_cq, |
214 | .destroy_qp = efa_destroy_qp, | 215 | .destroy_qp = efa_destroy_qp, |
216 | .get_hw_stats = efa_get_hw_stats, | ||
215 | .get_link_layer = efa_port_link_layer, | 217 | .get_link_layer = efa_port_link_layer, |
216 | .get_port_immutable = efa_get_port_immutable, | 218 | .get_port_immutable = efa_get_port_immutable, |
217 | .mmap = efa_mmap, | 219 | .mmap = efa_mmap, |
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index df77bc312a25..4edae89e8e3c 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c | |||
@@ -41,6 +41,33 @@ static inline u64 get_mmap_key(const struct efa_mmap_entry *efa) | |||
41 | ((u64)efa->mmap_page << PAGE_SHIFT); | 41 | ((u64)efa->mmap_page << PAGE_SHIFT); |
42 | } | 42 | } |
43 | 43 | ||
44 | #define EFA_DEFINE_STATS(op) \ | ||
45 | op(EFA_TX_BYTES, "tx_bytes") \ | ||
46 | op(EFA_TX_PKTS, "tx_pkts") \ | ||
47 | op(EFA_RX_BYTES, "rx_bytes") \ | ||
48 | op(EFA_RX_PKTS, "rx_pkts") \ | ||
49 | op(EFA_RX_DROPS, "rx_drops") \ | ||
50 | op(EFA_SUBMITTED_CMDS, "submitted_cmds") \ | ||
51 | op(EFA_COMPLETED_CMDS, "completed_cmds") \ | ||
52 | op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \ | ||
53 | op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \ | ||
54 | op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \ | ||
55 | op(EFA_CREATE_QP_ERR, "create_qp_err") \ | ||
56 | op(EFA_REG_MR_ERR, "reg_mr_err") \ | ||
57 | op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \ | ||
58 | op(EFA_CREATE_AH_ERR, "create_ah_err") | ||
59 | |||
60 | #define EFA_STATS_ENUM(ename, name) ename, | ||
61 | #define EFA_STATS_STR(ename, name) [ename] = name, | ||
62 | |||
63 | enum efa_hw_stats { | ||
64 | EFA_DEFINE_STATS(EFA_STATS_ENUM) | ||
65 | }; | ||
66 | |||
67 | static const char *const efa_stats_names[] = { | ||
68 | EFA_DEFINE_STATS(EFA_STATS_STR) | ||
69 | }; | ||
70 | |||
44 | #define EFA_CHUNK_PAYLOAD_SHIFT 12 | 71 | #define EFA_CHUNK_PAYLOAD_SHIFT 12 |
45 | #define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT) | 72 | #define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT) |
46 | #define EFA_CHUNK_PAYLOAD_PTR_SIZE 8 | 73 | #define EFA_CHUNK_PAYLOAD_PTR_SIZE 8 |
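EFA_DEFINE_STATS above is an X-macro: a single list of (enum name, string) pairs is expanded twice, once with EFA_STATS_ENUM to build the enum and once with EFA_STATS_STR to build the name table, so indices and names cannot drift apart. A minimal self-contained illustration of the same technique (the stat names here are invented for the example):

    #include <stdio.h>

    #define DEFINE_STATS(op)                  \
            op(STAT_TX_BYTES, "tx_bytes")     \
            op(STAT_RX_BYTES, "rx_bytes")

    #define STATS_ENUM(ename, name) ename,
    #define STATS_STR(ename, name)  [ename] = name,

    enum { DEFINE_STATS(STATS_ENUM) NUM_STATS };

    static const char *const stats_names[] = { DEFINE_STATS(STATS_STR) };

    int main(void)
    {
            /* stats_names[i] is guaranteed to describe enum value i */
            for (int i = 0; i < NUM_STATS; i++)
                    printf("%d -> %s\n", i, stats_names[i]);
            return 0;
    }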
@@ -121,7 +148,7 @@ static inline struct efa_ah *to_eah(struct ib_ah *ibah) | |||
121 | } | 148 | } |
122 | 149 | ||
123 | #define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \ | 150 | #define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \ |
124 | sizeof(((typeof(x) *)0)->fld) <= (sz)) | 151 | FIELD_SIZEOF(typeof(x), fld) <= (sz)) |
125 | 152 | ||
126 | #define is_reserved_cleared(reserved) \ | 153 | #define is_reserved_cleared(reserved) \ |
127 | !memchr_inv(reserved, 0, sizeof(reserved)) | 154 | !memchr_inv(reserved, 0, sizeof(reserved)) |
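The field_avail() change is cosmetic (FIELD_SIZEOF(t, f) is the kernel's spelling of sizeof(((t *)0)->f)); the macro itself answers an ABI question: does the length user space handed us actually cover this field? Newer fields are only touched when the caller's buffer is large enough. A self-contained sketch of the check (the struct and sizes are invented for the example):

    #include <stddef.h>
    #include <stdio.h>

    #define FIELD_SIZEOF(t, f)      (sizeof(((t *)0)->f))
    #define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
                                     FIELD_SIZEOF(typeof(x), fld) <= (sz))

    struct cmd_v2 { int a; int b; int new_field; };

    int main(void)
    {
            struct cmd_v2 cmd;
            size_t user_len = 8;    /* an old caller only knows about a and b */

            /* prints 0: new_field lies beyond the 8 bytes the caller provided */
            printf("new_field available: %d\n",
                   (int)field_avail(cmd, new_field, user_len));
            return 0;
    }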
@@ -306,7 +333,7 @@ int efa_query_port(struct ib_device *ibdev, u8 port, | |||
306 | props->lmc = 1; | 333 | props->lmc = 1; |
307 | 334 | ||
308 | props->state = IB_PORT_ACTIVE; | 335 | props->state = IB_PORT_ACTIVE; |
309 | props->phys_state = 5; | 336 | props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; |
310 | props->gid_tbl_len = 1; | 337 | props->gid_tbl_len = 1; |
311 | props->pkey_tbl_len = 1; | 338 | props->pkey_tbl_len = 1; |
312 | props->active_speed = IB_SPEED_EDR; | 339 | props->active_speed = IB_SPEED_EDR; |
@@ -1473,14 +1500,12 @@ int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) | |||
1473 | 1500 | ||
1474 | ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey); | 1501 | ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey); |
1475 | 1502 | ||
1476 | if (mr->umem) { | 1503 | params.l_key = mr->ibmr.lkey; |
1477 | params.l_key = mr->ibmr.lkey; | 1504 | err = efa_com_dereg_mr(&dev->edev, ¶ms); |
1478 | err = efa_com_dereg_mr(&dev->edev, ¶ms); | 1505 | if (err) |
1479 | if (err) | 1506 | return err; |
1480 | return err; | ||
1481 | } | ||
1482 | ib_umem_release(mr->umem); | ||
1483 | 1507 | ||
1508 | ib_umem_release(mr->umem); | ||
1484 | kfree(mr); | 1509 | kfree(mr); |
1485 | 1510 | ||
1486 | return 0; | 1511 | return 0; |
@@ -1727,6 +1752,54 @@ void efa_destroy_ah(struct ib_ah *ibah, u32 flags) | |||
1727 | efa_ah_destroy(dev, ah); | 1752 | efa_ah_destroy(dev, ah); |
1728 | } | 1753 | } |
1729 | 1754 | ||
1755 | struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num) | ||
1756 | { | ||
1757 | return rdma_alloc_hw_stats_struct(efa_stats_names, | ||
1758 | ARRAY_SIZE(efa_stats_names), | ||
1759 | RDMA_HW_STATS_DEFAULT_LIFESPAN); | ||
1760 | } | ||
1761 | |||
1762 | int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, | ||
1763 | u8 port_num, int index) | ||
1764 | { | ||
1765 | struct efa_com_get_stats_params params = {}; | ||
1766 | union efa_com_get_stats_result result; | ||
1767 | struct efa_dev *dev = to_edev(ibdev); | ||
1768 | struct efa_com_basic_stats *bs; | ||
1769 | struct efa_com_stats_admin *as; | ||
1770 | struct efa_stats *s; | ||
1771 | int err; | ||
1772 | |||
1773 | params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC; | ||
1774 | params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL; | ||
1775 | |||
1776 | err = efa_com_get_stats(&dev->edev, ¶ms, &result); | ||
1777 | if (err) | ||
1778 | return err; | ||
1779 | |||
1780 | bs = &result.basic_stats; | ||
1781 | stats->value[EFA_TX_BYTES] = bs->tx_bytes; | ||
1782 | stats->value[EFA_TX_PKTS] = bs->tx_pkts; | ||
1783 | stats->value[EFA_RX_BYTES] = bs->rx_bytes; | ||
1784 | stats->value[EFA_RX_PKTS] = bs->rx_pkts; | ||
1785 | stats->value[EFA_RX_DROPS] = bs->rx_drops; | ||
1786 | |||
1787 | as = &dev->edev.aq.stats; | ||
1788 | stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd); | ||
1789 | stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd); | ||
1790 | stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion); | ||
1791 | |||
1792 | s = &dev->stats; | ||
1793 | stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd); | ||
1794 | stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err); | ||
1795 | stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err); | ||
1796 | stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err); | ||
1797 | stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err); | ||
1798 | stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err); | ||
1799 | |||
1800 | return ARRAY_SIZE(efa_stats_names); | ||
1801 | } | ||
1802 | |||
1730 | enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev, | 1803 | enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev, |
1731 | u8 port_num) | 1804 | u8 port_num) |
1732 | { | 1805 | { |
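The new efa_alloc_hw_stats()/efa_get_hw_stats() pair plugs EFA into the rdma_hw_stats framework: the core allocates the stats array from efa_stats_names at registration time, exposes each entry as a read-only hw_counters file in sysfs, and calls the driver's get_hw_stats() (whose return value is the number of counters filled) when one is read. A hedged user-space sketch of consuming one of the new counters; the device name and sysfs path here are assumptions for illustration:

    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/class/infiniband/efa_0/ports/1/hw_counters/tx_bytes";
            unsigned long long val;
            FILE *f = fopen(path, "r");

            if (!f || fscanf(f, "%llu", &val) != 1) {
                    perror(path);
                    return 1;
            }
            fclose(f);
            printf("tx_bytes = %llu\n", val);
            return 0;
    }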
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 67052dc3100c..9b1fb84a3d45 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
@@ -4101,6 +4101,7 @@ def_access_ibp_counter(rc_dupreq); | |||
4101 | def_access_ibp_counter(rdma_seq); | 4101 | def_access_ibp_counter(rdma_seq); |
4102 | def_access_ibp_counter(unaligned); | 4102 | def_access_ibp_counter(unaligned); |
4103 | def_access_ibp_counter(seq_naks); | 4103 | def_access_ibp_counter(seq_naks); |
4104 | def_access_ibp_counter(rc_crwaits); | ||
4104 | 4105 | ||
4105 | static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = { | 4106 | static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = { |
4106 | [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH), | 4107 | [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH), |
@@ -5119,6 +5120,7 @@ static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = { | |||
5119 | [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq), | 5120 | [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq), |
5120 | [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned), | 5121 | [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned), |
5121 | [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks), | 5122 | [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks), |
5123 | [C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits), | ||
5122 | [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL, | 5124 | [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL, |
5123 | access_sw_cpu_rc_acks), | 5125 | access_sw_cpu_rc_acks), |
5124 | [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL, | 5126 | [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL, |
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index b76cf81f927f..4ca5ac8d7e9e 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h | |||
@@ -1245,6 +1245,7 @@ enum { | |||
1245 | C_SW_IBP_RDMA_SEQ, | 1245 | C_SW_IBP_RDMA_SEQ, |
1246 | C_SW_IBP_UNALIGNED, | 1246 | C_SW_IBP_UNALIGNED, |
1247 | C_SW_IBP_SEQ_NAK, | 1247 | C_SW_IBP_SEQ_NAK, |
1248 | C_SW_IBP_RC_CRWAITS, | ||
1248 | C_SW_CPU_RC_ACKS, | 1249 | C_SW_CPU_RC_ACKS, |
1249 | C_SW_CPU_RC_QACKS, | 1250 | C_SW_CPU_RC_QACKS, |
1250 | C_SW_CPU_RC_DELAYED_COMP, | 1251 | C_SW_CPU_RC_DELAYED_COMP, |
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 184dba3c2828..d8ff063a5419 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c | |||
@@ -2326,7 +2326,7 @@ struct opa_port_status_req { | |||
2326 | __be32 vl_select_mask; | 2326 | __be32 vl_select_mask; |
2327 | }; | 2327 | }; |
2328 | 2328 | ||
2329 | #define VL_MASK_ALL 0x000080ff | 2329 | #define VL_MASK_ALL 0x00000000000080ffUL |
2330 | 2330 | ||
2331 | struct opa_port_status_rsp { | 2331 | struct opa_port_status_rsp { |
2332 | __u8 port_num; | 2332 | __u8 port_num; |
@@ -2625,15 +2625,14 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp, | |||
2625 | } | 2625 | } |
2626 | 2626 | ||
2627 | static void a0_portstatus(struct hfi1_pportdata *ppd, | 2627 | static void a0_portstatus(struct hfi1_pportdata *ppd, |
2628 | struct opa_port_status_rsp *rsp, u32 vl_select_mask) | 2628 | struct opa_port_status_rsp *rsp) |
2629 | { | 2629 | { |
2630 | if (!is_bx(ppd->dd)) { | 2630 | if (!is_bx(ppd->dd)) { |
2631 | unsigned long vl; | 2631 | unsigned long vl; |
2632 | u64 sum_vl_xmit_wait = 0; | 2632 | u64 sum_vl_xmit_wait = 0; |
2633 | u32 vl_all_mask = VL_MASK_ALL; | 2633 | unsigned long vl_all_mask = VL_MASK_ALL; |
2634 | 2634 | ||
2635 | for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), | 2635 | for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) { |
2636 | 8 * sizeof(vl_all_mask)) { | ||
2637 | u64 tmp = sum_vl_xmit_wait + | 2636 | u64 tmp = sum_vl_xmit_wait + |
2638 | read_port_cntr(ppd, C_TX_WAIT_VL, | 2637 | read_port_cntr(ppd, C_TX_WAIT_VL, |
2639 | idx_from_vl(vl)); | 2638 | idx_from_vl(vl)); |
@@ -2730,12 +2729,12 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, | |||
2730 | (struct opa_port_status_req *)pmp->data; | 2729 | (struct opa_port_status_req *)pmp->data; |
2731 | struct hfi1_devdata *dd = dd_from_ibdev(ibdev); | 2730 | struct hfi1_devdata *dd = dd_from_ibdev(ibdev); |
2732 | struct opa_port_status_rsp *rsp; | 2731 | struct opa_port_status_rsp *rsp; |
2733 | u32 vl_select_mask = be32_to_cpu(req->vl_select_mask); | 2732 | unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask); |
2734 | unsigned long vl; | 2733 | unsigned long vl; |
2735 | size_t response_data_size; | 2734 | size_t response_data_size; |
2736 | u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; | 2735 | u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; |
2737 | u8 port_num = req->port_num; | 2736 | u8 port_num = req->port_num; |
2738 | u8 num_vls = hweight32(vl_select_mask); | 2737 | u8 num_vls = hweight64(vl_select_mask); |
2739 | struct _vls_pctrs *vlinfo; | 2738 | struct _vls_pctrs *vlinfo; |
2740 | struct hfi1_ibport *ibp = to_iport(ibdev, port); | 2739 | struct hfi1_ibport *ibp = to_iport(ibdev, port); |
2741 | struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); | 2740 | struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); |
@@ -2770,7 +2769,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, | |||
2770 | 2769 | ||
2771 | hfi1_read_link_quality(dd, &rsp->link_quality_indicator); | 2770 | hfi1_read_link_quality(dd, &rsp->link_quality_indicator); |
2772 | 2771 | ||
2773 | rsp->vl_select_mask = cpu_to_be32(vl_select_mask); | 2772 | rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask); |
2774 | rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS, | 2773 | rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS, |
2775 | CNTR_INVALID_VL)); | 2774 | CNTR_INVALID_VL)); |
2776 | rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS, | 2775 | rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS, |
@@ -2841,8 +2840,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, | |||
2841 | * So in the for_each_set_bit() loop below, we don't need | 2840 | * So in the for_each_set_bit() loop below, we don't need |
2842 | * any additional checks for vl. | 2841 | * any additional checks for vl. |
2843 | */ | 2842 | */ |
2844 | for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), | 2843 | for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { |
2845 | 8 * sizeof(vl_select_mask)) { | ||
2846 | memset(vlinfo, 0, sizeof(*vlinfo)); | 2844 | memset(vlinfo, 0, sizeof(*vlinfo)); |
2847 | 2845 | ||
2848 | tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl)); | 2846 | tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl)); |
@@ -2883,7 +2881,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, | |||
2883 | vfi++; | 2881 | vfi++; |
2884 | } | 2882 | } |
2885 | 2883 | ||
2886 | a0_portstatus(ppd, rsp, vl_select_mask); | 2884 | a0_portstatus(ppd, rsp); |
2887 | 2885 | ||
2888 | if (resp_len) | 2886 | if (resp_len) |
2889 | *resp_len += response_data_size; | 2887 | *resp_len += response_data_size; |
@@ -2930,16 +2928,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port, | |||
2930 | return error_counter_summary; | 2928 | return error_counter_summary; |
2931 | } | 2929 | } |
2932 | 2930 | ||
2933 | static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp, | 2931 | static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp) |
2934 | u32 vl_select_mask) | ||
2935 | { | 2932 | { |
2936 | if (!is_bx(ppd->dd)) { | 2933 | if (!is_bx(ppd->dd)) { |
2937 | unsigned long vl; | 2934 | unsigned long vl; |
2938 | u64 sum_vl_xmit_wait = 0; | 2935 | u64 sum_vl_xmit_wait = 0; |
2939 | u32 vl_all_mask = VL_MASK_ALL; | 2936 | unsigned long vl_all_mask = VL_MASK_ALL; |
2940 | 2937 | ||
2941 | for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), | 2938 | for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) { |
2942 | 8 * sizeof(vl_all_mask)) { | ||
2943 | u64 tmp = sum_vl_xmit_wait + | 2939 | u64 tmp = sum_vl_xmit_wait + |
2944 | read_port_cntr(ppd, C_TX_WAIT_VL, | 2940 | read_port_cntr(ppd, C_TX_WAIT_VL, |
2945 | idx_from_vl(vl)); | 2941 | idx_from_vl(vl)); |
@@ -2994,7 +2990,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, | |||
2994 | u64 port_mask; | 2990 | u64 port_mask; |
2995 | u8 port_num; | 2991 | u8 port_num; |
2996 | unsigned long vl; | 2992 | unsigned long vl; |
2997 | u32 vl_select_mask; | 2993 | unsigned long vl_select_mask; |
2998 | int vfi; | 2994 | int vfi; |
2999 | u16 link_width; | 2995 | u16 link_width; |
3000 | u16 link_speed; | 2996 | u16 link_speed; |
@@ -3071,8 +3067,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, | |||
3071 | * So in the for_each_set_bit() loop below, we don't need | 3067 | * So in the for_each_set_bit() loop below, we don't need |
3072 | * any additional checks for vl. | 3068 | * any additional checks for vl. |
3073 | */ | 3069 | */ |
3074 | for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), | 3070 | for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { |
3075 | 8 * sizeof(req->vl_select_mask)) { | ||
3076 | memset(vlinfo, 0, sizeof(*vlinfo)); | 3071 | memset(vlinfo, 0, sizeof(*vlinfo)); |
3077 | 3072 | ||
3078 | rsp->vls[vfi].port_vl_xmit_data = | 3073 | rsp->vls[vfi].port_vl_xmit_data = |
@@ -3120,7 +3115,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, | |||
3120 | vfi++; | 3115 | vfi++; |
3121 | } | 3116 | } |
3122 | 3117 | ||
3123 | a0_datacounters(ppd, rsp, vl_select_mask); | 3118 | a0_datacounters(ppd, rsp); |
3124 | 3119 | ||
3125 | if (resp_len) | 3120 | if (resp_len) |
3126 | *resp_len += response_data_size; | 3121 | *resp_len += response_data_size; |
@@ -3215,7 +3210,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, | |||
3215 | struct _vls_ectrs *vlinfo; | 3210 | struct _vls_ectrs *vlinfo; |
3216 | unsigned long vl; | 3211 | unsigned long vl; |
3217 | u64 port_mask, tmp; | 3212 | u64 port_mask, tmp; |
3218 | u32 vl_select_mask; | 3213 | unsigned long vl_select_mask; |
3219 | int vfi; | 3214 | int vfi; |
3220 | 3215 | ||
3221 | req = (struct opa_port_error_counters64_msg *)pmp->data; | 3216 | req = (struct opa_port_error_counters64_msg *)pmp->data; |
@@ -3273,8 +3268,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, | |||
3273 | vlinfo = &rsp->vls[0]; | 3268 | vlinfo = &rsp->vls[0]; |
3274 | vfi = 0; | 3269 | vfi = 0; |
3275 | vl_select_mask = be32_to_cpu(req->vl_select_mask); | 3270 | vl_select_mask = be32_to_cpu(req->vl_select_mask); |
3276 | for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), | 3271 | for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { |
3277 | 8 * sizeof(req->vl_select_mask)) { | ||
3278 | memset(vlinfo, 0, sizeof(*vlinfo)); | 3272 | memset(vlinfo, 0, sizeof(*vlinfo)); |
3279 | rsp->vls[vfi].port_vl_xmit_discards = | 3273 | rsp->vls[vfi].port_vl_xmit_discards = |
3280 | cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL, | 3274 | cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL, |
@@ -3485,7 +3479,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, | |||
3485 | u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; | 3479 | u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; |
3486 | u64 portn = be64_to_cpu(req->port_select_mask[3]); | 3480 | u64 portn = be64_to_cpu(req->port_select_mask[3]); |
3487 | u32 counter_select = be32_to_cpu(req->counter_select_mask); | 3481 | u32 counter_select = be32_to_cpu(req->counter_select_mask); |
3488 | u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */ | 3482 | unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */ |
3489 | unsigned long vl; | 3483 | unsigned long vl; |
3490 | 3484 | ||
3491 | if ((nports != 1) || (portn != 1 << port)) { | 3485 | if ((nports != 1) || (portn != 1 << port)) { |
@@ -3579,8 +3573,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, | |||
3579 | if (counter_select & CS_UNCORRECTABLE_ERRORS) | 3573 | if (counter_select & CS_UNCORRECTABLE_ERRORS) |
3580 | write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0); | 3574 | write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0); |
3581 | 3575 | ||
3582 | for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), | 3576 | for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { |
3583 | 8 * sizeof(vl_select_mask)) { | ||
3584 | if (counter_select & CS_PORT_XMIT_DATA) | 3577 | if (counter_select & CS_PORT_XMIT_DATA) |
3585 | write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0); | 3578 | write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0); |
3586 | 3579 | ||
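The hfi1/mad.c changes are the KASAN fix from the merge log: for_each_set_bit() loads whole unsigned long words through the pointer it is given, so aiming it at a 4-byte u32 mask via a cast makes it read 4 bytes past the variable on a 64-bit kernel. Declaring the masks as unsigned long and walking BITS_PER_LONG keeps the access inside the object. A self-contained illustration of iterating the set bits of a full-word mask (equivalent in effect to the fixed loops above):

    #include <stdio.h>

    int main(void)
    {
            unsigned long vl_select_mask = 0x80ffUL;   /* VL_MASK_ALL */
            unsigned long mask = vl_select_mask;

            while (mask) {
                    unsigned long vl = (unsigned long)__builtin_ctzl(mask);

                    printf("handle per-VL counters for VL %lu\n", vl);
                    mask &= mask - 1;       /* clear the lowest set bit */
            }
            return 0;
    }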
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 024a7c2b6124..513a8aac9ccd 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c | |||
@@ -595,11 +595,8 @@ check_s_state: | |||
595 | case IB_WR_SEND_WITH_IMM: | 595 | case IB_WR_SEND_WITH_IMM: |
596 | case IB_WR_SEND_WITH_INV: | 596 | case IB_WR_SEND_WITH_INV: |
597 | /* If no credit, return. */ | 597 | /* If no credit, return. */ |
598 | if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && | 598 | if (!rvt_rc_credit_avail(qp, wqe)) |
599 | rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { | ||
600 | qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; | ||
601 | goto bail; | 599 | goto bail; |
602 | } | ||
603 | if (len > pmtu) { | 600 | if (len > pmtu) { |
604 | qp->s_state = OP(SEND_FIRST); | 601 | qp->s_state = OP(SEND_FIRST); |
605 | len = pmtu; | 602 | len = pmtu; |
@@ -632,11 +629,8 @@ check_s_state: | |||
632 | goto no_flow_control; | 629 | goto no_flow_control; |
633 | case IB_WR_RDMA_WRITE_WITH_IMM: | 630 | case IB_WR_RDMA_WRITE_WITH_IMM: |
634 | /* If no credit, return. */ | 631 | /* If no credit, return. */ |
635 | if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && | 632 | if (!rvt_rc_credit_avail(qp, wqe)) |
636 | rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { | ||
637 | qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; | ||
638 | goto bail; | 633 | goto bail; |
639 | } | ||
640 | no_flow_control: | 634 | no_flow_control: |
641 | put_ib_reth_vaddr( | 635 | put_ib_reth_vaddr( |
642 | wqe->rdma_wr.remote_addr, | 636 | wqe->rdma_wr.remote_addr, |
@@ -1483,6 +1477,11 @@ static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn, | |||
1483 | req->ack_pending = cur_seg - req->comp_seg; | 1477 | req->ack_pending = cur_seg - req->comp_seg; |
1484 | priv->pending_tid_r_segs += req->ack_pending; | 1478 | priv->pending_tid_r_segs += req->ack_pending; |
1485 | qp->s_num_rd_atomic += req->ack_pending; | 1479 | qp->s_num_rd_atomic += req->ack_pending; |
1480 | trace_hfi1_tid_req_update_num_rd_atomic(qp, 0, | ||
1481 | wqe->wr.opcode, | ||
1482 | wqe->psn, | ||
1483 | wqe->lpsn, | ||
1484 | req); | ||
1486 | } else { | 1485 | } else { |
1487 | priv->pending_tid_r_segs += req->total_segs; | 1486 | priv->pending_tid_r_segs += req->total_segs; |
1488 | qp->s_num_rd_atomic += req->total_segs; | 1487 | qp->s_num_rd_atomic += req->total_segs; |
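The two send-side credit checks in hfi1's RC path collapse into rvt_rc_credit_avail(), which belongs to the same series as the new C_SW_IBP_RC_CRWAITS counter in chip.c/chip.h above ('Add a counter for credit waits' in the merge log). A hedged sketch of what the helper presumably does, reconstructed from the open-coded check it replaces; the counter-increment helper name is illustrative:

    static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
    {
            if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
                rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
                    qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
                    /* feed the new credit-wait statistic */
                    rvt_ibport_credit_wait_inc(qp);
                    return false;
            }
            return true;
    }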
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 6141f4edc6bf..b4dcc4d29f84 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c | |||
@@ -2646,6 +2646,9 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, | |||
2646 | u32 fpsn; | 2646 | u32 fpsn; |
2647 | 2647 | ||
2648 | lockdep_assert_held(&qp->r_lock); | 2648 | lockdep_assert_held(&qp->r_lock); |
2649 | trace_hfi1_rsp_read_kdeth_eflags(qp, ibpsn); | ||
2650 | trace_hfi1_sender_read_kdeth_eflags(qp); | ||
2651 | trace_hfi1_tid_read_sender_kdeth_eflags(qp, 0); | ||
2649 | spin_lock(&qp->s_lock); | 2652 | spin_lock(&qp->s_lock); |
2650 | /* If the psn is out of valid range, drop the packet */ | 2653 | /* If the psn is out of valid range, drop the packet */ |
2651 | if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || | 2654 | if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || |
@@ -2710,6 +2713,8 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, | |||
2710 | goto s_unlock; | 2713 | goto s_unlock; |
2711 | 2714 | ||
2712 | req = wqe_to_tid_req(wqe); | 2715 | req = wqe_to_tid_req(wqe); |
2716 | trace_hfi1_tid_req_read_kdeth_eflags(qp, 0, wqe->wr.opcode, wqe->psn, | ||
2717 | wqe->lpsn, req); | ||
2713 | switch (rcv_type) { | 2718 | switch (rcv_type) { |
2714 | case RHF_RCV_TYPE_EXPECTED: | 2719 | case RHF_RCV_TYPE_EXPECTED: |
2715 | switch (rte) { | 2720 | switch (rte) { |
@@ -2724,6 +2729,9 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, | |||
2724 | * packets that could be still in the fabric. | 2729 | * packets that could be still in the fabric. |
2725 | */ | 2730 | */ |
2726 | flow = &req->flows[req->clear_tail]; | 2731 | flow = &req->flows[req->clear_tail]; |
2732 | trace_hfi1_tid_flow_read_kdeth_eflags(qp, | ||
2733 | req->clear_tail, | ||
2734 | flow); | ||
2727 | if (priv->s_flags & HFI1_R_TID_SW_PSN) { | 2735 | if (priv->s_flags & HFI1_R_TID_SW_PSN) { |
2728 | diff = cmp_psn(psn, | 2736 | diff = cmp_psn(psn, |
2729 | flow->flow_state.r_next_psn); | 2737 | flow->flow_state.r_next_psn); |
diff --git a/drivers/infiniband/hw/hfi1/trace_tid.h b/drivers/infiniband/hw/hfi1/trace_tid.h index 4388b594ed1b..343fb9894a82 100644 --- a/drivers/infiniband/hw/hfi1/trace_tid.h +++ b/drivers/infiniband/hw/hfi1/trace_tid.h | |||
@@ -627,6 +627,12 @@ DEFINE_EVENT(/* event */ | |||
627 | TP_ARGS(qp, index, flow) | 627 | TP_ARGS(qp, index, flow) |
628 | ); | 628 | ); |
629 | 629 | ||
630 | DEFINE_EVENT(/* event */ | ||
631 | hfi1_tid_flow_template, hfi1_tid_flow_read_kdeth_eflags, | ||
632 | TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), | ||
633 | TP_ARGS(qp, index, flow) | ||
634 | ); | ||
635 | |||
630 | DECLARE_EVENT_CLASS(/* tid_node */ | 636 | DECLARE_EVENT_CLASS(/* tid_node */ |
631 | hfi1_tid_node_template, | 637 | hfi1_tid_node_template, |
632 | TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base, | 638 | TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base, |
@@ -851,6 +857,12 @@ DEFINE_EVENT(/* event */ | |||
851 | TP_ARGS(qp, psn) | 857 | TP_ARGS(qp, psn) |
852 | ); | 858 | ); |
853 | 859 | ||
860 | DEFINE_EVENT(/* event */ | ||
861 | hfi1_responder_info_template, hfi1_rsp_read_kdeth_eflags, | ||
862 | TP_PROTO(struct rvt_qp *qp, u32 psn), | ||
863 | TP_ARGS(qp, psn) | ||
864 | ); | ||
865 | |||
854 | DECLARE_EVENT_CLASS(/* sender_info */ | 866 | DECLARE_EVENT_CLASS(/* sender_info */ |
855 | hfi1_sender_info_template, | 867 | hfi1_sender_info_template, |
856 | TP_PROTO(struct rvt_qp *qp), | 868 | TP_PROTO(struct rvt_qp *qp), |
@@ -955,6 +967,12 @@ DEFINE_EVENT(/* event */ | |||
955 | TP_ARGS(qp) | 967 | TP_ARGS(qp) |
956 | ); | 968 | ); |
957 | 969 | ||
970 | DEFINE_EVENT(/* event */ | ||
971 | hfi1_sender_info_template, hfi1_sender_read_kdeth_eflags, | ||
972 | TP_PROTO(struct rvt_qp *qp), | ||
973 | TP_ARGS(qp) | ||
974 | ); | ||
975 | |||
958 | DECLARE_EVENT_CLASS(/* tid_read_sender */ | 976 | DECLARE_EVENT_CLASS(/* tid_read_sender */ |
959 | hfi1_tid_read_sender_template, | 977 | hfi1_tid_read_sender_template, |
960 | TP_PROTO(struct rvt_qp *qp, char newreq), | 978 | TP_PROTO(struct rvt_qp *qp, char newreq), |
@@ -1015,6 +1033,12 @@ DEFINE_EVENT(/* event */ | |||
1015 | TP_ARGS(qp, newreq) | 1033 | TP_ARGS(qp, newreq) |
1016 | ); | 1034 | ); |
1017 | 1035 | ||
1036 | DEFINE_EVENT(/* event */ | ||
1037 | hfi1_tid_read_sender_template, hfi1_tid_read_sender_kdeth_eflags, | ||
1038 | TP_PROTO(struct rvt_qp *qp, char newreq), | ||
1039 | TP_ARGS(qp, newreq) | ||
1040 | ); | ||
1041 | |||
1018 | DECLARE_EVENT_CLASS(/* tid_rdma_request */ | 1042 | DECLARE_EVENT_CLASS(/* tid_rdma_request */ |
1019 | hfi1_tid_rdma_request_template, | 1043 | hfi1_tid_rdma_request_template, |
1020 | TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, | 1044 | TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, |
@@ -1216,6 +1240,13 @@ DEFINE_EVENT(/* event */ | |||
1216 | ); | 1240 | ); |
1217 | 1241 | ||
1218 | DEFINE_EVENT(/* event */ | 1242 | DEFINE_EVENT(/* event */ |
1243 | hfi1_tid_rdma_request_template, hfi1_tid_req_read_kdeth_eflags, | ||
1244 | TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, | ||
1245 | struct tid_rdma_request *req), | ||
1246 | TP_ARGS(qp, newreq, opcode, psn, lpsn, req) | ||
1247 | ); | ||
1248 | |||
1249 | DEFINE_EVENT(/* event */ | ||
1219 | hfi1_tid_rdma_request_template, hfi1_tid_req_make_rc_ack_write, | 1250 | hfi1_tid_rdma_request_template, hfi1_tid_req_make_rc_ack_write, |
1220 | TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, | 1251 | TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, |
1221 | struct tid_rdma_request *req), | 1252 | struct tid_rdma_request *req), |
@@ -1229,6 +1260,13 @@ DEFINE_EVENT(/* event */ | |||
1229 | TP_ARGS(qp, newreq, opcode, psn, lpsn, req) | 1260 | TP_ARGS(qp, newreq, opcode, psn, lpsn, req) |
1230 | ); | 1261 | ); |
1231 | 1262 | ||
1263 | DEFINE_EVENT(/* event */ | ||
1264 | hfi1_tid_rdma_request_template, hfi1_tid_req_update_num_rd_atomic, | ||
1265 | TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, | ||
1266 | struct tid_rdma_request *req), | ||
1267 | TP_ARGS(qp, newreq, opcode, psn, lpsn, req) | ||
1268 | ); | ||
1269 | |||
1232 | DECLARE_EVENT_CLASS(/* rc_rcv_err */ | 1270 | DECLARE_EVENT_CLASS(/* rc_rcv_err */ |
1233 | hfi1_rc_rcv_err_template, | 1271 | hfi1_rc_rcv_err_template, |
1234 | TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff), | 1272 | TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff), |
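Note on the trace_tid.h additions above: every new tracepoint (hfi1_tid_flow_read_kdeth_eflags, hfi1_rsp_read_kdeth_eflags, hfi1_sender_read_kdeth_eflags, hfi1_tid_read_sender_kdeth_eflags, hfi1_tid_req_read_kdeth_eflags, hfi1_tid_req_update_num_rd_atomic) is a DEFINE_EVENT against an event class that already exists, so no new record layout or format string is introduced; the class supplies those and the event only adds a name, which is why the earlier hfi1 hunk can call trace_hfi1_tid_req_read_kdeth_eflags() with the same argument list as the other request tracepoints. The sketch below is a plain userspace analogy of that class/event split, not the kernel tracing machinery; all names in it are invented.

/* Illustrative userspace analogy only -- the kernel's DECLARE_EVENT_CLASS /
 * DEFINE_EVENT macros generate real tracepoints; this sketch only shows why
 * adding an event to an existing class is a one-liner: the "class" owns the
 * record layout and formatting, each "event" is just a named entry point.
 */
#include <stdio.h>
#include <stdint.h>

struct qp_info { uint32_t qpn; uint32_t psn; };  /* stand-in for rvt_qp fields */

/* the shared "event class": one place that knows how to print the record */
static void responder_info_class(const char *event, const struct qp_info *qp,
                                 uint32_t psn)
{
    printf("%s: qpn 0x%x psn 0x%x\n", event, (unsigned)qp->qpn, (unsigned)psn);
}

/* each "DEFINE_EVENT" becomes a trivially thin named wrapper */
#define DEFINE_RESP_EVENT(name)                                          \
    static void trace_##name(const struct qp_info *qp, uint32_t psn)     \
    {                                                                    \
        responder_info_class(#name, qp, psn);                            \
    }

DEFINE_RESP_EVENT(hfi1_rsp_read_kdeth_eflags)  /* new event, no new format code */

int main(void)
{
    struct qp_info qp = { .qpn = 0x10, .psn = 0x42 };

    trace_hfi1_rsp_read_kdeth_eflags(&qp, qp.psn);
    return 0;
}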
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h index 4d8510b0fc38..9972e0e6545e 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.h +++ b/drivers/infiniband/hw/hfi1/user_sdma.h | |||
@@ -110,12 +110,6 @@ enum pkt_q_sdma_state { | |||
110 | SDMA_PKT_Q_DEFERRED, | 110 | SDMA_PKT_Q_DEFERRED, |
111 | }; | 111 | }; |
112 | 112 | ||
113 | /* | ||
114 | * Maximum retry attempts to submit a TX request | ||
115 | * before putting the process to sleep. | ||
116 | */ | ||
117 | #define MAX_DEFER_RETRY_COUNT 1 | ||
118 | |||
119 | #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */ | 113 | #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */ |
120 | 114 | ||
121 | #define SDMA_DBG(req, fmt, ...) \ | 115 | #define SDMA_DBG(req, fmt, ...) \ |
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 646f61545ed6..9f53f63b1453 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c | |||
@@ -874,16 +874,17 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, | |||
874 | else | 874 | else |
875 | pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); | 875 | pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); |
876 | 876 | ||
877 | if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) | ||
878 | pbc = hfi1_fault_tx(qp, ps->opcode, pbc); | ||
879 | pbc = create_pbc(ppd, | 877 | pbc = create_pbc(ppd, |
880 | pbc, | 878 | pbc, |
881 | qp->srate_mbps, | 879 | qp->srate_mbps, |
882 | vl, | 880 | vl, |
883 | plen); | 881 | plen); |
884 | 882 | ||
885 | /* Update HCRC based on packet opcode */ | 883 | if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) |
886 | pbc = update_hcrc(ps->opcode, pbc); | 884 | pbc = hfi1_fault_tx(qp, ps->opcode, pbc); |
885 | else | ||
886 | /* Update HCRC based on packet opcode */ | ||
887 | pbc = update_hcrc(ps->opcode, pbc); | ||
887 | } | 888 | } |
888 | tx->wqe = qp->s_wqe; | 889 | tx->wqe = qp->s_wqe; |
889 | ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc); | 890 | ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc); |
@@ -1030,12 +1031,12 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, | |||
1030 | else | 1031 | else |
1031 | pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); | 1032 | pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); |
1032 | 1033 | ||
1034 | pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); | ||
1033 | if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) | 1035 | if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) |
1034 | pbc = hfi1_fault_tx(qp, ps->opcode, pbc); | 1036 | pbc = hfi1_fault_tx(qp, ps->opcode, pbc); |
1035 | pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); | 1037 | else |
1036 | 1038 | /* Update HCRC based on packet opcode */ | |
1037 | /* Update HCRC based on packet opcode */ | 1039 | pbc = update_hcrc(ps->opcode, pbc); |
1038 | pbc = update_hcrc(ps->opcode, pbc); | ||
1039 | } | 1040 | } |
1040 | if (cb) | 1041 | if (cb) |
1041 | iowait_pio_inc(&priv->s_iowait); | 1042 | iowait_pio_inc(&priv->s_iowait); |
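Note on the two hfi1 verbs.c hunks above: in both the SDMA and PIO send paths, create_pbc() now runs unconditionally before the debug fault-injection check, and the fault-injection branch becomes mutually exclusive with the HCRC update, so a packet that hfi1_fault_tx() deliberately corrupts presumably no longer gets a freshly recomputed HCRC that would mask the injected fault. The following is a standalone sketch of only the if/else shape; all functions are stubs, not the hfi1 API.

/* Sketch of the reordered PBC handling in hfi1_verbs_send_{dma,pio}().
 * Everything here is a stand-in; only the control-flow shape mirrors the diff.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t create_pbc(uint64_t pbc)   { return pbc | 0x100; } /* stub */
static uint64_t inject_fault(uint64_t pbc) { return pbc ^ 0x1;   } /* stub */
static uint64_t update_hcrc(uint64_t pbc)  { return pbc | 0x8;   } /* stub */

static uint64_t build_pbc(uint64_t pbc, bool should_fault)
{
    pbc = create_pbc(pbc);          /* always build the PBC first now */

    if (should_fault)
        pbc = inject_fault(pbc);    /* faulted packet: leave the HCRC alone */
    else
        pbc = update_hcrc(pbc);     /* normal packet: fix up the HCRC */

    return pbc;
}

int main(void)
{
    printf("normal  pbc=%#llx\n", (unsigned long long)build_pbc(0, false));
    printf("faulted pbc=%#llx\n", (unsigned long long)build_pbc(0, true));
    return 0;
}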
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig index 54782197c717..d602b698b57e 100644 --- a/drivers/infiniband/hw/hns/Kconfig +++ b/drivers/infiniband/hw/hns/Kconfig | |||
@@ -8,8 +8,6 @@ config INFINIBAND_HNS | |||
8 | is used in Hisilicon Hip06 and more further ICT SoC based on | 8 | is used in Hisilicon Hip06 and more further ICT SoC based on |
9 | platform device. | 9 | platform device. |
10 | 10 | ||
11 | To compile HIP06 or HIP08 driver as module, choose M here. | ||
12 | |||
13 | config INFINIBAND_HNS_HIP06 | 11 | config INFINIBAND_HNS_HIP06 |
14 | tristate "Hisilicon Hip06 Family RoCE support" | 12 | tristate "Hisilicon Hip06 Family RoCE support" |
15 | depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET | 13 | depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET |
@@ -17,15 +15,9 @@ config INFINIBAND_HNS_HIP06 | |||
17 | RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and | 15 | RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and |
18 | Hip07 SoC. These RoCE engines are platform devices. | 16 | Hip07 SoC. These RoCE engines are platform devices. |
19 | 17 | ||
20 | To compile this driver, choose Y here: if INFINIBAND_HNS is m, this | ||
21 | module will be called hns-roce-hw-v1 | ||
22 | |||
23 | config INFINIBAND_HNS_HIP08 | 18 | config INFINIBAND_HNS_HIP08 |
24 | tristate "Hisilicon Hip08 Family RoCE support" | 19 | tristate "Hisilicon Hip08 Family RoCE support" |
25 | depends on INFINIBAND_HNS && PCI && HNS3 | 20 | depends on INFINIBAND_HNS && PCI && HNS3 |
26 | ---help--- | 21 | ---help--- |
27 | RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC. | 22 | RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC. |
28 | The RoCE engine is a PCI device. | 23 | The RoCE engine is a PCI device. |
29 | |||
30 | To compile this driver, choose Y here: if INFINIBAND_HNS is m, this | ||
31 | module will be called hns-roce-hw-v2. | ||
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index cdd2ac24fc2a..90e08c0c332d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c | |||
@@ -66,11 +66,9 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr, | |||
66 | HNS_ROCE_VLAN_SL_SHIFT; | 66 | HNS_ROCE_VLAN_SL_SHIFT; |
67 | } | 67 | } |
68 | 68 | ||
69 | ah->av.port_pd = cpu_to_le32(to_hr_pd(ibah->pd)->pdn | | 69 | ah->av.port = rdma_ah_get_port_num(ah_attr); |
70 | (rdma_ah_get_port_num(ah_attr) << | ||
71 | HNS_ROCE_PORT_NUM_SHIFT)); | ||
72 | ah->av.gid_index = grh->sgid_index; | 70 | ah->av.gid_index = grh->sgid_index; |
73 | ah->av.vlan = cpu_to_le16(vlan_tag); | 71 | ah->av.vlan = vlan_tag; |
74 | ah->av.vlan_en = vlan_en; | 72 | ah->av.vlan_en = vlan_en; |
75 | dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index, | 73 | dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index, |
76 | ah->av.vlan); | 74 | ah->av.vlan); |
@@ -79,8 +77,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr, | |||
79 | ah->av.stat_rate = IB_RATE_10_GBPS; | 77 | ah->av.stat_rate = IB_RATE_10_GBPS; |
80 | 78 | ||
81 | memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE); | 79 | memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE); |
82 | ah->av.sl_tclass_flowlabel = cpu_to_le32(rdma_ah_get_sl(ah_attr) << | 80 | ah->av.sl = rdma_ah_get_sl(ah_attr); |
83 | HNS_ROCE_SL_SHIFT); | ||
84 | 81 | ||
85 | return 0; | 82 | return 0; |
86 | } | 83 | } |
@@ -91,17 +88,11 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) | |||
91 | 88 | ||
92 | memset(ah_attr, 0, sizeof(*ah_attr)); | 89 | memset(ah_attr, 0, sizeof(*ah_attr)); |
93 | 90 | ||
94 | rdma_ah_set_sl(ah_attr, (le32_to_cpu(ah->av.sl_tclass_flowlabel) >> | 91 | rdma_ah_set_sl(ah_attr, ah->av.sl); |
95 | HNS_ROCE_SL_SHIFT)); | 92 | rdma_ah_set_port_num(ah_attr, ah->av.port); |
96 | rdma_ah_set_port_num(ah_attr, (le32_to_cpu(ah->av.port_pd) >> | ||
97 | HNS_ROCE_PORT_NUM_SHIFT)); | ||
98 | rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate); | 93 | rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate); |
99 | rdma_ah_set_grh(ah_attr, NULL, | 94 | rdma_ah_set_grh(ah_attr, NULL, ah->av.flowlabel, |
100 | (le32_to_cpu(ah->av.sl_tclass_flowlabel) & | 95 | ah->av.gid_index, ah->av.hop_limit, ah->av.tclass); |
101 | HNS_ROCE_FLOW_LABEL_MASK), ah->av.gid_index, | ||
102 | ah->av.hop_limit, | ||
103 | (le32_to_cpu(ah->av.sl_tclass_flowlabel) >> | ||
104 | HNS_ROCE_TCLASS_SHIFT)); | ||
105 | rdma_ah_set_dgid_raw(ah_attr, ah->av.dgid); | 96 | rdma_ah_set_dgid_raw(ah_attr, ah->av.dgid); |
106 | 97 | ||
107 | return 0; | 98 | return 0; |
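Note on the hns_roce_ah.c hunks above: create_ah/query_ah stop packing the port/PD and sl/tclass/flowlabel values into shared little-endian words and instead use the discrete fields added to struct hns_roce_av (see the hns_roce_device.h diff below). For reference, the old single-word encoding can be modelled with the shift/mask constants still visible in hns_roce_device.h (HNS_ROCE_SL_SHIFT 28, HNS_ROCE_TCLASS_SHIFT 20, HNS_ROCE_FLOW_LABEL_MASK 0xfffff); the standalone sketch below only illustrates that layout and deliberately ignores endian conversion.

/* Standalone model of the sl/tclass/flowlabel word the old AH code packed and
 * unpacked; the shift/mask values are the ones shown in hns_roce_device.h.
 * Illustrative only -- the driver now stores these fields separately.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HNS_ROCE_SL_SHIFT        28
#define HNS_ROCE_TCLASS_SHIFT    20
#define HNS_ROCE_FLOW_LABEL_MASK 0xfffff

static uint32_t pack_sl_tclass_flowlabel(uint8_t sl, uint8_t tclass,
                                         uint32_t flowlabel)
{
    return ((uint32_t)sl << HNS_ROCE_SL_SHIFT) |
           ((uint32_t)tclass << HNS_ROCE_TCLASS_SHIFT) |
           (flowlabel & HNS_ROCE_FLOW_LABEL_MASK);
}

int main(void)
{
    uint32_t w = pack_sl_tclass_flowlabel(3, 0x42, 0x12345);

    assert((w >> HNS_ROCE_SL_SHIFT) == 3);                  /* service level */
    assert(((w >> HNS_ROCE_TCLASS_SHIFT) & 0xff) == 0x42);  /* traffic class */
    assert((w & HNS_ROCE_FLOW_LABEL_MASK) == 0x12345);      /* flow label */
    printf("packed word: 0x%08x\n", w);
    return 0;
}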
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c index 0cd09bf4d7ea..455d533dd7c4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.c +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c | |||
@@ -211,7 +211,6 @@ int hns_roce_cmd_init(struct hns_roce_dev *hr_dev) | |||
211 | mutex_init(&hr_dev->cmd.hcr_mutex); | 211 | mutex_init(&hr_dev->cmd.hcr_mutex); |
212 | sema_init(&hr_dev->cmd.poll_sem, 1); | 212 | sema_init(&hr_dev->cmd.poll_sem, 1); |
213 | hr_dev->cmd.use_events = 0; | 213 | hr_dev->cmd.use_events = 0; |
214 | hr_dev->cmd.toggle = 1; | ||
215 | hr_dev->cmd.max_cmds = CMD_MAX_NUM; | 214 | hr_dev->cmd.max_cmds = CMD_MAX_NUM; |
216 | hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev, | 215 | hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev, |
217 | HNS_ROCE_MAILBOX_SIZE, | 216 | HNS_ROCE_MAILBOX_SIZE, |
@@ -252,23 +251,15 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev) | |||
252 | hr_cmd->token_mask = CMD_TOKEN_MASK; | 251 | hr_cmd->token_mask = CMD_TOKEN_MASK; |
253 | hr_cmd->use_events = 1; | 252 | hr_cmd->use_events = 1; |
254 | 253 | ||
255 | down(&hr_cmd->poll_sem); | ||
256 | |||
257 | return 0; | 254 | return 0; |
258 | } | 255 | } |
259 | 256 | ||
260 | void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev) | 257 | void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev) |
261 | { | 258 | { |
262 | struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd; | 259 | struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd; |
263 | int i; | ||
264 | |||
265 | hr_cmd->use_events = 0; | ||
266 | |||
267 | for (i = 0; i < hr_cmd->max_cmds; ++i) | ||
268 | down(&hr_cmd->event_sem); | ||
269 | 260 | ||
270 | kfree(hr_cmd->context); | 261 | kfree(hr_cmd->context); |
271 | up(&hr_cmd->poll_sem); | 262 | hr_cmd->use_events = 0; |
272 | } | 263 | } |
273 | 264 | ||
274 | struct hns_roce_cmd_mailbox | 265 | struct hns_roce_cmd_mailbox |
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 4e50c22a2da4..22541d19cd09 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c | |||
@@ -83,7 +83,6 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev, | |||
83 | 83 | ||
84 | static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, | 84 | static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, |
85 | struct hns_roce_mtt *hr_mtt, | 85 | struct hns_roce_mtt *hr_mtt, |
86 | struct hns_roce_uar *hr_uar, | ||
87 | struct hns_roce_cq *hr_cq, int vector) | 86 | struct hns_roce_cq *hr_cq, int vector) |
88 | { | 87 | { |
89 | struct hns_roce_cmd_mailbox *mailbox; | 88 | struct hns_roce_cmd_mailbox *mailbox; |
@@ -154,7 +153,6 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, | |||
154 | 153 | ||
155 | hr_cq->cons_index = 0; | 154 | hr_cq->cons_index = 0; |
156 | hr_cq->arm_sn = 1; | 155 | hr_cq->arm_sn = 1; |
157 | hr_cq->uar = hr_uar; | ||
158 | 156 | ||
159 | atomic_set(&hr_cq->refcount, 1); | 157 | atomic_set(&hr_cq->refcount, 1); |
160 | init_completion(&hr_cq->free); | 158 | init_completion(&hr_cq->free); |
@@ -298,21 +296,127 @@ static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev, | |||
298 | &buf->hr_buf); | 296 | &buf->hr_buf); |
299 | } | 297 | } |
300 | 298 | ||
299 | static int create_user_cq(struct hns_roce_dev *hr_dev, | ||
300 | struct hns_roce_cq *hr_cq, | ||
301 | struct ib_udata *udata, | ||
302 | struct hns_roce_ib_create_cq_resp *resp, | ||
303 | int cq_entries) | ||
304 | { | ||
305 | struct hns_roce_ib_create_cq ucmd; | ||
306 | struct device *dev = hr_dev->dev; | ||
307 | int ret; | ||
308 | struct hns_roce_ucontext *context = rdma_udata_to_drv_context( | ||
309 | udata, struct hns_roce_ucontext, ibucontext); | ||
310 | |||
311 | if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { | ||
312 | dev_err(dev, "Failed to copy_from_udata.\n"); | ||
313 | return -EFAULT; | ||
314 | } | ||
315 | |||
316 | /* Get user space address, write it into mtt table */ | ||
317 | ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf, | ||
318 | &hr_cq->umem, ucmd.buf_addr, | ||
319 | cq_entries); | ||
320 | if (ret) { | ||
321 | dev_err(dev, "Failed to get_cq_umem.\n"); | ||
322 | return ret; | ||
323 | } | ||
324 | |||
325 | if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && | ||
326 | (udata->outlen >= sizeof(*resp))) { | ||
327 | ret = hns_roce_db_map_user(context, udata, ucmd.db_addr, | ||
328 | &hr_cq->db); | ||
329 | if (ret) { | ||
330 | dev_err(dev, "cq record doorbell map failed!\n"); | ||
331 | goto err_mtt; | ||
332 | } | ||
333 | hr_cq->db_en = 1; | ||
334 | resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB; | ||
335 | } | ||
336 | |||
337 | return 0; | ||
338 | |||
339 | err_mtt: | ||
340 | hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); | ||
341 | ib_umem_release(hr_cq->umem); | ||
342 | |||
343 | return ret; | ||
344 | } | ||
345 | |||
346 | static int create_kernel_cq(struct hns_roce_dev *hr_dev, | ||
347 | struct hns_roce_cq *hr_cq, int cq_entries) | ||
348 | { | ||
349 | struct device *dev = hr_dev->dev; | ||
350 | struct hns_roce_uar *uar; | ||
351 | int ret; | ||
352 | |||
353 | if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { | ||
354 | ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1); | ||
355 | if (ret) | ||
356 | return ret; | ||
357 | |||
358 | hr_cq->set_ci_db = hr_cq->db.db_record; | ||
359 | *hr_cq->set_ci_db = 0; | ||
360 | hr_cq->db_en = 1; | ||
361 | } | ||
362 | |||
363 | /* Init mtt table and write buff address to mtt table */ | ||
364 | ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries); | ||
365 | if (ret) { | ||
366 | dev_err(dev, "Failed to alloc_cq_buf.\n"); | ||
367 | goto err_db; | ||
368 | } | ||
369 | |||
370 | uar = &hr_dev->priv_uar; | ||
371 | hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset + | ||
372 | DB_REG_OFFSET * uar->index; | ||
373 | |||
374 | return 0; | ||
375 | |||
376 | err_db: | ||
377 | if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) | ||
378 | hns_roce_free_db(hr_dev, &hr_cq->db); | ||
379 | |||
380 | return ret; | ||
381 | } | ||
382 | |||
383 | static void destroy_user_cq(struct hns_roce_dev *hr_dev, | ||
384 | struct hns_roce_cq *hr_cq, | ||
385 | struct ib_udata *udata, | ||
386 | struct hns_roce_ib_create_cq_resp *resp) | ||
387 | { | ||
388 | struct hns_roce_ucontext *context = rdma_udata_to_drv_context( | ||
389 | udata, struct hns_roce_ucontext, ibucontext); | ||
390 | |||
391 | if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && | ||
392 | (udata->outlen >= sizeof(*resp))) | ||
393 | hns_roce_db_unmap_user(context, &hr_cq->db); | ||
394 | |||
395 | hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); | ||
396 | ib_umem_release(hr_cq->umem); | ||
397 | } | ||
398 | |||
399 | static void destroy_kernel_cq(struct hns_roce_dev *hr_dev, | ||
400 | struct hns_roce_cq *hr_cq) | ||
401 | { | ||
402 | hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); | ||
403 | hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe); | ||
404 | |||
405 | if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) | ||
406 | hns_roce_free_db(hr_dev, &hr_cq->db); | ||
407 | } | ||
408 | |||
301 | int hns_roce_ib_create_cq(struct ib_cq *ib_cq, | 409 | int hns_roce_ib_create_cq(struct ib_cq *ib_cq, |
302 | const struct ib_cq_init_attr *attr, | 410 | const struct ib_cq_init_attr *attr, |
303 | struct ib_udata *udata) | 411 | struct ib_udata *udata) |
304 | { | 412 | { |
305 | struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); | 413 | struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); |
306 | struct device *dev = hr_dev->dev; | 414 | struct device *dev = hr_dev->dev; |
307 | struct hns_roce_ib_create_cq ucmd; | ||
308 | struct hns_roce_ib_create_cq_resp resp = {}; | 415 | struct hns_roce_ib_create_cq_resp resp = {}; |
309 | struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); | 416 | struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); |
310 | struct hns_roce_uar *uar = NULL; | ||
311 | int vector = attr->comp_vector; | 417 | int vector = attr->comp_vector; |
312 | int cq_entries = attr->cqe; | 418 | int cq_entries = attr->cqe; |
313 | int ret; | 419 | int ret; |
314 | struct hns_roce_ucontext *context = rdma_udata_to_drv_context( | ||
315 | udata, struct hns_roce_ucontext, ibucontext); | ||
316 | 420 | ||
317 | if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) { | 421 | if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) { |
318 | dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n", | 422 | dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n", |
@@ -328,61 +432,21 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq, | |||
328 | spin_lock_init(&hr_cq->lock); | 432 | spin_lock_init(&hr_cq->lock); |
329 | 433 | ||
330 | if (udata) { | 434 | if (udata) { |
331 | if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { | 435 | ret = create_user_cq(hr_dev, hr_cq, udata, &resp, cq_entries); |
332 | dev_err(dev, "Failed to copy_from_udata.\n"); | ||
333 | ret = -EFAULT; | ||
334 | goto err_cq; | ||
335 | } | ||
336 | |||
337 | /* Get user space address, write it into mtt table */ | ||
338 | ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf, | ||
339 | &hr_cq->umem, ucmd.buf_addr, | ||
340 | cq_entries); | ||
341 | if (ret) { | 436 | if (ret) { |
342 | dev_err(dev, "Failed to get_cq_umem.\n"); | 437 | dev_err(dev, "Create cq failed in user mode!\n"); |
343 | goto err_cq; | 438 | goto err_cq; |
344 | } | 439 | } |
345 | |||
346 | if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && | ||
347 | (udata->outlen >= sizeof(resp))) { | ||
348 | ret = hns_roce_db_map_user(context, udata, ucmd.db_addr, | ||
349 | &hr_cq->db); | ||
350 | if (ret) { | ||
351 | dev_err(dev, "cq record doorbell map failed!\n"); | ||
352 | goto err_mtt; | ||
353 | } | ||
354 | hr_cq->db_en = 1; | ||
355 | resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB; | ||
356 | } | ||
357 | |||
358 | /* Get user space parameters */ | ||
359 | uar = &context->uar; | ||
360 | } else { | 440 | } else { |
361 | if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { | 441 | ret = create_kernel_cq(hr_dev, hr_cq, cq_entries); |
362 | ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1); | ||
363 | if (ret) | ||
364 | goto err_cq; | ||
365 | |||
366 | hr_cq->set_ci_db = hr_cq->db.db_record; | ||
367 | *hr_cq->set_ci_db = 0; | ||
368 | hr_cq->db_en = 1; | ||
369 | } | ||
370 | |||
371 | /* Init mmt table and write buff address to mtt table */ | ||
372 | ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, | ||
373 | cq_entries); | ||
374 | if (ret) { | 442 | if (ret) { |
375 | dev_err(dev, "Failed to alloc_cq_buf.\n"); | 443 | dev_err(dev, "Create cq failed in kernel mode!\n"); |
376 | goto err_db; | 444 | goto err_cq; |
377 | } | 445 | } |
378 | |||
379 | uar = &hr_dev->priv_uar; | ||
380 | hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset + | ||
381 | DB_REG_OFFSET * uar->index; | ||
382 | } | 446 | } |
383 | 447 | ||
384 | /* Allocate cq index, fill cq_context */ | 448 | /* Allocate cq index, fill cq_context */ |
385 | ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar, | 449 | ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, |
386 | hr_cq, vector); | 450 | hr_cq, vector); |
387 | if (ret) { | 451 | if (ret) { |
388 | dev_err(dev, "Creat CQ .Failed to cq_alloc.\n"); | 452 | dev_err(dev, "Creat CQ .Failed to cq_alloc.\n"); |
@@ -416,20 +480,10 @@ err_cqc: | |||
416 | hns_roce_free_cq(hr_dev, hr_cq); | 480 | hns_roce_free_cq(hr_dev, hr_cq); |
417 | 481 | ||
418 | err_dbmap: | 482 | err_dbmap: |
419 | if (udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && | 483 | if (udata) |
420 | (udata->outlen >= sizeof(resp))) | 484 | destroy_user_cq(hr_dev, hr_cq, udata, &resp); |
421 | hns_roce_db_unmap_user(context, &hr_cq->db); | 485 | else |
422 | 486 | destroy_kernel_cq(hr_dev, hr_cq); | |
423 | err_mtt: | ||
424 | hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); | ||
425 | ib_umem_release(hr_cq->umem); | ||
426 | if (!udata) | ||
427 | hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, | ||
428 | hr_cq->ib_cq.cqe); | ||
429 | |||
430 | err_db: | ||
431 | if (!udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) | ||
432 | hns_roce_free_db(hr_dev, &hr_cq->db); | ||
433 | 487 | ||
434 | err_cq: | 488 | err_cq: |
435 | return ret; | 489 | return ret; |
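Note on the hns_roce_cq.c rework above: hns_roce_ib_create_cq() becomes a thin dispatcher, with the udata path handled by create_user_cq()/destroy_user_cq() and the in-kernel path by create_kernel_cq()/destroy_kernel_cq(), so the error unwind no longer has to re-check the record-doorbell capability and udata->outlen to know what to release. The sketch below shows that generic create/destroy pairing in standalone C; every name in it is invented and it is not the driver API.

/* Minimal sketch of the create/destroy pairing the CQ rework introduces: the
 * top-level function only chooses a mode; each mode owns its own setup and
 * teardown, so error handling stays symmetric. All names are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct ctx { bool user_mode; bool user_res; bool kernel_res; };

static int create_user(struct ctx *c)     { c->user_res = true;   return 0; }
static int create_kernel(struct ctx *c)   { c->kernel_res = true; return 0; }
static void destroy_user(struct ctx *c)   { c->user_res = false;  }
static void destroy_kernel(struct ctx *c) { c->kernel_res = false; }

static int register_with_hw(struct ctx *c) { (void)c; return -1; } /* force unwind */

static int create_object(struct ctx *c, bool user_mode)
{
    int ret;

    c->user_mode = user_mode;
    ret = user_mode ? create_user(c) : create_kernel(c);
    if (ret)
        return ret;

    ret = register_with_hw(c);
    if (ret)
        goto err_unwind;

    return 0;

err_unwind:
    if (c->user_mode)       /* unwind mirrors whichever create path ran */
        destroy_user(c);
    else
        destroy_kernel(c);
    return ret;
}

int main(void)
{
    struct ctx c = { 0 };

    printf("create_object (kernel mode) -> %d\n", create_object(&c, false));
    return 0;
}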
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index a548b28aab63..96d1302abde1 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h | |||
@@ -84,7 +84,6 @@ | |||
84 | #define HNS_ROCE_CEQ_ENTRY_SIZE 0x4 | 84 | #define HNS_ROCE_CEQ_ENTRY_SIZE 0x4 |
85 | #define HNS_ROCE_AEQ_ENTRY_SIZE 0x10 | 85 | #define HNS_ROCE_AEQ_ENTRY_SIZE 0x10 |
86 | 86 | ||
87 | /* 4G/4K = 1M */ | ||
88 | #define HNS_ROCE_SL_SHIFT 28 | 87 | #define HNS_ROCE_SL_SHIFT 28 |
89 | #define HNS_ROCE_TCLASS_SHIFT 20 | 88 | #define HNS_ROCE_TCLASS_SHIFT 20 |
90 | #define HNS_ROCE_FLOW_LABEL_MASK 0xfffff | 89 | #define HNS_ROCE_FLOW_LABEL_MASK 0xfffff |
@@ -128,6 +127,11 @@ | |||
128 | #define HNS_ROCE_IDX_QUE_ENTRY_SZ 4 | 127 | #define HNS_ROCE_IDX_QUE_ENTRY_SZ 4 |
129 | #define SRQ_DB_REG 0x230 | 128 | #define SRQ_DB_REG 0x230 |
130 | 129 | ||
130 | /* The chip implementation of the consumer index is calculated | ||
131 | * according to twice the actual EQ depth | ||
132 | */ | ||
133 | #define EQ_DEPTH_COEFF 2 | ||
134 | |||
131 | enum { | 135 | enum { |
132 | HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0, | 136 | HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0, |
133 | HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1, | 137 | HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1, |
@@ -322,7 +326,7 @@ struct hns_roce_hem_table { | |||
322 | unsigned long num_hem; | 326 | unsigned long num_hem; |
323 | /* HEM entry record obj total num */ | 327 | /* HEM entry record obj total num */ |
324 | unsigned long num_obj; | 328 | unsigned long num_obj; |
325 | /*Single obj size */ | 329 | /* Single obj size */ |
326 | unsigned long obj_size; | 330 | unsigned long obj_size; |
327 | unsigned long table_chunk_size; | 331 | unsigned long table_chunk_size; |
328 | int lowmem; | 332 | int lowmem; |
@@ -343,7 +347,7 @@ struct hns_roce_mtt { | |||
343 | 347 | ||
344 | struct hns_roce_buf_region { | 348 | struct hns_roce_buf_region { |
345 | int offset; /* page offset */ | 349 | int offset; /* page offset */ |
346 | u32 count; /* page count*/ | 350 | u32 count; /* page count */ |
347 | int hopnum; /* addressing hop num */ | 351 | int hopnum; /* addressing hop num */ |
348 | }; | 352 | }; |
349 | 353 | ||
@@ -384,25 +388,25 @@ struct hns_roce_mr { | |||
384 | u64 size; /* Address range of MR */ | 388 | u64 size; /* Address range of MR */ |
385 | u32 key; /* Key of MR */ | 389 | u32 key; /* Key of MR */ |
386 | u32 pd; /* PD num of MR */ | 390 | u32 pd; /* PD num of MR */ |
387 | u32 access;/* Access permission of MR */ | 391 | u32 access; /* Access permission of MR */ |
388 | u32 npages; | 392 | u32 npages; |
389 | int enabled; /* MR's active status */ | 393 | int enabled; /* MR's active status */ |
390 | int type; /* MR's register type */ | 394 | int type; /* MR's register type */ |
391 | u64 *pbl_buf;/* MR's PBL space */ | 395 | u64 *pbl_buf; /* MR's PBL space */ |
392 | dma_addr_t pbl_dma_addr; /* MR's PBL space PA */ | 396 | dma_addr_t pbl_dma_addr; /* MR's PBL space PA */ |
393 | u32 pbl_size;/* PA number in the PBL */ | 397 | u32 pbl_size; /* PA number in the PBL */ |
394 | u64 pbl_ba;/* page table address */ | 398 | u64 pbl_ba; /* page table address */ |
395 | u32 l0_chunk_last_num;/* L0 last number */ | 399 | u32 l0_chunk_last_num; /* L0 last number */ |
396 | u32 l1_chunk_last_num;/* L1 last number */ | 400 | u32 l1_chunk_last_num; /* L1 last number */ |
397 | u64 **pbl_bt_l2;/* PBL BT L2 */ | 401 | u64 **pbl_bt_l2; /* PBL BT L2 */ |
398 | u64 **pbl_bt_l1;/* PBL BT L1 */ | 402 | u64 **pbl_bt_l1; /* PBL BT L1 */ |
399 | u64 *pbl_bt_l0;/* PBL BT L0 */ | 403 | u64 *pbl_bt_l0; /* PBL BT L0 */ |
400 | dma_addr_t *pbl_l2_dma_addr;/* PBL BT L2 dma addr */ | 404 | dma_addr_t *pbl_l2_dma_addr; /* PBL BT L2 dma addr */ |
401 | dma_addr_t *pbl_l1_dma_addr;/* PBL BT L1 dma addr */ | 405 | dma_addr_t *pbl_l1_dma_addr; /* PBL BT L1 dma addr */ |
402 | dma_addr_t pbl_l0_dma_addr;/* PBL BT L0 dma addr */ | 406 | dma_addr_t pbl_l0_dma_addr; /* PBL BT L0 dma addr */ |
403 | u32 pbl_ba_pg_sz;/* BT chunk page size */ | 407 | u32 pbl_ba_pg_sz; /* BT chunk page size */ |
404 | u32 pbl_buf_pg_sz;/* buf chunk page size */ | 408 | u32 pbl_buf_pg_sz; /* buf chunk page size */ |
405 | u32 pbl_hop_num;/* multi-hop number */ | 409 | u32 pbl_hop_num; /* multi-hop number */ |
406 | }; | 410 | }; |
407 | 411 | ||
408 | struct hns_roce_mr_table { | 412 | struct hns_roce_mr_table { |
@@ -425,16 +429,16 @@ struct hns_roce_wq { | |||
425 | u32 max_post; | 429 | u32 max_post; |
426 | int max_gs; | 430 | int max_gs; |
427 | int offset; | 431 | int offset; |
428 | int wqe_shift;/* WQE size */ | 432 | int wqe_shift; /* WQE size */ |
429 | u32 head; | 433 | u32 head; |
430 | u32 tail; | 434 | u32 tail; |
431 | void __iomem *db_reg_l; | 435 | void __iomem *db_reg_l; |
432 | }; | 436 | }; |
433 | 437 | ||
434 | struct hns_roce_sge { | 438 | struct hns_roce_sge { |
435 | int sge_cnt; /* SGE num */ | 439 | int sge_cnt; /* SGE num */ |
436 | int offset; | 440 | int offset; |
437 | int sge_shift;/* SGE size */ | 441 | int sge_shift; /* SGE size */ |
438 | }; | 442 | }; |
439 | 443 | ||
440 | struct hns_roce_buf_list { | 444 | struct hns_roce_buf_list { |
@@ -569,14 +573,16 @@ struct hns_roce_raq_table { | |||
569 | }; | 573 | }; |
570 | 574 | ||
571 | struct hns_roce_av { | 575 | struct hns_roce_av { |
572 | __le32 port_pd; | 576 | u8 port; |
573 | u8 gid_index; | 577 | u8 gid_index; |
574 | u8 stat_rate; | 578 | u8 stat_rate; |
575 | u8 hop_limit; | 579 | u8 hop_limit; |
576 | __le32 sl_tclass_flowlabel; | 580 | u32 flowlabel; |
581 | u8 sl; | ||
582 | u8 tclass; | ||
577 | u8 dgid[HNS_ROCE_GID_SIZE]; | 583 | u8 dgid[HNS_ROCE_GID_SIZE]; |
578 | u8 mac[ETH_ALEN]; | 584 | u8 mac[ETH_ALEN]; |
579 | __le16 vlan; | 585 | u16 vlan; |
580 | bool vlan_en; | 586 | bool vlan_en; |
581 | }; | 587 | }; |
582 | 588 | ||
@@ -618,7 +624,6 @@ struct hns_roce_cmdq { | |||
618 | * close device, switch into poll mode(non event mode) | 624 | * close device, switch into poll mode(non event mode) |
619 | */ | 625 | */ |
620 | u8 use_events; | 626 | u8 use_events; |
621 | u8 toggle; | ||
622 | }; | 627 | }; |
623 | 628 | ||
624 | struct hns_roce_cmd_mailbox { | 629 | struct hns_roce_cmd_mailbox { |
@@ -652,10 +657,8 @@ struct hns_roce_qp { | |||
652 | u8 rdb_en; | 657 | u8 rdb_en; |
653 | u8 sdb_en; | 658 | u8 sdb_en; |
654 | u32 doorbell_qpn; | 659 | u32 doorbell_qpn; |
655 | __le32 sq_signal_bits; | 660 | u32 sq_signal_bits; |
656 | u32 sq_next_wqe; | 661 | u32 sq_next_wqe; |
657 | int sq_max_wqes_per_wr; | ||
658 | int sq_spare_wqes; | ||
659 | struct hns_roce_wq sq; | 662 | struct hns_roce_wq sq; |
660 | 663 | ||
661 | struct ib_umem *umem; | 664 | struct ib_umem *umem; |
@@ -709,7 +712,7 @@ enum { | |||
709 | }; | 712 | }; |
710 | 713 | ||
711 | struct hns_roce_ceqe { | 714 | struct hns_roce_ceqe { |
712 | u32 comp; | 715 | __le32 comp; |
713 | }; | 716 | }; |
714 | 717 | ||
715 | struct hns_roce_aeqe { | 718 | struct hns_roce_aeqe { |
@@ -752,7 +755,7 @@ struct hns_roce_eq { | |||
752 | struct hns_roce_dev *hr_dev; | 755 | struct hns_roce_dev *hr_dev; |
753 | void __iomem *doorbell; | 756 | void __iomem *doorbell; |
754 | 757 | ||
755 | int type_flag;/* Aeq:1 ceq:0 */ | 758 | int type_flag; /* Aeq:1 ceq:0 */ |
756 | int eqn; | 759 | int eqn; |
757 | u32 entries; | 760 | u32 entries; |
758 | int log_entries; | 761 | int log_entries; |
@@ -798,22 +801,22 @@ struct hns_roce_caps { | |||
798 | int local_ca_ack_delay; | 801 | int local_ca_ack_delay; |
799 | int num_uars; | 802 | int num_uars; |
800 | u32 phy_num_uars; | 803 | u32 phy_num_uars; |
801 | u32 max_sq_sg; /* 2 */ | 804 | u32 max_sq_sg; |
802 | u32 max_sq_inline; /* 32 */ | 805 | u32 max_sq_inline; |
803 | u32 max_rq_sg; /* 2 */ | 806 | u32 max_rq_sg; |
804 | u32 max_extend_sg; | 807 | u32 max_extend_sg; |
805 | int num_qps; /* 256k */ | 808 | int num_qps; |
806 | int reserved_qps; | 809 | int reserved_qps; |
807 | int num_qpc_timer; | 810 | int num_qpc_timer; |
808 | int num_cqc_timer; | 811 | int num_cqc_timer; |
809 | u32 max_srq_sg; | 812 | u32 max_srq_sg; |
810 | int num_srqs; | 813 | int num_srqs; |
811 | u32 max_wqes; /* 16k */ | 814 | u32 max_wqes; |
812 | u32 max_srqs; | 815 | u32 max_srqs; |
813 | u32 max_srq_wrs; | 816 | u32 max_srq_wrs; |
814 | u32 max_srq_sges; | 817 | u32 max_srq_sges; |
815 | u32 max_sq_desc_sz; /* 64 */ | 818 | u32 max_sq_desc_sz; |
816 | u32 max_rq_desc_sz; /* 64 */ | 819 | u32 max_rq_desc_sz; |
817 | u32 max_srq_desc_sz; | 820 | u32 max_srq_desc_sz; |
818 | int max_qp_init_rdma; | 821 | int max_qp_init_rdma; |
819 | int max_qp_dest_rdma; | 822 | int max_qp_dest_rdma; |
@@ -824,7 +827,7 @@ struct hns_roce_caps { | |||
824 | int reserved_cqs; | 827 | int reserved_cqs; |
825 | int reserved_srqs; | 828 | int reserved_srqs; |
826 | u32 max_srqwqes; | 829 | u32 max_srqwqes; |
827 | int num_aeq_vectors; /* 1 */ | 830 | int num_aeq_vectors; |
828 | int num_comp_vectors; | 831 | int num_comp_vectors; |
829 | int num_other_vectors; | 832 | int num_other_vectors; |
830 | int num_mtpts; | 833 | int num_mtpts; |
@@ -905,7 +908,7 @@ struct hns_roce_caps { | |||
905 | u32 sl_num; | 908 | u32 sl_num; |
906 | u32 tsq_buf_pg_sz; | 909 | u32 tsq_buf_pg_sz; |
907 | u32 tpq_buf_pg_sz; | 910 | u32 tpq_buf_pg_sz; |
908 | u32 chunk_sz; /* chunk size in non multihop mode*/ | 911 | u32 chunk_sz; /* chunk size in non multihop mode */ |
909 | u64 flags; | 912 | u64 flags; |
910 | }; | 913 | }; |
911 | 914 | ||
@@ -991,16 +994,6 @@ struct hns_roce_hw { | |||
991 | const struct ib_device_ops *hns_roce_dev_srq_ops; | 994 | const struct ib_device_ops *hns_roce_dev_srq_ops; |
992 | }; | 995 | }; |
993 | 996 | ||
994 | enum hns_phy_state { | ||
995 | HNS_ROCE_PHY_SLEEP = 1, | ||
996 | HNS_ROCE_PHY_POLLING = 2, | ||
997 | HNS_ROCE_PHY_DISABLED = 3, | ||
998 | HNS_ROCE_PHY_TRAINING = 4, | ||
999 | HNS_ROCE_PHY_LINKUP = 5, | ||
1000 | HNS_ROCE_PHY_LINKERR = 6, | ||
1001 | HNS_ROCE_PHY_TEST = 7 | ||
1002 | }; | ||
1003 | |||
1004 | struct hns_roce_dev { | 997 | struct hns_roce_dev { |
1005 | struct ib_device ib_dev; | 998 | struct ib_device ib_dev; |
1006 | struct platform_device *pdev; | 999 | struct platform_device *pdev; |
@@ -1045,8 +1038,8 @@ struct hns_roce_dev { | |||
1045 | int loop_idc; | 1038 | int loop_idc; |
1046 | u32 sdb_offset; | 1039 | u32 sdb_offset; |
1047 | u32 odb_offset; | 1040 | u32 odb_offset; |
1048 | dma_addr_t tptr_dma_addr; /*only for hw v1*/ | 1041 | dma_addr_t tptr_dma_addr; /* only for hw v1 */ |
1049 | u32 tptr_size; /*only for hw v1*/ | 1042 | u32 tptr_size; /* only for hw v1 */ |
1050 | const struct hns_roce_hw *hw; | 1043 | const struct hns_roce_hw *hw; |
1051 | void *priv; | 1044 | void *priv; |
1052 | struct workqueue_struct *irq_workq; | 1045 | struct workqueue_struct *irq_workq; |
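Note on the new EQ_DEPTH_COEFF definition above: the comment states that the chip computes the consumer index over twice the actual EQ depth. One common way such a scheme is arranged (shown only to illustrate why a factor of two appears, not as a statement about the hip08 hardware) is to let the index run over 2 * depth so the low bits select the ring slot while the extra bit flips on every wrap:

/* Illustration of a consumer index that runs over 2 * depth: the low bits
 * select the EQ entry, the extra bit flips on every wrap and can serve as an
 * ownership/phase marker. Generic pattern only; not taken from the hip08
 * documentation.
 */
#include <stdint.h>
#include <stdio.h>

#define EQ_DEPTH_COEFF 2

int main(void)
{
    const uint32_t depth = 8;                        /* power of two for the mask below */
    const uint32_t index_space = depth * EQ_DEPTH_COEFF;

    for (uint32_t ci = 0; ci < 3 * depth; ci++) {
        uint32_t hw_ci = ci % index_space;           /* what the hardware tracks */
        uint32_t entry = hw_ci & (depth - 1);        /* ring slot */
        uint32_t phase = (hw_ci / depth) & 1;        /* flips on each wrap */

        printf("ci=%2u hw_ci=%2u entry=%u phase=%u\n", ci, hw_ci, entry, phase);
    }
    return 0;
}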
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index f4da5bd2884f..e82215774032 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c | |||
@@ -41,29 +41,57 @@ | |||
41 | 41 | ||
42 | bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type) | 42 | bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type) |
43 | { | 43 | { |
44 | if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) || | 44 | int hop_num = 0; |
45 | (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) || | 45 | |
46 | (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) || | 46 | switch (type) { |
47 | (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) || | 47 | case HEM_TYPE_QPC: |
48 | (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) || | 48 | hop_num = hr_dev->caps.qpc_hop_num; |
49 | (hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) || | 49 | break; |
50 | (hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) || | 50 | case HEM_TYPE_MTPT: |
51 | (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) || | 51 | hop_num = hr_dev->caps.mpt_hop_num; |
52 | (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) || | 52 | break; |
53 | (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) || | 53 | case HEM_TYPE_CQC: |
54 | (hr_dev->caps.idx_hop_num && type == HEM_TYPE_IDX)) | 54 | hop_num = hr_dev->caps.cqc_hop_num; |
55 | return true; | 55 | break; |
56 | 56 | case HEM_TYPE_SRQC: | |
57 | return false; | 57 | hop_num = hr_dev->caps.srqc_hop_num; |
58 | break; | ||
59 | case HEM_TYPE_SCCC: | ||
60 | hop_num = hr_dev->caps.sccc_hop_num; | ||
61 | break; | ||
62 | case HEM_TYPE_QPC_TIMER: | ||
63 | hop_num = hr_dev->caps.qpc_timer_hop_num; | ||
64 | break; | ||
65 | case HEM_TYPE_CQC_TIMER: | ||
66 | hop_num = hr_dev->caps.cqc_timer_hop_num; | ||
67 | break; | ||
68 | case HEM_TYPE_CQE: | ||
69 | hop_num = hr_dev->caps.cqe_hop_num; | ||
70 | break; | ||
71 | case HEM_TYPE_MTT: | ||
72 | hop_num = hr_dev->caps.mtt_hop_num; | ||
73 | break; | ||
74 | case HEM_TYPE_SRQWQE: | ||
75 | hop_num = hr_dev->caps.srqwqe_hop_num; | ||
76 | break; | ||
77 | case HEM_TYPE_IDX: | ||
78 | hop_num = hr_dev->caps.idx_hop_num; | ||
79 | break; | ||
80 | default: | ||
81 | return false; | ||
82 | } | ||
83 | |||
84 | return hop_num ? true : false; | ||
58 | } | 85 | } |
59 | 86 | ||
60 | static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx, | 87 | static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx, |
61 | u32 bt_chunk_num) | 88 | u32 bt_chunk_num, u64 hem_max_num) |
62 | { | 89 | { |
63 | int i; | 90 | u64 check_max_num = start_idx + bt_chunk_num; |
91 | u64 i; | ||
64 | 92 | ||
65 | for (i = 0; i < bt_chunk_num; i++) | 93 | for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++) |
66 | if (hem[start_idx + i]) | 94 | if (hem[i]) |
67 | return false; | 95 | return false; |
68 | 96 | ||
69 | return true; | 97 | return true; |
@@ -92,17 +120,13 @@ static int hns_roce_get_bt_num(u32 table_type, u32 hop_num) | |||
92 | return 0; | 120 | return 0; |
93 | } | 121 | } |
94 | 122 | ||
95 | int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, | 123 | static int get_hem_table_config(struct hns_roce_dev *hr_dev, |
96 | struct hns_roce_hem_table *table, unsigned long *obj, | 124 | struct hns_roce_hem_mhop *mhop, |
97 | struct hns_roce_hem_mhop *mhop) | 125 | u32 type) |
98 | { | 126 | { |
99 | struct device *dev = hr_dev->dev; | 127 | struct device *dev = hr_dev->dev; |
100 | u32 chunk_ba_num; | ||
101 | u32 table_idx; | ||
102 | u32 bt_num; | ||
103 | u32 chunk_size; | ||
104 | 128 | ||
105 | switch (table->type) { | 129 | switch (type) { |
106 | case HEM_TYPE_QPC: | 130 | case HEM_TYPE_QPC: |
107 | mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz | 131 | mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz |
108 | + PAGE_SHIFT); | 132 | + PAGE_SHIFT); |
@@ -193,10 +217,26 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, | |||
193 | break; | 217 | break; |
194 | default: | 218 | default: |
195 | dev_err(dev, "Table %d not support multi-hop addressing!\n", | 219 | dev_err(dev, "Table %d not support multi-hop addressing!\n", |
196 | table->type); | 220 | type); |
197 | return -EINVAL; | 221 | return -EINVAL; |
198 | } | 222 | } |
199 | 223 | ||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, | ||
228 | struct hns_roce_hem_table *table, unsigned long *obj, | ||
229 | struct hns_roce_hem_mhop *mhop) | ||
230 | { | ||
231 | struct device *dev = hr_dev->dev; | ||
232 | u32 chunk_ba_num; | ||
233 | u32 table_idx; | ||
234 | u32 bt_num; | ||
235 | u32 chunk_size; | ||
236 | |||
237 | if (get_hem_table_config(hr_dev, mhop, table->type)) | ||
238 | return -EINVAL; | ||
239 | |||
200 | if (!obj) | 240 | if (!obj) |
201 | return 0; | 241 | return 0; |
202 | 242 | ||
@@ -324,13 +364,13 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, | |||
324 | { | 364 | { |
325 | spinlock_t *lock = &hr_dev->bt_cmd_lock; | 365 | spinlock_t *lock = &hr_dev->bt_cmd_lock; |
326 | struct device *dev = hr_dev->dev; | 366 | struct device *dev = hr_dev->dev; |
327 | unsigned long end = 0; | 367 | long end; |
328 | unsigned long flags; | 368 | unsigned long flags; |
329 | struct hns_roce_hem_iter iter; | 369 | struct hns_roce_hem_iter iter; |
330 | void __iomem *bt_cmd; | 370 | void __iomem *bt_cmd; |
331 | u32 bt_cmd_h_val = 0; | 371 | __le32 bt_cmd_val[2]; |
332 | u32 bt_cmd_val[2]; | 372 | __le32 bt_cmd_h = 0; |
333 | u32 bt_cmd_l = 0; | 373 | __le32 bt_cmd_l = 0; |
334 | u64 bt_ba = 0; | 374 | u64 bt_ba = 0; |
335 | int ret = 0; | 375 | int ret = 0; |
336 | 376 | ||
@@ -340,30 +380,20 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, | |||
340 | 380 | ||
341 | switch (table->type) { | 381 | switch (table->type) { |
342 | case HEM_TYPE_QPC: | 382 | case HEM_TYPE_QPC: |
343 | roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, | ||
344 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC); | ||
345 | break; | ||
346 | case HEM_TYPE_MTPT: | 383 | case HEM_TYPE_MTPT: |
347 | roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, | ||
348 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, | ||
349 | HEM_TYPE_MTPT); | ||
350 | break; | ||
351 | case HEM_TYPE_CQC: | 384 | case HEM_TYPE_CQC: |
352 | roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, | ||
353 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC); | ||
354 | break; | ||
355 | case HEM_TYPE_SRQC: | 385 | case HEM_TYPE_SRQC: |
356 | roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, | 386 | roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, |
357 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, | 387 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); |
358 | HEM_TYPE_SRQC); | ||
359 | break; | 388 | break; |
360 | default: | 389 | default: |
361 | return ret; | 390 | return ret; |
362 | } | 391 | } |
363 | roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, | 392 | |
393 | roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, | ||
364 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); | 394 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); |
365 | roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); | 395 | roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); |
366 | roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); | 396 | roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); |
367 | 397 | ||
368 | /* Currently iter only a chunk */ | 398 | /* Currently iter only a chunk */ |
369 | for (hns_roce_hem_first(table->hem[i], &iter); | 399 | for (hns_roce_hem_first(table->hem[i], &iter); |
@@ -375,7 +405,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, | |||
375 | bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; | 405 | bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; |
376 | 406 | ||
377 | end = HW_SYNC_TIMEOUT_MSECS; | 407 | end = HW_SYNC_TIMEOUT_MSECS; |
378 | while (end) { | 408 | while (end > 0) { |
379 | if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT)) | 409 | if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT)) |
380 | break; | 410 | break; |
381 | 411 | ||
@@ -389,13 +419,13 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, | |||
389 | return -EBUSY; | 419 | return -EBUSY; |
390 | } | 420 | } |
391 | 421 | ||
392 | bt_cmd_l = (u32)bt_ba; | 422 | bt_cmd_l = cpu_to_le32(bt_ba); |
393 | roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, | 423 | roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, |
394 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, | 424 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, |
395 | bt_ba >> BT_BA_SHIFT); | 425 | bt_ba >> BT_BA_SHIFT); |
396 | 426 | ||
397 | bt_cmd_val[0] = bt_cmd_l; | 427 | bt_cmd_val[0] = bt_cmd_l; |
398 | bt_cmd_val[1] = bt_cmd_h_val; | 428 | bt_cmd_val[1] = bt_cmd_h; |
399 | hns_roce_write64_k(bt_cmd_val, | 429 | hns_roce_write64_k(bt_cmd_val, |
400 | hr_dev->reg_base + ROCEE_BT_CMD_L_REG); | 430 | hr_dev->reg_base + ROCEE_BT_CMD_L_REG); |
401 | spin_unlock_irqrestore(lock, flags); | 431 | spin_unlock_irqrestore(lock, flags); |
@@ -457,6 +487,12 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, | |||
457 | return -EINVAL; | 487 | return -EINVAL; |
458 | } | 488 | } |
459 | 489 | ||
490 | if (unlikely(hem_idx >= table->num_hem)) { | ||
491 | dev_err(dev, "Table %d exceed hem limt idx = %llu,max = %lu!\n", | ||
492 | table->type, hem_idx, table->num_hem); | ||
493 | return -EINVAL; | ||
494 | } | ||
495 | |||
460 | mutex_lock(&table->mutex); | 496 | mutex_lock(&table->mutex); |
461 | 497 | ||
462 | if (table->hem[hem_idx]) { | 498 | if (table->hem[hem_idx]) { |
@@ -693,7 +729,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, | |||
693 | if (check_whether_bt_num_2(table->type, hop_num)) { | 729 | if (check_whether_bt_num_2(table->type, hop_num)) { |
694 | start_idx = mhop.l0_idx * chunk_ba_num; | 730 | start_idx = mhop.l0_idx * chunk_ba_num; |
695 | if (hns_roce_check_hem_null(table->hem, start_idx, | 731 | if (hns_roce_check_hem_null(table->hem, start_idx, |
696 | chunk_ba_num)) { | 732 | chunk_ba_num, table->num_hem)) { |
697 | if (table->type < HEM_TYPE_MTT && | 733 | if (table->type < HEM_TYPE_MTT && |
698 | hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) | 734 | hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) |
699 | dev_warn(dev, "Clear HEM base address failed.\n"); | 735 | dev_warn(dev, "Clear HEM base address failed.\n"); |
@@ -707,7 +743,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, | |||
707 | start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num + | 743 | start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num + |
708 | mhop.l1_idx * chunk_ba_num; | 744 | mhop.l1_idx * chunk_ba_num; |
709 | if (hns_roce_check_hem_null(table->hem, start_idx, | 745 | if (hns_roce_check_hem_null(table->hem, start_idx, |
710 | chunk_ba_num)) { | 746 | chunk_ba_num, table->num_hem)) { |
711 | if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1)) | 747 | if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1)) |
712 | dev_warn(dev, "Clear HEM base address failed.\n"); | 748 | dev_warn(dev, "Clear HEM base address failed.\n"); |
713 | 749 | ||
@@ -791,7 +827,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, | |||
791 | } else { | 827 | } else { |
792 | u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */ | 828 | u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */ |
793 | 829 | ||
794 | hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); | 830 | if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop)) |
831 | goto out; | ||
795 | /* mtt mhop */ | 832 | /* mtt mhop */ |
796 | i = mhop.l0_idx; | 833 | i = mhop.l0_idx; |
797 | j = mhop.l1_idx; | 834 | j = mhop.l1_idx; |
@@ -840,11 +877,13 @@ int hns_roce_table_get_range(struct hns_roce_dev *hr_dev, | |||
840 | { | 877 | { |
841 | struct hns_roce_hem_mhop mhop; | 878 | struct hns_roce_hem_mhop mhop; |
842 | unsigned long inc = table->table_chunk_size / table->obj_size; | 879 | unsigned long inc = table->table_chunk_size / table->obj_size; |
843 | unsigned long i; | 880 | unsigned long i = 0; |
844 | int ret; | 881 | int ret; |
845 | 882 | ||
846 | if (hns_roce_check_whether_mhop(hr_dev, table->type)) { | 883 | if (hns_roce_check_whether_mhop(hr_dev, table->type)) { |
847 | hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); | 884 | ret = hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); |
885 | if (ret) | ||
886 | goto fail; | ||
848 | inc = mhop.bt_chunk_size / table->obj_size; | 887 | inc = mhop.bt_chunk_size / table->obj_size; |
849 | } | 888 | } |
850 | 889 | ||
@@ -874,7 +913,8 @@ void hns_roce_table_put_range(struct hns_roce_dev *hr_dev, | |||
874 | unsigned long i; | 913 | unsigned long i; |
875 | 914 | ||
876 | if (hns_roce_check_whether_mhop(hr_dev, table->type)) { | 915 | if (hns_roce_check_whether_mhop(hr_dev, table->type)) { |
877 | hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); | 916 | if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop)) |
917 | return; | ||
878 | inc = mhop.bt_chunk_size / table->obj_size; | 918 | inc = mhop.bt_chunk_size / table->obj_size; |
879 | } | 919 | } |
880 | 920 | ||
@@ -887,7 +927,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, | |||
887 | unsigned long obj_size, unsigned long nobj, | 927 | unsigned long obj_size, unsigned long nobj, |
888 | int use_lowmem) | 928 | int use_lowmem) |
889 | { | 929 | { |
890 | struct device *dev = hr_dev->dev; | ||
891 | unsigned long obj_per_chunk; | 930 | unsigned long obj_per_chunk; |
892 | unsigned long num_hem; | 931 | unsigned long num_hem; |
893 | 932 | ||
@@ -900,99 +939,21 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, | |||
900 | if (!table->hem) | 939 | if (!table->hem) |
901 | return -ENOMEM; | 940 | return -ENOMEM; |
902 | } else { | 941 | } else { |
942 | struct hns_roce_hem_mhop mhop = {}; | ||
903 | unsigned long buf_chunk_size; | 943 | unsigned long buf_chunk_size; |
904 | unsigned long bt_chunk_size; | 944 | unsigned long bt_chunk_size; |
905 | unsigned long bt_chunk_num; | 945 | unsigned long bt_chunk_num; |
906 | unsigned long num_bt_l0 = 0; | 946 | unsigned long num_bt_l0 = 0; |
907 | u32 hop_num; | 947 | u32 hop_num; |
908 | 948 | ||
909 | switch (type) { | 949 | if (get_hem_table_config(hr_dev, &mhop, type)) |
910 | case HEM_TYPE_QPC: | ||
911 | buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz | ||
912 | + PAGE_SHIFT); | ||
913 | bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz | ||
914 | + PAGE_SHIFT); | ||
915 | num_bt_l0 = hr_dev->caps.qpc_bt_num; | ||
916 | hop_num = hr_dev->caps.qpc_hop_num; | ||
917 | break; | ||
918 | case HEM_TYPE_MTPT: | ||
919 | buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz | ||
920 | + PAGE_SHIFT); | ||
921 | bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz | ||
922 | + PAGE_SHIFT); | ||
923 | num_bt_l0 = hr_dev->caps.mpt_bt_num; | ||
924 | hop_num = hr_dev->caps.mpt_hop_num; | ||
925 | break; | ||
926 | case HEM_TYPE_CQC: | ||
927 | buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz | ||
928 | + PAGE_SHIFT); | ||
929 | bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz | ||
930 | + PAGE_SHIFT); | ||
931 | num_bt_l0 = hr_dev->caps.cqc_bt_num; | ||
932 | hop_num = hr_dev->caps.cqc_hop_num; | ||
933 | break; | ||
934 | case HEM_TYPE_SCCC: | ||
935 | buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz | ||
936 | + PAGE_SHIFT); | ||
937 | bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz | ||
938 | + PAGE_SHIFT); | ||
939 | num_bt_l0 = hr_dev->caps.sccc_bt_num; | ||
940 | hop_num = hr_dev->caps.sccc_hop_num; | ||
941 | break; | ||
942 | case HEM_TYPE_QPC_TIMER: | ||
943 | buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz | ||
944 | + PAGE_SHIFT); | ||
945 | bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz | ||
946 | + PAGE_SHIFT); | ||
947 | num_bt_l0 = hr_dev->caps.qpc_timer_bt_num; | ||
948 | hop_num = hr_dev->caps.qpc_timer_hop_num; | ||
949 | break; | ||
950 | case HEM_TYPE_CQC_TIMER: | ||
951 | buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz | ||
952 | + PAGE_SHIFT); | ||
953 | bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz | ||
954 | + PAGE_SHIFT); | ||
955 | num_bt_l0 = hr_dev->caps.cqc_timer_bt_num; | ||
956 | hop_num = hr_dev->caps.cqc_timer_hop_num; | ||
957 | break; | ||
958 | case HEM_TYPE_SRQC: | ||
959 | buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz | ||
960 | + PAGE_SHIFT); | ||
961 | bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz | ||
962 | + PAGE_SHIFT); | ||
963 | num_bt_l0 = hr_dev->caps.srqc_bt_num; | ||
964 | hop_num = hr_dev->caps.srqc_hop_num; | ||
965 | break; | ||
966 | case HEM_TYPE_MTT: | ||
967 | buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz | ||
968 | + PAGE_SHIFT); | ||
969 | bt_chunk_size = buf_chunk_size; | ||
970 | hop_num = hr_dev->caps.mtt_hop_num; | ||
971 | break; | ||
972 | case HEM_TYPE_CQE: | ||
973 | buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz | ||
974 | + PAGE_SHIFT); | ||
975 | bt_chunk_size = buf_chunk_size; | ||
976 | hop_num = hr_dev->caps.cqe_hop_num; | ||
977 | break; | ||
978 | case HEM_TYPE_SRQWQE: | ||
979 | buf_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz | ||
980 | + PAGE_SHIFT); | ||
981 | bt_chunk_size = buf_chunk_size; | ||
982 | hop_num = hr_dev->caps.srqwqe_hop_num; | ||
983 | break; | ||
984 | case HEM_TYPE_IDX: | ||
985 | buf_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz | ||
986 | + PAGE_SHIFT); | ||
987 | bt_chunk_size = buf_chunk_size; | ||
988 | hop_num = hr_dev->caps.idx_hop_num; | ||
989 | break; | ||
990 | default: | ||
991 | dev_err(dev, | ||
992 | "Table %d not support to init hem table here!\n", | ||
993 | type); | ||
994 | return -EINVAL; | 950 | return -EINVAL; |
995 | } | 951 | |
952 | buf_chunk_size = mhop.buf_chunk_size; | ||
953 | bt_chunk_size = mhop.bt_chunk_size; | ||
954 | num_bt_l0 = mhop.ba_l0_num; | ||
955 | hop_num = mhop.hop_num; | ||
956 | |||
996 | obj_per_chunk = buf_chunk_size / obj_size; | 957 | obj_per_chunk = buf_chunk_size / obj_size; |
997 | num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk; | 958 | num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk; |
998 | bt_chunk_num = bt_chunk_size / BA_BYTE_LEN; | 959 | bt_chunk_num = bt_chunk_size / BA_BYTE_LEN; |
@@ -1075,7 +1036,8 @@ static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev, | |||
1075 | int i; | 1036 | int i; |
1076 | u64 obj; | 1037 | u64 obj; |
1077 | 1038 | ||
1078 | hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); | 1039 | if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop)) |
1040 | return; | ||
1079 | buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size : | 1041 | buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size : |
1080 | mhop.bt_chunk_size; | 1042 | mhop.bt_chunk_size; |
1081 | 1043 | ||
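Note on the hns_roce_hem.c rework above: hns_roce_check_whether_mhop() now switches on the table type and reports multi-hop support simply as "hop count non-zero", and the large per-type chunk-size switch that used to live in hns_roce_init_hem_table() is folded into the new get_hem_table_config() helper, which hns_roce_calc_hem_mhop() also calls and whose failure every caller now checks. A stripped-down standalone model of that lookup-plus-boolean split follows; the table types and capability values are invented for the example.

/* Standalone model of the HEM refactor: one helper maps a table type to its
 * configuration, and "does this type use multi-hop addressing?" reduces to
 * "is its hop count non-zero?". Types and values are invented.
 */
#include <stdbool.h>
#include <stdio.h>

enum hem_type { HEM_TYPE_QPC, HEM_TYPE_MTPT, HEM_TYPE_CQC, HEM_TYPE_MAX };

struct caps { unsigned int qpc_hop_num, mpt_hop_num, cqc_hop_num; };

static int get_hop_num(const struct caps *caps, enum hem_type type,
                       unsigned int *hop_num)
{
    switch (type) {
    case HEM_TYPE_QPC:  *hop_num = caps->qpc_hop_num; break;
    case HEM_TYPE_MTPT: *hop_num = caps->mpt_hop_num; break;
    case HEM_TYPE_CQC:  *hop_num = caps->cqc_hop_num; break;
    default:
        return -1;      /* unknown table type */
    }
    return 0;
}

static bool check_whether_mhop(const struct caps *caps, enum hem_type type)
{
    unsigned int hop_num = 0;

    if (get_hop_num(caps, type, &hop_num))
        return false;
    return hop_num != 0;
}

int main(void)
{
    struct caps caps = { .qpc_hop_num = 2, .mpt_hop_num = 0, .cqc_hop_num = 1 };

    printf("QPC multi-hop: %d\n", check_whether_mhop(&caps, HEM_TYPE_QPC));
    printf("MTPT multi-hop: %d\n", check_whether_mhop(&caps, HEM_TYPE_MTPT));
    return 0;
}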
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h index f1ccb8f35fe5..86783276fb1f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.h +++ b/drivers/infiniband/hw/hns/hns_roce_hem.h | |||
@@ -102,9 +102,9 @@ struct hns_roce_hem_mhop { | |||
102 | u32 buf_chunk_size; | 102 | u32 buf_chunk_size; |
103 | u32 bt_chunk_size; | 103 | u32 bt_chunk_size; |
104 | u32 ba_l0_num; | 104 | u32 ba_l0_num; |
105 | u32 l0_idx;/* level 0 base address table index */ | 105 | u32 l0_idx; /* level 0 base address table index */ |
106 | u32 l1_idx;/* level 1 base address table index */ | 106 | u32 l1_idx; /* level 1 base address table index */ |
107 | u32 l2_idx;/* level 2 base address table index */ | 107 | u32 l2_idx; /* level 2 base address table index */ |
108 | }; | 108 | }; |
109 | 109 | ||
110 | void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem); | 110 | void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem); |
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 141205e76314..5f74bf55f471 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c | |||
@@ -73,7 +73,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, | |||
73 | int ps_opcode = 0, i = 0; | 73 | int ps_opcode = 0, i = 0; |
74 | unsigned long flags = 0; | 74 | unsigned long flags = 0; |
75 | void *wqe = NULL; | 75 | void *wqe = NULL; |
76 | u32 doorbell[2]; | 76 | __le32 doorbell[2]; |
77 | int nreq = 0; | 77 | int nreq = 0; |
78 | u32 ind = 0; | 78 | u32 ind = 0; |
79 | int ret = 0; | 79 | int ret = 0; |
@@ -175,13 +175,11 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, | |||
175 | roce_set_field(ud_sq_wqe->u32_36, | 175 | roce_set_field(ud_sq_wqe->u32_36, |
176 | UD_SEND_WQE_U32_36_FLOW_LABEL_M, | 176 | UD_SEND_WQE_U32_36_FLOW_LABEL_M, |
177 | UD_SEND_WQE_U32_36_FLOW_LABEL_S, | 177 | UD_SEND_WQE_U32_36_FLOW_LABEL_S, |
178 | ah->av.sl_tclass_flowlabel & | 178 | ah->av.flowlabel); |
179 | HNS_ROCE_FLOW_LABEL_MASK); | ||
180 | roce_set_field(ud_sq_wqe->u32_36, | 179 | roce_set_field(ud_sq_wqe->u32_36, |
181 | UD_SEND_WQE_U32_36_PRIORITY_M, | 180 | UD_SEND_WQE_U32_36_PRIORITY_M, |
182 | UD_SEND_WQE_U32_36_PRIORITY_S, | 181 | UD_SEND_WQE_U32_36_PRIORITY_S, |
183 | le32_to_cpu(ah->av.sl_tclass_flowlabel) >> | 182 | ah->av.sl); |
184 | HNS_ROCE_SL_SHIFT); | ||
185 | roce_set_field(ud_sq_wqe->u32_36, | 183 | roce_set_field(ud_sq_wqe->u32_36, |
186 | UD_SEND_WQE_U32_36_SGID_INDEX_M, | 184 | UD_SEND_WQE_U32_36_SGID_INDEX_M, |
187 | UD_SEND_WQE_U32_36_SGID_INDEX_S, | 185 | UD_SEND_WQE_U32_36_SGID_INDEX_S, |
@@ -195,8 +193,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, | |||
195 | roce_set_field(ud_sq_wqe->u32_40, | 193 | roce_set_field(ud_sq_wqe->u32_40, |
196 | UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M, | 194 | UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M, |
197 | UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, | 195 | UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, |
198 | ah->av.sl_tclass_flowlabel >> | 196 | ah->av.tclass); |
199 | HNS_ROCE_TCLASS_SHIFT); | ||
200 | 197 | ||
201 | memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN); | 198 | memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN); |
202 | 199 | ||
@@ -335,10 +332,10 @@ out: | |||
335 | SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn); | 332 | SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn); |
336 | roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1); | 333 | roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1); |
337 | 334 | ||
338 | doorbell[0] = le32_to_cpu(sq_db.u32_4); | 335 | doorbell[0] = sq_db.u32_4; |
339 | doorbell[1] = le32_to_cpu(sq_db.u32_8); | 336 | doorbell[1] = sq_db.u32_8; |
340 | 337 | ||
341 | hns_roce_write64_k((__le32 *)doorbell, qp->sq.db_reg_l); | 338 | hns_roce_write64_k(doorbell, qp->sq.db_reg_l); |
342 | qp->sq_next_wqe = ind; | 339 | qp->sq_next_wqe = ind; |
343 | } | 340 | } |
344 | 341 | ||
@@ -363,7 +360,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp, | |||
363 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | 360 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
364 | struct device *dev = &hr_dev->pdev->dev; | 361 | struct device *dev = &hr_dev->pdev->dev; |
365 | struct hns_roce_rq_db rq_db; | 362 | struct hns_roce_rq_db rq_db; |
366 | uint32_t doorbell[2] = {0}; | 363 | __le32 doorbell[2] = {0}; |
367 | 364 | ||
368 | spin_lock_irqsave(&hr_qp->rq.lock, flags); | 365 | spin_lock_irqsave(&hr_qp->rq.lock, flags); |
369 | ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1); | 366 | ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1); |
@@ -437,11 +434,10 @@ out: | |||
437 | roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S, | 434 | roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S, |
438 | 1); | 435 | 1); |
439 | 436 | ||
440 | doorbell[0] = le32_to_cpu(rq_db.u32_4); | 437 | doorbell[0] = rq_db.u32_4; |
441 | doorbell[1] = le32_to_cpu(rq_db.u32_8); | 438 | doorbell[1] = rq_db.u32_8; |
442 | 439 | ||
443 | hns_roce_write64_k((__le32 *)doorbell, | 440 | hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l); |
444 | hr_qp->rq.db_reg_l); | ||
445 | } | 441 | } |
446 | } | 442 | } |
447 | spin_unlock_irqrestore(&hr_qp->rq.lock, flags); | 443 | spin_unlock_irqrestore(&hr_qp->rq.lock, flags); |
@@ -715,7 +711,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) | |||
715 | struct ib_cq *cq; | 711 | struct ib_cq *cq; |
716 | struct ib_pd *pd; | 712 | struct ib_pd *pd; |
717 | union ib_gid dgid; | 713 | union ib_gid dgid; |
718 | u64 subnet_prefix; | 714 | __be64 subnet_prefix; |
719 | int attr_mask = 0; | 715 | int attr_mask = 0; |
720 | int ret; | 716 | int ret; |
721 | int i, j; | 717 | int i, j; |
@@ -971,7 +967,7 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev) | |||
971 | struct hns_roce_free_mr *free_mr; | 967 | struct hns_roce_free_mr *free_mr; |
972 | struct hns_roce_v1_priv *priv; | 968 | struct hns_roce_v1_priv *priv; |
973 | struct completion comp; | 969 | struct completion comp; |
974 | unsigned long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS; | 970 | long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS; |
975 | 971 | ||
976 | priv = (struct hns_roce_v1_priv *)hr_dev->priv; | 972 | priv = (struct hns_roce_v1_priv *)hr_dev->priv; |
977 | free_mr = &priv->free_mr; | 973 | free_mr = &priv->free_mr; |
@@ -991,7 +987,7 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev) | |||
991 | 987 | ||
992 | queue_work(free_mr->free_mr_wq, &(lp_qp_work->work)); | 988 | queue_work(free_mr->free_mr_wq, &(lp_qp_work->work)); |
993 | 989 | ||
994 | while (end) { | 990 | while (end > 0) { |
995 | if (try_wait_for_completion(&comp)) | 991 | if (try_wait_for_completion(&comp)) |
996 | return 0; | 992 | return 0; |
997 | msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE); | 993 | msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE); |
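Changing end from unsigned long to long and testing end > 0 makes the countdown robust: the loop subtracts a fixed sleep interval that need not divide the timeout evenly, and with an unsigned counter the final subtraction can wrap to a huge value instead of going negative, so while (end) would never terminate. The same pattern appears again in the dereg_mr hunk below. A small sketch of the bounded wait, with done() standing in for try_wait_for_completion():

/* Sketch: bounded polling with a signed countdown.  TIMEOUT_MS, WAIT_MS
 * and done() are illustrative stand-ins for the driver's constants and
 * try_wait_for_completion().
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define TIMEOUT_MS	10000
#define WAIT_MS		20

static int wait_for_done(bool (*done)(void *arg), void *arg)
{
	long end = TIMEOUT_MS;

	while (end > 0) {	/* "while (end)" spins forever if end wraps */
		if (done(arg))
			return 0;
		msleep(WAIT_MS);
		end -= WAIT_MS;
	}

	return -ETIMEDOUT;
}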
@@ -1109,7 +1105,7 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, | |||
1109 | struct hns_roce_free_mr *free_mr; | 1105 | struct hns_roce_free_mr *free_mr; |
1110 | struct hns_roce_v1_priv *priv; | 1106 | struct hns_roce_v1_priv *priv; |
1111 | struct completion comp; | 1107 | struct completion comp; |
1112 | unsigned long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS; | 1108 | long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS; |
1113 | unsigned long start = jiffies; | 1109 | unsigned long start = jiffies; |
1114 | int npages; | 1110 | int npages; |
1115 | int ret = 0; | 1111 | int ret = 0; |
@@ -1139,7 +1135,7 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, | |||
1139 | 1135 | ||
1140 | queue_work(free_mr->free_mr_wq, &(mr_work->work)); | 1136 | queue_work(free_mr->free_mr_wq, &(mr_work->work)); |
1141 | 1137 | ||
1142 | while (end) { | 1138 | while (end > 0) { |
1143 | if (try_wait_for_completion(&comp)) | 1139 | if (try_wait_for_completion(&comp)) |
1144 | goto free_mr; | 1140 | goto free_mr; |
1145 | msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); | 1141 | msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); |
@@ -2165,7 +2161,7 @@ static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, | |||
2165 | { | 2161 | { |
2166 | struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); | 2162 | struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); |
2167 | u32 notification_flag; | 2163 | u32 notification_flag; |
2168 | __le32 doorbell[2]; | 2164 | __le32 doorbell[2] = {}; |
2169 | 2165 | ||
2170 | notification_flag = (flags & IB_CQ_SOLICITED_MASK) == | 2166 | notification_flag = (flags & IB_CQ_SOLICITED_MASK) == |
2171 | IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL; | 2167 | IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL; |
@@ -2430,7 +2426,8 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, | |||
2430 | { | 2426 | { |
2431 | struct device *dev = &hr_dev->pdev->dev; | 2427 | struct device *dev = &hr_dev->pdev->dev; |
2432 | struct hns_roce_v1_priv *priv; | 2428 | struct hns_roce_v1_priv *priv; |
2433 | unsigned long end = 0, flags = 0; | 2429 | unsigned long flags = 0; |
2430 | long end = HW_SYNC_TIMEOUT_MSECS; | ||
2434 | __le32 bt_cmd_val[2] = {0}; | 2431 | __le32 bt_cmd_val[2] = {0}; |
2435 | void __iomem *bt_cmd; | 2432 | void __iomem *bt_cmd; |
2436 | u64 bt_ba = 0; | 2433 | u64 bt_ba = 0; |
@@ -2439,18 +2436,12 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, | |||
2439 | 2436 | ||
2440 | switch (table->type) { | 2437 | switch (table->type) { |
2441 | case HEM_TYPE_QPC: | 2438 | case HEM_TYPE_QPC: |
2442 | roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, | ||
2443 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC); | ||
2444 | bt_ba = priv->bt_table.qpc_buf.map >> 12; | 2439 | bt_ba = priv->bt_table.qpc_buf.map >> 12; |
2445 | break; | 2440 | break; |
2446 | case HEM_TYPE_MTPT: | 2441 | case HEM_TYPE_MTPT: |
2447 | roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, | ||
2448 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT); | ||
2449 | bt_ba = priv->bt_table.mtpt_buf.map >> 12; | 2442 | bt_ba = priv->bt_table.mtpt_buf.map >> 12; |
2450 | break; | 2443 | break; |
2451 | case HEM_TYPE_CQC: | 2444 | case HEM_TYPE_CQC: |
2452 | roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, | ||
2453 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC); | ||
2454 | bt_ba = priv->bt_table.cqc_buf.map >> 12; | 2445 | bt_ba = priv->bt_table.cqc_buf.map >> 12; |
2455 | break; | 2446 | break; |
2456 | case HEM_TYPE_SRQC: | 2447 | case HEM_TYPE_SRQC: |
@@ -2459,6 +2450,8 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, | |||
2459 | default: | 2450 | default: |
2460 | return 0; | 2451 | return 0; |
2461 | } | 2452 | } |
2453 | roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, | ||
2454 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); | ||
2462 | roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, | 2455 | roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, |
2463 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); | 2456 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); |
2464 | roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); | 2457 | roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); |
@@ -2468,7 +2461,6 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, | |||
2468 | 2461 | ||
2469 | bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; | 2462 | bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; |
2470 | 2463 | ||
2471 | end = HW_SYNC_TIMEOUT_MSECS; | ||
2472 | while (1) { | 2464 | while (1) { |
2473 | if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { | 2465 | if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { |
2474 | if (!end) { | 2466 | if (!end) { |
@@ -2484,7 +2476,7 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, | |||
2484 | end -= HW_SYNC_SLEEP_TIME_INTERVAL; | 2476 | end -= HW_SYNC_SLEEP_TIME_INTERVAL; |
2485 | } | 2477 | } |
2486 | 2478 | ||
2487 | bt_cmd_val[0] = (__le32)bt_ba; | 2479 | bt_cmd_val[0] = cpu_to_le32(bt_ba); |
2488 | roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, | 2480 | roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, |
2489 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32); | 2481 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32); |
2490 | hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); | 2482 | hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); |
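Two separate cleanups meet in the clear_hem hunks: the per-case roce_set_field() writes of the BT_CMD_MDF field collapse into one call that passes table->type (which relies on the HEM_TYPE_* values already matching the hardware's MDF encoding, as the old per-case code implied), and the low word of the base address is stored with cpu_to_le32() instead of a bare (__le32) cast. The cast only relabels the type for sparse; the conversion also swaps bytes where it matters. A reduced sketch of the second point, with an illustrative command structure:

/* Sketch: filling a little-endian command word from a 64-bit bus address.
 * struct bt_cmd is illustrative; only the cast-versus-conversion contrast
 * matters.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct bt_cmd {
	__le32 word[2];
};

static void fill_bt_cmd(struct bt_cmd *cmd, u64 bt_ba)
{
	/* wrong on big-endian: cmd->word[0] = (__le32)bt_ba; */
	cmd->word[0] = cpu_to_le32(lower_32_bits(bt_ba));
	/* the high bits are packed into word[1] via the field helpers */
}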
@@ -2627,7 +2619,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, | |||
2627 | QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port); | 2619 | QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port); |
2628 | roce_set_bit(context->qp1c_bytes_16, | 2620 | roce_set_bit(context->qp1c_bytes_16, |
2629 | QP1C_BYTES_16_SIGNALING_TYPE_S, | 2621 | QP1C_BYTES_16_SIGNALING_TYPE_S, |
2630 | le32_to_cpu(hr_qp->sq_signal_bits)); | 2622 | hr_qp->sq_signal_bits); |
2631 | roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S, | 2623 | roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S, |
2632 | 1); | 2624 | 1); |
2633 | roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S, | 2625 | roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S, |
@@ -2933,7 +2925,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, | |||
2933 | 1); | 2925 | 1); |
2934 | roce_set_bit(context->qpc_bytes_32, | 2926 | roce_set_bit(context->qpc_bytes_32, |
2935 | QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S, | 2927 | QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S, |
2936 | le32_to_cpu(hr_qp->sq_signal_bits)); | 2928 | hr_qp->sq_signal_bits); |
2937 | 2929 | ||
2938 | port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : | 2930 | port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : |
2939 | hr_qp->port; | 2931 | hr_qp->port; |
@@ -3578,7 +3570,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, | |||
3578 | qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148, | 3570 | qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148, |
3579 | QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, | 3571 | QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, |
3580 | QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S); | 3572 | QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S); |
3581 | qp_attr->rnr_retry = (u8)context->rnr_retry; | 3573 | qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry); |
3582 | 3574 | ||
3583 | done: | 3575 | done: |
3584 | qp_attr->cur_qp_state = qp_attr->qp_state; | 3576 | qp_attr->cur_qp_state = qp_attr->qp_state; |
@@ -4021,7 +4013,8 @@ static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev, | |||
4021 | ++eq->cons_index; | 4013 | ++eq->cons_index; |
4022 | ceqes_found = 1; | 4014 | ceqes_found = 1; |
4023 | 4015 | ||
4024 | if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) { | 4016 | if (eq->cons_index > |
4017 | EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) { | ||
4025 | dev_warn(&eq->hr_dev->pdev->dev, | 4018 | dev_warn(&eq->hr_dev->pdev->dev, |
4026 | "cons_index overflow, set back to 0.\n"); | 4019 | "cons_index overflow, set back to 0.\n"); |
4027 | eq->cons_index = 0; | 4020 | eq->cons_index = 0; |
@@ -4518,7 +4511,6 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) | |||
4518 | struct platform_device *pdev = NULL; | 4511 | struct platform_device *pdev = NULL; |
4519 | struct net_device *netdev = NULL; | 4512 | struct net_device *netdev = NULL; |
4520 | struct device_node *net_node; | 4513 | struct device_node *net_node; |
4521 | struct resource *res; | ||
4522 | int port_cnt = 0; | 4514 | int port_cnt = 0; |
4523 | u8 phy_port; | 4515 | u8 phy_port; |
4524 | int ret; | 4516 | int ret; |
@@ -4557,8 +4549,7 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) | |||
4557 | } | 4549 | } |
4558 | 4550 | ||
4559 | /* get the mapped register base address */ | 4551 | /* get the mapped register base address */ |
4560 | res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0); | 4552 | hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0); |
4561 | hr_dev->reg_base = devm_ioremap_resource(dev, res); | ||
4562 | if (IS_ERR(hr_dev->reg_base)) | 4553 | if (IS_ERR(hr_dev->reg_base)) |
4563 | return PTR_ERR(hr_dev->reg_base); | 4554 | return PTR_ERR(hr_dev->reg_base); |
4564 | 4555 | ||
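devm_platform_ioremap_resource() is a wrapper around platform_get_resource(pdev, IORESOURCE_MEM, index) followed by devm_ioremap_resource() on the same device, so the local struct resource pointer and the two-step lookup can go. A minimal usage sketch in a probe path; struct my_priv and my_probe() are illustrative names:

/* Sketch: mapping a platform device's first MMIO region in one call. */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_priv {
	void __iomem *reg_base;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* same as platform_get_resource() + devm_ioremap_resource() */
	priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->reg_base))
		return PTR_ERR(priv->reg_base);

	platform_set_drvdata(pdev, priv);
	return 0;
}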
@@ -4633,10 +4624,8 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) | |||
4633 | /* fetch the interrupt numbers */ | 4624 | /* fetch the interrupt numbers */ |
4634 | for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) { | 4625 | for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) { |
4635 | hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i); | 4626 | hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i); |
4636 | if (hr_dev->irq[i] <= 0) { | 4627 | if (hr_dev->irq[i] <= 0) |
4637 | dev_err(dev, "platform get of irq[=%d] failed!\n", i); | ||
4638 | return -EINVAL; | 4628 | return -EINVAL; |
4639 | } | ||
4640 | } | 4629 | } |
4641 | 4630 | ||
4642 | return 0; | 4631 | return 0; |
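Dropping the dev_err() around platform_get_irq() assumes the behaviour platform_get_irq() has in this kernel, where the core already prints an error when no IRQ can be found, so the caller only needs to propagate the failure:

/* Sketch: rely on platform_get_irq() to log its own failure message. */
#include <linux/errno.h>
#include <linux/platform_device.h>

static int my_get_irq(struct platform_device *pdev, int index)
{
	int irq = platform_get_irq(pdev, index);

	return irq <= 0 ? -EINVAL : irq;
}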
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index b76e3beeafb8..7a89d669f8bf 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c | |||
@@ -239,7 +239,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, | |||
239 | struct device *dev = hr_dev->dev; | 239 | struct device *dev = hr_dev->dev; |
240 | struct hns_roce_v2_db sq_db; | 240 | struct hns_roce_v2_db sq_db; |
241 | struct ib_qp_attr attr; | 241 | struct ib_qp_attr attr; |
242 | unsigned int sge_ind = 0; | 242 | unsigned int sge_ind; |
243 | unsigned int owner_bit; | 243 | unsigned int owner_bit; |
244 | unsigned long flags; | 244 | unsigned long flags; |
245 | unsigned int ind; | 245 | unsigned int ind; |
@@ -397,18 +397,15 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, | |||
397 | roce_set_field(ud_sq_wqe->byte_36, | 397 | roce_set_field(ud_sq_wqe->byte_36, |
398 | V2_UD_SEND_WQE_BYTE_36_TCLASS_M, | 398 | V2_UD_SEND_WQE_BYTE_36_TCLASS_M, |
399 | V2_UD_SEND_WQE_BYTE_36_TCLASS_S, | 399 | V2_UD_SEND_WQE_BYTE_36_TCLASS_S, |
400 | ah->av.sl_tclass_flowlabel >> | 400 | ah->av.tclass); |
401 | HNS_ROCE_TCLASS_SHIFT); | ||
402 | roce_set_field(ud_sq_wqe->byte_40, | 401 | roce_set_field(ud_sq_wqe->byte_40, |
403 | V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M, | 402 | V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M, |
404 | V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, | 403 | V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, |
405 | ah->av.sl_tclass_flowlabel & | 404 | ah->av.flowlabel); |
406 | HNS_ROCE_FLOW_LABEL_MASK); | ||
407 | roce_set_field(ud_sq_wqe->byte_40, | 405 | roce_set_field(ud_sq_wqe->byte_40, |
408 | V2_UD_SEND_WQE_BYTE_40_SL_M, | 406 | V2_UD_SEND_WQE_BYTE_40_SL_M, |
409 | V2_UD_SEND_WQE_BYTE_40_SL_S, | 407 | V2_UD_SEND_WQE_BYTE_40_SL_S, |
410 | le32_to_cpu(ah->av.sl_tclass_flowlabel) >> | 408 | ah->av.sl); |
411 | HNS_ROCE_SL_SHIFT); | ||
412 | roce_set_field(ud_sq_wqe->byte_40, | 409 | roce_set_field(ud_sq_wqe->byte_40, |
413 | V2_UD_SEND_WQE_BYTE_40_PORTN_M, | 410 | V2_UD_SEND_WQE_BYTE_40_PORTN_M, |
414 | V2_UD_SEND_WQE_BYTE_40_PORTN_S, | 411 | V2_UD_SEND_WQE_BYTE_40_PORTN_S, |
@@ -887,8 +884,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type) | |||
887 | roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, | 884 | roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, |
888 | upper_32_bits(dma)); | 885 | upper_32_bits(dma)); |
889 | roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG, | 886 | roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG, |
890 | (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) | | 887 | ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S); |
891 | HNS_ROCE_CMQ_ENABLE); | ||
892 | roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0); | 888 | roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0); |
893 | roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0); | 889 | roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0); |
894 | } else { | 890 | } else { |
@@ -896,8 +892,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type) | |||
896 | roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG, | 892 | roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG, |
897 | upper_32_bits(dma)); | 893 | upper_32_bits(dma)); |
898 | roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG, | 894 | roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG, |
899 | (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) | | 895 | ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S); |
900 | HNS_ROCE_CMQ_ENABLE); | ||
901 | roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0); | 896 | roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0); |
902 | roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0); | 897 | roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0); |
903 | } | 898 | } |
@@ -1044,7 +1039,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, | |||
1044 | * If the command is sync, wait for the firmware to write back, | 1039 | * If the command is sync, wait for the firmware to write back, |
1045 | * if multi descriptors to be sent, use the first one to check | 1040 | * if multi descriptors to be sent, use the first one to check |
1046 | */ | 1041 | */ |
1047 | if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) { | 1042 | if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) { |
1048 | do { | 1043 | do { |
1049 | if (hns_roce_cmq_csq_done(hr_dev)) | 1044 | if (hns_roce_cmq_csq_done(hr_dev)) |
1050 | break; | 1045 | break; |
@@ -1061,7 +1056,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, | |||
1061 | desc_to_use = &csq->desc[ntc]; | 1056 | desc_to_use = &csq->desc[ntc]; |
1062 | desc[handle] = *desc_to_use; | 1057 | desc[handle] = *desc_to_use; |
1063 | dev_dbg(hr_dev->dev, "Get cmq desc:\n"); | 1058 | dev_dbg(hr_dev->dev, "Get cmq desc:\n"); |
1064 | desc_ret = desc[handle].retval; | 1059 | desc_ret = le16_to_cpu(desc[handle].retval); |
1065 | if (desc_ret == CMD_EXEC_SUCCESS) | 1060 | if (desc_ret == CMD_EXEC_SUCCESS) |
1066 | ret = 0; | 1061 | ret = 0; |
1067 | else | 1062 | else |
@@ -1124,32 +1119,124 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev) | |||
1124 | return ret; | 1119 | return ret; |
1125 | 1120 | ||
1126 | resp = (struct hns_roce_query_version *)desc.data; | 1121 | resp = (struct hns_roce_query_version *)desc.data; |
1127 | hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version); | 1122 | hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version); |
1128 | hr_dev->vendor_id = hr_dev->pci_dev->vendor; | 1123 | hr_dev->vendor_id = hr_dev->pci_dev->vendor; |
1129 | 1124 | ||
1130 | return 0; | 1125 | return 0; |
1131 | } | 1126 | } |
1132 | 1127 | ||
1128 | static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev) | ||
1129 | { | ||
1130 | struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; | ||
1131 | struct hnae3_handle *handle = priv->handle; | ||
1132 | const struct hnae3_ae_ops *ops = handle->ae_algo->ops; | ||
1133 | unsigned long reset_cnt; | ||
1134 | bool sw_resetting; | ||
1135 | bool hw_resetting; | ||
1136 | |||
1137 | reset_cnt = ops->ae_dev_reset_cnt(handle); | ||
1138 | hw_resetting = ops->get_hw_reset_stat(handle); | ||
1139 | sw_resetting = ops->ae_dev_resetting(handle); | ||
1140 | |||
1141 | if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting) | ||
1142 | return true; | ||
1143 | |||
1144 | return false; | ||
1145 | } | ||
1146 | |||
1147 | static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval, | ||
1148 | int flag) | ||
1149 | { | ||
1150 | struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; | ||
1151 | struct hnae3_handle *handle = priv->handle; | ||
1152 | const struct hnae3_ae_ops *ops = handle->ae_algo->ops; | ||
1153 | unsigned long instance_stage; | ||
1154 | unsigned long reset_cnt; | ||
1155 | unsigned long end; | ||
1156 | bool sw_resetting; | ||
1157 | bool hw_resetting; | ||
1158 | |||
1159 | instance_stage = handle->rinfo.instance_state; | ||
1160 | reset_cnt = ops->ae_dev_reset_cnt(handle); | ||
1161 | hw_resetting = ops->get_hw_reset_stat(handle); | ||
1162 | sw_resetting = ops->ae_dev_resetting(handle); | ||
1163 | |||
1164 | if (reset_cnt != hr_dev->reset_cnt) { | ||
1165 | hr_dev->dis_db = true; | ||
1166 | hr_dev->is_reset = true; | ||
1167 | dev_info(hr_dev->dev, "Func clear success after reset.\n"); | ||
1168 | } else if (hw_resetting) { | ||
1169 | hr_dev->dis_db = true; | ||
1170 | |||
1171 | dev_warn(hr_dev->dev, | ||
1172 | "Func clear is pending, device in resetting state.\n"); | ||
1173 | end = HNS_ROCE_V2_HW_RST_TIMEOUT; | ||
1174 | while (end) { | ||
1175 | if (!ops->get_hw_reset_stat(handle)) { | ||
1176 | hr_dev->is_reset = true; | ||
1177 | dev_info(hr_dev->dev, | ||
1178 | "Func clear success after reset.\n"); | ||
1179 | return; | ||
1180 | } | ||
1181 | msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT); | ||
1182 | end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT; | ||
1183 | } | ||
1184 | |||
1185 | dev_warn(hr_dev->dev, "Func clear failed.\n"); | ||
1186 | } else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) { | ||
1187 | hr_dev->dis_db = true; | ||
1188 | |||
1189 | dev_warn(hr_dev->dev, | ||
1190 | "Func clear is pending, device in resetting state.\n"); | ||
1191 | end = HNS_ROCE_V2_HW_RST_TIMEOUT; | ||
1192 | while (end) { | ||
1193 | if (ops->ae_dev_reset_cnt(handle) != | ||
1194 | hr_dev->reset_cnt) { | ||
1195 | hr_dev->is_reset = true; | ||
1196 | dev_info(hr_dev->dev, | ||
1197 | "Func clear success after sw reset\n"); | ||
1198 | return; | ||
1199 | } | ||
1200 | msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT); | ||
1201 | end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT; | ||
1202 | } | ||
1203 | |||
1204 | dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n"); | ||
1205 | } else { | ||
1206 | if (retval && !flag) | ||
1207 | dev_warn(hr_dev->dev, | ||
1208 | "Func clear read failed, ret = %d.\n", retval); | ||
1209 | |||
1210 | dev_warn(hr_dev->dev, "Func clear failed.\n"); | ||
1211 | } | ||
1212 | } | ||
1133 | static void hns_roce_function_clear(struct hns_roce_dev *hr_dev) | 1213 | static void hns_roce_function_clear(struct hns_roce_dev *hr_dev) |
1134 | { | 1214 | { |
1215 | bool fclr_write_fail_flag = false; | ||
1135 | struct hns_roce_func_clear *resp; | 1216 | struct hns_roce_func_clear *resp; |
1136 | struct hns_roce_cmq_desc desc; | 1217 | struct hns_roce_cmq_desc desc; |
1137 | unsigned long end; | 1218 | unsigned long end; |
1138 | int ret; | 1219 | int ret = 0; |
1220 | |||
1221 | if (hns_roce_func_clr_chk_rst(hr_dev)) | ||
1222 | goto out; | ||
1139 | 1223 | ||
1140 | hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false); | 1224 | hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false); |
1141 | resp = (struct hns_roce_func_clear *)desc.data; | 1225 | resp = (struct hns_roce_func_clear *)desc.data; |
1142 | 1226 | ||
1143 | ret = hns_roce_cmq_send(hr_dev, &desc, 1); | 1227 | ret = hns_roce_cmq_send(hr_dev, &desc, 1); |
1144 | if (ret) { | 1228 | if (ret) { |
1229 | fclr_write_fail_flag = true; | ||
1145 | dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n", | 1230 | dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n", |
1146 | ret); | 1231 | ret); |
1147 | return; | 1232 | goto out; |
1148 | } | 1233 | } |
1149 | 1234 | ||
1150 | msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL); | 1235 | msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL); |
1151 | end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS; | 1236 | end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS; |
1152 | while (end) { | 1237 | while (end) { |
1238 | if (hns_roce_func_clr_chk_rst(hr_dev)) | ||
1239 | goto out; | ||
1153 | msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT); | 1240 | msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT); |
1154 | end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT; | 1241 | end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT; |
1155 | 1242 | ||
@@ -1166,7 +1253,9 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev) | |||
1166 | } | 1253 | } |
1167 | } | 1254 | } |
1168 | 1255 | ||
1256 | out: | ||
1169 | dev_err(hr_dev->dev, "Func clear fail.\n"); | 1257 | dev_err(hr_dev->dev, "Func clear fail.\n"); |
1258 | hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag); | ||
1170 | } | 1259 | } |
1171 | 1260 | ||
1172 | static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev) | 1261 | static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev) |
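The new hns_roce_func_clr_chk_rst()/hns_roce_func_clr_rst_prc() helpers make the FUNC_CLEAR command reset-aware: before issuing it, and on every poll iteration, the driver compares the hnae3 handle's reset counter and reset-in-progress flags against the values it saved, and if a reset has started or completed it treats the function as already cleared instead of reporting a bogus failure. The control flow reduces to roughly the following shape; resetting(), reset_count() and cmd_done() are illustrative stand-ins for the ae_ops callbacks and the mailbox poll:

/* Sketch: poll for a command to finish while watching for a concurrent
 * reset that would make the command moot.
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/types.h>

#define POLL_MS		40
#define TIMEOUT_MS	50000

static bool clear_function(struct device *dev, u64 saved_cnt,
			   bool (*resetting)(void), u64 (*reset_count)(void),
			   bool (*cmd_done)(void))
{
	long end = TIMEOUT_MS;

	if (resetting() || reset_count() != saved_cnt)
		return true;		/* reset already cleaned up for us */

	/* ... issue the clear command here ... */

	while (end > 0) {
		if (resetting() || reset_count() != saved_cnt)
			return true;	/* a reset raced with the command */
		if (cmd_done())
			return true;
		msleep(POLL_MS);
		end -= POLL_MS;
	}

	dev_warn(dev, "function clear timed out\n");
	return false;
}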
@@ -1298,7 +1387,7 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, | |||
1298 | 1387 | ||
1299 | swt = (struct hns_roce_vf_switch *)desc.data; | 1388 | swt = (struct hns_roce_vf_switch *)desc.data; |
1300 | hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true); | 1389 | hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true); |
1301 | swt->rocee_sel |= cpu_to_le16(HNS_ICL_SWITCH_CMD_ROCEE_SEL); | 1390 | swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL); |
1302 | roce_set_field(swt->fun_id, | 1391 | roce_set_field(swt->fun_id, |
1303 | VF_SWITCH_DATA_FUN_ID_VF_ID_M, | 1392 | VF_SWITCH_DATA_FUN_ID_VF_ID_M, |
1304 | VF_SWITCH_DATA_FUN_ID_VF_ID_S, | 1393 | VF_SWITCH_DATA_FUN_ID_VF_ID_S, |
@@ -1310,7 +1399,7 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, | |||
1310 | cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN); | 1399 | cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN); |
1311 | desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR); | 1400 | desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR); |
1312 | roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1); | 1401 | roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1); |
1313 | roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 1); | 1402 | roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0); |
1314 | roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1); | 1403 | roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1); |
1315 | 1404 | ||
1316 | return hns_roce_cmq_send(hr_dev, &desc, 1); | 1405 | return hns_roce_cmq_send(hr_dev, &desc, 1); |
@@ -1724,9 +1813,10 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev, | |||
1724 | desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); | 1813 | desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); |
1725 | 1814 | ||
1726 | if (i == 0) { | 1815 | if (i == 0) { |
1727 | req_a->base_addr_l = link_tbl->table.map & 0xffffffff; | 1816 | req_a->base_addr_l = |
1728 | req_a->base_addr_h = (link_tbl->table.map >> 32) & | 1817 | cpu_to_le32(link_tbl->table.map & 0xffffffff); |
1729 | 0xffffffff; | 1818 | req_a->base_addr_h = |
1819 | cpu_to_le32(link_tbl->table.map >> 32); | ||
1730 | roce_set_field(req_a->depth_pgsz_init_en, | 1820 | roce_set_field(req_a->depth_pgsz_init_en, |
1731 | CFG_LLM_QUE_DEPTH_M, | 1821 | CFG_LLM_QUE_DEPTH_M, |
1732 | CFG_LLM_QUE_DEPTH_S, | 1822 | CFG_LLM_QUE_DEPTH_S, |
@@ -1735,13 +1825,15 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev, | |||
1735 | CFG_LLM_QUE_PGSZ_M, | 1825 | CFG_LLM_QUE_PGSZ_M, |
1736 | CFG_LLM_QUE_PGSZ_S, | 1826 | CFG_LLM_QUE_PGSZ_S, |
1737 | link_tbl->pg_sz); | 1827 | link_tbl->pg_sz); |
1738 | req_a->head_ba_l = entry[0].blk_ba0; | 1828 | req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0); |
1739 | req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr; | 1829 | req_a->head_ba_h_nxtptr = |
1830 | cpu_to_le32(entry[0].blk_ba1_nxt_ptr); | ||
1740 | roce_set_field(req_a->head_ptr, | 1831 | roce_set_field(req_a->head_ptr, |
1741 | CFG_LLM_HEAD_PTR_M, | 1832 | CFG_LLM_HEAD_PTR_M, |
1742 | CFG_LLM_HEAD_PTR_S, 0); | 1833 | CFG_LLM_HEAD_PTR_S, 0); |
1743 | } else { | 1834 | } else { |
1744 | req_b->tail_ba_l = entry[page_num - 1].blk_ba0; | 1835 | req_b->tail_ba_l = |
1836 | cpu_to_le32(entry[page_num - 1].blk_ba0); | ||
1745 | roce_set_field(req_b->tail_ba_h, | 1837 | roce_set_field(req_b->tail_ba_h, |
1746 | CFG_LLM_TAIL_BA_H_M, | 1838 | CFG_LLM_TAIL_BA_H_M, |
1747 | CFG_LLM_TAIL_BA_H_S, | 1839 | CFG_LLM_TAIL_BA_H_S, |
@@ -1817,17 +1909,13 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev, | |||
1817 | 1909 | ||
1818 | link_tbl->pg_list[i].map = t; | 1910 | link_tbl->pg_list[i].map = t; |
1819 | 1911 | ||
1820 | entry[i].blk_ba0 = (t >> 12) & 0xffffffff; | 1912 | entry[i].blk_ba0 = (u32)(t >> 12); |
1821 | roce_set_field(entry[i].blk_ba1_nxt_ptr, | 1913 | entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44); |
1822 | HNS_ROCE_LINK_TABLE_BA1_M, | ||
1823 | HNS_ROCE_LINK_TABLE_BA1_S, | ||
1824 | t >> 44); | ||
1825 | 1914 | ||
1826 | if (i < (pg_num - 1)) | 1915 | if (i < (pg_num - 1)) |
1827 | roce_set_field(entry[i].blk_ba1_nxt_ptr, | 1916 | entry[i].blk_ba1_nxt_ptr |= |
1828 | HNS_ROCE_LINK_TABLE_NXT_PTR_M, | 1917 | (i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S; |
1829 | HNS_ROCE_LINK_TABLE_NXT_PTR_S, | 1918 | |
1830 | i + 1); | ||
1831 | } | 1919 | } |
1832 | link_tbl->npages = pg_num; | 1920 | link_tbl->npages = pg_num; |
1833 | link_tbl->pg_sz = buf_chk_sz; | 1921 | link_tbl->pg_sz = buf_chk_sz; |
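With the BA1 and NXT_PTR field macros gone, each link-table entry packs the DMA address directly: blk_ba0 takes address bits 43..12, blk_ba1_nxt_ptr takes the bits from 44 up in its low part, and the index of the next page is OR'd in above the next-pointer shift. A simplified sketch of that packing; NXT_PTR_SHIFT and struct link_entry are illustrative stand-ins for the driver's definitions:

/* Sketch: pack a page-aligned DMA address and a "next page" index into
 * two 32-bit words, as the link table entries do.
 */
#include <linux/types.h>

#define NXT_PTR_SHIFT	20

struct link_entry {
	u32 blk_ba0;		/* address bits [43:12] */
	u32 blk_ba1_nxt_ptr;	/* remaining high bits plus next index */
};

static void fill_link_entry(struct link_entry *e, dma_addr_t map,
			    u32 next_idx, bool has_next)
{
	e->blk_ba0 = (u32)(map >> 12);
	e->blk_ba1_nxt_ptr = (u32)(map >> 44);
	if (has_next)
		e->blk_ba1_nxt_ptr |= next_idx << NXT_PTR_SHIFT;
}

The entries stay in CPU order here; they are converted with cpu_to_le32() only when copied into the command descriptors, as the config_link_table hunk above shows.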
@@ -1888,7 +1976,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) | |||
1888 | goto err_tpq_init_failed; | 1976 | goto err_tpq_init_failed; |
1889 | } | 1977 | } |
1890 | 1978 | ||
1891 | /* Alloc memory for QPC Timer buffer space chunk*/ | 1979 | /* Alloc memory for QPC Timer buffer space chunk */ |
1892 | for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num; | 1980 | for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num; |
1893 | qpc_count++) { | 1981 | qpc_count++) { |
1894 | ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table, | 1982 | ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table, |
@@ -1899,7 +1987,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) | |||
1899 | } | 1987 | } |
1900 | } | 1988 | } |
1901 | 1989 | ||
1902 | /* Alloc memory for CQC Timer buffer space chunk*/ | 1990 | /* Alloc memory for CQC Timer buffer space chunk */ |
1903 | for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num; | 1991 | for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num; |
1904 | cqc_count++) { | 1992 | cqc_count++) { |
1905 | ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table, | 1993 | ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table, |
@@ -1952,7 +2040,7 @@ static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev) | |||
1952 | if (status) | 2040 | if (status) |
1953 | return status; | 2041 | return status; |
1954 | 2042 | ||
1955 | return cpu_to_le32(mb_st->mb_status_hw_run); | 2043 | return le32_to_cpu(mb_st->mb_status_hw_run); |
1956 | } | 2044 | } |
1957 | 2045 | ||
1958 | static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev) | 2046 | static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev) |
@@ -1978,10 +2066,10 @@ static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param, | |||
1978 | 2066 | ||
1979 | hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false); | 2067 | hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false); |
1980 | 2068 | ||
1981 | mb->in_param_l = cpu_to_le64(in_param); | 2069 | mb->in_param_l = cpu_to_le32(in_param); |
1982 | mb->in_param_h = cpu_to_le64(in_param) >> 32; | 2070 | mb->in_param_h = cpu_to_le32(in_param >> 32); |
1983 | mb->out_param_l = cpu_to_le64(out_param); | 2071 | mb->out_param_l = cpu_to_le32(out_param); |
1984 | mb->out_param_h = cpu_to_le64(out_param) >> 32; | 2072 | mb->out_param_h = cpu_to_le32(out_param >> 32); |
1985 | mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op); | 2073 | mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op); |
1986 | mb->token_event_en = cpu_to_le32(event << 16 | token); | 2074 | mb->token_event_en = cpu_to_le32(event << 16 | token); |
1987 | 2075 | ||
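The mailbox hunks fix two endianness issues: the HW_RUN status is now read back with le32_to_cpu() (on little-endian the old cpu_to_le32() produced the same bytes, but it is the wrong direction and trips sparse), and the 64-bit in/out parameters are split into low and high halves in CPU order before each half is converted, instead of converting the whole u64 and shifting the already swapped result. A minimal sketch of the split; struct mbox_words is illustrative:

/* Sketch: splitting a 64-bit mailbox parameter into two __le32 words.
 * Shift in CPU order first, then convert each half.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct mbox_words {
	__le32 in_param_l;
	__le32 in_param_h;
};

static void set_in_param(struct mbox_words *mb, u64 in_param)
{
	mb->in_param_l = cpu_to_le32(lower_32_bits(in_param));
	mb->in_param_h = cpu_to_le32(upper_32_bits(in_param));
	/* buggy shape: cpu_to_le64(in_param) >> 32 shifts swapped bytes */
}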
@@ -2123,7 +2211,7 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, | |||
2123 | roce_set_field(smac_tb->vf_smac_h_rsv, | 2211 | roce_set_field(smac_tb->vf_smac_h_rsv, |
2124 | CFG_SMAC_TB_VF_SMAC_H_M, | 2212 | CFG_SMAC_TB_VF_SMAC_H_M, |
2125 | CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h); | 2213 | CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h); |
2126 | smac_tb->vf_smac_l = reg_smac_l; | 2214 | smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l); |
2127 | 2215 | ||
2128 | return hns_roce_cmq_send(hr_dev, &desc, 1); | 2216 | return hns_roce_cmq_send(hr_dev, &desc, 1); |
2129 | } | 2217 | } |
@@ -2409,7 +2497,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, | |||
2409 | 2497 | ||
2410 | for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index); | 2498 | for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index); |
2411 | ++prod_index) { | 2499 | ++prod_index) { |
2412 | if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe) | 2500 | if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe) |
2413 | break; | 2501 | break; |
2414 | } | 2502 | } |
2415 | 2503 | ||
@@ -2478,29 +2566,26 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, | |||
2478 | V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent)); | 2566 | V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent)); |
2479 | roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M, | 2567 | roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M, |
2480 | V2_CQC_BYTE_4_CEQN_S, vector); | 2568 | V2_CQC_BYTE_4_CEQN_S, vector); |
2481 | cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn); | ||
2482 | 2569 | ||
2483 | roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M, | 2570 | roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M, |
2484 | V2_CQC_BYTE_8_CQN_S, hr_cq->cqn); | 2571 | V2_CQC_BYTE_8_CQN_S, hr_cq->cqn); |
2485 | 2572 | ||
2486 | cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT); | 2573 | cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT); |
2487 | cq_context->cqe_cur_blk_addr = | ||
2488 | cpu_to_le32(cq_context->cqe_cur_blk_addr); | ||
2489 | 2574 | ||
2490 | roce_set_field(cq_context->byte_16_hop_addr, | 2575 | roce_set_field(cq_context->byte_16_hop_addr, |
2491 | V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M, | 2576 | V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M, |
2492 | V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S, | 2577 | V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S, |
2493 | cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT))); | 2578 | mtts[0] >> (32 + PAGE_ADDR_SHIFT)); |
2494 | roce_set_field(cq_context->byte_16_hop_addr, | 2579 | roce_set_field(cq_context->byte_16_hop_addr, |
2495 | V2_CQC_BYTE_16_CQE_HOP_NUM_M, | 2580 | V2_CQC_BYTE_16_CQE_HOP_NUM_M, |
2496 | V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num == | 2581 | V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num == |
2497 | HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num); | 2582 | HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num); |
2498 | 2583 | ||
2499 | cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT); | 2584 | cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT); |
2500 | roce_set_field(cq_context->byte_24_pgsz_addr, | 2585 | roce_set_field(cq_context->byte_24_pgsz_addr, |
2501 | V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M, | 2586 | V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M, |
2502 | V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S, | 2587 | V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S, |
2503 | cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT))); | 2588 | mtts[1] >> (32 + PAGE_ADDR_SHIFT)); |
2504 | roce_set_field(cq_context->byte_24_pgsz_addr, | 2589 | roce_set_field(cq_context->byte_24_pgsz_addr, |
2505 | V2_CQC_BYTE_24_CQE_BA_PG_SZ_M, | 2590 | V2_CQC_BYTE_24_CQE_BA_PG_SZ_M, |
2506 | V2_CQC_BYTE_24_CQE_BA_PG_SZ_S, | 2591 | V2_CQC_BYTE_24_CQE_BA_PG_SZ_S, |
@@ -2510,7 +2595,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, | |||
2510 | V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S, | 2595 | V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S, |
2511 | hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET); | 2596 | hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET); |
2512 | 2597 | ||
2513 | cq_context->cqe_ba = (u32)(dma_handle >> 3); | 2598 | cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3); |
2514 | 2599 | ||
2515 | roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M, | 2600 | roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M, |
2516 | V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3))); | 2601 | V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3))); |
@@ -2523,7 +2608,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, | |||
2523 | V2_CQC_BYTE_44_DB_RECORD_ADDR_M, | 2608 | V2_CQC_BYTE_44_DB_RECORD_ADDR_M, |
2524 | V2_CQC_BYTE_44_DB_RECORD_ADDR_S, | 2609 | V2_CQC_BYTE_44_DB_RECORD_ADDR_S, |
2525 | ((u32)hr_cq->db.dma) >> 1); | 2610 | ((u32)hr_cq->db.dma) >> 1); |
2526 | cq_context->db_record_addr = hr_cq->db.dma >> 32; | 2611 | cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32); |
2527 | 2612 | ||
2528 | roce_set_field(cq_context->byte_56_cqe_period_maxcnt, | 2613 | roce_set_field(cq_context->byte_56_cqe_period_maxcnt, |
2529 | V2_CQC_BYTE_56_CQ_MAX_CNT_M, | 2614 | V2_CQC_BYTE_56_CQ_MAX_CNT_M, |
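The CQC hunks remove a double-conversion idiom: several fields were assigned a CPU-order value and then overwritten with cpu_to_le32() of themselves, and roce_set_field() was handed cpu_to_le32() arguments even though the helper takes CPU-order values and does its own packing into the __le32 context word. The corrected idiom converts exactly once, at the assignment of a __le32 field. A contrast sketch; set_field() is a simplified stand-in for roce_set_field() and the mask is illustrative:

/* Sketch: convert once when storing into a __le32 context field; field
 * helpers take plain CPU-order values.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static void set_field(__le32 *word, u32 mask, int shift, u32 val)
{
	u32 v = le32_to_cpu(*word);

	v &= ~mask;
	v |= (val << shift) & mask;
	*word = cpu_to_le32(v);
}

static void write_blk_addr(__le32 *lo, __le32 *hi, u64 addr)
{
	/* wrong: *lo = (u32)(addr >> 12); *lo = cpu_to_le32(*lo); */
	*lo = cpu_to_le32(addr >> 12);			/* one conversion */
	set_field(hi, 0xffffff, 0, addr >> 44);		/* CPU-order value */
}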
@@ -2541,7 +2626,7 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq, | |||
2541 | struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); | 2626 | struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); |
2542 | struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); | 2627 | struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); |
2543 | u32 notification_flag; | 2628 | u32 notification_flag; |
2544 | u32 doorbell[2]; | 2629 | __le32 doorbell[2]; |
2545 | 2630 | ||
2546 | doorbell[0] = 0; | 2631 | doorbell[0] = 0; |
2547 | doorbell[1] = 0; | 2632 | doorbell[1] = 0; |
@@ -2668,9 +2753,9 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, | |||
2668 | ++wq->tail; | 2753 | ++wq->tail; |
2669 | } else if ((*cur_qp)->ibqp.srq) { | 2754 | } else if ((*cur_qp)->ibqp.srq) { |
2670 | srq = to_hr_srq((*cur_qp)->ibqp.srq); | 2755 | srq = to_hr_srq((*cur_qp)->ibqp.srq); |
2671 | wqe_ctr = le16_to_cpu(roce_get_field(cqe->byte_4, | 2756 | wqe_ctr = (u16)roce_get_field(cqe->byte_4, |
2672 | V2_CQE_BYTE_4_WQE_INDX_M, | 2757 | V2_CQE_BYTE_4_WQE_INDX_M, |
2673 | V2_CQE_BYTE_4_WQE_INDX_S)); | 2758 | V2_CQE_BYTE_4_WQE_INDX_S); |
2674 | wc->wr_id = srq->wrid[wqe_ctr]; | 2759 | wc->wr_id = srq->wrid[wqe_ctr]; |
2675 | hns_roce_free_srq_wqe(srq, wqe_ctr); | 2760 | hns_roce_free_srq_wqe(srq, wqe_ctr); |
2676 | } else { | 2761 | } else { |
@@ -2862,15 +2947,16 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, | |||
2862 | wc->smac[5] = roce_get_field(cqe->byte_28, | 2947 | wc->smac[5] = roce_get_field(cqe->byte_28, |
2863 | V2_CQE_BYTE_28_SMAC_5_M, | 2948 | V2_CQE_BYTE_28_SMAC_5_M, |
2864 | V2_CQE_BYTE_28_SMAC_5_S); | 2949 | V2_CQE_BYTE_28_SMAC_5_S); |
2950 | wc->wc_flags |= IB_WC_WITH_SMAC; | ||
2865 | if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) { | 2951 | if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) { |
2866 | wc->vlan_id = (u16)roce_get_field(cqe->byte_28, | 2952 | wc->vlan_id = (u16)roce_get_field(cqe->byte_28, |
2867 | V2_CQE_BYTE_28_VID_M, | 2953 | V2_CQE_BYTE_28_VID_M, |
2868 | V2_CQE_BYTE_28_VID_S); | 2954 | V2_CQE_BYTE_28_VID_S); |
2955 | wc->wc_flags |= IB_WC_WITH_VLAN; | ||
2869 | } else { | 2956 | } else { |
2870 | wc->vlan_id = 0xffff; | 2957 | wc->vlan_id = 0xffff; |
2871 | } | 2958 | } |
2872 | 2959 | ||
2873 | wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); | ||
2874 | wc->network_hdr_type = roce_get_field(cqe->byte_28, | 2960 | wc->network_hdr_type = roce_get_field(cqe->byte_28, |
2875 | V2_CQE_BYTE_28_PORT_TYPE_M, | 2961 | V2_CQE_BYTE_28_PORT_TYPE_M, |
2876 | V2_CQE_BYTE_28_PORT_TYPE_S); | 2962 | V2_CQE_BYTE_28_PORT_TYPE_S); |
@@ -2905,11 +2991,49 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries, | |||
2905 | return npolled; | 2991 | return npolled; |
2906 | } | 2992 | } |
2907 | 2993 | ||
2994 | static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type, | ||
2995 | int step_idx) | ||
2996 | { | ||
2997 | int op; | ||
2998 | |||
2999 | if (type == HEM_TYPE_SCCC && step_idx) | ||
3000 | return -EINVAL; | ||
3001 | |||
3002 | switch (type) { | ||
3003 | case HEM_TYPE_QPC: | ||
3004 | op = HNS_ROCE_CMD_WRITE_QPC_BT0; | ||
3005 | break; | ||
3006 | case HEM_TYPE_MTPT: | ||
3007 | op = HNS_ROCE_CMD_WRITE_MPT_BT0; | ||
3008 | break; | ||
3009 | case HEM_TYPE_CQC: | ||
3010 | op = HNS_ROCE_CMD_WRITE_CQC_BT0; | ||
3011 | break; | ||
3012 | case HEM_TYPE_SRQC: | ||
3013 | op = HNS_ROCE_CMD_WRITE_SRQC_BT0; | ||
3014 | break; | ||
3015 | case HEM_TYPE_SCCC: | ||
3016 | op = HNS_ROCE_CMD_WRITE_SCCC_BT0; | ||
3017 | break; | ||
3018 | case HEM_TYPE_QPC_TIMER: | ||
3019 | op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0; | ||
3020 | break; | ||
3021 | case HEM_TYPE_CQC_TIMER: | ||
3022 | op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0; | ||
3023 | break; | ||
3024 | default: | ||
3025 | dev_warn(hr_dev->dev, | ||
3026 | "Table %d not to be written by mailbox!\n", type); | ||
3027 | return -EINVAL; | ||
3028 | } | ||
3029 | |||
3030 | return op + step_idx; | ||
3031 | } | ||
3032 | |||
2908 | static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, | 3033 | static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, |
2909 | struct hns_roce_hem_table *table, int obj, | 3034 | struct hns_roce_hem_table *table, int obj, |
2910 | int step_idx) | 3035 | int step_idx) |
2911 | { | 3036 | { |
2912 | struct device *dev = hr_dev->dev; | ||
2913 | struct hns_roce_cmd_mailbox *mailbox; | 3037 | struct hns_roce_cmd_mailbox *mailbox; |
2914 | struct hns_roce_hem_iter iter; | 3038 | struct hns_roce_hem_iter iter; |
2915 | struct hns_roce_hem_mhop mhop; | 3039 | struct hns_roce_hem_mhop mhop; |
@@ -2922,7 +3046,7 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, | |||
2922 | u64 bt_ba = 0; | 3046 | u64 bt_ba = 0; |
2923 | u32 chunk_ba_num; | 3047 | u32 chunk_ba_num; |
2924 | u32 hop_num; | 3048 | u32 hop_num; |
2925 | u16 op = 0xff; | 3049 | int op; |
2926 | 3050 | ||
2927 | if (!hns_roce_check_whether_mhop(hr_dev, table->type)) | 3051 | if (!hns_roce_check_whether_mhop(hr_dev, table->type)) |
2928 | return 0; | 3052 | return 0; |
@@ -2944,39 +3068,10 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, | |||
2944 | hem_idx = i; | 3068 | hem_idx = i; |
2945 | } | 3069 | } |
2946 | 3070 | ||
2947 | switch (table->type) { | 3071 | op = get_op_for_set_hem(hr_dev, table->type, step_idx); |
2948 | case HEM_TYPE_QPC: | 3072 | if (op == -EINVAL) |
2949 | op = HNS_ROCE_CMD_WRITE_QPC_BT0; | ||
2950 | break; | ||
2951 | case HEM_TYPE_MTPT: | ||
2952 | op = HNS_ROCE_CMD_WRITE_MPT_BT0; | ||
2953 | break; | ||
2954 | case HEM_TYPE_CQC: | ||
2955 | op = HNS_ROCE_CMD_WRITE_CQC_BT0; | ||
2956 | break; | ||
2957 | case HEM_TYPE_SRQC: | ||
2958 | op = HNS_ROCE_CMD_WRITE_SRQC_BT0; | ||
2959 | break; | ||
2960 | case HEM_TYPE_SCCC: | ||
2961 | op = HNS_ROCE_CMD_WRITE_SCCC_BT0; | ||
2962 | break; | ||
2963 | case HEM_TYPE_QPC_TIMER: | ||
2964 | op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0; | ||
2965 | break; | ||
2966 | case HEM_TYPE_CQC_TIMER: | ||
2967 | op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0; | ||
2968 | break; | ||
2969 | default: | ||
2970 | dev_warn(dev, "Table %d not to be written by mailbox!\n", | ||
2971 | table->type); | ||
2972 | return 0; | ||
2973 | } | ||
2974 | |||
2975 | if (table->type == HEM_TYPE_SCCC && step_idx) | ||
2976 | return 0; | 3073 | return 0; |
2977 | 3074 | ||
2978 | op += step_idx; | ||
2979 | |||
2980 | mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); | 3075 | mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); |
2981 | if (IS_ERR(mailbox)) | 3076 | if (IS_ERR(mailbox)) |
2982 | return PTR_ERR(mailbox); | 3077 | return PTR_ERR(mailbox); |
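Factoring the opcode choice into get_op_for_set_hem() separates "which mailbox command writes this HEM table" from the mailbox plumbing and folds in the SCCC-with-nonzero-step special case; the caller treats -EINVAL as "nothing to write". A reduced sketch of the mapping; the TYPE_ and OP_ enumerators are illustrative, not the driver's real values:

/* Sketch: map a table type plus step index to a mailbox opcode, where
 * consecutive opcodes address BT level 0, 1 and 2.
 */
#include <linux/errno.h>
#include <linux/types.h>

enum { TYPE_QPC, TYPE_MTPT, TYPE_SCCC, TYPE_OTHER };
enum { OP_WRITE_QPC_BT0 = 0x10, OP_WRITE_MPT_BT0 = 0x20, OP_WRITE_SCCC_BT0 = 0x30 };

static int get_write_op(u32 type, int step_idx)
{
	int op;

	if (type == TYPE_SCCC && step_idx)
		return -EINVAL;		/* SCCC only has a level-0 table */

	switch (type) {
	case TYPE_QPC:
		op = OP_WRITE_QPC_BT0;
		break;
	case TYPE_MTPT:
		op = OP_WRITE_MPT_BT0;
		break;
	case TYPE_SCCC:
		op = OP_WRITE_SCCC_BT0;
		break;
	default:
		return -EINVAL;
	}

	return op + step_idx;
}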
@@ -3118,6 +3213,43 @@ static void set_access_flags(struct hns_roce_qp *hr_qp, | |||
3118 | roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0); | 3213 | roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0); |
3119 | } | 3214 | } |
3120 | 3215 | ||
3216 | static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp, | ||
3217 | struct hns_roce_v2_qp_context *context, | ||
3218 | struct hns_roce_v2_qp_context *qpc_mask) | ||
3219 | { | ||
3220 | if (hr_qp->ibqp.qp_type == IB_QPT_GSI) | ||
3221 | roce_set_field(context->byte_4_sqpn_tst, | ||
3222 | V2_QPC_BYTE_4_SGE_SHIFT_M, | ||
3223 | V2_QPC_BYTE_4_SGE_SHIFT_S, | ||
3224 | ilog2((unsigned int)hr_qp->sge.sge_cnt)); | ||
3225 | else | ||
3226 | roce_set_field(context->byte_4_sqpn_tst, | ||
3227 | V2_QPC_BYTE_4_SGE_SHIFT_M, | ||
3228 | V2_QPC_BYTE_4_SGE_SHIFT_S, | ||
3229 | hr_qp->sq.max_gs > | ||
3230 | HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ? | ||
3231 | ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0); | ||
3232 | |||
3233 | roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, | ||
3234 | V2_QPC_BYTE_4_SGE_SHIFT_S, 0); | ||
3235 | |||
3236 | roce_set_field(context->byte_20_smac_sgid_idx, | ||
3237 | V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, | ||
3238 | ilog2((unsigned int)hr_qp->sq.wqe_cnt)); | ||
3239 | roce_set_field(qpc_mask->byte_20_smac_sgid_idx, | ||
3240 | V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0); | ||
3241 | |||
3242 | roce_set_field(context->byte_20_smac_sgid_idx, | ||
3243 | V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, | ||
3244 | (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || | ||
3245 | hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || | ||
3246 | hr_qp->ibqp.srq) ? 0 : | ||
3247 | ilog2((unsigned int)hr_qp->rq.wqe_cnt)); | ||
3248 | |||
3249 | roce_set_field(qpc_mask->byte_20_smac_sgid_idx, | ||
3250 | V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0); | ||
3251 | } | ||
3252 | |||
3121 | static void modify_qp_reset_to_init(struct ib_qp *ibqp, | 3253 | static void modify_qp_reset_to_init(struct ib_qp *ibqp, |
3122 | const struct ib_qp_attr *attr, | 3254 | const struct ib_qp_attr *attr, |
3123 | int attr_mask, | 3255 | int attr_mask, |
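set_qpc_wqe_cnt() gathers the SGE-shift, SQ-shift and RQ-shift programming that was previously duplicated in the reset-to-init and init-to-init paths (the removals follow in the next hunks), so the GSI/max_gs and SRQ/XRC special cases now live in one place. The queue sizes are stored in the context as log2 "shift" values; a small sketch of the SGE part, where SGE_IN_WQE stands in for HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE:

/* Sketch: the extended SGE area is used by GSI QPs always, and by RC/UC
 * QPs only when a WQE's inline SGE slots are not enough; otherwise the
 * shift is programmed as 0.  sge_cnt is assumed non-zero when used.
 */
#include <linux/log2.h>
#include <linux/types.h>

#define SGE_IN_WQE	2

static u32 sge_shift(bool is_gsi, u32 max_gs, u32 sge_cnt)
{
	if (is_gsi || max_gs > SGE_IN_WQE)
		return ilog2(sge_cnt);
	return 0;
}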
@@ -3138,21 +3270,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, | |||
3138 | roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M, | 3270 | roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M, |
3139 | V2_QPC_BYTE_4_TST_S, 0); | 3271 | V2_QPC_BYTE_4_TST_S, 0); |
3140 | 3272 | ||
3141 | if (ibqp->qp_type == IB_QPT_GSI) | ||
3142 | roce_set_field(context->byte_4_sqpn_tst, | ||
3143 | V2_QPC_BYTE_4_SGE_SHIFT_M, | ||
3144 | V2_QPC_BYTE_4_SGE_SHIFT_S, | ||
3145 | ilog2((unsigned int)hr_qp->sge.sge_cnt)); | ||
3146 | else | ||
3147 | roce_set_field(context->byte_4_sqpn_tst, | ||
3148 | V2_QPC_BYTE_4_SGE_SHIFT_M, | ||
3149 | V2_QPC_BYTE_4_SGE_SHIFT_S, | ||
3150 | hr_qp->sq.max_gs > 2 ? | ||
3151 | ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0); | ||
3152 | |||
3153 | roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, | ||
3154 | V2_QPC_BYTE_4_SGE_SHIFT_S, 0); | ||
3155 | |||
3156 | roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, | 3273 | roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, |
3157 | V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn); | 3274 | V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn); |
3158 | roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, | 3275 | roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, |
@@ -3168,19 +3285,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, | |||
3168 | roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M, | 3285 | roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M, |
3169 | V2_QPC_BYTE_20_RQWS_S, 0); | 3286 | V2_QPC_BYTE_20_RQWS_S, 0); |
3170 | 3287 | ||
3171 | roce_set_field(context->byte_20_smac_sgid_idx, | 3288 | set_qpc_wqe_cnt(hr_qp, context, qpc_mask); |
3172 | V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, | ||
3173 | ilog2((unsigned int)hr_qp->sq.wqe_cnt)); | ||
3174 | roce_set_field(qpc_mask->byte_20_smac_sgid_idx, | ||
3175 | V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0); | ||
3176 | |||
3177 | roce_set_field(context->byte_20_smac_sgid_idx, | ||
3178 | V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, | ||
3179 | (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || | ||
3180 | hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 : | ||
3181 | ilog2((unsigned int)hr_qp->rq.wqe_cnt)); | ||
3182 | roce_set_field(qpc_mask->byte_20_smac_sgid_idx, | ||
3183 | V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0); | ||
3184 | 3289 | ||
3185 | /* No VLAN need to set 0xFFF */ | 3290 | /* No VLAN need to set 0xFFF */ |
3186 | roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, | 3291 | roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, |
@@ -3225,7 +3330,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, | |||
3225 | roce_set_field(qpc_mask->byte_68_rq_db, | 3330 | roce_set_field(qpc_mask->byte_68_rq_db, |
3226 | V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M, | 3331 | V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M, |
3227 | V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0); | 3332 | V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0); |
3228 | context->rq_db_record_addr = hr_qp->rdb.dma >> 32; | 3333 | context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32); |
3229 | qpc_mask->rq_db_record_addr = 0; | 3334 | qpc_mask->rq_db_record_addr = 0; |
3230 | 3335 | ||
3231 | roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, | 3336 | roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, |
@@ -3456,22 +3561,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, | |||
3456 | roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M, | 3561 | roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M, |
3457 | V2_QPC_BYTE_4_TST_S, 0); | 3562 | V2_QPC_BYTE_4_TST_S, 0); |
3458 | 3563 | ||
3459 | if (ibqp->qp_type == IB_QPT_GSI) | ||
3460 | roce_set_field(context->byte_4_sqpn_tst, | ||
3461 | V2_QPC_BYTE_4_SGE_SHIFT_M, | ||
3462 | V2_QPC_BYTE_4_SGE_SHIFT_S, | ||
3463 | ilog2((unsigned int)hr_qp->sge.sge_cnt)); | ||
3464 | else | ||
3465 | roce_set_field(context->byte_4_sqpn_tst, | ||
3466 | V2_QPC_BYTE_4_SGE_SHIFT_M, | ||
3467 | V2_QPC_BYTE_4_SGE_SHIFT_S, | ||
3468 | hr_qp->sq.max_gs > | ||
3469 | HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ? | ||
3470 | ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0); | ||
3471 | |||
3472 | roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, | ||
3473 | V2_QPC_BYTE_4_SGE_SHIFT_S, 0); | ||
3474 | |||
3475 | if (attr_mask & IB_QP_ACCESS_FLAGS) { | 3564 | if (attr_mask & IB_QP_ACCESS_FLAGS) { |
3476 | roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, | 3565 | roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, |
3477 | !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ)); | 3566 | !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ)); |
@@ -3506,20 +3595,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, | |||
3506 | 0); | 3595 | 0); |
3507 | } | 3596 | } |
3508 | 3597 | ||
3509 | roce_set_field(context->byte_20_smac_sgid_idx, | ||
3510 | V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, | ||
3511 | ilog2((unsigned int)hr_qp->sq.wqe_cnt)); | ||
3512 | roce_set_field(qpc_mask->byte_20_smac_sgid_idx, | ||
3513 | V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0); | ||
3514 | |||
3515 | roce_set_field(context->byte_20_smac_sgid_idx, | ||
3516 | V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, | ||
3517 | (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || | ||
3518 | hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 : | ||
3519 | ilog2((unsigned int)hr_qp->rq.wqe_cnt)); | ||
3520 | roce_set_field(qpc_mask->byte_20_smac_sgid_idx, | ||
3521 | V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0); | ||
3522 | |||
3523 | roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, | 3598 | roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, |
3524 | V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn); | 3599 | V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn); |
3525 | roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, | 3600 | roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, |
@@ -3638,7 +3713,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, | |||
3638 | } | 3713 | } |
3639 | 3714 | ||
3640 | dmac = (u8 *)attr->ah_attr.roce.dmac; | 3715 | dmac = (u8 *)attr->ah_attr.roce.dmac; |
3641 | context->wqe_sge_ba = (u32)(wqe_sge_ba >> 3); | 3716 | context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3); |
3642 | qpc_mask->wqe_sge_ba = 0; | 3717 | qpc_mask->wqe_sge_ba = 0; |
3643 | 3718 | ||
3644 | /* | 3719 | /* |
@@ -3694,7 +3769,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, | |||
3694 | V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M, | 3769 | V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M, |
3695 | V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0); | 3770 | V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0); |
3696 | 3771 | ||
3697 | context->rq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT); | 3772 | context->rq_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT); |
3698 | qpc_mask->rq_cur_blk_addr = 0; | 3773 | qpc_mask->rq_cur_blk_addr = 0; |
3699 | 3774 | ||
3700 | roce_set_field(context->byte_92_srq_info, | 3775 | roce_set_field(context->byte_92_srq_info, |
@@ -3705,7 +3780,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, | |||
3705 | V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M, | 3780 | V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M, |
3706 | V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0); | 3781 | V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0); |
3707 | 3782 | ||
3708 | context->rq_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT); | 3783 | context->rq_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT); |
3709 | qpc_mask->rq_nxt_blk_addr = 0; | 3784 | qpc_mask->rq_nxt_blk_addr = 0; |
3710 | 3785 | ||
3711 | roce_set_field(context->byte_104_rq_sge, | 3786 | roce_set_field(context->byte_104_rq_sge, |
@@ -3720,7 +3795,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, | |||
3720 | V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4); | 3795 | V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4); |
3721 | roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M, | 3796 | roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M, |
3722 | V2_QPC_BYTE_132_TRRL_BA_S, 0); | 3797 | V2_QPC_BYTE_132_TRRL_BA_S, 0); |
3723 | context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4)); | 3798 | context->trrl_ba = cpu_to_le32(dma_handle_3 >> (16 + 4)); |
3724 | qpc_mask->trrl_ba = 0; | 3799 | qpc_mask->trrl_ba = 0; |
3725 | roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M, | 3800 | roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M, |
3726 | V2_QPC_BYTE_140_TRRL_BA_S, | 3801 | V2_QPC_BYTE_140_TRRL_BA_S, |
@@ -3728,7 +3803,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, | |||
3728 | roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M, | 3803 | roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M, |
3729 | V2_QPC_BYTE_140_TRRL_BA_S, 0); | 3804 | V2_QPC_BYTE_140_TRRL_BA_S, 0); |
3730 | 3805 | ||
3731 | context->irrl_ba = (u32)(dma_handle_2 >> 6); | 3806 | context->irrl_ba = cpu_to_le32(dma_handle_2 >> 6); |
3732 | qpc_mask->irrl_ba = 0; | 3807 | qpc_mask->irrl_ba = 0; |
3733 | roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M, | 3808 | roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M, |
3734 | V2_QPC_BYTE_208_IRRL_BA_S, | 3809 | V2_QPC_BYTE_208_IRRL_BA_S, |
@@ -3876,7 +3951,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, | |||
3876 | * we should set all bits of the relevant fields in context mask to | 3951 | * we should set all bits of the relevant fields in context mask to |
3877 | * 0 at the same time, else set them to 0x1. | 3952 | * 0 at the same time, else set them to 0x1. |
3878 | */ | 3953 | */ |
3879 | context->sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT); | 3954 | context->sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT); |
3880 | roce_set_field(context->byte_168_irrl_idx, | 3955 | roce_set_field(context->byte_168_irrl_idx, |
3881 | V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M, | 3956 | V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M, |
3882 | V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, | 3957 | V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, |
@@ -3888,8 +3963,8 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, | |||
3888 | 3963 | ||
3889 | context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) || | 3964 | context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) || |
3890 | hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ? | 3965 | hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ? |
3891 | ((u32)(sge_cur_blk >> | 3966 | cpu_to_le32(sge_cur_blk >> |
3892 | PAGE_ADDR_SHIFT)) : 0; | 3967 | PAGE_ADDR_SHIFT) : 0; |
3893 | roce_set_field(context->byte_184_irrl_idx, | 3968 | roce_set_field(context->byte_184_irrl_idx, |
3894 | V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M, | 3969 | V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M, |
3895 | V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, | 3970 | V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, |
@@ -3902,7 +3977,8 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, | |||
3902 | V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M, | 3977 | V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M, |
3903 | V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0); | 3978 | V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0); |
3904 | 3979 | ||
3905 | context->rx_sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT); | 3980 | context->rx_sq_cur_blk_addr = |
3981 | cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT); | ||
3906 | roce_set_field(context->byte_232_irrl_sge, | 3982 | roce_set_field(context->byte_232_irrl_sge, |
3907 | V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M, | 3983 | V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M, |
3908 | V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, | 3984 | V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, |
@@ -3974,30 +4050,119 @@ static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state, | |||
3974 | 4050 | ||
3975 | } | 4051 | } |
3976 | 4052 | ||
3977 | static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, | 4053 | static int hns_roce_v2_set_path(struct ib_qp *ibqp, |
3978 | const struct ib_qp_attr *attr, | 4054 | const struct ib_qp_attr *attr, |
3979 | int attr_mask, enum ib_qp_state cur_state, | 4055 | int attr_mask, |
3980 | enum ib_qp_state new_state) | 4056 | struct hns_roce_v2_qp_context *context, |
4057 | struct hns_roce_v2_qp_context *qpc_mask) | ||
3981 | { | 4058 | { |
4059 | const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); | ||
3982 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | 4060 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
3983 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | 4061 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
3984 | struct hns_roce_v2_qp_context *context; | 4062 | const struct ib_gid_attr *gid_attr = NULL; |
3985 | struct hns_roce_v2_qp_context *qpc_mask; | 4063 | int is_roce_protocol; |
3986 | struct device *dev = hr_dev->dev; | 4064 | bool is_udp = false; |
3987 | int ret = -EINVAL; | 4065 | u16 vlan = 0xffff; |
4066 | u8 ib_port; | ||
4067 | u8 hr_port; | ||
4068 | int ret; | ||
3988 | 4069 | ||
3989 | context = kcalloc(2, sizeof(*context), GFP_ATOMIC); | 4070 | ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1; |
3990 | if (!context) | 4071 | hr_port = ib_port - 1; |
3991 | return -ENOMEM; | 4072 | is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) && |
4073 | rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH; | ||
4074 | |||
4075 | if (is_roce_protocol) { | ||
4076 | gid_attr = attr->ah_attr.grh.sgid_attr; | ||
4077 | ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL); | ||
4078 | if (ret) | ||
4079 | return ret; | ||
4080 | |||
4081 | if (gid_attr) | ||
4082 | is_udp = (gid_attr->gid_type == | ||
4083 | IB_GID_TYPE_ROCE_UDP_ENCAP); | ||
4084 | } | ||
4085 | |||
4086 | if (vlan < VLAN_CFI_MASK) { | ||
4087 | roce_set_bit(context->byte_76_srqn_op_en, | ||
4088 | V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1); | ||
4089 | roce_set_bit(qpc_mask->byte_76_srqn_op_en, | ||
4090 | V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0); | ||
4091 | roce_set_bit(context->byte_168_irrl_idx, | ||
4092 | V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1); | ||
4093 | roce_set_bit(qpc_mask->byte_168_irrl_idx, | ||
4094 | V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0); | ||
4095 | } | ||
4096 | |||
4097 | roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, | ||
4098 | V2_QPC_BYTE_24_VLAN_ID_S, vlan); | ||
4099 | roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, | ||
4100 | V2_QPC_BYTE_24_VLAN_ID_S, 0); | ||
4101 | |||
4102 | if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) { | ||
4103 | dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n", | ||
4104 | grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]); | ||
4105 | return -EINVAL; | ||
4106 | } | ||
4107 | |||
4108 | if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) { | ||
4109 | dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n"); | ||
4110 | return -EINVAL; | ||
4111 | } | ||
4112 | |||
4113 | roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M, | ||
4114 | V2_QPC_BYTE_52_UDPSPN_S, | ||
4115 | is_udp ? 0x12b7 : 0); | ||
4116 | |||
4117 | roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M, | ||
4118 | V2_QPC_BYTE_52_UDPSPN_S, 0); | ||
4119 | |||
4120 | roce_set_field(context->byte_20_smac_sgid_idx, | ||
4121 | V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, | ||
4122 | grh->sgid_index); | ||
4123 | |||
4124 | roce_set_field(qpc_mask->byte_20_smac_sgid_idx, | ||
4125 | V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0); | ||
4126 | |||
4127 | roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M, | ||
4128 | V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit); | ||
4129 | roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M, | ||
4130 | V2_QPC_BYTE_24_HOP_LIMIT_S, 0); | ||
4131 | |||
4132 | if (hr_dev->pci_dev->revision == 0x21 && is_udp) | ||
4133 | roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, | ||
4134 | V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2); | ||
4135 | else | ||
4136 | roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, | ||
4137 | V2_QPC_BYTE_24_TC_S, grh->traffic_class); | ||
4138 | roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, | ||
4139 | V2_QPC_BYTE_24_TC_S, 0); | ||
4140 | roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, | ||
4141 | V2_QPC_BYTE_28_FL_S, grh->flow_label); | ||
4142 | roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, | ||
4143 | V2_QPC_BYTE_28_FL_S, 0); | ||
4144 | memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); | ||
4145 | memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw)); | ||
4146 | roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, | ||
4147 | V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr)); | ||
4148 | roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, | ||
4149 | V2_QPC_BYTE_28_SL_S, 0); | ||
4150 | hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); | ||
4151 | |||
4152 | return 0; | ||
4153 | } | ||
4154 | |||
4155 | static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, | ||
4156 | const struct ib_qp_attr *attr, | ||
4157 | int attr_mask, | ||
4158 | enum ib_qp_state cur_state, | ||
4159 | enum ib_qp_state new_state, | ||
4160 | struct hns_roce_v2_qp_context *context, | ||
4161 | struct hns_roce_v2_qp_context *qpc_mask) | ||
4162 | { | ||
4163 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | ||
4164 | int ret = 0; | ||
3992 | 4165 | ||
3993 | qpc_mask = context + 1; | ||
3994 | /* | ||
3995 | * In v2 engine, software pass context and context mask to hardware | ||
3996 | * when modifying qp. If software need modify some fields in context, | ||
3997 | * we should set all bits of the relevant fields in context mask to | ||
3998 | * 0 at the same time, else set them to 0x1. | ||
3999 | */ | ||
4000 | memset(qpc_mask, 0xff, sizeof(*qpc_mask)); | ||
4001 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { | 4166 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { |
4002 | memset(qpc_mask, 0, sizeof(*qpc_mask)); | 4167 | memset(qpc_mask, 0, sizeof(*qpc_mask)); |
4003 | modify_qp_reset_to_init(ibqp, attr, attr_mask, context, | 4168 | modify_qp_reset_to_init(ibqp, attr, attr_mask, context, |
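The new hns_roce_v2_set_path() helper split out above follows the convention spelled out in the old in-function comment: the caller pre-fills the mask with 0xff, and every field the driver wants hardware to update is written into the context while the same bits are cleared in the mask. A toy sketch of that write-value/clear-mask pairing (the field name, width and helper below are invented, not taken from the hardware spec):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Invented stand-ins for the driver's roce_set_field() and QPC layout. */
    #define HOP_LIMIT_S 0
    #define HOP_LIMIT_M (0xffu << HOP_LIMIT_S)

    static void set_field(uint32_t *word, uint32_t mask, int shift, uint32_t val)
    {
        *word &= ~mask;
        *word |= (val << shift) & mask;
    }

    int main(void)
    {
        uint32_t context = 0;     /* new values to apply            */
        uint32_t mask;            /* 1-bits mean "leave this alone" */

        memset(&mask, 0xff, sizeof(mask));

        /* Update hop_limit: the value goes into the context, and the
         * matching mask bits are cleared so hardware takes the new value.
         */
        set_field(&context, HOP_LIMIT_M, HOP_LIMIT_S, 64);
        set_field(&mask, HOP_LIMIT_M, HOP_LIMIT_S, 0);

        printf("context=0x%08x mask=0x%08x\n", context, mask);
        return 0;
    }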
@@ -4019,134 +4184,30 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, | |||
4019 | /* Nothing */ | 4184 | /* Nothing */ |
4020 | ; | 4185 | ; |
4021 | } else { | 4186 | } else { |
4022 | dev_err(dev, "Illegal state for QP!\n"); | 4187 | dev_err(hr_dev->dev, "Illegal state for QP!\n"); |
4023 | ret = -EINVAL; | 4188 | ret = -EINVAL; |
4024 | goto out; | 4189 | goto out; |
4025 | } | 4190 | } |
4026 | 4191 | ||
4027 | /* When QP state is err, SQ and RQ WQE should be flushed */ | 4192 | out: |
4028 | if (new_state == IB_QPS_ERR) { | 4193 | return ret; |
4029 | roce_set_field(context->byte_160_sq_ci_pi, | 4194 | } |
4030 | V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, | ||
4031 | V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, | ||
4032 | hr_qp->sq.head); | ||
4033 | roce_set_field(qpc_mask->byte_160_sq_ci_pi, | ||
4034 | V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, | ||
4035 | V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0); | ||
4036 | 4195 | ||
4037 | if (!ibqp->srq) { | 4196 | static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, |
4038 | roce_set_field(context->byte_84_rq_ci_pi, | 4197 | const struct ib_qp_attr *attr, |
4039 | V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, | 4198 | int attr_mask, |
4040 | V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, | 4199 | struct hns_roce_v2_qp_context *context, |
4041 | hr_qp->rq.head); | 4200 | struct hns_roce_v2_qp_context *qpc_mask) |
4042 | roce_set_field(qpc_mask->byte_84_rq_ci_pi, | 4201 | { |
4043 | V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, | 4202 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
4044 | V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0); | 4203 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
4045 | } | 4204 | int ret = 0; |
4046 | } | ||
4047 | 4205 | ||
4048 | if (attr_mask & IB_QP_AV) { | 4206 | if (attr_mask & IB_QP_AV) { |
4049 | const struct ib_global_route *grh = | 4207 | ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context, |
4050 | rdma_ah_read_grh(&attr->ah_attr); | 4208 | qpc_mask); |
4051 | const struct ib_gid_attr *gid_attr = NULL; | 4209 | if (ret) |
4052 | int is_roce_protocol; | 4210 | return ret; |
4053 | u16 vlan = 0xffff; | ||
4054 | u8 ib_port; | ||
4055 | u8 hr_port; | ||
4056 | |||
4057 | ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : | ||
4058 | hr_qp->port + 1; | ||
4059 | hr_port = ib_port - 1; | ||
4060 | is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) && | ||
4061 | rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH; | ||
4062 | |||
4063 | if (is_roce_protocol) { | ||
4064 | gid_attr = attr->ah_attr.grh.sgid_attr; | ||
4065 | ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL); | ||
4066 | if (ret) | ||
4067 | goto out; | ||
4068 | } | ||
4069 | |||
4070 | if (vlan < VLAN_CFI_MASK) { | ||
4071 | roce_set_bit(context->byte_76_srqn_op_en, | ||
4072 | V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1); | ||
4073 | roce_set_bit(qpc_mask->byte_76_srqn_op_en, | ||
4074 | V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0); | ||
4075 | roce_set_bit(context->byte_168_irrl_idx, | ||
4076 | V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1); | ||
4077 | roce_set_bit(qpc_mask->byte_168_irrl_idx, | ||
4078 | V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0); | ||
4079 | } | ||
4080 | |||
4081 | roce_set_field(context->byte_24_mtu_tc, | ||
4082 | V2_QPC_BYTE_24_VLAN_ID_M, | ||
4083 | V2_QPC_BYTE_24_VLAN_ID_S, vlan); | ||
4084 | roce_set_field(qpc_mask->byte_24_mtu_tc, | ||
4085 | V2_QPC_BYTE_24_VLAN_ID_M, | ||
4086 | V2_QPC_BYTE_24_VLAN_ID_S, 0); | ||
4087 | |||
4088 | if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) { | ||
4089 | dev_err(hr_dev->dev, | ||
4090 | "sgid_index(%u) too large. max is %d\n", | ||
4091 | grh->sgid_index, | ||
4092 | hr_dev->caps.gid_table_len[hr_port]); | ||
4093 | ret = -EINVAL; | ||
4094 | goto out; | ||
4095 | } | ||
4096 | |||
4097 | if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) { | ||
4098 | dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n"); | ||
4099 | ret = -EINVAL; | ||
4100 | goto out; | ||
4101 | } | ||
4102 | |||
4103 | roce_set_field(context->byte_52_udpspn_dmac, | ||
4104 | V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S, | ||
4105 | (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ? | ||
4106 | 0 : 0x12b7); | ||
4107 | |||
4108 | roce_set_field(qpc_mask->byte_52_udpspn_dmac, | ||
4109 | V2_QPC_BYTE_52_UDPSPN_M, | ||
4110 | V2_QPC_BYTE_52_UDPSPN_S, 0); | ||
4111 | |||
4112 | roce_set_field(context->byte_20_smac_sgid_idx, | ||
4113 | V2_QPC_BYTE_20_SGID_IDX_M, | ||
4114 | V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index); | ||
4115 | |||
4116 | roce_set_field(qpc_mask->byte_20_smac_sgid_idx, | ||
4117 | V2_QPC_BYTE_20_SGID_IDX_M, | ||
4118 | V2_QPC_BYTE_20_SGID_IDX_S, 0); | ||
4119 | |||
4120 | roce_set_field(context->byte_24_mtu_tc, | ||
4121 | V2_QPC_BYTE_24_HOP_LIMIT_M, | ||
4122 | V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit); | ||
4123 | roce_set_field(qpc_mask->byte_24_mtu_tc, | ||
4124 | V2_QPC_BYTE_24_HOP_LIMIT_M, | ||
4125 | V2_QPC_BYTE_24_HOP_LIMIT_S, 0); | ||
4126 | |||
4127 | if (hr_dev->pci_dev->revision == 0x21 && | ||
4128 | gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) | ||
4129 | roce_set_field(context->byte_24_mtu_tc, | ||
4130 | V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S, | ||
4131 | grh->traffic_class >> 2); | ||
4132 | else | ||
4133 | roce_set_field(context->byte_24_mtu_tc, | ||
4134 | V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S, | ||
4135 | grh->traffic_class); | ||
4136 | roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, | ||
4137 | V2_QPC_BYTE_24_TC_S, 0); | ||
4138 | roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, | ||
4139 | V2_QPC_BYTE_28_FL_S, grh->flow_label); | ||
4140 | roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, | ||
4141 | V2_QPC_BYTE_28_FL_S, 0); | ||
4142 | memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); | ||
4143 | memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw)); | ||
4144 | roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, | ||
4145 | V2_QPC_BYTE_28_SL_S, | ||
4146 | rdma_ah_get_sl(&attr->ah_attr)); | ||
4147 | roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, | ||
4148 | V2_QPC_BYTE_28_SL_S, 0); | ||
4149 | hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); | ||
4150 | } | 4211 | } |
4151 | 4212 | ||
4152 | if (attr_mask & IB_QP_TIMEOUT) { | 4213 | if (attr_mask & IB_QP_TIMEOUT) { |
@@ -4158,7 +4219,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, | |||
4158 | V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S, | 4219 | V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S, |
4159 | 0); | 4220 | 0); |
4160 | } else { | 4221 | } else { |
4161 | dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n"); | 4222 | dev_warn(hr_dev->dev, |
4223 | "Local ACK timeout shall be 0 to 30.\n"); | ||
4162 | } | 4224 | } |
4163 | } | 4225 | } |
4164 | 4226 | ||
@@ -4196,6 +4258,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, | |||
4196 | V2_QPC_BYTE_244_RNR_CNT_S, 0); | 4258 | V2_QPC_BYTE_244_RNR_CNT_S, 0); |
4197 | } | 4259 | } |
4198 | 4260 | ||
4261 | /* RC&UC&UD required attr */ | ||
4199 | if (attr_mask & IB_QP_SQ_PSN) { | 4262 | if (attr_mask & IB_QP_SQ_PSN) { |
4200 | roce_set_field(context->byte_172_sq_psn, | 4263 | roce_set_field(context->byte_172_sq_psn, |
4201 | V2_QPC_BYTE_172_SQ_CUR_PSN_M, | 4264 | V2_QPC_BYTE_172_SQ_CUR_PSN_M, |
@@ -4290,11 +4353,85 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, | |||
4290 | } | 4353 | } |
4291 | 4354 | ||
4292 | if (attr_mask & IB_QP_QKEY) { | 4355 | if (attr_mask & IB_QP_QKEY) { |
4293 | context->qkey_xrcd = attr->qkey; | 4356 | context->qkey_xrcd = cpu_to_le32(attr->qkey); |
4294 | qpc_mask->qkey_xrcd = 0; | 4357 | qpc_mask->qkey_xrcd = 0; |
4295 | hr_qp->qkey = attr->qkey; | 4358 | hr_qp->qkey = attr->qkey; |
4296 | } | 4359 | } |
4297 | 4360 | ||
4361 | return ret; | ||
4362 | } | ||
4363 | |||
4364 | static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp, | ||
4365 | const struct ib_qp_attr *attr, | ||
4366 | int attr_mask) | ||
4367 | { | ||
4368 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | ||
4369 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | ||
4370 | |||
4371 | if (attr_mask & IB_QP_ACCESS_FLAGS) | ||
4372 | hr_qp->atomic_rd_en = attr->qp_access_flags; | ||
4373 | |||
4374 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) | ||
4375 | hr_qp->resp_depth = attr->max_dest_rd_atomic; | ||
4376 | if (attr_mask & IB_QP_PORT) { | ||
4377 | hr_qp->port = attr->port_num - 1; | ||
4378 | hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; | ||
4379 | } | ||
4380 | } | ||
4381 | |||
4382 | static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, | ||
4383 | const struct ib_qp_attr *attr, | ||
4384 | int attr_mask, enum ib_qp_state cur_state, | ||
4385 | enum ib_qp_state new_state) | ||
4386 | { | ||
4387 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | ||
4388 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | ||
4389 | struct hns_roce_v2_qp_context ctx[2]; | ||
4390 | struct hns_roce_v2_qp_context *context = ctx; | ||
4391 | struct hns_roce_v2_qp_context *qpc_mask = ctx + 1; | ||
4392 | struct device *dev = hr_dev->dev; | ||
4393 | int ret; | ||
4394 | |||
4395 | /* | ||
4396 | * In v2 engine, software pass context and context mask to hardware | ||
4397 | * when modifying qp. If software need modify some fields in context, | ||
4398 | * we should set all bits of the relevant fields in context mask to | ||
4399 | * 0 at the same time, else set them to 0x1. | ||
4400 | */ | ||
4401 | memset(context, 0, sizeof(*context)); | ||
4402 | memset(qpc_mask, 0xff, sizeof(*qpc_mask)); | ||
4403 | ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state, | ||
4404 | new_state, context, qpc_mask); | ||
4405 | if (ret) | ||
4406 | goto out; | ||
4407 | |||
4408 | /* When QP state is err, SQ and RQ WQE should be flushed */ | ||
4409 | if (new_state == IB_QPS_ERR) { | ||
4410 | roce_set_field(context->byte_160_sq_ci_pi, | ||
4411 | V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, | ||
4412 | V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, | ||
4413 | hr_qp->sq.head); | ||
4414 | roce_set_field(qpc_mask->byte_160_sq_ci_pi, | ||
4415 | V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, | ||
4416 | V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0); | ||
4417 | |||
4418 | if (!ibqp->srq) { | ||
4419 | roce_set_field(context->byte_84_rq_ci_pi, | ||
4420 | V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, | ||
4421 | V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, | ||
4422 | hr_qp->rq.head); | ||
4423 | roce_set_field(qpc_mask->byte_84_rq_ci_pi, | ||
4424 | V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, | ||
4425 | V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0); | ||
4426 | } | ||
4427 | } | ||
4428 | |||
4429 | /* Configure the optional fields */ | ||
4430 | ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context, | ||
4431 | qpc_mask); | ||
4432 | if (ret) | ||
4433 | goto out; | ||
4434 | |||
4298 | roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S, | 4435 | roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S, |
4299 | ibqp->srq ? 1 : 0); | 4436 | ibqp->srq ? 1 : 0); |
4300 | roce_set_bit(qpc_mask->byte_108_rx_reqepsn, | 4437 | roce_set_bit(qpc_mask->byte_108_rx_reqepsn, |
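The rewritten hns_roce_v2_modify_qp() above keeps the context/mask pair in a two-element array on the stack (ctx[2]) instead of kcalloc(2, ..., GFP_ATOMIC), which removes both the atomic allocation and its -ENOMEM failure path. A reduced sketch of the same shape (the toy structure below is invented; the real QPC is a large hardware-defined layout):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Invented, much smaller stand-in for struct hns_roce_v2_qp_context. */
    struct toy_qp_context {
        uint32_t byte_60_qpst_tempid;
        uint32_t qkey_xrcd;
    };

    static int toy_modify_qp(uint32_t qkey)
    {
        struct toy_qp_context ctx[2];
        struct toy_qp_context *context = ctx;
        struct toy_qp_context *qpc_mask = ctx + 1;

        /* Same convention as the driver: context starts zeroed, mask starts
         * all-ones, and each modified field is cleared in the mask. No
         * allocation, so no error path to unwind.
         */
        memset(context, 0, sizeof(*context));
        memset(qpc_mask, 0xff, sizeof(*qpc_mask));

        context->qkey_xrcd = qkey;
        qpc_mask->qkey_xrcd = 0;

        printf("qkey=0x%x mask=0x%x\n", context->qkey_xrcd, qpc_mask->qkey_xrcd);
        return 0;
    }

    int main(void)
    {
        return toy_modify_qp(0x11223344);
    }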
@@ -4307,8 +4444,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, | |||
4307 | V2_QPC_BYTE_60_QP_ST_S, 0); | 4444 | V2_QPC_BYTE_60_QP_ST_S, 0); |
4308 | 4445 | ||
4309 | /* SW pass context to HW */ | 4446 | /* SW pass context to HW */ |
4310 | ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state, | 4447 | ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state, ctx, hr_qp); |
4311 | context, hr_qp); | ||
4312 | if (ret) { | 4448 | if (ret) { |
4313 | dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret); | 4449 | dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret); |
4314 | goto out; | 4450 | goto out; |
@@ -4316,15 +4452,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, | |||
4316 | 4452 | ||
4317 | hr_qp->state = new_state; | 4453 | hr_qp->state = new_state; |
4318 | 4454 | ||
4319 | if (attr_mask & IB_QP_ACCESS_FLAGS) | 4455 | hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask); |
4320 | hr_qp->atomic_rd_en = attr->qp_access_flags; | ||
4321 | |||
4322 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) | ||
4323 | hr_qp->resp_depth = attr->max_dest_rd_atomic; | ||
4324 | if (attr_mask & IB_QP_PORT) { | ||
4325 | hr_qp->port = attr->port_num - 1; | ||
4326 | hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; | ||
4327 | } | ||
4328 | 4456 | ||
4329 | if (new_state == IB_QPS_RESET && !ibqp->uobject) { | 4457 | if (new_state == IB_QPS_RESET && !ibqp->uobject) { |
4330 | hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, | 4458 | hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, |
@@ -4344,7 +4472,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, | |||
4344 | } | 4472 | } |
4345 | 4473 | ||
4346 | out: | 4474 | out: |
4347 | kfree(context); | ||
4348 | return ret; | 4475 | return ret; |
4349 | } | 4476 | } |
4350 | 4477 | ||
@@ -4395,16 +4522,12 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, | |||
4395 | { | 4522 | { |
4396 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | 4523 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
4397 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | 4524 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
4398 | struct hns_roce_v2_qp_context *context; | 4525 | struct hns_roce_v2_qp_context context = {}; |
4399 | struct device *dev = hr_dev->dev; | 4526 | struct device *dev = hr_dev->dev; |
4400 | int tmp_qp_state; | 4527 | int tmp_qp_state; |
4401 | int state; | 4528 | int state; |
4402 | int ret; | 4529 | int ret; |
4403 | 4530 | ||
4404 | context = kzalloc(sizeof(*context), GFP_KERNEL); | ||
4405 | if (!context) | ||
4406 | return -ENOMEM; | ||
4407 | |||
4408 | memset(qp_attr, 0, sizeof(*qp_attr)); | 4531 | memset(qp_attr, 0, sizeof(*qp_attr)); |
4409 | memset(qp_init_attr, 0, sizeof(*qp_init_attr)); | 4532 | memset(qp_init_attr, 0, sizeof(*qp_init_attr)); |
4410 | 4533 | ||
@@ -4416,14 +4539,14 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, | |||
4416 | goto done; | 4539 | goto done; |
4417 | } | 4540 | } |
4418 | 4541 | ||
4419 | ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context); | 4542 | ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context); |
4420 | if (ret) { | 4543 | if (ret) { |
4421 | dev_err(dev, "query qpc error\n"); | 4544 | dev_err(dev, "query qpc error\n"); |
4422 | ret = -EINVAL; | 4545 | ret = -EINVAL; |
4423 | goto out; | 4546 | goto out; |
4424 | } | 4547 | } |
4425 | 4548 | ||
4426 | state = roce_get_field(context->byte_60_qpst_tempid, | 4549 | state = roce_get_field(context.byte_60_qpst_tempid, |
4427 | V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S); | 4550 | V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S); |
4428 | tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state); | 4551 | tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state); |
4429 | if (tmp_qp_state == -1) { | 4552 | if (tmp_qp_state == -1) { |
@@ -4433,7 +4556,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, | |||
4433 | } | 4556 | } |
4434 | hr_qp->state = (u8)tmp_qp_state; | 4557 | hr_qp->state = (u8)tmp_qp_state; |
4435 | qp_attr->qp_state = (enum ib_qp_state)hr_qp->state; | 4558 | qp_attr->qp_state = (enum ib_qp_state)hr_qp->state; |
4436 | qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc, | 4559 | qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc, |
4437 | V2_QPC_BYTE_24_MTU_M, | 4560 | V2_QPC_BYTE_24_MTU_M, |
4438 | V2_QPC_BYTE_24_MTU_S); | 4561 | V2_QPC_BYTE_24_MTU_S); |
4439 | qp_attr->path_mig_state = IB_MIG_ARMED; | 4562 | qp_attr->path_mig_state = IB_MIG_ARMED; |
@@ -4441,20 +4564,20 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, | |||
4441 | if (hr_qp->ibqp.qp_type == IB_QPT_UD) | 4564 | if (hr_qp->ibqp.qp_type == IB_QPT_UD) |
4442 | qp_attr->qkey = V2_QKEY_VAL; | 4565 | qp_attr->qkey = V2_QKEY_VAL; |
4443 | 4566 | ||
4444 | qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn, | 4567 | qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn, |
4445 | V2_QPC_BYTE_108_RX_REQ_EPSN_M, | 4568 | V2_QPC_BYTE_108_RX_REQ_EPSN_M, |
4446 | V2_QPC_BYTE_108_RX_REQ_EPSN_S); | 4569 | V2_QPC_BYTE_108_RX_REQ_EPSN_S); |
4447 | qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn, | 4570 | qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn, |
4448 | V2_QPC_BYTE_172_SQ_CUR_PSN_M, | 4571 | V2_QPC_BYTE_172_SQ_CUR_PSN_M, |
4449 | V2_QPC_BYTE_172_SQ_CUR_PSN_S); | 4572 | V2_QPC_BYTE_172_SQ_CUR_PSN_S); |
4450 | qp_attr->dest_qp_num = (u8)roce_get_field(context->byte_56_dqpn_err, | 4573 | qp_attr->dest_qp_num = (u8)roce_get_field(context.byte_56_dqpn_err, |
4451 | V2_QPC_BYTE_56_DQPN_M, | 4574 | V2_QPC_BYTE_56_DQPN_M, |
4452 | V2_QPC_BYTE_56_DQPN_S); | 4575 | V2_QPC_BYTE_56_DQPN_S); |
4453 | qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en, | 4576 | qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en, |
4454 | V2_QPC_BYTE_76_RRE_S)) << V2_QP_RWE_S) | | 4577 | V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) | |
4455 | ((roce_get_bit(context->byte_76_srqn_op_en, | 4578 | ((roce_get_bit(context.byte_76_srqn_op_en, |
4456 | V2_QPC_BYTE_76_RWE_S)) << V2_QP_RRE_S) | | 4579 | V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) | |
4457 | ((roce_get_bit(context->byte_76_srqn_op_en, | 4580 | ((roce_get_bit(context.byte_76_srqn_op_en, |
4458 | V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S); | 4581 | V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S); |
4459 | 4582 | ||
4460 | if (hr_qp->ibqp.qp_type == IB_QPT_RC || | 4583 | if (hr_qp->ibqp.qp_type == IB_QPT_RC || |
@@ -4463,43 +4586,43 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, | |||
4463 | rdma_ah_retrieve_grh(&qp_attr->ah_attr); | 4586 | rdma_ah_retrieve_grh(&qp_attr->ah_attr); |
4464 | 4587 | ||
4465 | rdma_ah_set_sl(&qp_attr->ah_attr, | 4588 | rdma_ah_set_sl(&qp_attr->ah_attr, |
4466 | roce_get_field(context->byte_28_at_fl, | 4589 | roce_get_field(context.byte_28_at_fl, |
4467 | V2_QPC_BYTE_28_SL_M, | 4590 | V2_QPC_BYTE_28_SL_M, |
4468 | V2_QPC_BYTE_28_SL_S)); | 4591 | V2_QPC_BYTE_28_SL_S)); |
4469 | grh->flow_label = roce_get_field(context->byte_28_at_fl, | 4592 | grh->flow_label = roce_get_field(context.byte_28_at_fl, |
4470 | V2_QPC_BYTE_28_FL_M, | 4593 | V2_QPC_BYTE_28_FL_M, |
4471 | V2_QPC_BYTE_28_FL_S); | 4594 | V2_QPC_BYTE_28_FL_S); |
4472 | grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx, | 4595 | grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx, |
4473 | V2_QPC_BYTE_20_SGID_IDX_M, | 4596 | V2_QPC_BYTE_20_SGID_IDX_M, |
4474 | V2_QPC_BYTE_20_SGID_IDX_S); | 4597 | V2_QPC_BYTE_20_SGID_IDX_S); |
4475 | grh->hop_limit = roce_get_field(context->byte_24_mtu_tc, | 4598 | grh->hop_limit = roce_get_field(context.byte_24_mtu_tc, |
4476 | V2_QPC_BYTE_24_HOP_LIMIT_M, | 4599 | V2_QPC_BYTE_24_HOP_LIMIT_M, |
4477 | V2_QPC_BYTE_24_HOP_LIMIT_S); | 4600 | V2_QPC_BYTE_24_HOP_LIMIT_S); |
4478 | grh->traffic_class = roce_get_field(context->byte_24_mtu_tc, | 4601 | grh->traffic_class = roce_get_field(context.byte_24_mtu_tc, |
4479 | V2_QPC_BYTE_24_TC_M, | 4602 | V2_QPC_BYTE_24_TC_M, |
4480 | V2_QPC_BYTE_24_TC_S); | 4603 | V2_QPC_BYTE_24_TC_S); |
4481 | 4604 | ||
4482 | memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw)); | 4605 | memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw)); |
4483 | } | 4606 | } |
4484 | 4607 | ||
4485 | qp_attr->port_num = hr_qp->port + 1; | 4608 | qp_attr->port_num = hr_qp->port + 1; |
4486 | qp_attr->sq_draining = 0; | 4609 | qp_attr->sq_draining = 0; |
4487 | qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl, | 4610 | qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl, |
4488 | V2_QPC_BYTE_208_SR_MAX_M, | 4611 | V2_QPC_BYTE_208_SR_MAX_M, |
4489 | V2_QPC_BYTE_208_SR_MAX_S); | 4612 | V2_QPC_BYTE_208_SR_MAX_S); |
4490 | qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq, | 4613 | qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq, |
4491 | V2_QPC_BYTE_140_RR_MAX_M, | 4614 | V2_QPC_BYTE_140_RR_MAX_M, |
4492 | V2_QPC_BYTE_140_RR_MAX_S); | 4615 | V2_QPC_BYTE_140_RR_MAX_S); |
4493 | qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn, | 4616 | qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn, |
4494 | V2_QPC_BYTE_80_MIN_RNR_TIME_M, | 4617 | V2_QPC_BYTE_80_MIN_RNR_TIME_M, |
4495 | V2_QPC_BYTE_80_MIN_RNR_TIME_S); | 4618 | V2_QPC_BYTE_80_MIN_RNR_TIME_S); |
4496 | qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl, | 4619 | qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl, |
4497 | V2_QPC_BYTE_28_AT_M, | 4620 | V2_QPC_BYTE_28_AT_M, |
4498 | V2_QPC_BYTE_28_AT_S); | 4621 | V2_QPC_BYTE_28_AT_S); |
4499 | qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn, | 4622 | qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn, |
4500 | V2_QPC_BYTE_212_RETRY_CNT_M, | 4623 | V2_QPC_BYTE_212_RETRY_CNT_M, |
4501 | V2_QPC_BYTE_212_RETRY_CNT_S); | 4624 | V2_QPC_BYTE_212_RETRY_CNT_S); |
4502 | qp_attr->rnr_retry = context->rq_rnr_timer; | 4625 | qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer); |
4503 | 4626 | ||
4504 | done: | 4627 | done: |
4505 | qp_attr->cur_qp_state = qp_attr->qp_state; | 4628 | qp_attr->cur_qp_state = qp_attr->qp_state; |
@@ -4518,7 +4641,6 @@ done: | |||
4518 | 4641 | ||
4519 | out: | 4642 | out: |
4520 | mutex_unlock(&hr_qp->mutex); | 4643 | mutex_unlock(&hr_qp->mutex); |
4521 | kfree(context); | ||
4522 | return ret; | 4644 | return ret; |
4523 | } | 4645 | } |
4524 | 4646 | ||
@@ -4527,7 +4649,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, | |||
4527 | struct ib_udata *udata) | 4649 | struct ib_udata *udata) |
4528 | { | 4650 | { |
4529 | struct hns_roce_cq *send_cq, *recv_cq; | 4651 | struct hns_roce_cq *send_cq, *recv_cq; |
4530 | struct device *dev = hr_dev->dev; | 4652 | struct ib_device *ibdev = &hr_dev->ib_dev; |
4531 | int ret; | 4653 | int ret; |
4532 | 4654 | ||
4533 | if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) { | 4655 | if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) { |
@@ -4535,8 +4657,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, | |||
4535 | ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, | 4657 | ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, |
4536 | hr_qp->state, IB_QPS_RESET); | 4658 | hr_qp->state, IB_QPS_RESET); |
4537 | if (ret) { | 4659 | if (ret) { |
4538 | dev_err(dev, "modify QP %06lx to ERR failed.\n", | 4660 | ibdev_err(ibdev, "modify QP to Reset failed.\n"); |
4539 | hr_qp->qpn); | ||
4540 | return ret; | 4661 | return ret; |
4541 | } | 4662 | } |
4542 | } | 4663 | } |
@@ -4605,7 +4726,8 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) | |||
4605 | 4726 | ||
4606 | ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata); | 4727 | ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata); |
4607 | if (ret) { | 4728 | if (ret) { |
4608 | dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret); | 4729 | ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n", |
4730 | hr_qp->qpn, ret); | ||
4609 | return ret; | 4731 | return ret; |
4610 | } | 4732 | } |
4611 | 4733 | ||
@@ -4829,7 +4951,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev, | |||
4829 | static void set_eq_cons_index_v2(struct hns_roce_eq *eq) | 4951 | static void set_eq_cons_index_v2(struct hns_roce_eq *eq) |
4830 | { | 4952 | { |
4831 | struct hns_roce_dev *hr_dev = eq->hr_dev; | 4953 | struct hns_roce_dev *hr_dev = eq->hr_dev; |
4832 | u32 doorbell[2]; | 4954 | __le32 doorbell[2]; |
4833 | 4955 | ||
4834 | doorbell[0] = 0; | 4956 | doorbell[0] = 0; |
4835 | doorbell[1] = 0; | 4957 | doorbell[1] = 0; |
@@ -4904,7 +5026,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, | |||
4904 | struct hns_roce_eq *eq) | 5026 | struct hns_roce_eq *eq) |
4905 | { | 5027 | { |
4906 | struct device *dev = hr_dev->dev; | 5028 | struct device *dev = hr_dev->dev; |
4907 | struct hns_roce_aeqe *aeqe; | 5029 | struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq); |
4908 | int aeqe_found = 0; | 5030 | int aeqe_found = 0; |
4909 | int event_type; | 5031 | int event_type; |
4910 | int sub_type; | 5032 | int sub_type; |
@@ -4912,8 +5034,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, | |||
4912 | u32 qpn; | 5034 | u32 qpn; |
4913 | u32 cqn; | 5035 | u32 cqn; |
4914 | 5036 | ||
4915 | while ((aeqe = next_aeqe_sw_v2(eq))) { | 5037 | while (aeqe) { |
4916 | |||
4917 | /* Make sure we read AEQ entry after we have checked the | 5038 | /* Make sure we read AEQ entry after we have checked the |
4918 | * ownership bit | 5039 | * ownership bit |
4919 | */ | 5040 | */ |
@@ -4977,11 +5098,12 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, | |||
4977 | ++eq->cons_index; | 5098 | ++eq->cons_index; |
4978 | aeqe_found = 1; | 5099 | aeqe_found = 1; |
4979 | 5100 | ||
4980 | if (eq->cons_index > (2 * eq->entries - 1)) { | 5101 | if (eq->cons_index > (2 * eq->entries - 1)) |
4981 | dev_warn(dev, "cons_index overflow, set back to 0.\n"); | ||
4982 | eq->cons_index = 0; | 5102 | eq->cons_index = 0; |
4983 | } | 5103 | |
4984 | hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn); | 5104 | hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn); |
5105 | |||
5106 | aeqe = next_aeqe_sw_v2(eq); | ||
4985 | } | 5107 | } |
4986 | 5108 | ||
4987 | set_eq_cons_index_v2(eq); | 5109 | set_eq_cons_index_v2(eq); |
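The AEQ handler above now fetches the first entry before the loop and the next one at the bottom of each iteration, and it wraps cons_index silently instead of warning. A toy consume-then-advance loop with the same structure (the depth, coefficient and ownership flag are simplifications, not the real EQE format):

    #include <stdint.h>
    #include <stdio.h>

    #define EQ_ENTRIES  8     /* toy queue depth                     */
    #define DEPTH_COEFF 2     /* software index wraps at 2 * entries */

    struct toy_eq {
        int      valid[EQ_ENTRIES];   /* stands in for the ownership bit */
        uint32_t cons_index;
    };

    /* Return the next software-owned entry, or -1 if the queue is empty. */
    static int next_entry(struct toy_eq *eq)
    {
        uint32_t idx = eq->cons_index % EQ_ENTRIES;

        return eq->valid[idx] ? (int)idx : -1;
    }

    int main(void)
    {
        struct toy_eq eq = { .valid = { 1, 1, 1 } };
        int entry = next_entry(&eq);

        while (entry >= 0) {
            /* ...handle the event described by the entry... */
            printf("handled entry %d\n", entry);
            eq.valid[entry] = 0;

            ++eq.cons_index;
            if (eq.cons_index > DEPTH_COEFF * EQ_ENTRIES - 1)
                eq.cons_index = 0;

            entry = next_entry(&eq);   /* fetch next, as the new loop does */
        }
        return 0;
    }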
@@ -5034,12 +5156,11 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, | |||
5034 | struct hns_roce_eq *eq) | 5156 | struct hns_roce_eq *eq) |
5035 | { | 5157 | { |
5036 | struct device *dev = hr_dev->dev; | 5158 | struct device *dev = hr_dev->dev; |
5037 | struct hns_roce_ceqe *ceqe; | 5159 | struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq); |
5038 | int ceqe_found = 0; | 5160 | int ceqe_found = 0; |
5039 | u32 cqn; | 5161 | u32 cqn; |
5040 | 5162 | ||
5041 | while ((ceqe = next_ceqe_sw_v2(eq))) { | 5163 | while (ceqe) { |
5042 | |||
5043 | /* Make sure we read CEQ entry after we have checked the | 5164 | /* Make sure we read CEQ entry after we have checked the |
5044 | * ownership bit | 5165 | * ownership bit |
5045 | */ | 5166 | */ |
@@ -5054,10 +5175,12 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, | |||
5054 | ++eq->cons_index; | 5175 | ++eq->cons_index; |
5055 | ceqe_found = 1; | 5176 | ceqe_found = 1; |
5056 | 5177 | ||
5057 | if (eq->cons_index > (2 * eq->entries - 1)) { | 5178 | if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1)) { |
5058 | dev_warn(dev, "cons_index overflow, set back to 0.\n"); | 5179 | dev_warn(dev, "cons_index overflow, set back to 0.\n"); |
5059 | eq->cons_index = 0; | 5180 | eq->cons_index = 0; |
5060 | } | 5181 | } |
5182 | |||
5183 | ceqe = next_ceqe_sw_v2(eq); | ||
5061 | } | 5184 | } |
5062 | 5185 | ||
5063 | set_eq_cons_index_v2(eq); | 5186 | set_eq_cons_index_v2(eq); |
@@ -5093,14 +5216,14 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id) | |||
5093 | int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG); | 5216 | int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG); |
5094 | int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG); | 5217 | int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG); |
5095 | 5218 | ||
5096 | if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) { | 5219 | if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) { |
5097 | struct pci_dev *pdev = hr_dev->pci_dev; | 5220 | struct pci_dev *pdev = hr_dev->pci_dev; |
5098 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); | 5221 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
5099 | const struct hnae3_ae_ops *ops = ae_dev->ops; | 5222 | const struct hnae3_ae_ops *ops = ae_dev->ops; |
5100 | 5223 | ||
5101 | dev_err(dev, "AEQ overflow!\n"); | 5224 | dev_err(dev, "AEQ overflow!\n"); |
5102 | 5225 | ||
5103 | roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1); | 5226 | int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S; |
5104 | roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); | 5227 | roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); |
5105 | 5228 | ||
5106 | /* Set reset level for reset_event() */ | 5229 | /* Set reset level for reset_event() */ |
@@ -5110,27 +5233,27 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id) | |||
5110 | if (ops->reset_event) | 5233 | if (ops->reset_event) |
5111 | ops->reset_event(pdev, NULL); | 5234 | ops->reset_event(pdev, NULL); |
5112 | 5235 | ||
5113 | roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1); | 5236 | int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S; |
5114 | roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); | 5237 | roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); |
5115 | 5238 | ||
5116 | int_work = 1; | 5239 | int_work = 1; |
5117 | } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) { | 5240 | } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) { |
5118 | dev_err(dev, "BUS ERR!\n"); | 5241 | dev_err(dev, "BUS ERR!\n"); |
5119 | 5242 | ||
5120 | roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1); | 5243 | int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S; |
5121 | roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); | 5244 | roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); |
5122 | 5245 | ||
5123 | roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1); | 5246 | int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S; |
5124 | roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); | 5247 | roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); |
5125 | 5248 | ||
5126 | int_work = 1; | 5249 | int_work = 1; |
5127 | } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) { | 5250 | } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) { |
5128 | dev_err(dev, "OTHER ERR!\n"); | 5251 | dev_err(dev, "OTHER ERR!\n"); |
5129 | 5252 | ||
5130 | roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1); | 5253 | int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S; |
5131 | roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); | 5254 | roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); |
5132 | 5255 | ||
5133 | roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1); | 5256 | int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S; |
5134 | roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); | 5257 | roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); |
5135 | 5258 | ||
5136 | int_work = 1; | 5259 | int_work = 1; |
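The abnormal-interrupt handler above now treats int_st and int_en as plain CPU-order register values read via roce_read(), so it tests and sets bits with BIT() and |= rather than the roce_get_bit()/roce_set_bit() helpers meant for __le32 structure fields. A tiny sketch of that style (the bit positions are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1U << (n))

    /* Invented bit positions, illustration only. */
    #define INT_ST_AEQ_OVERFLOW_S 0
    #define INT_ST_BUS_ERR_S      1

    int main(void)
    {
        uint32_t int_st = BIT(INT_ST_AEQ_OVERFLOW_S);   /* pretend HW status */

        if (int_st & BIT(INT_ST_AEQ_OVERFLOW_S)) {
            /* Acknowledge by setting the bit in the value written back,
             * as the handler does before roce_write().
             */
            int_st |= BIT(INT_ST_AEQ_OVERFLOW_S);
            printf("AEQ overflow acknowledged, int_st=0x%x\n", int_st);
        }

        if (!(int_st & BIT(INT_ST_BUS_ERR_S)))
            printf("no bus error pending\n");

        return 0;
    }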
@@ -5202,14 +5325,12 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev, | |||
5202 | buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT); | 5325 | buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT); |
5203 | bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT); | 5326 | bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT); |
5204 | 5327 | ||
5205 | /* hop_num = 0 */ | ||
5206 | if (mhop_num == HNS_ROCE_HOP_NUM_0) { | 5328 | if (mhop_num == HNS_ROCE_HOP_NUM_0) { |
5207 | dma_free_coherent(dev, (unsigned int)(eq->entries * | 5329 | dma_free_coherent(dev, (unsigned int)(eq->entries * |
5208 | eq->eqe_size), eq->bt_l0, eq->l0_dma); | 5330 | eq->eqe_size), eq->bt_l0, eq->l0_dma); |
5209 | return; | 5331 | return; |
5210 | } | 5332 | } |
5211 | 5333 | ||
5212 | /* hop_num = 1 or hop = 2 */ | ||
5213 | dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma); | 5334 | dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma); |
5214 | if (mhop_num == 1) { | 5335 | if (mhop_num == 1) { |
5215 | for (i = 0; i < eq->l0_last_num; i++) { | 5336 | for (i = 0; i < eq->l0_last_num; i++) { |
@@ -5449,7 +5570,6 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, | |||
5449 | buf_chk_sz); | 5570 | buf_chk_sz); |
5450 | bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN); | 5571 | bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN); |
5451 | 5572 | ||
5452 | /* hop_num = 0 */ | ||
5453 | if (mhop_num == HNS_ROCE_HOP_NUM_0) { | 5573 | if (mhop_num == HNS_ROCE_HOP_NUM_0) { |
5454 | if (eq->entries > buf_chk_sz / eq->eqe_size) { | 5574 | if (eq->entries > buf_chk_sz / eq->eqe_size) { |
5455 | dev_err(dev, "eq entries %d is larger than buf_pg_sz!", | 5575 | dev_err(dev, "eq entries %d is larger than buf_pg_sz!", |
@@ -5515,7 +5635,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, | |||
5515 | break; | 5635 | break; |
5516 | } | 5636 | } |
5517 | eq->cur_eqe_ba = eq->buf_dma[0]; | 5637 | eq->cur_eqe_ba = eq->buf_dma[0]; |
5518 | eq->nxt_eqe_ba = eq->buf_dma[1]; | 5638 | if (ba_num > 1) |
5639 | eq->nxt_eqe_ba = eq->buf_dma[1]; | ||
5519 | 5640 | ||
5520 | } else if (mhop_num == 2) { | 5641 | } else if (mhop_num == 2) { |
5521 | /* alloc L1 BT and buf */ | 5642 | /* alloc L1 BT and buf */ |
@@ -5556,7 +5677,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, | |||
5556 | break; | 5677 | break; |
5557 | } | 5678 | } |
5558 | eq->cur_eqe_ba = eq->buf_dma[0]; | 5679 | eq->cur_eqe_ba = eq->buf_dma[0]; |
5559 | eq->nxt_eqe_ba = eq->buf_dma[1]; | 5680 | if (ba_num > 1) |
5681 | eq->nxt_eqe_ba = eq->buf_dma[1]; | ||
5560 | } | 5682 | } |
5561 | 5683 | ||
5562 | eq->l0_last_num = i + 1; | 5684 | eq->l0_last_num = i + 1; |
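The two hunks above only read buf_dma[1] into nxt_eqe_ba when more than one EQE buffer block was actually set up (ba_num > 1); with a single block the second slot is not meaningful. A small sketch of the guarded read (the sizes and values are invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int ba_num = 1;                     /* only one buffer block */
        uint64_t *buf_dma = calloc(ba_num, sizeof(*buf_dma));
        uint64_t cur_eqe_ba, nxt_eqe_ba = 0;

        if (!buf_dma)
            return 1;

        buf_dma[0] = 0x1000;                /* pretend DMA address */

        cur_eqe_ba = buf_dma[0];
        if (ba_num > 1)                     /* an unguarded read of [1]  */
            nxt_eqe_ba = buf_dma[1];        /* would go past what we own */

        printf("cur=0x%llx nxt=0x%llx\n",
               (unsigned long long)cur_eqe_ba,
               (unsigned long long)nxt_eqe_ba);
        free(buf_dma);
        return 0;
    }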
@@ -5699,6 +5821,95 @@ free_cmd_mbox: | |||
5699 | return ret; | 5821 | return ret; |
5700 | } | 5822 | } |
5701 | 5823 | ||
5824 | static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num, | ||
5825 | int comp_num, int aeq_num, int other_num) | ||
5826 | { | ||
5827 | struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; | ||
5828 | int i, j; | ||
5829 | int ret; | ||
5830 | |||
5831 | for (i = 0; i < irq_num; i++) { | ||
5832 | hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN, | ||
5833 | GFP_KERNEL); | ||
5834 | if (!hr_dev->irq_names[i]) { | ||
5835 | ret = -ENOMEM; | ||
5836 | goto err_kzalloc_failed; | ||
5837 | } | ||
5838 | } | ||
5839 | |||
5840 | /* irq contains: abnormal + AEQ + CEQ */ | ||
5841 | for (j = 0; j < other_num; j++) | ||
5842 | snprintf((char *)hr_dev->irq_names[j], | ||
5843 | HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", j); | ||
5844 | |||
5845 | for (j = other_num; j < (other_num + aeq_num); j++) | ||
5846 | snprintf((char *)hr_dev->irq_names[j], | ||
5847 | HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d", | ||
5848 | j - other_num); | ||
5849 | |||
5850 | for (j = (other_num + aeq_num); j < irq_num; j++) | ||
5851 | snprintf((char *)hr_dev->irq_names[j], | ||
5852 | HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d", | ||
5853 | j - other_num - aeq_num); | ||
5854 | |||
5855 | for (j = 0; j < irq_num; j++) { | ||
5856 | if (j < other_num) | ||
5857 | ret = request_irq(hr_dev->irq[j], | ||
5858 | hns_roce_v2_msix_interrupt_abn, | ||
5859 | 0, hr_dev->irq_names[j], hr_dev); | ||
5860 | |||
5861 | else if (j < (other_num + comp_num)) | ||
5862 | ret = request_irq(eq_table->eq[j - other_num].irq, | ||
5863 | hns_roce_v2_msix_interrupt_eq, | ||
5864 | 0, hr_dev->irq_names[j + aeq_num], | ||
5865 | &eq_table->eq[j - other_num]); | ||
5866 | else | ||
5867 | ret = request_irq(eq_table->eq[j - other_num].irq, | ||
5868 | hns_roce_v2_msix_interrupt_eq, | ||
5869 | 0, hr_dev->irq_names[j - comp_num], | ||
5870 | &eq_table->eq[j - other_num]); | ||
5871 | if (ret) { | ||
5872 | dev_err(hr_dev->dev, "Request irq error!\n"); | ||
5873 | goto err_request_failed; | ||
5874 | } | ||
5875 | } | ||
5876 | |||
5877 | return 0; | ||
5878 | |||
5879 | err_request_failed: | ||
5880 | for (j -= 1; j >= 0; j--) | ||
5881 | if (j < other_num) | ||
5882 | free_irq(hr_dev->irq[j], hr_dev); | ||
5883 | else | ||
5884 | free_irq(eq_table->eq[j - other_num].irq, | ||
5885 | &eq_table->eq[j - other_num]); | ||
5886 | |||
5887 | err_kzalloc_failed: | ||
5888 | for (i -= 1; i >= 0; i--) | ||
5889 | kfree(hr_dev->irq_names[i]); | ||
5890 | |||
5891 | return ret; | ||
5892 | } | ||
5893 | |||
5894 | static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev) | ||
5895 | { | ||
5896 | int irq_num; | ||
5897 | int eq_num; | ||
5898 | int i; | ||
5899 | |||
5900 | eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; | ||
5901 | irq_num = eq_num + hr_dev->caps.num_other_vectors; | ||
5902 | |||
5903 | for (i = 0; i < hr_dev->caps.num_other_vectors; i++) | ||
5904 | free_irq(hr_dev->irq[i], hr_dev); | ||
5905 | |||
5906 | for (i = 0; i < eq_num; i++) | ||
5907 | free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]); | ||
5908 | |||
5909 | for (i = 0; i < irq_num; i++) | ||
5910 | kfree(hr_dev->irq_names[i]); | ||
5911 | } | ||
5912 | |||
5702 | static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) | 5913 | static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) |
5703 | { | 5914 | { |
5704 | struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; | 5915 | struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; |
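The new __hns_roce_request_irq()/__hns_roce_free_irq() helpers above concentrate the irq-name allocation, the request_irq() calls and the reverse-order unwinding in one place. A compact userspace sketch of that allocate/name/unwind-on-failure pattern (the names and sizes are invented; nothing here is the driver's actual API):

    #include <stdio.h>
    #include <stdlib.h>

    #define NAME_LEN 32

    static int request_all(char **names, int num)
    {
        int i;

        for (i = 0; i < num; i++) {
            names[i] = calloc(1, NAME_LEN);
            if (!names[i])
                goto err_alloc;
            snprintf(names[i], NAME_LEN, "toy-irq-%d", i);
            /* request_irq(...) would go here; on failure the driver jumps
             * to an unwind label that releases what was requested so far.
             */
        }
        return 0;

    err_alloc:
        for (i -= 1; i >= 0; i--)
            free(names[i]);
        return -1;
    }

    int main(void)
    {
        char *names[4];
        int i;

        if (request_all(names, 4))
            return 1;
        for (i = 0; i < 4; i++) {
            puts(names[i]);
            free(names[i]);
        }
        return 0;
    }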
@@ -5710,7 +5921,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) | |||
5710 | int other_num; | 5921 | int other_num; |
5711 | int comp_num; | 5922 | int comp_num; |
5712 | int aeq_num; | 5923 | int aeq_num; |
5713 | int i, j, k; | 5924 | int i; |
5714 | int ret; | 5925 | int ret; |
5715 | 5926 | ||
5716 | other_num = hr_dev->caps.num_other_vectors; | 5927 | other_num = hr_dev->caps.num_other_vectors; |
@@ -5724,27 +5935,18 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) | |||
5724 | if (!eq_table->eq) | 5935 | if (!eq_table->eq) |
5725 | return -ENOMEM; | 5936 | return -ENOMEM; |
5726 | 5937 | ||
5727 | for (i = 0; i < irq_num; i++) { | ||
5728 | hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN, | ||
5729 | GFP_KERNEL); | ||
5730 | if (!hr_dev->irq_names[i]) { | ||
5731 | ret = -ENOMEM; | ||
5732 | goto err_failed_kzalloc; | ||
5733 | } | ||
5734 | } | ||
5735 | |||
5736 | /* create eq */ | 5938 | /* create eq */ |
5737 | for (j = 0; j < eq_num; j++) { | 5939 | for (i = 0; i < eq_num; i++) { |
5738 | eq = &eq_table->eq[j]; | 5940 | eq = &eq_table->eq[i]; |
5739 | eq->hr_dev = hr_dev; | 5941 | eq->hr_dev = hr_dev; |
5740 | eq->eqn = j; | 5942 | eq->eqn = i; |
5741 | if (j < comp_num) { | 5943 | if (i < comp_num) { |
5742 | /* CEQ */ | 5944 | /* CEQ */ |
5743 | eq_cmd = HNS_ROCE_CMD_CREATE_CEQC; | 5945 | eq_cmd = HNS_ROCE_CMD_CREATE_CEQC; |
5744 | eq->type_flag = HNS_ROCE_CEQ; | 5946 | eq->type_flag = HNS_ROCE_CEQ; |
5745 | eq->entries = hr_dev->caps.ceqe_depth; | 5947 | eq->entries = hr_dev->caps.ceqe_depth; |
5746 | eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE; | 5948 | eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE; |
5747 | eq->irq = hr_dev->irq[j + other_num + aeq_num]; | 5949 | eq->irq = hr_dev->irq[i + other_num + aeq_num]; |
5748 | eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM; | 5950 | eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM; |
5749 | eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL; | 5951 | eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL; |
5750 | } else { | 5952 | } else { |
@@ -5753,7 +5955,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) | |||
5753 | eq->type_flag = HNS_ROCE_AEQ; | 5955 | eq->type_flag = HNS_ROCE_AEQ; |
5754 | eq->entries = hr_dev->caps.aeqe_depth; | 5956 | eq->entries = hr_dev->caps.aeqe_depth; |
5755 | eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE; | 5957 | eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE; |
5756 | eq->irq = hr_dev->irq[j - comp_num + other_num]; | 5958 | eq->irq = hr_dev->irq[i - comp_num + other_num]; |
5757 | eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM; | 5959 | eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM; |
5758 | eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL; | 5960 | eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL; |
5759 | } | 5961 | } |
@@ -5768,40 +5970,11 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) | |||
5768 | /* enable irq */ | 5970 | /* enable irq */ |
5769 | hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE); | 5971 | hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE); |
5770 | 5972 | ||
5771 | /* irq contains: abnormal + AEQ + CEQ*/ | 5973 | ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num, |
5772 | for (k = 0; k < irq_num; k++) | 5974 | aeq_num, other_num); |
5773 | if (k < other_num) | 5975 | if (ret) { |
5774 | snprintf((char *)hr_dev->irq_names[k], | 5976 | dev_err(dev, "Request irq failed.\n"); |
5775 | HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k); | 5977 | goto err_request_irq_fail; |
5776 | else if (k < (other_num + aeq_num)) | ||
5777 | snprintf((char *)hr_dev->irq_names[k], | ||
5778 | HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d", | ||
5779 | k - other_num); | ||
5780 | else | ||
5781 | snprintf((char *)hr_dev->irq_names[k], | ||
5782 | HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d", | ||
5783 | k - other_num - aeq_num); | ||
5784 | |||
5785 | for (k = 0; k < irq_num; k++) { | ||
5786 | if (k < other_num) | ||
5787 | ret = request_irq(hr_dev->irq[k], | ||
5788 | hns_roce_v2_msix_interrupt_abn, | ||
5789 | 0, hr_dev->irq_names[k], hr_dev); | ||
5790 | |||
5791 | else if (k < (other_num + comp_num)) | ||
5792 | ret = request_irq(eq_table->eq[k - other_num].irq, | ||
5793 | hns_roce_v2_msix_interrupt_eq, | ||
5794 | 0, hr_dev->irq_names[k + aeq_num], | ||
5795 | &eq_table->eq[k - other_num]); | ||
5796 | else | ||
5797 | ret = request_irq(eq_table->eq[k - other_num].irq, | ||
5798 | hns_roce_v2_msix_interrupt_eq, | ||
5799 | 0, hr_dev->irq_names[k - comp_num], | ||
5800 | &eq_table->eq[k - other_num]); | ||
5801 | if (ret) { | ||
5802 | dev_err(dev, "Request irq error!\n"); | ||
5803 | goto err_request_irq_fail; | ||
5804 | } | ||
5805 | } | 5978 | } |
5806 | 5979 | ||
5807 | hr_dev->irq_workq = | 5980 | hr_dev->irq_workq = |
@@ -5809,26 +5982,20 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) | |||
5809 | if (!hr_dev->irq_workq) { | 5982 | if (!hr_dev->irq_workq) { |
5810 | dev_err(dev, "Create irq workqueue failed!\n"); | 5983 | dev_err(dev, "Create irq workqueue failed!\n"); |
5811 | ret = -ENOMEM; | 5984 | ret = -ENOMEM; |
5812 | goto err_request_irq_fail; | 5985 | goto err_create_wq_fail; |
5813 | } | 5986 | } |
5814 | 5987 | ||
5815 | return 0; | 5988 | return 0; |
5816 | 5989 | ||
5990 | err_create_wq_fail: | ||
5991 | __hns_roce_free_irq(hr_dev); | ||
5992 | |||
5817 | err_request_irq_fail: | 5993 | err_request_irq_fail: |
5818 | for (k -= 1; k >= 0; k--) | 5994 | hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE); |
5819 | if (k < other_num) | ||
5820 | free_irq(hr_dev->irq[k], hr_dev); | ||
5821 | else | ||
5822 | free_irq(eq_table->eq[k - other_num].irq, | ||
5823 | &eq_table->eq[k - other_num]); | ||
5824 | 5995 | ||
5825 | err_create_eq_fail: | 5996 | err_create_eq_fail: |
5826 | for (j -= 1; j >= 0; j--) | ||
5827 | hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]); | ||
5828 | |||
5829 | err_failed_kzalloc: | ||
5830 | for (i -= 1; i >= 0; i--) | 5997 | for (i -= 1; i >= 0; i--) |
5831 | kfree(hr_dev->irq_names[i]); | 5998 | hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]); |
5832 | kfree(eq_table->eq); | 5999 | kfree(eq_table->eq); |
5833 | 6000 | ||
5834 | return ret; | 6001 | return ret; |
@@ -5837,30 +6004,22 @@ err_failed_kzalloc: | |||
5837 | static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev) | 6004 | static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev) |
5838 | { | 6005 | { |
5839 | struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; | 6006 | struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; |
5840 | int irq_num; | ||
5841 | int eq_num; | 6007 | int eq_num; |
5842 | int i; | 6008 | int i; |
5843 | 6009 | ||
5844 | eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; | 6010 | eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; |
5845 | irq_num = eq_num + hr_dev->caps.num_other_vectors; | ||
5846 | 6011 | ||
5847 | /* Disable irq */ | 6012 | /* Disable irq */ |
5848 | hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE); | 6013 | hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE); |
5849 | 6014 | ||
5850 | for (i = 0; i < hr_dev->caps.num_other_vectors; i++) | 6015 | __hns_roce_free_irq(hr_dev); |
5851 | free_irq(hr_dev->irq[i], hr_dev); | ||
5852 | 6016 | ||
5853 | for (i = 0; i < eq_num; i++) { | 6017 | for (i = 0; i < eq_num; i++) { |
5854 | hns_roce_v2_destroy_eqc(hr_dev, i); | 6018 | hns_roce_v2_destroy_eqc(hr_dev, i); |
5855 | 6019 | ||
5856 | free_irq(eq_table->eq[i].irq, &eq_table->eq[i]); | ||
5857 | |||
5858 | hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]); | 6020 | hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]); |
5859 | } | 6021 | } |
5860 | 6022 | ||
5861 | for (i = 0; i < irq_num; i++) | ||
5862 | kfree(hr_dev->irq_names[i]); | ||
5863 | |||
5864 | kfree(eq_table->eq); | 6023 | kfree(eq_table->eq); |
5865 | 6024 | ||
5866 | flush_workqueue(hr_dev->irq_workq); | 6025 | flush_workqueue(hr_dev->irq_workq); |
@@ -5904,7 +6063,7 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev, | |||
5904 | roce_set_field(srq_context->byte_24_wqe_bt_ba, | 6063 | roce_set_field(srq_context->byte_24_wqe_bt_ba, |
5905 | SRQC_BYTE_24_SRQ_WQE_BT_BA_M, | 6064 | SRQC_BYTE_24_SRQ_WQE_BT_BA_M, |
5906 | SRQC_BYTE_24_SRQ_WQE_BT_BA_S, | 6065 | SRQC_BYTE_24_SRQ_WQE_BT_BA_S, |
5907 | cpu_to_le32(dma_handle_wqe >> 35)); | 6066 | dma_handle_wqe >> 35); |
5908 | 6067 | ||
5909 | roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M, | 6068 | roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M, |
5910 | SRQC_BYTE_28_PD_S, pdn); | 6069 | SRQC_BYTE_28_PD_S, pdn); |
@@ -5912,20 +6071,18 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev, | |||
5912 | SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 : | 6071 | SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 : |
5913 | fls(srq->max_gs - 1)); | 6072 | fls(srq->max_gs - 1)); |
5914 | 6073 | ||
5915 | srq_context->idx_bt_ba = (u32)(dma_handle_idx >> 3); | 6074 | srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3); |
5916 | srq_context->idx_bt_ba = cpu_to_le32(srq_context->idx_bt_ba); | ||
5917 | roce_set_field(srq_context->rsv_idx_bt_ba, | 6075 | roce_set_field(srq_context->rsv_idx_bt_ba, |
5918 | SRQC_BYTE_36_SRQ_IDX_BT_BA_M, | 6076 | SRQC_BYTE_36_SRQ_IDX_BT_BA_M, |
5919 | SRQC_BYTE_36_SRQ_IDX_BT_BA_S, | 6077 | SRQC_BYTE_36_SRQ_IDX_BT_BA_S, |
5920 | cpu_to_le32(dma_handle_idx >> 35)); | 6078 | dma_handle_idx >> 35); |
5921 | 6079 | ||
5922 | srq_context->idx_cur_blk_addr = (u32)(mtts_idx[0] >> PAGE_ADDR_SHIFT); | ||
5923 | srq_context->idx_cur_blk_addr = | 6080 | srq_context->idx_cur_blk_addr = |
5924 | cpu_to_le32(srq_context->idx_cur_blk_addr); | 6081 | cpu_to_le32(mtts_idx[0] >> PAGE_ADDR_SHIFT); |
5925 | roce_set_field(srq_context->byte_44_idxbufpgsz_addr, | 6082 | roce_set_field(srq_context->byte_44_idxbufpgsz_addr, |
5926 | SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M, | 6083 | SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M, |
5927 | SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S, | 6084 | SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S, |
5928 | cpu_to_le32((mtts_idx[0]) >> (32 + PAGE_ADDR_SHIFT))); | 6085 | mtts_idx[0] >> (32 + PAGE_ADDR_SHIFT)); |
5929 | roce_set_field(srq_context->byte_44_idxbufpgsz_addr, | 6086 | roce_set_field(srq_context->byte_44_idxbufpgsz_addr, |
5930 | SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M, | 6087 | SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M, |
5931 | SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S, | 6088 | SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S, |
@@ -5941,13 +6098,12 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev, | |||
5941 | SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S, | 6098 | SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S, |
5942 | hr_dev->caps.idx_buf_pg_sz); | 6099 | hr_dev->caps.idx_buf_pg_sz); |
5943 | 6100 | ||
5944 | srq_context->idx_nxt_blk_addr = (u32)(mtts_idx[1] >> PAGE_ADDR_SHIFT); | ||
5945 | srq_context->idx_nxt_blk_addr = | 6101 | srq_context->idx_nxt_blk_addr = |
5946 | cpu_to_le32(srq_context->idx_nxt_blk_addr); | 6102 | cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT); |
5947 | roce_set_field(srq_context->rsv_idxnxtblkaddr, | 6103 | roce_set_field(srq_context->rsv_idxnxtblkaddr, |
5948 | SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M, | 6104 | SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M, |
5949 | SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S, | 6105 | SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S, |
5950 | cpu_to_le32((mtts_idx[1]) >> (32 + PAGE_ADDR_SHIFT))); | 6106 | mtts_idx[1] >> (32 + PAGE_ADDR_SHIFT)); |
5951 | roce_set_field(srq_context->byte_56_xrc_cqn, | 6107 | roce_set_field(srq_context->byte_56_xrc_cqn, |
5952 | SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S, | 6108 | SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S, |
5953 | cqn); | 6109 | cqn); |
@@ -6141,9 +6297,10 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, | |||
6141 | */ | 6297 | */ |
6142 | wmb(); | 6298 | wmb(); |
6143 | 6299 | ||
6144 | srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S | | 6300 | srq_db.byte_4 = |
6145 | (srq->srqn & V2_DB_BYTE_4_TAG_M); | 6301 | cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S | |
6146 | srq_db.parameter = srq->head; | 6302 | (srq->srqn & V2_DB_BYTE_4_TAG_M)); |
6303 | srq_db.parameter = cpu_to_le32(srq->head); | ||
6147 | 6304 | ||
6148 | hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l); | 6305 | hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l); |
6149 | 6306 | ||
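The SRQ doorbell above is now built with cpu_to_le32() on both words before being handed to hns_roce_write64(), keeping the record in device byte order. A sketch of assembling such a two-word doorbell in userspace, with htole32() standing in for cpu_to_le32() (the command value and field layout below are invented; the real ones are the V2_DB_BYTE_4_* definitions):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented layout, illustration only. */
    #define DB_CMD_S   24
    #define DB_TAG_M   0x00ffffffu
    #define SRQ_DB_CMD 0x3

    struct toy_db {
        uint32_t byte_4;      /* command + SRQ number, little-endian on the wire */
        uint32_t parameter;   /* producer head, little-endian on the wire        */
    };

    int main(void)
    {
        uint32_t srqn = 0x42, head = 7;
        struct toy_db db;

        /* Both words are converted before the 64-bit doorbell write; in the
         * driver the wmb() earlier in the function makes the WQE stores
         * visible to the device before the doorbell is rung.
         */
        db.byte_4    = htole32((SRQ_DB_CMD << DB_CMD_S) | (srqn & DB_TAG_M));
        db.parameter = htole32(head);

        printf("byte_4=0x%08x parameter=0x%08x\n", db.byte_4, db.parameter);
        return 0;
    }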
@@ -6433,7 +6590,7 @@ static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle) | |||
6433 | 6590 | ||
6434 | handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT; | 6591 | handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT; |
6435 | dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n"); | 6592 | dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n"); |
6436 | msleep(100); | 6593 | msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY); |
6437 | __hns_roce_hw_v2_uninit_instance(handle, false); | 6594 | __hns_roce_hw_v2_uninit_instance(handle, false); |
6438 | 6595 | ||
6439 | return 0; | 6596 | return 0; |
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 478f5a5b7aa1..43219d2f7de0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h | |||
@@ -96,7 +96,10 @@ | |||
96 | #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 | 96 | #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 |
97 | #define HNS_ROCE_V2_RSV_QPS 8 | 97 | #define HNS_ROCE_V2_RSV_QPS 8 |
98 | 98 | ||
99 | #define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 | 99 | #define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 |
100 | #define HNS_ROCE_V2_HW_RST_UNINT_DELAY 100 | ||
101 | |||
102 | #define HNS_ROCE_V2_HW_RST_COMPLETION_WAIT 20 | ||
100 | 103 | ||
101 | #define HNS_ROCE_CONTEXT_HOP_NUM 1 | 104 | #define HNS_ROCE_CONTEXT_HOP_NUM 1 |
102 | #define HNS_ROCE_SCCC_HOP_NUM 1 | 105 | #define HNS_ROCE_SCCC_HOP_NUM 1 |
@@ -126,8 +129,6 @@ | |||
126 | #define HNS_ROCE_CMD_FLAG_ERR_INTR BIT(HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT) | 129 | #define HNS_ROCE_CMD_FLAG_ERR_INTR BIT(HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT) |
127 | 130 | ||
128 | #define HNS_ROCE_CMQ_DESC_NUM_S 3 | 131 | #define HNS_ROCE_CMQ_DESC_NUM_S 3 |
129 | #define HNS_ROCE_CMQ_EN_B 16 | ||
130 | #define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B) | ||
131 | 132 | ||
132 | #define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5 | 133 | #define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5 |
133 | 134 | ||
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 1e4ba48f5613..b5d196c119ee 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c | |||
@@ -262,7 +262,8 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, | |||
262 | props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ? | 262 | props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ? |
263 | IB_PORT_ACTIVE : IB_PORT_DOWN; | 263 | IB_PORT_ACTIVE : IB_PORT_DOWN; |
264 | props->phys_state = (props->state == IB_PORT_ACTIVE) ? | 264 | props->phys_state = (props->state == IB_PORT_ACTIVE) ? |
265 | HNS_ROCE_PHY_LINKUP : HNS_ROCE_PHY_DISABLED; | 265 | IB_PORT_PHYS_STATE_LINK_UP : |
266 | IB_PORT_PHYS_STATE_DISABLED; | ||
266 | 267 | ||
267 | spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); | 268 | spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); |
268 | 269 | ||
@@ -901,6 +902,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) | |||
901 | goto error_failed_cmd_init; | 902 | goto error_failed_cmd_init; |
902 | } | 903 | } |
903 | 904 | ||
905 | /* EQ depends on poll mode, event mode depends on EQ */ | ||
904 | ret = hr_dev->hw->init_eq(hr_dev); | 906 | ret = hr_dev->hw->init_eq(hr_dev); |
905 | if (ret) { | 907 | if (ret) { |
906 | dev_err(dev, "eq init failed!\n"); | 908 | dev_err(dev, "eq init failed!\n"); |
@@ -910,8 +912,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) | |||
910 | if (hr_dev->cmd_mod) { | 912 | if (hr_dev->cmd_mod) { |
911 | ret = hns_roce_cmd_use_events(hr_dev); | 913 | ret = hns_roce_cmd_use_events(hr_dev); |
912 | if (ret) { | 914 | if (ret) { |
913 | dev_err(dev, "Switch to event-driven cmd failed!\n"); | 915 | dev_warn(dev, |
914 | goto error_failed_use_event; | 916 | "Cmd event mode failed, set back to poll!\n"); |
917 | hns_roce_cmd_use_polling(hr_dev); | ||
915 | } | 918 | } |
916 | } | 919 | } |
917 | 920 | ||
@@ -954,8 +957,6 @@ error_failed_setup_hca: | |||
954 | error_failed_init_hem: | 957 | error_failed_init_hem: |
955 | if (hr_dev->cmd_mod) | 958 | if (hr_dev->cmd_mod) |
956 | hns_roce_cmd_use_polling(hr_dev); | 959 | hns_roce_cmd_use_polling(hr_dev); |
957 | |||
958 | error_failed_use_event: | ||
959 | hr_dev->hw->cleanup_eq(hr_dev); | 960 | hr_dev->hw->cleanup_eq(hr_dev); |
960 | 961 | ||
961 | error_failed_eq_table: | 962 | error_failed_eq_table: |
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 549e1a38dfe0..5f8416ba09a9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c | |||
@@ -347,155 +347,207 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev, | |||
347 | mr->pbl_bt_l0 = NULL; | 347 | mr->pbl_bt_l0 = NULL; |
348 | mr->pbl_l0_dma_addr = 0; | 348 | mr->pbl_l0_dma_addr = 0; |
349 | } | 349 | } |
350 | static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages, | ||
351 | struct hns_roce_mr *mr, u32 pbl_bt_sz) | ||
352 | { | ||
353 | struct device *dev = hr_dev->dev; | ||
350 | 354 | ||
351 | /* PBL multi hop addressing */ | 355 | if (npages > pbl_bt_sz / 8) { |
352 | static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, | 356 | dev_err(dev, "npages %d is larger than buf_pg_sz!", |
353 | struct hns_roce_mr *mr) | 357 | npages); |
358 | return -EINVAL; | ||
359 | } | ||
360 | mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, | ||
361 | &(mr->pbl_dma_addr), | ||
362 | GFP_KERNEL); | ||
363 | if (!mr->pbl_buf) | ||
364 | return -ENOMEM; | ||
365 | |||
366 | mr->pbl_size = npages; | ||
367 | mr->pbl_ba = mr->pbl_dma_addr; | ||
368 | mr->pbl_hop_num = 1; | ||
369 | mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; | ||
370 | mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; | ||
371 | return 0; | ||
372 | |||
373 | } | ||
374 | |||
375 | |||
376 | static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages, | ||
377 | struct hns_roce_mr *mr, u32 pbl_bt_sz) | ||
354 | { | 378 | { |
355 | struct device *dev = hr_dev->dev; | 379 | struct device *dev = hr_dev->dev; |
356 | int mr_alloc_done = 0; | ||
357 | int npages_allocated; | 380 | int npages_allocated; |
358 | int i = 0, j = 0; | ||
359 | u32 pbl_bt_sz; | ||
360 | u32 mhop_num; | ||
361 | u64 pbl_last_bt_num; | 381 | u64 pbl_last_bt_num; |
362 | u64 pbl_bt_cnt = 0; | 382 | u64 pbl_bt_cnt = 0; |
363 | u64 bt_idx; | ||
364 | u64 size; | 383 | u64 size; |
384 | int i; | ||
365 | 385 | ||
366 | mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num); | ||
367 | pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); | ||
368 | pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); | 386 | pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); |
369 | 387 | ||
370 | if (mhop_num == HNS_ROCE_HOP_NUM_0) | 388 | /* alloc L1 BT */ |
371 | return 0; | 389 | for (i = 0; i < pbl_bt_sz / 8; i++) { |
372 | 390 | if (pbl_bt_cnt + 1 < pbl_last_bt_num) { | |
373 | /* hop_num = 1 */ | 391 | size = pbl_bt_sz; |
374 | if (mhop_num == 1) { | 392 | } else { |
375 | if (npages > pbl_bt_sz / 8) { | 393 | npages_allocated = i * (pbl_bt_sz / 8); |
376 | dev_err(dev, "npages %d is larger than buf_pg_sz!", | 394 | size = (npages - npages_allocated) * 8; |
377 | npages); | ||
378 | return -EINVAL; | ||
379 | } | 395 | } |
380 | mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, | 396 | mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size, |
381 | &(mr->pbl_dma_addr), | 397 | &(mr->pbl_l1_dma_addr[i]), |
382 | GFP_KERNEL); | 398 | GFP_KERNEL); |
383 | if (!mr->pbl_buf) | 399 | if (!mr->pbl_bt_l1[i]) { |
400 | hns_roce_loop_free(hr_dev, mr, 1, i, 0); | ||
384 | return -ENOMEM; | 401 | return -ENOMEM; |
402 | } | ||
385 | 403 | ||
386 | mr->pbl_size = npages; | 404 | *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; |
387 | mr->pbl_ba = mr->pbl_dma_addr; | 405 | |
388 | mr->pbl_hop_num = mhop_num; | 406 | pbl_bt_cnt++; |
389 | mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; | 407 | if (pbl_bt_cnt >= pbl_last_bt_num) |
390 | mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; | 408 | break; |
391 | return 0; | ||
392 | } | 409 | } |
393 | 410 | ||
394 | mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8, | 411 | mr->l0_chunk_last_num = i + 1; |
395 | sizeof(*mr->pbl_l1_dma_addr), | 412 | |
413 | return 0; | ||
414 | } | ||
415 | |||
416 | static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages, | ||
417 | struct hns_roce_mr *mr, u32 pbl_bt_sz) | ||
418 | { | ||
419 | struct device *dev = hr_dev->dev; | ||
420 | int mr_alloc_done = 0; | ||
421 | int npages_allocated; | ||
422 | u64 pbl_last_bt_num; | ||
423 | u64 pbl_bt_cnt = 0; | ||
424 | u64 bt_idx; | ||
425 | u64 size; | ||
426 | int i; | ||
427 | int j = 0; | ||
428 | |||
429 | pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); | ||
430 | |||
431 | mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num, | ||
432 | sizeof(*mr->pbl_l2_dma_addr), | ||
396 | GFP_KERNEL); | 433 | GFP_KERNEL); |
397 | if (!mr->pbl_l1_dma_addr) | 434 | if (!mr->pbl_l2_dma_addr) |
398 | return -ENOMEM; | 435 | return -ENOMEM; |
399 | 436 | ||
400 | mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1), | 437 | mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num, |
438 | sizeof(*mr->pbl_bt_l2), | ||
401 | GFP_KERNEL); | 439 | GFP_KERNEL); |
402 | if (!mr->pbl_bt_l1) | 440 | if (!mr->pbl_bt_l2) |
403 | goto err_kcalloc_bt_l1; | 441 | goto err_kcalloc_bt_l2; |
404 | 442 | ||
405 | if (mhop_num == 3) { | 443 | /* alloc L1, L2 BT */ |
406 | mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num, | 444 | for (i = 0; i < pbl_bt_sz / 8; i++) { |
407 | sizeof(*mr->pbl_l2_dma_addr), | 445 | mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz, |
408 | GFP_KERNEL); | 446 | &(mr->pbl_l1_dma_addr[i]), |
409 | if (!mr->pbl_l2_dma_addr) | 447 | GFP_KERNEL); |
410 | goto err_kcalloc_l2_dma; | 448 | if (!mr->pbl_bt_l1[i]) { |
449 | hns_roce_loop_free(hr_dev, mr, 1, i, 0); | ||
450 | goto err_dma_alloc_l0; | ||
451 | } | ||
411 | 452 | ||
412 | mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num, | 453 | *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; |
413 | sizeof(*mr->pbl_bt_l2), | ||
414 | GFP_KERNEL); | ||
415 | if (!mr->pbl_bt_l2) | ||
416 | goto err_kcalloc_bt_l2; | ||
417 | } | ||
418 | 454 | ||
419 | /* alloc L0 BT */ | 455 | for (j = 0; j < pbl_bt_sz / 8; j++) { |
420 | mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz, | 456 | bt_idx = i * pbl_bt_sz / 8 + j; |
421 | &(mr->pbl_l0_dma_addr), | ||
422 | GFP_KERNEL); | ||
423 | if (!mr->pbl_bt_l0) | ||
424 | goto err_dma_alloc_l0; | ||
425 | 457 | ||
426 | if (mhop_num == 2) { | ||
427 | /* alloc L1 BT */ | ||
428 | for (i = 0; i < pbl_bt_sz / 8; i++) { | ||
429 | if (pbl_bt_cnt + 1 < pbl_last_bt_num) { | 458 | if (pbl_bt_cnt + 1 < pbl_last_bt_num) { |
430 | size = pbl_bt_sz; | 459 | size = pbl_bt_sz; |
431 | } else { | 460 | } else { |
432 | npages_allocated = i * (pbl_bt_sz / 8); | 461 | npages_allocated = bt_idx * |
462 | (pbl_bt_sz / 8); | ||
433 | size = (npages - npages_allocated) * 8; | 463 | size = (npages - npages_allocated) * 8; |
434 | } | 464 | } |
435 | mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size, | 465 | mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent( |
436 | &(mr->pbl_l1_dma_addr[i]), | 466 | dev, size, |
437 | GFP_KERNEL); | 467 | &(mr->pbl_l2_dma_addr[bt_idx]), |
438 | if (!mr->pbl_bt_l1[i]) { | 468 | GFP_KERNEL); |
439 | hns_roce_loop_free(hr_dev, mr, 1, i, 0); | 469 | if (!mr->pbl_bt_l2[bt_idx]) { |
470 | hns_roce_loop_free(hr_dev, mr, 2, i, j); | ||
440 | goto err_dma_alloc_l0; | 471 | goto err_dma_alloc_l0; |
441 | } | 472 | } |
442 | 473 | ||
443 | *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; | 474 | *(mr->pbl_bt_l1[i] + j) = |
475 | mr->pbl_l2_dma_addr[bt_idx]; | ||
444 | 476 | ||
445 | pbl_bt_cnt++; | 477 | pbl_bt_cnt++; |
446 | if (pbl_bt_cnt >= pbl_last_bt_num) | 478 | if (pbl_bt_cnt >= pbl_last_bt_num) { |
479 | mr_alloc_done = 1; | ||
447 | break; | 480 | break; |
448 | } | ||
449 | } else if (mhop_num == 3) { | ||
450 | /* alloc L1, L2 BT */ | ||
451 | for (i = 0; i < pbl_bt_sz / 8; i++) { | ||
452 | mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz, | ||
453 | &(mr->pbl_l1_dma_addr[i]), | ||
454 | GFP_KERNEL); | ||
455 | if (!mr->pbl_bt_l1[i]) { | ||
456 | hns_roce_loop_free(hr_dev, mr, 1, i, 0); | ||
457 | goto err_dma_alloc_l0; | ||
458 | } | 481 | } |
482 | } | ||
459 | 483 | ||
460 | *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; | 484 | if (mr_alloc_done) |
485 | break; | ||
486 | } | ||
461 | 487 | ||
462 | for (j = 0; j < pbl_bt_sz / 8; j++) { | 488 | mr->l0_chunk_last_num = i + 1; |
463 | bt_idx = i * pbl_bt_sz / 8 + j; | 489 | mr->l1_chunk_last_num = j + 1; |
464 | 490 | ||
465 | if (pbl_bt_cnt + 1 < pbl_last_bt_num) { | ||
466 | size = pbl_bt_sz; | ||
467 | } else { | ||
468 | npages_allocated = bt_idx * | ||
469 | (pbl_bt_sz / 8); | ||
470 | size = (npages - npages_allocated) * 8; | ||
471 | } | ||
472 | mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent( | ||
473 | dev, size, | ||
474 | &(mr->pbl_l2_dma_addr[bt_idx]), | ||
475 | GFP_KERNEL); | ||
476 | if (!mr->pbl_bt_l2[bt_idx]) { | ||
477 | hns_roce_loop_free(hr_dev, mr, 2, i, j); | ||
478 | goto err_dma_alloc_l0; | ||
479 | } | ||
480 | 491 | ||
481 | *(mr->pbl_bt_l1[i] + j) = | 492 | return 0; |
482 | mr->pbl_l2_dma_addr[bt_idx]; | ||
483 | 493 | ||
484 | pbl_bt_cnt++; | 494 | err_dma_alloc_l0: |
485 | if (pbl_bt_cnt >= pbl_last_bt_num) { | 495 | kfree(mr->pbl_bt_l2); |
486 | mr_alloc_done = 1; | 496 | mr->pbl_bt_l2 = NULL; |
487 | break; | ||
488 | } | ||
489 | } | ||
490 | 497 | ||
491 | if (mr_alloc_done) | 498 | err_kcalloc_bt_l2: |
492 | break; | 499 | kfree(mr->pbl_l2_dma_addr); |
493 | } | 500 | mr->pbl_l2_dma_addr = NULL; |
501 | |||
502 | return -ENOMEM; | ||
503 | } | ||
504 | |||
505 | |||
506 | /* PBL multi hop addressing */ | ||
507 | static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, | ||
508 | struct hns_roce_mr *mr) | ||
509 | { | ||
510 | struct device *dev = hr_dev->dev; | ||
511 | u32 pbl_bt_sz; | ||
512 | u32 mhop_num; | ||
513 | |||
514 | mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num); | ||
515 | pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); | ||
516 | |||
517 | if (mhop_num == HNS_ROCE_HOP_NUM_0) | ||
518 | return 0; | ||
519 | |||
520 | if (mhop_num == 1) | ||
521 | return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz); | ||
522 | |||
523 | mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8, | ||
524 | sizeof(*mr->pbl_l1_dma_addr), | ||
525 | GFP_KERNEL); | ||
526 | if (!mr->pbl_l1_dma_addr) | ||
527 | return -ENOMEM; | ||
528 | |||
529 | mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1), | ||
530 | GFP_KERNEL); | ||
531 | if (!mr->pbl_bt_l1) | ||
532 | goto err_kcalloc_bt_l1; | ||
533 | |||
534 | /* alloc L0 BT */ | ||
535 | mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz, | ||
536 | &(mr->pbl_l0_dma_addr), | ||
537 | GFP_KERNEL); | ||
538 | if (!mr->pbl_bt_l0) | ||
539 | goto err_kcalloc_l2_dma; | ||
540 | |||
541 | if (mhop_num == 2) { | ||
542 | if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz)) | ||
543 | goto err_kcalloc_l2_dma; | ||
544 | } | ||
545 | |||
546 | if (mhop_num == 3) { | ||
547 | if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz)) | ||
548 | goto err_kcalloc_l2_dma; | ||
494 | } | 549 | } |
495 | 550 | ||
496 | mr->l0_chunk_last_num = i + 1; | ||
497 | if (mhop_num == 3) | ||
498 | mr->l1_chunk_last_num = j + 1; | ||
499 | 551 | ||
500 | mr->pbl_size = npages; | 552 | mr->pbl_size = npages; |
501 | mr->pbl_ba = mr->pbl_l0_dma_addr; | 553 | mr->pbl_ba = mr->pbl_l0_dma_addr; |
@@ -505,14 +557,6 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, | |||
505 | 557 | ||
506 | return 0; | 558 | return 0; |
507 | 559 | ||
508 | err_dma_alloc_l0: | ||
509 | kfree(mr->pbl_bt_l2); | ||
510 | mr->pbl_bt_l2 = NULL; | ||
511 | |||
512 | err_kcalloc_bt_l2: | ||
513 | kfree(mr->pbl_l2_dma_addr); | ||
514 | mr->pbl_l2_dma_addr = NULL; | ||
515 | |||
516 | err_kcalloc_l2_dma: | 560 | err_kcalloc_l2_dma: |
517 | kfree(mr->pbl_bt_l1); | 561 | kfree(mr->pbl_bt_l1); |
518 | mr->pbl_bt_l1 = NULL; | 562 | mr->pbl_bt_l1 = NULL; |
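The refactor above splits hns_roce_mhop_alloc() into pbl_1hop_alloc()/pbl_2hop_alloc()/pbl_3hop_alloc(), but the sizing arithmetic is unchanged: a base-address-table (BT) page of pbl_bt_sz bytes holds pbl_bt_sz / 8 64-bit entries, and only the final leaf table is allocated at the exact remaining size. A small host-side sketch of that arithmetic, with illustrative numbers rather than real device caps:

#include <stdint.h>
#include <stdio.h>

/*
 * Rough model of the sizing used by pbl_2hop_alloc()/pbl_3hop_alloc(): each
 * BT page holds pbl_bt_sz / 8 page addresses, and the last leaf page only
 * needs (npages - npages_allocated) * 8 bytes.
 */
static void pbl_bt_layout(uint64_t npages, uint32_t pbl_bt_sz)
{
	uint64_t entries_per_bt = pbl_bt_sz / 8;
	uint64_t pbl_last_bt_num = (npages + entries_per_bt - 1) / entries_per_bt;

	for (uint64_t i = 0; i < pbl_last_bt_num; i++) {
		uint64_t npages_allocated = i * entries_per_bt;
		uint64_t size = (i + 1 < pbl_last_bt_num) ?
				pbl_bt_sz : (npages - npages_allocated) * 8;

		printf("leaf BT %llu: %llu bytes\n",
		       (unsigned long long)i, (unsigned long long)size);
	}
}

int main(void)
{
	/* Example: 1000 pages with a 4 KiB BT page (512 entries per BT). */
	pbl_bt_layout(1000, 4096);
	return 0;
}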
@@ -1161,6 +1205,83 @@ err_free: | |||
1161 | return ERR_PTR(ret); | 1205 | return ERR_PTR(ret); |
1162 | } | 1206 | } |
1163 | 1207 | ||
1208 | static int rereg_mr_trans(struct ib_mr *ibmr, int flags, | ||
1209 | u64 start, u64 length, | ||
1210 | u64 virt_addr, int mr_access_flags, | ||
1211 | struct hns_roce_cmd_mailbox *mailbox, | ||
1212 | u32 pdn, struct ib_udata *udata) | ||
1213 | { | ||
1214 | struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); | ||
1215 | struct hns_roce_mr *mr = to_hr_mr(ibmr); | ||
1216 | struct device *dev = hr_dev->dev; | ||
1217 | int npages; | ||
1218 | int ret; | ||
1219 | |||
1220 | if (mr->size != ~0ULL) { | ||
1221 | npages = ib_umem_page_count(mr->umem); | ||
1222 | |||
1223 | if (hr_dev->caps.pbl_hop_num) | ||
1224 | hns_roce_mhop_free(hr_dev, mr); | ||
1225 | else | ||
1226 | dma_free_coherent(dev, npages * 8, | ||
1227 | mr->pbl_buf, mr->pbl_dma_addr); | ||
1228 | } | ||
1229 | ib_umem_release(mr->umem); | ||
1230 | |||
1231 | mr->umem = ib_umem_get(udata, start, length, mr_access_flags, 0); | ||
1232 | if (IS_ERR(mr->umem)) { | ||
1233 | ret = PTR_ERR(mr->umem); | ||
1234 | mr->umem = NULL; | ||
1235 | return -ENOMEM; | ||
1236 | } | ||
1237 | npages = ib_umem_page_count(mr->umem); | ||
1238 | |||
1239 | if (hr_dev->caps.pbl_hop_num) { | ||
1240 | ret = hns_roce_mhop_alloc(hr_dev, npages, mr); | ||
1241 | if (ret) | ||
1242 | goto release_umem; | ||
1243 | } else { | ||
1244 | mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, | ||
1245 | &(mr->pbl_dma_addr), | ||
1246 | GFP_KERNEL); | ||
1247 | if (!mr->pbl_buf) { | ||
1248 | ret = -ENOMEM; | ||
1249 | goto release_umem; | ||
1250 | } | ||
1251 | } | ||
1252 | |||
1253 | ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn, | ||
1254 | mr_access_flags, virt_addr, | ||
1255 | length, mailbox->buf); | ||
1256 | if (ret) | ||
1257 | goto release_umem; | ||
1258 | |||
1259 | |||
1260 | ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem); | ||
1261 | if (ret) { | ||
1262 | if (mr->size != ~0ULL) { | ||
1263 | npages = ib_umem_page_count(mr->umem); | ||
1264 | |||
1265 | if (hr_dev->caps.pbl_hop_num) | ||
1266 | hns_roce_mhop_free(hr_dev, mr); | ||
1267 | else | ||
1268 | dma_free_coherent(dev, npages * 8, | ||
1269 | mr->pbl_buf, | ||
1270 | mr->pbl_dma_addr); | ||
1271 | } | ||
1272 | |||
1273 | goto release_umem; | ||
1274 | } | ||
1275 | |||
1276 | return 0; | ||
1277 | |||
1278 | release_umem: | ||
1279 | ib_umem_release(mr->umem); | ||
1280 | return ret; | ||
1281 | |||
1282 | } | ||
1283 | |||
1284 | |||
1164 | int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, | 1285 | int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, |
1165 | u64 virt_addr, int mr_access_flags, struct ib_pd *pd, | 1286 | u64 virt_addr, int mr_access_flags, struct ib_pd *pd, |
1166 | struct ib_udata *udata) | 1287 | struct ib_udata *udata) |
@@ -1171,7 +1292,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, | |||
1171 | struct device *dev = hr_dev->dev; | 1292 | struct device *dev = hr_dev->dev; |
1172 | unsigned long mtpt_idx; | 1293 | unsigned long mtpt_idx; |
1173 | u32 pdn = 0; | 1294 | u32 pdn = 0; |
1174 | int npages; | ||
1175 | int ret; | 1295 | int ret; |
1176 | 1296 | ||
1177 | if (!mr->enabled) | 1297 | if (!mr->enabled) |
@@ -1198,73 +1318,25 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, | |||
1198 | pdn = to_hr_pd(pd)->pdn; | 1318 | pdn = to_hr_pd(pd)->pdn; |
1199 | 1319 | ||
1200 | if (flags & IB_MR_REREG_TRANS) { | 1320 | if (flags & IB_MR_REREG_TRANS) { |
1201 | if (mr->size != ~0ULL) { | 1321 | ret = rereg_mr_trans(ibmr, flags, |
1202 | npages = ib_umem_page_count(mr->umem); | 1322 | start, length, |
1203 | 1323 | virt_addr, mr_access_flags, | |
1204 | if (hr_dev->caps.pbl_hop_num) | 1324 | mailbox, pdn, udata); |
1205 | hns_roce_mhop_free(hr_dev, mr); | 1325 | if (ret) |
1206 | else | ||
1207 | dma_free_coherent(dev, npages * 8, mr->pbl_buf, | ||
1208 | mr->pbl_dma_addr); | ||
1209 | } | ||
1210 | ib_umem_release(mr->umem); | ||
1211 | |||
1212 | mr->umem = | ||
1213 | ib_umem_get(udata, start, length, mr_access_flags, 0); | ||
1214 | if (IS_ERR(mr->umem)) { | ||
1215 | ret = PTR_ERR(mr->umem); | ||
1216 | mr->umem = NULL; | ||
1217 | goto free_cmd_mbox; | 1326 | goto free_cmd_mbox; |
1218 | } | 1327 | } else { |
1219 | npages = ib_umem_page_count(mr->umem); | 1328 | ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn, |
1220 | 1329 | mr_access_flags, virt_addr, | |
1221 | if (hr_dev->caps.pbl_hop_num) { | 1330 | length, mailbox->buf); |
1222 | ret = hns_roce_mhop_alloc(hr_dev, npages, mr); | 1331 | if (ret) |
1223 | if (ret) | ||
1224 | goto release_umem; | ||
1225 | } else { | ||
1226 | mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, | ||
1227 | &(mr->pbl_dma_addr), | ||
1228 | GFP_KERNEL); | ||
1229 | if (!mr->pbl_buf) { | ||
1230 | ret = -ENOMEM; | ||
1231 | goto release_umem; | ||
1232 | } | ||
1233 | } | ||
1234 | } | ||
1235 | |||
1236 | ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn, | ||
1237 | mr_access_flags, virt_addr, | ||
1238 | length, mailbox->buf); | ||
1239 | if (ret) { | ||
1240 | if (flags & IB_MR_REREG_TRANS) | ||
1241 | goto release_umem; | ||
1242 | else | ||
1243 | goto free_cmd_mbox; | 1332 | goto free_cmd_mbox; |
1244 | } | 1333 | } |
1245 | 1334 | ||
1246 | if (flags & IB_MR_REREG_TRANS) { | ||
1247 | ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem); | ||
1248 | if (ret) { | ||
1249 | if (mr->size != ~0ULL) { | ||
1250 | npages = ib_umem_page_count(mr->umem); | ||
1251 | |||
1252 | if (hr_dev->caps.pbl_hop_num) | ||
1253 | hns_roce_mhop_free(hr_dev, mr); | ||
1254 | else | ||
1255 | dma_free_coherent(dev, npages * 8, | ||
1256 | mr->pbl_buf, | ||
1257 | mr->pbl_dma_addr); | ||
1258 | } | ||
1259 | |||
1260 | goto release_umem; | ||
1261 | } | ||
1262 | } | ||
1263 | |||
1264 | ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx); | 1335 | ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx); |
1265 | if (ret) { | 1336 | if (ret) { |
1266 | dev_err(dev, "SW2HW_MPT failed (%d)\n", ret); | 1337 | dev_err(dev, "SW2HW_MPT failed (%d)\n", ret); |
1267 | goto release_umem; | 1338 | ib_umem_release(mr->umem); |
1339 | goto free_cmd_mbox; | ||
1268 | } | 1340 | } |
1269 | 1341 | ||
1270 | mr->enabled = 1; | 1342 | mr->enabled = 1; |
@@ -1275,9 +1347,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, | |||
1275 | 1347 | ||
1276 | return 0; | 1348 | return 0; |
1277 | 1349 | ||
1278 | release_umem: | ||
1279 | ib_umem_release(mr->umem); | ||
1280 | |||
1281 | free_cmd_mbox: | 1350 | free_cmd_mbox: |
1282 | hns_roce_free_cmd_mailbox(hr_dev, mailbox); | 1351 | hns_roce_free_cmd_mailbox(hr_dev, mailbox); |
1283 | 1352 | ||
@@ -1357,7 +1426,7 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr) | |||
1357 | { | 1426 | { |
1358 | struct hns_roce_mr *mr = to_hr_mr(ibmr); | 1427 | struct hns_roce_mr *mr = to_hr_mr(ibmr); |
1359 | 1428 | ||
1360 | mr->pbl_buf[mr->npages++] = cpu_to_le64(addr); | 1429 | mr->pbl_buf[mr->npages++] = addr; |
1361 | 1430 | ||
1362 | return 0; | 1431 | return 0; |
1363 | } | 1432 | } |
@@ -1528,10 +1597,9 @@ static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev, | |||
1528 | /* Save page addr, low 12 bits : 0 */ | 1597 | /* Save page addr, low 12 bits : 0 */ |
1529 | for (i = 0; i < count; i++) { | 1598 | for (i = 0; i < count; i++) { |
1530 | if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) | 1599 | if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) |
1531 | mtts[i] = cpu_to_le64(bufs[npage] >> | 1600 | mtts[i] = bufs[npage] >> PAGE_ADDR_SHIFT; |
1532 | PAGE_ADDR_SHIFT); | ||
1533 | else | 1601 | else |
1534 | mtts[i] = cpu_to_le64(bufs[npage]); | 1602 | mtts[i] = bufs[npage]; |
1535 | 1603 | ||
1536 | npage++; | 1604 | npage++; |
1537 | } | 1605 | } |
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index e0424029b058..bd78ff90d998 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c | |||
@@ -324,31 +324,46 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, | |||
324 | return 0; | 324 | return 0; |
325 | } | 325 | } |
326 | 326 | ||
327 | static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, | 327 | static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, |
328 | struct ib_qp_cap *cap, | 328 | struct ib_qp_cap *cap, |
329 | struct hns_roce_qp *hr_qp, | 329 | struct hns_roce_ib_create_qp *ucmd) |
330 | struct hns_roce_ib_create_qp *ucmd) | ||
331 | { | 330 | { |
332 | u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); | 331 | u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); |
333 | u8 max_sq_stride = ilog2(roundup_sq_stride); | 332 | u8 max_sq_stride = ilog2(roundup_sq_stride); |
334 | u32 ex_sge_num; | ||
335 | u32 page_size; | ||
336 | u32 max_cnt; | ||
337 | 333 | ||
338 | /* Sanity check SQ size before proceeding */ | 334 | /* Sanity check SQ size before proceeding */ |
339 | if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes || | 335 | if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes || |
340 | ucmd->log_sq_stride > max_sq_stride || | 336 | ucmd->log_sq_stride > max_sq_stride || |
341 | ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { | 337 | ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { |
342 | dev_err(hr_dev->dev, "check SQ size error!\n"); | 338 | ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n"); |
343 | return -EINVAL; | 339 | return -EINVAL; |
344 | } | 340 | } |
345 | 341 | ||
346 | if (cap->max_send_sge > hr_dev->caps.max_sq_sg) { | 342 | if (cap->max_send_sge > hr_dev->caps.max_sq_sg) { |
347 | dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n", | 343 | ibdev_err(&hr_dev->ib_dev, "SQ sge error! max_send_sge=%d\n", |
348 | cap->max_send_sge); | 344 | cap->max_send_sge); |
349 | return -EINVAL; | 345 | return -EINVAL; |
350 | } | 346 | } |
351 | 347 | ||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, | ||
352 | struct ib_qp_cap *cap, | ||
353 | struct hns_roce_qp *hr_qp, | ||
354 | struct hns_roce_ib_create_qp *ucmd) | ||
355 | { | ||
356 | u32 ex_sge_num; | ||
357 | u32 page_size; | ||
358 | u32 max_cnt; | ||
359 | int ret; | ||
360 | |||
361 | ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); | ||
362 | if (ret) { | ||
363 | ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n"); | ||
364 | return ret; | ||
365 | } | ||
366 | |||
352 | hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; | 367 | hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; |
353 | hr_qp->sq.wqe_shift = ucmd->log_sq_stride; | 368 | hr_qp->sq.wqe_shift = ucmd->log_sq_stride; |
354 | 369 | ||
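check_sq_size_with_integrity() above carries the original bounds over unchanged: 1 << log_sq_bb_count may not exceed the device's max_wqes, and log_sq_stride must lie between HNS_ROCE_IB_MIN_SQ_STRIDE and ilog2(roundup_pow_of_two(max_sq_desc_sz)). A standalone sketch of the same check, with made-up capability values:

#include <stdint.h>
#include <stdio.h>

#define MAX_WQES        32768  /* made-up stand-in for hr_dev->caps.max_wqes       */
#define MAX_SQ_DESC_SZ  128    /* made-up stand-in for hr_dev->caps.max_sq_desc_sz */
#define MIN_SQ_STRIDE   6      /* stand-in for HNS_ROCE_IB_MIN_SQ_STRIDE           */

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static uint32_t roundup_pow_of_two_u32(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Same shape as check_sq_size_with_integrity() above. */
static int check_sq_size(uint8_t log_sq_bb_count, uint8_t log_sq_stride)
{
	unsigned int max_sq_stride = ilog2_u32(roundup_pow_of_two_u32(MAX_SQ_DESC_SZ));

	if ((uint32_t)(1u << log_sq_bb_count) > MAX_WQES ||
	    log_sq_stride > max_sq_stride ||
	    log_sq_stride < MIN_SQ_STRIDE)
		return -1;
	return 0;
}

int main(void)
{
	printf("log_bb=10 log_stride=6 -> %d\n", check_sq_size(10, 6)); /* accepted */
	printf("log_bb=20 log_stride=6 -> %d\n", check_sq_size(20, 6)); /* rejected */
	return 0;
}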
@@ -501,6 +516,35 @@ static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev, | |||
501 | return bt_pg_shift - PAGE_SHIFT; | 516 | return bt_pg_shift - PAGE_SHIFT; |
502 | } | 517 | } |
503 | 518 | ||
519 | static int set_extend_sge_param(struct hns_roce_dev *hr_dev, | ||
520 | struct hns_roce_qp *hr_qp) | ||
521 | { | ||
522 | struct device *dev = hr_dev->dev; | ||
523 | |||
524 | if (hr_qp->sq.max_gs > 2) { | ||
525 | hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * | ||
526 | (hr_qp->sq.max_gs - 2)); | ||
527 | hr_qp->sge.sge_shift = 4; | ||
528 | } | ||
529 | |||
530 | /* ud sqwqe's sge use extend sge */ | ||
531 | if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) { | ||
532 | hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * | ||
533 | hr_qp->sq.max_gs); | ||
534 | hr_qp->sge.sge_shift = 4; | ||
535 | } | ||
536 | |||
537 | if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) { | ||
538 | if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) { | ||
539 | dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n", | ||
540 | hr_qp->sge.sge_cnt); | ||
541 | return -EINVAL; | ||
542 | } | ||
543 | } | ||
544 | |||
545 | return 0; | ||
546 | } | ||
547 | |||
504 | static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, | 548 | static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, |
505 | struct ib_qp_cap *cap, | 549 | struct ib_qp_cap *cap, |
506 | struct hns_roce_qp *hr_qp) | 550 | struct hns_roce_qp *hr_qp) |
@@ -509,6 +553,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, | |||
509 | u32 page_size; | 553 | u32 page_size; |
510 | u32 max_cnt; | 554 | u32 max_cnt; |
511 | int size; | 555 | int size; |
556 | int ret; | ||
512 | 557 | ||
513 | if (cap->max_send_wr > hr_dev->caps.max_wqes || | 558 | if (cap->max_send_wr > hr_dev->caps.max_wqes || |
514 | cap->max_send_sge > hr_dev->caps.max_sq_sg || | 559 | cap->max_send_sge > hr_dev->caps.max_sq_sg || |
@@ -518,8 +563,6 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, | |||
518 | } | 563 | } |
519 | 564 | ||
520 | hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); | 565 | hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); |
521 | hr_qp->sq_max_wqes_per_wr = 1; | ||
522 | hr_qp->sq_spare_wqes = 0; | ||
523 | 566 | ||
524 | if (hr_dev->caps.min_wqes) | 567 | if (hr_dev->caps.min_wqes) |
525 | max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes); | 568 | max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes); |
@@ -539,25 +582,10 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, | |||
539 | else | 582 | else |
540 | hr_qp->sq.max_gs = max_cnt; | 583 | hr_qp->sq.max_gs = max_cnt; |
541 | 584 | ||
542 | if (hr_qp->sq.max_gs > 2) { | 585 | ret = set_extend_sge_param(hr_dev, hr_qp); |
543 | hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * | 586 | if (ret) { |
544 | (hr_qp->sq.max_gs - 2)); | 587 | dev_err(dev, "set extend sge parameters fail\n"); |
545 | hr_qp->sge.sge_shift = 4; | 588 | return ret; |
546 | } | ||
547 | |||
548 | /* ud sqwqe's sge use extend sge */ | ||
549 | if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) { | ||
550 | hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * | ||
551 | hr_qp->sq.max_gs); | ||
552 | hr_qp->sge.sge_shift = 4; | ||
553 | } | ||
554 | |||
555 | if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) { | ||
556 | if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) { | ||
557 | dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n", | ||
558 | hr_qp->sge.sge_cnt); | ||
559 | return -EINVAL; | ||
560 | } | ||
561 | } | 589 | } |
562 | 590 | ||
563 | /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */ | 591 | /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */ |
@@ -607,13 +635,57 @@ static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr) | |||
607 | return 1; | 635 | return 1; |
608 | } | 636 | } |
609 | 637 | ||
638 | static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp, | ||
639 | struct ib_qp_init_attr *init_attr) | ||
640 | { | ||
641 | u32 max_recv_sge = init_attr->cap.max_recv_sge; | ||
642 | struct hns_roce_rinl_wqe *wqe_list; | ||
643 | u32 wqe_cnt = hr_qp->rq.wqe_cnt; | ||
644 | int i; | ||
645 | |||
646 | /* allocate recv inline buf */ | ||
647 | wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe), | ||
648 | GFP_KERNEL); | ||
649 | |||
650 | if (!wqe_list) | ||
651 | goto err; | ||
652 | |||
653 | /* Allocate a continuous buffer for all inline sge we need */ | ||
654 | wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge * | ||
655 | sizeof(struct hns_roce_rinl_sge)), | ||
656 | GFP_KERNEL); | ||
657 | if (!wqe_list[0].sg_list) | ||
658 | goto err_wqe_list; | ||
659 | |||
660 | /* Assign buffers of sg_list to each inline wqe */ | ||
661 | for (i = 1; i < wqe_cnt; i++) | ||
662 | wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge]; | ||
663 | |||
664 | hr_qp->rq_inl_buf.wqe_list = wqe_list; | ||
665 | hr_qp->rq_inl_buf.wqe_cnt = wqe_cnt; | ||
666 | |||
667 | return 0; | ||
668 | |||
669 | err_wqe_list: | ||
670 | kfree(wqe_list); | ||
671 | |||
672 | err: | ||
673 | return -ENOMEM; | ||
674 | } | ||
675 | |||
676 | static void free_rq_inline_buf(struct hns_roce_qp *hr_qp) | ||
677 | { | ||
678 | kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); | ||
679 | kfree(hr_qp->rq_inl_buf.wqe_list); | ||
680 | } | ||
681 | |||
610 | static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, | 682 | static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, |
611 | struct ib_pd *ib_pd, | 683 | struct ib_pd *ib_pd, |
612 | struct ib_qp_init_attr *init_attr, | 684 | struct ib_qp_init_attr *init_attr, |
613 | struct ib_udata *udata, unsigned long sqpn, | 685 | struct ib_udata *udata, unsigned long sqpn, |
614 | struct hns_roce_qp *hr_qp) | 686 | struct hns_roce_qp *hr_qp) |
615 | { | 687 | { |
616 | dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { 0 }; | 688 | dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL }; |
617 | struct device *dev = hr_dev->dev; | 689 | struct device *dev = hr_dev->dev; |
618 | struct hns_roce_ib_create_qp ucmd; | 690 | struct hns_roce_ib_create_qp ucmd; |
619 | struct hns_roce_ib_create_qp_resp resp = {}; | 691 | struct hns_roce_ib_create_qp_resp resp = {}; |
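alloc_rq_inline_buf() above keeps the old allocation scheme but in one place: one kcalloc() for the per-WQE descriptors and a single contiguous kcalloc() for every sg_list entry, with each WQE's sg_list pointer aimed at its max_recv_sge-sized slice of that one buffer (which is why free_rq_inline_buf() needs only two kfree() calls). A userspace sketch of the same layout, with simplified stand-in structures:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for hns_roce_rinl_sge / hns_roce_rinl_wqe. */
struct rinl_sge {
	void *addr;
	unsigned int len;
};

struct rinl_wqe {
	struct rinl_sge *sg_list;
	unsigned int sge_cnt;
};

/* One backing array for every WQE's sg_list, sliced per WQE. */
static struct rinl_wqe *alloc_rq_inline_buf(unsigned int wqe_cnt,
					    unsigned int max_recv_sge)
{
	struct rinl_wqe *wqe_list = calloc(wqe_cnt, sizeof(*wqe_list));
	struct rinl_sge *sges;

	if (!wqe_list)
		return NULL;

	sges = calloc((size_t)wqe_cnt * max_recv_sge, sizeof(*sges));
	if (!sges) {
		free(wqe_list);
		return NULL;
	}

	for (unsigned int i = 0; i < wqe_cnt; i++)
		wqe_list[i].sg_list = &sges[i * max_recv_sge];

	return wqe_list;
}

static void free_rq_inline_buf(struct rinl_wqe *wqe_list)
{
	if (wqe_list)
		free(wqe_list[0].sg_list);	/* frees the whole sge slab */
	free(wqe_list);
}

int main(void)
{
	struct rinl_wqe *wqes = alloc_rq_inline_buf(8, 4);

	printf("wqe[3].sg_list is %td sges past wqe[0].sg_list\n",
	       wqes ? wqes[3].sg_list - wqes[0].sg_list : -1);
	free_rq_inline_buf(wqes);
	return 0;
}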
@@ -635,9 +707,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, | |||
635 | hr_qp->ibqp.qp_type = init_attr->qp_type; | 707 | hr_qp->ibqp.qp_type = init_attr->qp_type; |
636 | 708 | ||
637 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) | 709 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) |
638 | hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR); | 710 | hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; |
639 | else | 711 | else |
640 | hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR); | 712 | hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; |
641 | 713 | ||
642 | ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata, | 714 | ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata, |
643 | hns_roce_qp_has_rq(init_attr), hr_qp); | 715 | hns_roce_qp_has_rq(init_attr), hr_qp); |
@@ -648,33 +720,11 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, | |||
648 | 720 | ||
649 | if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && | 721 | if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && |
650 | hns_roce_qp_has_rq(init_attr)) { | 722 | hns_roce_qp_has_rq(init_attr)) { |
651 | /* allocate recv inline buf */ | 723 | ret = alloc_rq_inline_buf(hr_qp, init_attr); |
652 | hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt, | 724 | if (ret) { |
653 | sizeof(struct hns_roce_rinl_wqe), | 725 | dev_err(dev, "allocate receive inline buffer failed\n"); |
654 | GFP_KERNEL); | ||
655 | if (!hr_qp->rq_inl_buf.wqe_list) { | ||
656 | ret = -ENOMEM; | ||
657 | goto err_out; | 726 | goto err_out; |
658 | } | 727 | } |
659 | |||
660 | hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt; | ||
661 | |||
662 | /* Firstly, allocate a list of sge space buffer */ | ||
663 | hr_qp->rq_inl_buf.wqe_list[0].sg_list = | ||
664 | kcalloc(hr_qp->rq_inl_buf.wqe_cnt, | ||
665 | init_attr->cap.max_recv_sge * | ||
666 | sizeof(struct hns_roce_rinl_sge), | ||
667 | GFP_KERNEL); | ||
668 | if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) { | ||
669 | ret = -ENOMEM; | ||
670 | goto err_wqe_list; | ||
671 | } | ||
672 | |||
673 | for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++) | ||
674 | /* Secondly, reallocate the buffer */ | ||
675 | hr_qp->rq_inl_buf.wqe_list[i].sg_list = | ||
676 | &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i * | ||
677 | init_attr->cap.max_recv_sge]; | ||
678 | } | 728 | } |
679 | 729 | ||
680 | page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; | 730 | page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; |
@@ -682,14 +732,14 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, | |||
682 | if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { | 732 | if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { |
683 | dev_err(dev, "ib_copy_from_udata error for create qp\n"); | 733 | dev_err(dev, "ib_copy_from_udata error for create qp\n"); |
684 | ret = -EFAULT; | 734 | ret = -EFAULT; |
685 | goto err_rq_sge_list; | 735 | goto err_alloc_rq_inline_buf; |
686 | } | 736 | } |
687 | 737 | ||
688 | ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, | 738 | ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, |
689 | &ucmd); | 739 | &ucmd); |
690 | if (ret) { | 740 | if (ret) { |
691 | dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n"); | 741 | dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n"); |
692 | goto err_rq_sge_list; | 742 | goto err_alloc_rq_inline_buf; |
693 | } | 743 | } |
694 | 744 | ||
695 | hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr, | 745 | hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr, |
@@ -697,7 +747,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, | |||
697 | if (IS_ERR(hr_qp->umem)) { | 747 | if (IS_ERR(hr_qp->umem)) { |
698 | dev_err(dev, "ib_umem_get error for create qp\n"); | 748 | dev_err(dev, "ib_umem_get error for create qp\n"); |
699 | ret = PTR_ERR(hr_qp->umem); | 749 | ret = PTR_ERR(hr_qp->umem); |
700 | goto err_rq_sge_list; | 750 | goto err_alloc_rq_inline_buf; |
701 | } | 751 | } |
702 | hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp, | 752 | hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp, |
703 | hr_qp->regions, ARRAY_SIZE(hr_qp->regions), | 753 | hr_qp->regions, ARRAY_SIZE(hr_qp->regions), |
@@ -758,13 +808,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, | |||
758 | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { | 808 | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { |
759 | dev_err(dev, "init_attr->create_flags error!\n"); | 809 | dev_err(dev, "init_attr->create_flags error!\n"); |
760 | ret = -EINVAL; | 810 | ret = -EINVAL; |
761 | goto err_rq_sge_list; | 811 | goto err_alloc_rq_inline_buf; |
762 | } | 812 | } |
763 | 813 | ||
764 | if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { | 814 | if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { |
765 | dev_err(dev, "init_attr->create_flags error!\n"); | 815 | dev_err(dev, "init_attr->create_flags error!\n"); |
766 | ret = -EINVAL; | 816 | ret = -EINVAL; |
767 | goto err_rq_sge_list; | 817 | goto err_alloc_rq_inline_buf; |
768 | } | 818 | } |
769 | 819 | ||
770 | /* Set SQ size */ | 820 | /* Set SQ size */ |
@@ -772,7 +822,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, | |||
772 | hr_qp); | 822 | hr_qp); |
773 | if (ret) { | 823 | if (ret) { |
774 | dev_err(dev, "hns_roce_set_kernel_sq_size error!\n"); | 824 | dev_err(dev, "hns_roce_set_kernel_sq_size error!\n"); |
775 | goto err_rq_sge_list; | 825 | goto err_alloc_rq_inline_buf; |
776 | } | 826 | } |
777 | 827 | ||
778 | /* QP doorbell register address */ | 828 | /* QP doorbell register address */ |
@@ -786,7 +836,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, | |||
786 | ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); | 836 | ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); |
787 | if (ret) { | 837 | if (ret) { |
788 | dev_err(dev, "rq record doorbell alloc failed!\n"); | 838 | dev_err(dev, "rq record doorbell alloc failed!\n"); |
789 | goto err_rq_sge_list; | 839 | goto err_alloc_rq_inline_buf; |
790 | } | 840 | } |
791 | *hr_qp->rdb.db_record = 0; | 841 | *hr_qp->rdb.db_record = 0; |
792 | hr_qp->rdb_en = 1; | 842 | hr_qp->rdb_en = 1; |
@@ -826,11 +876,18 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, | |||
826 | 876 | ||
827 | hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), | 877 | hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), |
828 | GFP_KERNEL); | 878 | GFP_KERNEL); |
829 | hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), | 879 | if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) { |
830 | GFP_KERNEL); | ||
831 | if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) { | ||
832 | ret = -ENOMEM; | 880 | ret = -ENOMEM; |
833 | goto err_wrid; | 881 | goto err_get_bufs; |
882 | } | ||
883 | |||
884 | if (hr_qp->rq.wqe_cnt) { | ||
885 | hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), | ||
886 | GFP_KERNEL); | ||
887 | if (ZERO_OR_NULL_PTR(hr_qp->rq.wrid)) { | ||
888 | ret = -ENOMEM; | ||
889 | goto err_sq_wrid; | ||
890 | } | ||
834 | } | 891 | } |
835 | } | 892 | } |
836 | 893 | ||
@@ -875,7 +932,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, | |||
875 | if (sqpn) | 932 | if (sqpn) |
876 | hr_qp->doorbell_qpn = 1; | 933 | hr_qp->doorbell_qpn = 1; |
877 | else | 934 | else |
878 | hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn); | 935 | hr_qp->doorbell_qpn = (u32)hr_qp->qpn; |
879 | 936 | ||
880 | if (udata) { | 937 | if (udata) { |
881 | ret = ib_copy_to_udata(udata, &resp, | 938 | ret = ib_copy_to_udata(udata, &resp, |
@@ -916,8 +973,8 @@ err_wrid: | |||
916 | hns_roce_qp_has_rq(init_attr)) | 973 | hns_roce_qp_has_rq(init_attr)) |
917 | hns_roce_db_unmap_user(uctx, &hr_qp->rdb); | 974 | hns_roce_db_unmap_user(uctx, &hr_qp->rdb); |
918 | } else { | 975 | } else { |
919 | kfree(hr_qp->sq.wrid); | 976 | if (hr_qp->rq.wqe_cnt) |
920 | kfree(hr_qp->rq.wrid); | 977 | kfree(hr_qp->rq.wrid); |
921 | } | 978 | } |
922 | 979 | ||
923 | err_sq_dbmap: | 980 | err_sq_dbmap: |
@@ -928,6 +985,10 @@ err_sq_dbmap: | |||
928 | hns_roce_qp_has_sq(init_attr)) | 985 | hns_roce_qp_has_sq(init_attr)) |
929 | hns_roce_db_unmap_user(uctx, &hr_qp->sdb); | 986 | hns_roce_db_unmap_user(uctx, &hr_qp->sdb); |
930 | 987 | ||
988 | err_sq_wrid: | ||
989 | if (!udata) | ||
990 | kfree(hr_qp->sq.wrid); | ||
991 | |||
931 | err_get_bufs: | 992 | err_get_bufs: |
932 | hns_roce_free_buf_list(buf_list, hr_qp->region_cnt); | 993 | hns_roce_free_buf_list(buf_list, hr_qp->region_cnt); |
933 | 994 | ||
@@ -941,13 +1002,10 @@ err_db: | |||
941 | (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) | 1002 | (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) |
942 | hns_roce_free_db(hr_dev, &hr_qp->rdb); | 1003 | hns_roce_free_db(hr_dev, &hr_qp->rdb); |
943 | 1004 | ||
944 | err_rq_sge_list: | 1005 | err_alloc_rq_inline_buf: |
945 | if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) | 1006 | if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && |
946 | kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); | 1007 | hns_roce_qp_has_rq(init_attr)) |
947 | 1008 | free_rq_inline_buf(hr_qp); | |
948 | err_wqe_list: | ||
949 | if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) | ||
950 | kfree(hr_qp->rq_inl_buf.wqe_list); | ||
951 | 1009 | ||
952 | err_out: | 1010 | err_out: |
953 | return ret; | 1011 | return ret; |
@@ -958,7 +1016,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, | |||
958 | struct ib_udata *udata) | 1016 | struct ib_udata *udata) |
959 | { | 1017 | { |
960 | struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); | 1018 | struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); |
961 | struct device *dev = hr_dev->dev; | 1019 | struct ib_device *ibdev = &hr_dev->ib_dev; |
962 | struct hns_roce_sqp *hr_sqp; | 1020 | struct hns_roce_sqp *hr_sqp; |
963 | struct hns_roce_qp *hr_qp; | 1021 | struct hns_roce_qp *hr_qp; |
964 | int ret; | 1022 | int ret; |
@@ -972,7 +1030,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, | |||
972 | ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0, | 1030 | ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0, |
973 | hr_qp); | 1031 | hr_qp); |
974 | if (ret) { | 1032 | if (ret) { |
975 | dev_err(dev, "Create RC QP failed\n"); | 1033 | ibdev_err(ibdev, "Create RC QP 0x%06lx failed(%d)\n", |
1034 | hr_qp->qpn, ret); | ||
976 | kfree(hr_qp); | 1035 | kfree(hr_qp); |
977 | return ERR_PTR(ret); | 1036 | return ERR_PTR(ret); |
978 | } | 1037 | } |
@@ -984,7 +1043,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, | |||
984 | case IB_QPT_GSI: { | 1043 | case IB_QPT_GSI: { |
985 | /* Userspace is not allowed to create special QPs: */ | 1044 | /* Userspace is not allowed to create special QPs: */ |
986 | if (udata) { | 1045 | if (udata) { |
987 | dev_err(dev, "not support usr space GSI\n"); | 1046 | ibdev_err(ibdev, "not support usr space GSI\n"); |
988 | return ERR_PTR(-EINVAL); | 1047 | return ERR_PTR(-EINVAL); |
989 | } | 1048 | } |
990 | 1049 | ||
@@ -1006,7 +1065,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, | |||
1006 | ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, | 1065 | ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, |
1007 | hr_qp->ibqp.qp_num, hr_qp); | 1066 | hr_qp->ibqp.qp_num, hr_qp); |
1008 | if (ret) { | 1067 | if (ret) { |
1009 | dev_err(dev, "Create GSI QP failed!\n"); | 1068 | ibdev_err(ibdev, "Create GSI QP failed!\n"); |
1010 | kfree(hr_sqp); | 1069 | kfree(hr_sqp); |
1011 | return ERR_PTR(ret); | 1070 | return ERR_PTR(ret); |
1012 | } | 1071 | } |
@@ -1014,7 +1073,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, | |||
1014 | break; | 1073 | break; |
1015 | } | 1074 | } |
1016 | default:{ | 1075 | default:{ |
1017 | dev_err(dev, "not support QP type %d\n", init_attr->qp_type); | 1076 | ibdev_err(ibdev, "not support QP type %d\n", |
1077 | init_attr->qp_type); | ||
1018 | return ERR_PTR(-EINVAL); | 1078 | return ERR_PTR(-EINVAL); |
1019 | } | 1079 | } |
1020 | } | 1080 | } |
@@ -1040,23 +1100,88 @@ int to_hr_qp_type(int qp_type) | |||
1040 | return transport_type; | 1100 | return transport_type; |
1041 | } | 1101 | } |
1042 | 1102 | ||
1103 | static int check_mtu_validate(struct hns_roce_dev *hr_dev, | ||
1104 | struct hns_roce_qp *hr_qp, | ||
1105 | struct ib_qp_attr *attr, int attr_mask) | ||
1106 | { | ||
1107 | enum ib_mtu active_mtu; | ||
1108 | int p; | ||
1109 | |||
1110 | p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; | ||
1111 | active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); | ||
1112 | |||
1113 | if ((hr_dev->caps.max_mtu >= IB_MTU_2048 && | ||
1114 | attr->path_mtu > hr_dev->caps.max_mtu) || | ||
1115 | attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) { | ||
1116 | ibdev_err(&hr_dev->ib_dev, | ||
1117 | "attr path_mtu(%d)invalid while modify qp", | ||
1118 | attr->path_mtu); | ||
1119 | return -EINVAL; | ||
1120 | } | ||
1121 | |||
1122 | return 0; | ||
1123 | } | ||
1124 | |||
1125 | static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||
1126 | int attr_mask) | ||
1127 | { | ||
1128 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | ||
1129 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | ||
1130 | int p; | ||
1131 | |||
1132 | if ((attr_mask & IB_QP_PORT) && | ||
1133 | (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { | ||
1134 | ibdev_err(&hr_dev->ib_dev, | ||
1135 | "attr port_num invalid.attr->port_num=%d\n", | ||
1136 | attr->port_num); | ||
1137 | return -EINVAL; | ||
1138 | } | ||
1139 | |||
1140 | if (attr_mask & IB_QP_PKEY_INDEX) { | ||
1141 | p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; | ||
1142 | if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { | ||
1143 | ibdev_err(&hr_dev->ib_dev, | ||
1144 | "attr pkey_index invalid.attr->pkey_index=%d\n", | ||
1145 | attr->pkey_index); | ||
1146 | return -EINVAL; | ||
1147 | } | ||
1148 | } | ||
1149 | |||
1150 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && | ||
1151 | attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { | ||
1152 | ibdev_err(&hr_dev->ib_dev, | ||
1153 | "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n", | ||
1154 | attr->max_rd_atomic); | ||
1155 | return -EINVAL; | ||
1156 | } | ||
1157 | |||
1158 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && | ||
1159 | attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { | ||
1160 | ibdev_err(&hr_dev->ib_dev, | ||
1161 | "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n", | ||
1162 | attr->max_dest_rd_atomic); | ||
1163 | return -EINVAL; | ||
1164 | } | ||
1165 | |||
1166 | if (attr_mask & IB_QP_PATH_MTU) | ||
1167 | return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask); | ||
1168 | |||
1169 | return 0; | ||
1170 | } | ||
1171 | |||
1043 | int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 1172 | int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
1044 | int attr_mask, struct ib_udata *udata) | 1173 | int attr_mask, struct ib_udata *udata) |
1045 | { | 1174 | { |
1046 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | 1175 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
1047 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | 1176 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
1048 | enum ib_qp_state cur_state, new_state; | 1177 | enum ib_qp_state cur_state, new_state; |
1049 | struct device *dev = hr_dev->dev; | ||
1050 | int ret = -EINVAL; | 1178 | int ret = -EINVAL; |
1051 | int p; | ||
1052 | enum ib_mtu active_mtu; | ||
1053 | 1179 | ||
1054 | mutex_lock(&hr_qp->mutex); | 1180 | mutex_lock(&hr_qp->mutex); |
1055 | 1181 | ||
1056 | cur_state = attr_mask & IB_QP_CUR_STATE ? | 1182 | cur_state = attr_mask & IB_QP_CUR_STATE ? |
1057 | attr->cur_qp_state : (enum ib_qp_state)hr_qp->state; | 1183 | attr->cur_qp_state : (enum ib_qp_state)hr_qp->state; |
1058 | new_state = attr_mask & IB_QP_STATE ? | 1184 | new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; |
1059 | attr->qp_state : cur_state; | ||
1060 | 1185 | ||
1061 | if (ibqp->uobject && | 1186 | if (ibqp->uobject && |
1062 | (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) { | 1187 | (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) { |
@@ -1066,67 +1191,27 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
1066 | if (hr_qp->rdb_en == 1) | 1191 | if (hr_qp->rdb_en == 1) |
1067 | hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); | 1192 | hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); |
1068 | } else { | 1193 | } else { |
1069 | dev_warn(dev, "flush cqe is not supported in userspace!\n"); | 1194 | ibdev_warn(&hr_dev->ib_dev, |
1195 | "flush cqe is not supported in userspace!\n"); | ||
1070 | goto out; | 1196 | goto out; |
1071 | } | 1197 | } |
1072 | } | 1198 | } |
1073 | 1199 | ||
1074 | if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, | 1200 | if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, |
1075 | attr_mask)) { | 1201 | attr_mask)) { |
1076 | dev_err(dev, "ib_modify_qp_is_ok failed\n"); | 1202 | ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n"); |
1077 | goto out; | ||
1078 | } | ||
1079 | |||
1080 | if ((attr_mask & IB_QP_PORT) && | ||
1081 | (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { | ||
1082 | dev_err(dev, "attr port_num invalid.attr->port_num=%d\n", | ||
1083 | attr->port_num); | ||
1084 | goto out; | 1203 | goto out; |
1085 | } | 1204 | } |
1086 | 1205 | ||
1087 | if (attr_mask & IB_QP_PKEY_INDEX) { | 1206 | ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask); |
1088 | p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; | 1207 | if (ret) |
1089 | if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { | ||
1090 | dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n", | ||
1091 | attr->pkey_index); | ||
1092 | goto out; | ||
1093 | } | ||
1094 | } | ||
1095 | |||
1096 | if (attr_mask & IB_QP_PATH_MTU) { | ||
1097 | p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; | ||
1098 | active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); | ||
1099 | |||
1100 | if ((hr_dev->caps.max_mtu == IB_MTU_4096 && | ||
1101 | attr->path_mtu > IB_MTU_4096) || | ||
1102 | (hr_dev->caps.max_mtu == IB_MTU_2048 && | ||
1103 | attr->path_mtu > IB_MTU_2048) || | ||
1104 | attr->path_mtu < IB_MTU_256 || | ||
1105 | attr->path_mtu > active_mtu) { | ||
1106 | dev_err(dev, "attr path_mtu(%d)invalid while modify qp", | ||
1107 | attr->path_mtu); | ||
1108 | goto out; | ||
1109 | } | ||
1110 | } | ||
1111 | |||
1112 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && | ||
1113 | attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { | ||
1114 | dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n", | ||
1115 | attr->max_rd_atomic); | ||
1116 | goto out; | ||
1117 | } | ||
1118 | |||
1119 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && | ||
1120 | attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { | ||
1121 | dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n", | ||
1122 | attr->max_dest_rd_atomic); | ||
1123 | goto out; | 1208 | goto out; |
1124 | } | ||
1125 | 1209 | ||
1126 | if (cur_state == new_state && cur_state == IB_QPS_RESET) { | 1210 | if (cur_state == new_state && cur_state == IB_QPS_RESET) { |
1127 | if (hr_dev->caps.min_wqes) { | 1211 | if (hr_dev->caps.min_wqes) { |
1128 | ret = -EPERM; | 1212 | ret = -EPERM; |
1129 | dev_err(dev, "cur_state=%d new_state=%d\n", cur_state, | 1213 | ibdev_err(&hr_dev->ib_dev, |
1214 | "cur_state=%d new_state=%d\n", cur_state, | ||
1130 | new_state); | 1215 | new_state); |
1131 | } else { | 1216 | } else { |
1132 | ret = 0; | 1217 | ret = 0; |
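check_mtu_validate(), factored out of hns_roce_modify_qp() above, condenses the old per-size checks (separate IB_MTU_2048 and IB_MTU_4096 branches) into one rule: path_mtu must be at least IB_MTU_256, no larger than the netdev's active MTU, and no larger than the device's max_mtu. A tiny standalone sketch of that clamp, relying on the consecutive ordering of the IB MTU enum values:

#include <stdio.h>

/* Mirrors enum ib_mtu: consecutive values, so they compare directly. */
enum ib_mtu { IB_MTU_256 = 1, IB_MTU_512, IB_MTU_1024, IB_MTU_2048, IB_MTU_4096 };

/*
 * Same shape as check_mtu_validate() above: reject anything outside
 * [IB_MTU_256, min(device max_mtu, netdev active_mtu)].
 */
static int path_mtu_ok(enum ib_mtu path_mtu, enum ib_mtu max_mtu,
		       enum ib_mtu active_mtu)
{
	if (path_mtu < IB_MTU_256 || path_mtu > active_mtu)
		return 0;
	if (max_mtu >= IB_MTU_2048 && path_mtu > max_mtu)
		return 0;
	return 1;
}

int main(void)
{
	/* Above the device cap: rejected. */
	printf("%d\n", path_mtu_ok(IB_MTU_4096, IB_MTU_2048, IB_MTU_4096));
	/* Within both limits: accepted. */
	printf("%d\n", path_mtu_ok(IB_MTU_1024, IB_MTU_4096, IB_MTU_1024));
	return 0;
}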
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 38bb548eaa6d..9591457eb768 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c | |||
@@ -175,6 +175,76 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev, | |||
175 | hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR); | 175 | hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR); |
176 | } | 176 | } |
177 | 177 | ||
178 | static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata, | ||
179 | int srq_buf_size) | ||
180 | { | ||
181 | struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device); | ||
182 | struct hns_roce_ib_create_srq ucmd; | ||
183 | u32 page_shift; | ||
184 | u32 npages; | ||
185 | int ret; | ||
186 | |||
187 | if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) | ||
188 | return -EFAULT; | ||
189 | |||
190 | srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0); | ||
191 | if (IS_ERR(srq->umem)) | ||
192 | return PTR_ERR(srq->umem); | ||
193 | |||
194 | npages = (ib_umem_page_count(srq->umem) + | ||
195 | (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) / | ||
196 | (1 << hr_dev->caps.srqwqe_buf_pg_sz); | ||
197 | page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; | ||
198 | ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt); | ||
199 | if (ret) | ||
200 | goto err_user_buf; | ||
201 | |||
202 | ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem); | ||
203 | if (ret) | ||
204 | goto err_user_srq_mtt; | ||
205 | |||
206 | /* config index queue BA */ | ||
207 | srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr, | ||
208 | srq->idx_que.buf_size, 0, 0); | ||
209 | if (IS_ERR(srq->idx_que.umem)) { | ||
210 | dev_err(hr_dev->dev, "ib_umem_get error for index queue\n"); | ||
211 | ret = PTR_ERR(srq->idx_que.umem); | ||
212 | goto err_user_srq_mtt; | ||
213 | } | ||
214 | |||
215 | ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->idx_que.umem), | ||
216 | PAGE_SHIFT, &srq->idx_que.mtt); | ||
217 | |||
218 | if (ret) { | ||
219 | dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n"); | ||
220 | goto err_user_idx_mtt; | ||
221 | } | ||
222 | |||
223 | ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt, | ||
224 | srq->idx_que.umem); | ||
225 | if (ret) { | ||
226 | dev_err(hr_dev->dev, | ||
227 | "hns_roce_ib_umem_write_mtt error for idx que\n"); | ||
228 | goto err_user_idx_buf; | ||
229 | } | ||
230 | |||
231 | return 0; | ||
232 | |||
233 | err_user_idx_buf: | ||
234 | hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); | ||
235 | |||
236 | err_user_idx_mtt: | ||
237 | ib_umem_release(srq->idx_que.umem); | ||
238 | |||
239 | err_user_srq_mtt: | ||
240 | hns_roce_mtt_cleanup(hr_dev, &srq->mtt); | ||
241 | |||
242 | err_user_buf: | ||
243 | ib_umem_release(srq->umem); | ||
244 | |||
245 | return ret; | ||
246 | } | ||
247 | |||
178 | static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq, | 248 | static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq, |
179 | u32 page_shift) | 249 | u32 page_shift) |
180 | { | 250 | { |
@@ -196,6 +266,93 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq, | |||
196 | return 0; | 266 | return 0; |
197 | } | 267 | } |
198 | 268 | ||
269 | static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size) | ||
270 | { | ||
271 | struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device); | ||
272 | u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; | ||
273 | int ret; | ||
274 | |||
275 | if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2, | ||
276 | &srq->buf, page_shift)) | ||
277 | return -ENOMEM; | ||
278 | |||
279 | srq->head = 0; | ||
280 | srq->tail = srq->max - 1; | ||
281 | |||
282 | ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift, | ||
283 | &srq->mtt); | ||
284 | if (ret) | ||
285 | goto err_kernel_buf; | ||
286 | |||
287 | ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf); | ||
288 | if (ret) | ||
289 | goto err_kernel_srq_mtt; | ||
290 | |||
291 | page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz; | ||
292 | ret = hns_roce_create_idx_que(srq->ibsrq.pd, srq, page_shift); | ||
293 | if (ret) { | ||
294 | dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret); | ||
295 | goto err_kernel_srq_mtt; | ||
296 | } | ||
297 | |||
298 | /* Init mtt table for idx_que */ | ||
299 | ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages, | ||
300 | srq->idx_que.idx_buf.page_shift, | ||
301 | &srq->idx_que.mtt); | ||
302 | if (ret) | ||
303 | goto err_kernel_create_idx; | ||
304 | |||
305 | /* Write buffer address into the mtt table */ | ||
306 | ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt, | ||
307 | &srq->idx_que.idx_buf); | ||
308 | if (ret) | ||
309 | goto err_kernel_idx_buf; | ||
310 | |||
311 | srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL); | ||
312 | if (!srq->wrid) { | ||
313 | ret = -ENOMEM; | ||
314 | goto err_kernel_idx_buf; | ||
315 | } | ||
316 | |||
317 | return 0; | ||
318 | |||
319 | err_kernel_idx_buf: | ||
320 | hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); | ||
321 | |||
322 | err_kernel_create_idx: | ||
323 | hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, | ||
324 | &srq->idx_que.idx_buf); | ||
325 | kfree(srq->idx_que.bitmap); | ||
326 | |||
327 | err_kernel_srq_mtt: | ||
328 | hns_roce_mtt_cleanup(hr_dev, &srq->mtt); | ||
329 | |||
330 | err_kernel_buf: | ||
331 | hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf); | ||
332 | |||
333 | return ret; | ||
334 | } | ||
335 | |||
336 | static void destroy_user_srq(struct hns_roce_dev *hr_dev, | ||
337 | struct hns_roce_srq *srq) | ||
338 | { | ||
339 | hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); | ||
340 | ib_umem_release(srq->idx_que.umem); | ||
341 | hns_roce_mtt_cleanup(hr_dev, &srq->mtt); | ||
342 | ib_umem_release(srq->umem); | ||
343 | } | ||
344 | |||
345 | static void destroy_kernel_srq(struct hns_roce_dev *hr_dev, | ||
346 | struct hns_roce_srq *srq, int srq_buf_size) | ||
347 | { | ||
348 | kvfree(srq->wrid); | ||
349 | hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); | ||
350 | hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf); | ||
351 | kfree(srq->idx_que.bitmap); | ||
352 | hns_roce_mtt_cleanup(hr_dev, &srq->mtt); | ||
353 | hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf); | ||
354 | } | ||
355 | |||
199 | int hns_roce_create_srq(struct ib_srq *ib_srq, | 356 | int hns_roce_create_srq(struct ib_srq *ib_srq, |
200 | struct ib_srq_init_attr *srq_init_attr, | 357 | struct ib_srq_init_attr *srq_init_attr, |
201 | struct ib_udata *udata) | 358 | struct ib_udata *udata) |
@@ -205,9 +362,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, | |||
205 | struct hns_roce_srq *srq = to_hr_srq(ib_srq); | 362 | struct hns_roce_srq *srq = to_hr_srq(ib_srq); |
206 | int srq_desc_size; | 363 | int srq_desc_size; |
207 | int srq_buf_size; | 364 | int srq_buf_size; |
208 | u32 page_shift; | ||
209 | int ret = 0; | 365 | int ret = 0; |
210 | u32 npages; | ||
211 | u32 cqn; | 366 | u32 cqn; |
212 | 367 | ||
213 | /* Check the actual SRQ wqe and SRQ sge num */ | 368 | /* Check the actual SRQ wqe and SRQ sge num */ |
@@ -233,115 +388,16 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, | |||
233 | srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX; | 388 | srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX; |
234 | 389 | ||
235 | if (udata) { | 390 | if (udata) { |
236 | struct hns_roce_ib_create_srq ucmd; | 391 | ret = create_user_srq(srq, udata, srq_buf_size); |
237 | |||
238 | if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) | ||
239 | return -EFAULT; | ||
240 | |||
241 | srq->umem = | ||
242 | ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0); | ||
243 | if (IS_ERR(srq->umem)) | ||
244 | return PTR_ERR(srq->umem); | ||
245 | |||
246 | if (hr_dev->caps.srqwqe_buf_pg_sz) { | ||
247 | npages = (ib_umem_page_count(srq->umem) + | ||
248 | (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) / | ||
249 | (1 << hr_dev->caps.srqwqe_buf_pg_sz); | ||
250 | page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; | ||
251 | ret = hns_roce_mtt_init(hr_dev, npages, | ||
252 | page_shift, | ||
253 | &srq->mtt); | ||
254 | } else | ||
255 | ret = hns_roce_mtt_init(hr_dev, | ||
256 | ib_umem_page_count(srq->umem), | ||
257 | PAGE_SHIFT, &srq->mtt); | ||
258 | if (ret) | ||
259 | goto err_buf; | ||
260 | |||
261 | ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem); | ||
262 | if (ret) | ||
263 | goto err_srq_mtt; | ||
264 | |||
265 | /* config index queue BA */ | ||
266 | srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr, | ||
267 | srq->idx_que.buf_size, 0, 0); | ||
268 | if (IS_ERR(srq->idx_que.umem)) { | ||
269 | dev_err(hr_dev->dev, | ||
270 | "ib_umem_get error for index queue\n"); | ||
271 | ret = PTR_ERR(srq->idx_que.umem); | ||
272 | goto err_srq_mtt; | ||
273 | } | ||
274 | |||
275 | if (hr_dev->caps.idx_buf_pg_sz) { | ||
276 | npages = (ib_umem_page_count(srq->idx_que.umem) + | ||
277 | (1 << hr_dev->caps.idx_buf_pg_sz) - 1) / | ||
278 | (1 << hr_dev->caps.idx_buf_pg_sz); | ||
279 | page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz; | ||
280 | ret = hns_roce_mtt_init(hr_dev, npages, | ||
281 | page_shift, &srq->idx_que.mtt); | ||
282 | } else { | ||
283 | ret = hns_roce_mtt_init( | ||
284 | hr_dev, ib_umem_page_count(srq->idx_que.umem), | ||
285 | PAGE_SHIFT, &srq->idx_que.mtt); | ||
286 | } | ||
287 | |||
288 | if (ret) { | 392 | if (ret) { |
289 | dev_err(hr_dev->dev, | 393 | dev_err(hr_dev->dev, "Create user srq failed\n"); |
290 | "hns_roce_mtt_init error for idx que\n"); | 394 | goto err_srq; |
291 | goto err_idx_mtt; | ||
292 | } | ||
293 | |||
294 | ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt, | ||
295 | srq->idx_que.umem); | ||
296 | if (ret) { | ||
297 | dev_err(hr_dev->dev, | ||
298 | "hns_roce_ib_umem_write_mtt error for idx que\n"); | ||
299 | goto err_idx_buf; | ||
300 | } | 395 | } |
301 | } else { | 396 | } else { |
302 | page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; | 397 | ret = create_kernel_srq(srq, srq_buf_size); |
303 | if (hns_roce_buf_alloc(hr_dev, srq_buf_size, | ||
304 | (1 << page_shift) * 2, &srq->buf, | ||
305 | page_shift)) | ||
306 | return -ENOMEM; | ||
307 | |||
308 | srq->head = 0; | ||
309 | srq->tail = srq->max - 1; | ||
310 | |||
311 | ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, | ||
312 | srq->buf.page_shift, &srq->mtt); | ||
313 | if (ret) | ||
314 | goto err_buf; | ||
315 | |||
316 | ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf); | ||
317 | if (ret) | ||
318 | goto err_srq_mtt; | ||
319 | |||
320 | page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz; | ||
321 | ret = hns_roce_create_idx_que(ib_srq->pd, srq, page_shift); | ||
322 | if (ret) { | 398 | if (ret) { |
323 | dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", | 399 | dev_err(hr_dev->dev, "Create kernel srq failed\n"); |
324 | ret); | 400 | goto err_srq; |
325 | goto err_srq_mtt; | ||
326 | } | ||
327 | |||
328 | /* Init mtt table for idx_que */ | ||
329 | ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages, | ||
330 | srq->idx_que.idx_buf.page_shift, | ||
331 | &srq->idx_que.mtt); | ||
332 | if (ret) | ||
333 | goto err_create_idx; | ||
334 | |||
335 | /* Write buffer address into the mtt table */ | ||
336 | ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt, | ||
337 | &srq->idx_que.idx_buf); | ||
338 | if (ret) | ||
339 | goto err_idx_buf; | ||
340 | |||
341 | srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL); | ||
342 | if (!srq->wrid) { | ||
343 | ret = -ENOMEM; | ||
344 | goto err_idx_buf; | ||
345 | } | 401 | } |
346 | } | 402 | } |
347 | 403 | ||
@@ -356,7 +412,6 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, | |||
356 | goto err_wrid; | 412 | goto err_wrid; |
357 | 413 | ||
358 | srq->event = hns_roce_ib_srq_event; | 414 | srq->event = hns_roce_ib_srq_event; |
359 | srq->ibsrq.ext.xrc.srq_num = srq->srqn; | ||
360 | resp.srqn = srq->srqn; | 415 | resp.srqn = srq->srqn; |
361 | 416 | ||
362 | if (udata) { | 417 | if (udata) { |
@@ -373,27 +428,12 @@ err_srqc_alloc: | |||
373 | hns_roce_srq_free(hr_dev, srq); | 428 | hns_roce_srq_free(hr_dev, srq); |
374 | 429 | ||
375 | err_wrid: | 430 | err_wrid: |
376 | kvfree(srq->wrid); | 431 | if (udata) |
377 | 432 | destroy_user_srq(hr_dev, srq); | |
378 | err_idx_buf: | 433 | else |
379 | hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); | 434 | destroy_kernel_srq(hr_dev, srq, srq_buf_size); |
380 | |||
381 | err_idx_mtt: | ||
382 | ib_umem_release(srq->idx_que.umem); | ||
383 | |||
384 | err_create_idx: | ||
385 | hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, | ||
386 | &srq->idx_que.idx_buf); | ||
387 | bitmap_free(srq->idx_que.bitmap); | ||
388 | |||
389 | err_srq_mtt: | ||
390 | hns_roce_mtt_cleanup(hr_dev, &srq->mtt); | ||
391 | |||
392 | err_buf: | ||
393 | ib_umem_release(srq->umem); | ||
394 | if (!udata) | ||
395 | hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf); | ||
396 | 435 | ||
436 | err_srq: | ||
397 | return ret; | 437 | return ret; |
398 | } | 438 | } |
399 | 439 | ||
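Note on the hunk above: the hns_roce_create_srq() rework funnels both allocation paths through create_user_srq()/create_kernel_srq() and tears them down with the matching destroy_user_srq()/destroy_kernel_srq(), so the error labels collapse to the single err_srq exit. The underlying idiom is the usual goto unwind in reverse order of allocation; a minimal standalone sketch of that pattern (plain malloc/free stand in for the mtt, umem and buffer resources, nothing here is an hns_roce API):

	#include <stdlib.h>

	/* Acquire three dependent resources; on failure release only what
	 * was already acquired, in reverse order, then return the error. */
	static int create_thing(void **a, void **b, void **c)
	{
		int ret = -1;		/* stand-in error code */

		*a = malloc(64);
		if (!*a)
			return ret;

		*b = malloc(64);
		if (!*b)
			goto err_a;

		*c = malloc(64);
		if (!*c)
			goto err_b;

		return 0;

	err_b:
		free(*b);
	err_a:
		free(*a);
		return ret;
	}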
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index d169a8031375..8056930bbe2c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c | |||
@@ -97,18 +97,7 @@ static int i40iw_query_port(struct ib_device *ibdev, | |||
97 | u8 port, | 97 | u8 port, |
98 | struct ib_port_attr *props) | 98 | struct ib_port_attr *props) |
99 | { | 99 | { |
100 | struct i40iw_device *iwdev = to_iwdev(ibdev); | ||
101 | struct net_device *netdev = iwdev->netdev; | ||
102 | |||
103 | /* props being zeroed by the caller, avoid zeroing it here */ | ||
104 | props->max_mtu = IB_MTU_4096; | ||
105 | props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); | ||
106 | |||
107 | props->lid = 1; | 100 | props->lid = 1; |
108 | if (netif_carrier_ok(iwdev->netdev)) | ||
109 | props->state = IB_PORT_ACTIVE; | ||
110 | else | ||
111 | props->state = IB_PORT_DOWN; | ||
112 | props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | | 101 | props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | |
113 | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; | 102 | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; |
114 | props->gid_tbl_len = 1; | 103 | props->gid_tbl_len = 1; |
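Note on the hunk above: the i40iw change drops the locally computed MTU and carrier-derived port state from i40iw_query_port(), leaving only the fields that are genuinely driver specific. The removed lines mapped the netdev MTU onto the IB MTU enum via ib_mtu_int_to_enum(); for reference, a standalone sketch of that mapping (the example_ names are placeholders, the numeric encoding follows the IB_MTU_* values in ib_verbs.h):

	enum example_ib_mtu {
		EXAMPLE_MTU_256  = 1,
		EXAMPLE_MTU_512  = 2,
		EXAMPLE_MTU_1024 = 3,
		EXAMPLE_MTU_2048 = 4,
		EXAMPLE_MTU_4096 = 5,
	};

	/* Round a byte MTU down to the largest IB MTU step it can carry. */
	static enum example_ib_mtu example_mtu_int_to_enum(int mtu)
	{
		if (mtu >= 4096)
			return EXAMPLE_MTU_4096;
		if (mtu >= 2048)
			return EXAMPLE_MTU_2048;
		if (mtu >= 1024)
			return EXAMPLE_MTU_1024;
		if (mtu >= 512)
			return EXAMPLE_MTU_512;
		return EXAMPLE_MTU_256;
	}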
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 8790101facb7..8d2f1e38b891 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -734,7 +734,8 @@ out: | |||
734 | 734 | ||
735 | static u8 state_to_phys_state(enum ib_port_state state) | 735 | static u8 state_to_phys_state(enum ib_port_state state) |
736 | { | 736 | { |
737 | return state == IB_PORT_ACTIVE ? 5 : 3; | 737 | return state == IB_PORT_ACTIVE ? |
738 | IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED; | ||
738 | } | 739 | } |
739 | 740 | ||
740 | static int eth_link_query_port(struct ib_device *ibdev, u8 port, | 741 | static int eth_link_query_port(struct ib_device *ibdev, u8 port, |
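Note on the hunk above: the one-line change swaps the hard-coded 5/3 for named physical-state constants; judging from the literals being replaced, LINK_UP corresponds to 5 and DISABLED to 3. A tiny sketch of the same mapping with placeholder names (only the two numeric values are taken from the removed line):

	enum example_port_phys_state {
		EXAMPLE_PHYS_STATE_DISABLED = 3,
		EXAMPLE_PHYS_STATE_LINK_UP  = 5,
	};

	/* Report LinkUp only when the logical port state is ACTIVE. */
	static unsigned char example_state_to_phys_state(int active)
	{
		return active ? EXAMPLE_PHYS_STATE_LINK_UP
			      : EXAMPLE_PHYS_STATE_DISABLED;
	}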
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 753479285ce9..6ae503cfc526 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
@@ -377,6 +377,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start, | |||
377 | * again | 377 | * again |
378 | */ | 378 | */ |
379 | if (!ib_access_writable(access_flags)) { | 379 | if (!ib_access_writable(access_flags)) { |
380 | unsigned long untagged_start = untagged_addr(start); | ||
380 | struct vm_area_struct *vma; | 381 | struct vm_area_struct *vma; |
381 | 382 | ||
382 | down_read(¤t->mm->mmap_sem); | 383 | down_read(¤t->mm->mmap_sem); |
@@ -385,9 +386,9 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start, | |||
385 | * cover the memory, but for now it requires a single vma to | 386 | * cover the memory, but for now it requires a single vma to |
386 | * entirely cover the MR to support RO mappings. | 387 | * entirely cover the MR to support RO mappings. |
387 | */ | 388 | */ |
388 | vma = find_vma(current->mm, start); | 389 | vma = find_vma(current->mm, untagged_start); |
389 | if (vma && vma->vm_end >= start + length && | 390 | if (vma && vma->vm_end >= untagged_start + length && |
390 | vma->vm_start <= start) { | 391 | vma->vm_start <= untagged_start) { |
391 | if (vma->vm_flags & VM_WRITE) | 392 | if (vma->vm_flags & VM_WRITE) |
392 | access_flags |= IB_ACCESS_LOCAL_WRITE; | 393 | access_flags |= IB_ACCESS_LOCAL_WRITE; |
393 | } else { | 394 | } else { |
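Note on the hunk above: mlx4_get_umem_mr() now untags the user-supplied start address once and uses the untagged value for the find_vma() lookup and the bounds comparison, since on architectures with pointer tagging the top bits of a user pointer are not part of the mapped address. A rough illustration of the idea (the mask is only an example; the real untagged_addr() is architecture-defined and may be a no-op):

	#include <stdint.h>

	/* Illustration only: strip a hypothetical top-byte tag so the value
	 * can be compared against VMA start/end addresses. */
	static uint64_t untag_example(uint64_t tagged)
	{
		return tagged & ((UINT64_C(1) << 56) - 1);	/* clear bits 63:56 */
	}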
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 82aff2f2fdc2..bd4aa04416c6 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -325,7 +325,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags) | |||
325 | } | 325 | } |
326 | 326 | ||
327 | static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | 327 | static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, |
328 | bool is_user, int has_rq, struct mlx4_ib_qp *qp, | 328 | bool is_user, bool has_rq, struct mlx4_ib_qp *qp, |
329 | u32 inl_recv_sz) | 329 | u32 inl_recv_sz) |
330 | { | 330 | { |
331 | /* Sanity check RQ size before proceeding */ | 331 | /* Sanity check RQ size before proceeding */ |
@@ -506,10 +506,10 @@ static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) | |||
506 | kfree(qp->sqp_proxy_rcv); | 506 | kfree(qp->sqp_proxy_rcv); |
507 | } | 507 | } |
508 | 508 | ||
509 | static int qp_has_rq(struct ib_qp_init_attr *attr) | 509 | static bool qp_has_rq(struct ib_qp_init_attr *attr) |
510 | { | 510 | { |
511 | if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT) | 511 | if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT) |
512 | return 0; | 512 | return false; |
513 | 513 | ||
514 | return !attr->srq; | 514 | return !attr->srq; |
515 | } | 515 | } |
@@ -855,12 +855,143 @@ static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context, | |||
855 | mutex_unlock(&context->wqn_ranges_mutex); | 855 | mutex_unlock(&context->wqn_ranges_mutex); |
856 | } | 856 | } |
857 | 857 | ||
858 | static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | 858 | static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, |
859 | enum mlx4_ib_source_type src, | 859 | struct ib_udata *udata, struct mlx4_ib_qp *qp) |
860 | struct ib_qp_init_attr *init_attr, | 860 | { |
861 | struct mlx4_ib_dev *dev = to_mdev(pd->device); | ||
862 | int qpn; | ||
863 | int err; | ||
864 | struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context( | ||
865 | udata, struct mlx4_ib_ucontext, ibucontext); | ||
866 | struct mlx4_ib_cq *mcq; | ||
867 | unsigned long flags; | ||
868 | int range_size; | ||
869 | struct mlx4_ib_create_wq wq; | ||
870 | size_t copy_len; | ||
871 | int shift; | ||
872 | int n; | ||
873 | |||
874 | qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; | ||
875 | |||
876 | mutex_init(&qp->mutex); | ||
877 | spin_lock_init(&qp->sq.lock); | ||
878 | spin_lock_init(&qp->rq.lock); | ||
879 | INIT_LIST_HEAD(&qp->gid_list); | ||
880 | INIT_LIST_HEAD(&qp->steering_rules); | ||
881 | |||
882 | qp->state = IB_QPS_RESET; | ||
883 | |||
884 | copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen); | ||
885 | |||
886 | if (ib_copy_from_udata(&wq, udata, copy_len)) { | ||
887 | err = -EFAULT; | ||
888 | goto err; | ||
889 | } | ||
890 | |||
891 | if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] || | ||
892 | wq.reserved[2]) { | ||
893 | pr_debug("user command isn't supported\n"); | ||
894 | err = -EOPNOTSUPP; | ||
895 | goto err; | ||
896 | } | ||
897 | |||
898 | if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) { | ||
899 | pr_debug("WQN range size must be equal or smaller than %d\n", | ||
900 | dev->dev->caps.max_rss_tbl_sz); | ||
901 | err = -EOPNOTSUPP; | ||
902 | goto err; | ||
903 | } | ||
904 | range_size = 1 << wq.log_range_size; | ||
905 | |||
906 | if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) | ||
907 | qp->flags |= MLX4_IB_QP_SCATTER_FCS; | ||
908 | |||
909 | err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz); | ||
910 | if (err) | ||
911 | goto err; | ||
912 | |||
913 | qp->sq_no_prefetch = 1; | ||
914 | qp->sq.wqe_cnt = 1; | ||
915 | qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; | ||
916 | qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + | ||
917 | (qp->sq.wqe_cnt << qp->sq.wqe_shift); | ||
918 | |||
919 | qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0, 0); | ||
920 | if (IS_ERR(qp->umem)) { | ||
921 | err = PTR_ERR(qp->umem); | ||
922 | goto err; | ||
923 | } | ||
924 | |||
925 | n = ib_umem_page_count(qp->umem); | ||
926 | shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); | ||
927 | err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); | ||
928 | |||
929 | if (err) | ||
930 | goto err_buf; | ||
931 | |||
932 | err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); | ||
933 | if (err) | ||
934 | goto err_mtt; | ||
935 | |||
936 | err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db); | ||
937 | if (err) | ||
938 | goto err_mtt; | ||
939 | qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; | ||
940 | |||
941 | err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn); | ||
942 | if (err) | ||
943 | goto err_wrid; | ||
944 | |||
945 | err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); | ||
946 | if (err) | ||
947 | goto err_qpn; | ||
948 | |||
949 | /* | ||
950 | * Hardware wants QPN written in big-endian order (after | ||
951 | * shifting) for send doorbell. Precompute this value to save | ||
952 | * a little bit when posting sends. | ||
953 | */ | ||
954 | qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); | ||
955 | |||
956 | qp->mqp.event = mlx4_ib_wq_event; | ||
957 | |||
958 | spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); | ||
959 | mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq), | ||
960 | to_mcq(init_attr->recv_cq)); | ||
961 | /* Maintain device to QPs access, needed for further handling | ||
962 | * via reset flow | ||
963 | */ | ||
964 | list_add_tail(&qp->qps_list, &dev->qp_list); | ||
965 | /* Maintain CQ to QPs access, needed for further handling | ||
966 | * via reset flow | ||
967 | */ | ||
968 | mcq = to_mcq(init_attr->send_cq); | ||
969 | list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); | ||
970 | mcq = to_mcq(init_attr->recv_cq); | ||
971 | list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); | ||
972 | mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq), | ||
973 | to_mcq(init_attr->recv_cq)); | ||
974 | spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); | ||
975 | return 0; | ||
976 | |||
977 | err_qpn: | ||
978 | mlx4_ib_release_wqn(context, qp, 0); | ||
979 | err_wrid: | ||
980 | mlx4_ib_db_unmap_user(context, &qp->db); | ||
981 | |||
982 | err_mtt: | ||
983 | mlx4_mtt_cleanup(dev->dev, &qp->mtt); | ||
984 | err_buf: | ||
985 | ib_umem_release(qp->umem); | ||
986 | err: | ||
987 | return err; | ||
988 | } | ||
989 | |||
990 | static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, | ||
861 | struct ib_udata *udata, int sqpn, | 991 | struct ib_udata *udata, int sqpn, |
862 | struct mlx4_ib_qp **caller_qp) | 992 | struct mlx4_ib_qp **caller_qp) |
863 | { | 993 | { |
994 | struct mlx4_ib_dev *dev = to_mdev(pd->device); | ||
864 | int qpn; | 995 | int qpn; |
865 | int err; | 996 | int err; |
866 | struct mlx4_ib_sqp *sqp = NULL; | 997 | struct mlx4_ib_sqp *sqp = NULL; |
@@ -870,7 +1001,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
870 | enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type; | 1001 | enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type; |
871 | struct mlx4_ib_cq *mcq; | 1002 | struct mlx4_ib_cq *mcq; |
872 | unsigned long flags; | 1003 | unsigned long flags; |
873 | int range_size = 0; | ||
874 | 1004 | ||
875 | /* When tunneling special qps, we use a plain UD qp */ | 1005 | /* When tunneling special qps, we use a plain UD qp */ |
876 | if (sqpn) { | 1006 | if (sqpn) { |
@@ -921,15 +1051,13 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
921 | if (!sqp) | 1051 | if (!sqp) |
922 | return -ENOMEM; | 1052 | return -ENOMEM; |
923 | qp = &sqp->qp; | 1053 | qp = &sqp->qp; |
924 | qp->pri.vid = 0xFFFF; | ||
925 | qp->alt.vid = 0xFFFF; | ||
926 | } else { | 1054 | } else { |
927 | qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL); | 1055 | qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL); |
928 | if (!qp) | 1056 | if (!qp) |
929 | return -ENOMEM; | 1057 | return -ENOMEM; |
930 | qp->pri.vid = 0xFFFF; | ||
931 | qp->alt.vid = 0xFFFF; | ||
932 | } | 1058 | } |
1059 | qp->pri.vid = 0xFFFF; | ||
1060 | qp->alt.vid = 0xFFFF; | ||
933 | } else | 1061 | } else |
934 | qp = *caller_qp; | 1062 | qp = *caller_qp; |
935 | 1063 | ||
@@ -941,48 +1069,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
941 | INIT_LIST_HEAD(&qp->gid_list); | 1069 | INIT_LIST_HEAD(&qp->gid_list); |
942 | INIT_LIST_HEAD(&qp->steering_rules); | 1070 | INIT_LIST_HEAD(&qp->steering_rules); |
943 | 1071 | ||
944 | qp->state = IB_QPS_RESET; | 1072 | qp->state = IB_QPS_RESET; |
945 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) | 1073 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) |
946 | qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); | 1074 | qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); |
947 | 1075 | ||
948 | |||
949 | if (udata) { | 1076 | if (udata) { |
950 | union { | 1077 | struct mlx4_ib_create_qp ucmd; |
951 | struct mlx4_ib_create_qp qp; | ||
952 | struct mlx4_ib_create_wq wq; | ||
953 | } ucmd; | ||
954 | size_t copy_len; | 1078 | size_t copy_len; |
955 | int shift; | 1079 | int shift; |
956 | int n; | 1080 | int n; |
957 | 1081 | ||
958 | copy_len = (src == MLX4_IB_QP_SRC) ? | 1082 | copy_len = sizeof(struct mlx4_ib_create_qp); |
959 | sizeof(struct mlx4_ib_create_qp) : | ||
960 | min(sizeof(struct mlx4_ib_create_wq), udata->inlen); | ||
961 | 1083 | ||
962 | if (ib_copy_from_udata(&ucmd, udata, copy_len)) { | 1084 | if (ib_copy_from_udata(&ucmd, udata, copy_len)) { |
963 | err = -EFAULT; | 1085 | err = -EFAULT; |
964 | goto err; | 1086 | goto err; |
965 | } | 1087 | } |
966 | 1088 | ||
967 | if (src == MLX4_IB_RWQ_SRC) { | 1089 | qp->inl_recv_sz = ucmd.inl_recv_sz; |
968 | if (ucmd.wq.comp_mask || ucmd.wq.reserved[0] || | ||
969 | ucmd.wq.reserved[1] || ucmd.wq.reserved[2]) { | ||
970 | pr_debug("user command isn't supported\n"); | ||
971 | err = -EOPNOTSUPP; | ||
972 | goto err; | ||
973 | } | ||
974 | |||
975 | if (ucmd.wq.log_range_size > | ||
976 | ilog2(dev->dev->caps.max_rss_tbl_sz)) { | ||
977 | pr_debug("WQN range size must be equal or smaller than %d\n", | ||
978 | dev->dev->caps.max_rss_tbl_sz); | ||
979 | err = -EOPNOTSUPP; | ||
980 | goto err; | ||
981 | } | ||
982 | range_size = 1 << ucmd.wq.log_range_size; | ||
983 | } else { | ||
984 | qp->inl_recv_sz = ucmd.qp.inl_recv_sz; | ||
985 | } | ||
986 | 1090 | ||
987 | if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) { | 1091 | if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) { |
988 | if (!(dev->dev->caps.flags & | 1092 | if (!(dev->dev->caps.flags & |
@@ -1000,30 +1104,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
1000 | if (err) | 1104 | if (err) |
1001 | goto err; | 1105 | goto err; |
1002 | 1106 | ||
1003 | if (src == MLX4_IB_QP_SRC) { | 1107 | qp->sq_no_prefetch = ucmd.sq_no_prefetch; |
1004 | qp->sq_no_prefetch = ucmd.qp.sq_no_prefetch; | ||
1005 | 1108 | ||
1006 | err = set_user_sq_size(dev, qp, | 1109 | err = set_user_sq_size(dev, qp, &ucmd); |
1007 | (struct mlx4_ib_create_qp *) | 1110 | if (err) |
1008 | &ucmd); | 1111 | goto err; |
1009 | if (err) | ||
1010 | goto err; | ||
1011 | } else { | ||
1012 | qp->sq_no_prefetch = 1; | ||
1013 | qp->sq.wqe_cnt = 1; | ||
1014 | qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; | ||
1015 | /* Allocated buffer expects to have at least that SQ | ||
1016 | * size. | ||
1017 | */ | ||
1018 | qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + | ||
1019 | (qp->sq.wqe_cnt << qp->sq.wqe_shift); | ||
1020 | } | ||
1021 | 1112 | ||
1022 | qp->umem = | 1113 | qp->umem = |
1023 | ib_umem_get(udata, | 1114 | ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0, 0); |
1024 | (src == MLX4_IB_QP_SRC) ? ucmd.qp.buf_addr : | ||
1025 | ucmd.wq.buf_addr, | ||
1026 | qp->buf_size, 0, 0); | ||
1027 | if (IS_ERR(qp->umem)) { | 1115 | if (IS_ERR(qp->umem)) { |
1028 | err = PTR_ERR(qp->umem); | 1116 | err = PTR_ERR(qp->umem); |
1029 | goto err; | 1117 | goto err; |
@@ -1041,11 +1129,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
1041 | goto err_mtt; | 1129 | goto err_mtt; |
1042 | 1130 | ||
1043 | if (qp_has_rq(init_attr)) { | 1131 | if (qp_has_rq(init_attr)) { |
1044 | err = mlx4_ib_db_map_user(udata, | 1132 | err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db); |
1045 | (src == MLX4_IB_QP_SRC) ? | ||
1046 | ucmd.qp.db_addr : | ||
1047 | ucmd.wq.db_addr, | ||
1048 | &qp->db); | ||
1049 | if (err) | 1133 | if (err) |
1050 | goto err_mtt; | 1134 | goto err_mtt; |
1051 | } | 1135 | } |
@@ -1115,10 +1199,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
1115 | goto err_wrid; | 1199 | goto err_wrid; |
1116 | } | 1200 | } |
1117 | } | 1201 | } |
1118 | } else if (src == MLX4_IB_RWQ_SRC) { | ||
1119 | err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn); | ||
1120 | if (err) | ||
1121 | goto err_wrid; | ||
1122 | } else { | 1202 | } else { |
1123 | /* Raw packet QPNs may not have bits 6,7 set in their qp_num; | 1203 | /* Raw packet QPNs may not have bits 6,7 set in their qp_num; |
1124 | * otherwise, the WQE BlueFlame setup flow wrongly causes | 1204 | * otherwise, the WQE BlueFlame setup flow wrongly causes |
@@ -1157,8 +1237,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
1157 | */ | 1237 | */ |
1158 | qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); | 1238 | qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); |
1159 | 1239 | ||
1160 | qp->mqp.event = (src == MLX4_IB_QP_SRC) ? mlx4_ib_qp_event : | 1240 | qp->mqp.event = mlx4_ib_qp_event; |
1161 | mlx4_ib_wq_event; | ||
1162 | 1241 | ||
1163 | if (!*caller_qp) | 1242 | if (!*caller_qp) |
1164 | *caller_qp = qp; | 1243 | *caller_qp = qp; |
@@ -1186,8 +1265,6 @@ err_qpn: | |||
1186 | if (!sqpn) { | 1265 | if (!sqpn) { |
1187 | if (qp->flags & MLX4_IB_QP_NETIF) | 1266 | if (qp->flags & MLX4_IB_QP_NETIF) |
1188 | mlx4_ib_steer_qp_free(dev, qpn, 1); | 1267 | mlx4_ib_steer_qp_free(dev, qpn, 1); |
1189 | else if (src == MLX4_IB_RWQ_SRC) | ||
1190 | mlx4_ib_release_wqn(context, qp, 0); | ||
1191 | else | 1268 | else |
1192 | mlx4_qp_release_range(dev->dev, qpn, 1); | 1269 | mlx4_qp_release_range(dev->dev, qpn, 1); |
1193 | } | 1270 | } |
@@ -1518,8 +1595,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, | |||
1518 | /* fall through */ | 1595 | /* fall through */ |
1519 | case IB_QPT_UD: | 1596 | case IB_QPT_UD: |
1520 | { | 1597 | { |
1521 | err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC, | 1598 | err = create_qp_common(pd, init_attr, udata, 0, &qp); |
1522 | init_attr, udata, 0, &qp); | ||
1523 | if (err) { | 1599 | if (err) { |
1524 | kfree(qp); | 1600 | kfree(qp); |
1525 | return ERR_PTR(err); | 1601 | return ERR_PTR(err); |
@@ -1549,8 +1625,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, | |||
1549 | sqpn = get_sqp_num(to_mdev(pd->device), init_attr); | 1625 | sqpn = get_sqp_num(to_mdev(pd->device), init_attr); |
1550 | } | 1626 | } |
1551 | 1627 | ||
1552 | err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC, | 1628 | err = create_qp_common(pd, init_attr, udata, sqpn, &qp); |
1553 | init_attr, udata, sqpn, &qp); | ||
1554 | if (err) | 1629 | if (err) |
1555 | return ERR_PTR(err); | 1630 | return ERR_PTR(err); |
1556 | 1631 | ||
@@ -4047,8 +4122,8 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd, | |||
4047 | struct ib_wq_init_attr *init_attr, | 4122 | struct ib_wq_init_attr *init_attr, |
4048 | struct ib_udata *udata) | 4123 | struct ib_udata *udata) |
4049 | { | 4124 | { |
4050 | struct mlx4_ib_dev *dev; | 4125 | struct mlx4_dev *dev = to_mdev(pd->device)->dev; |
4051 | struct ib_qp_init_attr ib_qp_init_attr; | 4126 | struct ib_qp_init_attr ib_qp_init_attr = {}; |
4052 | struct mlx4_ib_qp *qp; | 4127 | struct mlx4_ib_qp *qp; |
4053 | struct mlx4_ib_create_wq ucmd; | 4128 | struct mlx4_ib_create_wq ucmd; |
4054 | int err, required_cmd_sz; | 4129 | int err, required_cmd_sz; |
@@ -4073,14 +4148,13 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd, | |||
4073 | if (udata->outlen) | 4148 | if (udata->outlen) |
4074 | return ERR_PTR(-EOPNOTSUPP); | 4149 | return ERR_PTR(-EOPNOTSUPP); |
4075 | 4150 | ||
4076 | dev = to_mdev(pd->device); | ||
4077 | |||
4078 | if (init_attr->wq_type != IB_WQT_RQ) { | 4151 | if (init_attr->wq_type != IB_WQT_RQ) { |
4079 | pr_debug("unsupported wq type %d\n", init_attr->wq_type); | 4152 | pr_debug("unsupported wq type %d\n", init_attr->wq_type); |
4080 | return ERR_PTR(-EOPNOTSUPP); | 4153 | return ERR_PTR(-EOPNOTSUPP); |
4081 | } | 4154 | } |
4082 | 4155 | ||
4083 | if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS) { | 4156 | if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS || |
4157 | !(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) { | ||
4084 | pr_debug("unsupported create_flags %u\n", | 4158 | pr_debug("unsupported create_flags %u\n", |
4085 | init_attr->create_flags); | 4159 | init_attr->create_flags); |
4086 | return ERR_PTR(-EOPNOTSUPP); | 4160 | return ERR_PTR(-EOPNOTSUPP); |
@@ -4093,7 +4167,6 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd, | |||
4093 | qp->pri.vid = 0xFFFF; | 4167 | qp->pri.vid = 0xFFFF; |
4094 | qp->alt.vid = 0xFFFF; | 4168 | qp->alt.vid = 0xFFFF; |
4095 | 4169 | ||
4096 | memset(&ib_qp_init_attr, 0, sizeof(ib_qp_init_attr)); | ||
4097 | ib_qp_init_attr.qp_context = init_attr->wq_context; | 4170 | ib_qp_init_attr.qp_context = init_attr->wq_context; |
4098 | ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET; | 4171 | ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET; |
4099 | ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr; | 4172 | ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr; |
@@ -4104,8 +4177,7 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd, | |||
4104 | if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) | 4177 | if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) |
4105 | ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS; | 4178 | ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS; |
4106 | 4179 | ||
4107 | err = create_qp_common(dev, pd, MLX4_IB_RWQ_SRC, &ib_qp_init_attr, | 4180 | err = create_rq(pd, &ib_qp_init_attr, udata, qp); |
4108 | udata, 0, &qp); | ||
4109 | if (err) { | 4181 | if (err) { |
4110 | kfree(qp); | 4182 | kfree(qp); |
4111 | return ERR_PTR(err); | 4183 | return ERR_PTR(err); |
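Note on the hunks above: in the create_rq() path split out of create_qp_common(), the send queue is pinned to a single WQE of the minimum stride and the user buffer is sized as the receive and send regions laid out back to back, which is the shift arithmetic visible in the hunk. A self-contained sketch of that sizing:

	#include <stdio.h>

	/* Each queue occupies wqe_cnt << wqe_shift bytes; the RQ region is
	 * followed immediately by the (here minimal) SQ region. */
	static unsigned long qp_buf_size(unsigned int rq_wqe_cnt,
					 unsigned int rq_wqe_shift,
					 unsigned int sq_wqe_cnt,
					 unsigned int sq_wqe_shift)
	{
		return ((unsigned long)rq_wqe_cnt << rq_wqe_shift) +
		       ((unsigned long)sq_wqe_cnt << sq_wqe_shift);
	}

	int main(void)
	{
		/* e.g. 256 RQ WQEs of 64 bytes plus one 64-byte SQ WQE */
		printf("%lu\n", qp_buf_size(256, 6, 1, 6));
		return 0;
	}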
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 25b6482c5368..59022b744144 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c | |||
@@ -233,6 +233,8 @@ static bool is_legacy_obj_event_num(u16 event_num) | |||
233 | case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: | 233 | case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: |
234 | case MLX5_EVENT_TYPE_DCT_DRAINED: | 234 | case MLX5_EVENT_TYPE_DCT_DRAINED: |
235 | case MLX5_EVENT_TYPE_COMP: | 235 | case MLX5_EVENT_TYPE_COMP: |
236 | case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION: | ||
237 | case MLX5_EVENT_TYPE_XRQ_ERROR: | ||
236 | return true; | 238 | return true; |
237 | default: | 239 | default: |
238 | return false; | 240 | return false; |
@@ -315,8 +317,10 @@ static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe) | |||
315 | case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: | 317 | case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: |
316 | return eqe->data.qp_srq.type; | 318 | return eqe->data.qp_srq.type; |
317 | case MLX5_EVENT_TYPE_CQ_ERROR: | 319 | case MLX5_EVENT_TYPE_CQ_ERROR: |
320 | case MLX5_EVENT_TYPE_XRQ_ERROR: | ||
318 | return 0; | 321 | return 0; |
319 | case MLX5_EVENT_TYPE_DCT_DRAINED: | 322 | case MLX5_EVENT_TYPE_DCT_DRAINED: |
323 | case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION: | ||
320 | return MLX5_EVENT_QUEUE_TYPE_DCT; | 324 | return MLX5_EVENT_QUEUE_TYPE_DCT; |
321 | default: | 325 | default: |
322 | return MLX5_GET(affiliated_event_header, &eqe->data, obj_type); | 326 | return MLX5_GET(affiliated_event_header, &eqe->data, obj_type); |
@@ -542,6 +546,8 @@ static u64 devx_get_obj_id(const void *in) | |||
542 | break; | 546 | break; |
543 | case MLX5_CMD_OP_ARM_XRQ: | 547 | case MLX5_CMD_OP_ARM_XRQ: |
544 | case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: | 548 | case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: |
549 | case MLX5_CMD_OP_RELEASE_XRQ_ERROR: | ||
550 | case MLX5_CMD_OP_MODIFY_XRQ: | ||
545 | obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ, | 551 | obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ, |
546 | MLX5_GET(arm_xrq_in, in, xrqn)); | 552 | MLX5_GET(arm_xrq_in, in, xrqn)); |
547 | break; | 553 | break; |
@@ -776,6 +782,14 @@ static bool devx_is_obj_create_cmd(const void *in, u16 *opcode) | |||
776 | return true; | 782 | return true; |
777 | return false; | 783 | return false; |
778 | } | 784 | } |
785 | case MLX5_CMD_OP_CREATE_PSV: | ||
786 | { | ||
787 | u8 num_psv = MLX5_GET(create_psv_in, in, num_psv); | ||
788 | |||
789 | if (num_psv == 1) | ||
790 | return true; | ||
791 | return false; | ||
792 | } | ||
779 | default: | 793 | default: |
780 | return false; | 794 | return false; |
781 | } | 795 | } |
@@ -810,6 +824,8 @@ static bool devx_is_obj_modify_cmd(const void *in) | |||
810 | case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: | 824 | case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: |
811 | case MLX5_CMD_OP_ARM_XRQ: | 825 | case MLX5_CMD_OP_ARM_XRQ: |
812 | case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: | 826 | case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: |
827 | case MLX5_CMD_OP_RELEASE_XRQ_ERROR: | ||
828 | case MLX5_CMD_OP_MODIFY_XRQ: | ||
813 | return true; | 829 | return true; |
814 | case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: | 830 | case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: |
815 | { | 831 | { |
@@ -1216,6 +1232,12 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din, | |||
1216 | case MLX5_CMD_OP_ALLOC_XRCD: | 1232 | case MLX5_CMD_OP_ALLOC_XRCD: |
1217 | MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD); | 1233 | MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD); |
1218 | break; | 1234 | break; |
1235 | case MLX5_CMD_OP_CREATE_PSV: | ||
1236 | MLX5_SET(general_obj_in_cmd_hdr, din, opcode, | ||
1237 | MLX5_CMD_OP_DESTROY_PSV); | ||
1238 | MLX5_SET(destroy_psv_in, din, psvn, | ||
1239 | MLX5_GET(create_psv_out, out, psv0_index)); | ||
1240 | break; | ||
1219 | default: | 1241 | default: |
1220 | /* The entry must match to one of the devx_is_obj_create_cmd */ | 1242 | /* The entry must match to one of the devx_is_obj_create_cmd */ |
1221 | WARN_ON(true); | 1243 | WARN_ON(true); |
@@ -2286,7 +2308,11 @@ static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data) | |||
2286 | case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: | 2308 | case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: |
2287 | obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; | 2309 | obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; |
2288 | break; | 2310 | break; |
2311 | case MLX5_EVENT_TYPE_XRQ_ERROR: | ||
2312 | obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff; | ||
2313 | break; | ||
2289 | case MLX5_EVENT_TYPE_DCT_DRAINED: | 2314 | case MLX5_EVENT_TYPE_DCT_DRAINED: |
2315 | case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION: | ||
2290 | obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; | 2316 | obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; |
2291 | break; | 2317 | break; |
2292 | case MLX5_EVENT_TYPE_CQ_ERROR: | 2318 | case MLX5_EVENT_TYPE_CQ_ERROR: |
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c index 1c8f04abee0c..b198ff10cde9 100644 --- a/drivers/infiniband/hw/mlx5/flow.c +++ b/drivers/infiniband/hw/mlx5/flow.c | |||
@@ -32,6 +32,9 @@ mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type, | |||
32 | case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB: | 32 | case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB: |
33 | *namespace = MLX5_FLOW_NAMESPACE_FDB; | 33 | *namespace = MLX5_FLOW_NAMESPACE_FDB; |
34 | break; | 34 | break; |
35 | case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX: | ||
36 | *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX; | ||
37 | break; | ||
35 | default: | 38 | default: |
36 | return -EINVAL; | 39 | return -EINVAL; |
37 | } | 40 | } |
@@ -101,6 +104,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( | |||
101 | if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx) | 104 | if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx) |
102 | return -EINVAL; | 105 | return -EINVAL; |
103 | 106 | ||
107 | /* Allow only DEVX object or QP as dest when inserting to RDMA_RX */ | ||
108 | if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) && | ||
109 | ((!dest_devx && !dest_qp) || (dest_devx && dest_qp))) | ||
110 | return -EINVAL; | ||
111 | |||
104 | if (dest_devx) { | 112 | if (dest_devx) { |
105 | devx_obj = uverbs_attr_get_obj( | 113 | devx_obj = uverbs_attr_get_obj( |
106 | attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX); | 114 | attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX); |
@@ -112,8 +120,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( | |||
112 | */ | 120 | */ |
113 | if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type)) | 121 | if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type)) |
114 | return -EINVAL; | 122 | return -EINVAL; |
115 | /* Allow only flow table as dest when inserting to FDB */ | 123 | /* Allow only flow table as dest when inserting to FDB or RDMA_RX */ |
116 | if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && | 124 | if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB || |
125 | fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) && | ||
117 | dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) | 126 | dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) |
118 | return -EINVAL; | 127 | return -EINVAL; |
119 | } else if (dest_qp) { | 128 | } else if (dest_qp) { |
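Note on the hunk above: the flow.c additions admit the RDMA_RX namespace and tighten its destination rules: exactly one of a DEVX object or a QP must be supplied, and a DEVX destination must resolve to a flow table, the same restriction already applied to FDB. Those checks restated as a standalone sketch:

	#include <stdbool.h>

	/* Exactly one destination kind, and a DEVX destination must be a
	 * flow table (mirrors the two conditions added in the hunk above). */
	static bool rdma_rx_dest_ok(bool has_devx, bool has_qp,
				    bool devx_is_flow_table)
	{
		if (has_devx == has_qp)		/* none or both supplied */
			return false;
		if (has_devx && !devx_is_flow_table)
			return false;
		return true;
	}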
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index bface798ee59..831539419c30 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -535,7 +535,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, | |||
535 | props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg); | 535 | props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg); |
536 | props->pkey_tbl_len = 1; | 536 | props->pkey_tbl_len = 1; |
537 | props->state = IB_PORT_DOWN; | 537 | props->state = IB_PORT_DOWN; |
538 | props->phys_state = 3; | 538 | props->phys_state = IB_PORT_PHYS_STATE_DISABLED; |
539 | 539 | ||
540 | mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr); | 540 | mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr); |
541 | props->qkey_viol_cntr = qkey_viol_cntr; | 541 | props->qkey_viol_cntr = qkey_viol_cntr; |
@@ -561,7 +561,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, | |||
561 | 561 | ||
562 | if (netif_running(ndev) && netif_carrier_ok(ndev)) { | 562 | if (netif_running(ndev) && netif_carrier_ok(ndev)) { |
563 | props->state = IB_PORT_ACTIVE; | 563 | props->state = IB_PORT_ACTIVE; |
564 | props->phys_state = 5; | 564 | props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; |
565 | } | 565 | } |
566 | 566 | ||
567 | ndev_ib_mtu = iboe_get_mtu(ndev->mtu); | 567 | ndev_ib_mtu = iboe_get_mtu(ndev->mtu); |
@@ -3971,6 +3971,11 @@ _get_flow_table(struct mlx5_ib_dev *dev, | |||
3971 | esw_encap) | 3971 | esw_encap) |
3972 | flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; | 3972 | flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; |
3973 | priority = FDB_BYPASS_PATH; | 3973 | priority = FDB_BYPASS_PATH; |
3974 | } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) { | ||
3975 | max_table_size = | ||
3976 | BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, | ||
3977 | log_max_ft_size)); | ||
3978 | priority = fs_matcher->priority; | ||
3974 | } | 3979 | } |
3975 | 3980 | ||
3976 | max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES); | 3981 | max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES); |
@@ -3985,6 +3990,8 @@ _get_flow_table(struct mlx5_ib_dev *dev, | |||
3985 | prio = &dev->flow_db->egress_prios[priority]; | 3990 | prio = &dev->flow_db->egress_prios[priority]; |
3986 | else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) | 3991 | else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) |
3987 | prio = &dev->flow_db->fdb; | 3992 | prio = &dev->flow_db->fdb; |
3993 | else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) | ||
3994 | prio = &dev->flow_db->rdma_rx[priority]; | ||
3988 | 3995 | ||
3989 | if (!prio) | 3996 | if (!prio) |
3990 | return ERR_PTR(-EINVAL); | 3997 | return ERR_PTR(-EINVAL); |
@@ -5326,11 +5333,21 @@ static const struct mlx5_ib_counter ext_ppcnt_cnts[] = { | |||
5326 | INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated), | 5333 | INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated), |
5327 | }; | 5334 | }; |
5328 | 5335 | ||
5336 | static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev) | ||
5337 | { | ||
5338 | return MLX5_ESWITCH_MANAGER(mdev) && | ||
5339 | mlx5_ib_eswitch_mode(mdev->priv.eswitch) == | ||
5340 | MLX5_ESWITCH_OFFLOADS; | ||
5341 | } | ||
5342 | |||
5329 | static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) | 5343 | static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) |
5330 | { | 5344 | { |
5345 | int num_cnt_ports; | ||
5331 | int i; | 5346 | int i; |
5332 | 5347 | ||
5333 | for (i = 0; i < dev->num_ports; i++) { | 5348 | num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports; |
5349 | |||
5350 | for (i = 0; i < num_cnt_ports; i++) { | ||
5334 | if (dev->port[i].cnts.set_id_valid) | 5351 | if (dev->port[i].cnts.set_id_valid) |
5335 | mlx5_core_dealloc_q_counter(dev->mdev, | 5352 | mlx5_core_dealloc_q_counter(dev->mdev, |
5336 | dev->port[i].cnts.set_id); | 5353 | dev->port[i].cnts.set_id); |
@@ -5432,13 +5449,15 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, | |||
5432 | 5449 | ||
5433 | static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) | 5450 | static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) |
5434 | { | 5451 | { |
5452 | int num_cnt_ports; | ||
5435 | int err = 0; | 5453 | int err = 0; |
5436 | int i; | 5454 | int i; |
5437 | bool is_shared; | 5455 | bool is_shared; |
5438 | 5456 | ||
5439 | is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0; | 5457 | is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0; |
5458 | num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports; | ||
5440 | 5459 | ||
5441 | for (i = 0; i < dev->num_ports; i++) { | 5460 | for (i = 0; i < num_cnt_ports; i++) { |
5442 | err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts); | 5461 | err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts); |
5443 | if (err) | 5462 | if (err) |
5444 | goto err_alloc; | 5463 | goto err_alloc; |
@@ -5458,7 +5477,6 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) | |||
5458 | } | 5477 | } |
5459 | dev->port[i].cnts.set_id_valid = true; | 5478 | dev->port[i].cnts.set_id_valid = true; |
5460 | } | 5479 | } |
5461 | |||
5462 | return 0; | 5480 | return 0; |
5463 | 5481 | ||
5464 | err_alloc: | 5482 | err_alloc: |
@@ -5466,25 +5484,50 @@ err_alloc: | |||
5466 | return err; | 5484 | return err; |
5467 | } | 5485 | } |
5468 | 5486 | ||
5487 | static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev, | ||
5488 | u8 port_num) | ||
5489 | { | ||
5490 | return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts : | ||
5491 | &dev->port[port_num].cnts; | ||
5492 | } | ||
5493 | |||
5494 | /** | ||
5495 | * mlx5_ib_get_counters_id - Returns counters id to use for device+port | ||
5496 | * @dev: Pointer to mlx5 IB device | ||
5497 | * @port_num: Zero based port number | ||
5498 | * | ||
5499 | * mlx5_ib_get_counters_id() Returns counters set id to use for given | ||
5500 | * device port combination in switchdev and non switchdev mode of the | ||
5501 | * parent device. | ||
5502 | */ | ||
5503 | u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num) | ||
5504 | { | ||
5505 | const struct mlx5_ib_counters *cnts = get_counters(dev, port_num); | ||
5506 | |||
5507 | return cnts->set_id; | ||
5508 | } | ||
5509 | |||
5469 | static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, | 5510 | static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, |
5470 | u8 port_num) | 5511 | u8 port_num) |
5471 | { | 5512 | { |
5472 | struct mlx5_ib_dev *dev = to_mdev(ibdev); | 5513 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
5473 | struct mlx5_ib_port *port = &dev->port[port_num - 1]; | 5514 | const struct mlx5_ib_counters *cnts; |
5515 | bool is_switchdev = is_mdev_switchdev_mode(dev->mdev); | ||
5474 | 5516 | ||
5475 | /* We support only per port stats */ | 5517 | if ((is_switchdev && port_num) || (!is_switchdev && !port_num)) |
5476 | if (port_num == 0) | ||
5477 | return NULL; | 5518 | return NULL; |
5478 | 5519 | ||
5479 | return rdma_alloc_hw_stats_struct(port->cnts.names, | 5520 | cnts = get_counters(dev, port_num - 1); |
5480 | port->cnts.num_q_counters + | 5521 | |
5481 | port->cnts.num_cong_counters + | 5522 | return rdma_alloc_hw_stats_struct(cnts->names, |
5482 | port->cnts.num_ext_ppcnt_counters, | 5523 | cnts->num_q_counters + |
5524 | cnts->num_cong_counters + | ||
5525 | cnts->num_ext_ppcnt_counters, | ||
5483 | RDMA_HW_STATS_DEFAULT_LIFESPAN); | 5526 | RDMA_HW_STATS_DEFAULT_LIFESPAN); |
5484 | } | 5527 | } |
5485 | 5528 | ||
5486 | static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev, | 5529 | static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev, |
5487 | struct mlx5_ib_port *port, | 5530 | const struct mlx5_ib_counters *cnts, |
5488 | struct rdma_hw_stats *stats, | 5531 | struct rdma_hw_stats *stats, |
5489 | u16 set_id) | 5532 | u16 set_id) |
5490 | { | 5533 | { |
@@ -5501,8 +5544,8 @@ static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev, | |||
5501 | if (ret) | 5544 | if (ret) |
5502 | goto free; | 5545 | goto free; |
5503 | 5546 | ||
5504 | for (i = 0; i < port->cnts.num_q_counters; i++) { | 5547 | for (i = 0; i < cnts->num_q_counters; i++) { |
5505 | val = *(__be32 *)(out + port->cnts.offsets[i]); | 5548 | val = *(__be32 *)(out + cnts->offsets[i]); |
5506 | stats->value[i] = (u64)be32_to_cpu(val); | 5549 | stats->value[i] = (u64)be32_to_cpu(val); |
5507 | } | 5550 | } |
5508 | 5551 | ||
@@ -5512,10 +5555,10 @@ free: | |||
5512 | } | 5555 | } |
5513 | 5556 | ||
5514 | static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev, | 5557 | static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev, |
5515 | struct mlx5_ib_port *port, | 5558 | const struct mlx5_ib_counters *cnts, |
5516 | struct rdma_hw_stats *stats) | 5559 | struct rdma_hw_stats *stats) |
5517 | { | 5560 | { |
5518 | int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters; | 5561 | int offset = cnts->num_q_counters + cnts->num_cong_counters; |
5519 | int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); | 5562 | int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); |
5520 | int ret, i; | 5563 | int ret, i; |
5521 | void *out; | 5564 | void *out; |
@@ -5528,12 +5571,10 @@ static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev, | |||
5528 | if (ret) | 5571 | if (ret) |
5529 | goto free; | 5572 | goto free; |
5530 | 5573 | ||
5531 | for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) { | 5574 | for (i = 0; i < cnts->num_ext_ppcnt_counters; i++) |
5532 | stats->value[i + offset] = | 5575 | stats->value[i + offset] = |
5533 | be64_to_cpup((__be64 *)(out + | 5576 | be64_to_cpup((__be64 *)(out + |
5534 | port->cnts.offsets[i + offset])); | 5577 | cnts->offsets[i + offset])); |
5535 | } | ||
5536 | |||
5537 | free: | 5578 | free: |
5538 | kvfree(out); | 5579 | kvfree(out); |
5539 | return ret; | 5580 | return ret; |
@@ -5544,7 +5585,7 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, | |||
5544 | u8 port_num, int index) | 5585 | u8 port_num, int index) |
5545 | { | 5586 | { |
5546 | struct mlx5_ib_dev *dev = to_mdev(ibdev); | 5587 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
5547 | struct mlx5_ib_port *port = &dev->port[port_num - 1]; | 5588 | const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1); |
5548 | struct mlx5_core_dev *mdev; | 5589 | struct mlx5_core_dev *mdev; |
5549 | int ret, num_counters; | 5590 | int ret, num_counters; |
5550 | u8 mdev_port_num; | 5591 | u8 mdev_port_num; |
@@ -5552,18 +5593,17 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, | |||
5552 | if (!stats) | 5593 | if (!stats) |
5553 | return -EINVAL; | 5594 | return -EINVAL; |
5554 | 5595 | ||
5555 | num_counters = port->cnts.num_q_counters + | 5596 | num_counters = cnts->num_q_counters + |
5556 | port->cnts.num_cong_counters + | 5597 | cnts->num_cong_counters + |
5557 | port->cnts.num_ext_ppcnt_counters; | 5598 | cnts->num_ext_ppcnt_counters; |
5558 | 5599 | ||
5559 | /* q_counters are per IB device, query the master mdev */ | 5600 | /* q_counters are per IB device, query the master mdev */ |
5560 | ret = mlx5_ib_query_q_counters(dev->mdev, port, stats, | 5601 | ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id); |
5561 | port->cnts.set_id); | ||
5562 | if (ret) | 5602 | if (ret) |
5563 | return ret; | 5603 | return ret; |
5564 | 5604 | ||
5565 | if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { | 5605 | if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { |
5566 | ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats); | 5606 | ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats); |
5567 | if (ret) | 5607 | if (ret) |
5568 | return ret; | 5608 | return ret; |
5569 | } | 5609 | } |
@@ -5580,10 +5620,10 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, | |||
5580 | } | 5620 | } |
5581 | ret = mlx5_lag_query_cong_counters(dev->mdev, | 5621 | ret = mlx5_lag_query_cong_counters(dev->mdev, |
5582 | stats->value + | 5622 | stats->value + |
5583 | port->cnts.num_q_counters, | 5623 | cnts->num_q_counters, |
5584 | port->cnts.num_cong_counters, | 5624 | cnts->num_cong_counters, |
5585 | port->cnts.offsets + | 5625 | cnts->offsets + |
5586 | port->cnts.num_q_counters); | 5626 | cnts->num_q_counters); |
5587 | 5627 | ||
5588 | mlx5_ib_put_native_port_mdev(dev, port_num); | 5628 | mlx5_ib_put_native_port_mdev(dev, port_num); |
5589 | if (ret) | 5629 | if (ret) |
@@ -5598,20 +5638,22 @@ static struct rdma_hw_stats * | |||
5598 | mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) | 5638 | mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) |
5599 | { | 5639 | { |
5600 | struct mlx5_ib_dev *dev = to_mdev(counter->device); | 5640 | struct mlx5_ib_dev *dev = to_mdev(counter->device); |
5601 | struct mlx5_ib_port *port = &dev->port[counter->port - 1]; | 5641 | const struct mlx5_ib_counters *cnts = |
5642 | get_counters(dev, counter->port - 1); | ||
5602 | 5643 | ||
5603 | /* Q counters are in the beginning of all counters */ | 5644 | /* Q counters are in the beginning of all counters */ |
5604 | return rdma_alloc_hw_stats_struct(port->cnts.names, | 5645 | return rdma_alloc_hw_stats_struct(cnts->names, |
5605 | port->cnts.num_q_counters, | 5646 | cnts->num_q_counters, |
5606 | RDMA_HW_STATS_DEFAULT_LIFESPAN); | 5647 | RDMA_HW_STATS_DEFAULT_LIFESPAN); |
5607 | } | 5648 | } |
5608 | 5649 | ||
5609 | static int mlx5_ib_counter_update_stats(struct rdma_counter *counter) | 5650 | static int mlx5_ib_counter_update_stats(struct rdma_counter *counter) |
5610 | { | 5651 | { |
5611 | struct mlx5_ib_dev *dev = to_mdev(counter->device); | 5652 | struct mlx5_ib_dev *dev = to_mdev(counter->device); |
5612 | struct mlx5_ib_port *port = &dev->port[counter->port - 1]; | 5653 | const struct mlx5_ib_counters *cnts = |
5654 | get_counters(dev, counter->port - 1); | ||
5613 | 5655 | ||
5614 | return mlx5_ib_query_q_counters(dev->mdev, port, | 5656 | return mlx5_ib_query_q_counters(dev->mdev, cnts, |
5615 | counter->stats, counter->id); | 5657 | counter->stats, counter->id); |
5616 | } | 5658 | } |
5617 | 5659 | ||
@@ -5788,7 +5830,6 @@ static void init_delay_drop(struct mlx5_ib_dev *dev) | |||
5788 | mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n"); | 5830 | mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n"); |
5789 | } | 5831 | } |
5790 | 5832 | ||
5791 | /* The mlx5_ib_multiport_mutex should be held when calling this function */ | ||
5792 | static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, | 5833 | static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, |
5793 | struct mlx5_ib_multiport_info *mpi) | 5834 | struct mlx5_ib_multiport_info *mpi) |
5794 | { | 5835 | { |
@@ -5798,6 +5839,8 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, | |||
5798 | int err; | 5839 | int err; |
5799 | int i; | 5840 | int i; |
5800 | 5841 | ||
5842 | lockdep_assert_held(&mlx5_ib_multiport_mutex); | ||
5843 | |||
5801 | mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); | 5844 | mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); |
5802 | 5845 | ||
5803 | spin_lock(&port->mp.mpi_lock); | 5846 | spin_lock(&port->mp.mpi_lock); |
@@ -5847,13 +5890,14 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, | |||
5847 | ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN; | 5890 | ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN; |
5848 | } | 5891 | } |
5849 | 5892 | ||
5850 | /* The mlx5_ib_multiport_mutex should be held when calling this function */ | ||
5851 | static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, | 5893 | static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, |
5852 | struct mlx5_ib_multiport_info *mpi) | 5894 | struct mlx5_ib_multiport_info *mpi) |
5853 | { | 5895 | { |
5854 | u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; | 5896 | u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; |
5855 | int err; | 5897 | int err; |
5856 | 5898 | ||
5899 | lockdep_assert_held(&mlx5_ib_multiport_mutex); | ||
5900 | |||
5857 | spin_lock(&ibdev->port[port_num].mp.mpi_lock); | 5901 | spin_lock(&ibdev->port[port_num].mp.mpi_lock); |
5858 | if (ibdev->port[port_num].mp.mpi) { | 5902 | if (ibdev->port[port_num].mp.mpi) { |
5859 | mlx5_ib_dbg(ibdev, "port %d already affiliated.\n", | 5903 | mlx5_ib_dbg(ibdev, "port %d already affiliated.\n", |
@@ -6882,7 +6926,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
6882 | dev->port = kcalloc(num_ports, sizeof(*dev->port), | 6926 | dev->port = kcalloc(num_ports, sizeof(*dev->port), |
6883 | GFP_KERNEL); | 6927 | GFP_KERNEL); |
6884 | if (!dev->port) { | 6928 | if (!dev->port) { |
6885 | ib_dealloc_device((struct ib_device *)dev); | 6929 | ib_dealloc_device(&dev->ib_dev); |
6886 | return NULL; | 6930 | return NULL; |
6887 | } | 6931 | } |
6888 | 6932 | ||
@@ -6909,6 +6953,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) | |||
6909 | mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); | 6953 | mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); |
6910 | list_del(&mpi->list); | 6954 | list_del(&mpi->list); |
6911 | mutex_unlock(&mlx5_ib_multiport_mutex); | 6955 | mutex_unlock(&mlx5_ib_multiport_mutex); |
6956 | kfree(mpi); | ||
6912 | return; | 6957 | return; |
6913 | } | 6958 | } |
6914 | 6959 | ||
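Note on the hunks above: the counters rework gives switchdev-mode devices a single counter set, dev->port[0].cnts, shared by every port, while non-switchdev devices keep one set per port; get_counters() hides that choice, and mlx5_ib_alloc_hw_stats() only serves the device-wide query (port_num == 0) in switchdev mode and only per-port queries otherwise. The accepted combinations as a small sketch:

	#include <stdbool.h>

	/* Mirrors the early-return test in mlx5_ib_alloc_hw_stats() above:
	 * switchdev -> only port_num 0 is valid; otherwise only port_num >= 1. */
	static bool hw_stats_request_ok(bool is_switchdev, unsigned int port_num)
	{
		return is_switchdev ? (port_num == 0) : (port_num != 0);
	}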
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 125a507c10ed..2ceaef3ea3fb 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -200,6 +200,7 @@ struct mlx5_ib_flow_db { | |||
200 | struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS]; | 200 | struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS]; |
201 | struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS]; | 201 | struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS]; |
202 | struct mlx5_ib_flow_prio fdb; | 202 | struct mlx5_ib_flow_prio fdb; |
203 | struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT]; | ||
203 | struct mlx5_flow_table *lag_demux_ft; | 204 | struct mlx5_flow_table *lag_demux_ft; |
204 | /* Protect flow steering bypass flow tables | 205 | /* Protect flow steering bypass flow tables |
205 | * when add/del flow rules. | 206 | * when add/del flow rules. |
@@ -1476,6 +1477,7 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev, | |||
1476 | bool dyn_bfreg); | 1477 | bool dyn_bfreg); |
1477 | 1478 | ||
1478 | int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter); | 1479 | int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter); |
1480 | u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num); | ||
1479 | 1481 | ||
1480 | static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev, | 1482 | static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev, |
1481 | bool do_modify_atomic) | 1483 | bool do_modify_atomic) |
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index dd26e7acb37e..2e9b43061797 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c | |||
@@ -982,17 +982,6 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev, | |||
982 | return ret < 0 ? ret : npages; | 982 | return ret < 0 ? ret : npages; |
983 | } | 983 | } |
984 | 984 | ||
985 | static const u32 mlx5_ib_odp_opcode_cap[] = { | ||
986 | [MLX5_OPCODE_SEND] = IB_ODP_SUPPORT_SEND, | ||
987 | [MLX5_OPCODE_SEND_IMM] = IB_ODP_SUPPORT_SEND, | ||
988 | [MLX5_OPCODE_SEND_INVAL] = IB_ODP_SUPPORT_SEND, | ||
989 | [MLX5_OPCODE_RDMA_WRITE] = IB_ODP_SUPPORT_WRITE, | ||
990 | [MLX5_OPCODE_RDMA_WRITE_IMM] = IB_ODP_SUPPORT_WRITE, | ||
991 | [MLX5_OPCODE_RDMA_READ] = IB_ODP_SUPPORT_READ, | ||
992 | [MLX5_OPCODE_ATOMIC_CS] = IB_ODP_SUPPORT_ATOMIC, | ||
993 | [MLX5_OPCODE_ATOMIC_FA] = IB_ODP_SUPPORT_ATOMIC, | ||
994 | }; | ||
995 | |||
996 | /* | 985 | /* |
997 | * Parse initiator WQE. Advances the wqe pointer to point at the | 986 | * Parse initiator WQE. Advances the wqe pointer to point at the |
998 | * scatter-gather list, and set wqe_end to the end of the WQE. | 987 | * scatter-gather list, and set wqe_end to the end of the WQE. |
@@ -1003,12 +992,8 @@ static int mlx5_ib_mr_initiator_pfault_handler( | |||
1003 | { | 992 | { |
1004 | struct mlx5_wqe_ctrl_seg *ctrl = *wqe; | 993 | struct mlx5_wqe_ctrl_seg *ctrl = *wqe; |
1005 | u16 wqe_index = pfault->wqe.wqe_index; | 994 | u16 wqe_index = pfault->wqe.wqe_index; |
1006 | u32 transport_caps; | ||
1007 | struct mlx5_base_av *av; | 995 | struct mlx5_base_av *av; |
1008 | unsigned ds, opcode; | 996 | unsigned ds, opcode; |
1009 | #if defined(DEBUG) | ||
1010 | u32 ctrl_wqe_index, ctrl_qpn; | ||
1011 | #endif | ||
1012 | u32 qpn = qp->trans_qp.base.mqp.qpn; | 997 | u32 qpn = qp->trans_qp.base.mqp.qpn; |
1013 | 998 | ||
1014 | ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK; | 999 | ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK; |
@@ -1024,58 +1009,17 @@ static int mlx5_ib_mr_initiator_pfault_handler( | |||
1024 | return -EFAULT; | 1009 | return -EFAULT; |
1025 | } | 1010 | } |
1026 | 1011 | ||
1027 | #if defined(DEBUG) | ||
1028 | ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) & | ||
1029 | MLX5_WQE_CTRL_WQE_INDEX_MASK) >> | ||
1030 | MLX5_WQE_CTRL_WQE_INDEX_SHIFT; | ||
1031 | if (wqe_index != ctrl_wqe_index) { | ||
1032 | mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n", | ||
1033 | wqe_index, qpn, | ||
1034 | ctrl_wqe_index); | ||
1035 | return -EFAULT; | ||
1036 | } | ||
1037 | |||
1038 | ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >> | ||
1039 | MLX5_WQE_CTRL_QPN_SHIFT; | ||
1040 | if (qpn != ctrl_qpn) { | ||
1041 | mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n", | ||
1042 | wqe_index, qpn, | ||
1043 | ctrl_qpn); | ||
1044 | return -EFAULT; | ||
1045 | } | ||
1046 | #endif /* DEBUG */ | ||
1047 | |||
1048 | *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS; | 1012 | *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS; |
1049 | *wqe += sizeof(*ctrl); | 1013 | *wqe += sizeof(*ctrl); |
1050 | 1014 | ||
1051 | opcode = be32_to_cpu(ctrl->opmod_idx_opcode) & | 1015 | opcode = be32_to_cpu(ctrl->opmod_idx_opcode) & |
1052 | MLX5_WQE_CTRL_OPCODE_MASK; | 1016 | MLX5_WQE_CTRL_OPCODE_MASK; |
1053 | 1017 | ||
1054 | switch (qp->ibqp.qp_type) { | 1018 | if (qp->ibqp.qp_type == IB_QPT_XRC_INI) |
1055 | case IB_QPT_XRC_INI: | ||
1056 | *wqe += sizeof(struct mlx5_wqe_xrc_seg); | 1019 | *wqe += sizeof(struct mlx5_wqe_xrc_seg); |
1057 | transport_caps = dev->odp_caps.per_transport_caps.xrc_odp_caps; | ||
1058 | break; | ||
1059 | case IB_QPT_RC: | ||
1060 | transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps; | ||
1061 | break; | ||
1062 | case IB_QPT_UD: | ||
1063 | transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps; | ||
1064 | break; | ||
1065 | default: | ||
1066 | mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n", | ||
1067 | qp->ibqp.qp_type); | ||
1068 | return -EFAULT; | ||
1069 | } | ||
1070 | |||
1071 | if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) || | ||
1072 | !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) { | ||
1073 | mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n", | ||
1074 | opcode); | ||
1075 | return -EFAULT; | ||
1076 | } | ||
1077 | 1020 | ||
1078 | if (qp->ibqp.qp_type == IB_QPT_UD) { | 1021 | if (qp->ibqp.qp_type == IB_QPT_UD || |
1022 | qp->qp_sub_type == MLX5_IB_QPT_DCI) { | ||
1079 | av = *wqe; | 1023 | av = *wqe; |
1080 | if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV)) | 1024 | if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV)) |
1081 | *wqe += sizeof(struct mlx5_av); | 1025 | *wqe += sizeof(struct mlx5_av); |
@@ -1138,19 +1082,6 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev, | |||
1138 | return -EFAULT; | 1082 | return -EFAULT; |
1139 | } | 1083 | } |
1140 | 1084 | ||
1141 | switch (qp->ibqp.qp_type) { | ||
1142 | case IB_QPT_RC: | ||
1143 | if (!(dev->odp_caps.per_transport_caps.rc_odp_caps & | ||
1144 | IB_ODP_SUPPORT_RECV)) | ||
1145 | goto invalid_transport_or_opcode; | ||
1146 | break; | ||
1147 | default: | ||
1148 | invalid_transport_or_opcode: | ||
1149 | mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n", | ||
1150 | qp->ibqp.qp_type); | ||
1151 | return -EFAULT; | ||
1152 | } | ||
1153 | |||
1154 | *wqe_end = wqe + wqe_size; | 1085 | *wqe_end = wqe + wqe_size; |
1155 | 1086 | ||
1156 | return 0; | 1087 | return 0; |
@@ -1200,7 +1131,7 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev, | |||
1200 | { | 1131 | { |
1201 | bool sq = pfault->type & MLX5_PFAULT_REQUESTOR; | 1132 | bool sq = pfault->type & MLX5_PFAULT_REQUESTOR; |
1202 | u16 wqe_index = pfault->wqe.wqe_index; | 1133 | u16 wqe_index = pfault->wqe.wqe_index; |
1203 | void *wqe = NULL, *wqe_end = NULL; | 1134 | void *wqe, *wqe_start = NULL, *wqe_end = NULL; |
1204 | u32 bytes_mapped, total_wqe_bytes; | 1135 | u32 bytes_mapped, total_wqe_bytes; |
1205 | struct mlx5_core_rsc_common *res; | 1136 | struct mlx5_core_rsc_common *res; |
1206 | int resume_with_error = 1; | 1137 | int resume_with_error = 1; |
@@ -1221,12 +1152,13 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev, | |||
1221 | goto resolve_page_fault; | 1152 | goto resolve_page_fault; |
1222 | } | 1153 | } |
1223 | 1154 | ||
1224 | wqe = (void *)__get_free_page(GFP_KERNEL); | 1155 | wqe_start = (void *)__get_free_page(GFP_KERNEL); |
1225 | if (!wqe) { | 1156 | if (!wqe_start) { |
1226 | mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n"); | 1157 | mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n"); |
1227 | goto resolve_page_fault; | 1158 | goto resolve_page_fault; |
1228 | } | 1159 | } |
1229 | 1160 | ||
1161 | wqe = wqe_start; | ||
1230 | qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL; | 1162 | qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL; |
1231 | if (qp && sq) { | 1163 | if (qp && sq) { |
1232 | ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE, | 1164 | ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE, |
@@ -1281,7 +1213,7 @@ resolve_page_fault: | |||
1281 | pfault->wqe.wq_num, resume_with_error, | 1213 | pfault->wqe.wq_num, resume_with_error, |
1282 | pfault->type); | 1214 | pfault->type); |
1283 | mlx5_core_res_put(res); | 1215 | mlx5_core_res_put(res); |
1284 | free_page((unsigned long)wqe); | 1216 | free_page((unsigned long)wqe_start); |
1285 | } | 1217 | } |
1286 | 1218 | ||
1287 | static int pages_in_range(u64 address, u32 length) | 1219 | static int pages_in_range(u64 address, u32 length) |
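Note on the odp.c hunks above: besides dropping the per-opcode ODP capability table and the DEBUG-only WQE validation, the handler now keeps the page allocation in wqe_start and frees that, because the parse helpers advance wqe past the control and address segments, so the later free_page() could otherwise be handed an address other than the one __get_free_page() returned. A minimal, self-contained sketch of that ownership pattern in plain C (not the driver code itself, just the keep-the-original-pointer rule the fix restores):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parsing advances the cursor past a header; the caller must still free
 * the original allocation, never the advanced cursor. */
static void parse(char **cursor)
{
	*cursor += 16;			/* skip a fixed-size header */
}

int main(void)
{
	char *buf_start = malloc(4096);	/* analogous to wqe_start */
	char *buf;			/* analogous to wqe (the cursor) */

	if (!buf_start)
		return 1;
	memset(buf_start, 0, 4096);

	buf = buf_start;
	parse(&buf);			/* buf now points into the buffer */

	printf("consumed %td header bytes\n", buf - buf_start);
	free(buf_start);		/* never free(buf) here */
	return 0;
}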
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 72869ff4a334..8937d72ddcf6 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -3386,19 +3386,16 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp, | |||
3386 | struct mlx5_ib_dev *dev = to_mdev(qp->device); | 3386 | struct mlx5_ib_dev *dev = to_mdev(qp->device); |
3387 | struct mlx5_ib_qp *mqp = to_mqp(qp); | 3387 | struct mlx5_ib_qp *mqp = to_mqp(qp); |
3388 | struct mlx5_qp_context context = {}; | 3388 | struct mlx5_qp_context context = {}; |
3389 | struct mlx5_ib_port *mibport = NULL; | ||
3390 | struct mlx5_ib_qp_base *base; | 3389 | struct mlx5_ib_qp_base *base; |
3391 | u32 set_id; | 3390 | u32 set_id; |
3392 | 3391 | ||
3393 | if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) | 3392 | if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) |
3394 | return 0; | 3393 | return 0; |
3395 | 3394 | ||
3396 | if (counter) { | 3395 | if (counter) |
3397 | set_id = counter->id; | 3396 | set_id = counter->id; |
3398 | } else { | 3397 | else |
3399 | mibport = &dev->port[mqp->port - 1]; | 3398 | set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1); |
3400 | set_id = mibport->cnts.set_id; | ||
3401 | } | ||
3402 | 3399 | ||
3403 | base = &mqp->trans_qp.base; | 3400 | base = &mqp->trans_qp.base; |
3404 | context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff); | 3401 | context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff); |
@@ -3459,7 +3456,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
3459 | struct mlx5_ib_cq *send_cq, *recv_cq; | 3456 | struct mlx5_ib_cq *send_cq, *recv_cq; |
3460 | struct mlx5_qp_context *context; | 3457 | struct mlx5_qp_context *context; |
3461 | struct mlx5_ib_pd *pd; | 3458 | struct mlx5_ib_pd *pd; |
3462 | struct mlx5_ib_port *mibport = NULL; | ||
3463 | enum mlx5_qp_state mlx5_cur, mlx5_new; | 3459 | enum mlx5_qp_state mlx5_cur, mlx5_new; |
3464 | enum mlx5_qp_optpar optpar; | 3460 | enum mlx5_qp_optpar optpar; |
3465 | u32 set_id = 0; | 3461 | u32 set_id = 0; |
@@ -3624,11 +3620,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
3624 | if (qp->flags & MLX5_IB_QP_UNDERLAY) | 3620 | if (qp->flags & MLX5_IB_QP_UNDERLAY) |
3625 | port_num = 0; | 3621 | port_num = 0; |
3626 | 3622 | ||
3627 | mibport = &dev->port[port_num]; | ||
3628 | if (ibqp->counter) | 3623 | if (ibqp->counter) |
3629 | set_id = ibqp->counter->id; | 3624 | set_id = ibqp->counter->id; |
3630 | else | 3625 | else |
3631 | set_id = mibport->cnts.set_id; | 3626 | set_id = mlx5_ib_get_counters_id(dev, port_num); |
3632 | context->qp_counter_set_usr_page |= | 3627 | context->qp_counter_set_usr_page |= |
3633 | cpu_to_be32(set_id << 24); | 3628 | cpu_to_be32(set_id << 24); |
3634 | } | 3629 | } |
@@ -3817,6 +3812,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
3817 | 3812 | ||
3818 | dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry); | 3813 | dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry); |
3819 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { | 3814 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { |
3815 | u16 set_id; | ||
3816 | |||
3820 | required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; | 3817 | required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; |
3821 | if (!is_valid_mask(attr_mask, required, 0)) | 3818 | if (!is_valid_mask(attr_mask, required, 0)) |
3822 | return -EINVAL; | 3819 | return -EINVAL; |
@@ -3843,7 +3840,9 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
3843 | } | 3840 | } |
3844 | MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index); | 3841 | MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index); |
3845 | MLX5_SET(dctc, dctc, port, attr->port_num); | 3842 | MLX5_SET(dctc, dctc, port, attr->port_num); |
3846 | MLX5_SET(dctc, dctc, counter_set_id, dev->port[attr->port_num - 1].cnts.set_id); | 3843 | |
3844 | set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1); | ||
3845 | MLX5_SET(dctc, dctc, counter_set_id, set_id); | ||
3847 | 3846 | ||
3848 | } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { | 3847 | } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { |
3849 | struct mlx5_ib_modify_qp_resp resp = {}; | 3848 | struct mlx5_ib_modify_qp_resp resp = {}; |
@@ -6345,11 +6344,13 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, | |||
6345 | } | 6344 | } |
6346 | 6345 | ||
6347 | if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) { | 6346 | if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) { |
6347 | u16 set_id; | ||
6348 | |||
6349 | set_id = mlx5_ib_get_counters_id(dev, 0); | ||
6348 | if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) { | 6350 | if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) { |
6349 | MLX5_SET64(modify_rq_in, in, modify_bitmask, | 6351 | MLX5_SET64(modify_rq_in, in, modify_bitmask, |
6350 | MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID); | 6352 | MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID); |
6351 | MLX5_SET(rqc, rqc, counter_set_id, | 6353 | MLX5_SET(rqc, rqc, counter_set_id, set_id); |
6352 | dev->port->cnts.set_id); | ||
6353 | } else | 6354 | } else |
6354 | dev_info_once( | 6355 | dev_info_once( |
6355 | &dev->ib_dev.dev, | 6356 | &dev->ib_dev.dev, |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index bccc11378109..e8267e590772 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
@@ -163,10 +163,10 @@ int ocrdma_query_port(struct ib_device *ibdev, | |||
163 | netdev = dev->nic_info.netdev; | 163 | netdev = dev->nic_info.netdev; |
164 | if (netif_running(netdev) && netif_oper_up(netdev)) { | 164 | if (netif_running(netdev) && netif_oper_up(netdev)) { |
165 | port_state = IB_PORT_ACTIVE; | 165 | port_state = IB_PORT_ACTIVE; |
166 | props->phys_state = 5; | 166 | props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; |
167 | } else { | 167 | } else { |
168 | port_state = IB_PORT_DOWN; | 168 | port_state = IB_PORT_DOWN; |
169 | props->phys_state = 3; | 169 | props->phys_state = IB_PORT_PHYS_STATE_DISABLED; |
170 | } | 170 | } |
171 | props->max_mtu = IB_MTU_4096; | 171 | props->max_mtu = IB_MTU_4096; |
172 | props->active_mtu = iboe_get_mtu(netdev->mtu); | 172 | props->active_mtu = iboe_get_mtu(netdev->mtu); |
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index f97b3d65b30c..5136b835e1ba 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c | |||
@@ -826,7 +826,7 @@ static int qedr_init_hw(struct qedr_dev *dev) | |||
826 | if (rc) | 826 | if (rc) |
827 | goto out; | 827 | goto out; |
828 | 828 | ||
829 | dev->db_addr = (void __iomem *)(uintptr_t)out_params.dpi_addr; | 829 | dev->db_addr = out_params.dpi_addr; |
830 | dev->db_phys_addr = out_params.dpi_phys_addr; | 830 | dev->db_phys_addr = out_params.dpi_phys_addr; |
831 | dev->db_size = out_params.dpi_size; | 831 | dev->db_size = out_params.dpi_size; |
832 | dev->dpi = out_params.dpi; | 832 | dev->dpi = out_params.dpi; |
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index a92ca22e5de1..0cfd849b13d6 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h | |||
@@ -229,7 +229,7 @@ struct qedr_ucontext { | |||
229 | struct ib_ucontext ibucontext; | 229 | struct ib_ucontext ibucontext; |
230 | struct qedr_dev *dev; | 230 | struct qedr_dev *dev; |
231 | struct qedr_pd *pd; | 231 | struct qedr_pd *pd; |
232 | u64 dpi_addr; | 232 | void __iomem *dpi_addr; |
233 | u64 dpi_phys_addr; | 233 | u64 dpi_phys_addr; |
234 | u32 dpi_size; | 234 | u32 dpi_size; |
235 | u16 dpi; | 235 | u16 dpi; |
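The qedr/qed hunks in this series change the DPI doorbell address from a plain u64 to void __iomem *, so sparse can check that the mapped doorbell is only touched through the MMIO accessors rather than cast back and forth. A hedged sketch of what the annotation buys; the structure and register width here are invented for illustration and are not qedr's layout:

#include <linux/io.h>
#include <linux/types.h>

struct demo_dev {
	void __iomem *db_addr;	/* annotated: plain dereferences are flagged */
};

/* Ring a 32-bit doorbell (illustrative only). */
static void demo_ring_doorbell(struct demo_dev *dev, u32 val)
{
	/* writel() is the sanctioned way to access __iomem memory;
	 * "*(u32 *)dev->db_addr = val" would be reported by sparse and
	 * may be incorrect on some architectures. */
	writel(val, dev->db_addr);
}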
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 27d90a84ea01..6f3ce86019b7 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c | |||
@@ -221,10 +221,10 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr) | |||
221 | /* *attr being zeroed by the caller, avoid zeroing it here */ | 221 | /* *attr being zeroed by the caller, avoid zeroing it here */ |
222 | if (rdma_port->port_state == QED_RDMA_PORT_UP) { | 222 | if (rdma_port->port_state == QED_RDMA_PORT_UP) { |
223 | attr->state = IB_PORT_ACTIVE; | 223 | attr->state = IB_PORT_ACTIVE; |
224 | attr->phys_state = 5; | 224 | attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; |
225 | } else { | 225 | } else { |
226 | attr->state = IB_PORT_DOWN; | 226 | attr->state = IB_PORT_DOWN; |
227 | attr->phys_state = 3; | 227 | attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; |
228 | } | 228 | } |
229 | attr->max_mtu = IB_MTU_4096; | 229 | attr->max_mtu = IB_MTU_4096; |
230 | attr->active_mtu = iboe_get_mtu(dev->ndev->mtu); | 230 | attr->active_mtu = iboe_get_mtu(dev->ndev->mtu); |
@@ -2451,7 +2451,6 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) | |||
2451 | struct qedr_dev *dev = qp->dev; | 2451 | struct qedr_dev *dev = qp->dev; |
2452 | struct ib_qp_attr attr; | 2452 | struct ib_qp_attr attr; |
2453 | int attr_mask = 0; | 2453 | int attr_mask = 0; |
2454 | int rc = 0; | ||
2455 | 2454 | ||
2456 | DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n", | 2455 | DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n", |
2457 | qp, qp->qp_type); | 2456 | qp, qp->qp_type); |
@@ -2496,7 +2495,7 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) | |||
2496 | xa_erase_irq(&dev->qps, qp->qp_id); | 2495 | xa_erase_irq(&dev->qps, qp->qp_id); |
2497 | kfree(qp); | 2496 | kfree(qp); |
2498 | } | 2497 | } |
2499 | return rc; | 2498 | return 0; |
2500 | } | 2499 | } |
2501 | 2500 | ||
2502 | int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags, | 2501 | int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags, |
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 27b6e664e59d..b0144229cf3b 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
@@ -1789,7 +1789,6 @@ static void unlock_expected_tids(struct qib_ctxtdata *rcd) | |||
1789 | 1789 | ||
1790 | static int qib_close(struct inode *in, struct file *fp) | 1790 | static int qib_close(struct inode *in, struct file *fp) |
1791 | { | 1791 | { |
1792 | int ret = 0; | ||
1793 | struct qib_filedata *fd; | 1792 | struct qib_filedata *fd; |
1794 | struct qib_ctxtdata *rcd; | 1793 | struct qib_ctxtdata *rcd; |
1795 | struct qib_devdata *dd; | 1794 | struct qib_devdata *dd; |
@@ -1873,7 +1872,7 @@ static int qib_close(struct inode *in, struct file *fp) | |||
1873 | 1872 | ||
1874 | bail: | 1873 | bail: |
1875 | kfree(fd); | 1874 | kfree(fd); |
1876 | return ret; | 1875 | return 0; |
1877 | } | 1876 | } |
1878 | 1877 | ||
1879 | static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo) | 1878 | static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo) |
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 1d5e2d4ee257..aaf7438258fa 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
@@ -313,11 +313,8 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) | |||
313 | case IB_WR_SEND: | 313 | case IB_WR_SEND: |
314 | case IB_WR_SEND_WITH_IMM: | 314 | case IB_WR_SEND_WITH_IMM: |
315 | /* If no credit, return. */ | 315 | /* If no credit, return. */ |
316 | if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && | 316 | if (!rvt_rc_credit_avail(qp, wqe)) |
317 | rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { | ||
318 | qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; | ||
319 | goto bail; | 317 | goto bail; |
320 | } | ||
321 | if (len > pmtu) { | 318 | if (len > pmtu) { |
322 | qp->s_state = OP(SEND_FIRST); | 319 | qp->s_state = OP(SEND_FIRST); |
323 | len = pmtu; | 320 | len = pmtu; |
@@ -344,11 +341,8 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) | |||
344 | goto no_flow_control; | 341 | goto no_flow_control; |
345 | case IB_WR_RDMA_WRITE_WITH_IMM: | 342 | case IB_WR_RDMA_WRITE_WITH_IMM: |
346 | /* If no credit, return. */ | 343 | /* If no credit, return. */ |
347 | if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && | 344 | if (!rvt_rc_credit_avail(qp, wqe)) |
348 | rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { | ||
349 | qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; | ||
350 | goto bail; | 345 | goto bail; |
351 | } | ||
352 | no_flow_control: | 346 | no_flow_control: |
353 | ohdr->u.rc.reth.vaddr = | 347 | ohdr->u.rc.reth.vaddr = |
354 | cpu_to_be64(wqe->rdma_wr.remote_addr); | 348 | cpu_to_be64(wqe->rdma_wr.remote_addr); |
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 905206a0c2d5..3926be78036e 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c | |||
@@ -436,6 +436,7 @@ QIB_DIAGC_ATTR(dmawait); | |||
436 | QIB_DIAGC_ATTR(unaligned); | 436 | QIB_DIAGC_ATTR(unaligned); |
437 | QIB_DIAGC_ATTR(rc_dupreq); | 437 | QIB_DIAGC_ATTR(rc_dupreq); |
438 | QIB_DIAGC_ATTR(rc_seqnak); | 438 | QIB_DIAGC_ATTR(rc_seqnak); |
439 | QIB_DIAGC_ATTR(rc_crwaits); | ||
439 | 440 | ||
440 | static struct attribute *diagc_default_attributes[] = { | 441 | static struct attribute *diagc_default_attributes[] = { |
441 | &qib_diagc_attr_rc_resends.attr, | 442 | &qib_diagc_attr_rc_resends.attr, |
@@ -453,6 +454,7 @@ static struct attribute *diagc_default_attributes[] = { | |||
453 | &qib_diagc_attr_unaligned.attr, | 454 | &qib_diagc_attr_unaligned.attr, |
454 | &qib_diagc_attr_rc_dupreq.attr, | 455 | &qib_diagc_attr_rc_dupreq.attr, |
455 | &qib_diagc_attr_rc_seqnak.attr, | 456 | &qib_diagc_attr_rc_seqnak.attr, |
457 | &qib_diagc_attr_rc_crwaits.attr, | ||
456 | NULL | 458 | NULL |
457 | }; | 459 | }; |
458 | 460 | ||
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 03f54eb9404b..c9abe1c01e4e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c | |||
@@ -89,9 +89,15 @@ static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz) | |||
89 | 89 | ||
90 | void usnic_ib_log_vf(struct usnic_ib_vf *vf) | 90 | void usnic_ib_log_vf(struct usnic_ib_vf *vf) |
91 | { | 91 | { |
92 | char buf[1000]; | 92 | char *buf = kzalloc(1000, GFP_KERNEL); |
93 | usnic_ib_dump_vf(vf, buf, sizeof(buf)); | 93 | |
94 | if (!buf) | ||
95 | return; | ||
96 | |||
97 | usnic_ib_dump_vf(vf, buf, 1000); | ||
94 | usnic_dbg("%s\n", buf); | 98 | usnic_dbg("%s\n", buf); |
99 | |||
100 | kfree(buf); | ||
95 | } | 101 | } |
96 | 102 | ||
97 | /* Start of netdev section */ | 103 | /* Start of netdev section */ |
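The usnic_ib_main.c hunk above is the "Avoid overly large buffers on stack" fix: the 1000-byte scratch buffer moves from the kernel stack to a heap allocation, with logging treated as best-effort when the allocation fails. The same transformation in plain, runnable C (the helper names are made up for illustration):

#include <stdio.h>
#include <stdlib.h>

#define DUMP_BUF_SZ 1000

static void dump_state(char *buf, size_t sz)
{
	snprintf(buf, sz, "vf state: link=%d qps=%d", 1, 4);
}

static void log_state(void)
{
	/* heap allocation instead of "char buf[1000];" on the stack */
	char *buf = calloc(1, DUMP_BUF_SZ);

	if (!buf)
		return;		/* logging is best-effort; just skip it */

	dump_state(buf, DUMP_BUF_SZ);
	printf("%s\n", buf);
	free(buf);
}

int main(void)
{
	log_state();
	return 0;
}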
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index eeb07b245ef9..556b8e44a51c 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c | |||
@@ -194,7 +194,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, | |||
194 | return ERR_CAST(dev_list); | 194 | return ERR_CAST(dev_list); |
195 | for (i = 0; dev_list[i]; i++) { | 195 | for (i = 0; dev_list[i]; i++) { |
196 | dev = dev_list[i]; | 196 | dev = dev_list[i]; |
197 | vf = pci_get_drvdata(to_pci_dev(dev)); | 197 | vf = dev_get_drvdata(dev); |
198 | spin_lock(&vf->lock); | 198 | spin_lock(&vf->lock); |
199 | vnic = vf->vnic; | 199 | vnic = vf->vnic; |
200 | if (!usnic_vnic_check_room(vnic, res_spec)) { | 200 | if (!usnic_vnic_check_room(vnic, res_spec)) { |
@@ -356,13 +356,14 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, | |||
356 | 356 | ||
357 | if (!us_ibdev->ufdev->link_up) { | 357 | if (!us_ibdev->ufdev->link_up) { |
358 | props->state = IB_PORT_DOWN; | 358 | props->state = IB_PORT_DOWN; |
359 | props->phys_state = 3; | 359 | props->phys_state = IB_PORT_PHYS_STATE_DISABLED; |
360 | } else if (!us_ibdev->ufdev->inaddr) { | 360 | } else if (!us_ibdev->ufdev->inaddr) { |
361 | props->state = IB_PORT_INIT; | 361 | props->state = IB_PORT_INIT; |
362 | props->phys_state = 4; | 362 | props->phys_state = |
363 | IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING; | ||
363 | } else { | 364 | } else { |
364 | props->state = IB_PORT_ACTIVE; | 365 | props->state = IB_PORT_ACTIVE; |
365 | props->phys_state = 5; | 366 | props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; |
366 | } | 367 | } |
367 | 368 | ||
368 | props->port_cap_flags = 0; | 369 | props->port_cap_flags = 0; |
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index ecf6e659c0da..fb07eed9e402 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h | |||
@@ -65,10 +65,6 @@ | |||
65 | */ | 65 | */ |
66 | #define RXE_UVERBS_ABI_VERSION 2 | 66 | #define RXE_UVERBS_ABI_VERSION 2 |
67 | 67 | ||
68 | #define RDMA_LINK_PHYS_STATE_LINK_UP (5) | ||
69 | #define RDMA_LINK_PHYS_STATE_DISABLED (3) | ||
70 | #define RDMA_LINK_PHYS_STATE_POLLING (2) | ||
71 | |||
72 | #define RXE_ROCE_V2_SPORT (0xc000) | 68 | #define RXE_ROCE_V2_SPORT (0xc000) |
73 | 69 | ||
74 | static inline u32 rxe_crc32(struct rxe_dev *rxe, | 70 | static inline u32 rxe_crc32(struct rxe_dev *rxe, |
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h index 1abed47ca221..fe5207386700 100644 --- a/drivers/infiniband/sw/rxe/rxe_param.h +++ b/drivers/infiniband/sw/rxe/rxe_param.h | |||
@@ -154,7 +154,7 @@ enum rxe_port_param { | |||
154 | RXE_PORT_ACTIVE_WIDTH = IB_WIDTH_1X, | 154 | RXE_PORT_ACTIVE_WIDTH = IB_WIDTH_1X, |
155 | RXE_PORT_ACTIVE_SPEED = 1, | 155 | RXE_PORT_ACTIVE_SPEED = 1, |
156 | RXE_PORT_PKEY_TBL_LEN = 64, | 156 | RXE_PORT_PKEY_TBL_LEN = 64, |
157 | RXE_PORT_PHYS_STATE = 2, | 157 | RXE_PORT_PHYS_STATE = IB_PORT_PHYS_STATE_POLLING, |
158 | RXE_PORT_SUBNET_PREFIX = 0xfe80000000000000ULL, | 158 | RXE_PORT_SUBNET_PREFIX = 0xfe80000000000000ULL, |
159 | }; | 159 | }; |
160 | 160 | ||
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 4ebdfcf4d33e..623129f27f5a 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c | |||
@@ -69,11 +69,11 @@ static int rxe_query_port(struct ib_device *dev, | |||
69 | &attr->active_width); | 69 | &attr->active_width); |
70 | 70 | ||
71 | if (attr->state == IB_PORT_ACTIVE) | 71 | if (attr->state == IB_PORT_ACTIVE) |
72 | attr->phys_state = RDMA_LINK_PHYS_STATE_LINK_UP; | 72 | attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; |
73 | else if (dev_get_flags(rxe->ndev) & IFF_UP) | 73 | else if (dev_get_flags(rxe->ndev) & IFF_UP) |
74 | attr->phys_state = RDMA_LINK_PHYS_STATE_POLLING; | 74 | attr->phys_state = IB_PORT_PHYS_STATE_POLLING; |
75 | else | 75 | else |
76 | attr->phys_state = RDMA_LINK_PHYS_STATE_DISABLED; | 76 | attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; |
77 | 77 | ||
78 | mutex_unlock(&rxe->usdev_lock); | 78 | mutex_unlock(&rxe->usdev_lock); |
79 | 79 | ||
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 438a2917a47c..5d97bba0ce6d 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c | |||
@@ -76,16 +76,15 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr) | |||
76 | if (unlikely(!p)) | 76 | if (unlikely(!p)) |
77 | return -EFAULT; | 77 | return -EFAULT; |
78 | 78 | ||
79 | buffer = kmap_atomic(p); | 79 | buffer = kmap(p); |
80 | 80 | ||
81 | if (likely(PAGE_SIZE - off >= bytes)) { | 81 | if (likely(PAGE_SIZE - off >= bytes)) { |
82 | memcpy(paddr, buffer + off, bytes); | 82 | memcpy(paddr, buffer + off, bytes); |
83 | kunmap_atomic(buffer); | ||
84 | } else { | 83 | } else { |
85 | unsigned long part = bytes - (PAGE_SIZE - off); | 84 | unsigned long part = bytes - (PAGE_SIZE - off); |
86 | 85 | ||
87 | memcpy(paddr, buffer + off, part); | 86 | memcpy(paddr, buffer + off, part); |
88 | kunmap_atomic(buffer); | 87 | kunmap(p); |
89 | 88 | ||
90 | if (!mem->is_pbl) | 89 | if (!mem->is_pbl) |
91 | p = siw_get_upage(mem->umem, | 90 | p = siw_get_upage(mem->umem, |
@@ -97,11 +96,10 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr) | |||
97 | if (unlikely(!p)) | 96 | if (unlikely(!p)) |
98 | return -EFAULT; | 97 | return -EFAULT; |
99 | 98 | ||
100 | buffer = kmap_atomic(p); | 99 | buffer = kmap(p); |
101 | memcpy(paddr + part, buffer, | 100 | memcpy(paddr + part, buffer, bytes - part); |
102 | bytes - part); | ||
103 | kunmap_atomic(buffer); | ||
104 | } | 101 | } |
102 | kunmap(p); | ||
105 | } | 103 | } |
106 | } | 104 | } |
107 | return (int)bytes; | 105 | return (int)bytes; |
@@ -518,11 +516,12 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) | |||
518 | c_tx->mpa_crc_hd, | 516 | c_tx->mpa_crc_hd, |
519 | iov[seg].iov_base, | 517 | iov[seg].iov_base, |
520 | plen); | 518 | plen); |
521 | } else if (do_crc) | 519 | } else if (do_crc) { |
522 | crypto_shash_update( | 520 | crypto_shash_update(c_tx->mpa_crc_hd, |
523 | c_tx->mpa_crc_hd, | 521 | kmap(p) + fp_off, |
524 | page_address(p) + fp_off, | 522 | plen); |
525 | plen); | 523 | kunmap(p); |
524 | } | ||
526 | } else { | 525 | } else { |
527 | u64 va = sge->laddr + sge_off; | 526 | u64 va = sge->laddr + sge_off; |
528 | 527 | ||
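The siw TX-path hunks above replace kmap_atomic() with kmap(): an atomic mapping disables preemption for its duration, so nothing that might sleep (the CRC update, the socket send) may run while it is held, whereas kmap() allows sleeping at the cost of being slower on 32-bit highmem systems. A minimal kernel-style sketch of the bracketing difference (illustrative helpers, not siw code):

#include <linux/highmem.h>
#include <linux/string.h>

/* Atomic mapping: no sleeping between map and unmap. */
static void copy_from_page_atomic(struct page *p, void *dst, size_t len)
{
	void *src = kmap_atomic(p);

	memcpy(dst, src, len);		/* must not sleep here */
	kunmap_atomic(src);		/* takes the mapped address */
}

/* Regular mapping: the critical section may sleep (crypto, sockets). */
static void copy_from_page_sleepable(struct page *p, void *dst, size_t len)
{
	void *src = kmap(p);

	memcpy(dst, src, len);		/* sleeping calls are fine here */
	kunmap(p);			/* takes the page, not the address */
}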
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index da52c90e06d4..869e02b69a01 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c | |||
@@ -206,7 +206,8 @@ int siw_query_port(struct ib_device *base_dev, u8 port, | |||
206 | attr->gid_tbl_len = 1; | 206 | attr->gid_tbl_len = 1; |
207 | attr->max_msg_sz = -1; | 207 | attr->max_msg_sz = -1; |
208 | attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu); | 208 | attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu); |
209 | attr->phys_state = sdev->state == IB_PORT_ACTIVE ? 5 : 3; | 209 | attr->phys_state = sdev->state == IB_PORT_ACTIVE ? |
210 | IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED; | ||
210 | attr->pkey_tbl_len = 1; | 211 | attr->pkey_tbl_len = 1; |
211 | attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP; | 212 | attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP; |
212 | attr->state = sdev->state; | 213 | attr->state = sdev->state; |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 39bf213444cb..52ce63592dcf 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -102,9 +102,10 @@ | |||
102 | 102 | ||
103 | /* Default support is 512KB I/O size */ | 103 | /* Default support is 512KB I/O size */ |
104 | #define ISER_DEF_MAX_SECTORS 1024 | 104 | #define ISER_DEF_MAX_SECTORS 1024 |
105 | #define ISCSI_ISER_DEF_SG_TABLESIZE ((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K) | 105 | #define ISCSI_ISER_DEF_SG_TABLESIZE \ |
106 | /* Maximum support is 8MB I/O size */ | 106 | ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> SHIFT_4K) |
107 | #define ISCSI_ISER_MAX_SG_TABLESIZE ((16384 * 512) >> SHIFT_4K) | 107 | /* Maximum support is 16MB I/O size */ |
108 | #define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> SHIFT_4K) | ||
108 | 109 | ||
109 | #define ISER_DEF_XMIT_CMDS_DEFAULT 512 | 110 | #define ISER_DEF_XMIT_CMDS_DEFAULT 512 |
110 | #if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT | 111 | #if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT |
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 1a039f16d315..e25c70a56be6 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -1767,8 +1767,8 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) | |||
1767 | goto out; | 1767 | goto out; |
1768 | 1768 | ||
1769 | retry: | 1769 | retry: |
1770 | ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size, | 1770 | ch->cq = ib_alloc_cq_any(sdev->device, ch, ch->rq_size + sq_size, |
1771 | 0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE); | 1771 | IB_POLL_WORKQUEUE); |
1772 | if (IS_ERR(ch->cq)) { | 1772 | if (IS_ERR(ch->cq)) { |
1773 | ret = PTR_ERR(ch->cq); | 1773 | ret = PTR_ERR(ch->cq); |
1774 | pr_err("failed to create CQ cqe= %d ret= %d\n", | 1774 | pr_err("failed to create CQ cqe= %d ret= %d\n", |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index bc86dffdc43c..01c380425f9d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c | |||
@@ -188,8 +188,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index, | |||
188 | /* new rate limit */ | 188 | /* new rate limit */ |
189 | err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl); | 189 | err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl); |
190 | if (err) { | 190 | if (err) { |
191 | mlx5_core_err(dev, "Failed configuring rate limit(err %d): \ | 191 | mlx5_core_err(dev, "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n", |
192 | rate %u, max_burst_sz %u, typical_pkt_sz %u\n", | ||
193 | err, rl->rate, rl->max_burst_sz, | 192 | err, rl->rate, rl->max_burst_sz, |
194 | rl->typical_pkt_sz); | 193 | rl->typical_pkt_sz); |
195 | goto out; | 194 | goto out; |
@@ -218,8 +217,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl) | |||
218 | mutex_lock(&table->rl_lock); | 217 | mutex_lock(&table->rl_lock); |
219 | entry = find_rl_entry(table, rl); | 218 | entry = find_rl_entry(table, rl); |
220 | if (!entry || !entry->refcount) { | 219 | if (!entry || !entry->refcount) { |
221 | mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u \ | 220 | mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n", |
222 | are not configured\n", | ||
223 | rl->rate, rl->max_burst_sz, rl->typical_pkt_sz); | 221 | rl->rate, rl->max_burst_sz, rl->typical_pkt_sz); |
224 | goto out; | 222 | goto out; |
225 | } | 223 | } |
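The two mlx5 rl.c hunks above fix log messages that had been split with a backslash continuation inside the string literal: the continuation keeps the newline out but pulls the next line's leading tabs into the logged text, and it also makes the message harder to grep for. Keeping the format string on one line, even a long one, avoids both problems. Illustrative comparison in plain C:

#include <stdio.h>

int main(void)
{
	int err = -22;
	unsigned int rate = 100;

	/* Broken pattern: the literal absorbs the next line's indentation. */
	printf("Failed configuring rate limit(err %d): \
		rate %u\n", err, rate);

	/* Preferred: one unbroken user-visible string. */
	printf("Failed configuring rate limit(err %d): rate %u\n", err, rate);
	return 0;
}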
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 158ac0738911..38b1f402f7ed 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c | |||
@@ -798,9 +798,8 @@ static int qed_rdma_add_user(void *rdma_cxt, | |||
798 | /* Calculate the corresponding DPI address */ | 798 | /* Calculate the corresponding DPI address */ |
799 | dpi_start_offset = p_hwfn->dpi_start_offset; | 799 | dpi_start_offset = p_hwfn->dpi_start_offset; |
800 | 800 | ||
801 | out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells + | 801 | out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset + |
802 | dpi_start_offset + | 802 | out_params->dpi * p_hwfn->dpi_size; |
803 | ((out_params->dpi) * p_hwfn->dpi_size)); | ||
804 | 803 | ||
805 | out_params->dpi_phys_addr = p_hwfn->db_phys_addr + | 804 | out_params->dpi_phys_addr = p_hwfn->db_phys_addr + |
806 | dpi_start_offset + | 805 | dpi_start_offset + |
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c index cd07e5301d42..3c91fa97c9a8 100644 --- a/fs/cifs/smbdirect.c +++ b/fs/cifs/smbdirect.c | |||
@@ -1654,15 +1654,17 @@ static struct smbd_connection *_smbd_get_connection( | |||
1654 | 1654 | ||
1655 | info->send_cq = NULL; | 1655 | info->send_cq = NULL; |
1656 | info->recv_cq = NULL; | 1656 | info->recv_cq = NULL; |
1657 | info->send_cq = ib_alloc_cq(info->id->device, info, | 1657 | info->send_cq = |
1658 | info->send_credit_target, 0, IB_POLL_SOFTIRQ); | 1658 | ib_alloc_cq_any(info->id->device, info, |
1659 | info->send_credit_target, IB_POLL_SOFTIRQ); | ||
1659 | if (IS_ERR(info->send_cq)) { | 1660 | if (IS_ERR(info->send_cq)) { |
1660 | info->send_cq = NULL; | 1661 | info->send_cq = NULL; |
1661 | goto alloc_cq_failed; | 1662 | goto alloc_cq_failed; |
1662 | } | 1663 | } |
1663 | 1664 | ||
1664 | info->recv_cq = ib_alloc_cq(info->id->device, info, | 1665 | info->recv_cq = |
1665 | info->receive_credit_max, 0, IB_POLL_SOFTIRQ); | 1666 | ib_alloc_cq_any(info->id->device, info, |
1667 | info->receive_credit_max, IB_POLL_SOFTIRQ); | ||
1666 | if (IS_ERR(info->recv_cq)) { | 1668 | if (IS_ERR(info->recv_cq)) { |
1667 | info->recv_cq = NULL; | 1669 | info->recv_cq = NULL; |
1668 | goto alloc_cq_failed; | 1670 | goto alloc_cq_failed; |
diff --git a/include/Kbuild b/include/Kbuild index 7259d872cfc9..4ae65e13c3f0 100644 --- a/include/Kbuild +++ b/include/Kbuild | |||
@@ -881,12 +881,6 @@ header-test- += net/xdp.h | |||
881 | header-test- += net/xdp_priv.h | 881 | header-test- += net/xdp_priv.h |
882 | header-test- += pcmcia/cistpl.h | 882 | header-test- += pcmcia/cistpl.h |
883 | header-test- += pcmcia/ds.h | 883 | header-test- += pcmcia/ds.h |
884 | header-test- += rdma/ib.h | ||
885 | header-test- += rdma/iw_portmap.h | ||
886 | header-test- += rdma/opa_port_info.h | ||
887 | header-test- += rdma/rdmavt_cq.h | ||
888 | header-test- += rdma/restrack.h | ||
889 | header-test- += rdma/signature.h | ||
890 | header-test- += rdma/tid_rdma_defs.h | 884 | header-test- += rdma/tid_rdma_defs.h |
891 | header-test- += scsi/fc/fc_encaps.h | 885 | header-test- += scsi/fc/fc_encaps.h |
892 | header-test- += scsi/fc/fc_fc2.h | 886 | header-test- += scsi/fc/fc_fc2.h |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index f3773e8536bb..cc1c230f10ee 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
@@ -328,6 +328,7 @@ enum mlx5_event { | |||
328 | MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, | 328 | MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, |
329 | MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, | 329 | MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, |
330 | MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17, | 330 | MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17, |
331 | MLX5_EVENT_TYPE_XRQ_ERROR = 0x18, | ||
331 | MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, | 332 | MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, |
332 | MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22, | 333 | MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22, |
333 | MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24, | 334 | MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24, |
@@ -345,6 +346,7 @@ enum mlx5_event { | |||
345 | MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe, | 346 | MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe, |
346 | 347 | ||
347 | MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, | 348 | MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, |
349 | MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d, | ||
348 | 350 | ||
349 | MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, | 351 | MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, |
350 | MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, | 352 | MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, |
@@ -584,6 +586,12 @@ struct mlx5_eqe_cq_err { | |||
584 | u8 syndrome; | 586 | u8 syndrome; |
585 | }; | 587 | }; |
586 | 588 | ||
589 | struct mlx5_eqe_xrq_err { | ||
590 | __be32 reserved1[5]; | ||
591 | __be32 type_xrqn; | ||
592 | __be32 reserved2; | ||
593 | }; | ||
594 | |||
587 | struct mlx5_eqe_port_state { | 595 | struct mlx5_eqe_port_state { |
588 | u8 reserved0[8]; | 596 | u8 reserved0[8]; |
589 | u8 port; | 597 | u8 port; |
@@ -698,6 +706,7 @@ union ev_data { | |||
698 | struct mlx5_eqe_pps pps; | 706 | struct mlx5_eqe_pps pps; |
699 | struct mlx5_eqe_dct dct; | 707 | struct mlx5_eqe_dct dct; |
700 | struct mlx5_eqe_temp_warning temp_warning; | 708 | struct mlx5_eqe_temp_warning temp_warning; |
709 | struct mlx5_eqe_xrq_err xrq_err; | ||
701 | } __packed; | 710 | } __packed; |
702 | 711 | ||
703 | struct mlx5_eqe { | 712 | struct mlx5_eqe { |
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index 898f595ea3d6..74efca15fde7 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h | |||
@@ -225,7 +225,7 @@ struct qed_rdma_start_in_params { | |||
225 | 225 | ||
226 | struct qed_rdma_add_user_out_params { | 226 | struct qed_rdma_add_user_out_params { |
227 | u16 dpi; | 227 | u16 dpi; |
228 | u64 dpi_addr; | 228 | void __iomem *dpi_addr; |
229 | u64 dpi_phys_addr; | 229 | u64 dpi_phys_addr; |
230 | u32 dpi_size; | 230 | u32 dpi_size; |
231 | u16 wid_count; | 231 | u16 wid_count; |
diff --git a/include/rdma/ib.h b/include/rdma/ib.h index 4f385ec54f80..fe2fc9e91588 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h | |||
@@ -36,6 +36,8 @@ | |||
36 | #include <linux/types.h> | 36 | #include <linux/types.h> |
37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
38 | #include <linux/cred.h> | 38 | #include <linux/cred.h> |
39 | #include <linux/uaccess.h> | ||
40 | #include <linux/fs.h> | ||
39 | 41 | ||
40 | struct ib_addr { | 42 | struct ib_addr { |
41 | union { | 43 | union { |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index f659f4a02aa9..6a47ba85c54c 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -98,15 +98,54 @@ void ibdev_info(const struct ib_device *ibdev, const char *format, ...); | |||
98 | #if defined(CONFIG_DYNAMIC_DEBUG) | 98 | #if defined(CONFIG_DYNAMIC_DEBUG) |
99 | #define ibdev_dbg(__dev, format, args...) \ | 99 | #define ibdev_dbg(__dev, format, args...) \ |
100 | dynamic_ibdev_dbg(__dev, format, ##args) | 100 | dynamic_ibdev_dbg(__dev, format, ##args) |
101 | #elif defined(DEBUG) | ||
102 | #define ibdev_dbg(__dev, format, args...) \ | ||
103 | ibdev_printk(KERN_DEBUG, __dev, format, ##args) | ||
104 | #else | 101 | #else |
105 | __printf(2, 3) __cold | 102 | __printf(2, 3) __cold |
106 | static inline | 103 | static inline |
107 | void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {} | 104 | void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {} |
108 | #endif | 105 | #endif |
109 | 106 | ||
107 | #define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \ | ||
108 | do { \ | ||
109 | static DEFINE_RATELIMIT_STATE(_rs, \ | ||
110 | DEFAULT_RATELIMIT_INTERVAL, \ | ||
111 | DEFAULT_RATELIMIT_BURST); \ | ||
112 | if (__ratelimit(&_rs)) \ | ||
113 | ibdev_level(ibdev, fmt, ##__VA_ARGS__); \ | ||
114 | } while (0) | ||
115 | |||
116 | #define ibdev_emerg_ratelimited(ibdev, fmt, ...) \ | ||
117 | ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__) | ||
118 | #define ibdev_alert_ratelimited(ibdev, fmt, ...) \ | ||
119 | ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__) | ||
120 | #define ibdev_crit_ratelimited(ibdev, fmt, ...) \ | ||
121 | ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__) | ||
122 | #define ibdev_err_ratelimited(ibdev, fmt, ...) \ | ||
123 | ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__) | ||
124 | #define ibdev_warn_ratelimited(ibdev, fmt, ...) \ | ||
125 | ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__) | ||
126 | #define ibdev_notice_ratelimited(ibdev, fmt, ...) \ | ||
127 | ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__) | ||
128 | #define ibdev_info_ratelimited(ibdev, fmt, ...) \ | ||
129 | ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__) | ||
130 | |||
131 | #if defined(CONFIG_DYNAMIC_DEBUG) | ||
132 | /* descriptor check is first to prevent flooding with "callbacks suppressed" */ | ||
133 | #define ibdev_dbg_ratelimited(ibdev, fmt, ...) \ | ||
134 | do { \ | ||
135 | static DEFINE_RATELIMIT_STATE(_rs, \ | ||
136 | DEFAULT_RATELIMIT_INTERVAL, \ | ||
137 | DEFAULT_RATELIMIT_BURST); \ | ||
138 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ | ||
139 | if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \ | ||
140 | __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \ | ||
141 | ##__VA_ARGS__); \ | ||
142 | } while (0) | ||
143 | #else | ||
144 | __printf(2, 3) __cold | ||
145 | static inline | ||
146 | void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {} | ||
147 | #endif | ||
148 | |||
110 | union ib_gid { | 149 | union ib_gid { |
111 | u8 raw[16]; | 150 | u8 raw[16]; |
112 | struct { | 151 | struct { |
@@ -451,6 +490,16 @@ enum ib_port_state { | |||
451 | IB_PORT_ACTIVE_DEFER = 5 | 490 | IB_PORT_ACTIVE_DEFER = 5 |
452 | }; | 491 | }; |
453 | 492 | ||
493 | enum ib_port_phys_state { | ||
494 | IB_PORT_PHYS_STATE_SLEEP = 1, | ||
495 | IB_PORT_PHYS_STATE_POLLING = 2, | ||
496 | IB_PORT_PHYS_STATE_DISABLED = 3, | ||
497 | IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, | ||
498 | IB_PORT_PHYS_STATE_LINK_UP = 5, | ||
499 | IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, | ||
500 | IB_PORT_PHYS_STATE_PHY_TEST = 7, | ||
501 | }; | ||
502 | |||
454 | enum ib_port_width { | 503 | enum ib_port_width { |
455 | IB_WIDTH_1X = 1, | 504 | IB_WIDTH_1X = 1, |
456 | IB_WIDTH_2X = 16, | 505 | IB_WIDTH_2X = 16, |
@@ -3710,6 +3759,25 @@ static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, | |||
3710 | NULL); | 3759 | NULL); |
3711 | } | 3760 | } |
3712 | 3761 | ||
3762 | struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, | ||
3763 | int nr_cqe, enum ib_poll_context poll_ctx, | ||
3764 | const char *caller); | ||
3765 | |||
3766 | /** | ||
3767 | * ib_alloc_cq_any: Allocate kernel CQ | ||
3768 | * @dev: The IB device | ||
3769 | * @private: Private data attached to the CQE | ||
3770 | * @nr_cqe: Number of CQEs in the CQ | ||
3771 | * @poll_ctx: Context used for polling the CQ | ||
3772 | */ | ||
3773 | static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev, | ||
3774 | void *private, int nr_cqe, | ||
3775 | enum ib_poll_context poll_ctx) | ||
3776 | { | ||
3777 | return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx, | ||
3778 | KBUILD_MODNAME); | ||
3779 | } | ||
3780 | |||
3713 | /** | 3781 | /** |
3714 | * ib_free_cq_user - Free kernel/user CQ | 3782 | * ib_free_cq_user - Free kernel/user CQ |
3715 | * @cq: The CQ to free | 3783 | * @cq: The CQ to free |
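The ib_verbs.h hunk above adds the pieces the rest of this series converts callers to: the named IB_PORT_PHYS_STATE_* constants that replace the bare 2/3/4/5 values in the provider hunks, rate-limited ibdev_* log wrappers, and ib_alloc_cq_any(), which lets the core pick the completion vector instead of each ULP hard-coding 0. A hedged sketch of how a consumer might use the latter two; the "demo" function is invented for illustration:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Illustrative only: not part of any real ULP. */
static int demo_create_cq(struct ib_device *dev, void *ctx, int depth,
			  struct ib_cq **cq_out)
{
	struct ib_cq *cq;

	/* No comp_vector argument: the core spreads CQs across vectors. */
	cq = ib_alloc_cq_any(dev, ctx, depth, IB_POLL_WORKQUEUE);
	if (IS_ERR(cq)) {
		ibdev_err_ratelimited(dev, "CQ allocation failed: %ld\n",
				      PTR_ERR(cq));
		return PTR_ERR(cq);
	}

	*cq_out = cq;
	return 0;
}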
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h index b9fee7feeeb5..c89535047c42 100644 --- a/include/rdma/iw_portmap.h +++ b/include/rdma/iw_portmap.h | |||
@@ -33,6 +33,9 @@ | |||
33 | #ifndef _IW_PORTMAP_H | 33 | #ifndef _IW_PORTMAP_H |
34 | #define _IW_PORTMAP_H | 34 | #define _IW_PORTMAP_H |
35 | 35 | ||
36 | #include <linux/socket.h> | ||
37 | #include <linux/netlink.h> | ||
38 | |||
36 | #define IWPM_ULIBNAME_SIZE 32 | 39 | #define IWPM_ULIBNAME_SIZE 32 |
37 | #define IWPM_DEVNAME_SIZE 32 | 40 | #define IWPM_DEVNAME_SIZE 32 |
38 | #define IWPM_IFNAME_SIZE 16 | 41 | #define IWPM_IFNAME_SIZE 16 |
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h index 7147a9263011..bdbfe25d3854 100644 --- a/include/rdma/opa_port_info.h +++ b/include/rdma/opa_port_info.h | |||
@@ -33,6 +33,8 @@ | |||
33 | #if !defined(OPA_PORT_INFO_H) | 33 | #if !defined(OPA_PORT_INFO_H) |
34 | #define OPA_PORT_INFO_H | 34 | #define OPA_PORT_INFO_H |
35 | 35 | ||
36 | #include <rdma/opa_smi.h> | ||
37 | |||
36 | #define OPA_PORT_LINK_MODE_NOP 0 /* No change */ | 38 | #define OPA_PORT_LINK_MODE_NOP 0 /* No change */ |
37 | #define OPA_PORT_LINK_MODE_OPA 4 /* Port mode is OPA */ | 39 | #define OPA_PORT_LINK_MODE_OPA 4 /* Port mode is OPA */ |
38 | 40 | ||
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h index 6631624e4d7c..ab22759de7ea 100644 --- a/include/rdma/rdma_netlink.h +++ b/include/rdma/rdma_netlink.h | |||
@@ -76,28 +76,32 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
76 | 76 | ||
77 | /** | 77 | /** |
78 | * Send the supplied skb to a specific userspace PID. | 78 | * Send the supplied skb to a specific userspace PID. |
79 | * @net: Net namespace in which to send the skb | ||
79 | * @skb: The netlink skb | 80 | * @skb: The netlink skb |
80 | * @pid: Userspace netlink process ID | 81 | * @pid: Userspace netlink process ID |
81 | * Returns 0 on success or a negative error code. | 82 | * Returns 0 on success or a negative error code. |
82 | */ | 83 | */ |
83 | int rdma_nl_unicast(struct sk_buff *skb, u32 pid); | 84 | int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid); |
84 | 85 | ||
85 | /** | 86 | /** |
86 | * Send, with wait/1 retry, the supplied skb to a specific userspace PID. | 87 | * Send, with wait/1 retry, the supplied skb to a specific userspace PID. |
88 | * @net: Net namespace in which to send the skb | ||
87 | * @skb: The netlink skb | 89 | * @skb: The netlink skb |
88 | * @pid: Userspace netlink process ID | 90 | * @pid: Userspace netlink process ID |
89 | * Returns 0 on success or a negative error code. | 91 | * Returns 0 on success or a negative error code. |
90 | */ | 92 | */ |
91 | int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid); | 93 | int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid); |
92 | 94 | ||
93 | /** | 95 | /** |
94 | * Send the supplied skb to a netlink group. | 96 | * Send the supplied skb to a netlink group. |
97 | * @net: Net namespace in which to send the skb | ||
95 | * @skb: The netlink skb | 98 | * @skb: The netlink skb |
96 | * @group: Netlink group ID | 99 | * @group: Netlink group ID |
97 | * @flags: allocation flags | 100 | * @flags: allocation flags |
98 | * Returns 0 on success or a negative error code. | 101 | * Returns 0 on success or a negative error code. |
99 | */ | 102 | */ |
100 | int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags); | 103 | int rdma_nl_multicast(struct net *net, struct sk_buff *skb, |
104 | unsigned int group, gfp_t flags); | ||
101 | 105 | ||
102 | /** | 106 | /** |
103 | * Check if there are any listeners to the netlink group | 107 | * Check if there are any listeners to the netlink group |
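The rdma_netlink.h hunk above threads a struct net * through the unicast and multicast senders so RDMA netlink traffic can be emitted in the namespace it belongs to instead of implicitly in init_net. A caller that holds the requesting socket can derive the namespace from it; a hedged sketch (the wrapper function below is invented, only the rdma_nl_unicast() signature comes from this series):

#include <linux/skbuff.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>

/* Illustrative only: reply in the namespace of the request's socket. */
static int demo_nl_reply(struct sk_buff *request, struct sk_buff *reply,
			 u32 pid)
{
	struct net *net = sock_net(request->sk);

	return rdma_nl_unicast(net, reply, pid);
}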
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 525848e227dc..ac5a9430abb6 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h | |||
@@ -116,6 +116,7 @@ struct rvt_ibport { | |||
116 | u64 n_unaligned; | 116 | u64 n_unaligned; |
117 | u64 n_rc_dupreq; | 117 | u64 n_rc_dupreq; |
118 | u64 n_rc_seqnak; | 118 | u64 n_rc_seqnak; |
119 | u64 n_rc_crwaits; | ||
119 | u16 pkey_violations; | 120 | u16 pkey_violations; |
120 | u16 qkey_violations; | 121 | u16 qkey_violations; |
121 | u16 mkey_violations; | 122 | u16 mkey_violations; |
diff --git a/include/rdma/rdmavt_cq.h b/include/rdma/rdmavt_cq.h index 04c519ef6d71..574eb7278f46 100644 --- a/include/rdma/rdmavt_cq.h +++ b/include/rdma/rdmavt_cq.h | |||
@@ -53,6 +53,7 @@ | |||
53 | 53 | ||
54 | #include <linux/kthread.h> | 54 | #include <linux/kthread.h> |
55 | #include <rdma/ib_user_verbs.h> | 55 | #include <rdma/ib_user_verbs.h> |
56 | #include <rdma/ib_verbs.h> | ||
56 | 57 | ||
57 | /* | 58 | /* |
58 | * Define an ib_cq_notify value that is not valid so we know when CQ | 59 | * Define an ib_cq_notify value that is not valid so we know when CQ |
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index e06c77d76463..b550ae89bf85 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h | |||
@@ -973,6 +973,41 @@ static inline void rvt_free_rq(struct rvt_rq *rq) | |||
973 | rq->wq = NULL; | 973 | rq->wq = NULL; |
974 | } | 974 | } |
975 | 975 | ||
976 | /** | ||
977 | * rvt_to_iport - Get the ibport pointer | ||
978 | * @qp: the qp pointer | ||
979 | * | ||
980 | * This function returns the ibport pointer from the qp pointer. | ||
981 | */ | ||
982 | static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp) | ||
983 | { | ||
984 | struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); | ||
985 | |||
986 | return rdi->ports[qp->port_num - 1]; | ||
987 | } | ||
988 | |||
989 | /** | ||
990 | * rvt_rc_credit_avail - Check if there are enough RC credits for the request | ||
991 | * @qp: the qp | ||
992 | * @wqe: the request | ||
993 | * | ||
994 | * This function returns false when there are not enough credits for the given | ||
995 | * request and true otherwise. | ||
996 | */ | ||
997 | static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe) | ||
998 | { | ||
999 | lockdep_assert_held(&qp->s_lock); | ||
1000 | if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && | ||
1001 | rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { | ||
1002 | struct rvt_ibport *rvp = rvt_to_iport(qp); | ||
1003 | |||
1004 | qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; | ||
1005 | rvp->n_rc_crwaits++; | ||
1006 | return false; | ||
1007 | } | ||
1008 | return true; | ||
1009 | } | ||
1010 | |||
976 | struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi, | 1011 | struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi, |
977 | u64 v, | 1012 | u64 v, |
978 | void (*cb)(struct rvt_qp *qp, u64 v)); | 1013 | void (*cb)(struct rvt_qp *qp, u64 v)); |
diff --git a/include/rdma/signature.h b/include/rdma/signature.h index f24cc2a1d3c5..d16b0fcc8344 100644 --- a/include/rdma/signature.h +++ b/include/rdma/signature.h | |||
@@ -6,6 +6,8 @@ | |||
6 | #ifndef _RDMA_SIGNATURE_H_ | 6 | #ifndef _RDMA_SIGNATURE_H_ |
7 | #define _RDMA_SIGNATURE_H_ | 7 | #define _RDMA_SIGNATURE_H_ |
8 | 8 | ||
9 | #include <linux/types.h> | ||
10 | |||
9 | enum ib_signature_prot_cap { | 11 | enum ib_signature_prot_cap { |
10 | IB_PROT_T10DIF_TYPE_1 = 1, | 12 | IB_PROT_T10DIF_TYPE_1 = 1, |
11 | IB_PROT_T10DIF_TYPE_2 = 1 << 1, | 13 | IB_PROT_T10DIF_TYPE_2 = 1 << 1, |
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h index 7e9900b0e746..88b6ca70c2fe 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h +++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h | |||
@@ -43,6 +43,7 @@ enum mlx5_ib_uapi_flow_table_type { | |||
43 | MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0, | 43 | MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0, |
44 | MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1, | 44 | MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1, |
45 | MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2, | 45 | MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2, |
46 | MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX = 0x3, | ||
46 | }; | 47 | }; |
47 | 48 | ||
48 | enum mlx5_ib_uapi_flow_action_packet_reformat_type { | 49 | enum mlx5_ib_uapi_flow_action_packet_reformat_type { |
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index bac8dad5dd69..b21c3c209815 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c | |||
@@ -685,9 +685,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args) | |||
685 | goto error; | 685 | goto error; |
686 | 686 | ||
687 | /* Create the Completion Queue */ | 687 | /* Create the Completion Queue */ |
688 | rdma->cq = ib_alloc_cq(rdma->cm_id->device, client, | 688 | rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client, |
689 | opts.sq_depth + opts.rq_depth + 1, | 689 | opts.sq_depth + opts.rq_depth + 1, |
690 | 0, IB_POLL_SOFTIRQ); | 690 | IB_POLL_SOFTIRQ); |
691 | if (IS_ERR(rdma->cq)) | 691 | if (IS_ERR(rdma->cq)) |
692 | goto error; | 692 | goto error; |
693 | 693 | ||
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 3fe665152d95..4d3db6ee7f09 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -454,14 +454,14 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) | |||
454 | dprintk("svcrdma: error creating PD for connect request\n"); | 454 | dprintk("svcrdma: error creating PD for connect request\n"); |
455 | goto errout; | 455 | goto errout; |
456 | } | 456 | } |
457 | newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth, | 457 | newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth, |
458 | 0, IB_POLL_WORKQUEUE); | 458 | IB_POLL_WORKQUEUE); |
459 | if (IS_ERR(newxprt->sc_sq_cq)) { | 459 | if (IS_ERR(newxprt->sc_sq_cq)) { |
460 | dprintk("svcrdma: error creating SQ CQ for connect request\n"); | 460 | dprintk("svcrdma: error creating SQ CQ for connect request\n"); |
461 | goto errout; | 461 | goto errout; |
462 | } | 462 | } |
463 | newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, rq_depth, | 463 | newxprt->sc_rq_cq = |
464 | 0, IB_POLL_WORKQUEUE); | 464 | ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE); |
465 | if (IS_ERR(newxprt->sc_rq_cq)) { | 465 | if (IS_ERR(newxprt->sc_rq_cq)) { |
466 | dprintk("svcrdma: error creating RQ CQ for connect request\n"); | 466 | dprintk("svcrdma: error creating RQ CQ for connect request\n"); |
467 | goto errout; | 467 | goto errout; |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 805b1f35e1ca..b10aa16557f0 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -521,18 +521,17 @@ int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) | |||
521 | init_waitqueue_head(&ep->rep_connect_wait); | 521 | init_waitqueue_head(&ep->rep_connect_wait); |
522 | ep->rep_receive_count = 0; | 522 | ep->rep_receive_count = 0; |
523 | 523 | ||
524 | sendcq = ib_alloc_cq(ia->ri_id->device, NULL, | 524 | sendcq = ib_alloc_cq_any(ia->ri_id->device, NULL, |
525 | ep->rep_attr.cap.max_send_wr + 1, | 525 | ep->rep_attr.cap.max_send_wr + 1, |
526 | ia->ri_id->device->num_comp_vectors > 1 ? 1 : 0, | 526 | IB_POLL_WORKQUEUE); |
527 | IB_POLL_WORKQUEUE); | ||
528 | if (IS_ERR(sendcq)) { | 527 | if (IS_ERR(sendcq)) { |
529 | rc = PTR_ERR(sendcq); | 528 | rc = PTR_ERR(sendcq); |
530 | goto out1; | 529 | goto out1; |
531 | } | 530 | } |
532 | 531 | ||
533 | recvcq = ib_alloc_cq(ia->ri_id->device, NULL, | 532 | recvcq = ib_alloc_cq_any(ia->ri_id->device, NULL, |
534 | ep->rep_attr.cap.max_recv_wr + 1, | 533 | ep->rep_attr.cap.max_recv_wr + 1, |
535 | 0, IB_POLL_WORKQUEUE); | 534 | IB_POLL_WORKQUEUE); |
536 | if (IS_ERR(recvcq)) { | 535 | if (IS_ERR(recvcq)) { |
537 | rc = PTR_ERR(recvcq); | 536 | rc = PTR_ERR(recvcq); |
538 | goto out2; | 537 | goto out2; |