author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-14 13:09:05 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-14 13:09:05 -0400
commit     e3b1fd56f175526db42ae94c457f29c2fa810aca
tree       3e2948ca44fb7fd5348244c2a83eef864b3110b4
parent     0680eb1f485ba5aac2ee02c9f0622239c9a4b16c
parent     d087f6ad724dfbcdc3df8e0191b80d9d8d839e71
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband/rdma updates from Roland Dreier:
"Main set of InfiniBand/RDMA updates for 3.17 merge window:
- MR reregistration support
- MAD support for RMPP in userspace
- iSER and SRP initiator updates
- ocrdma hardware driver updates
- other fixes..."
* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (52 commits)
IB/srp: Fix return value check in srp_init_module()
RDMA/ocrdma: report asic-id in query device
RDMA/ocrdma: Update sli data structure for endianness
RDMA/ocrdma: Obtain SL from device structure
RDMA/uapi: Include socket.h in rdma_user_cm.h
IB/srpt: Handle GID change events
IB/mlx5: Use ARRAY_SIZE instead of sizeof/sizeof[0]
IB/mlx4: Use ARRAY_SIZE instead of sizeof/sizeof[0]
RDMA/amso1100: Check for integer overflow in c2_alloc_cq_buf()
IPoIB: Remove unnecessary test for NULL before debugfs_remove()
IB/mad: Add user space RMPP support
IB/mad: add new ioctl to ABI to support new registration options
IB/mad: Add dev_notice messages for various umad/mad registration failures
IB/mad: Update module to [pr|dev]_* style print messages
IB/ipoib: Avoid multicast join attempts with invalid P_key
IB/umad: Update module to [pr|dev]_* style print messages
IB/ipoib: Avoid flushing the workqueue from worker context
IB/ipoib: Use P_Key change event instead of P_Key polling mechanism
IB/ipath: Add P_Key change event support
mlx4_core: Add support for secure-host and SMP firewall
...
57 files changed, 1851 insertions, 455 deletions
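
Two API changes recur throughout the diff below: ib_register_mad_agent() gains a
trailing registration_flags argument, and kernel-versus-user RMPP handling is
factored into a new ib_mad_kernel_rmpp_agent() helper. A minimal sketch of an
updated in-kernel caller (hypothetical handler and context names, not from this
patch):

	struct ib_mad_agent *agent;

	/* The trailing 0 is the new registration_flags argument; existing
	 * kernel callers pass 0, while IB_MAD_USER_RMPP requests that RMPP
	 * reassembly be left to user space. */
	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
				      NULL, 0, /* no reg_req, no RMPP version */
				      my_send_handler, my_recv_handler,
				      my_context, 0);
	if (IS_ERR(agent))
		return PTR_ERR(agent);
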
diff --git a/Documentation/infiniband/user_mad.txt b/Documentation/infiniband/user_mad.txt
index 8a366959f5cc..7aca13a54a3a 100644
--- a/Documentation/infiniband/user_mad.txt
+++ b/Documentation/infiniband/user_mad.txt
@@ -26,6 +26,11 @@ Creating MAD agents
 ioctl. Also, all agents registered through a file descriptor will
 be unregistered when the descriptor is closed.
 
+2014 -- a new registration ioctl is now provided which allows additional
+fields to be provided during registration.
+Users of this registration call are implicitly setting the use of
+pkey_index (see below).
+
 Receiving MADs
 
 MADs are received using read(). The receive side now supports
@@ -104,10 +109,10 @@ P_Key Index Handling
 The old ib_umad interface did not allow setting the P_Key index for
 MADs that are sent and did not provide a way for obtaining the P_Key
 index of received MADs. A new layout for struct ib_user_mad_hdr
-with a pkey_index member has been defined; however, to preserve
-binary compatibility with older applications, this new layout will
-not be used unless the IB_USER_MAD_ENABLE_PKEY ioctl is called
-before a file descriptor is used for anything else.
+with a pkey_index member has been defined; however, to preserve binary
+compatibility with older applications, this new layout will not be used
+unless one of IB_USER_MAD_ENABLE_PKEY or IB_USER_MAD_REGISTER_AGENT2 ioctl's
+are called before a file descriptor is used for anything else.
 
 In September 2008, the IB_USER_MAD_ABI_VERSION will be incremented
 to 6, the new layout of struct ib_user_mad_hdr will be used by
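
For user-space consumers, the new registration path is the
IB_USER_MAD_REGISTER_AGENT2 ioctl on a umad device node. A rough sketch,
assuming the struct ib_user_mad_reg_req2 layout and IB_USER_MAD_USER_RMPP flag
added to <rdma/ib_user_mad.h> by this series (error handling trimmed):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <rdma/ib_user_mad.h>

	int register_agent2(const char *umad_path)
	{
		struct ib_user_mad_reg_req2 req;
		int fd = open(umad_path, O_RDWR);	/* e.g. /dev/infiniband/umad0 */

		if (fd < 0)
			return -1;

		memset(&req, 0, sizeof(req));
		req.qpn			= 1;		/* GSI */
		req.mgmt_class		= 0x03;		/* e.g. subnet administration */
		req.mgmt_class_version	= 2;
		req.rmpp_version	= 1;
		req.flags		= IB_USER_MAD_USER_RMPP; /* reassemble RMPP ourselves */

		/* Registering through AGENT2 also implicitly enables the
		 * pkey_index header layout, per the text above. */
		if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT2, &req) < 0) {
			close(fd);
			return -1;
		}
		return fd;	/* req.id now holds the assigned agent id */
	}
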
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 2bc7f5af64f4..f6d29614cb01 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -94,14 +94,14 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
 	port_priv = ib_get_agent_port(device, port_num);
 
 	if (!port_priv) {
-		printk(KERN_ERR SPFX "Unable to find port agent\n");
+		dev_err(&device->dev, "Unable to find port agent\n");
 		return;
 	}
 
 	agent = port_priv->agent[qpn];
 	ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
 	if (IS_ERR(ah)) {
-		printk(KERN_ERR SPFX "ib_create_ah_from_wc error %ld\n",
+		dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n",
 		       PTR_ERR(ah));
 		return;
 	}
@@ -110,7 +110,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
 				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 				      GFP_KERNEL);
 	if (IS_ERR(send_buf)) {
-		printk(KERN_ERR SPFX "ib_create_send_mad error\n");
+		dev_err(&device->dev, "ib_create_send_mad error\n");
 		goto err1;
 	}
 
@@ -125,7 +125,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
 	}
 
 	if (ib_post_send_mad(send_buf, NULL)) {
-		printk(KERN_ERR SPFX "ib_post_send_mad error\n");
+		dev_err(&device->dev, "ib_post_send_mad error\n");
 		goto err2;
 	}
 	return;
@@ -151,7 +151,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
 	if (!port_priv) {
-		printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
+		dev_err(&device->dev, "No memory for ib_agent_port_private\n");
 		ret = -ENOMEM;
 		goto error1;
 	}
@@ -161,7 +161,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	port_priv->agent[0] = ib_register_mad_agent(device, port_num,
 						    IB_QPT_SMI, NULL, 0,
 						    &agent_send_handler,
-						    NULL, NULL);
+						    NULL, NULL, 0);
 	if (IS_ERR(port_priv->agent[0])) {
 		ret = PTR_ERR(port_priv->agent[0]);
 		goto error2;
@@ -172,7 +172,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	port_priv->agent[1] = ib_register_mad_agent(device, port_num,
 						    IB_QPT_GSI, NULL, 0,
 						    &agent_send_handler,
-						    NULL, NULL);
+						    NULL, NULL, 0);
 	if (IS_ERR(port_priv->agent[1])) {
 		ret = PTR_ERR(port_priv->agent[1]);
 		goto error3;
@@ -202,7 +202,7 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	port_priv = __ib_get_agent_port(device, port_num);
 	if (port_priv == NULL) {
 		spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
-		printk(KERN_ERR SPFX "Port %d not found\n", port_num);
+		dev_err(&device->dev, "Port %d not found\n", port_num);
 		return -ENODEV;
 	}
 	list_del(&port_priv->port_list);
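
The agent.c hunks are part of a module-wide conversion from flat
printk(KERN_ERR SPFX ...) logging to device-qualified dev_err()/dev_notice()
calls, which prefix each message with the emitting struct ib_device rather than
a fixed module string. Before/after sketch (log output shown is approximate):

	/* before: "ib_agent: Port 2 not found" -- no hint which HCA complained */
	printk(KERN_ERR SPFX "Port %d not found\n", port_num);

	/* after: roughly "infiniband mlx4_0: Port 2 not found" */
	dev_err(&device->dev, "Port %d not found\n", port_num);
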
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index c3239170d8b7..e28a494e2a3a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3753,7 +3753,7 @@ static void cm_add_one(struct ib_device *ib_device)
 	struct cm_port *port;
 	struct ib_mad_reg_req reg_req = {
 		.mgmt_class = IB_MGMT_CLASS_CM,
-		.mgmt_class_version = IB_CM_CLASS_VERSION
+		.mgmt_class_version = IB_CM_CLASS_VERSION,
 	};
 	struct ib_port_modify port_modify = {
 		.set_port_cap_mask = IB_PORT_CM_SUP
@@ -3801,7 +3801,8 @@ static void cm_add_one(struct ib_device *ib_device)
 							0,
 							cm_send_handler,
 							cm_recv_handler,
-							port);
+							port,
+							0);
 	if (IS_ERR(port->mad_agent))
 		goto error2;
 
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 3d2e489ab732..ff9163dc1596 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -46,6 +46,7 @@
 #include <linux/completion.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>
 
 #include <rdma/iw_cm.h>
 #include <rdma/ib_addr.h>
@@ -65,6 +66,20 @@ struct iwcm_work {
 	struct list_head free_list;
 };
 
+static unsigned int default_backlog = 256;
+
+static struct ctl_table_header *iwcm_ctl_table_hdr;
+static struct ctl_table iwcm_ctl_table[] = {
+	{
+		.procname	= "default_backlog",
+		.data		= &default_backlog,
+		.maxlen		= sizeof(default_backlog),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{ }
+};
+
 /*
  * The following services provide a mechanism for pre-allocating iwcm_work
  * elements. The design pre-allocates them based on the cm_id type:
@@ -425,6 +440,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
+	if (!backlog)
+		backlog = default_backlog;
+
 	ret = alloc_work_entries(cm_id_priv, backlog);
 	if (ret)
 		return ret;
@@ -1030,11 +1048,20 @@ static int __init iw_cm_init(void)
 	if (!iwcm_wq)
 		return -ENOMEM;
 
+	iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
+						 iwcm_ctl_table);
+	if (!iwcm_ctl_table_hdr) {
+		pr_err("iw_cm: couldn't register sysctl paths\n");
+		destroy_workqueue(iwcm_wq);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
 static void __exit iw_cm_cleanup(void)
 {
+	unregister_net_sysctl_table(iwcm_ctl_table_hdr);
 	destroy_workqueue(iwcm_wq);
 }
 
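
The iwcm.c change turns the listen backlog into a tunable: a caller that passes
a backlog of 0 now inherits default_backlog (256 unless overridden via the new
/proc/sys/net/iw_cm/default_backlog sysctl). A sketch of the effect at a
hypothetical call site:

	/* Before this patch, backlog 0 meant no pre-allocated work entries;
	 * now it is rewritten to the sysctl default before allocation. */
	ret = iw_cm_listen(cm_id, 0);	/* behaves like iw_cm_listen(cm_id, 256) */
	if (ret)
		return ret;
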
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ab31f136d04b..74c30f4c557e 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -33,6 +33,9 @@
  * SOFTWARE.
  *
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -195,7 +198,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 					   u8 rmpp_version,
 					   ib_mad_send_handler send_handler,
 					   ib_mad_recv_handler recv_handler,
-					   void *context)
+					   void *context,
+					   u32 registration_flags)
 {
 	struct ib_mad_port_private *port_priv;
 	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
@@ -211,68 +215,109 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 
 	/* Validate parameters */
 	qpn = get_spl_qp_index(qp_type);
-	if (qpn == -1)
+	if (qpn == -1) {
+		dev_notice(&device->dev,
+			   "ib_register_mad_agent: invalid QP Type %d\n",
+			   qp_type);
 		goto error1;
+	}
 
-	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
+	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
+		dev_notice(&device->dev,
+			   "ib_register_mad_agent: invalid RMPP Version %u\n",
+			   rmpp_version);
 		goto error1;
+	}
 
 	/* Validate MAD registration request if supplied */
 	if (mad_reg_req) {
-		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
+		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
+			dev_notice(&device->dev,
+				   "ib_register_mad_agent: invalid Class Version %u\n",
+				   mad_reg_req->mgmt_class_version);
 			goto error1;
-		if (!recv_handler)
+		}
+		if (!recv_handler) {
+			dev_notice(&device->dev,
+				   "ib_register_mad_agent: no recv_handler\n");
 			goto error1;
+		}
 		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
 			/*
 			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
 			 * one in this range currently allowed
 			 */
 			if (mad_reg_req->mgmt_class !=
-			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+				dev_notice(&device->dev,
+					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
+					   mad_reg_req->mgmt_class);
 				goto error1;
+			}
 		} else if (mad_reg_req->mgmt_class == 0) {
 			/*
 			 * Class 0 is reserved in IBA and is used for
 			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 			 */
+			dev_notice(&device->dev,
+				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
 			goto error1;
 		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
 			/*
 			 * If class is in "new" vendor range,
 			 * ensure supplied OUI is not zero
 			 */
-			if (!is_vendor_oui(mad_reg_req->oui))
+			if (!is_vendor_oui(mad_reg_req->oui)) {
+				dev_notice(&device->dev,
+					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
+					   mad_reg_req->mgmt_class);
 				goto error1;
+			}
 		}
 		/* Make sure class supplied is consistent with RMPP */
 		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
-			if (rmpp_version)
+			if (rmpp_version) {
+				dev_notice(&device->dev,
+					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
+					   mad_reg_req->mgmt_class);
 				goto error1;
+			}
 		}
+
 		/* Make sure class supplied is consistent with QP type */
 		if (qp_type == IB_QPT_SMI) {
 			if ((mad_reg_req->mgmt_class !=
 					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
 			    (mad_reg_req->mgmt_class !=
-					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
+					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
+				dev_notice(&device->dev,
+					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
+					   mad_reg_req->mgmt_class);
 				goto error1;
+			}
 		} else {
 			if ((mad_reg_req->mgmt_class ==
 					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
 			    (mad_reg_req->mgmt_class ==
-					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
+					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
+				dev_notice(&device->dev,
+					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
+					   mad_reg_req->mgmt_class);
 				goto error1;
+			}
 		}
 	} else {
 		/* No registration request supplied */
 		if (!send_handler)
 			goto error1;
+		if (registration_flags & IB_MAD_USER_RMPP)
+			goto error1;
 	}
 
 	/* Validate device and port */
 	port_priv = ib_get_mad_port(device, port_num);
 	if (!port_priv) {
+		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
 		ret = ERR_PTR(-ENODEV);
 		goto error1;
 	}
@@ -280,6 +325,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	/* Verify the QP requested is supported. For example, Ethernet devices
 	 * will not have QP0 */
 	if (!port_priv->qp_info[qpn].qp) {
+		dev_notice(&device->dev,
+			   "ib_register_mad_agent: QP %d not supported\n", qpn);
 		ret = ERR_PTR(-EPROTONOSUPPORT);
 		goto error1;
 	}
@@ -316,6 +363,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	mad_agent_priv->agent.context = context;
 	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
 	mad_agent_priv->agent.port_num = port_num;
+	mad_agent_priv->agent.flags = registration_flags;
 	spin_lock_init(&mad_agent_priv->lock);
 	INIT_LIST_HEAD(&mad_agent_priv->send_list);
 	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
@@ -706,7 +754,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	    smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
 	    IB_SMI_DISCARD) {
 		ret = -EINVAL;
-		printk(KERN_ERR PFX "Invalid directed route\n");
+		dev_err(&device->dev, "Invalid directed route\n");
 		goto out;
 	}
 
@@ -718,7 +766,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	local = kmalloc(sizeof *local, GFP_ATOMIC);
 	if (!local) {
 		ret = -ENOMEM;
-		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
+		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
 		goto out;
 	}
 	local->mad_priv = NULL;
@@ -726,7 +774,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
 	if (!mad_priv) {
 		ret = -ENOMEM;
-		printk(KERN_ERR PFX "No memory for local response MAD\n");
+		dev_err(&device->dev, "No memory for local response MAD\n");
 		kfree(local);
 		goto out;
 	}
@@ -837,9 +885,9 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
 	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
 		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
 		if (!seg) {
-			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
-			       "alloc failed for len %zd, gfp %#x\n",
-			       sizeof (*seg) + seg_size, gfp_mask);
+			dev_err(&send_buf->mad_agent->device->dev,
+				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
+				sizeof (*seg) + seg_size, gfp_mask);
 			free_send_rmpp_list(send_wr);
 			return -ENOMEM;
 		}
@@ -862,6 +910,12 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
 	return 0;
 }
 
+int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent)
+{
+	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
+}
+EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
+
 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 					    u32 remote_qpn, u16 pkey_index,
 					    int rmpp_active,
@@ -878,10 +932,12 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 	pad = get_pad_size(hdr_len, data_len);
 	message_size = hdr_len + data_len + pad;
 
-	if ((!mad_agent->rmpp_version &&
-	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
-	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
-		return ERR_PTR(-EINVAL);
+	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
+		if (!rmpp_active && message_size > sizeof(struct ib_mad))
+			return ERR_PTR(-EINVAL);
+	} else
+		if (rmpp_active || message_size > sizeof(struct ib_mad))
+			return ERR_PTR(-EINVAL);
 
 	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
 	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
@@ -1135,7 +1191,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 			      &mad_agent_priv->send_list);
 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
-		if (mad_agent_priv->agent.rmpp_version) {
+		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
 			ret = ib_send_rmpp_mad(mad_send_wr);
 			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
 				ret = ib_send_mad(mad_send_wr);
@@ -1199,7 +1255,8 @@ EXPORT_SYMBOL(ib_redirect_mad_qp);
 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
 		      struct ib_wc *wc)
 {
-	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
+	dev_err(&mad_agent->device->dev,
+		"ib_process_mad_wc() not implemented yet\n");
 	return 0;
 }
 EXPORT_SYMBOL(ib_process_mad_wc);
@@ -1211,7 +1268,7 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
 
 	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
 		if ((*method)->agent[i]) {
-			printk(KERN_ERR PFX "Method %d already in use\n", i);
+			pr_err("Method %d already in use\n", i);
 			return -EINVAL;
 		}
 	}
@@ -1223,8 +1280,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
 	/* Allocate management method table */
 	*method = kzalloc(sizeof **method, GFP_ATOMIC);
 	if (!*method) {
-		printk(KERN_ERR PFX "No memory for "
-		       "ib_mad_mgmt_method_table\n");
+		pr_err("No memory for ib_mad_mgmt_method_table\n");
 		return -ENOMEM;
 	}
 
@@ -1319,8 +1375,8 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate management class table for "new" class version */
 		*class = kzalloc(sizeof **class, GFP_ATOMIC);
 		if (!*class) {
-			printk(KERN_ERR PFX "No memory for "
-			       "ib_mad_mgmt_class_table\n");
+			dev_err(&agent_priv->agent.device->dev,
+				"No memory for ib_mad_mgmt_class_table\n");
 			ret = -ENOMEM;
 			goto error1;
 		}
@@ -1386,8 +1442,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate mgmt vendor class table for "new" class version */
 		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
 		if (!vendor) {
-			printk(KERN_ERR PFX "No memory for "
-			       "ib_mad_mgmt_vendor_class_table\n");
+			dev_err(&agent_priv->agent.device->dev,
+				"No memory for ib_mad_mgmt_vendor_class_table\n");
 			goto error1;
 		}
 
@@ -1397,8 +1453,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate table for this management vendor class */
 		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
 		if (!vendor_class) {
-			printk(KERN_ERR PFX "No memory for "
-			       "ib_mad_mgmt_vendor_class\n");
+			dev_err(&agent_priv->agent.device->dev,
+				"No memory for ib_mad_mgmt_vendor_class\n");
 			goto error2;
 		}
 
@@ -1429,7 +1485,7 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 			goto check_in_use;
 		}
 	}
-	printk(KERN_ERR PFX "All OUI slots in use\n");
+	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
 	goto error3;
 
 check_in_use:
@@ -1640,9 +1696,9 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 		if (mad_agent->agent.recv_handler)
 			atomic_inc(&mad_agent->refcount);
 		else {
-			printk(KERN_NOTICE PFX "No receive handler for client "
-			       "%p on port %d\n",
-			       &mad_agent->agent, port_priv->port_num);
+			dev_notice(&port_priv->device->dev,
+				   "No receive handler for client %p on port %d\n",
+				   &mad_agent->agent, port_priv->port_num);
 			mad_agent = NULL;
 		}
 	}
@@ -1658,8 +1714,8 @@ static int validate_mad(struct ib_mad *mad, u32 qp_num)
 
 	/* Make sure MAD base version is understood */
 	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
-		printk(KERN_ERR PFX "MAD received with unsupported base "
-		       "version %d\n", mad->mad_hdr.base_version);
+		pr_err("MAD received with unsupported base version %d\n",
+		       mad->mad_hdr.base_version);
 		goto out;
 	}
 
@@ -1685,6 +1741,7 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
 
 	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
 	return !mad_agent_priv->agent.rmpp_version ||
+		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
 		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 		  IB_MGMT_RMPP_FLAG_ACTIVE) ||
 		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
@@ -1812,7 +1869,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 
 	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
 	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
-	if (mad_agent_priv->agent.rmpp_version) {
+	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
 		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
 						      mad_recv_wc);
 		if (!mad_recv_wc) {
@@ -1827,23 +1884,39 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
 		if (!mad_send_wr) {
 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-			ib_free_recv_mad(mad_recv_wc);
-			deref_mad_agent(mad_agent_priv);
-			return;
-		}
-		ib_mark_mad_done(mad_send_wr);
-		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
+			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
+			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
+					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
+				/* user rmpp is in effect
+				 * and this is an active RMPP MAD
+				 */
+				mad_recv_wc->wc->wr_id = 0;
+				mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+								   mad_recv_wc);
+				atomic_dec(&mad_agent_priv->refcount);
+			} else {
+				/* not user rmpp, revert to normal behavior and
+				 * drop the mad */
+				ib_free_recv_mad(mad_recv_wc);
+				deref_mad_agent(mad_agent_priv);
+				return;
+			}
+		} else {
+			ib_mark_mad_done(mad_send_wr);
+			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
 			/* Defined behavior is to complete response before request */
 			mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
 			mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
 							   mad_recv_wc);
 			atomic_dec(&mad_agent_priv->refcount);
 
 			mad_send_wc.status = IB_WC_SUCCESS;
 			mad_send_wc.vendor_err = 0;
 			mad_send_wc.send_buf = &mad_send_wr->send_buf;
 			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
+		}
 	} else {
 		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
 						   mad_recv_wc);
@@ -1911,8 +1984,8 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 
 	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
 	if (!response) {
-		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
-		       "for response buffer\n");
+		dev_err(&port_priv->device->dev,
+			"ib_mad_recv_done_handler no memory for response buffer\n");
 		goto out;
 	}
 
@@ -2083,7 +2156,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 
 	mad_agent_priv = mad_send_wr->mad_agent_priv;
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
-	if (mad_agent_priv->agent.rmpp_version) {
+	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
 		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
 		if (ret == IB_RMPP_RESULT_CONSUMED)
 			goto done;
@@ -2176,7 +2249,8 @@ retry:
 		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
 				   &bad_send_wr);
 		if (ret) {
-			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
+			dev_err(&port_priv->device->dev,
+				"ib_post_send failed: %d\n", ret);
 			mad_send_wr = queued_send_wr;
 			wc->status = IB_WC_LOC_QP_OP_ERR;
 			goto retry;
@@ -2248,8 +2322,9 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
 					   IB_QP_STATE | IB_QP_CUR_STATE);
 			kfree(attr);
 			if (ret)
-				printk(KERN_ERR PFX "mad_error_handler - "
-				       "ib_modify_qp to RTS : %d\n", ret);
+				dev_err(&port_priv->device->dev,
+					"mad_error_handler - ib_modify_qp to RTS : %d\n",
+					ret);
 			else
 				mark_sends_for_retry(qp_info);
 		}
@@ -2408,7 +2483,8 @@ static void local_completions(struct work_struct *work)
 		if (local->mad_priv) {
 			recv_mad_agent = local->recv_mad_agent;
 			if (!recv_mad_agent) {
-				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
+				dev_err(&mad_agent_priv->agent.device->dev,
+					"No receive MAD agent for local completion\n");
 				free_mad = 1;
 				goto local_send_completion;
 			}
@@ -2476,7 +2552,7 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
 
 	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
 
-	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
+	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
 		ret = ib_retry_rmpp(mad_send_wr);
 		switch (ret) {
 		case IB_RMPP_RESULT_UNHANDLED:
@@ -2589,7 +2665,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 		} else {
 			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
 			if (!mad_priv) {
-				printk(KERN_ERR PFX "No memory for receive buffer\n");
+				dev_err(&qp_info->port_priv->device->dev,
+					"No memory for receive buffer\n");
 				ret = -ENOMEM;
 				break;
 			}
@@ -2625,7 +2702,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 					    sizeof mad_priv->header,
 					    DMA_FROM_DEVICE);
 			kmem_cache_free(ib_mad_cache, mad_priv);
-			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
+			dev_err(&qp_info->port_priv->device->dev,
+				"ib_post_recv failed: %d\n", ret);
 			break;
 		}
 	} while (post);
@@ -2681,7 +2759,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 	if (!attr) {
-		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
+		dev_err(&port_priv->device->dev,
+			"Couldn't kmalloc ib_qp_attr\n");
 		return -ENOMEM;
 	}
 
@@ -2705,16 +2784,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
 				   IB_QP_PKEY_INDEX | IB_QP_QKEY);
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't change QP%d state to "
-			       "INIT: %d\n", i, ret);
+			dev_err(&port_priv->device->dev,
+				"Couldn't change QP%d state to INIT: %d\n",
+				i, ret);
 			goto out;
 		}
 
		attr->qp_state = IB_QPS_RTR;
 		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't change QP%d state to "
-			       "RTR: %d\n", i, ret);
+			dev_err(&port_priv->device->dev,
+				"Couldn't change QP%d state to RTR: %d\n",
+				i, ret);
 			goto out;
 		}
 
@@ -2722,16 +2803,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 		attr->sq_psn = IB_MAD_SEND_Q_PSN;
 		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't change QP%d state to "
-			       "RTS: %d\n", i, ret);
+			dev_err(&port_priv->device->dev,
+				"Couldn't change QP%d state to RTS: %d\n",
+				i, ret);
 			goto out;
 		}
 	}
 
 	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
 	if (ret) {
-		printk(KERN_ERR PFX "Failed to request completion "
-		       "notification: %d\n", ret);
+		dev_err(&port_priv->device->dev,
+			"Failed to request completion notification: %d\n",
+			ret);
 		goto out;
 	}
 
@@ -2741,7 +2824,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 
 		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
+			dev_err(&port_priv->device->dev,
+				"Couldn't post receive WRs\n");
 			goto out;
 		}
 	}
@@ -2755,7 +2839,8 @@ static void qp_event_handler(struct ib_event *event, void *qp_context)
 	struct ib_mad_qp_info *qp_info = qp_context;
 
 	/* It's worse than that! He's dead, Jim! */
-	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
+	dev_err(&qp_info->port_priv->device->dev,
+		"Fatal error (%d) on MAD QP (%d)\n",
 		event->event, qp_info->qp->qp_num);
 }
 
@@ -2801,8 +2886,9 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
 	qp_init_attr.event_handler = qp_event_handler;
 	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
 	if (IS_ERR(qp_info->qp)) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
-		       get_spl_qp_index(qp_type));
+		dev_err(&qp_info->port_priv->device->dev,
+			"Couldn't create ib_mad QP%d\n",
+			get_spl_qp_index(qp_type));
 		ret = PTR_ERR(qp_info->qp);
 		goto error;
 	}
@@ -2840,7 +2926,7 @@ static int ib_mad_port_open(struct ib_device *device,
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
 	if (!port_priv) {
-		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
+		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
 		return -ENOMEM;
 	}
 
@@ -2860,21 +2946,21 @@ static int ib_mad_port_open(struct ib_device *device,
 				     ib_mad_thread_completion_handler,
 				     NULL, port_priv, cq_size, 0);
 	if (IS_ERR(port_priv->cq)) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
+		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
 		ret = PTR_ERR(port_priv->cq);
 		goto error3;
 	}
 
 	port_priv->pd = ib_alloc_pd(device);
 	if (IS_ERR(port_priv->pd)) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
+		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
 		ret = PTR_ERR(port_priv->pd);
 		goto error4;
 	}
 
 	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(port_priv->mr)) {
-		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
+		dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
 		ret = PTR_ERR(port_priv->mr);
 		goto error5;
 	}
@@ -2902,7 +2988,7 @@ static int ib_mad_port_open(struct ib_device *device,
 
 	ret = ib_mad_port_start(port_priv);
 	if (ret) {
-		printk(KERN_ERR PFX "Couldn't start port\n");
+		dev_err(&device->dev, "Couldn't start port\n");
 		goto error9;
 	}
 
@@ -2946,7 +3032,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
 	port_priv = __ib_get_mad_port(device, port_num);
 	if (port_priv == NULL) {
 		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
-		printk(KERN_ERR PFX "Port %d not found\n", port_num);
+		dev_err(&device->dev, "Port %d not found\n", port_num);
 		return -ENODEV;
 	}
 	list_del_init(&port_priv->port_list);
@@ -2984,14 +3070,12 @@ static void ib_mad_init_device(struct ib_device *device)
 
 	for (i = start; i <= end; i++) {
 		if (ib_mad_port_open(device, i)) {
-			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
-			       device->name, i);
+			dev_err(&device->dev, "Couldn't open port %d\n", i);
 			goto error;
 		}
 		if (ib_agent_port_open(device, i)) {
-			printk(KERN_ERR PFX "Couldn't open %s port %d "
-			       "for agents\n",
-			       device->name, i);
+			dev_err(&device->dev,
+				"Couldn't open port %d for agents\n", i);
 			goto error_agent;
 		}
 	}
@@ -2999,20 +3083,17 @@ static void ib_mad_init_device(struct ib_device *device)
 
 error_agent:
 	if (ib_mad_port_close(device, i))
-		printk(KERN_ERR PFX "Couldn't close %s port %d\n",
-		       device->name, i);
+		dev_err(&device->dev, "Couldn't close port %d\n", i);
 
 error:
 	i--;
 
 	while (i >= start) {
 		if (ib_agent_port_close(device, i))
-			printk(KERN_ERR PFX "Couldn't close %s port %d "
-			       "for agents\n",
-			       device->name, i);
+			dev_err(&device->dev,
+				"Couldn't close port %d for agents\n", i);
 		if (ib_mad_port_close(device, i))
-			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
-			       device->name, i);
+			dev_err(&device->dev, "Couldn't close port %d\n", i);
 		i--;
 	}
 }
@@ -3033,12 +3114,12 @@ static void ib_mad_remove_device(struct ib_device *device)
 	}
 	for (i = 0; i < num_ports; i++, cur_port++) {
 		if (ib_agent_port_close(device, cur_port))
-			printk(KERN_ERR PFX "Couldn't close %s port %d "
-			       "for agents\n",
-			       device->name, cur_port);
+			dev_err(&device->dev,
+				"Couldn't close port %d for agents\n",
+				cur_port);
 		if (ib_mad_port_close(device, cur_port))
-			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
-			       device->name, cur_port);
+			dev_err(&device->dev, "Couldn't close port %d\n",
+				cur_port);
 	}
 }
 
@@ -3064,7 +3145,7 @@ static int __init ib_mad_init_module(void)
 					 SLAB_HWCACHE_ALIGN,
 					 NULL);
 	if (!ib_mad_cache) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
+		pr_err("Couldn't create ib_mad cache\n");
 		ret = -ENOMEM;
 		goto error1;
 	}
@@ -3072,7 +3153,7 @@ static int __init ib_mad_init_module(void)
 	INIT_LIST_HEAD(&ib_mad_port_list);
 
 	if (ib_register_client(&mad_client)) {
-		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
+		pr_err("Couldn't register ib_mad client\n");
 		ret = -EINVAL;
 		goto error2;
 	}
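
The many agent.rmpp_version tests in this file are replaced by the new
ib_mad_kernel_rmpp_agent() helper, so that agents registered with
IB_MAD_USER_RMPP bypass kernel-side RMPP processing. The resulting decision
logic, restated as a sketch of the helper added above:

	/*
	 * agent->rmpp_version == 0                  -> agent does no RMPP at all
	 * rmpp_version set, IB_MAD_USER_RMPP clear  -> kernel segments/reassembles
	 * rmpp_version set, IB_MAD_USER_RMPP set    -> raw RMPP segments are passed
	 *                                              to/from user space
	 */
	int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent)
	{
		return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
	}
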
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 9430ab4969c5..d1a0b0ee9444 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -42,9 +42,6 @@
 #include <rdma/ib_mad.h>
 #include <rdma/ib_smi.h>
 
-
-#define PFX "ib_mad: "
-
 #define IB_MAD_QPS_CORE		2 /* Always QP0 and QP1 as a minimum */
 
 /* QP and CQ parameters */
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 233eaf541f55..c38f030f0dc9 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1184,7 +1184,7 @@ static void ib_sa_add_one(struct ib_device *device)
 		sa_dev->port[i].agent =
 			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
 					      NULL, 0, send_handler,
-					      recv_handler, sa_dev);
+					      recv_handler, sa_dev, 0);
 		if (IS_ERR(sa_dev->port[i].agent))
 			goto err;
 
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 1acb99100556..928cdd20e2d1 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c | |||
| @@ -33,6 +33,8 @@ | |||
| 33 | * SOFTWARE. | 33 | * SOFTWARE. |
| 34 | */ | 34 | */ |
| 35 | 35 | ||
| 36 | #define pr_fmt(fmt) "user_mad: " fmt | ||
| 37 | |||
| 36 | #include <linux/module.h> | 38 | #include <linux/module.h> |
| 37 | #include <linux/init.h> | 39 | #include <linux/init.h> |
| 38 | #include <linux/device.h> | 40 | #include <linux/device.h> |
| @@ -504,13 +506,15 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
| 504 | 506 | ||
| 505 | rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data; | 507 | rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data; |
| 506 | hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); | 508 | hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); |
| 507 | if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) { | 509 | |
| 508 | copy_offset = IB_MGMT_MAD_HDR; | 510 | if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class) |
| 509 | rmpp_active = 0; | 511 | && ib_mad_kernel_rmpp_agent(agent)) { |
| 510 | } else { | ||
| 511 | copy_offset = IB_MGMT_RMPP_HDR; | 512 | copy_offset = IB_MGMT_RMPP_HDR; |
| 512 | rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & | 513 | rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & |
| 513 | IB_MGMT_RMPP_FLAG_ACTIVE; | 514 | IB_MGMT_RMPP_FLAG_ACTIVE; |
| 515 | } else { | ||
| 516 | copy_offset = IB_MGMT_MAD_HDR; | ||
| 517 | rmpp_active = 0; | ||
| 514 | } | 518 | } |
| 515 | 519 | ||
| 516 | data_len = count - hdr_size(file) - hdr_len; | 520 | data_len = count - hdr_size(file) - hdr_len; |
| @@ -556,14 +560,22 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
| 556 | rmpp_mad->mad_hdr.tid = *tid; | 560 | rmpp_mad->mad_hdr.tid = *tid; |
| 557 | } | 561 | } |
| 558 | 562 | ||
| 559 | spin_lock_irq(&file->send_lock); | 563 | if (!ib_mad_kernel_rmpp_agent(agent) |
| 560 | ret = is_duplicate(file, packet); | 564 | && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class) |
| 561 | if (!ret) | 565 | && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) { |
| 566 | spin_lock_irq(&file->send_lock); | ||
| 562 | list_add_tail(&packet->list, &file->send_list); | 567 | list_add_tail(&packet->list, &file->send_list); |
| 563 | spin_unlock_irq(&file->send_lock); | 568 | spin_unlock_irq(&file->send_lock); |
| 564 | if (ret) { | 569 | } else { |
| 565 | ret = -EINVAL; | 570 | spin_lock_irq(&file->send_lock); |
| 566 | goto err_msg; | 571 | ret = is_duplicate(file, packet); |
| 572 | if (!ret) | ||
| 573 | list_add_tail(&packet->list, &file->send_list); | ||
| 574 | spin_unlock_irq(&file->send_lock); | ||
| 575 | if (ret) { | ||
| 576 | ret = -EINVAL; | ||
| 577 | goto err_msg; | ||
| 578 | } | ||
| 567 | } | 579 | } |
| 568 | 580 | ||
| 569 | ret = ib_post_send_mad(packet->msg, NULL); | 581 | ret = ib_post_send_mad(packet->msg, NULL); |
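These two ib_umad_write() hunks decide who owns RMPP segmentation. When the kernel runs RMPP for the agent, the old behavior holds; when user space runs it, every segment of a transfer legitimately carries the same TID, which is why the hunk above queues such packets without the is_duplicate() check. The combined predicate, restated as a helper (name illustrative):

	static bool umad_userspace_owns_rmpp(struct ib_mad_agent *agent,
					     struct ib_rmpp_mad *rmpp_mad)
	{
		return !ib_mad_kernel_rmpp_agent(agent) &&
		       ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class) &&
		       (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
			IB_MGMT_RMPP_FLAG_ACTIVE);
	}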
| @@ -614,6 +626,8 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, | |||
| 614 | mutex_lock(&file->mutex); | 626 | mutex_lock(&file->mutex); |
| 615 | 627 | ||
| 616 | if (!file->port->ib_dev) { | 628 | if (!file->port->ib_dev) { |
| 629 | dev_notice(file->port->dev, | ||
| 630 | "ib_umad_reg_agent: invalid device\n"); | ||
| 617 | ret = -EPIPE; | 631 | ret = -EPIPE; |
| 618 | goto out; | 632 | goto out; |
| 619 | } | 633 | } |
| @@ -624,6 +638,9 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, | |||
| 624 | } | 638 | } |
| 625 | 639 | ||
| 626 | if (ureq.qpn != 0 && ureq.qpn != 1) { | 640 | if (ureq.qpn != 0 && ureq.qpn != 1) { |
| 641 | dev_notice(file->port->dev, | ||
| 642 | "ib_umad_reg_agent: invalid QPN %d specified\n", | ||
| 643 | ureq.qpn); | ||
| 627 | ret = -EINVAL; | 644 | ret = -EINVAL; |
| 628 | goto out; | 645 | goto out; |
| 629 | } | 646 | } |
| @@ -632,11 +649,15 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, | |||
| 632 | if (!__get_agent(file, agent_id)) | 649 | if (!__get_agent(file, agent_id)) |
| 633 | goto found; | 650 | goto found; |
| 634 | 651 | ||
| 652 | dev_notice(file->port->dev, | ||
| 653 | "ib_umad_reg_agent: Max Agents (%u) reached\n", | ||
| 654 | IB_UMAD_MAX_AGENTS); | ||
| 635 | ret = -ENOMEM; | 655 | ret = -ENOMEM; |
| 636 | goto out; | 656 | goto out; |
| 637 | 657 | ||
| 638 | found: | 658 | found: |
| 639 | if (ureq.mgmt_class) { | 659 | if (ureq.mgmt_class) { |
| 660 | memset(&req, 0, sizeof(req)); | ||
| 640 | req.mgmt_class = ureq.mgmt_class; | 661 | req.mgmt_class = ureq.mgmt_class; |
| 641 | req.mgmt_class_version = ureq.mgmt_class_version; | 662 | req.mgmt_class_version = ureq.mgmt_class_version; |
| 642 | memcpy(req.oui, ureq.oui, sizeof req.oui); | 663 | memcpy(req.oui, ureq.oui, sizeof req.oui); |
| @@ -657,7 +678,7 @@ found: | |||
| 657 | ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, | 678 | ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, |
| 658 | ureq.mgmt_class ? &req : NULL, | 679 | ureq.mgmt_class ? &req : NULL, |
| 659 | ureq.rmpp_version, | 680 | ureq.rmpp_version, |
| 660 | send_handler, recv_handler, file); | 681 | send_handler, recv_handler, file, 0); |
| 661 | if (IS_ERR(agent)) { | 682 | if (IS_ERR(agent)) { |
| 662 | ret = PTR_ERR(agent); | 683 | ret = PTR_ERR(agent); |
| 663 | agent = NULL; | 684 | agent = NULL; |
| @@ -673,10 +694,11 @@ found: | |||
| 673 | if (!file->already_used) { | 694 | if (!file->already_used) { |
| 674 | file->already_used = 1; | 695 | file->already_used = 1; |
| 675 | if (!file->use_pkey_index) { | 696 | if (!file->use_pkey_index) { |
| 676 | printk(KERN_WARNING "user_mad: process %s did not enable " | 697 | dev_warn(file->port->dev, |
| 677 | "P_Key index support.\n", current->comm); | 698 | "process %s did not enable P_Key index support.\n", |
| 678 | printk(KERN_WARNING "user_mad: Documentation/infiniband/user_mad.txt " | 699 | current->comm); |
| 679 | "has info on the new ABI.\n"); | 700 | dev_warn(file->port->dev, |
| 701 | " Documentation/infiniband/user_mad.txt has info on the new ABI.\n"); | ||
| 680 | } | 702 | } |
| 681 | } | 703 | } |
| 682 | 704 | ||
| @@ -694,6 +716,119 @@ out: | |||
| 694 | return ret; | 716 | return ret; |
| 695 | } | 717 | } |
| 696 | 718 | ||
| 719 | static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg) | ||
| 720 | { | ||
| 721 | struct ib_user_mad_reg_req2 ureq; | ||
| 722 | struct ib_mad_reg_req req; | ||
| 723 | struct ib_mad_agent *agent = NULL; | ||
| 724 | int agent_id; | ||
| 725 | int ret; | ||
| 726 | |||
| 727 | mutex_lock(&file->port->file_mutex); | ||
| 728 | mutex_lock(&file->mutex); | ||
| 729 | |||
| 730 | if (!file->port->ib_dev) { | ||
| 731 | dev_notice(file->port->dev, | ||
| 732 | "ib_umad_reg_agent2: invalid device\n"); | ||
| 733 | ret = -EPIPE; | ||
| 734 | goto out; | ||
| 735 | } | ||
| 736 | |||
| 737 | if (copy_from_user(&ureq, arg, sizeof(ureq))) { | ||
| 738 | ret = -EFAULT; | ||
| 739 | goto out; | ||
| 740 | } | ||
| 741 | |||
| 742 | if (ureq.qpn != 0 && ureq.qpn != 1) { | ||
| 743 | dev_notice(file->port->dev, | ||
| 744 | "ib_umad_reg_agent2: invalid QPN %d specified\n", | ||
| 745 | ureq.qpn); | ||
| 746 | ret = -EINVAL; | ||
| 747 | goto out; | ||
| 748 | } | ||
| 749 | |||
| 750 | if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) { | ||
| 751 | dev_notice(file->port->dev, | ||
| 752 | "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n", | ||
| 753 | ureq.flags, IB_USER_MAD_REG_FLAGS_CAP); | ||
| 754 | ret = -EINVAL; | ||
| 755 | |||
| 756 | if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP, | ||
| 757 | (u32 __user *) (arg + offsetof(struct | ||
| 758 | ib_user_mad_reg_req2, flags)))) | ||
| 759 | ret = -EFAULT; | ||
| 760 | |||
| 761 | goto out; | ||
| 762 | } | ||
| 763 | |||
| 764 | for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id) | ||
| 765 | if (!__get_agent(file, agent_id)) | ||
| 766 | goto found; | ||
| 767 | |||
| 768 | dev_notice(file->port->dev, | ||
| 769 | "ib_umad_reg_agent2: Max Agents (%u) reached\n", | ||
| 770 | IB_UMAD_MAX_AGENTS); | ||
| 771 | ret = -ENOMEM; | ||
| 772 | goto out; | ||
| 773 | |||
| 774 | found: | ||
| 775 | if (ureq.mgmt_class) { | ||
| 776 | memset(&req, 0, sizeof(req)); | ||
| 777 | req.mgmt_class = ureq.mgmt_class; | ||
| 778 | req.mgmt_class_version = ureq.mgmt_class_version; | ||
| 779 | if (ureq.oui & 0xff000000) { | ||
| 780 | dev_notice(file->port->dev, | ||
| 781 | "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n", | ||
| 782 | ureq.oui); | ||
| 783 | ret = -EINVAL; | ||
| 784 | goto out; | ||
| 785 | } | ||
| 786 | req.oui[2] = ureq.oui & 0x0000ff; | ||
| 787 | req.oui[1] = (ureq.oui & 0x00ff00) >> 8; | ||
| 788 | req.oui[0] = (ureq.oui & 0xff0000) >> 16; | ||
| 789 | memcpy(req.method_mask, ureq.method_mask, | ||
| 790 | sizeof(req.method_mask)); | ||
| 791 | } | ||
| 792 | |||
| 793 | agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num, | ||
| 794 | ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, | ||
| 795 | ureq.mgmt_class ? &req : NULL, | ||
| 796 | ureq.rmpp_version, | ||
| 797 | send_handler, recv_handler, file, | ||
| 798 | ureq.flags); | ||
| 799 | if (IS_ERR(agent)) { | ||
| 800 | ret = PTR_ERR(agent); | ||
| 801 | agent = NULL; | ||
| 802 | goto out; | ||
| 803 | } | ||
| 804 | |||
| 805 | if (put_user(agent_id, | ||
| 806 | (u32 __user *)(arg + | ||
| 807 | offsetof(struct ib_user_mad_reg_req2, id)))) { | ||
| 808 | ret = -EFAULT; | ||
| 809 | goto out; | ||
| 810 | } | ||
| 811 | |||
| 812 | if (!file->already_used) { | ||
| 813 | file->already_used = 1; | ||
| 814 | file->use_pkey_index = 1; | ||
| 815 | } | ||
| 816 | |||
| 817 | file->agent[agent_id] = agent; | ||
| 818 | ret = 0; | ||
| 819 | |||
| 820 | out: | ||
| 821 | mutex_unlock(&file->mutex); | ||
| 822 | |||
| 823 | if (ret && agent) | ||
| 824 | ib_unregister_mad_agent(agent); | ||
| 825 | |||
| 826 | mutex_unlock(&file->port->file_mutex); | ||
| 827 | |||
| 828 | return ret; | ||
| 829 | } | ||
| 830 | |||
| 831 | |||
| 697 | static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) | 832 | static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) |
| 698 | { | 833 | { |
| 699 | struct ib_mad_agent *agent = NULL; | 834 | struct ib_mad_agent *agent = NULL; |
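From user space, registration through the new ioctl looks roughly like the sketch below; the header path, struct layout and the IB_USER_MAD_USER_RMPP flag name are assumptions inferred from the kernel side above, not a verbatim copy of the uAPI. Note that a successful call also forces use_pkey_index on, as the code above shows.

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <rdma/ib_user_mad.h>	/* assumed location of the reg_req2 ABI */

	static int register_agent2(int umad_fd)
	{
		struct ib_user_mad_reg_req2 req = {
			.qpn                = 1,	/* GSI */
			.mgmt_class         = 0x03,	/* e.g. SubnAdm */
			.mgmt_class_version = 2,
			.flags              = IB_USER_MAD_USER_RMPP,	/* assumed flag */
		};

		if (ioctl(umad_fd, IB_USER_MAD_REGISTER_AGENT2, &req) < 0)
			return -1;	/* on bad flags the kernel writes the supported set back */

		return (int)req.id;	/* agent id filled in by the kernel */
	}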
| @@ -749,6 +884,8 @@ static long ib_umad_ioctl(struct file *filp, unsigned int cmd, | |||
| 749 | return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg); | 884 | return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg); |
| 750 | case IB_USER_MAD_ENABLE_PKEY: | 885 | case IB_USER_MAD_ENABLE_PKEY: |
| 751 | return ib_umad_enable_pkey(filp->private_data); | 886 | return ib_umad_enable_pkey(filp->private_data); |
| 887 | case IB_USER_MAD_REGISTER_AGENT2: | ||
| 888 | return ib_umad_reg_agent2(filp->private_data, (void __user *) arg); | ||
| 752 | default: | 889 | default: |
| 753 | return -ENOIOCTLCMD; | 890 | return -ENOIOCTLCMD; |
| 754 | } | 891 | } |
| @@ -765,6 +902,8 @@ static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd, | |||
| 765 | return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg)); | 902 | return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg)); |
| 766 | case IB_USER_MAD_ENABLE_PKEY: | 903 | case IB_USER_MAD_ENABLE_PKEY: |
| 767 | return ib_umad_enable_pkey(filp->private_data); | 904 | return ib_umad_enable_pkey(filp->private_data); |
| 905 | case IB_USER_MAD_REGISTER_AGENT2: | ||
| 906 | return ib_umad_reg_agent2(filp->private_data, compat_ptr(arg)); | ||
| 768 | default: | 907 | default: |
| 769 | return -ENOIOCTLCMD; | 908 | return -ENOIOCTLCMD; |
| 770 | } | 909 | } |
| @@ -983,7 +1122,7 @@ static CLASS_ATTR_STRING(abi_version, S_IRUGO, | |||
| 983 | 1122 | ||
| 984 | static dev_t overflow_maj; | 1123 | static dev_t overflow_maj; |
| 985 | static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS); | 1124 | static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS); |
| 986 | static int find_overflow_devnum(void) | 1125 | static int find_overflow_devnum(struct ib_device *device) |
| 987 | { | 1126 | { |
| 988 | int ret; | 1127 | int ret; |
| 989 | 1128 | ||
| @@ -991,7 +1130,8 @@ static int find_overflow_devnum(void) | |||
| 991 | ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2, | 1130 | ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2, |
| 992 | "infiniband_mad"); | 1131 | "infiniband_mad"); |
| 993 | if (ret) { | 1132 | if (ret) { |
| 994 | printk(KERN_ERR "user_mad: couldn't register dynamic device number\n"); | 1133 | dev_err(&device->dev, |
| 1134 | "couldn't register dynamic device number\n"); | ||
| 995 | return ret; | 1135 | return ret; |
| 996 | } | 1136 | } |
| 997 | } | 1137 | } |
| @@ -1014,7 +1154,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, | |||
| 1014 | devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS); | 1154 | devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS); |
| 1015 | if (devnum >= IB_UMAD_MAX_PORTS) { | 1155 | if (devnum >= IB_UMAD_MAX_PORTS) { |
| 1016 | spin_unlock(&port_lock); | 1156 | spin_unlock(&port_lock); |
| 1017 | devnum = find_overflow_devnum(); | 1157 | devnum = find_overflow_devnum(device); |
| 1018 | if (devnum < 0) | 1158 | if (devnum < 0) |
| 1019 | return -1; | 1159 | return -1; |
| 1020 | 1160 | ||
| @@ -1200,14 +1340,14 @@ static int __init ib_umad_init(void) | |||
| 1200 | ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2, | 1340 | ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2, |
| 1201 | "infiniband_mad"); | 1341 | "infiniband_mad"); |
| 1202 | if (ret) { | 1342 | if (ret) { |
| 1203 | printk(KERN_ERR "user_mad: couldn't register device number\n"); | 1343 | pr_err("couldn't register device number\n"); |
| 1204 | goto out; | 1344 | goto out; |
| 1205 | } | 1345 | } |
| 1206 | 1346 | ||
| 1207 | umad_class = class_create(THIS_MODULE, "infiniband_mad"); | 1347 | umad_class = class_create(THIS_MODULE, "infiniband_mad"); |
| 1208 | if (IS_ERR(umad_class)) { | 1348 | if (IS_ERR(umad_class)) { |
| 1209 | ret = PTR_ERR(umad_class); | 1349 | ret = PTR_ERR(umad_class); |
| 1210 | printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n"); | 1350 | pr_err("couldn't create class infiniband_mad\n"); |
| 1211 | goto out_chrdev; | 1351 | goto out_chrdev; |
| 1212 | } | 1352 | } |
| 1213 | 1353 | ||
| @@ -1215,13 +1355,13 @@ static int __init ib_umad_init(void) | |||
| 1215 | 1355 | ||
| 1216 | ret = class_create_file(umad_class, &class_attr_abi_version.attr); | 1356 | ret = class_create_file(umad_class, &class_attr_abi_version.attr); |
| 1217 | if (ret) { | 1357 | if (ret) { |
| 1218 | printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n"); | 1358 | pr_err("couldn't create abi_version attribute\n"); |
| 1219 | goto out_class; | 1359 | goto out_class; |
| 1220 | } | 1360 | } |
| 1221 | 1361 | ||
| 1222 | ret = ib_register_client(&umad_client); | 1362 | ret = ib_register_client(&umad_client); |
| 1223 | if (ret) { | 1363 | if (ret) { |
| 1224 | printk(KERN_ERR "user_mad: couldn't register ib_umad client\n"); | 1364 | pr_err("couldn't register ib_umad client\n"); |
| 1225 | goto out_class; | 1365 | goto out_class; |
| 1226 | } | 1366 | } |
| 1227 | 1367 | ||
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index a283274a5a09..643c08a025a5 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h | |||
| @@ -221,6 +221,7 @@ IB_UVERBS_DECLARE_CMD(query_port); | |||
| 221 | IB_UVERBS_DECLARE_CMD(alloc_pd); | 221 | IB_UVERBS_DECLARE_CMD(alloc_pd); |
| 222 | IB_UVERBS_DECLARE_CMD(dealloc_pd); | 222 | IB_UVERBS_DECLARE_CMD(dealloc_pd); |
| 223 | IB_UVERBS_DECLARE_CMD(reg_mr); | 223 | IB_UVERBS_DECLARE_CMD(reg_mr); |
| 224 | IB_UVERBS_DECLARE_CMD(rereg_mr); | ||
| 224 | IB_UVERBS_DECLARE_CMD(dereg_mr); | 225 | IB_UVERBS_DECLARE_CMD(dereg_mr); |
| 225 | IB_UVERBS_DECLARE_CMD(alloc_mw); | 226 | IB_UVERBS_DECLARE_CMD(alloc_mw); |
| 226 | IB_UVERBS_DECLARE_CMD(dealloc_mw); | 227 | IB_UVERBS_DECLARE_CMD(dealloc_mw); |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index ea6203ee7bcc..0600c50e6215 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
| @@ -1002,6 +1002,99 @@ err_free: | |||
| 1002 | return ret; | 1002 | return ret; |
| 1003 | } | 1003 | } |
| 1004 | 1004 | ||
| 1005 | ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file, | ||
| 1006 | const char __user *buf, int in_len, | ||
| 1007 | int out_len) | ||
| 1008 | { | ||
| 1009 | struct ib_uverbs_rereg_mr cmd; | ||
| 1010 | struct ib_uverbs_rereg_mr_resp resp; | ||
| 1011 | struct ib_udata udata; | ||
| 1012 | struct ib_pd *pd = NULL; | ||
| 1013 | struct ib_mr *mr; | ||
| 1014 | struct ib_pd *old_pd; | ||
| 1015 | int ret; | ||
| 1016 | struct ib_uobject *uobj; | ||
| 1017 | |||
| 1018 | if (out_len < sizeof(resp)) | ||
| 1019 | return -ENOSPC; | ||
| 1020 | |||
| 1021 | if (copy_from_user(&cmd, buf, sizeof(cmd))) | ||
| 1022 | return -EFAULT; | ||
| 1023 | |||
| 1024 | INIT_UDATA(&udata, buf + sizeof(cmd), | ||
| 1025 | (unsigned long) cmd.response + sizeof(resp), | ||
| 1026 | in_len - sizeof(cmd), out_len - sizeof(resp)); | ||
| 1027 | |||
| 1028 | if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags) | ||
| 1029 | return -EINVAL; | ||
| 1030 | |||
| 1031 | if ((cmd.flags & IB_MR_REREG_TRANS) && | ||
| 1032 | (!cmd.start || !cmd.hca_va || 0 >= cmd.length || | ||
| 1033 | (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))) | ||
| 1034 | return -EINVAL; | ||
| 1035 | |||
| 1036 | uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, | ||
| 1037 | file->ucontext); | ||
| 1038 | |||
| 1039 | if (!uobj) | ||
| 1040 | return -EINVAL; | ||
| 1041 | |||
| 1042 | mr = uobj->object; | ||
| 1043 | |||
| 1044 | if (cmd.flags & IB_MR_REREG_ACCESS) { | ||
| 1045 | ret = ib_check_mr_access(cmd.access_flags); | ||
| 1046 | if (ret) | ||
| 1047 | goto put_uobjs; | ||
| 1048 | } | ||
| 1049 | |||
| 1050 | if (cmd.flags & IB_MR_REREG_PD) { | ||
| 1051 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); | ||
| 1052 | if (!pd) { | ||
| 1053 | ret = -EINVAL; | ||
| 1054 | goto put_uobjs; | ||
| 1055 | } | ||
| 1056 | } | ||
| 1057 | |||
| 1058 | if (atomic_read(&mr->usecnt)) { | ||
| 1059 | ret = -EBUSY; | ||
| 1060 | goto put_uobj_pd; | ||
| 1061 | } | ||
| 1062 | |||
| 1063 | old_pd = mr->pd; | ||
| 1064 | ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start, | ||
| 1065 | cmd.length, cmd.hca_va, | ||
| 1066 | cmd.access_flags, pd, &udata); | ||
| 1067 | if (!ret) { | ||
| 1068 | if (cmd.flags & IB_MR_REREG_PD) { | ||
| 1069 | atomic_inc(&pd->usecnt); | ||
| 1070 | mr->pd = pd; | ||
| 1071 | atomic_dec(&old_pd->usecnt); | ||
| 1072 | } | ||
| 1073 | } else { | ||
| 1074 | goto put_uobj_pd; | ||
| 1075 | } | ||
| 1076 | |||
| 1077 | memset(&resp, 0, sizeof(resp)); | ||
| 1078 | resp.lkey = mr->lkey; | ||
| 1079 | resp.rkey = mr->rkey; | ||
| 1080 | |||
| 1081 | if (copy_to_user((void __user *)(unsigned long)cmd.response, | ||
| 1082 | &resp, sizeof(resp))) | ||
| 1083 | ret = -EFAULT; | ||
| 1084 | else | ||
| 1085 | ret = in_len; | ||
| 1086 | |||
| 1087 | put_uobj_pd: | ||
| 1088 | if (cmd.flags & IB_MR_REREG_PD) | ||
| 1089 | put_pd_read(pd); | ||
| 1090 | |||
| 1091 | put_uobjs: | ||
| 1092 | |||
| 1093 | put_uobj_write(mr->uobject); | ||
| 1094 | |||
| 1095 | return ret; | ||
| 1096 | } | ||
| 1097 | |||
| 1005 | ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file, | 1098 | ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file, |
| 1006 | const char __user *buf, int in_len, | 1099 | const char __user *buf, int in_len, |
| 1007 | int out_len) | 1100 | int out_len) |
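The IB_MR_REREG_TRANS validation in ib_uverbs_rereg_mr() requires start and hca_va to share the same offset within a page, because the HCA translates at page granularity. A worked instance of the check, assuming 4 KiB pages:

	/* With 4 KiB pages, ~PAGE_MASK == 0xfff (the in-page offset bits). */
	static int offsets_match(u64 start, u64 hca_va)
	{
		return (start & ~PAGE_MASK) == (hca_va & ~PAGE_MASK);
	}

	/* offsets_match(0x10010, 0x7f0000010) -> 1, both offsets are 0x010;
	 * offsets_match(0x10010, 0x7f0000020) -> 0, rejected above with -EINVAL. */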
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 08219fb3338b..c73b22a257fe 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
| @@ -87,6 +87,7 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, | |||
| 87 | [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd, | 87 | [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd, |
| 88 | [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd, | 88 | [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd, |
| 89 | [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr, | 89 | [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr, |
| 90 | [IB_USER_VERBS_CMD_REREG_MR] = ib_uverbs_rereg_mr, | ||
| 90 | [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr, | 91 | [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr, |
| 91 | [IB_USER_VERBS_CMD_ALLOC_MW] = ib_uverbs_alloc_mw, | 92 | [IB_USER_VERBS_CMD_ALLOC_MW] = ib_uverbs_alloc_mw, |
| 92 | [IB_USER_VERBS_CMD_DEALLOC_MW] = ib_uverbs_dealloc_mw, | 93 | [IB_USER_VERBS_CMD_DEALLOC_MW] = ib_uverbs_dealloc_mw, |
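Adding the table entry is all the plumbing the new command needs here: the uverbs write() path dispatches by indexing uverbs_cmd_table with the command number from the user-supplied header. A simplified sketch of that dispatch (the real path also validates lengths and the device's command mask):

	if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
	    !uverbs_cmd_table[hdr.command])
		return -EINVAL;

	return uverbs_cmd_table[hdr.command](file, buf + sizeof(hdr),
					     in_len, out_len);	/* from hdr.in_words/out_words */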
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c index 49e0e8533f74..1b63185b4ad4 100644 --- a/drivers/infiniband/hw/amso1100/c2_cq.c +++ b/drivers/infiniband/hw/amso1100/c2_cq.c | |||
| @@ -260,11 +260,14 @@ static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) | |||
| 260 | mq->msg_pool.host, dma_unmap_addr(mq, mapping)); | 260 | mq->msg_pool.host, dma_unmap_addr(mq, mapping)); |
| 261 | } | 261 | } |
| 262 | 262 | ||
| 263 | static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, | 263 | static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, |
| 264 | int msg_size) | 264 | size_t q_size, size_t msg_size) |
| 265 | { | 265 | { |
| 266 | u8 *pool_start; | 266 | u8 *pool_start; |
| 267 | 267 | ||
| 268 | if (q_size > SIZE_MAX / msg_size) | ||
| 269 | return -EINVAL; | ||
| 270 | |||
| 268 | pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size, | 271 | pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size, |
| 269 | &mq->host_dma, GFP_KERNEL); | 272 | &mq->host_dma, GFP_KERNEL); |
| 270 | if (!pool_start) | 273 | if (!pool_start) |
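The added guard is the standard pre-multiplication overflow test: for a nonzero msg_size, q_size * msg_size wraps exactly when q_size > SIZE_MAX / msg_size. Widening the parameters from int to size_t matters as much as the check itself, since the old signed product could overflow before any comparison ran. Restated as a standalone helper:

	static int size_mul_ok(size_t a, size_t b)
	{
		return b == 0 || a <= SIZE_MAX / b;	/* a * b fits in size_t */
	}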
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index fbe6051af254..c9df0549f51d 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c | |||
| @@ -227,6 +227,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) | |||
| 227 | 227 | ||
| 228 | chp = get_chp(dev, qid); | 228 | chp = get_chp(dev, qid); |
| 229 | if (chp) { | 229 | if (chp) { |
| 230 | t4_clear_cq_armed(&chp->cq); | ||
| 230 | spin_lock_irqsave(&chp->comp_handler_lock, flag); | 231 | spin_lock_irqsave(&chp->comp_handler_lock, flag); |
| 231 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); | 232 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); |
| 232 | spin_unlock_irqrestore(&chp->comp_handler_lock, flag); | 233 | spin_unlock_irqrestore(&chp->comp_handler_lock, flag); |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index c158fcc02bca..41cd6882b648 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
| @@ -1105,7 +1105,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, | |||
| 1105 | struct c4iw_cq *schp) | 1105 | struct c4iw_cq *schp) |
| 1106 | { | 1106 | { |
| 1107 | int count; | 1107 | int count; |
| 1108 | int flushed; | 1108 | int rq_flushed, sq_flushed; |
| 1109 | unsigned long flag; | 1109 | unsigned long flag; |
| 1110 | 1110 | ||
| 1111 | PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); | 1111 | PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); |
| @@ -1123,27 +1123,40 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, | |||
| 1123 | 1123 | ||
| 1124 | c4iw_flush_hw_cq(rchp); | 1124 | c4iw_flush_hw_cq(rchp); |
| 1125 | c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); | 1125 | c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); |
| 1126 | flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); | 1126 | rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); |
| 1127 | spin_unlock(&qhp->lock); | 1127 | spin_unlock(&qhp->lock); |
| 1128 | spin_unlock_irqrestore(&rchp->lock, flag); | 1128 | spin_unlock_irqrestore(&rchp->lock, flag); |
| 1129 | if (flushed) { | ||
| 1130 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | ||
| 1131 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); | ||
| 1132 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | ||
| 1133 | } | ||
| 1134 | 1129 | ||
| 1135 | /* locking hierarchy: cq lock first, then qp lock. */ | 1130 | /* locking hierarchy: cq lock first, then qp lock. */ |
| 1136 | spin_lock_irqsave(&schp->lock, flag); | 1131 | spin_lock_irqsave(&schp->lock, flag); |
| 1137 | spin_lock(&qhp->lock); | 1132 | spin_lock(&qhp->lock); |
| 1138 | if (schp != rchp) | 1133 | if (schp != rchp) |
| 1139 | c4iw_flush_hw_cq(schp); | 1134 | c4iw_flush_hw_cq(schp); |
| 1140 | flushed = c4iw_flush_sq(qhp); | 1135 | sq_flushed = c4iw_flush_sq(qhp); |
| 1141 | spin_unlock(&qhp->lock); | 1136 | spin_unlock(&qhp->lock); |
| 1142 | spin_unlock_irqrestore(&schp->lock, flag); | 1137 | spin_unlock_irqrestore(&schp->lock, flag); |
| 1143 | if (flushed) { | 1138 | |
| 1144 | spin_lock_irqsave(&schp->comp_handler_lock, flag); | 1139 | if (schp == rchp) { |
| 1145 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); | 1140 | if (t4_clear_cq_armed(&rchp->cq) && |
| 1146 | spin_unlock_irqrestore(&schp->comp_handler_lock, flag); | 1141 | (rq_flushed || sq_flushed)) { |
| 1142 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | ||
| 1143 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, | ||
| 1144 | rchp->ibcq.cq_context); | ||
| 1145 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | ||
| 1146 | } | ||
| 1147 | } else { | ||
| 1148 | if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) { | ||
| 1149 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | ||
| 1150 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, | ||
| 1151 | rchp->ibcq.cq_context); | ||
| 1152 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | ||
| 1153 | } | ||
| 1154 | if (t4_clear_cq_armed(&schp->cq) && sq_flushed) { | ||
| 1155 | spin_lock_irqsave(&schp->comp_handler_lock, flag); | ||
| 1156 | (*schp->ibcq.comp_handler)(&schp->ibcq, | ||
| 1157 | schp->ibcq.cq_context); | ||
| 1158 | spin_unlock_irqrestore(&schp->comp_handler_lock, flag); | ||
| 1159 | } | ||
| 1147 | } | 1160 | } |
| 1148 | } | 1161 | } |
| 1149 | 1162 | ||
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index df5edfa31a8f..c04e5134b30c 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h | |||
| @@ -524,6 +524,10 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq) | |||
| 524 | return !wq->rq.queue[wq->rq.size].status.db_off; | 524 | return !wq->rq.queue[wq->rq.size].status.db_off; |
| 525 | } | 525 | } |
| 526 | 526 | ||
| 527 | enum t4_cq_flags { | ||
| 528 | CQ_ARMED = 1, | ||
| 529 | }; | ||
| 530 | |||
| 527 | struct t4_cq { | 531 | struct t4_cq { |
| 528 | struct t4_cqe *queue; | 532 | struct t4_cqe *queue; |
| 529 | dma_addr_t dma_addr; | 533 | dma_addr_t dma_addr; |
| @@ -544,12 +548,19 @@ struct t4_cq { | |||
| 544 | u16 cidx_inc; | 548 | u16 cidx_inc; |
| 545 | u8 gen; | 549 | u8 gen; |
| 546 | u8 error; | 550 | u8 error; |
| 551 | unsigned long flags; | ||
| 547 | }; | 552 | }; |
| 548 | 553 | ||
| 554 | static inline int t4_clear_cq_armed(struct t4_cq *cq) | ||
| 555 | { | ||
| 556 | return test_and_clear_bit(CQ_ARMED, &cq->flags); | ||
| 557 | } | ||
| 558 | |||
| 549 | static inline int t4_arm_cq(struct t4_cq *cq, int se) | 559 | static inline int t4_arm_cq(struct t4_cq *cq, int se) |
| 550 | { | 560 | { |
| 551 | u32 val; | 561 | u32 val; |
| 552 | 562 | ||
| 563 | set_bit(CQ_ARMED, &cq->flags); | ||
| 553 | while (cq->cidx_inc > CIDXINC_MASK) { | 564 | while (cq->cidx_inc > CIDXINC_MASK) { |
| 554 | val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) | | 565 | val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) | |
| 555 | INGRESSQID(cq->cqid); | 566 | INGRESSQID(cq->cqid); |
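Taken together with the qp.c and ev.c changes above, the armed bit forms an at-most-once notification gate: t4_arm_cq() sets CQ_ARMED before ringing the doorbell, and any path that wants to invoke the completion handler must first win the atomic test_and_clear_bit(). The consumer pattern from __flush_qp(), restated as a helper:

	static void notify_if_armed(struct c4iw_cq *chp, int flushed)
	{
		unsigned long flag;

		if (t4_clear_cq_armed(&chp->cq) && flushed) {
			spin_lock_irqsave(&chp->comp_handler_lock, flag);
			(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
			spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
		}
	}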
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c index 43f2d0424d4f..e890e5ba0e01 100644 --- a/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/drivers/infiniband/hw/ipath/ipath_mad.c | |||
| @@ -726,7 +726,7 @@ bail: | |||
| 726 | * @dd: the infinipath device | 726 | * @dd: the infinipath device |
| 727 | * @pkeys: the PKEY table | 727 | * @pkeys: the PKEY table |
| 728 | */ | 728 | */ |
| 729 | static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) | 729 | static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys, u8 port) |
| 730 | { | 730 | { |
| 731 | struct ipath_portdata *pd; | 731 | struct ipath_portdata *pd; |
| 732 | int i; | 732 | int i; |
| @@ -759,6 +759,7 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) | |||
| 759 | } | 759 | } |
| 760 | if (changed) { | 760 | if (changed) { |
| 761 | u64 pkey; | 761 | u64 pkey; |
| 762 | struct ib_event event; | ||
| 762 | 763 | ||
| 763 | pkey = (u64) dd->ipath_pkeys[0] | | 764 | pkey = (u64) dd->ipath_pkeys[0] | |
| 764 | ((u64) dd->ipath_pkeys[1] << 16) | | 765 | ((u64) dd->ipath_pkeys[1] << 16) | |
| @@ -768,12 +769,17 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) | |||
| 768 | (unsigned long long) pkey); | 769 | (unsigned long long) pkey); |
| 769 | ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, | 770 | ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, |
| 770 | pkey); | 771 | pkey); |
| 772 | |||
| 773 | event.event = IB_EVENT_PKEY_CHANGE; | ||
| 774 | event.device = &dd->verbs_dev->ibdev; | ||
| 775 | event.element.port_num = port; | ||
| 776 | ib_dispatch_event(&event); | ||
| 771 | } | 777 | } |
| 772 | return 0; | 778 | return 0; |
| 773 | } | 779 | } |
| 774 | 780 | ||
| 775 | static int recv_subn_set_pkeytable(struct ib_smp *smp, | 781 | static int recv_subn_set_pkeytable(struct ib_smp *smp, |
| 776 | struct ib_device *ibdev) | 782 | struct ib_device *ibdev, u8 port) |
| 777 | { | 783 | { |
| 778 | u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); | 784 | u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); |
| 779 | __be16 *p = (__be16 *) smp->data; | 785 | __be16 *p = (__be16 *) smp->data; |
| @@ -784,7 +790,7 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp, | |||
| 784 | for (i = 0; i < n; i++) | 790 | for (i = 0; i < n; i++) |
| 785 | q[i] = be16_to_cpu(p[i]); | 791 | q[i] = be16_to_cpu(p[i]); |
| 786 | 792 | ||
| 787 | if (startpx != 0 || set_pkeys(dev->dd, q) != 0) | 793 | if (startpx != 0 || set_pkeys(dev->dd, q, port) != 0) |
| 788 | smp->status |= IB_SMP_INVALID_FIELD; | 794 | smp->status |= IB_SMP_INVALID_FIELD; |
| 789 | 795 | ||
| 790 | return recv_subn_get_pkeytable(smp, ibdev); | 796 | return recv_subn_get_pkeytable(smp, ibdev); |
| @@ -1342,7 +1348,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, | |||
| 1342 | ret = recv_subn_set_portinfo(smp, ibdev, port_num); | 1348 | ret = recv_subn_set_portinfo(smp, ibdev, port_num); |
| 1343 | goto bail; | 1349 | goto bail; |
| 1344 | case IB_SMP_ATTR_PKEY_TABLE: | 1350 | case IB_SMP_ATTR_PKEY_TABLE: |
| 1345 | ret = recv_subn_set_pkeytable(smp, ibdev); | 1351 | ret = recv_subn_set_pkeytable(smp, ibdev, port_num); |
| 1346 | goto bail; | 1352 | goto bail; |
| 1347 | case IB_SMP_ATTR_SM_INFO: | 1353 | case IB_SMP_ATTR_SM_INFO: |
| 1348 | if (dev->port_cap_flags & IB_PORT_SM_DISABLED) { | 1354 | if (dev->port_cap_flags & IB_PORT_SM_DISABLED) { |
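With ipath now dispatching IB_EVENT_PKEY_CHANGE, interested consumers can react to table changes instead of polling for them. A minimal sketch of a consumer, using the event-handler API of this era:

	static void pkey_watcher(struct ib_event_handler *handler,
				 struct ib_event *event)
	{
		if (event->event == IB_EVENT_PKEY_CHANGE)
			pr_info("P_Key table changed on port %u\n",
				event->element.port_num);
	}

	static struct ib_event_handler pkey_handler;

	static void watch_pkeys(struct ib_device *device)
	{
		INIT_IB_EVENT_HANDLER(&pkey_handler, device, pkey_watcher);
		ib_register_event_handler(&pkey_handler);
	}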
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 287ad0564acd..82a7dd87089b 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
| @@ -891,7 +891,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev) | |||
| 891 | agent = ib_register_mad_agent(&dev->ib_dev, p + 1, | 891 | agent = ib_register_mad_agent(&dev->ib_dev, p + 1, |
| 892 | q ? IB_QPT_GSI : IB_QPT_SMI, | 892 | q ? IB_QPT_GSI : IB_QPT_SMI, |
| 893 | NULL, 0, send_handler, | 893 | NULL, 0, send_handler, |
| 894 | NULL, NULL); | 894 | NULL, NULL, 0); |
| 895 | if (IS_ERR(agent)) { | 895 | if (IS_ERR(agent)) { |
| 896 | ret = PTR_ERR(agent); | 896 | ret = PTR_ERR(agent); |
| 897 | goto err; | 897 | goto err; |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 0f7027e7db13..e1e558a3d692 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
| @@ -910,8 +910,7 @@ static int __mlx4_ib_default_rules_match(struct ib_qp *qp, | |||
| 910 | const struct default_rules *pdefault_rules = default_table; | 910 | const struct default_rules *pdefault_rules = default_table; |
| 911 | u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port); | 911 | u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port); |
| 912 | 912 | ||
| 913 | for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++, | 913 | for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) { |
| 914 | pdefault_rules++) { | ||
| 915 | __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS]; | 914 | __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS]; |
| 916 | memset(&field_types, 0, sizeof(field_types)); | 915 | memset(&field_types, 0, sizeof(field_types)); |
| 917 | 916 | ||
| @@ -965,8 +964,7 @@ static int __mlx4_ib_create_default_rules( | |||
| 965 | int size = 0; | 964 | int size = 0; |
| 966 | int i; | 965 | int i; |
| 967 | 966 | ||
| 968 | for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/ | 967 | for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) { |
| 969 | sizeof(pdefault_rules->rules_create_list[0]); i++) { | ||
| 970 | int ret; | 968 | int ret; |
| 971 | union ib_flow_spec ib_spec; | 969 | union ib_flow_spec ib_spec; |
| 972 | switch (pdefault_rules->rules_create_list[i]) { | 970 | switch (pdefault_rules->rules_create_list[i]) { |
| @@ -2007,6 +2005,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
| 2007 | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | | 2005 | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | |
| 2008 | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | | 2006 | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | |
| 2009 | (1ull << IB_USER_VERBS_CMD_REG_MR) | | 2007 | (1ull << IB_USER_VERBS_CMD_REG_MR) | |
| 2008 | (1ull << IB_USER_VERBS_CMD_REREG_MR) | | ||
| 2010 | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | | 2009 | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | |
| 2011 | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | | 2010 | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | |
| 2012 | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | | 2011 | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | |
| @@ -2059,6 +2058,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
| 2059 | ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq; | 2058 | ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq; |
| 2060 | ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr; | 2059 | ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr; |
| 2061 | ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr; | 2060 | ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr; |
| 2061 | ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr; | ||
| 2062 | ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr; | 2062 | ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr; |
| 2063 | ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr; | 2063 | ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr; |
| 2064 | ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list; | 2064 | ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list; |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 369da3ca5d64..e8cad3926bfc 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
| @@ -788,5 +788,9 @@ int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn); | |||
| 788 | void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count); | 788 | void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count); |
| 789 | int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, | 789 | int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, |
| 790 | int is_attach); | 790 | int is_attach); |
| 791 | int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, | ||
| 792 | u64 start, u64 length, u64 virt_addr, | ||
| 793 | int mr_access_flags, struct ib_pd *pd, | ||
| 794 | struct ib_udata *udata); | ||
| 791 | 795 | ||
| 792 | #endif /* MLX4_IB_H */ | 796 | #endif /* MLX4_IB_H */ |
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index cb2a8727f3fb..9b0e80e59b08 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
| @@ -144,8 +144,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 144 | if (!mr) | 144 | if (!mr) |
| 145 | return ERR_PTR(-ENOMEM); | 145 | return ERR_PTR(-ENOMEM); |
| 146 | 146 | ||
| 147 | /* Force registering the memory as writable. */ | ||
| 148 | /* Used for memory re-registration. HCA protects the access */ | ||

| 147 | mr->umem = ib_umem_get(pd->uobject->context, start, length, | 149 | mr->umem = ib_umem_get(pd->uobject->context, start, length, |
| 148 | access_flags, 0); | 150 | access_flags | IB_ACCESS_LOCAL_WRITE, 0); |
| 149 | if (IS_ERR(mr->umem)) { | 151 | if (IS_ERR(mr->umem)) { |
| 150 | err = PTR_ERR(mr->umem); | 152 | err = PTR_ERR(mr->umem); |
| 151 | goto err_free; | 153 | goto err_free; |
| @@ -183,6 +185,90 @@ err_free: | |||
| 183 | return ERR_PTR(err); | 185 | return ERR_PTR(err); |
| 184 | } | 186 | } |
| 185 | 187 | ||
| 188 | int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, | ||
| 189 | u64 start, u64 length, u64 virt_addr, | ||
| 190 | int mr_access_flags, struct ib_pd *pd, | ||
| 191 | struct ib_udata *udata) | ||
| 192 | { | ||
| 193 | struct mlx4_ib_dev *dev = to_mdev(mr->device); | ||
| 194 | struct mlx4_ib_mr *mmr = to_mmr(mr); | ||
| 195 | struct mlx4_mpt_entry *mpt_entry; | ||
| 196 | struct mlx4_mpt_entry **pmpt_entry = &mpt_entry; | ||
| 197 | int err; | ||
| 198 | |||
| 199 | /* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs, | ||
| 200 | * we assume that the calls can't run concurrently. Otherwise, a | ||
| 201 | * race exists. | ||
| 202 | */ | ||
| 203 | err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry); | ||
| 204 | |||
| 205 | if (err) | ||
| 206 | return err; | ||
| 207 | |||
| 208 | if (flags & IB_MR_REREG_PD) { | ||
| 209 | err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry, | ||
| 210 | to_mpd(pd)->pdn); | ||
| 211 | |||
| 212 | if (err) | ||
| 213 | goto release_mpt_entry; | ||
| 214 | } | ||
| 215 | |||
| 216 | if (flags & IB_MR_REREG_ACCESS) { | ||
| 217 | err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry, | ||
| 218 | convert_access(mr_access_flags)); | ||
| 219 | |||
| 220 | if (err) | ||
| 221 | goto release_mpt_entry; | ||
| 222 | } | ||
| 223 | |||
| 224 | if (flags & IB_MR_REREG_TRANS) { | ||
| 225 | int shift; | ||
| 226 | int err; | ||
| 227 | int n; | ||
| 228 | |||
| 229 | mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr); | ||
| 230 | ib_umem_release(mmr->umem); | ||
| 231 | mmr->umem = ib_umem_get(mr->uobject->context, start, length, | ||
| 232 | mr_access_flags | | ||
| 233 | IB_ACCESS_LOCAL_WRITE, | ||
| 234 | 0); | ||
| 235 | if (IS_ERR(mmr->umem)) { | ||
| 236 | err = PTR_ERR(mmr->umem); | ||
| 237 | mmr->umem = NULL; | ||
| 238 | goto release_mpt_entry; | ||
| 239 | } | ||
| 240 | n = ib_umem_page_count(mmr->umem); | ||
| 241 | shift = ilog2(mmr->umem->page_size); | ||
| 242 | |||
| 243 | mmr->mmr.iova = virt_addr; | ||
| 244 | mmr->mmr.size = length; | ||
| 245 | err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr, | ||
| 246 | virt_addr, length, n, shift, | ||
| 247 | *pmpt_entry); | ||
| 248 | if (err) { | ||
| 249 | ib_umem_release(mmr->umem); | ||
| 250 | goto release_mpt_entry; | ||
| 251 | } | ||
| 252 | |||
| 253 | err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem); | ||
| 254 | if (err) { | ||
| 255 | mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr); | ||
| 256 | ib_umem_release(mmr->umem); | ||
| 257 | goto release_mpt_entry; | ||
| 258 | } | ||
| 259 | } | ||
| 260 | |||
| 261 | /* If we couldn't transfer the MR to the HCA, just remember to | ||
| 262 | * return a failure. But dereg_mr will free the resources. | ||
| 263 | */ | ||
| 264 | err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry); | ||
| 265 | |||
| 266 | release_mpt_entry: | ||
| 267 | mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry); | ||
| 268 | |||
| 269 | return err; | ||
| 270 | } | ||
| 271 | |||
| 186 | int mlx4_ib_dereg_mr(struct ib_mr *ibmr) | 272 | int mlx4_ib_dereg_mr(struct ib_mr *ibmr) |
| 187 | { | 273 | { |
| 188 | struct mlx4_ib_mr *mr = to_mmr(ibmr); | 274 | struct mlx4_ib_mr *mr = to_mmr(ibmr); |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 7efe6e3f3542..8c574b63d77b 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
| @@ -2501,7 +2501,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 2501 | spin_lock_irqsave(&qp->sq.lock, flags); | 2501 | spin_lock_irqsave(&qp->sq.lock, flags); |
| 2502 | 2502 | ||
| 2503 | for (nreq = 0; wr; nreq++, wr = wr->next) { | 2503 | for (nreq = 0; wr; nreq++, wr = wr->next) { |
| 2504 | if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) { | 2504 | if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { |
| 2505 | mlx5_ib_warn(dev, "\n"); | 2505 | mlx5_ib_warn(dev, "\n"); |
| 2506 | err = -EINVAL; | 2506 | err = -EINVAL; |
| 2507 | *bad_wr = wr; | 2507 | *bad_wr = wr; |
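This and the two mlx4 conversions above replace the same open-coded division. The kernel's ARRAY_SIZE is essentially the expression below, plus a __must_be_array() check so that passing a pointer (where sizeof would yield the pointer size, not the array size) fails to compile:

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))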
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index b6f7f457fc55..8881fa376e06 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c | |||
| @@ -294,7 +294,7 @@ int mthca_create_agents(struct mthca_dev *dev) | |||
| 294 | agent = ib_register_mad_agent(&dev->ib_dev, p + 1, | 294 | agent = ib_register_mad_agent(&dev->ib_dev, p + 1, |
| 295 | q ? IB_QPT_GSI : IB_QPT_SMI, | 295 | q ? IB_QPT_GSI : IB_QPT_SMI, |
| 296 | NULL, 0, send_handler, | 296 | NULL, 0, send_handler, |
| 297 | NULL, NULL); | 297 | NULL, NULL, 0); |
| 298 | if (IS_ERR(agent)) { | 298 | if (IS_ERR(agent)) { |
| 299 | ret = PTR_ERR(agent); | 299 | ret = PTR_ERR(agent); |
| 300 | goto err; | 300 | goto err; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index 19011dbb930f..b43456ae124b 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h | |||
| @@ -40,7 +40,7 @@ | |||
| 40 | #include <be_roce.h> | 40 | #include <be_roce.h> |
| 41 | #include "ocrdma_sli.h" | 41 | #include "ocrdma_sli.h" |
| 42 | 42 | ||
| 43 | #define OCRDMA_ROCE_DRV_VERSION "10.2.145.0u" | 43 | #define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u" |
| 44 | 44 | ||
| 45 | #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" | 45 | #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" |
| 46 | #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" | 46 | #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" |
| @@ -137,6 +137,7 @@ struct mqe_ctx { | |||
| 137 | u16 cqe_status; | 137 | u16 cqe_status; |
| 138 | u16 ext_status; | 138 | u16 ext_status; |
| 139 | bool cmd_done; | 139 | bool cmd_done; |
| 140 | bool fw_error_state; | ||
| 140 | }; | 141 | }; |
| 141 | 142 | ||
| 142 | struct ocrdma_hw_mr { | 143 | struct ocrdma_hw_mr { |
| @@ -235,7 +236,10 @@ struct ocrdma_dev { | |||
| 235 | struct list_head entry; | 236 | struct list_head entry; |
| 236 | struct rcu_head rcu; | 237 | struct rcu_head rcu; |
| 237 | int id; | 238 | int id; |
| 238 | u64 stag_arr[OCRDMA_MAX_STAG]; | 239 | u64 *stag_arr; |
| 240 | u8 sl; /* service level */ | ||
| 241 | bool pfc_state; | ||
| 242 | atomic_t update_sl; | ||
| 239 | u16 pvid; | 243 | u16 pvid; |
| 240 | u32 asic_id; | 244 | u32 asic_id; |
| 241 | 245 | ||
| @@ -518,4 +522,22 @@ static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev) | |||
| 518 | OCRDMA_SLI_ASIC_GEN_NUM_SHIFT; | 522 | OCRDMA_SLI_ASIC_GEN_NUM_SHIFT; |
| 519 | } | 523 | } |
| 520 | 524 | ||
| 525 | static inline u8 ocrdma_get_pfc_prio(u8 *pfc, u8 prio) | ||
| 526 | { | ||
| 527 | return *(pfc + prio); | ||
| 528 | } | ||
| 529 | |||
| 530 | static inline u8 ocrdma_get_app_prio(u8 *app_prio, u8 prio) | ||
| 531 | { | ||
| 532 | return *(app_prio + prio); | ||
| 533 | } | ||
| 534 | |||
| 535 | static inline u8 ocrdma_is_enabled_and_synced(u32 state) | ||
| 536 | { /* May also be used to interpret TC-state, QCN-state, | ||
| 537 | * Appl-state and Logical-link-state in future. | ||
| 538 | */ | ||
| 539 | return (state & OCRDMA_STATE_FLAG_ENABLED) && | ||
| 540 | (state & OCRDMA_STATE_FLAG_SYNC); | ||
| 541 | } | ||
| 542 | |||
| 521 | #endif | 543 | #endif |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index d4cc01f10c01..40f8536c10b0 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c | |||
| @@ -35,6 +35,8 @@ | |||
| 35 | #include "ocrdma_ah.h" | 35 | #include "ocrdma_ah.h" |
| 36 | #include "ocrdma_hw.h" | 36 | #include "ocrdma_hw.h" |
| 37 | 37 | ||
| 38 | #define OCRDMA_VID_PCP_SHIFT 0xD | ||
| 39 | |||
| 38 | static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, | 40 | static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, |
| 39 | struct ib_ah_attr *attr, int pdid) | 41 | struct ib_ah_attr *attr, int pdid) |
| 40 | { | 42 | { |
| @@ -55,7 +57,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, | |||
| 55 | if (vlan_tag && (vlan_tag < 0x1000)) { | 57 | if (vlan_tag && (vlan_tag < 0x1000)) { |
| 56 | eth.eth_type = cpu_to_be16(0x8100); | 58 | eth.eth_type = cpu_to_be16(0x8100); |
| 57 | eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); | 59 | eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); |
| 58 | vlan_tag |= (attr->sl & 7) << 13; | 60 | vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT; |
| 59 | eth.vlan_tag = cpu_to_be16(vlan_tag); | 61 | eth.vlan_tag = cpu_to_be16(vlan_tag); |
| 60 | eth_sz = sizeof(struct ocrdma_eth_vlan); | 62 | eth_sz = sizeof(struct ocrdma_eth_vlan); |
| 61 | vlan_enabled = true; | 63 | vlan_enabled = true; |
| @@ -100,6 +102,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) | |||
| 100 | if (!(attr->ah_flags & IB_AH_GRH)) | 102 | if (!(attr->ah_flags & IB_AH_GRH)) |
| 101 | return ERR_PTR(-EINVAL); | 103 | return ERR_PTR(-EINVAL); |
| 102 | 104 | ||
| 105 | if (atomic_cmpxchg(&dev->update_sl, 1, 0)) | ||
| 106 | ocrdma_init_service_level(dev); | ||
| 103 | ah = kzalloc(sizeof(*ah), GFP_ATOMIC); | 107 | ah = kzalloc(sizeof(*ah), GFP_ATOMIC); |
| 104 | if (!ah) | 108 | if (!ah) |
| 105 | return ERR_PTR(-ENOMEM); | 109 | return ERR_PTR(-ENOMEM); |
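OCRDMA_VID_PCP_SHIFT (0xD, i.e. 13) places the service level into the PCP field of the 802.1Q tag, whose layout is [15:13] PCP, [12] DEI, [11:0] VID; the fix also sources the SL from the device rather than the AH attribute. A worked value, assuming dev->sl == 5 and VID 100:

	u16 vlan_tag = 100;				/* VID 0x064 */
	vlan_tag |= (5 & 0x07) << OCRDMA_VID_PCP_SHIFT;	/* 0xA000 | 0x064 == 0xA064 */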
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 3bbf2010a821..dd35ae558ae1 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c | |||
| @@ -525,7 +525,7 @@ static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev, | |||
| 525 | 525 | ||
| 526 | cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS; | 526 | cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS; |
| 527 | cmd->eqn = eq->id; | 527 | cmd->eqn = eq->id; |
| 528 | cmd->cqe_count = cq->size / sizeof(struct ocrdma_mcqe); | 528 | cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe); |
| 529 | 529 | ||
| 530 | ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE, | 530 | ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE, |
| 531 | cq->dma, PAGE_SIZE_4K); | 531 | cq->dma, PAGE_SIZE_4K); |
| @@ -661,7 +661,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, | |||
| 661 | { | 661 | { |
| 662 | struct ocrdma_qp *qp = NULL; | 662 | struct ocrdma_qp *qp = NULL; |
| 663 | struct ocrdma_cq *cq = NULL; | 663 | struct ocrdma_cq *cq = NULL; |
| 664 | struct ib_event ib_evt = { 0 }; | 664 | struct ib_event ib_evt; |
| 665 | int cq_event = 0; | 665 | int cq_event = 0; |
| 666 | int qp_event = 1; | 666 | int qp_event = 1; |
| 667 | int srq_event = 0; | 667 | int srq_event = 0; |
| @@ -674,6 +674,8 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, | |||
| 674 | if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) | 674 | if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) |
| 675 | cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK]; | 675 | cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK]; |
| 676 | 676 | ||
| 677 | memset(&ib_evt, 0, sizeof(ib_evt)); | ||
| 678 | |||
| 677 | ib_evt.device = &dev->ibdev; | 679 | ib_evt.device = &dev->ibdev; |
| 678 | 680 | ||
| 679 | switch (type) { | 681 | switch (type) { |
| @@ -771,6 +773,10 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev, | |||
| 771 | OCRDMA_AE_PVID_MCQE_TAG_MASK) >> | 773 | OCRDMA_AE_PVID_MCQE_TAG_MASK) >> |
| 772 | OCRDMA_AE_PVID_MCQE_TAG_SHIFT); | 774 | OCRDMA_AE_PVID_MCQE_TAG_SHIFT); |
| 773 | break; | 775 | break; |
| 776 | |||
| 777 | case OCRDMA_ASYNC_EVENT_COS_VALUE: | ||
| 778 | atomic_set(&dev->update_sl, 1); | ||
| 779 | break; | ||
| 774 | default: | 780 | default: |
| 775 | /* Not interested evts. */ | 781 | /* Not interested evts. */ |
| 776 | break; | 782 | break; |
| @@ -962,8 +968,12 @@ static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev) | |||
| 962 | msecs_to_jiffies(30000)); | 968 | msecs_to_jiffies(30000)); |
| 963 | if (status) | 969 | if (status) |
| 964 | return 0; | 970 | return 0; |
| 965 | else | 971 | else { |
| 972 | dev->mqe_ctx.fw_error_state = true; | ||
| 973 | pr_err("%s(%d) mailbox timeout: fw not responding\n", | ||
| 974 | __func__, dev->id); | ||
| 966 | return -1; | 975 | return -1; |
| 976 | } | ||
| 967 | } | 977 | } |
| 968 | 978 | ||
| 969 | /* issue a mailbox command on the MQ */ | 979 | /* issue a mailbox command on the MQ */ |
| @@ -975,6 +985,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe) | |||
| 975 | struct ocrdma_mbx_rsp *rsp = NULL; | 985 | struct ocrdma_mbx_rsp *rsp = NULL; |
| 976 | 986 | ||
| 977 | mutex_lock(&dev->mqe_ctx.lock); | 987 | mutex_lock(&dev->mqe_ctx.lock); |
| 988 | if (dev->mqe_ctx.fw_error_state) | ||
| 989 | goto mbx_err; | ||
| 978 | ocrdma_post_mqe(dev, mqe); | 990 | ocrdma_post_mqe(dev, mqe); |
| 979 | status = ocrdma_wait_mqe_cmpl(dev); | 991 | status = ocrdma_wait_mqe_cmpl(dev); |
| 980 | if (status) | 992 | if (status) |
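These two hunks form a sticky failure latch: the first 30-second mailbox timeout records fw_error_state, and every later command fails immediately under mqe_ctx.lock instead of stalling another 30 seconds against unresponsive firmware. The resulting fast path, sketched:

	static int ocrdma_mbx_cmd_sketch(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
	{
		int status = -1;

		mutex_lock(&dev->mqe_ctx.lock);
		if (dev->mqe_ctx.fw_error_state)	/* set once on first timeout */
			goto out;			/* fail fast, never post */
		ocrdma_post_mqe(dev, mqe);
		status = ocrdma_wait_mqe_cmpl(dev);
	out:
		mutex_unlock(&dev->mqe_ctx.lock);
		return status;
	}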
| @@ -1078,7 +1090,8 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, | |||
| 1078 | OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT; | 1090 | OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT; |
| 1079 | attr->max_mw = rsp->max_mw; | 1091 | attr->max_mw = rsp->max_mw; |
| 1080 | attr->max_mr = rsp->max_mr; | 1092 | attr->max_mr = rsp->max_mr; |
| 1081 | attr->max_mr_size = ~0ull; | 1093 | attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) | |
| 1094 | rsp->max_mr_size_lo; | ||
| 1082 | attr->max_fmr = 0; | 1095 | attr->max_fmr = 0; |
| 1083 | attr->max_pages_per_frmr = rsp->max_pages_per_frmr; | 1096 | attr->max_pages_per_frmr = rsp->max_pages_per_frmr; |
| 1084 | attr->max_num_mr_pbl = rsp->max_num_mr_pbl; | 1097 | attr->max_num_mr_pbl = rsp->max_num_mr_pbl; |
| @@ -1252,7 +1265,9 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev) | |||
| 1252 | ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va; | 1265 | ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va; |
| 1253 | hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs; | 1266 | hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs; |
| 1254 | 1267 | ||
| 1255 | dev->hba_port_num = hba_attribs->phy_port; | 1268 | dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv & |
| 1269 | OCRDMA_HBA_ATTRB_PTNUM_MASK) | ||
| 1270 | >> OCRDMA_HBA_ATTRB_PTNUM_SHIFT; | ||
| 1256 | strncpy(dev->model_number, | 1271 | strncpy(dev->model_number, |
| 1257 | hba_attribs->controller_model_number, 31); | 1272 | hba_attribs->controller_model_number, 31); |
| 1258 | } | 1273 | } |
| @@ -1302,7 +1317,8 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed) | |||
| 1302 | goto mbx_err; | 1317 | goto mbx_err; |
| 1303 | 1318 | ||
| 1304 | rsp = (struct ocrdma_get_link_speed_rsp *)cmd; | 1319 | rsp = (struct ocrdma_get_link_speed_rsp *)cmd; |
| 1305 | *lnk_speed = rsp->phys_port_speed; | 1320 | *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK) |
| 1321 | >> OCRDMA_PHY_PS_SHIFT; | ||
| 1306 | 1322 | ||
| 1307 | mbx_err: | 1323 | mbx_err: |
| 1308 | kfree(cmd); | 1324 | kfree(cmd); |
| @@ -1328,11 +1344,16 @@ static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev) | |||
| 1328 | goto mbx_err; | 1344 | goto mbx_err; |
| 1329 | 1345 | ||
| 1330 | rsp = (struct ocrdma_get_phy_info_rsp *)cmd; | 1346 | rsp = (struct ocrdma_get_phy_info_rsp *)cmd; |
| 1331 | dev->phy.phy_type = le16_to_cpu(rsp->phy_type); | 1347 | dev->phy.phy_type = |
| 1348 | (rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK); | ||
| 1349 | dev->phy.interface_type = | ||
| 1350 | (rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK) | ||
| 1351 | >> OCRDMA_IF_TYPE_SHIFT; | ||
| 1332 | dev->phy.auto_speeds_supported = | 1352 | dev->phy.auto_speeds_supported = |
| 1333 | le16_to_cpu(rsp->auto_speeds_supported); | 1353 | (rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK); |
| 1334 | dev->phy.fixed_speeds_supported = | 1354 | dev->phy.fixed_speeds_supported = |
| 1335 | le16_to_cpu(rsp->fixed_speeds_supported); | 1355 | (rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK) |
| 1356 | >> OCRDMA_FSPEED_SUPP_SHIFT; | ||
| 1336 | mbx_err: | 1357 | mbx_err: |
| 1337 | kfree(cmd); | 1358 | kfree(cmd); |
| 1338 | return status; | 1359 | return status; |
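Several of the query-path fixes above share one cause: newer firmware packs multiple fields into a single response word, so the driver must extract each with a mask and shift rather than read a dedicated member (ocrdma_mbx_create_cq below packs pd_id the same way in reverse). The idiom, restated with a hypothetical helper:

	static inline u32 ocrdma_get_field(u32 word, u32 mask, u32 shift)
	{
		return (word & mask) >> shift;
	}

	/* hba_port_num, phy_type/interface_type and the link speed above all
	 * reduce to ocrdma_get_field(word, FIELD_MASK, FIELD_SHIFT). */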
| @@ -1457,8 +1478,8 @@ static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev) | |||
| 1457 | 1478 | ||
| 1458 | pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va; | 1479 | pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va; |
| 1459 | for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) { | 1480 | for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) { |
| 1460 | pbes[i].pa_lo = (u32) (pa & 0xffffffff); | 1481 | pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff); |
| 1461 | pbes[i].pa_hi = (u32) upper_32_bits(pa); | 1482 | pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa)); |
| 1462 | pa += PAGE_SIZE; | 1483 | pa += PAGE_SIZE; |
| 1463 | } | 1484 | } |
| 1464 | cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF); | 1485 | cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF); |
| @@ -1501,6 +1522,7 @@ static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev) | |||
| 1501 | ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | 1522 | ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); |
| 1502 | dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va, | 1523 | dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va, |
| 1503 | dev->av_tbl.pa); | 1524 | dev->av_tbl.pa); |
| 1525 | dev->av_tbl.va = NULL; | ||
| 1504 | dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va, | 1526 | dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va, |
| 1505 | dev->av_tbl.pbl.pa); | 1527 | dev->av_tbl.pbl.pa); |
| 1506 | kfree(cmd); | 1528 | kfree(cmd); |
| @@ -1624,14 +1646,16 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, | |||
| 1624 | cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP << | 1646 | cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP << |
| 1625 | OCRDMA_CREATE_CQ_TYPE_SHIFT; | 1647 | OCRDMA_CREATE_CQ_TYPE_SHIFT; |
| 1626 | cq->phase_change = false; | 1648 | cq->phase_change = false; |
| 1627 | cmd->cmd.cqe_count = (cq->len / cqe_size); | 1649 | cmd->cmd.pdid_cqecnt = (cq->len / cqe_size); |
| 1628 | } else { | 1650 | } else { |
| 1629 | cmd->cmd.cqe_count = (cq->len / cqe_size) - 1; | 1651 | cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1; |
| 1630 | cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID; | 1652 | cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID; |
| 1631 | cq->phase_change = true; | 1653 | cq->phase_change = true; |
| 1632 | } | 1654 | } |
| 1633 | 1655 | ||
| 1634 | cmd->cmd.pd_id = pd_id; /* valid only for v3 */ | 1656 | /* pd_id valid only for v3 */ |
| 1657 | cmd->cmd.pdid_cqecnt |= (pd_id << | ||
| 1658 | OCRDMA_CREATE_CQ_CMD_PDID_SHIFT); | ||
| 1635 | ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size); | 1659 | ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size); |
| 1636 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | 1660 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); |
| 1637 | if (status) | 1661 | if (status) |
| @@ -2206,7 +2230,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs, | |||
| 2206 | OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK; | 2230 | OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK; |
| 2207 | qp->rq_cq = cq; | 2231 | qp->rq_cq = cq; |
| 2208 | 2232 | ||
| 2209 | if (pd->dpp_enabled && pd->num_dpp_qp) { | 2233 | if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp && |
| 2234 | (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) { | ||
| 2210 | ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq, | 2235 | ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq, |
| 2211 | dpp_cq_id); | 2236 | dpp_cq_id); |
| 2212 | } | 2237 | } |
| @@ -2264,6 +2289,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, | |||
| 2264 | 2289 | ||
| 2265 | if ((ah_attr->ah_flags & IB_AH_GRH) == 0) | 2290 | if ((ah_attr->ah_flags & IB_AH_GRH) == 0) |
| 2266 | return -EINVAL; | 2291 | return -EINVAL; |
| 2292 | if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0)) | ||
| 2293 | ocrdma_init_service_level(qp->dev); | ||
| 2267 | cmd->params.tclass_sq_psn |= | 2294 | cmd->params.tclass_sq_psn |= |
| 2268 | (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT); | 2295 | (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT); |
| 2269 | cmd->params.rnt_rc_sl_fl |= | 2296 | cmd->params.rnt_rc_sl_fl |= |
| @@ -2297,6 +2324,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, | |||
| 2297 | cmd->params.vlan_dmac_b4_to_b5 |= | 2324 | cmd->params.vlan_dmac_b4_to_b5 |= |
| 2298 | vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; | 2325 | vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; |
| 2299 | cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; | 2326 | cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; |
| 2327 | cmd->params.rnt_rc_sl_fl |= | ||
| 2328 | (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; | ||
| 2300 | } | 2329 | } |
| 2301 | return 0; | 2330 | return 0; |
| 2302 | } | 2331 | } |
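The atomic_cmpxchg(&qp->dev->update_sl, 1, 0) added above acts as a one-shot latch: an event handler presumably sets the flag to 1 when QoS parameters change, and whichever modify-QP path observes it first claims it atomically, so ocrdma_init_service_level() runs exactly once per change. A user-space sketch of the same pattern with C11 atomics (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int update_sl;

    /* Producer side: note that the service level is stale. */
    static void mark_sl_stale(void)
    {
            atomic_store(&update_sl, 1);
    }

    /* Consumer side: returns true for exactly one caller per
     * mark_sl_stale(), so the expensive refresh is not repeated. */
    static bool claim_sl_refresh(void)
    {
            int expected = 1;
            return atomic_compare_exchange_strong(&update_sl, &expected, 0);
    }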
| @@ -2604,6 +2633,168 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq) | |||
| 2604 | return status; | 2633 | return status; |
| 2605 | } | 2634 | } |
| 2606 | 2635 | ||
| 2636 | static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype, | ||
| 2637 | struct ocrdma_dcbx_cfg *dcbxcfg) | ||
| 2638 | { | ||
| 2639 | int status = 0; | ||
| 2640 | dma_addr_t pa; | ||
| 2641 | struct ocrdma_mqe cmd; | ||
| 2642 | |||
| 2643 | struct ocrdma_get_dcbx_cfg_req *req = NULL; | ||
| 2644 | struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL; | ||
| 2645 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 2646 | struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge; | ||
| 2647 | |||
| 2648 | memset(&cmd, 0, sizeof(struct ocrdma_mqe)); | ||
| 2649 | cmd.hdr.pyld_len = max_t(u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp), | ||
| 2650 | sizeof(struct ocrdma_get_dcbx_cfg_req)); | ||
| 2651 | req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL); | ||
| 2652 | if (!req) { | ||
| 2653 | status = -ENOMEM; | ||
| 2654 | goto mem_err; | ||
| 2655 | } | ||
| 2656 | |||
| 2657 | cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) & | ||
| 2658 | OCRDMA_MQE_HDR_SGE_CNT_MASK; | ||
| 2659 | mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL); | ||
| 2660 | mqe_sge->pa_hi = (u32) upper_32_bits(pa); | ||
| 2661 | mqe_sge->len = cmd.hdr.pyld_len; | ||
| 2662 | |||
| 2663 | memset(req, 0, sizeof(struct ocrdma_get_dcbx_cfg_req)); | ||
| 2664 | ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG, | ||
| 2665 | OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len); | ||
| 2666 | req->param_type = ptype; | ||
| 2667 | |||
| 2668 | status = ocrdma_mbx_cmd(dev, &cmd); | ||
| 2669 | if (status) | ||
| 2670 | goto mbx_err; | ||
| 2671 | |||
| 2672 | rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req; | ||
| 2673 | ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp)); | ||
| 2674 | memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg)); | ||
| 2675 | |||
| 2676 | mbx_err: | ||
| 2677 | dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa); | ||
| 2678 | mem_err: | ||
| 2679 | return status; | ||
| 2680 | } | ||
| 2681 | |||
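ocrdma_mbx_get_dcbx_config() follows the driver's non-embedded mailbox convention: the payload is too large to embed in the MQE itself, so one DMA-coherent buffer serves as both request and response, and the MQE header carries a single SGE pointing at it. A condensed, kernel-style sketch of the skeleton (payload_len, out and out_len are placeholders; this is not standalone-compilable code):

    struct ocrdma_mqe cmd = {};
    dma_addr_t pa;
    void *buf;
    int status;

    buf = dma_alloc_coherent(&pdev->dev, payload_len, &pa, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;

    /* describe the external buffer with one SGE in the header */
    cmd.u.nonemb_req.sge[0].pa_lo = lower_32_bits(pa);
    cmd.u.nonemb_req.sge[0].pa_hi = upper_32_bits(pa);
    cmd.u.nonemb_req.sge[0].len   = payload_len;

    status = ocrdma_mbx_cmd(dev, &cmd);
    if (!status)
            memcpy(out, buf, out_len);  /* after le32-to-cpu conversion */

    dma_free_coherent(&pdev->dev, payload_len, buf, pa);
    return status;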
| 2682 | #define OCRDMA_MAX_SERVICE_LEVEL_INDEX 0x08 | ||
| 2683 | #define OCRDMA_DEFAULT_SERVICE_LEVEL 0x05 | ||
| 2684 | |||
| 2685 | static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype, | ||
| 2686 | struct ocrdma_dcbx_cfg *dcbxcfg, | ||
| 2687 | u8 *srvc_lvl) | ||
| 2688 | { | ||
| 2689 | int status = -EINVAL, indx, slindx; | ||
| 2690 | int ventry_cnt; | ||
| 2691 | struct ocrdma_app_parameter *app_param; | ||
| 2692 | u8 valid, proto_sel; | ||
| 2693 | u8 app_prio, pfc_prio; | ||
| 2694 | u16 proto; | ||
| 2695 | |||
| 2696 | if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) { | ||
| 2697 | pr_info("%s ocrdma%d DCBX is disabled\n", | ||
| 2698 | dev_name(&dev->nic_info.pdev->dev), dev->id); | ||
| 2699 | goto out; | ||
| 2700 | } | ||
| 2701 | |||
| 2702 | if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) { | ||
| 2703 | pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n", | ||
| 2704 | dev_name(&dev->nic_info.pdev->dev), dev->id, | ||
| 2705 | (ptype > 0 ? "operational" : "admin"), | ||
| 2706 | (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ? | ||
| 2707 | "enabled" : "disabled", | ||
| 2708 | (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ? | ||
| 2709 | "" : ", not sync'ed"); | ||
| 2710 | goto out; | ||
| 2711 | } else { | ||
| 2712 | pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n", | ||
| 2713 | dev_name(&dev->nic_info.pdev->dev), dev->id); | ||
| 2714 | } | ||
| 2715 | |||
| 2716 | ventry_cnt = (dcbxcfg->tcv_aev_opv_st >> | ||
| 2717 | OCRDMA_DCBX_APP_ENTRY_SHIFT) | ||
| 2718 | & OCRDMA_DCBX_STATE_MASK; | ||
| 2719 | |||
| 2720 | for (indx = 0; indx < ventry_cnt; indx++) { | ||
| 2721 | app_param = &dcbxcfg->app_param[indx]; | ||
| 2722 | valid = (app_param->valid_proto_app >> | ||
| 2723 | OCRDMA_APP_PARAM_VALID_SHIFT) | ||
| 2724 | & OCRDMA_APP_PARAM_VALID_MASK; | ||
| 2725 | proto_sel = (app_param->valid_proto_app | ||
| 2726 | >> OCRDMA_APP_PARAM_PROTO_SEL_SHIFT) | ||
| 2727 | & OCRDMA_APP_PARAM_PROTO_SEL_MASK; | ||
| 2728 | proto = app_param->valid_proto_app & | ||
| 2729 | OCRDMA_APP_PARAM_APP_PROTO_MASK; | ||
| 2730 | |||
| 2731 | if (valid && | ||
| 2732 | proto == OCRDMA_APP_PROTO_ROCE && | ||
| 2733 | proto_sel == OCRDMA_PROTO_SELECT_L2) { | ||
| 2734 | for (slindx = 0; slindx < | ||
| 2735 | OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) { | ||
| 2736 | app_prio = ocrdma_get_app_prio( | ||
| 2737 | (u8 *)app_param->app_prio, | ||
| 2738 | slindx); | ||
| 2739 | pfc_prio = ocrdma_get_pfc_prio( | ||
| 2740 | (u8 *)dcbxcfg->pfc_prio, | ||
| 2741 | slindx); | ||
| 2742 | |||
| 2743 | if (app_prio && pfc_prio) { | ||
| 2744 | *srvc_lvl = slindx; | ||
| 2745 | status = 0; | ||
| 2746 | goto out; | ||
| 2747 | } | ||
| 2748 | } | ||
| 2749 | if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) { | ||
| 2750 | pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n", | ||
| 2751 | dev_name(&dev->nic_info.pdev->dev), | ||
| 2752 | dev->id, proto); | ||
| 2753 | } | ||
| 2754 | } | ||
| 2755 | } | ||
| 2756 | |||
| 2757 | out: | ||
| 2758 | return status; | ||
| 2759 | } | ||
| 2760 | |||
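Each valid_proto_app word in the response packs three fields: a validity byte at bit 24, a protocol-selector byte at bit 16, and the 16-bit protocol id in the low half (0x8915 is the RoCE ethertype). A stand-alone decoder using the same masks and shifts as the OCRDMA_APP_PARAM_* enum added in ocrdma_sli.h below:

    #include <stdint.h>
    #include <stdio.h>

    #define VALID_SHIFT     0x18    /* OCRDMA_APP_PARAM_VALID_SHIFT */
    #define VALID_MASK      0xFF
    #define PROTO_SEL_SHIFT 0x10    /* OCRDMA_APP_PARAM_PROTO_SEL_SHIFT */
    #define PROTO_SEL_MASK  0xFF
    #define APP_PROTO_MASK  0xFFFF
    #define PROTO_ROCE      0x8915  /* OCRDMA_APP_PROTO_ROCE */

    int main(void)
    {
            uint32_t w = (1u << VALID_SHIFT) | PROTO_ROCE;  /* valid, L2, RoCE */
            uint8_t  valid = (w >> VALID_SHIFT) & VALID_MASK;
            uint8_t  sel   = (w >> PROTO_SEL_SHIFT) & PROTO_SEL_MASK;
            uint16_t proto = w & APP_PROTO_MASK;

            printf("valid=%u sel=%u proto=0x%x roce=%d\n",
                   valid, sel, proto, proto == PROTO_ROCE);
            return 0;
    }

Only entries that are valid, select L2 and carry the RoCE ethertype are then matched against the PFC priority map to pick a service level.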
| 2761 | void ocrdma_init_service_level(struct ocrdma_dev *dev) | ||
| 2762 | { | ||
| 2763 | int status = 0, indx; | ||
| 2764 | struct ocrdma_dcbx_cfg dcbxcfg; | ||
| 2765 | u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL; | ||
| 2766 | int ptype = OCRDMA_PARAMETER_TYPE_OPER; | ||
| 2767 | |||
| 2768 | for (indx = 0; indx < 2; indx++) { | ||
| 2769 | status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg); | ||
| 2770 | if (status) { | ||
| 2771 | pr_err("%s(): status=%d\n", __func__, status); | ||
| 2772 | ptype = OCRDMA_PARAMETER_TYPE_ADMIN; | ||
| 2773 | continue; | ||
| 2774 | } | ||
| 2775 | |||
| 2776 | status = ocrdma_parse_dcbxcfg_rsp(dev, ptype, | ||
| 2777 | &dcbxcfg, &srvc_lvl); | ||
| 2778 | if (status) { | ||
| 2779 | ptype = OCRDMA_PARAMETER_TYPE_ADMIN; | ||
| 2780 | continue; | ||
| 2781 | } | ||
| 2782 | |||
| 2783 | break; | ||
| 2784 | } | ||
| 2785 | |||
| 2786 | if (status) | ||
| 2787 | pr_info("%s ocrdma%d service level default\n", | ||
| 2788 | dev_name(&dev->nic_info.pdev->dev), dev->id); | ||
| 2789 | else | ||
| 2790 | pr_info("%s ocrdma%d service level %d\n", | ||
| 2791 | dev_name(&dev->nic_info.pdev->dev), dev->id, | ||
| 2792 | srvc_lvl); | ||
| 2793 | |||
| 2794 | dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state); | ||
| 2795 | dev->sl = srvc_lvl; | ||
| 2796 | } | ||
| 2797 | |||
| 2607 | int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah) | 2798 | int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah) |
| 2608 | { | 2799 | { |
| 2609 | int i; | 2800 | int i; |
| @@ -2709,13 +2900,15 @@ int ocrdma_init_hw(struct ocrdma_dev *dev) | |||
| 2709 | goto conf_err; | 2900 | goto conf_err; |
| 2710 | status = ocrdma_mbx_get_phy_info(dev); | 2901 | status = ocrdma_mbx_get_phy_info(dev); |
| 2711 | if (status) | 2902 | if (status) |
| 2712 | goto conf_err; | 2903 | goto info_attrb_err; |
| 2713 | status = ocrdma_mbx_get_ctrl_attribs(dev); | 2904 | status = ocrdma_mbx_get_ctrl_attribs(dev); |
| 2714 | if (status) | 2905 | if (status) |
| 2715 | goto conf_err; | 2906 | goto info_attrb_err; |
| 2716 | 2907 | ||
| 2717 | return 0; | 2908 | return 0; |
| 2718 | 2909 | ||
| 2910 | info_attrb_err: | ||
| 2911 | ocrdma_mbx_delete_ah_tbl(dev); | ||
| 2719 | conf_err: | 2912 | conf_err: |
| 2720 | ocrdma_destroy_mq(dev); | 2913 | ocrdma_destroy_mq(dev); |
| 2721 | mq_err: | 2914 | mq_err: |
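The new info_attrb_err label slots into the existing goto ladder so that a failure in the phy-info or controller-attribute query also tears down the AH table created just before it; each label undoes exactly the work finished ahead of the jump. A generic sketch of the shape, with hypothetical step names:

    if (setup_a())          /* nothing to undo yet */
            goto out;
    if (setup_b())          /* undo A only */
            goto undo_a;
    if (setup_c())          /* undo B, then fall through to undo A */
            goto undo_b;
    return 0;

    undo_b:
            teardown_b();
    undo_a:
            teardown_a();
    out:
            return -EIO;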
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h index e513f7293142..6eed8f191322 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h | |||
| @@ -135,4 +135,6 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq); | |||
| 135 | 135 | ||
| 136 | int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset); | 136 | int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset); |
| 137 | char *port_speed_string(struct ocrdma_dev *dev); | 137 | char *port_speed_string(struct ocrdma_dev *dev); |
| 138 | void ocrdma_init_service_level(struct ocrdma_dev *); | ||
| 139 | |||
| 138 | #endif /* __OCRDMA_HW_H__ */ | 140 | #endif /* __OCRDMA_HW_H__ */ |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 7c504e079744..256a06bc0b68 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c | |||
| @@ -324,6 +324,11 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev) | |||
| 324 | if (!dev->qp_tbl) | 324 | if (!dev->qp_tbl) |
| 325 | goto alloc_err; | 325 | goto alloc_err; |
| 326 | } | 326 | } |
| 327 | |||
| 328 | dev->stag_arr = kzalloc(sizeof(u64) * OCRDMA_MAX_STAG, GFP_KERNEL); | ||
| 329 | if (dev->stag_arr == NULL) | ||
| 330 | goto alloc_err; | ||
| 331 | |||
| 327 | spin_lock_init(&dev->av_tbl.lock); | 332 | spin_lock_init(&dev->av_tbl.lock); |
| 328 | spin_lock_init(&dev->flush_q_lock); | 333 | spin_lock_init(&dev->flush_q_lock); |
| 329 | return 0; | 334 | return 0; |
| @@ -334,6 +339,7 @@ alloc_err: | |||
| 334 | 339 | ||
| 335 | static void ocrdma_free_resources(struct ocrdma_dev *dev) | 340 | static void ocrdma_free_resources(struct ocrdma_dev *dev) |
| 336 | { | 341 | { |
| 342 | kfree(dev->stag_arr); | ||
| 337 | kfree(dev->qp_tbl); | 343 | kfree(dev->qp_tbl); |
| 338 | kfree(dev->cq_tbl); | 344 | kfree(dev->cq_tbl); |
| 339 | kfree(dev->sgid_tbl); | 345 | kfree(dev->sgid_tbl); |
| @@ -353,15 +359,25 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, | |||
| 353 | { | 359 | { |
| 354 | struct ocrdma_dev *dev = dev_get_drvdata(device); | 360 | struct ocrdma_dev *dev = dev_get_drvdata(device); |
| 355 | 361 | ||
| 356 | return scnprintf(buf, PAGE_SIZE, "%s", &dev->attr.fw_ver[0]); | 362 | return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->attr.fw_ver[0]); |
| 363 | } | ||
| 364 | |||
| 365 | static ssize_t show_hca_type(struct device *device, | ||
| 366 | struct device_attribute *attr, char *buf) | ||
| 367 | { | ||
| 368 | struct ocrdma_dev *dev = dev_get_drvdata(device); | ||
| 369 | |||
| 370 | return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]); | ||
| 357 | } | 371 | } |
| 358 | 372 | ||
| 359 | static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); | 373 | static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); |
| 360 | static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); | 374 | static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); |
| 375 | static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL); | ||
| 361 | 376 | ||
| 362 | static struct device_attribute *ocrdma_attributes[] = { | 377 | static struct device_attribute *ocrdma_attributes[] = { |
| 363 | &dev_attr_hw_rev, | 378 | &dev_attr_hw_rev, |
| 364 | &dev_attr_fw_ver | 379 | &dev_attr_fw_ver, |
| 380 | &dev_attr_hca_type | ||
| 365 | }; | 381 | }; |
| 366 | 382 | ||
| 367 | static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev) | 383 | static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev) |
| @@ -372,6 +388,58 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev) | |||
| 372 | device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]); | 388 | device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]); |
| 373 | } | 389 | } |
| 374 | 390 | ||
| 391 | static void ocrdma_init_ipv4_gids(struct ocrdma_dev *dev, | ||
| 392 | struct net_device *net) | ||
| 393 | { | ||
| 394 | struct in_device *in_dev; | ||
| 395 | union ib_gid gid; | ||
| 396 | in_dev = in_dev_get(net); | ||
| 397 | if (in_dev) { | ||
| 398 | for_ifa(in_dev) { | ||
| 399 | ipv6_addr_set_v4mapped(ifa->ifa_address, | ||
| 400 | (struct in6_addr *)&gid); | ||
| 401 | ocrdma_add_sgid(dev, &gid); | ||
| 402 | } | ||
| 403 | endfor_ifa(in_dev); | ||
| 404 | in_dev_put(in_dev); | ||
| 405 | } | ||
| 406 | } | ||
| 407 | |||
| 408 | static void ocrdma_init_ipv6_gids(struct ocrdma_dev *dev, | ||
| 409 | struct net_device *net) | ||
| 410 | { | ||
| 411 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 412 | struct inet6_dev *in6_dev; | ||
| 413 | union ib_gid *pgid; | ||
| 414 | struct inet6_ifaddr *ifp; | ||
| 415 | in6_dev = in6_dev_get(net); | ||
| 416 | if (in6_dev) { | ||
| 417 | read_lock_bh(&in6_dev->lock); | ||
| 418 | list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { | ||
| 419 | pgid = (union ib_gid *)&ifp->addr; | ||
| 420 | ocrdma_add_sgid(dev, pgid); | ||
| 421 | } | ||
| 422 | read_unlock_bh(&in6_dev->lock); | ||
| 423 | in6_dev_put(in6_dev); | ||
| 424 | } | ||
| 425 | #endif | ||
| 426 | } | ||
| 427 | |||
| 428 | static void ocrdma_init_gid_table(struct ocrdma_dev *dev) | ||
| 429 | { | ||
| 430 | struct net_device *net_dev; | ||
| 431 | |||
| 432 | for_each_netdev(&init_net, net_dev) { | ||
| 433 | struct net_device *real_dev = rdma_vlan_dev_real_dev(net_dev) ? | ||
| 434 | rdma_vlan_dev_real_dev(net_dev) : net_dev; | ||
| 435 | |||
| 436 | if (real_dev == dev->nic_info.netdev) { | ||
| 437 | ocrdma_init_ipv4_gids(dev, net_dev); | ||
| 438 | ocrdma_init_ipv6_gids(dev, net_dev); | ||
| 439 | } | ||
| 440 | } | ||
| 441 | } | ||
| 442 | |||
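ocrdma_init_gid_table() seeds the SGID table from the netdev's current addresses; for IPv4 this means the v4-mapped IPv6 form ::ffff:a.b.c.d, which is exactly what ipv6_addr_set_v4mapped() writes into the GID above. A stand-alone illustration of the mapping:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Build the v4-mapped GID ::ffff:a.b.c.d from a network-order
     * IPv4 address (mirrors ipv6_addr_set_v4mapped() in the kernel). */
    static void v4_mapped_gid(uint32_t ipv4_be, uint8_t gid[16])
    {
            memset(gid, 0, 10);
            gid[10] = 0xff;
            gid[11] = 0xff;
            memcpy(&gid[12], &ipv4_be, 4);
    }

    int main(void)
    {
            uint8_t gid[16];
            char buf[INET6_ADDRSTRLEN];

            v4_mapped_gid(inet_addr("192.0.2.1"), gid);
            printf("%s\n", inet_ntop(AF_INET6, gid, buf, sizeof(buf)));
            /* prints ::ffff:192.0.2.1 */
            return 0;
    }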
| 375 | static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) | 443 | static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) |
| 376 | { | 444 | { |
| 377 | int status = 0, i; | 445 | int status = 0, i; |
| @@ -399,6 +467,8 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) | |||
| 399 | if (status) | 467 | if (status) |
| 400 | goto alloc_err; | 468 | goto alloc_err; |
| 401 | 469 | ||
| 470 | ocrdma_init_service_level(dev); | ||
| 471 | ocrdma_init_gid_table(dev); | ||
| 402 | status = ocrdma_register_device(dev); | 472 | status = ocrdma_register_device(dev); |
| 403 | if (status) | 473 | if (status) |
| 404 | goto alloc_err; | 474 | goto alloc_err; |
| @@ -508,6 +578,12 @@ static int ocrdma_close(struct ocrdma_dev *dev) | |||
| 508 | return 0; | 578 | return 0; |
| 509 | } | 579 | } |
| 510 | 580 | ||
| 581 | static void ocrdma_shutdown(struct ocrdma_dev *dev) | ||
| 582 | { | ||
| 583 | ocrdma_close(dev); | ||
| 584 | ocrdma_remove(dev); | ||
| 585 | } | ||
| 586 | |||
| 511 | /* event handling via NIC driver ensures that all the NIC specific | 587 | /* event handling via NIC driver ensures that all the NIC specific |
| 512 | * initialization done before RoCE driver notifies | 588 | * initialization done before RoCE driver notifies |
| 513 | * event to stack. | 589 | * event to stack. |
| @@ -521,6 +597,9 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event) | |||
| 521 | case BE_DEV_DOWN: | 597 | case BE_DEV_DOWN: |
| 522 | ocrdma_close(dev); | 598 | ocrdma_close(dev); |
| 523 | break; | 599 | break; |
| 600 | case BE_DEV_SHUTDOWN: | ||
| 601 | ocrdma_shutdown(dev); | ||
| 602 | break; | ||
| 524 | } | 603 | } |
| 525 | } | 604 | } |
| 526 | 605 | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 96c9ee602ba4..904989ec5eaa 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h | |||
| @@ -44,35 +44,39 @@ enum { | |||
| 44 | #define OCRDMA_SUBSYS_ROCE 10 | 44 | #define OCRDMA_SUBSYS_ROCE 10 |
| 45 | enum { | 45 | enum { |
| 46 | OCRDMA_CMD_QUERY_CONFIG = 1, | 46 | OCRDMA_CMD_QUERY_CONFIG = 1, |
| 47 | OCRDMA_CMD_ALLOC_PD, | 47 | OCRDMA_CMD_ALLOC_PD = 2, |
| 48 | OCRDMA_CMD_DEALLOC_PD, | 48 | OCRDMA_CMD_DEALLOC_PD = 3, |
| 49 | 49 | ||
| 50 | OCRDMA_CMD_CREATE_AH_TBL, | 50 | OCRDMA_CMD_CREATE_AH_TBL = 4, |
| 51 | OCRDMA_CMD_DELETE_AH_TBL, | 51 | OCRDMA_CMD_DELETE_AH_TBL = 5, |
| 52 | 52 | ||
| 53 | OCRDMA_CMD_CREATE_QP, | 53 | OCRDMA_CMD_CREATE_QP = 6, |
| 54 | OCRDMA_CMD_QUERY_QP, | 54 | OCRDMA_CMD_QUERY_QP = 7, |
| 55 | OCRDMA_CMD_MODIFY_QP, | 55 | OCRDMA_CMD_MODIFY_QP = 8, |
| 56 | OCRDMA_CMD_DELETE_QP, | 56 | OCRDMA_CMD_DELETE_QP = 9, |
| 57 | 57 | ||
| 58 | OCRDMA_CMD_RSVD1, | 58 | OCRDMA_CMD_RSVD1 = 10, |
| 59 | OCRDMA_CMD_ALLOC_LKEY, | 59 | OCRDMA_CMD_ALLOC_LKEY = 11, |
| 60 | OCRDMA_CMD_DEALLOC_LKEY, | 60 | OCRDMA_CMD_DEALLOC_LKEY = 12, |
| 61 | OCRDMA_CMD_REGISTER_NSMR, | 61 | OCRDMA_CMD_REGISTER_NSMR = 13, |
| 62 | OCRDMA_CMD_REREGISTER_NSMR, | 62 | OCRDMA_CMD_REREGISTER_NSMR = 14, |
| 63 | OCRDMA_CMD_REGISTER_NSMR_CONT, | 63 | OCRDMA_CMD_REGISTER_NSMR_CONT = 15, |
| 64 | OCRDMA_CMD_QUERY_NSMR, | 64 | OCRDMA_CMD_QUERY_NSMR = 16, |
| 65 | OCRDMA_CMD_ALLOC_MW, | 65 | OCRDMA_CMD_ALLOC_MW = 17, |
| 66 | OCRDMA_CMD_QUERY_MW, | 66 | OCRDMA_CMD_QUERY_MW = 18, |
| 67 | 67 | ||
| 68 | OCRDMA_CMD_CREATE_SRQ, | 68 | OCRDMA_CMD_CREATE_SRQ = 19, |
| 69 | OCRDMA_CMD_QUERY_SRQ, | 69 | OCRDMA_CMD_QUERY_SRQ = 20, |
| 70 | OCRDMA_CMD_MODIFY_SRQ, | 70 | OCRDMA_CMD_MODIFY_SRQ = 21, |
| 71 | OCRDMA_CMD_DELETE_SRQ, | 71 | OCRDMA_CMD_DELETE_SRQ = 22, |
| 72 | 72 | ||
| 73 | OCRDMA_CMD_ATTACH_MCAST, | 73 | OCRDMA_CMD_ATTACH_MCAST = 23, |
| 74 | OCRDMA_CMD_DETACH_MCAST, | 74 | OCRDMA_CMD_DETACH_MCAST = 24, |
| 75 | OCRDMA_CMD_GET_RDMA_STATS, | 75 | |
| 76 | OCRDMA_CMD_CREATE_RBQ = 25, | ||
| 77 | OCRDMA_CMD_DESTROY_RBQ = 26, | ||
| 78 | |||
| 79 | OCRDMA_CMD_GET_RDMA_STATS = 27, | ||
| 76 | 80 | ||
| 77 | OCRDMA_CMD_MAX | 81 | OCRDMA_CMD_MAX |
| 78 | }; | 82 | }; |
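Spelling the opcode values out explicitly matters because this enum is a firmware mailbox ABI: inserting OCRDMA_CMD_CREATE_RBQ/DESTROY_RBQ in the middle must not silently renumber OCRDMA_CMD_GET_RDMA_STATS, which keeps wire value 27. One could pin the ABI at build time with a guard like the following (hypothetical, not part of this patch; BUILD_BUG_ON must sit in function scope, e.g. the driver's init path):

    BUILD_BUG_ON(OCRDMA_CMD_ATTACH_MCAST   != 23);
    BUILD_BUG_ON(OCRDMA_CMD_GET_RDMA_STATS != 27);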
| @@ -103,7 +107,7 @@ enum { | |||
| 103 | 107 | ||
| 104 | #define OCRDMA_MAX_QP 2048 | 108 | #define OCRDMA_MAX_QP 2048 |
| 105 | #define OCRDMA_MAX_CQ 2048 | 109 | #define OCRDMA_MAX_CQ 2048 |
| 106 | #define OCRDMA_MAX_STAG 8192 | 110 | #define OCRDMA_MAX_STAG 16384 |
| 107 | 111 | ||
| 108 | enum { | 112 | enum { |
| 109 | OCRDMA_DB_RQ_OFFSET = 0xE0, | 113 | OCRDMA_DB_RQ_OFFSET = 0xE0, |
| @@ -422,7 +426,12 @@ struct ocrdma_ae_qp_mcqe { | |||
| 422 | 426 | ||
| 423 | #define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14 | 427 | #define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14 |
| 424 | #define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5 | 428 | #define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5 |
| 425 | #define OCRDMA_ASYNC_EVENT_PVID_STATE 0x3 | 429 | |
| 430 | enum ocrdma_async_grp5_events { | ||
| 431 | OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01, | ||
| 432 | OCRDMA_ASYNC_EVENT_COS_VALUE = 0x02, | ||
| 433 | OCRDMA_ASYNC_EVENT_PVID_STATE = 0x03 | ||
| 434 | }; | ||
| 426 | 435 | ||
| 427 | enum OCRDMA_ASYNC_EVENT_TYPE { | 436 | enum OCRDMA_ASYNC_EVENT_TYPE { |
| 428 | OCRDMA_CQ_ERROR = 0x00, | 437 | OCRDMA_CQ_ERROR = 0x00, |
| @@ -525,8 +534,8 @@ struct ocrdma_mbx_query_config { | |||
| 525 | u32 max_ird_ord_per_qp; | 534 | u32 max_ird_ord_per_qp; |
| 526 | u32 max_shared_ird_ord; | 535 | u32 max_shared_ird_ord; |
| 527 | u32 max_mr; | 536 | u32 max_mr; |
| 528 | u32 max_mr_size_lo; | ||
| 529 | u32 max_mr_size_hi; | 537 | u32 max_mr_size_hi; |
| 538 | u32 max_mr_size_lo; | ||
| 530 | u32 max_num_mr_pbl; | 539 | u32 max_num_mr_pbl; |
| 531 | u32 max_mw; | 540 | u32 max_mw; |
| 532 | u32 max_fmr; | 541 | u32 max_fmr; |
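Swapping max_mr_size_hi above max_mr_size_lo corrects the struct layout to the firmware's word order; a consumer then assembles the 64-bit limit from the two halves, roughly (field access is illustrative):

    u64 max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
                      rsp->max_mr_size_lo;

This pairs with the ocrdma_verbs.c change below that reports dev->attr.max_mr_size instead of ~0ull.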
| @@ -580,17 +589,26 @@ enum { | |||
| 580 | OCRDMA_FN_MODE_RDMA = 0x4 | 589 | OCRDMA_FN_MODE_RDMA = 0x4 |
| 581 | }; | 590 | }; |
| 582 | 591 | ||
| 592 | enum { | ||
| 593 | OCRDMA_IF_TYPE_MASK = 0xFFFF0000, | ||
| 594 | OCRDMA_IF_TYPE_SHIFT = 0x10, | ||
| 595 | OCRDMA_PHY_TYPE_MASK = 0x0000FFFF, | ||
| 596 | OCRDMA_FUTURE_DETAILS_MASK = 0xFFFF0000, | ||
| 597 | OCRDMA_FUTURE_DETAILS_SHIFT = 0x10, | ||
| 598 | OCRDMA_EX_PHY_DETAILS_MASK = 0x0000FFFF, | ||
| 599 | OCRDMA_FSPEED_SUPP_MASK = 0xFFFF0000, | ||
| 600 | OCRDMA_FSPEED_SUPP_SHIFT = 0x10, | ||
| 601 | OCRDMA_ASPEED_SUPP_MASK = 0x0000FFFF | ||
| 602 | }; | ||
| 603 | |||
| 583 | struct ocrdma_get_phy_info_rsp { | 604 | struct ocrdma_get_phy_info_rsp { |
| 584 | struct ocrdma_mqe_hdr hdr; | 605 | struct ocrdma_mqe_hdr hdr; |
| 585 | struct ocrdma_mbx_rsp rsp; | 606 | struct ocrdma_mbx_rsp rsp; |
| 586 | 607 | ||
| 587 | u16 phy_type; | 608 | u32 ityp_ptyp; |
| 588 | u16 interface_type; | ||
| 589 | u32 misc_params; | 609 | u32 misc_params; |
| 590 | u16 ext_phy_details; | 610 | u32 ftrdtl_exphydtl; |
| 591 | u16 rsvd; | 611 | u32 fspeed_aspeed; |
| 592 | u16 auto_speeds_supported; | ||
| 593 | u16 fixed_speeds_supported; | ||
| 594 | u32 future_use[2]; | 612 | u32 future_use[2]; |
| 595 | }; | 613 | }; |
| 596 | 614 | ||
| @@ -603,19 +621,34 @@ enum { | |||
| 603 | OCRDMA_PHY_SPEED_40GBPS = 0x20 | 621 | OCRDMA_PHY_SPEED_40GBPS = 0x20 |
| 604 | }; | 622 | }; |
| 605 | 623 | ||
| 624 | enum { | ||
| 625 | OCRDMA_PORT_NUM_MASK = 0x3F, | ||
| 626 | OCRDMA_PT_MASK = 0xC0, | ||
| 627 | OCRDMA_PT_SHIFT = 0x6, | ||
| 628 | OCRDMA_LINK_DUP_MASK = 0x0000FF00, | ||
| 629 | OCRDMA_LINK_DUP_SHIFT = 0x8, | ||
| 630 | OCRDMA_PHY_PS_MASK = 0x00FF0000, | ||
| 631 | OCRDMA_PHY_PS_SHIFT = 0x10, | ||
| 632 | OCRDMA_PHY_PFLT_MASK = 0xFF000000, | ||
| 633 | OCRDMA_PHY_PFLT_SHIFT = 0x18, | ||
| 634 | OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000, | ||
| 635 | OCRDMA_QOS_LNKSP_SHIFT = 0x10, | ||
| 636 | OCRDMA_LLST_MASK = 0xFF, | ||
| 637 | OCRDMA_PLFC_MASK = 0x00000400, | ||
| 638 | OCRDMA_PLFC_SHIFT = 0x8, | ||
| 639 | OCRDMA_PLRFC_MASK = 0x00000200, | ||
| 640 | OCRDMA_PLRFC_SHIFT = 0x8, | ||
| 641 | OCRDMA_PLTFC_MASK = 0x00000100, | ||
| 642 | OCRDMA_PLTFC_SHIFT = 0x8 | ||
| 643 | }; | ||
| 606 | 644 | ||
| 607 | struct ocrdma_get_link_speed_rsp { | 645 | struct ocrdma_get_link_speed_rsp { |
| 608 | struct ocrdma_mqe_hdr hdr; | 646 | struct ocrdma_mqe_hdr hdr; |
| 609 | struct ocrdma_mbx_rsp rsp; | 647 | struct ocrdma_mbx_rsp rsp; |
| 610 | 648 | ||
| 611 | u8 pt_port_num; | 649 | u32 pflt_pps_ld_pnum; |
| 612 | u8 link_duplex; | 650 | u32 qos_lsp; |
| 613 | u8 phys_port_speed; | 651 | u32 res_lls; |
| 614 | u8 phys_port_fault; | ||
| 615 | u16 rsvd1; | ||
| 616 | u16 qos_lnk_speed; | ||
| 617 | u8 logical_lnk_status; | ||
| 618 | u8 rsvd2[3]; | ||
| 619 | }; | 652 | }; |
| 620 | 653 | ||
| 621 | enum { | 654 | enum { |
| @@ -666,8 +699,7 @@ struct ocrdma_create_cq_cmd { | |||
| 666 | u32 pgsz_pgcnt; | 699 | u32 pgsz_pgcnt; |
| 667 | u32 ev_cnt_flags; | 700 | u32 ev_cnt_flags; |
| 668 | u32 eqn; | 701 | u32 eqn; |
| 669 | u16 cqe_count; | 702 | u32 pdid_cqecnt; |
| 670 | u16 pd_id; | ||
| 671 | u32 rsvd6; | 703 | u32 rsvd6; |
| 672 | struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES]; | 704 | struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES]; |
| 673 | }; | 705 | }; |
| @@ -678,6 +710,10 @@ struct ocrdma_create_cq { | |||
| 678 | }; | 710 | }; |
| 679 | 711 | ||
| 680 | enum { | 712 | enum { |
| 713 | OCRDMA_CREATE_CQ_CMD_PDID_SHIFT = 0x10 | ||
| 714 | }; | ||
| 715 | |||
| 716 | enum { | ||
| 681 | OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF | 717 | OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF |
| 682 | }; | 718 | }; |
| 683 | 719 | ||
| @@ -1231,7 +1267,6 @@ struct ocrdma_destroy_srq { | |||
| 1231 | 1267 | ||
| 1232 | enum { | 1268 | enum { |
| 1233 | OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16), | 1269 | OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16), |
| 1234 | OCRDMA_PD_MAX_DPP_ENABLED_QP = 8, | ||
| 1235 | OCRDMA_DPP_PAGE_SIZE = 4096 | 1270 | OCRDMA_DPP_PAGE_SIZE = 4096 |
| 1236 | }; | 1271 | }; |
| 1237 | 1272 | ||
| @@ -1896,12 +1931,62 @@ struct ocrdma_rdma_stats_resp { | |||
| 1896 | struct ocrdma_rx_dbg_stats rx_dbg_stats; | 1931 | struct ocrdma_rx_dbg_stats rx_dbg_stats; |
| 1897 | } __packed; | 1932 | } __packed; |
| 1898 | 1933 | ||
| 1934 | enum { | ||
| 1935 | OCRDMA_HBA_ATTRB_EPROM_VER_LO_MASK = 0xFF, | ||
| 1936 | OCRDMA_HBA_ATTRB_EPROM_VER_HI_MASK = 0xFF00, | ||
| 1937 | OCRDMA_HBA_ATTRB_EPROM_VER_HI_SHIFT = 0x08, | ||
| 1938 | OCRDMA_HBA_ATTRB_CDBLEN_MASK = 0xFFFF, | ||
| 1939 | OCRDMA_HBA_ATTRB_ASIC_REV_MASK = 0xFF0000, | ||
| 1940 | OCRDMA_HBA_ATTRB_ASIC_REV_SHIFT = 0x10, | ||
| 1941 | OCRDMA_HBA_ATTRB_GUID0_MASK = 0xFF000000, | ||
| 1942 | OCRDMA_HBA_ATTRB_GUID0_SHIFT = 0x18, | ||
| 1943 | OCRDMA_HBA_ATTRB_GUID13_MASK = 0xFF, | ||
| 1944 | OCRDMA_HBA_ATTRB_GUID14_MASK = 0xFF00, | ||
| 1945 | OCRDMA_HBA_ATTRB_GUID14_SHIFT = 0x08, | ||
| 1946 | OCRDMA_HBA_ATTRB_GUID15_MASK = 0xFF0000, | ||
| 1947 | OCRDMA_HBA_ATTRB_GUID15_SHIFT = 0x10, | ||
| 1948 | OCRDMA_HBA_ATTRB_PCNT_MASK = 0xFF000000, | ||
| 1949 | OCRDMA_HBA_ATTRB_PCNT_SHIFT = 0x18, | ||
| 1950 | OCRDMA_HBA_ATTRB_LDTOUT_MASK = 0xFFFF, | ||
| 1951 | OCRDMA_HBA_ATTRB_ISCSI_VER_MASK = 0xFF0000, | ||
| 1952 | OCRDMA_HBA_ATTRB_ISCSI_VER_SHIFT = 0x10, | ||
| 1953 | OCRDMA_HBA_ATTRB_MFUNC_DEV_MASK = 0xFF000000, | ||
| 1954 | OCRDMA_HBA_ATTRB_MFUNC_DEV_SHIFT = 0x18, | ||
| 1955 | OCRDMA_HBA_ATTRB_CV_MASK = 0xFF, | ||
| 1956 | OCRDMA_HBA_ATTRB_HBA_ST_MASK = 0xFF00, | ||
| 1957 | OCRDMA_HBA_ATTRB_HBA_ST_SHIFT = 0x08, | ||
| 1958 | OCRDMA_HBA_ATTRB_MAX_DOMS_MASK = 0xFF0000, | ||
| 1959 | OCRDMA_HBA_ATTRB_MAX_DOMS_SHIFT = 0x10, | ||
| 1960 | OCRDMA_HBA_ATTRB_PTNUM_MASK = 0x3F000000, | ||
| 1961 | OCRDMA_HBA_ATTRB_PTNUM_SHIFT = 0x18, | ||
| 1962 | OCRDMA_HBA_ATTRB_PT_MASK = 0xC0000000, | ||
| 1963 | OCRDMA_HBA_ATTRB_PT_SHIFT = 0x1E, | ||
| 1964 | OCRDMA_HBA_ATTRB_ISCSI_FET_MASK = 0xFF, | ||
| 1965 | OCRDMA_HBA_ATTRB_ASIC_GEN_MASK = 0xFF00, | ||
| 1966 | OCRDMA_HBA_ATTRB_ASIC_GEN_SHIFT = 0x08, | ||
| 1967 | OCRDMA_HBA_ATTRB_PCI_VID_MASK = 0xFFFF, | ||
| 1968 | OCRDMA_HBA_ATTRB_PCI_DID_MASK = 0xFFFF0000, | ||
| 1969 | OCRDMA_HBA_ATTRB_PCI_DID_SHIFT = 0x10, | ||
| 1970 | OCRDMA_HBA_ATTRB_PCI_SVID_MASK = 0xFFFF, | ||
| 1971 | OCRDMA_HBA_ATTRB_PCI_SSID_MASK = 0xFFFF0000, | ||
| 1972 | OCRDMA_HBA_ATTRB_PCI_SSID_SHIFT = 0x10, | ||
| 1973 | OCRDMA_HBA_ATTRB_PCI_BUSNUM_MASK = 0xFF, | ||
| 1974 | OCRDMA_HBA_ATTRB_PCI_DEVNUM_MASK = 0xFF00, | ||
| 1975 | OCRDMA_HBA_ATTRB_PCI_DEVNUM_SHIFT = 0x08, | ||
| 1976 | OCRDMA_HBA_ATTRB_PCI_FUNCNUM_MASK = 0xFF0000, | ||
| 1977 | OCRDMA_HBA_ATTRB_PCI_FUNCNUM_SHIFT = 0x10, | ||
| 1978 | OCRDMA_HBA_ATTRB_IF_TYPE_MASK = 0xFF000000, | ||
| 1979 | OCRDMA_HBA_ATTRB_IF_TYPE_SHIFT = 0x18, | ||
| 1980 | OCRDMA_HBA_ATTRB_NETFIL_MASK = 0xFF | ||
| 1981 | }; | ||
| 1899 | 1982 | ||
| 1900 | struct mgmt_hba_attribs { | 1983 | struct mgmt_hba_attribs { |
| 1901 | u8 flashrom_version_string[32]; | 1984 | u8 flashrom_version_string[32]; |
| 1902 | u8 manufacturer_name[32]; | 1985 | u8 manufacturer_name[32]; |
| 1903 | u32 supported_modes; | 1986 | u32 supported_modes; |
| 1904 | u32 rsvd0[3]; | 1987 | u32 rsvd_eprom_verhi_verlo; |
| 1988 | u32 mbx_ds_ver; | ||
| 1989 | u32 epfw_ds_ver; | ||
| 1905 | u8 ncsi_ver_string[12]; | 1990 | u8 ncsi_ver_string[12]; |
| 1906 | u32 default_extended_timeout; | 1991 | u32 default_extended_timeout; |
| 1907 | u8 controller_model_number[32]; | 1992 | u8 controller_model_number[32]; |
| @@ -1914,34 +1999,26 @@ struct mgmt_hba_attribs { | |||
| 1914 | u8 driver_version_string[32]; | 1999 | u8 driver_version_string[32]; |
| 1915 | u8 fw_on_flash_version_string[32]; | 2000 | u8 fw_on_flash_version_string[32]; |
| 1916 | u32 functionalities_supported; | 2001 | u32 functionalities_supported; |
| 1917 | u16 max_cdblength; | 2002 | u32 guid0_asicrev_cdblen; |
| 1918 | u8 asic_revision; | 2003 | u8 generational_guid[12]; |
| 1919 | u8 generational_guid[16]; | 2004 | u32 portcnt_guid15; |
| 1920 | u8 hba_port_count; | 2005 | u32 mfuncdev_iscsi_ldtout; |
| 1921 | u16 default_link_down_timeout; | 2006 | u32 ptpnum_maxdoms_hbast_cv; |
| 1922 | u8 iscsi_ver_min_max; | ||
| 1923 | u8 multifunction_device; | ||
| 1924 | u8 cache_valid; | ||
| 1925 | u8 hba_status; | ||
| 1926 | u8 max_domains_supported; | ||
| 1927 | u8 phy_port; | ||
| 1928 | u32 firmware_post_status; | 2007 | u32 firmware_post_status; |
| 1929 | u32 hba_mtu[8]; | 2008 | u32 hba_mtu[8]; |
| 1930 | u32 rsvd1[4]; | 2009 | u32 res_asicgen_iscsi_feaures; |
| 2010 | u32 rsvd1[3]; | ||
| 1931 | }; | 2011 | }; |
| 1932 | 2012 | ||
| 1933 | struct mgmt_controller_attrib { | 2013 | struct mgmt_controller_attrib { |
| 1934 | struct mgmt_hba_attribs hba_attribs; | 2014 | struct mgmt_hba_attribs hba_attribs; |
| 1935 | u16 pci_vendor_id; | 2015 | u32 pci_did_vid; |
| 1936 | u16 pci_device_id; | 2016 | u32 pci_ssid_svid; |
| 1937 | u16 pci_sub_vendor_id; | 2017 | u32 ityp_fnum_devnum_bnum; |
| 1938 | u16 pci_sub_system_id; | 2018 | u32 uid_hi; |
| 1939 | u8 pci_bus_number; | 2019 | u32 uid_lo; |
| 1940 | u8 pci_device_number; | 2020 | u32 res_nnetfil; |
| 1941 | u8 pci_function_number; | 2021 | u32 rsvd0[4]; |
| 1942 | u8 interface_type; | ||
| 1943 | u64 unique_identifier; | ||
| 1944 | u32 rsvd0[5]; | ||
| 1945 | }; | 2022 | }; |
| 1946 | 2023 | ||
| 1947 | struct ocrdma_get_ctrl_attribs_rsp { | 2024 | struct ocrdma_get_ctrl_attribs_rsp { |
| @@ -1949,5 +2026,79 @@ struct ocrdma_get_ctrl_attribs_rsp { | |||
| 1949 | struct mgmt_controller_attrib ctrl_attribs; | 2026 | struct mgmt_controller_attrib ctrl_attribs; |
| 1950 | }; | 2027 | }; |
| 1951 | 2028 | ||
| 2029 | #define OCRDMA_SUBSYS_DCBX 0x10 | ||
| 2030 | |||
| 2031 | enum OCRDMA_DCBX_OPCODE { | ||
| 2032 | OCRDMA_CMD_GET_DCBX_CONFIG = 0x01 | ||
| 2033 | }; | ||
| 2034 | |||
| 2035 | enum OCRDMA_DCBX_PARAM_TYPE { | ||
| 2036 | OCRDMA_PARAMETER_TYPE_ADMIN = 0x00, | ||
| 2037 | OCRDMA_PARAMETER_TYPE_OPER = 0x01, | ||
| 2038 | OCRDMA_PARAMETER_TYPE_PEER = 0x02 | ||
| 2039 | }; | ||
| 2040 | |||
| 2041 | enum OCRDMA_DCBX_APP_PROTO { | ||
| 2042 | OCRDMA_APP_PROTO_ROCE = 0x8915 | ||
| 2043 | }; | ||
| 2044 | |||
| 2045 | enum OCRDMA_DCBX_PROTO { | ||
| 2046 | OCRDMA_PROTO_SELECT_L2 = 0x00, | ||
| 2047 | OCRDMA_PROTO_SELECT_L4 = 0x01 | ||
| 2048 | }; | ||
| 2049 | |||
| 2050 | enum OCRDMA_DCBX_APP_PARAM { | ||
| 2051 | OCRDMA_APP_PARAM_APP_PROTO_MASK = 0xFFFF, | ||
| 2052 | OCRDMA_APP_PARAM_PROTO_SEL_MASK = 0xFF, | ||
| 2053 | OCRDMA_APP_PARAM_PROTO_SEL_SHIFT = 0x10, | ||
| 2054 | OCRDMA_APP_PARAM_VALID_MASK = 0xFF, | ||
| 2055 | OCRDMA_APP_PARAM_VALID_SHIFT = 0x18 | ||
| 2056 | }; | ||
| 2057 | |||
| 2058 | enum OCRDMA_DCBX_STATE_FLAGS { | ||
| 2059 | OCRDMA_STATE_FLAG_ENABLED = 0x01, | ||
| 2060 | OCRDMA_STATE_FLAG_ADDVERTISED = 0x02, | ||
| 2061 | OCRDMA_STATE_FLAG_WILLING = 0x04, | ||
| 2062 | OCRDMA_STATE_FLAG_SYNC = 0x08, | ||
| 2063 | OCRDMA_STATE_FLAG_UNSUPPORTED = 0x40000000, | ||
| 2064 | OCRDMA_STATE_FLAG_NEG_FAILD = 0x80000000 | ||
| 2065 | }; | ||
| 2066 | |||
| 2067 | enum OCRDMA_TCV_AEV_OPV_ST { | ||
| 2068 | OCRDMA_DCBX_TC_SUPPORT_MASK = 0xFF, | ||
| 2069 | OCRDMA_DCBX_TC_SUPPORT_SHIFT = 0x18, | ||
| 2070 | OCRDMA_DCBX_APP_ENTRY_SHIFT = 0x10, | ||
| 2071 | OCRDMA_DCBX_OP_PARAM_SHIFT = 0x08, | ||
| 2072 | OCRDMA_DCBX_STATE_MASK = 0xFF | ||
| 2073 | }; | ||
| 2074 | |||
| 2075 | struct ocrdma_app_parameter { | ||
| 2076 | u32 valid_proto_app; | ||
| 2077 | u32 oui; | ||
| 2078 | u32 app_prio[2]; | ||
| 2079 | }; | ||
| 2080 | |||
| 2081 | struct ocrdma_dcbx_cfg { | ||
| 2082 | u32 tcv_aev_opv_st; | ||
| 2083 | u32 tc_state; | ||
| 2084 | u32 pfc_state; | ||
| 2085 | u32 qcn_state; | ||
| 2086 | u32 appl_state; | ||
| 2087 | u32 ll_state; | ||
| 2088 | u32 tc_bw[2]; | ||
| 2089 | u32 tc_prio[8]; | ||
| 2090 | u32 pfc_prio[2]; | ||
| 2091 | struct ocrdma_app_parameter app_param[15]; | ||
| 2092 | }; | ||
| 2093 | |||
| 2094 | struct ocrdma_get_dcbx_cfg_req { | ||
| 2095 | struct ocrdma_mbx_hdr hdr; | ||
| 2096 | u32 param_type; | ||
| 2097 | } __packed; | ||
| 2098 | |||
| 2099 | struct ocrdma_get_dcbx_cfg_rsp { | ||
| 2100 | struct ocrdma_mbx_rsp hdr; | ||
| 2101 | struct ocrdma_dcbx_cfg cfg; | ||
| 2102 | } __packed; | ||
| 1952 | 2103 | ||
| 1953 | #endif /* __OCRDMA_SLI_H__ */ | 2104 | #endif /* __OCRDMA_SLI_H__ */ |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index edf6211d84b8..acb434d16903 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
| @@ -69,11 +69,11 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) | |||
| 69 | memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], | 69 | memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], |
| 70 | min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); | 70 | min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); |
| 71 | ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); | 71 | ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); |
| 72 | attr->max_mr_size = ~0ull; | 72 | attr->max_mr_size = dev->attr.max_mr_size; |
| 73 | attr->page_size_cap = 0xffff000; | 73 | attr->page_size_cap = 0xffff000; |
| 74 | attr->vendor_id = dev->nic_info.pdev->vendor; | 74 | attr->vendor_id = dev->nic_info.pdev->vendor; |
| 75 | attr->vendor_part_id = dev->nic_info.pdev->device; | 75 | attr->vendor_part_id = dev->nic_info.pdev->device; |
| 76 | attr->hw_ver = 0; | 76 | attr->hw_ver = dev->asic_id; |
| 77 | attr->max_qp = dev->attr.max_qp; | 77 | attr->max_qp = dev->attr.max_qp; |
| 78 | attr->max_ah = OCRDMA_MAX_AH; | 78 | attr->max_ah = OCRDMA_MAX_AH; |
| 79 | attr->max_qp_wr = dev->attr.max_wqe; | 79 | attr->max_qp_wr = dev->attr.max_wqe; |
| @@ -268,7 +268,8 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, | |||
| 268 | pd->dpp_enabled = | 268 | pd->dpp_enabled = |
| 269 | ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; | 269 | ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; |
| 270 | pd->num_dpp_qp = | 270 | pd->num_dpp_qp = |
| 271 | pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0; | 271 | pd->dpp_enabled ? (dev->nic_info.db_page_size / |
| 272 | dev->attr.wqe_size) : 0; | ||
| 272 | } | 273 | } |
| 273 | 274 | ||
| 274 | retry: | 275 | retry: |
| @@ -328,7 +329,10 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) | |||
| 328 | struct ocrdma_pd *pd = uctx->cntxt_pd; | 329 | struct ocrdma_pd *pd = uctx->cntxt_pd; |
| 329 | struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); | 330 | struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); |
| 330 | 331 | ||
| 331 | BUG_ON(uctx->pd_in_use); | 332 | if (uctx->pd_in_use) { |
| 333 | pr_err("%s(%d) Freeing in use pdid=0x%x.\n", | ||
| 334 | __func__, dev->id, pd->id); | ||
| 335 | } | ||
| 332 | uctx->cntxt_pd = NULL; | 336 | uctx->cntxt_pd = NULL; |
| 333 | status = _ocrdma_dealloc_pd(dev, pd); | 337 | status = _ocrdma_dealloc_pd(dev, pd); |
| 334 | return status; | 338 | return status; |
| @@ -843,6 +847,13 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr) | |||
| 843 | if (mr->umem) | 847 | if (mr->umem) |
| 844 | ib_umem_release(mr->umem); | 848 | ib_umem_release(mr->umem); |
| 845 | kfree(mr); | 849 | kfree(mr); |
| 850 | |||
| 851 | /* Don't stop cleanup, in case FW is unresponsive */ | ||
| 852 | if (dev->mqe_ctx.fw_error_state) { | ||
| 853 | status = 0; | ||
| 854 | pr_err("%s(%d) fw not responding.\n", | ||
| 855 | __func__, dev->id); | ||
| 856 | } | ||
| 846 | return status; | 857 | return status; |
| 847 | } | 858 | } |
| 848 | 859 | ||
| @@ -2054,6 +2065,13 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 2054 | } | 2065 | } |
| 2055 | 2066 | ||
| 2056 | while (wr) { | 2067 | while (wr) { |
| 2068 | if (qp->qp_type == IB_QPT_UD && | ||
| 2069 | (wr->opcode != IB_WR_SEND && | ||
| 2070 | wr->opcode != IB_WR_SEND_WITH_IMM)) { | ||
| 2071 | *bad_wr = wr; | ||
| 2072 | status = -EINVAL; | ||
| 2073 | break; | ||
| 2074 | } | ||
| 2057 | if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || | 2075 | if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || |
| 2058 | wr->num_sge > qp->sq.max_sges) { | 2076 | wr->num_sge > qp->sq.max_sges) { |
| 2059 | *bad_wr = wr; | 2077 | *bad_wr = wr; |
| @@ -2488,6 +2506,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp, | |||
| 2488 | *stop = true; | 2506 | *stop = true; |
| 2489 | expand = false; | 2507 | expand = false; |
| 2490 | } | 2508 | } |
| 2509 | } else if (is_hw_sq_empty(qp)) { | ||
| 2510 | /* Do nothing */ | ||
| 2511 | expand = false; | ||
| 2512 | *polled = false; | ||
| 2513 | *stop = false; | ||
| 2491 | } else { | 2514 | } else { |
| 2492 | *polled = true; | 2515 | *polled = true; |
| 2493 | expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); | 2516 | expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); |
| @@ -2593,6 +2616,11 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, | |||
| 2593 | *stop = true; | 2616 | *stop = true; |
| 2594 | expand = false; | 2617 | expand = false; |
| 2595 | } | 2618 | } |
| 2619 | } else if (is_hw_rq_empty(qp)) { | ||
| 2620 | /* Do nothing */ | ||
| 2621 | expand = false; | ||
| 2622 | *polled = false; | ||
| 2623 | *stop = false; | ||
| 2596 | } else { | 2624 | } else { |
| 2597 | *polled = true; | 2625 | *polled = true; |
| 2598 | expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); | 2626 | expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); |
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 22c720e5740d..636be117b578 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c | |||
| @@ -2476,7 +2476,7 @@ int qib_create_agents(struct qib_ibdev *dev) | |||
| 2476 | ibp = &dd->pport[p].ibport_data; | 2476 | ibp = &dd->pport[p].ibport_data; |
| 2477 | agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI, | 2477 | agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI, |
| 2478 | NULL, 0, send_handler, | 2478 | NULL, 0, send_handler, |
| 2479 | NULL, NULL); | 2479 | NULL, NULL, 0); |
| 2480 | if (IS_ERR(agent)) { | 2480 | if (IS_ERR(agent)) { |
| 2481 | ret = PTR_ERR(agent); | 2481 | ret = PTR_ERR(agent); |
| 2482 | goto err; | 2482 | goto err; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index c639f90cfda4..3edce617c31b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
| @@ -86,7 +86,6 @@ enum { | |||
| 86 | IPOIB_FLAG_INITIALIZED = 1, | 86 | IPOIB_FLAG_INITIALIZED = 1, |
| 87 | IPOIB_FLAG_ADMIN_UP = 2, | 87 | IPOIB_FLAG_ADMIN_UP = 2, |
| 88 | IPOIB_PKEY_ASSIGNED = 3, | 88 | IPOIB_PKEY_ASSIGNED = 3, |
| 89 | IPOIB_PKEY_STOP = 4, | ||
| 90 | IPOIB_FLAG_SUBINTERFACE = 5, | 89 | IPOIB_FLAG_SUBINTERFACE = 5, |
| 91 | IPOIB_MCAST_RUN = 6, | 90 | IPOIB_MCAST_RUN = 6, |
| 92 | IPOIB_STOP_REAPER = 7, | 91 | IPOIB_STOP_REAPER = 7, |
| @@ -312,7 +311,6 @@ struct ipoib_dev_priv { | |||
| 312 | struct list_head multicast_list; | 311 | struct list_head multicast_list; |
| 313 | struct rb_root multicast_tree; | 312 | struct rb_root multicast_tree; |
| 314 | 313 | ||
| 315 | struct delayed_work pkey_poll_task; | ||
| 316 | struct delayed_work mcast_task; | 314 | struct delayed_work mcast_task; |
| 317 | struct work_struct carrier_on_task; | 315 | struct work_struct carrier_on_task; |
| 318 | struct work_struct flush_light; | 316 | struct work_struct flush_light; |
| @@ -473,10 +471,11 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work); | |||
| 473 | void ipoib_pkey_event(struct work_struct *work); | 471 | void ipoib_pkey_event(struct work_struct *work); |
| 474 | void ipoib_ib_dev_cleanup(struct net_device *dev); | 472 | void ipoib_ib_dev_cleanup(struct net_device *dev); |
| 475 | 473 | ||
| 476 | int ipoib_ib_dev_open(struct net_device *dev); | 474 | int ipoib_ib_dev_open(struct net_device *dev, int flush); |
| 477 | int ipoib_ib_dev_up(struct net_device *dev); | 475 | int ipoib_ib_dev_up(struct net_device *dev); |
| 478 | int ipoib_ib_dev_down(struct net_device *dev, int flush); | 476 | int ipoib_ib_dev_down(struct net_device *dev, int flush); |
| 479 | int ipoib_ib_dev_stop(struct net_device *dev, int flush); | 477 | int ipoib_ib_dev_stop(struct net_device *dev, int flush); |
| 478 | void ipoib_pkey_dev_check_presence(struct net_device *dev); | ||
| 480 | 479 | ||
| 481 | int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); | 480 | int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); |
| 482 | void ipoib_dev_cleanup(struct net_device *dev); | 481 | void ipoib_dev_cleanup(struct net_device *dev); |
| @@ -532,8 +531,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf); | |||
| 532 | 531 | ||
| 533 | void ipoib_setup(struct net_device *dev); | 532 | void ipoib_setup(struct net_device *dev); |
| 534 | 533 | ||
| 535 | void ipoib_pkey_poll(struct work_struct *work); | 534 | void ipoib_pkey_open(struct ipoib_dev_priv *priv); |
| 536 | int ipoib_pkey_dev_delay_open(struct net_device *dev); | ||
| 537 | void ipoib_drain_cq(struct net_device *dev); | 535 | void ipoib_drain_cq(struct net_device *dev); |
| 538 | 536 | ||
| 539 | void ipoib_set_ethtool_ops(struct net_device *dev); | 537 | void ipoib_set_ethtool_ops(struct net_device *dev); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c index 50061854616e..6bd5740e2691 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c | |||
| @@ -281,10 +281,8 @@ void ipoib_delete_debug_files(struct net_device *dev) | |||
| 281 | { | 281 | { |
| 282 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 282 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
| 283 | 283 | ||
| 284 | if (priv->mcg_dentry) | 284 | debugfs_remove(priv->mcg_dentry); |
| 285 | debugfs_remove(priv->mcg_dentry); | 285 | debugfs_remove(priv->path_dentry); |
| 286 | if (priv->path_dentry) | ||
| 287 | debugfs_remove(priv->path_dentry); | ||
| 288 | } | 286 | } |
| 289 | 287 | ||
| 290 | int ipoib_register_debugfs(void) | 288 | int ipoib_register_debugfs(void) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 6a7003ddb0be..72626c348174 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
| @@ -664,17 +664,18 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx) | |||
| 664 | drain_tx_cq((struct net_device *)ctx); | 664 | drain_tx_cq((struct net_device *)ctx); |
| 665 | } | 665 | } |
| 666 | 666 | ||
| 667 | int ipoib_ib_dev_open(struct net_device *dev) | 667 | int ipoib_ib_dev_open(struct net_device *dev, int flush) |
| 668 | { | 668 | { |
| 669 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 669 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
| 670 | int ret; | 670 | int ret; |
| 671 | 671 | ||
| 672 | if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) { | 672 | ipoib_pkey_dev_check_presence(dev); |
| 673 | ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey); | 673 | |
| 674 | clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); | 674 | if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { |
| 675 | ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey, | ||
| 676 | (!(priv->pkey & 0x7fff) ? "Invalid" : "not found")); | ||
| 675 | return -1; | 677 | return -1; |
| 676 | } | 678 | } |
| 677 | set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); | ||
| 678 | 679 | ||
| 679 | ret = ipoib_init_qp(dev); | 680 | ret = ipoib_init_qp(dev); |
| 680 | if (ret) { | 681 | if (ret) { |
| @@ -705,16 +706,17 @@ int ipoib_ib_dev_open(struct net_device *dev) | |||
| 705 | dev_stop: | 706 | dev_stop: |
| 706 | if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) | 707 | if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) |
| 707 | napi_enable(&priv->napi); | 708 | napi_enable(&priv->napi); |
| 708 | ipoib_ib_dev_stop(dev, 1); | 709 | ipoib_ib_dev_stop(dev, flush); |
| 709 | return -1; | 710 | return -1; |
| 710 | } | 711 | } |
| 711 | 712 | ||
| 712 | static void ipoib_pkey_dev_check_presence(struct net_device *dev) | 713 | void ipoib_pkey_dev_check_presence(struct net_device *dev) |
| 713 | { | 714 | { |
| 714 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 715 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
| 715 | u16 pkey_index = 0; | ||
| 716 | 716 | ||
| 717 | if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) | 717 | if (!(priv->pkey & 0x7fff) || |
| 718 | ib_find_pkey(priv->ca, priv->port, priv->pkey, | ||
| 719 | &priv->pkey_index)) | ||
| 718 | clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); | 720 | clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); |
| 719 | else | 721 | else |
| 720 | set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); | 722 | set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); |
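The rewritten check treats a P_Key whose low 15 bits are zero as invalid: bit 15 is only the full/limited membership flag, so 0x0000 and 0x8000 both name no real partition. A tiny stand-alone predicate capturing the test:

    #include <stdbool.h>
    #include <stdint.h>

    /* Bit 15 of a P_Key is the membership bit; the partition number
     * lives in bits 0-14, so zero low bits mean "no partition". */
    static bool pkey_is_valid(uint16_t pkey)
    {
            return (pkey & 0x7fff) != 0;
    }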
| @@ -745,14 +747,6 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush) | |||
| 745 | clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); | 747 | clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); |
| 746 | netif_carrier_off(dev); | 748 | netif_carrier_off(dev); |
| 747 | 749 | ||
| 748 | /* Shutdown the P_Key thread if still active */ | ||
| 749 | if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { | ||
| 750 | mutex_lock(&pkey_mutex); | ||
| 751 | set_bit(IPOIB_PKEY_STOP, &priv->flags); | ||
| 752 | cancel_delayed_work_sync(&priv->pkey_poll_task); | ||
| 753 | mutex_unlock(&pkey_mutex); | ||
| 754 | } | ||
| 755 | |||
| 756 | ipoib_mcast_stop_thread(dev, flush); | 750 | ipoib_mcast_stop_thread(dev, flush); |
| 757 | ipoib_mcast_dev_flush(dev); | 751 | ipoib_mcast_dev_flush(dev); |
| 758 | 752 | ||
| @@ -924,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) | |||
| 924 | (unsigned long) dev); | 918 | (unsigned long) dev); |
| 925 | 919 | ||
| 926 | if (dev->flags & IFF_UP) { | 920 | if (dev->flags & IFF_UP) { |
| 927 | if (ipoib_ib_dev_open(dev)) { | 921 | if (ipoib_ib_dev_open(dev, 1)) { |
| 928 | ipoib_transport_dev_cleanup(dev); | 922 | ipoib_transport_dev_cleanup(dev); |
| 929 | return -ENODEV; | 923 | return -ENODEV; |
| 930 | } | 924 | } |
| @@ -966,13 +960,27 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv) | |||
| 966 | 960 | ||
| 967 | return 1; | 961 | return 1; |
| 968 | } | 962 | } |
| 963 | /* | ||
| 964 | * returns 0 if the pkey was not found or moved to a different index. |||
| 965 | */ | ||
| 966 | static inline int update_child_pkey(struct ipoib_dev_priv *priv) | ||
| 967 | { | ||
| 968 | u16 old_index = priv->pkey_index; | ||
| 969 | |||
| 970 | priv->pkey_index = 0; | ||
| 971 | ipoib_pkey_dev_check_presence(priv->dev); | ||
| 972 | |||
| 973 | if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) && | ||
| 974 | (old_index == priv->pkey_index)) | ||
| 975 | return 1; | ||
| 976 | return 0; | ||
| 977 | } | ||
| 969 | 978 | ||
| 970 | static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, | 979 | static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, |
| 971 | enum ipoib_flush_level level) | 980 | enum ipoib_flush_level level) |
| 972 | { | 981 | { |
| 973 | struct ipoib_dev_priv *cpriv; | 982 | struct ipoib_dev_priv *cpriv; |
| 974 | struct net_device *dev = priv->dev; | 983 | struct net_device *dev = priv->dev; |
| 975 | u16 new_index; | ||
| 976 | int result; | 984 | int result; |
| 977 | 985 | ||
| 978 | down_read(&priv->vlan_rwsem); | 986 | down_read(&priv->vlan_rwsem); |
| @@ -986,16 +994,20 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, | |||
| 986 | 994 | ||
| 987 | up_read(&priv->vlan_rwsem); | 995 | up_read(&priv->vlan_rwsem); |
| 988 | 996 | ||
| 989 | if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) { | 997 | if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) && |
| 990 | /* for non-child devices must check/update the pkey value here */ | 998 | level != IPOIB_FLUSH_HEAVY) { |
| 991 | if (level == IPOIB_FLUSH_HEAVY && | ||
| 992 | !test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) | ||
| 993 | update_parent_pkey(priv); | ||
| 994 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); | 999 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); |
| 995 | return; | 1000 | return; |
| 996 | } | 1001 | } |
| 997 | 1002 | ||
| 998 | if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { | 1003 | if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { |
| 1004 | /* interface is down. update pkey and leave. */ | ||
| 1005 | if (level == IPOIB_FLUSH_HEAVY) { | ||
| 1006 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) | ||
| 1007 | update_parent_pkey(priv); | ||
| 1008 | else | ||
| 1009 | update_child_pkey(priv); | ||
| 1010 | } | ||
| 999 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n"); | 1011 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n"); |
| 1000 | return; | 1012 | return; |
| 1001 | } | 1013 | } |
| @@ -1005,20 +1017,13 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, | |||
| 1005 | * (parent) devices should always take what is present in pkey index 0 | 1017 | * (parent) devices should always take what is present in pkey index 0 |
| 1006 | */ | 1018 | */ |
| 1007 | if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { | 1019 | if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { |
| 1008 | if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) { | 1020 | result = update_child_pkey(priv); |
| 1009 | clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); | 1021 | if (result) { |
| 1010 | ipoib_ib_dev_down(dev, 0); | 1022 | /* restart QP only if P_Key index is changed */ |
| 1011 | ipoib_ib_dev_stop(dev, 0); | ||
| 1012 | if (ipoib_pkey_dev_delay_open(dev)) | ||
| 1013 | return; | ||
| 1014 | } | ||
| 1015 | /* restart QP only if P_Key index is changed */ | ||
| 1016 | if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) && | ||
| 1017 | new_index == priv->pkey_index) { | ||
| 1018 | ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n"); | 1023 | ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n"); |
| 1019 | return; | 1024 | return; |
| 1020 | } | 1025 | } |
| 1021 | priv->pkey_index = new_index; | 1026 | |
| 1022 | } else { | 1027 | } else { |
| 1023 | result = update_parent_pkey(priv); | 1028 | result = update_parent_pkey(priv); |
| 1024 | /* restart QP only if P_Key value changed */ | 1029 | /* restart QP only if P_Key value changed */ |
| @@ -1038,8 +1043,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, | |||
| 1038 | ipoib_ib_dev_down(dev, 0); | 1043 | ipoib_ib_dev_down(dev, 0); |
| 1039 | 1044 | ||
| 1040 | if (level == IPOIB_FLUSH_HEAVY) { | 1045 | if (level == IPOIB_FLUSH_HEAVY) { |
| 1041 | ipoib_ib_dev_stop(dev, 0); | 1046 | if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) |
| 1042 | ipoib_ib_dev_open(dev); | 1047 | ipoib_ib_dev_stop(dev, 0); |
| 1048 | if (ipoib_ib_dev_open(dev, 0) != 0) | ||
| 1049 | return; | ||
| 1050 | if (netif_queue_stopped(dev)) | ||
| 1051 | netif_start_queue(dev); | ||
| 1043 | } | 1052 | } |
| 1044 | 1053 | ||
| 1045 | /* | 1054 | /* |
| @@ -1094,54 +1103,4 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) | |||
| 1094 | ipoib_transport_dev_cleanup(dev); | 1103 | ipoib_transport_dev_cleanup(dev); |
| 1095 | } | 1104 | } |
| 1096 | 1105 | ||
| 1097 | /* | ||
| 1098 | * Delayed P_Key Assigment Interim Support | ||
| 1099 | * | ||
| 1100 | * The following is initial implementation of delayed P_Key assigment | ||
| 1101 | * mechanism. It is using the same approach implemented for the multicast | ||
| 1102 | * group join. The single goal of this implementation is to quickly address | ||
| 1103 | * Bug #2507. This implementation will probably be removed when the P_Key | ||
| 1104 | * change async notification is available. | ||
| 1105 | */ | ||
| 1106 | |||
| 1107 | void ipoib_pkey_poll(struct work_struct *work) | ||
| 1108 | { | ||
| 1109 | struct ipoib_dev_priv *priv = | ||
| 1110 | container_of(work, struct ipoib_dev_priv, pkey_poll_task.work); | ||
| 1111 | struct net_device *dev = priv->dev; | ||
| 1112 | |||
| 1113 | ipoib_pkey_dev_check_presence(dev); | ||
| 1114 | |||
| 1115 | if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) | ||
| 1116 | ipoib_open(dev); | ||
| 1117 | else { | ||
| 1118 | mutex_lock(&pkey_mutex); | ||
| 1119 | if (!test_bit(IPOIB_PKEY_STOP, &priv->flags)) | ||
| 1120 | queue_delayed_work(ipoib_workqueue, | ||
| 1121 | &priv->pkey_poll_task, | ||
| 1122 | HZ); | ||
| 1123 | mutex_unlock(&pkey_mutex); | ||
| 1124 | } | ||
| 1125 | } | ||
| 1126 | |||
| 1127 | int ipoib_pkey_dev_delay_open(struct net_device *dev) | ||
| 1128 | { | ||
| 1129 | struct ipoib_dev_priv *priv = netdev_priv(dev); | ||
| 1130 | |||
| 1131 | /* Look for the interface pkey value in the IB Port P_Key table and */ | ||
| 1132 | /* set the interface pkey assigment flag */ | ||
| 1133 | ipoib_pkey_dev_check_presence(dev); | ||
| 1134 | 1106 | ||
| 1135 | /* P_Key value not assigned yet - start polling */ | ||
| 1136 | if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { | ||
| 1137 | mutex_lock(&pkey_mutex); | ||
| 1138 | clear_bit(IPOIB_PKEY_STOP, &priv->flags); | ||
| 1139 | queue_delayed_work(ipoib_workqueue, | ||
| 1140 | &priv->pkey_poll_task, | ||
| 1141 | HZ); | ||
| 1142 | mutex_unlock(&pkey_mutex); | ||
| 1143 | return 1; | ||
| 1144 | } | ||
| 1145 | |||
| 1146 | return 0; | ||
| 1147 | } | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 4e675f4fecc9..1310acf6bf92 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
| @@ -108,11 +108,11 @@ int ipoib_open(struct net_device *dev) | |||
| 108 | 108 | ||
| 109 | set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); | 109 | set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); |
| 110 | 110 | ||
| 111 | if (ipoib_pkey_dev_delay_open(dev)) | 111 | if (ipoib_ib_dev_open(dev, 1)) { |
| 112 | return 0; | 112 | if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) |
| 113 | 113 | return 0; | |
| 114 | if (ipoib_ib_dev_open(dev)) | ||
| 115 | goto err_disable; | 114 | goto err_disable; |
| 115 | } | ||
| 116 | 116 | ||
| 117 | if (ipoib_ib_dev_up(dev)) | 117 | if (ipoib_ib_dev_up(dev)) |
| 118 | goto err_stop; | 118 | goto err_stop; |
| @@ -1379,7 +1379,6 @@ void ipoib_setup(struct net_device *dev) | |||
| 1379 | INIT_LIST_HEAD(&priv->dead_ahs); | 1379 | INIT_LIST_HEAD(&priv->dead_ahs); |
| 1380 | INIT_LIST_HEAD(&priv->multicast_list); | 1380 | INIT_LIST_HEAD(&priv->multicast_list); |
| 1381 | 1381 | ||
| 1382 | INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll); | ||
| 1383 | INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); | 1382 | INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); |
| 1384 | INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task); | 1383 | INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task); |
| 1385 | INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light); | 1384 | INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light); |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index eb7973957a6e..61ee91d88380 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
| @@ -596,20 +596,28 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, | |||
| 596 | struct iser_conn *ib_conn; | 596 | struct iser_conn *ib_conn; |
| 597 | struct iscsi_endpoint *ep; | 597 | struct iscsi_endpoint *ep; |
| 598 | 598 | ||
| 599 | ep = iscsi_create_endpoint(sizeof(*ib_conn)); | 599 | ep = iscsi_create_endpoint(0); |
| 600 | if (!ep) | 600 | if (!ep) |
| 601 | return ERR_PTR(-ENOMEM); | 601 | return ERR_PTR(-ENOMEM); |
| 602 | 602 | ||
| 603 | ib_conn = ep->dd_data; | 603 | ib_conn = kzalloc(sizeof(*ib_conn), GFP_KERNEL); |
| 604 | if (!ib_conn) { | ||
| 605 | err = -ENOMEM; | ||
| 606 | goto failure; | ||
| 607 | } | ||
| 608 | |||
| 609 | ep->dd_data = ib_conn; | ||
| 604 | ib_conn->ep = ep; | 610 | ib_conn->ep = ep; |
| 605 | iser_conn_init(ib_conn); | 611 | iser_conn_init(ib_conn); |
| 606 | 612 | ||
| 607 | err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, | 613 | err = iser_connect(ib_conn, NULL, dst_addr, non_blocking); |
| 608 | non_blocking); | ||
| 609 | if (err) | 614 | if (err) |
| 610 | return ERR_PTR(err); | 615 | goto failure; |
| 611 | 616 | ||
| 612 | return ep; | 617 | return ep; |
| 618 | failure: | ||
| 619 | iscsi_destroy_endpoint(ep); | ||
| 620 | return ERR_PTR(err); | ||
| 613 | } | 621 | } |
| 614 | 622 | ||
| 615 | static int | 623 | static int |
| @@ -619,15 +627,16 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) | |||
| 619 | int rc; | 627 | int rc; |
| 620 | 628 | ||
| 621 | ib_conn = ep->dd_data; | 629 | ib_conn = ep->dd_data; |
| 622 | rc = wait_event_interruptible_timeout(ib_conn->wait, | 630 | rc = wait_for_completion_interruptible_timeout(&ib_conn->up_completion, |
| 623 | ib_conn->state == ISER_CONN_UP, | 631 | msecs_to_jiffies(timeout_ms)); |
| 624 | msecs_to_jiffies(timeout_ms)); | ||
| 625 | |||
| 626 | /* if conn establishment failed, return error code to iscsi */ | 632 | /* if conn establishment failed, return error code to iscsi */ |
| 627 | if (!rc && | 633 | if (rc == 0) { |
| 628 | (ib_conn->state == ISER_CONN_TERMINATING || | 634 | mutex_lock(&ib_conn->state_mutex); |
| 629 | ib_conn->state == ISER_CONN_DOWN)) | 635 | if (ib_conn->state == ISER_CONN_TERMINATING || |
| 630 | rc = -1; | 636 | ib_conn->state == ISER_CONN_DOWN) |
| 637 | rc = -1; | ||
| 638 | mutex_unlock(&ib_conn->state_mutex); | ||
| 639 | } | ||
| 631 | 640 | ||
| 632 | iser_info("ib conn %p rc = %d\n", ib_conn, rc); | 641 | iser_info("ib conn %p rc = %d\n", ib_conn, rc); |
| 633 | 642 | ||
| @@ -646,19 +655,25 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) | |||
| 646 | 655 | ||
| 647 | ib_conn = ep->dd_data; | 656 | ib_conn = ep->dd_data; |
| 648 | iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state); | 657 | iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state); |
| 658 | mutex_lock(&ib_conn->state_mutex); | ||
| 649 | iser_conn_terminate(ib_conn); | 659 | iser_conn_terminate(ib_conn); |
| 650 | 660 | ||
| 651 | /* | 661 | /* |
| 652 | * if iser_conn and iscsi_conn are bound, we must wait iscsi_conn_stop | 662 | * if iser_conn and iscsi_conn are bound, we must wait for |
| 653 | * call and ISER_CONN_DOWN state before freeing the iser resources. | 663 | * iscsi_conn_stop and the flush-errors completion before freeing |
| 654 | * otherwise we are safe to free resources immediately. | 664 | * the iser resources. Otherwise we are safe to free resources |
| 665 | * immediately. | ||
| 655 | */ | 666 | */ |
| 656 | if (ib_conn->iscsi_conn) { | 667 | if (ib_conn->iscsi_conn) { |
| 657 | INIT_WORK(&ib_conn->release_work, iser_release_work); | 668 | INIT_WORK(&ib_conn->release_work, iser_release_work); |
| 658 | queue_work(release_wq, &ib_conn->release_work); | 669 | queue_work(release_wq, &ib_conn->release_work); |
| 670 | mutex_unlock(&ib_conn->state_mutex); | ||
| 659 | } else { | 671 | } else { |
| 672 | ib_conn->state = ISER_CONN_DOWN; | ||
| 673 | mutex_unlock(&ib_conn->state_mutex); | ||
| 660 | iser_conn_release(ib_conn); | 674 | iser_conn_release(ib_conn); |
| 661 | } | 675 | } |
| 676 | iscsi_destroy_endpoint(ep); | ||
| 662 | } | 677 | } |
| 663 | 678 | ||
| 664 | static umode_t iser_attr_is_visible(int param_type, int param) | 679 | static umode_t iser_attr_is_visible(int param_type, int param) |
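Two things change in concert in this file: the iser connection is now kzalloc'd on its own rather than embedded in the endpoint's dd_data, so the failure path can unwind through iscsi_destroy_endpoint() and the connection can outlive the endpoint; and iscsi_iser_ep_poll() waits on up_completion with a timeout, consulting the connection state under the new state_mutex only when the wait times out. A userspace analogue of that wait-then-check pattern (a sketch only: pthread primitives stand in for kernel completions, and all names are illustrative):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    enum conn_state { CONN_PENDING, CONN_UP, CONN_TERMINATING, CONN_DOWN };

    struct conn {
        pthread_mutex_t state_mutex;
        pthread_cond_t  up_cond;        /* stands in for up_completion */
        enum conn_state state;
    };

    /* Returns >0 if the connection came up, 0 on timeout while the
     * handshake is still live, -1 on timeout in a terminal state. */
    static int poll_conn(struct conn *c, int timeout_ms)
    {
        struct timespec ts;
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec  += timeout_ms / 1000;
        ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (ts.tv_nsec >= 1000000000L) {
            ts.tv_sec++;
            ts.tv_nsec -= 1000000000L;
        }

        pthread_mutex_lock(&c->state_mutex);
        int rc = 1;
        while (c->state != CONN_UP) {
            if (pthread_cond_timedwait(&c->up_cond, &c->state_mutex,
                                       &ts) == ETIMEDOUT) {
                /* mirror the driver: a timeout is only an error if the
                 * connection already reached a terminal state */
                rc = (c->state == CONN_TERMINATING ||
                      c->state == CONN_DOWN) ? -1 : 0;
                break;
            }
        }
        pthread_mutex_unlock(&c->state_mutex);
        return rc;
    }

    int main(void)
    {
        struct conn c = { PTHREAD_MUTEX_INITIALIZER,
                          PTHREAD_COND_INITIALIZER, CONN_PENDING };
        printf("rc=%d\n", poll_conn(&c, 100));  /* no signaller: rc=0 */
        return 0;
    }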
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 97cd385bf7f7..c877dad381cb 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
| @@ -326,7 +326,6 @@ struct iser_conn { | |||
| 326 | struct iser_device *device; /* device context */ | 326 | struct iser_device *device; /* device context */ |
| 327 | struct rdma_cm_id *cma_id; /* CMA ID */ | 327 | struct rdma_cm_id *cma_id; /* CMA ID */ |
| 328 | struct ib_qp *qp; /* QP */ | 328 | struct ib_qp *qp; /* QP */ |
| 329 | wait_queue_head_t wait; /* waitq for conn/disconn */ | ||
| 330 | unsigned qp_max_recv_dtos; /* num of rx buffers */ | 329 | unsigned qp_max_recv_dtos; /* num of rx buffers */ |
| 331 | unsigned qp_max_recv_dtos_mask; /* above minus 1 */ | 330 | unsigned qp_max_recv_dtos_mask; /* above minus 1 */ |
| 332 | unsigned min_posted_rx; /* qp_max_recv_dtos >> 2 */ | 331 | unsigned min_posted_rx; /* qp_max_recv_dtos >> 2 */ |
| @@ -335,6 +334,9 @@ struct iser_conn { | |||
| 335 | char name[ISER_OBJECT_NAME_SIZE]; | 334 | char name[ISER_OBJECT_NAME_SIZE]; |
| 336 | struct work_struct release_work; | 335 | struct work_struct release_work; |
| 337 | struct completion stop_completion; | 336 | struct completion stop_completion; |
| 337 | struct mutex state_mutex; | ||
| 338 | struct completion flush_completion; | ||
| 339 | struct completion up_completion; | ||
| 338 | struct list_head conn_list; /* entry in ig conn list */ | 340 | struct list_head conn_list; /* entry in ig conn list */ |
| 339 | 341 | ||
| 340 | char *login_buf; | 342 | char *login_buf; |
| @@ -448,8 +450,8 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task, | |||
| 448 | enum iser_data_dir cmd_dir); | 450 | enum iser_data_dir cmd_dir); |
| 449 | 451 | ||
| 450 | int iser_connect(struct iser_conn *ib_conn, | 452 | int iser_connect(struct iser_conn *ib_conn, |
| 451 | struct sockaddr_in *src_addr, | 453 | struct sockaddr *src_addr, |
| 452 | struct sockaddr_in *dst_addr, | 454 | struct sockaddr *dst_addr, |
| 453 | int non_blocking); | 455 | int non_blocking); |
| 454 | 456 | ||
| 455 | int iser_reg_page_vec(struct iser_conn *ib_conn, | 457 | int iser_reg_page_vec(struct iser_conn *ib_conn, |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ea01075f9f9b..3ef167f97d6f 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
| @@ -491,10 +491,9 @@ out_err: | |||
| 491 | } | 491 | } |
| 492 | 492 | ||
| 493 | /** | 493 | /** |
| 494 | * releases the QP objects, returns 0 on success, | 494 | * releases the QP object |
| 495 | * -1 on failure | ||
| 496 | */ | 495 | */ |
| 497 | static int iser_free_ib_conn_res(struct iser_conn *ib_conn) | 496 | static void iser_free_ib_conn_res(struct iser_conn *ib_conn) |
| 498 | { | 497 | { |
| 499 | int cq_index; | 498 | int cq_index; |
| 500 | BUG_ON(ib_conn == NULL); | 499 | BUG_ON(ib_conn == NULL); |
| @@ -513,8 +512,6 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn) | |||
| 513 | } | 512 | } |
| 514 | 513 | ||
| 515 | ib_conn->qp = NULL; | 514 | ib_conn->qp = NULL; |
| 516 | |||
| 517 | return 0; | ||
| 518 | } | 515 | } |
| 519 | 516 | ||
| 520 | /** | 517 | /** |
| @@ -568,31 +565,40 @@ static void iser_device_try_release(struct iser_device *device) | |||
| 568 | mutex_unlock(&ig.device_list_mutex); | 565 | mutex_unlock(&ig.device_list_mutex); |
| 569 | } | 566 | } |
| 570 | 567 | ||
| 568 | /** | ||
| 569 | * Called with state mutex held | ||
| 570 | **/ | ||
| 571 | static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, | 571 | static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, |
| 572 | enum iser_ib_conn_state comp, | 572 | enum iser_ib_conn_state comp, |
| 573 | enum iser_ib_conn_state exch) | 573 | enum iser_ib_conn_state exch) |
| 574 | { | 574 | { |
| 575 | int ret; | 575 | int ret; |
| 576 | 576 | ||
| 577 | spin_lock_bh(&ib_conn->lock); | ||
| 578 | if ((ret = (ib_conn->state == comp))) | 577 | if ((ret = (ib_conn->state == comp))) |
| 579 | ib_conn->state = exch; | 578 | ib_conn->state = exch; |
| 580 | spin_unlock_bh(&ib_conn->lock); | ||
| 581 | return ret; | 579 | return ret; |
| 582 | } | 580 | } |
| 583 | 581 | ||
| 584 | void iser_release_work(struct work_struct *work) | 582 | void iser_release_work(struct work_struct *work) |
| 585 | { | 583 | { |
| 586 | struct iser_conn *ib_conn; | 584 | struct iser_conn *ib_conn; |
| 585 | int rc; | ||
| 587 | 586 | ||
| 588 | ib_conn = container_of(work, struct iser_conn, release_work); | 587 | ib_conn = container_of(work, struct iser_conn, release_work); |
| 589 | 588 | ||
| 590 | /* wait for .conn_stop callback */ | 589 | /* wait for .conn_stop callback */ |
| 591 | wait_for_completion(&ib_conn->stop_completion); | 590 | rc = wait_for_completion_timeout(&ib_conn->stop_completion, 30 * HZ); |
| 591 | WARN_ON(rc == 0); | ||
| 592 | 592 | ||
| 593 | /* wait for the qp's post send and post receive buffers to empty */ | 593 | /* wait for the qp's post send and post receive buffers to empty */ |
| 594 | wait_event_interruptible(ib_conn->wait, | 594 | rc = wait_for_completion_timeout(&ib_conn->flush_completion, 30 * HZ); |
| 595 | ib_conn->state == ISER_CONN_DOWN); | 595 | WARN_ON(rc == 0); |
| 596 | |||
| 597 | ib_conn->state = ISER_CONN_DOWN; | ||
| 598 | |||
| 599 | mutex_lock(&ib_conn->state_mutex); | ||
| 600 | ib_conn->state = ISER_CONN_DOWN; | ||
| 601 | mutex_unlock(&ib_conn->state_mutex); | ||
| 596 | 602 | ||
| 597 | iser_conn_release(ib_conn); | 603 | iser_conn_release(ib_conn); |
| 598 | } | 604 | } |
| @@ -604,23 +610,27 @@ void iser_conn_release(struct iser_conn *ib_conn) | |||
| 604 | { | 610 | { |
| 605 | struct iser_device *device = ib_conn->device; | 611 | struct iser_device *device = ib_conn->device; |
| 606 | 612 | ||
| 607 | BUG_ON(ib_conn->state == ISER_CONN_UP); | ||
| 608 | |||
| 609 | mutex_lock(&ig.connlist_mutex); | 613 | mutex_lock(&ig.connlist_mutex); |
| 610 | list_del(&ib_conn->conn_list); | 614 | list_del(&ib_conn->conn_list); |
| 611 | mutex_unlock(&ig.connlist_mutex); | 615 | mutex_unlock(&ig.connlist_mutex); |
| 616 | |||
| 617 | mutex_lock(&ib_conn->state_mutex); | ||
| 618 | BUG_ON(ib_conn->state != ISER_CONN_DOWN); | ||
| 619 | |||
| 612 | iser_free_rx_descriptors(ib_conn); | 620 | iser_free_rx_descriptors(ib_conn); |
| 613 | iser_free_ib_conn_res(ib_conn); | 621 | iser_free_ib_conn_res(ib_conn); |
| 614 | ib_conn->device = NULL; | 622 | ib_conn->device = NULL; |
| 615 | /* on EVENT_ADDR_ERROR there's no device yet for this conn */ | 623 | /* on EVENT_ADDR_ERROR there's no device yet for this conn */ |
| 616 | if (device != NULL) | 624 | if (device != NULL) |
| 617 | iser_device_try_release(device); | 625 | iser_device_try_release(device); |
| 626 | mutex_unlock(&ib_conn->state_mutex); | ||
| 627 | |||
| 618 | /* if cma handler context, the caller actually destroy the id */ | 628 | /* if cma handler context, the caller actually destroy the id */ |
| 619 | if (ib_conn->cma_id != NULL) { | 629 | if (ib_conn->cma_id != NULL) { |
| 620 | rdma_destroy_id(ib_conn->cma_id); | 630 | rdma_destroy_id(ib_conn->cma_id); |
| 621 | ib_conn->cma_id = NULL; | 631 | ib_conn->cma_id = NULL; |
| 622 | } | 632 | } |
| 623 | iscsi_destroy_endpoint(ib_conn->ep); | 633 | kfree(ib_conn); |
| 624 | } | 634 | } |
| 625 | 635 | ||
| 626 | /** | 636 | /** |
| @@ -642,22 +652,31 @@ void iser_conn_terminate(struct iser_conn *ib_conn) | |||
| 642 | ib_conn,err); | 652 | ib_conn,err); |
| 643 | } | 653 | } |
| 644 | 654 | ||
| 655 | /** | ||
| 656 | * Called with state mutex held | ||
| 657 | **/ | ||
| 645 | static void iser_connect_error(struct rdma_cm_id *cma_id) | 658 | static void iser_connect_error(struct rdma_cm_id *cma_id) |
| 646 | { | 659 | { |
| 647 | struct iser_conn *ib_conn; | 660 | struct iser_conn *ib_conn; |
| 648 | 661 | ||
| 649 | ib_conn = (struct iser_conn *)cma_id->context; | 662 | ib_conn = (struct iser_conn *)cma_id->context; |
| 650 | |||
| 651 | ib_conn->state = ISER_CONN_DOWN; | 663 | ib_conn->state = ISER_CONN_DOWN; |
| 652 | wake_up_interruptible(&ib_conn->wait); | ||
| 653 | } | 664 | } |
| 654 | 665 | ||
| 666 | /** | ||
| 667 | * Called with state mutex held | ||
| 668 | **/ | ||
| 655 | static void iser_addr_handler(struct rdma_cm_id *cma_id) | 669 | static void iser_addr_handler(struct rdma_cm_id *cma_id) |
| 656 | { | 670 | { |
| 657 | struct iser_device *device; | 671 | struct iser_device *device; |
| 658 | struct iser_conn *ib_conn; | 672 | struct iser_conn *ib_conn; |
| 659 | int ret; | 673 | int ret; |
| 660 | 674 | ||
| 675 | ib_conn = (struct iser_conn *)cma_id->context; | ||
| 676 | if (ib_conn->state != ISER_CONN_PENDING) | ||
| 677 | /* bailout */ | ||
| 678 | return; | ||
| 679 | |||
| 661 | device = iser_device_find_by_ib_device(cma_id); | 680 | device = iser_device_find_by_ib_device(cma_id); |
| 662 | if (!device) { | 681 | if (!device) { |
| 663 | iser_err("device lookup/creation failed\n"); | 682 | iser_err("device lookup/creation failed\n"); |
| @@ -665,7 +684,6 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) | |||
| 665 | return; | 684 | return; |
| 666 | } | 685 | } |
| 667 | 686 | ||
| 668 | ib_conn = (struct iser_conn *)cma_id->context; | ||
| 669 | ib_conn->device = device; | 687 | ib_conn->device = device; |
| 670 | 688 | ||
| 671 | /* connection T10-PI support */ | 689 | /* connection T10-PI support */ |
| @@ -689,18 +707,27 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) | |||
| 689 | } | 707 | } |
| 690 | } | 708 | } |
| 691 | 709 | ||
| 710 | /** | ||
| 711 | * Called with state mutex held | ||
| 712 | **/ | ||
| 692 | static void iser_route_handler(struct rdma_cm_id *cma_id) | 713 | static void iser_route_handler(struct rdma_cm_id *cma_id) |
| 693 | { | 714 | { |
| 694 | struct rdma_conn_param conn_param; | 715 | struct rdma_conn_param conn_param; |
| 695 | int ret; | 716 | int ret; |
| 696 | struct iser_cm_hdr req_hdr; | 717 | struct iser_cm_hdr req_hdr; |
| 718 | struct iser_conn *ib_conn = (struct iser_conn *)cma_id->context; | ||
| 719 | struct iser_device *device = ib_conn->device; | ||
| 720 | |||
| 721 | if (ib_conn->state != ISER_CONN_PENDING) | ||
| 722 | /* bailout */ | ||
| 723 | return; | ||
| 697 | 724 | ||
| 698 | ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context); | 725 | ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context); |
| 699 | if (ret) | 726 | if (ret) |
| 700 | goto failure; | 727 | goto failure; |
| 701 | 728 | ||
| 702 | memset(&conn_param, 0, sizeof conn_param); | 729 | memset(&conn_param, 0, sizeof conn_param); |
| 703 | conn_param.responder_resources = 4; | 730 | conn_param.responder_resources = device->dev_attr.max_qp_rd_atom; |
| 704 | conn_param.initiator_depth = 1; | 731 | conn_param.initiator_depth = 1; |
| 705 | conn_param.retry_count = 7; | 732 | conn_param.retry_count = 7; |
| 706 | conn_param.rnr_retry_count = 6; | 733 | conn_param.rnr_retry_count = 6; |
| @@ -728,12 +755,16 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id) | |||
| 728 | struct ib_qp_attr attr; | 755 | struct ib_qp_attr attr; |
| 729 | struct ib_qp_init_attr init_attr; | 756 | struct ib_qp_init_attr init_attr; |
| 730 | 757 | ||
| 758 | ib_conn = (struct iser_conn *)cma_id->context; | ||
| 759 | if (ib_conn->state != ISER_CONN_PENDING) | ||
| 760 | /* bailout */ | ||
| 761 | return; | ||
| 762 | |||
| 731 | (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); | 763 | (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); |
| 732 | iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); | 764 | iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); |
| 733 | 765 | ||
| 734 | ib_conn = (struct iser_conn *)cma_id->context; | 766 | ib_conn->state = ISER_CONN_UP; |
| 735 | if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_PENDING, ISER_CONN_UP)) | 767 | complete(&ib_conn->up_completion); |
| 736 | wake_up_interruptible(&ib_conn->wait); | ||
| 737 | } | 768 | } |
| 738 | 769 | ||
| 739 | static void iser_disconnected_handler(struct rdma_cm_id *cma_id) | 770 | static void iser_disconnected_handler(struct rdma_cm_id *cma_id) |
| @@ -752,19 +783,25 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id) | |||
| 752 | iser_err("iscsi_iser connection isn't bound\n"); | 783 | iser_err("iscsi_iser connection isn't bound\n"); |
| 753 | } | 784 | } |
| 754 | 785 | ||
| 755 | /* Complete the termination process if no posts are pending */ | 786 | /* Complete the termination process if no posts are pending. This code |
| 787 | * block also exists in iser_handle_comp_error(), but it is needed here | ||
| 788 | * for cases of no flushes at all, e.g. discovery over rdma. | ||
| 789 | */ | ||
| 756 | if (ib_conn->post_recv_buf_count == 0 && | 790 | if (ib_conn->post_recv_buf_count == 0 && |
| 757 | (atomic_read(&ib_conn->post_send_buf_count) == 0)) { | 791 | (atomic_read(&ib_conn->post_send_buf_count) == 0)) { |
| 758 | ib_conn->state = ISER_CONN_DOWN; | 792 | complete(&ib_conn->flush_completion); |
| 759 | wake_up_interruptible(&ib_conn->wait); | ||
| 760 | } | 793 | } |
| 761 | } | 794 | } |
| 762 | 795 | ||
| 763 | static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | 796 | static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) |
| 764 | { | 797 | { |
| 798 | struct iser_conn *ib_conn; | ||
| 799 | |||
| 800 | ib_conn = (struct iser_conn *)cma_id->context; | ||
| 765 | iser_info("event %d status %d conn %p id %p\n", | 801 | iser_info("event %d status %d conn %p id %p\n", |
| 766 | event->event, event->status, cma_id->context, cma_id); | 802 | event->event, event->status, cma_id->context, cma_id); |
| 767 | 803 | ||
| 804 | mutex_lock(&ib_conn->state_mutex); | ||
| 768 | switch (event->event) { | 805 | switch (event->event) { |
| 769 | case RDMA_CM_EVENT_ADDR_RESOLVED: | 806 | case RDMA_CM_EVENT_ADDR_RESOLVED: |
| 770 | iser_addr_handler(cma_id); | 807 | iser_addr_handler(cma_id); |
| @@ -785,24 +822,28 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve | |||
| 785 | case RDMA_CM_EVENT_DISCONNECTED: | 822 | case RDMA_CM_EVENT_DISCONNECTED: |
| 786 | case RDMA_CM_EVENT_DEVICE_REMOVAL: | 823 | case RDMA_CM_EVENT_DEVICE_REMOVAL: |
| 787 | case RDMA_CM_EVENT_ADDR_CHANGE: | 824 | case RDMA_CM_EVENT_ADDR_CHANGE: |
| 825 | case RDMA_CM_EVENT_TIMEWAIT_EXIT: | ||
| 788 | iser_disconnected_handler(cma_id); | 826 | iser_disconnected_handler(cma_id); |
| 789 | break; | 827 | break; |
| 790 | default: | 828 | default: |
| 791 | iser_err("Unexpected RDMA CM event (%d)\n", event->event); | 829 | iser_err("Unexpected RDMA CM event (%d)\n", event->event); |
| 792 | break; | 830 | break; |
| 793 | } | 831 | } |
| 832 | mutex_unlock(&ib_conn->state_mutex); | ||
| 794 | return 0; | 833 | return 0; |
| 795 | } | 834 | } |
| 796 | 835 | ||
| 797 | void iser_conn_init(struct iser_conn *ib_conn) | 836 | void iser_conn_init(struct iser_conn *ib_conn) |
| 798 | { | 837 | { |
| 799 | ib_conn->state = ISER_CONN_INIT; | 838 | ib_conn->state = ISER_CONN_INIT; |
| 800 | init_waitqueue_head(&ib_conn->wait); | ||
| 801 | ib_conn->post_recv_buf_count = 0; | 839 | ib_conn->post_recv_buf_count = 0; |
| 802 | atomic_set(&ib_conn->post_send_buf_count, 0); | 840 | atomic_set(&ib_conn->post_send_buf_count, 0); |
| 803 | init_completion(&ib_conn->stop_completion); | 841 | init_completion(&ib_conn->stop_completion); |
| 842 | init_completion(&ib_conn->flush_completion); | ||
| 843 | init_completion(&ib_conn->up_completion); | ||
| 804 | INIT_LIST_HEAD(&ib_conn->conn_list); | 844 | INIT_LIST_HEAD(&ib_conn->conn_list); |
| 805 | spin_lock_init(&ib_conn->lock); | 845 | spin_lock_init(&ib_conn->lock); |
| 846 | mutex_init(&ib_conn->state_mutex); | ||
| 806 | } | 847 | } |
| 807 | 848 | ||
| 808 | /** | 849 | /** |
| @@ -810,22 +851,21 @@ void iser_conn_init(struct iser_conn *ib_conn) | |||
| 810 | * sleeps until the connection is established or rejected | 851 | * sleeps until the connection is established or rejected |
| 811 | */ | 852 | */ |
| 812 | int iser_connect(struct iser_conn *ib_conn, | 853 | int iser_connect(struct iser_conn *ib_conn, |
| 813 | struct sockaddr_in *src_addr, | 854 | struct sockaddr *src_addr, |
| 814 | struct sockaddr_in *dst_addr, | 855 | struct sockaddr *dst_addr, |
| 815 | int non_blocking) | 856 | int non_blocking) |
| 816 | { | 857 | { |
| 817 | struct sockaddr *src, *dst; | ||
| 818 | int err = 0; | 858 | int err = 0; |
| 819 | 859 | ||
| 820 | sprintf(ib_conn->name, "%pI4:%d", | 860 | mutex_lock(&ib_conn->state_mutex); |
| 821 | &dst_addr->sin_addr.s_addr, dst_addr->sin_port); | 861 | |
| 862 | sprintf(ib_conn->name, "%pISp", dst_addr); | ||
| 863 | |||
| 864 | iser_info("connecting to: %s\n", ib_conn->name); | ||
| 822 | 865 | ||
| 823 | /* the device is known only --after-- address resolution */ | 866 | /* the device is known only --after-- address resolution */ |
| 824 | ib_conn->device = NULL; | 867 | ib_conn->device = NULL; |
| 825 | 868 | ||
| 826 | iser_info("connecting to: %pI4, port 0x%x\n", | ||
| 827 | &dst_addr->sin_addr, dst_addr->sin_port); | ||
| 828 | |||
| 829 | ib_conn->state = ISER_CONN_PENDING; | 869 | ib_conn->state = ISER_CONN_PENDING; |
| 830 | 870 | ||
| 831 | ib_conn->cma_id = rdma_create_id(iser_cma_handler, | 871 | ib_conn->cma_id = rdma_create_id(iser_cma_handler, |
| @@ -837,23 +877,21 @@ int iser_connect(struct iser_conn *ib_conn, | |||
| 837 | goto id_failure; | 877 | goto id_failure; |
| 838 | } | 878 | } |
| 839 | 879 | ||
| 840 | src = (struct sockaddr *)src_addr; | 880 | err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000); |
| 841 | dst = (struct sockaddr *)dst_addr; | ||
| 842 | err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000); | ||
| 843 | if (err) { | 881 | if (err) { |
| 844 | iser_err("rdma_resolve_addr failed: %d\n", err); | 882 | iser_err("rdma_resolve_addr failed: %d\n", err); |
| 845 | goto addr_failure; | 883 | goto addr_failure; |
| 846 | } | 884 | } |
| 847 | 885 | ||
| 848 | if (!non_blocking) { | 886 | if (!non_blocking) { |
| 849 | wait_event_interruptible(ib_conn->wait, | 887 | wait_for_completion_interruptible(&ib_conn->up_completion); |
| 850 | (ib_conn->state != ISER_CONN_PENDING)); | ||
| 851 | 888 | ||
| 852 | if (ib_conn->state != ISER_CONN_UP) { | 889 | if (ib_conn->state != ISER_CONN_UP) { |
| 853 | err = -EIO; | 890 | err = -EIO; |
| 854 | goto connect_failure; | 891 | goto connect_failure; |
| 855 | } | 892 | } |
| 856 | } | 893 | } |
| 894 | mutex_unlock(&ib_conn->state_mutex); | ||
| 857 | 895 | ||
| 858 | mutex_lock(&ig.connlist_mutex); | 896 | mutex_lock(&ig.connlist_mutex); |
| 859 | list_add(&ib_conn->conn_list, &ig.connlist); | 897 | list_add(&ib_conn->conn_list, &ig.connlist); |
| @@ -865,6 +903,7 @@ id_failure: | |||
| 865 | addr_failure: | 903 | addr_failure: |
| 866 | ib_conn->state = ISER_CONN_DOWN; | 904 | ib_conn->state = ISER_CONN_DOWN; |
| 867 | connect_failure: | 905 | connect_failure: |
| 906 | mutex_unlock(&ib_conn->state_mutex); | ||
| 868 | iser_conn_release(ib_conn); | 907 | iser_conn_release(ib_conn); |
| 869 | return err; | 908 | return err; |
| 870 | } | 909 | } |
| @@ -1049,18 +1088,19 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc, | |||
| 1049 | 1088 | ||
| 1050 | if (ib_conn->post_recv_buf_count == 0 && | 1089 | if (ib_conn->post_recv_buf_count == 0 && |
| 1051 | atomic_read(&ib_conn->post_send_buf_count) == 0) { | 1090 | atomic_read(&ib_conn->post_send_buf_count) == 0) { |
| 1052 | /* getting here when the state is UP means that the conn is * | 1091 | /** |
| 1053 | * being terminated asynchronously from the iSCSI layer's * | 1092 | * getting here when the state is UP means that the conn is |
| 1054 | * perspective. */ | 1093 | * being terminated asynchronously from the iSCSI layer's |
| 1055 | if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, | 1094 | * perspective. It is safe to peek at the connection state |
| 1056 | ISER_CONN_TERMINATING)) | 1095 | * since iscsi_conn_failure is allowed to be called twice. |
| 1096 | **/ | ||
| 1097 | if (ib_conn->state == ISER_CONN_UP) | ||
| 1057 | iscsi_conn_failure(ib_conn->iscsi_conn, | 1098 | iscsi_conn_failure(ib_conn->iscsi_conn, |
| 1058 | ISCSI_ERR_CONN_FAILED); | 1099 | ISCSI_ERR_CONN_FAILED); |
| 1059 | 1100 | ||
| 1060 | /* no more non-completed posts to the QP, complete the | 1101 | /* no more non-completed posts to the QP, complete the |
| 1061 | * termination process w/o worrying about the disconnect event */ | 1102 | * termination process w/o worrying about the disconnect event */ |
| 1062 | ib_conn->state = ISER_CONN_DOWN; | 1103 | complete(&ib_conn->flush_completion); |
| 1063 | wake_up_interruptible(&ib_conn->wait); | ||
| 1064 | } | 1104 | } |
| 1065 | } | 1105 | } |
| 1066 | 1106 | ||
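The common thread through these iser_verbs.c hunks is a single state_mutex taken around the entire RDMA CM callback, with the address, route and connected handlers bailing out whenever the connection has already left ISER_CONN_PENDING; stale or racing events thus degrade to harmless no-ops, and completions replace the old waitqueue wakeups. A stripped-down model of that dispatch discipline (illustrative types, not the rdma_cm API):

    #include <pthread.h>
    #include <stdio.h>

    enum state { ST_PENDING, ST_UP, ST_DOWN };
    enum event { EV_CONNECTED, EV_ERROR };

    struct conn {
        pthread_mutex_t state_mutex;
        enum state state;
    };

    /* Handlers run with state_mutex held and must tolerate stale events. */
    static void on_connected(struct conn *c)
    {
        if (c->state != ST_PENDING)
            return;             /* bailout: event raced with teardown */
        c->state = ST_UP;
        /* the driver signals complete(&ib_conn->up_completion) here */
    }

    static void on_error(struct conn *c)
    {
        c->state = ST_DOWN;
    }

    static int cm_handler(struct conn *c, enum event ev)
    {
        pthread_mutex_lock(&c->state_mutex);
        switch (ev) {
        case EV_CONNECTED: on_connected(c); break;
        case EV_ERROR:     on_error(c);     break;
        }
        pthread_mutex_unlock(&c->state_mutex);
        return 0;
    }

    int main(void)
    {
        struct conn c = { PTHREAD_MUTEX_INITIALIZER, ST_DOWN };
        cm_handler(&c, EV_CONNECTED);       /* ignored: not PENDING */
        printf("state=%d\n", c.state);      /* still ST_DOWN (2) */
        return 0;
    }

Because every handler runs under the same mutex, a late ESTABLISHED event cannot resurrect a connection that teardown has already moved past PENDING.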
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index e3c2c5b4297f..62d2a18e1b41 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
| @@ -130,6 +130,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr); | |||
| 130 | static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); | 130 | static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); |
| 131 | 131 | ||
| 132 | static struct scsi_transport_template *ib_srp_transport_template; | 132 | static struct scsi_transport_template *ib_srp_transport_template; |
| 133 | static struct workqueue_struct *srp_remove_wq; | ||
| 133 | 134 | ||
| 134 | static struct ib_client srp_client = { | 135 | static struct ib_client srp_client = { |
| 135 | .name = "srp", | 136 | .name = "srp", |
| @@ -731,7 +732,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target) | |||
| 731 | spin_unlock_irq(&target->lock); | 732 | spin_unlock_irq(&target->lock); |
| 732 | 733 | ||
| 733 | if (changed) | 734 | if (changed) |
| 734 | queue_work(system_long_wq, &target->remove_work); | 735 | queue_work(srp_remove_wq, &target->remove_work); |
| 735 | 736 | ||
| 736 | return changed; | 737 | return changed; |
| 737 | } | 738 | } |
| @@ -1643,10 +1644,14 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) | |||
| 1643 | SCSI_SENSE_BUFFERSIZE)); | 1644 | SCSI_SENSE_BUFFERSIZE)); |
| 1644 | } | 1645 | } |
| 1645 | 1646 | ||
| 1646 | if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER)) | 1647 | if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER)) |
| 1647 | scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); | ||
| 1648 | else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) | ||
| 1649 | scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); | 1648 | scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); |
| 1649 | else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER)) | ||
| 1650 | scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt)); | ||
| 1651 | else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER)) | ||
| 1652 | scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); | ||
| 1653 | else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER)) | ||
| 1654 | scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt)); | ||
| 1650 | 1655 | ||
| 1651 | srp_free_req(target, req, scmnd, | 1656 | srp_free_req(target, req, scmnd, |
| 1652 | be32_to_cpu(rsp->req_lim_delta)); | 1657 | be32_to_cpu(rsp->req_lim_delta)); |
| @@ -3261,9 +3266,10 @@ static void srp_remove_one(struct ib_device *device) | |||
| 3261 | spin_unlock(&host->target_lock); | 3266 | spin_unlock(&host->target_lock); |
| 3262 | 3267 | ||
| 3263 | /* | 3268 | /* |
| 3264 | * Wait for target port removal tasks. | 3269 | * Wait for tl_err and target port removal tasks. |
| 3265 | */ | 3270 | */ |
| 3266 | flush_workqueue(system_long_wq); | 3271 | flush_workqueue(system_long_wq); |
| 3272 | flush_workqueue(srp_remove_wq); | ||
| 3267 | 3273 | ||
| 3268 | kfree(host); | 3274 | kfree(host); |
| 3269 | } | 3275 | } |
| @@ -3313,16 +3319,22 @@ static int __init srp_init_module(void) | |||
| 3313 | indirect_sg_entries = cmd_sg_entries; | 3319 | indirect_sg_entries = cmd_sg_entries; |
| 3314 | } | 3320 | } |
| 3315 | 3321 | ||
| 3322 | srp_remove_wq = create_workqueue("srp_remove"); | ||
| 3323 | if (!srp_remove_wq) { | ||
| 3324 | ret = -ENOMEM; | ||
| 3325 | goto out; | ||
| 3326 | } | ||
| 3327 | |||
| 3328 | ret = -ENOMEM; | ||
| 3316 | ib_srp_transport_template = | 3329 | ib_srp_transport_template = |
| 3317 | srp_attach_transport(&ib_srp_transport_functions); | 3330 | srp_attach_transport(&ib_srp_transport_functions); |
| 3318 | if (!ib_srp_transport_template) | 3331 | if (!ib_srp_transport_template) |
| 3319 | return -ENOMEM; | 3332 | goto destroy_wq; |
| 3320 | 3333 | ||
| 3321 | ret = class_register(&srp_class); | 3334 | ret = class_register(&srp_class); |
| 3322 | if (ret) { | 3335 | if (ret) { |
| 3323 | pr_err("couldn't register class infiniband_srp\n"); | 3336 | pr_err("couldn't register class infiniband_srp\n"); |
| 3324 | srp_release_transport(ib_srp_transport_template); | 3337 | goto release_tr; |
| 3325 | return ret; | ||
| 3326 | } | 3338 | } |
| 3327 | 3339 | ||
| 3328 | ib_sa_register_client(&srp_sa_client); | 3340 | ib_sa_register_client(&srp_sa_client); |
| @@ -3330,13 +3342,22 @@ static int __init srp_init_module(void) | |||
| 3330 | ret = ib_register_client(&srp_client); | 3342 | ret = ib_register_client(&srp_client); |
| 3331 | if (ret) { | 3343 | if (ret) { |
| 3332 | pr_err("couldn't register IB client\n"); | 3344 | pr_err("couldn't register IB client\n"); |
| 3333 | srp_release_transport(ib_srp_transport_template); | 3345 | goto unreg_sa; |
| 3334 | ib_sa_unregister_client(&srp_sa_client); | ||
| 3335 | class_unregister(&srp_class); | ||
| 3336 | return ret; | ||
| 3337 | } | 3346 | } |
| 3338 | 3347 | ||
| 3339 | return 0; | 3348 | out: |
| 3349 | return ret; | ||
| 3350 | |||
| 3351 | unreg_sa: | ||
| 3352 | ib_sa_unregister_client(&srp_sa_client); | ||
| 3353 | class_unregister(&srp_class); | ||
| 3354 | |||
| 3355 | release_tr: | ||
| 3356 | srp_release_transport(ib_srp_transport_template); | ||
| 3357 | |||
| 3358 | destroy_wq: | ||
| 3359 | destroy_workqueue(srp_remove_wq); | ||
| 3360 | goto out; | ||
| 3340 | } | 3361 | } |
| 3341 | 3362 | ||
| 3342 | static void __exit srp_cleanup_module(void) | 3363 | static void __exit srp_cleanup_module(void) |
| @@ -3345,6 +3366,7 @@ static void __exit srp_cleanup_module(void) | |||
| 3345 | ib_sa_unregister_client(&srp_sa_client); | 3366 | ib_sa_unregister_client(&srp_sa_client); |
| 3346 | class_unregister(&srp_class); | 3367 | class_unregister(&srp_class); |
| 3347 | srp_release_transport(ib_srp_transport_template); | 3368 | srp_release_transport(ib_srp_transport_template); |
| 3369 | destroy_workqueue(srp_remove_wq); | ||
| 3348 | } | 3370 | } |
| 3349 | 3371 | ||
| 3350 | module_init(srp_init_module); | 3372 | module_init(srp_init_module); |
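The srp_process_rsp() change corrects the residual-count mapping: each SRP under/overflow flag is handled on its own, underflow yields a positive residual (bytes not transferred) while overflow yields a negative one, and data-in takes precedence over data-out. The mapping in isolation (bit positions follow the SRP response format but should be treated as illustrative here; the kernel takes them from <scsi/srp.h>):

    #include <stdint.h>
    #include <stdio.h>

    #define RSP_FLAG_DOOVER  (1u << 2)
    #define RSP_FLAG_DOUNDER (1u << 3)
    #define RSP_FLAG_DIOVER  (1u << 4)
    #define RSP_FLAG_DIUNDER (1u << 5)

    /* Positive residual = bytes left untransferred (underflow);
     * negative residual = bytes requested beyond the buffer (overflow).
     * Data-in is checked before data-out, as in the patched driver. */
    static int32_t srp_residual(uint8_t flags, uint32_t di_cnt, uint32_t do_cnt)
    {
        if (flags & RSP_FLAG_DIUNDER)
            return (int32_t)di_cnt;
        if (flags & RSP_FLAG_DIOVER)
            return -(int32_t)di_cnt;
        if (flags & RSP_FLAG_DOUNDER)
            return (int32_t)do_cnt;
        if (flags & RSP_FLAG_DOOVER)
            return -(int32_t)do_cnt;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", srp_residual(RSP_FLAG_DIUNDER, 512, 0));  /*  512 */
        printf("%d\n", srp_residual(RSP_FLAG_DIOVER,  512, 0));  /* -512 */
        return 0;
    }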
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index fe09f2788b15..d28a8c284da9 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
| @@ -198,6 +198,7 @@ static void srpt_event_handler(struct ib_event_handler *handler, | |||
| 198 | case IB_EVENT_PKEY_CHANGE: | 198 | case IB_EVENT_PKEY_CHANGE: |
| 199 | case IB_EVENT_SM_CHANGE: | 199 | case IB_EVENT_SM_CHANGE: |
| 200 | case IB_EVENT_CLIENT_REREGISTER: | 200 | case IB_EVENT_CLIENT_REREGISTER: |
| 201 | case IB_EVENT_GID_CHANGE: | ||
| 201 | /* Refresh port data asynchronously. */ | 202 | /* Refresh port data asynchronously. */ |
| 202 | if (event->element.port_num <= sdev->device->phys_port_cnt) { | 203 | if (event->element.port_num <= sdev->device->phys_port_cnt) { |
| 203 | sport = &sdev->port[event->element.port_num - 1]; | 204 | sport = &sdev->port[event->element.port_num - 1]; |
| @@ -563,7 +564,7 @@ static int srpt_refresh_port(struct srpt_port *sport) | |||
| 563 | ®_req, 0, | 564 | ®_req, 0, |
| 564 | srpt_mad_send_handler, | 565 | srpt_mad_send_handler, |
| 565 | srpt_mad_recv_handler, | 566 | srpt_mad_recv_handler, |
| 566 | sport); | 567 | sport, 0); |
| 567 | if (IS_ERR(sport->mad_agent)) { | 568 | if (IS_ERR(sport->mad_agent)) { |
| 568 | ret = PTR_ERR(sport->mad_agent); | 569 | ret = PTR_ERR(sport->mad_agent); |
| 569 | sport->mad_agent = NULL; | 570 | sport->mad_agent = NULL; |

diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 811f1351db7a..43e08d0bc3d3 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
| @@ -897,5 +897,6 @@ void be_roce_dev_remove(struct be_adapter *); | |||
| 897 | */ | 897 | */ |
| 898 | void be_roce_dev_open(struct be_adapter *); | 898 | void be_roce_dev_open(struct be_adapter *); |
| 899 | void be_roce_dev_close(struct be_adapter *); | 899 | void be_roce_dev_close(struct be_adapter *); |
| 900 | void be_roce_dev_shutdown(struct be_adapter *); | ||
| 900 | 901 | ||
| 901 | #endif /* BE_H */ | 902 | #endif /* BE_H */ |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index db4ff14ff18f..9cdeda54674a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -5014,6 +5014,7 @@ static void be_shutdown(struct pci_dev *pdev) | |||
| 5014 | if (!adapter) | 5014 | if (!adapter) |
| 5015 | return; | 5015 | return; |
| 5016 | 5016 | ||
| 5017 | be_roce_dev_shutdown(adapter); | ||
| 5017 | cancel_delayed_work_sync(&adapter->work); | 5018 | cancel_delayed_work_sync(&adapter->work); |
| 5018 | cancel_delayed_work_sync(&adapter->func_recovery_work); | 5019 | cancel_delayed_work_sync(&adapter->func_recovery_work); |
| 5019 | 5020 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 5bf16603a3e9..ef4672dc7357 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c | |||
| @@ -120,7 +120,8 @@ static void _be_roce_dev_open(struct be_adapter *adapter) | |||
| 120 | { | 120 | { |
| 121 | if (ocrdma_drv && adapter->ocrdma_dev && | 121 | if (ocrdma_drv && adapter->ocrdma_dev && |
| 122 | ocrdma_drv->state_change_handler) | 122 | ocrdma_drv->state_change_handler) |
| 123 | ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 0); | 123 | ocrdma_drv->state_change_handler(adapter->ocrdma_dev, |
| 124 | BE_DEV_UP); | ||
| 124 | } | 125 | } |
| 125 | 126 | ||
| 126 | void be_roce_dev_open(struct be_adapter *adapter) | 127 | void be_roce_dev_open(struct be_adapter *adapter) |
| @@ -136,7 +137,8 @@ static void _be_roce_dev_close(struct be_adapter *adapter) | |||
| 136 | { | 137 | { |
| 137 | if (ocrdma_drv && adapter->ocrdma_dev && | 138 | if (ocrdma_drv && adapter->ocrdma_dev && |
| 138 | ocrdma_drv->state_change_handler) | 139 | ocrdma_drv->state_change_handler) |
| 139 | ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 1); | 140 | ocrdma_drv->state_change_handler(adapter->ocrdma_dev, |
| 141 | BE_DEV_DOWN); | ||
| 140 | } | 142 | } |
| 141 | 143 | ||
| 142 | void be_roce_dev_close(struct be_adapter *adapter) | 144 | void be_roce_dev_close(struct be_adapter *adapter) |
| @@ -148,6 +150,18 @@ void be_roce_dev_close(struct be_adapter *adapter) | |||
| 148 | } | 150 | } |
| 149 | } | 151 | } |
| 150 | 152 | ||
| 153 | void be_roce_dev_shutdown(struct be_adapter *adapter) | ||
| 154 | { | ||
| 155 | if (be_roce_supported(adapter)) { | ||
| 156 | mutex_lock(&be_adapter_list_lock); | ||
| 157 | if (ocrdma_drv && adapter->ocrdma_dev && | ||
| 158 | ocrdma_drv->state_change_handler) | ||
| 159 | ocrdma_drv->state_change_handler(adapter->ocrdma_dev, | ||
| 160 | BE_DEV_SHUTDOWN); | ||
| 161 | mutex_unlock(&be_adapter_list_lock); | ||
| 162 | } | ||
| 163 | } | ||
| 164 | |||
| 151 | int be_roce_register_driver(struct ocrdma_driver *drv) | 165 | int be_roce_register_driver(struct ocrdma_driver *drv) |
| 152 | { | 166 | { |
| 153 | struct be_adapter *dev; | 167 | struct be_adapter *dev; |
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h index a3d9e96c18eb..e6f7eb1a7d87 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.h +++ b/drivers/net/ethernet/emulex/benet/be_roce.h | |||
| @@ -62,7 +62,8 @@ struct ocrdma_driver { | |||
| 62 | 62 | ||
| 63 | enum { | 63 | enum { |
| 64 | BE_DEV_UP = 0, | 64 | BE_DEV_UP = 0, |
| 65 | BE_DEV_DOWN = 1 | 65 | BE_DEV_DOWN = 1, |
| 66 | BE_DEV_SHUTDOWN = 2 | ||
| 66 | }; | 67 | }; |
| 67 | 68 | ||
| 68 | /* APIs for RoCE driver to register callback handlers, | 69 | /* APIs for RoCE driver to register callback handlers, |
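With BE_DEV_SHUTDOWN added to the enum, all three NIC-driver entry points (open, close, and the new shutdown) funnel into the same guarded dispatch: take the adapter-list lock and invoke the RoCE driver's state_change_handler only if a driver is registered and a handler exists. A minimal model of that dispatch (hypothetical names, not the be2net API):

    #include <pthread.h>
    #include <stdio.h>

    enum { DEV_UP = 0, DEV_DOWN = 1, DEV_SHUTDOWN = 2 };

    struct roce_drv {
        void (*state_change)(void *roce_dev, int event);
    };

    static pthread_mutex_t adapter_list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct roce_drv *registered_drv;

    /* Mirrors _be_roce_dev_open/close/shutdown: any NULL in the chain
     * (no driver, no RoCE device, no handler) makes this a no-op. */
    static void roce_notify(void *roce_dev, int event)
    {
        pthread_mutex_lock(&adapter_list_lock);
        if (registered_drv && roce_dev && registered_drv->state_change)
            registered_drv->state_change(roce_dev, event);
        pthread_mutex_unlock(&adapter_list_lock);
    }

    static void handler(void *dev, int event)
    {
        (void)dev;
        printf("event %d\n", event);
    }

    int main(void)
    {
        struct roce_drv drv = { handler };
        int dummy;
        roce_notify(&dummy, DEV_SHUTDOWN);  /* no-op: nothing registered */
        registered_drv = &drv;
        roce_notify(&dummy, DEV_SHUTDOWN);  /* prints "event 2" */
        return 0;
    }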
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 5d940a26055c..65a4a0f88ea0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
| @@ -1311,6 +1311,15 @@ static struct mlx4_cmd_info cmd_info[] = { | |||
| 1311 | .wrapper = mlx4_MAD_IFC_wrapper | 1311 | .wrapper = mlx4_MAD_IFC_wrapper |
| 1312 | }, | 1312 | }, |
| 1313 | { | 1313 | { |
| 1314 | .opcode = MLX4_CMD_MAD_DEMUX, | ||
| 1315 | .has_inbox = false, | ||
| 1316 | .has_outbox = false, | ||
| 1317 | .out_is_imm = false, | ||
| 1318 | .encode_slave_id = false, | ||
| 1319 | .verify = NULL, | ||
| 1320 | .wrapper = mlx4_CMD_EPERM_wrapper | ||
| 1321 | }, | ||
| 1322 | { | ||
| 1314 | .opcode = MLX4_CMD_QUERY_IF_STAT, | 1323 | .opcode = MLX4_CMD_QUERY_IF_STAT, |
| 1315 | .has_inbox = false, | 1324 | .has_inbox = false, |
| 1316 | .has_outbox = true, | 1325 | .has_outbox = true, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 688e1eabab29..494753e44ae3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
| @@ -136,7 +136,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) | |||
| 136 | [7] = "FSM (MAC anti-spoofing) support", | 136 | [7] = "FSM (MAC anti-spoofing) support", |
| 137 | [8] = "Dynamic QP updates support", | 137 | [8] = "Dynamic QP updates support", |
| 138 | [9] = "Device managed flow steering IPoIB support", | 138 | [9] = "Device managed flow steering IPoIB support", |
| 139 | [10] = "TCP/IP offloads/flow-steering for VXLAN support" | 139 | [10] = "TCP/IP offloads/flow-steering for VXLAN support", |
| 140 | [11] = "MAD DEMUX (Secure-Host) support" | ||
| 140 | }; | 141 | }; |
| 141 | int i; | 142 | int i; |
| 142 | 143 | ||
| @@ -571,6 +572,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
| 571 | #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 | 572 | #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 |
| 572 | #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d | 573 | #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d |
| 573 | #define QUERY_DEV_CAP_VXLAN 0x9e | 574 | #define QUERY_DEV_CAP_VXLAN 0x9e |
| 575 | #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 | ||
| 574 | 576 | ||
| 575 | dev_cap->flags2 = 0; | 577 | dev_cap->flags2 = 0; |
| 576 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 578 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
| @@ -748,6 +750,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
| 748 | MLX4_GET(dev_cap->max_counters, outbox, | 750 | MLX4_GET(dev_cap->max_counters, outbox, |
| 749 | QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); | 751 | QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); |
| 750 | 752 | ||
| 753 | MLX4_GET(field32, outbox, | ||
| 754 | QUERY_DEV_CAP_MAD_DEMUX_OFFSET); | ||
| 755 | if (field32 & (1 << 0)) | ||
| 756 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX; | ||
| 757 | |||
| 751 | MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); | 758 | MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); |
| 752 | if (field32 & (1 << 16)) | 759 | if (field32 & (1 << 16)) |
| 753 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; | 760 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; |
| @@ -2016,3 +2023,85 @@ void mlx4_opreq_action(struct work_struct *work) | |||
| 2016 | out: | 2023 | out: |
| 2017 | mlx4_free_cmd_mailbox(dev, mailbox); | 2024 | mlx4_free_cmd_mailbox(dev, mailbox); |
| 2018 | } | 2025 | } |
| 2026 | |||
| 2027 | static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev, | ||
| 2028 | struct mlx4_cmd_mailbox *mailbox) | ||
| 2029 | { | ||
| 2030 | #define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10 | ||
| 2031 | #define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20 | ||
| 2032 | #define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40 | ||
| 2033 | #define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70 | ||
| 2034 | |||
| 2035 | u32 set_attr_mask, getresp_attr_mask; | ||
| 2036 | u32 trap_attr_mask, traprepress_attr_mask; | ||
| 2037 | |||
| 2038 | MLX4_GET(set_attr_mask, mailbox->buf, | ||
| 2039 | MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET); | ||
| 2040 | mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n", | ||
| 2041 | set_attr_mask); | ||
| 2042 | |||
| 2043 | MLX4_GET(getresp_attr_mask, mailbox->buf, | ||
| 2044 | MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET); | ||
| 2045 | mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n", | ||
| 2046 | getresp_attr_mask); | ||
| 2047 | |||
| 2048 | MLX4_GET(trap_attr_mask, mailbox->buf, | ||
| 2049 | MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET); | ||
| 2050 | mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n", | ||
| 2051 | trap_attr_mask); | ||
| 2052 | |||
| 2053 | MLX4_GET(traprepress_attr_mask, mailbox->buf, | ||
| 2054 | MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET); | ||
| 2055 | mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n", | ||
| 2056 | traprepress_attr_mask); | ||
| 2057 | |||
| 2058 | if (set_attr_mask && getresp_attr_mask && trap_attr_mask && | ||
| 2059 | traprepress_attr_mask) | ||
| 2060 | return 1; | ||
| 2061 | |||
| 2062 | return 0; | ||
| 2063 | } | ||
| 2064 | |||
| 2065 | int mlx4_config_mad_demux(struct mlx4_dev *dev) | ||
| 2066 | { | ||
| 2067 | struct mlx4_cmd_mailbox *mailbox; | ||
| 2068 | int secure_host_active; | ||
| 2069 | int err; | ||
| 2070 | |||
| 2071 | /* Check if mad_demux is supported */ | ||
| 2072 | if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX)) | ||
| 2073 | return 0; | ||
| 2074 | |||
| 2075 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 2076 | if (IS_ERR(mailbox)) { | ||
| 2077 | mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX"); | ||
| 2078 | return -ENOMEM; | ||
| 2079 | } | ||
| 2080 | |||
| 2081 | /* Query mad_demux to find out which MADs are handled by internal sma */ | ||
| 2082 | err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */, | ||
| 2083 | MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX, | ||
| 2084 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); | ||
| 2085 | if (err) { | ||
| 2086 | mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n", | ||
| 2087 | err); | ||
| 2088 | goto out; | ||
| 2089 | } | ||
| 2090 | |||
| 2091 | secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox); | ||
| 2092 | |||
| 2093 | /* Config mad_demux to handle all MADs returned by the query above */ | ||
| 2094 | err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */, | ||
| 2095 | MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX, | ||
| 2096 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); | ||
| 2097 | if (err) { | ||
| 2098 | mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err); | ||
| 2099 | goto out; | ||
| 2100 | } | ||
| 2101 | |||
| 2102 | if (secure_host_active) | ||
| 2103 | mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n"); | ||
| 2104 | out: | ||
| 2105 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 2106 | return err; | ||
| 2107 | } | ||
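mlx4_check_smp_firewall_active() declares the SMP firewall active only when all four attribute masks returned by the MAD_DEMUX query are non-zero, i.e. the firmware claims some SET, GETRESP, TRAP and TRAP_REPRESS attributes for its internal SMA; mlx4_config_mad_demux() then re-issues those restrictions via MAD_DEMUX_CONFIG. The predicate, lifted out on its own (a sketch, not the driver's exact code; the sample mask values are arbitrary):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* One mask per SMP method class, read from the MAD_DEMUX mailbox at
     * offsets 0x10, 0x20, 0x40 and 0x70 in the hunk above. */
    struct demux_masks {
        uint32_t set, getresp, trap, trap_repress;
    };

    static bool smp_firewall_active(const struct demux_masks *m)
    {
        /* secure-host mode requires every class to be restricted */
        return m->set && m->getresp && m->trap && m->trap_repress;
    }

    int main(void)
    {
        struct demux_masks on  = { 0x4, 0x4, 0x1, 0x1 };
        struct demux_masks off = { 0x4, 0x0, 0x1, 0x1 };
        printf("%d %d\n", smp_firewall_active(&on),
                          smp_firewall_active(&off));   /* 1 0 */
        return 0;
    }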
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 80b8c5f30e4e..0158689906fd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -1853,6 +1853,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) | |||
| 1853 | mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); | 1853 | mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); |
| 1854 | goto err_mr_table_free; | 1854 | goto err_mr_table_free; |
| 1855 | } | 1855 | } |
| 1856 | err = mlx4_config_mad_demux(dev); | ||
| 1857 | if (err) { | ||
| 1858 | mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); | ||
| 1859 | goto err_mcg_table_free; | ||
| 1860 | } | ||
| 1856 | } | 1861 | } |
| 1857 | 1862 | ||
| 1858 | err = mlx4_init_eq_table(dev); | 1863 | err = mlx4_init_eq_table(dev); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 13fbcd03c3e4..b508c7887ef8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | |||
| @@ -274,6 +274,8 @@ struct mlx4_icm_table { | |||
| 274 | #define MLX4_MPT_FLAG_PHYSICAL (1 << 9) | 274 | #define MLX4_MPT_FLAG_PHYSICAL (1 << 9) |
| 275 | #define MLX4_MPT_FLAG_REGION (1 << 8) | 275 | #define MLX4_MPT_FLAG_REGION (1 << 8) |
| 276 | 276 | ||
| 277 | #define MLX4_MPT_PD_MASK (0x1FFFFUL) | ||
| 278 | #define MLX4_MPT_PD_VF_MASK (0xFE0000UL) | ||
| 277 | #define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27) | 279 | #define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27) |
| 278 | #define MLX4_MPT_PD_FLAG_RAE (1 << 28) | 280 | #define MLX4_MPT_PD_FLAG_RAE (1 << 28) |
| 279 | #define MLX4_MPT_PD_FLAG_EN_INV (3 << 24) | 281 | #define MLX4_MPT_PD_FLAG_EN_INV (3 << 24) |
| @@ -1306,5 +1308,6 @@ void mlx4_init_quotas(struct mlx4_dev *dev); | |||
| 1306 | int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); | 1308 | int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); |
| 1307 | /* Returns the VF index of slave */ | 1309 | /* Returns the VF index of slave */ |
| 1308 | int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); | 1310 | int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); |
| 1311 | int mlx4_config_mad_demux(struct mlx4_dev *dev); | ||
| 1309 | 1312 | ||
| 1310 | #endif /* MLX4_H */ | 1313 | #endif /* MLX4_H */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 2839abb878a6..7d717eccb7b0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c | |||
| @@ -298,6 +298,131 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox | |||
| 298 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); | 298 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | ||
| 302 | struct mlx4_mpt_entry ***mpt_entry) | ||
| 303 | { | ||
| 304 | int err; | ||
| 305 | int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); | ||
| 306 | struct mlx4_cmd_mailbox *mailbox = NULL; | ||
| 307 | |||
| 308 | /* Make sure that at this point we have single-threaded access only */ | ||
| 309 | |||
| 310 | if (mmr->enabled != MLX4_MPT_EN_HW) | ||
| 311 | return -EINVAL; | ||
| 312 | |||
| 313 | err = mlx4_HW2SW_MPT(dev, NULL, key); | ||
| 314 | |||
| 315 | if (err) { | ||
| 316 | mlx4_warn(dev, "HW2SW_MPT failed (%d).", err); | ||
| 317 | mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n"); | ||
| 318 | return err; | ||
| 319 | } | ||
| 320 | |||
| 321 | mmr->enabled = MLX4_MPT_EN_SW; | ||
| 322 | |||
| 323 | if (!mlx4_is_mfunc(dev)) { | ||
| 324 | **mpt_entry = mlx4_table_find( | ||
| 325 | &mlx4_priv(dev)->mr_table.dmpt_table, | ||
| 326 | key, NULL); | ||
| 327 | } else { | ||
| 328 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 329 | if (IS_ERR_OR_NULL(mailbox)) | ||
| 330 | return PTR_ERR(mailbox); | ||
| 331 | |||
| 332 | err = mlx4_cmd_box(dev, 0, mailbox->dma, key, | ||
| 333 | 0, MLX4_CMD_QUERY_MPT, | ||
| 334 | MLX4_CMD_TIME_CLASS_B, | ||
| 335 | MLX4_CMD_WRAPPED); | ||
| 336 | |||
| 337 | if (err) | ||
| 338 | goto free_mailbox; | ||
| 339 | |||
| 340 | *mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf; | ||
| 341 | } | ||
| 342 | |||
| 343 | if (!(*mpt_entry) || !(**mpt_entry)) { | ||
| 344 | err = -ENOMEM; | ||
| 345 | goto free_mailbox; | ||
| 346 | } | ||
| 347 | |||
| 348 | return 0; | ||
| 349 | |||
| 350 | free_mailbox: | ||
| 351 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 352 | return err; | ||
| 353 | } | ||
| 354 | EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt); | ||
| 355 | |||
| 356 | int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | ||
| 357 | struct mlx4_mpt_entry **mpt_entry) | ||
| 358 | { | ||
| 359 | int err; | ||
| 360 | |||
| 361 | if (!mlx4_is_mfunc(dev)) { | ||
| 362 | /* Make sure any changes to this entry are flushed */ | ||
| 363 | wmb(); | ||
| 364 | |||
| 365 | *(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW; | ||
| 366 | |||
| 367 | /* Make sure the new status is written */ | ||
| 368 | wmb(); | ||
| 369 | |||
| 370 | err = mlx4_SYNC_TPT(dev); | ||
| 371 | } else { | ||
| 372 | int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); | ||
| 373 | |||
| 374 | struct mlx4_cmd_mailbox *mailbox = | ||
| 375 | container_of((void *)mpt_entry, struct mlx4_cmd_mailbox, | ||
| 376 | buf); | ||
| 377 | |||
| 378 | err = mlx4_SW2HW_MPT(dev, mailbox, key); | ||
| 379 | } | ||
| 380 | |||
| 381 | mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK; | ||
| 382 | if (!err) | ||
| 383 | mmr->enabled = MLX4_MPT_EN_HW; | ||
| 384 | return err; | ||
| 385 | } | ||
| 386 | EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt); | ||
| 387 | |||
| 388 | void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev, | ||
| 389 | struct mlx4_mpt_entry **mpt_entry) | ||
| 390 | { | ||
| 391 | if (mlx4_is_mfunc(dev)) { | ||
| 392 | struct mlx4_cmd_mailbox *mailbox = | ||
| 393 | container_of((void *)mpt_entry, struct mlx4_cmd_mailbox, | ||
| 394 | buf); | ||
| 395 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 396 | } | ||
| 397 | } | ||
| 398 | EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt); | ||
| 399 | |||
| 400 | int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry, | ||
| 401 | u32 pdn) | ||
| 402 | { | ||
| 403 | u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags); | ||
| 404 | /* The wrapper function will put the slave's id here */ | ||
| 405 | if (mlx4_is_mfunc(dev)) | ||
| 406 | pd_flags &= ~MLX4_MPT_PD_VF_MASK; | ||
| 407 | mpt_entry->pd_flags = cpu_to_be32((pd_flags & ~MLX4_MPT_PD_MASK) | | ||
| 408 | (pdn & MLX4_MPT_PD_MASK) | ||
| 409 | | MLX4_MPT_PD_FLAG_EN_INV); | ||
| 410 | return 0; | ||
| 411 | } | ||
| 412 | EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd); | ||
| 413 | |||
| 414 | int mlx4_mr_hw_change_access(struct mlx4_dev *dev, | ||
| 415 | struct mlx4_mpt_entry *mpt_entry, | ||
| 416 | u32 access) | ||
| 417 | { | ||
| 418 | u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) | | ||
| 419 | (access & MLX4_PERM_MASK); | ||
| 420 | |||
| 421 | mpt_entry->flags = cpu_to_be32(flags); | ||
| 422 | return 0; | ||
| 423 | } | ||
| 424 | EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access); | ||
| 425 | |||
| 301 | static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, | 426 | static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, |
| 302 | u64 iova, u64 size, u32 access, int npages, | 427 | u64 iova, u64 size, u32 access, int npages, |
| 303 | int page_shift, struct mlx4_mr *mr) | 428 | int page_shift, struct mlx4_mr *mr) |
| @@ -463,6 +588,41 @@ int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) | |||
| 463 | } | 588 | } |
| 464 | EXPORT_SYMBOL_GPL(mlx4_mr_free); | 589 | EXPORT_SYMBOL_GPL(mlx4_mr_free); |
| 465 | 590 | ||
| 591 | void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr) | ||
| 592 | { | ||
| 593 | mlx4_mtt_cleanup(dev, &mr->mtt); | ||
| 594 | } | ||
| 595 | EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup); | ||
| 596 | |||
| 597 | int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, | ||
| 598 | u64 iova, u64 size, int npages, | ||
| 599 | int page_shift, struct mlx4_mpt_entry *mpt_entry) | ||
| 600 | { | ||
| 601 | int err; | ||
| 602 | |||
| 603 | mpt_entry->start = cpu_to_be64(mr->iova); | ||
| 604 | mpt_entry->length = cpu_to_be64(mr->size); | ||
| 605 | mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); | ||
| 606 | |||
| 607 | err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); | ||
| 608 | if (err) | ||
| 609 | return err; | ||
| 610 | |||
| 611 | if (mr->mtt.order < 0) { | ||
| 612 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); | ||
| 613 | mpt_entry->mtt_addr = 0; | ||
| 614 | } else { | ||
| 615 | mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, | ||
| 616 | &mr->mtt)); | ||
| 617 | if (mr->mtt.page_shift == 0) | ||
| 618 | mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); | ||
| 619 | } | ||
| 620 | mr->enabled = MLX4_MPT_EN_SW; | ||
| 621 | |||
| 622 | return 0; | ||
| 623 | } | ||
| 624 | EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write); | ||
| 625 | |||
| 466 | int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) | 626 | int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) |
| 467 | { | 627 | { |
| 468 | struct mlx4_cmd_mailbox *mailbox; | 628 | struct mlx4_cmd_mailbox *mailbox; |
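mlx4_mr_hw_change_pd() and mlx4_mr_hw_change_access() share one read-modify-write shape on a big-endian MPT word: convert to host order, clear only that field's mask, OR in the new value, convert back. A standalone rendition of the arithmetic (masks copied from the hunks above and from the device.h hunk further down; htonl/ntohl stand in for the kernel's cpu_to_be32/be32_to_cpu):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MPT_PD_MASK        0x1FFFFUL   /* PD number, bits 0..16 */
    #define MPT_PD_VF_MASK     0xFE0000UL  /* slave id, refilled by the PF */
    #define MPT_PD_FLAG_EN_INV (3UL << 24)
    #define PERM_MASK          0xFC00UL    /* access-permission bits 10..15 */

    static uint32_t change_pd(uint32_t pd_flags_be, uint32_t pdn, int is_mfunc)
    {
        uint32_t v = ntohl(pd_flags_be);
        if (is_mfunc)
            v &= ~MPT_PD_VF_MASK;          /* wrapper will put the VF id here */
        v = (v & ~MPT_PD_MASK) | (pdn & MPT_PD_MASK) | MPT_PD_FLAG_EN_INV;
        return htonl(v);
    }

    static uint32_t change_access(uint32_t flags_be, uint32_t access)
    {
        uint32_t v = (ntohl(flags_be) & ~PERM_MASK) | (access & PERM_MASK);
        return htonl(v);
    }

    int main(void)
    {
        uint32_t pd_flags = htonl(0x00000005);      /* old PD = 5 */
        pd_flags = change_pd(pd_flags, 0x42, 1);
        printf("pd_flags = 0x%08x\n", ntohl(pd_flags));  /* 0x03000042 */
        printf("flags    = 0x%08x\n",
               ntohl(change_access(htonl(0), 1u << 13))); /* 0x00002000 */
        return 0;
    }

Untouched bits (the VF id in the single-function case, the non-permission flag bits) survive the update, which is what lets reregistration patch one field of a live MPT entry at a time.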
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 0efc1368e5a8..1089367fed22 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
| @@ -2613,12 +2613,34 @@ int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, | |||
| 2613 | if (err) | 2613 | if (err) |
| 2614 | return err; | 2614 | return err; |
| 2615 | 2615 | ||
| 2616 | if (mpt->com.from_state != RES_MPT_HW) { | 2616 | if (mpt->com.from_state == RES_MPT_MAPPED) { |
| 2617 | /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do | ||
| 2618 | * that, the VF must read the MPT. But since the MPT entry memory is not | ||
| 2619 | * in the VF's virtual memory space, it must use QUERY_MPT to obtain the | ||
| 2620 | * entry contents. To guarantee that the MPT cannot be changed, the driver | ||
| 2621 | * must perform HW2SW_MPT before this query and return the MPT entry to HW | ||
| 2622 | * ownership following the change. The change here allows the VF to | ||
| 2623 | * perform QUERY_MPT also when the entry is in SW ownership. | ||
| 2624 | */ | ||
| 2625 | struct mlx4_mpt_entry *mpt_entry = mlx4_table_find( | ||
| 2626 | &mlx4_priv(dev)->mr_table.dmpt_table, | ||
| 2627 | mpt->key, NULL); | ||
| 2628 | |||
| 2629 | if (NULL == mpt_entry || NULL == outbox->buf) { | ||
| 2630 | err = -EINVAL; | ||
| 2631 | goto out; | ||
| 2632 | } | ||
| 2633 | |||
| 2634 | memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry)); | ||
| 2635 | |||
| 2636 | err = 0; | ||
| 2637 | } else if (mpt->com.from_state == RES_MPT_HW) { | ||
| 2638 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); | ||
| 2639 | } else { | ||
| 2617 | err = -EBUSY; | 2640 | err = -EBUSY; |
| 2618 | goto out; | 2641 | goto out; |
| 2619 | } | 2642 | } |
| 2620 | 2643 | ||
| 2621 | err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); | ||
| 2622 | 2644 | ||
| 2623 | out: | 2645 | out: |
| 2624 | put_res(dev, slave, id, RES_MPT); | 2646 | put_res(dev, slave, id, RES_MPT); |
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 43fea2219f83..ae45bd99baed 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c | |||
| @@ -472,7 +472,8 @@ static void __srp_start_tl_fail_timers(struct srp_rport *rport) | |||
| 472 | if (delay > 0) | 472 | if (delay > 0) |
| 473 | queue_delayed_work(system_long_wq, &rport->reconnect_work, | 473 | queue_delayed_work(system_long_wq, &rport->reconnect_work, |
| 474 | 1UL * delay * HZ); | 474 | 1UL * delay * HZ); |
| 475 | if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { | 475 | if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) && |
| 476 | srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { | ||
| 476 | pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev), | 477 | pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev), |
| 477 | rport->state); | 478 | rport->state); |
| 478 | scsi_target_block(&shost->shost_gendev); | 479 | scsi_target_block(&shost->shost_gendev); |
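The added condition in __srp_start_tl_fail_timers() prevents the transition to SRP_RPORT_BLOCKED when both transport-layer timeouts are disabled (negative), because in that case nothing would ever be scheduled to unblock the target. The predicate in isolation (a sketch with illustrative names):

    #include <stdbool.h>
    #include <stdio.h>

    /* A negative timeout means that mechanism is off; block the rport
     * only if at least one unblock path is armed. */
    static bool should_block(int fast_io_fail_tmo, int dev_loss_tmo)
    {
        return fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0;
    }

    int main(void)
    {
        printf("%d %d\n", should_block(-1, -1),   /* 0: never block */
                          should_block(5, -1));   /* 1: fail timer armed */
        return 0;
    }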
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index c8450366c130..379c02648ab3 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h | |||
| @@ -116,6 +116,7 @@ enum { | |||
| 116 | /* special QP and management commands */ | 116 | /* special QP and management commands */ |
| 117 | MLX4_CMD_CONF_SPECIAL_QP = 0x23, | 117 | MLX4_CMD_CONF_SPECIAL_QP = 0x23, |
| 118 | MLX4_CMD_MAD_IFC = 0x24, | 118 | MLX4_CMD_MAD_IFC = 0x24, |
| 119 | MLX4_CMD_MAD_DEMUX = 0x203, | ||
| 119 | 120 | ||
| 120 | /* multicast commands */ | 121 | /* multicast commands */ |
| 121 | MLX4_CMD_READ_MCG = 0x25, | 122 | MLX4_CMD_READ_MCG = 0x25, |
| @@ -186,6 +187,12 @@ enum { | |||
| 186 | }; | 187 | }; |
| 187 | 188 | ||
| 188 | enum { | 189 | enum { |
| 190 | MLX4_CMD_MAD_DEMUX_CONFIG = 0, | ||
| 191 | MLX4_CMD_MAD_DEMUX_QUERY_STATE = 1, | ||
| 192 | MLX4_CMD_MAD_DEMUX_QUERY_RESTR = 2, /* Query mad demux restrictions */ | ||
| 193 | }; | ||
| 194 | |||
| 195 | enum { | ||
| 189 | MLX4_CMD_WRAPPED, | 196 | MLX4_CMD_WRAPPED, |
| 190 | MLX4_CMD_NATIVE | 197 | MLX4_CMD_NATIVE |
| 191 | }; | 198 | }; |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index e15b1544ea83..071f6b234604 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -183,6 +183,7 @@ enum { | |||
| 183 | MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 8, | 183 | MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 8, |
| 184 | MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9, | 184 | MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9, |
| 185 | MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, | 185 | MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, |
| 186 | MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, | ||
| 186 | }; | 187 | }; |
| 187 | 188 | ||
| 188 | enum { | 189 | enum { |
| @@ -273,6 +274,7 @@ enum { | |||
| 273 | MLX4_PERM_REMOTE_WRITE = 1 << 13, | 274 | MLX4_PERM_REMOTE_WRITE = 1 << 13, |
| 274 | MLX4_PERM_ATOMIC = 1 << 14, | 275 | MLX4_PERM_ATOMIC = 1 << 14, |
| 275 | MLX4_PERM_BIND_MW = 1 << 15, | 276 | MLX4_PERM_BIND_MW = 1 << 15, |
| 277 | MLX4_PERM_MASK = 0xFC00 | ||
| 276 | }; | 278 | }; |
| 277 | 279 | ||
| 278 | enum { | 280 | enum { |
| @@ -1254,6 +1256,21 @@ int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port); | |||
| 1254 | int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port); | 1256 | int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port); |
| 1255 | int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, | 1257 | int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, |
| 1256 | int enable); | 1258 | int enable); |
| 1259 | int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | ||
| 1260 | struct mlx4_mpt_entry ***mpt_entry); | ||
| 1261 | int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | ||
| 1262 | struct mlx4_mpt_entry **mpt_entry); | ||
| 1263 | int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry, | ||
| 1264 | u32 pdn); | ||
| 1265 | int mlx4_mr_hw_change_access(struct mlx4_dev *dev, | ||
| 1266 | struct mlx4_mpt_entry *mpt_entry, | ||
| 1267 | u32 access); | ||
| 1268 | void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev, | ||
| 1269 | struct mlx4_mpt_entry **mpt_entry); | ||
| 1270 | void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr); | ||
| 1271 | int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, | ||
| 1272 | u64 iova, u64 size, int npages, | ||
| 1273 | int page_shift, struct mlx4_mpt_entry *mpt_entry); | ||
| 1257 | 1274 | ||
| 1258 | /* Returns true if running in low memory profile (kdump kernel) */ | 1275 | /* Returns true if running in low memory profile (kdump kernel) */ |
| 1259 | static inline bool mlx4_low_memory_profile(void) | 1276 | static inline bool mlx4_low_memory_profile(void) |
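Taken together, the new mlx4_mr_hw_* prototypes suggest a bracketed sequence for rewriting a live memory region: pull the MPT entry into software ownership, patch it, write it back, release. A sketch under that assumption, with new_pdn and new_access as placeholders and error paths trimmed:

    struct mlx4_mpt_entry **mpt_entry;
    int err;

    err = mlx4_mr_hw_get_mpt(dev, mmr, &mpt_entry);
    if (err)
            return err;

    /* Patch the software copy: new protection domain, new access rights. */
    err = mlx4_mr_hw_change_pd(dev, *mpt_entry, new_pdn);
    if (!err)
            err = mlx4_mr_hw_change_access(dev, *mpt_entry, new_access);

    /* Push the modified entry back under hardware ownership. */
    if (!err)
            err = mlx4_mr_hw_write_mpt(dev, mmr, mpt_entry);

    mlx4_mr_hw_put_mpt(dev, mpt_entry);
    return err;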
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 3d81b90cc315..9bb99e983f58 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h | |||
| @@ -40,6 +40,7 @@ | |||
| 40 | #include <linux/list.h> | 40 | #include <linux/list.h> |
| 41 | 41 | ||
| 42 | #include <rdma/ib_verbs.h> | 42 | #include <rdma/ib_verbs.h> |
| 43 | #include <uapi/rdma/ib_user_mad.h> | ||
| 43 | 44 | ||
| 44 | /* Management base version */ | 45 | /* Management base version */ |
| 45 | #define IB_MGMT_BASE_VERSION 1 | 46 | #define IB_MGMT_BASE_VERSION 1 |
| @@ -355,9 +356,13 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent, | |||
| 355 | * @hi_tid: Access layer assigned transaction ID for this client. | 356 | * @hi_tid: Access layer assigned transaction ID for this client. |
| 356 | * Unsolicited MADs sent by this client will have the upper 32-bits | 357 | * Unsolicited MADs sent by this client will have the upper 32-bits |
| 357 | * of their TID set to this value. | 358 | * of their TID set to this value. |
| 359 | * @flags: registration flags | ||
| 358 | * @port_num: Port number on which QP is registered | 360 | * @port_num: Port number on which QP is registered |
| 359 | * @rmpp_version: If set, indicates the RMPP version used by this agent. | 361 | * @rmpp_version: If set, indicates the RMPP version used by this agent. |
| 360 | */ | 362 | */ |
| 363 | enum { | ||
| 364 | IB_MAD_USER_RMPP = IB_USER_MAD_USER_RMPP, | ||
| 365 | }; | ||
| 361 | struct ib_mad_agent { | 366 | struct ib_mad_agent { |
| 362 | struct ib_device *device; | 367 | struct ib_device *device; |
| 363 | struct ib_qp *qp; | 368 | struct ib_qp *qp; |
| @@ -367,6 +372,7 @@ struct ib_mad_agent { | |||
| 367 | ib_mad_snoop_handler snoop_handler; | 372 | ib_mad_snoop_handler snoop_handler; |
| 368 | void *context; | 373 | void *context; |
| 369 | u32 hi_tid; | 374 | u32 hi_tid; |
| 375 | u32 flags; | ||
| 370 | u8 port_num; | 376 | u8 port_num; |
| 371 | u8 rmpp_version; | 377 | u8 rmpp_version; |
| 372 | }; | 378 | }; |
| @@ -426,6 +432,7 @@ struct ib_mad_recv_wc { | |||
| 426 | * in the range from 0x30 to 0x4f. Otherwise not used. | 432 | * in the range from 0x30 to 0x4f. Otherwise not used. |
| 427 | * @method_mask: The caller will receive unsolicited MADs for any method | 433 | * @method_mask: The caller will receive unsolicited MADs for any method |
| 428 | * where @method_mask = 1. | 434 | * where @method_mask = 1. |
| 435 | * | ||
| 429 | */ | 436 | */ |
| 430 | struct ib_mad_reg_req { | 437 | struct ib_mad_reg_req { |
| 431 | u8 mgmt_class; | 438 | u8 mgmt_class; |
| @@ -451,6 +458,7 @@ struct ib_mad_reg_req { | |||
| 451 | * @recv_handler: The completion callback routine invoked for a received | 458 | * @recv_handler: The completion callback routine invoked for a received |
| 452 | * MAD. | 459 | * MAD. |
| 453 | * @context: User specified context associated with the registration. | 460 | * @context: User specified context associated with the registration. |
| 461 | * @registration_flags: Registration flags to set for this agent | ||
| 454 | */ | 462 | */ |
| 455 | struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | 463 | struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, |
| 456 | u8 port_num, | 464 | u8 port_num, |
| @@ -459,7 +467,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
| 459 | u8 rmpp_version, | 467 | u8 rmpp_version, |
| 460 | ib_mad_send_handler send_handler, | 468 | ib_mad_send_handler send_handler, |
| 461 | ib_mad_recv_handler recv_handler, | 469 | ib_mad_recv_handler recv_handler, |
| 462 | void *context); | 470 | void *context, |
| 471 | u32 registration_flags); | ||
| 463 | 472 | ||
| 464 | enum ib_mad_snoop_flags { | 473 | enum ib_mad_snoop_flags { |
| 465 | /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/ | 474 | /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/ |
| @@ -661,4 +670,11 @@ void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num); | |||
| 661 | */ | 670 | */ |
| 662 | void ib_free_send_mad(struct ib_mad_send_buf *send_buf); | 671 | void ib_free_send_mad(struct ib_mad_send_buf *send_buf); |
| 663 | 672 | ||
| 673 | /** | ||
| 674 | * ib_mad_kernel_rmpp_agent - Returns whether the agent is performing RMPP. | ||
| 675 | * @agent: the agent in question | ||
| 676 | * @return: true if the agent is performing RMPP, false otherwise. | ||
| 677 | */ | ||
| 678 | int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent); | ||
| 679 | |||
| 664 | #endif /* IB_MAD_H */ | 680 | #endif /* IB_MAD_H */ |
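For kernel agents the visible change here is the extra registration_flags argument and the IB_MAD_USER_RMPP flag mirrored from the umad header; ib_mad_kernel_rmpp_agent() then tells callers whether the MAD core is still doing RMPP on the agent's behalf. A sketch of a registration under the new signature, with the callbacks, context, and reg_req setup as placeholders:

    struct ib_mad_agent *agent;

    agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
                                  &reg_req, IB_MGMT_RMPP_VERSION,
                                  my_send_handler, my_recv_handler,
                                  my_context,
                                  0 /* flags: keep RMPP in the kernel */);
    if (IS_ERR(agent))
            return PTR_ERR(agent);

    /* With no IB_MAD_USER_RMPP flag and a nonzero rmpp_version, the
     * core segments and reassembles RMPP transfers for this agent. */
    if (!ib_mad_kernel_rmpp_agent(agent))
            pr_warn("expected kernel-side RMPP for this agent\n");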
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 7ccef342f724..ed44cc07a7b3 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
| @@ -1097,7 +1097,8 @@ struct ib_mr_attr { | |||
| 1097 | enum ib_mr_rereg_flags { | 1097 | enum ib_mr_rereg_flags { |
| 1098 | IB_MR_REREG_TRANS = 1, | 1098 | IB_MR_REREG_TRANS = 1, |
| 1099 | IB_MR_REREG_PD = (1<<1), | 1099 | IB_MR_REREG_PD = (1<<1), |
| 1100 | IB_MR_REREG_ACCESS = (1<<2) | 1100 | IB_MR_REREG_ACCESS = (1<<2), |
| 1101 | IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) | ||
| 1101 | }; | 1102 | }; |
| 1102 | 1103 | ||
| 1103 | /** | 1104 | /** |
| @@ -1547,6 +1548,13 @@ struct ib_device { | |||
| 1547 | u64 virt_addr, | 1548 | u64 virt_addr, |
| 1548 | int mr_access_flags, | 1549 | int mr_access_flags, |
| 1549 | struct ib_udata *udata); | 1550 | struct ib_udata *udata); |
| 1551 | int (*rereg_user_mr)(struct ib_mr *mr, | ||
| 1552 | int flags, | ||
| 1553 | u64 start, u64 length, | ||
| 1554 | u64 virt_addr, | ||
| 1555 | int mr_access_flags, | ||
| 1556 | struct ib_pd *pd, | ||
| 1557 | struct ib_udata *udata); | ||
| 1550 | int (*query_mr)(struct ib_mr *mr, | 1558 | int (*query_mr)(struct ib_mr *mr, |
| 1551 | struct ib_mr_attr *mr_attr); | 1559 | struct ib_mr_attr *mr_attr); |
| 1552 | int (*dereg_mr)(struct ib_mr *mr); | 1560 | int (*dereg_mr)(struct ib_mr *mr); |
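IB_MR_REREG_SUPPORTED folds the three rereg bits into one mask ((IB_MR_REREG_ACCESS << 1) - 1 == 0x7), so rejecting unknown flags becomes a single test, and rereg_user_mr is the optional device method the new uverbs command dispatches to. A plausible validation-and-dispatch sketch, not the actual uverbs code:

    if (flags & ~IB_MR_REREG_SUPPORTED)
            return -EINVAL;                 /* unknown flag bits */

    if (!mr->device->rereg_user_mr)
            return -ENOSYS;                 /* driver lacks rereg support */

    return mr->device->rereg_user_mr(mr, flags, start, length, virt_addr,
                                     mr_access_flags, pd, &udata);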
diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h index d6fce1cbdb90..09f809f323ea 100644 --- a/include/uapi/rdma/ib_user_mad.h +++ b/include/uapi/rdma/ib_user_mad.h | |||
| @@ -191,6 +191,45 @@ struct ib_user_mad_reg_req { | |||
| 191 | __u8 rmpp_version; | 191 | __u8 rmpp_version; |
| 192 | }; | 192 | }; |
| 193 | 193 | ||
| 194 | /** | ||
| 195 | * ib_user_mad_reg_req2 - MAD registration request | ||
| 196 | * | ||
| 197 | * @id - Set by the _kernel_; used by userspace to identify the | ||
| 198 | * registered agent in future requests. | ||
| 199 | * @qpn - Queue pair number; must be 0 or 1. | ||
| 200 | * @mgmt_class - Indicates which management class of MADs should be | ||
| 201 | * received by the caller. This field is only required if | ||
| 202 | * the user wishes to receive unsolicited MADs, otherwise | ||
| 203 | * it should be 0. | ||
| 204 | * @mgmt_class_version - Indicates which version of MADs for the given | ||
| 205 | * management class to receive. | ||
| 206 | * @res - Ignored. | ||
| 207 | * @flags - Additional registration flags; must be in the set of | ||
| 208 | * flags defined in IB_USER_MAD_REG_FLAGS_CAP | ||
| 209 | * @method_mask - The caller wishes to receive unsolicited MADs for the | ||
| 210 | * methods whose bits are set. | ||
| 211 | * @oui - Indicates IEEE OUI to use when mgmt_class is a vendor | ||
| 212 | * class in the range from 0x30 to 0x4f. Otherwise not | ||
| 213 | * used. | ||
| 214 | * @rmpp_version - If set, indicates the RMPP version to use. | ||
| 215 | */ | ||
| 216 | enum { | ||
| 217 | IB_USER_MAD_USER_RMPP = (1 << 0), | ||
| 218 | }; | ||
| 219 | #define IB_USER_MAD_REG_FLAGS_CAP (IB_USER_MAD_USER_RMPP) | ||
| 220 | struct ib_user_mad_reg_req2 { | ||
| 221 | __u32 id; | ||
| 222 | __u32 qpn; | ||
| 223 | __u8 mgmt_class; | ||
| 224 | __u8 mgmt_class_version; | ||
| 225 | __u16 res; | ||
| 226 | __u32 flags; | ||
| 227 | __u64 method_mask[2]; | ||
| 228 | __u32 oui; | ||
| 229 | __u8 rmpp_version; | ||
| 230 | __u8 reserved[3]; | ||
| 231 | }; | ||
| 232 | |||
| 194 | #define IB_IOCTL_MAGIC 0x1b | 233 | #define IB_IOCTL_MAGIC 0x1b |
| 195 | 234 | ||
| 196 | #define IB_USER_MAD_REGISTER_AGENT _IOWR(IB_IOCTL_MAGIC, 1, \ | 235 | #define IB_USER_MAD_REGISTER_AGENT _IOWR(IB_IOCTL_MAGIC, 1, \ |
| @@ -200,4 +239,7 @@ struct ib_user_mad_reg_req { | |||
| 200 | 239 | ||
| 201 | #define IB_USER_MAD_ENABLE_PKEY _IO(IB_IOCTL_MAGIC, 3) | 240 | #define IB_USER_MAD_ENABLE_PKEY _IO(IB_IOCTL_MAGIC, 3) |
| 202 | 241 | ||
| 242 | #define IB_USER_MAD_REGISTER_AGENT2 _IOWR(IB_IOCTL_MAGIC, 4, \ | ||
| 243 | struct ib_user_mad_reg_req2) | ||
| 244 | |||
| 203 | #endif /* IB_USER_MAD_H */ | 245 | #endif /* IB_USER_MAD_H */ |
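From user space, the new agent registration goes through IB_USER_MAD_REGISTER_AGENT2 with the struct above; the kernel rejects flags outside IB_USER_MAD_REG_FLAGS_CAP and writes the agent id back into the request. A minimal sketch, assuming /dev/infiniband/umad0 as the device node and the installed uapi header path:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <rdma/ib_user_mad.h>

    /* Returns the open umad fd on success, -1 on error; *agent_id is
     * filled in by the kernel on successful registration. */
    int register_user_rmpp_agent(int *agent_id)
    {
            struct ib_user_mad_reg_req2 req;
            int fd = open("/dev/infiniband/umad0", O_RDWR);

            if (fd < 0)
                    return -1;

            memset(&req, 0, sizeof(req));
            req.qpn = 1;                       /* GSI */
            req.rmpp_version = 1;
            req.flags = IB_USER_MAD_USER_RMPP; /* reassemble RMPP ourselves */

            if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT2, &req) < 0) {
                    close(fd);
                    return -1;
            }

            *agent_id = req.id;
            return fd;
    }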
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index cbfdd4ca9510..26daf55ff76e 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h | |||
| @@ -276,6 +276,22 @@ struct ib_uverbs_reg_mr_resp { | |||
| 276 | __u32 rkey; | 276 | __u32 rkey; |
| 277 | }; | 277 | }; |
| 278 | 278 | ||
| 279 | struct ib_uverbs_rereg_mr { | ||
| 280 | __u64 response; | ||
| 281 | __u32 mr_handle; | ||
| 282 | __u32 flags; | ||
| 283 | __u64 start; | ||
| 284 | __u64 length; | ||
| 285 | __u64 hca_va; | ||
| 286 | __u32 pd_handle; | ||
| 287 | __u32 access_flags; | ||
| 288 | }; | ||
| 289 | |||
| 290 | struct ib_uverbs_rereg_mr_resp { | ||
| 291 | __u32 lkey; | ||
| 292 | __u32 rkey; | ||
| 293 | }; | ||
| 294 | |||
| 279 | struct ib_uverbs_dereg_mr { | 295 | struct ib_uverbs_dereg_mr { |
| 280 | __u32 mr_handle; | 296 | __u32 mr_handle; |
| 281 | }; | 297 | }; |
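The response carries fresh lkey/rkey values because a reregistration may be implemented underneath as destroy-and-recreate. Populating the command from user space might look like the fragment below; the ib_uverbs_cmd_hdr framing and the write() to the uverbs device node are assumed and omitted, and the handles are placeholders:

    struct ib_uverbs_rereg_mr_resp resp;
    struct ib_uverbs_rereg_mr cmd = {
            .response  = (__u64) (uintptr_t) &resp,
            .mr_handle = mr_handle,           /* placeholder handle */
            .flags     = 1 << 1,              /* IB_MR_REREG_PD */
            .pd_handle = new_pd_handle,       /* placeholder handle */
    };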
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h index 99b80abf360a..3066718eb120 100644 --- a/include/uapi/rdma/rdma_user_cm.h +++ b/include/uapi/rdma/rdma_user_cm.h | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #define RDMA_USER_CM_H | 34 | #define RDMA_USER_CM_H |
| 35 | 35 | ||
| 36 | #include <linux/types.h> | 36 | #include <linux/types.h> |
| 37 | #include <linux/socket.h> | ||
| 37 | #include <linux/in6.h> | 38 | #include <linux/in6.h> |
| 38 | #include <rdma/ib_user_verbs.h> | 39 | #include <rdma/ib_user_verbs.h> |
| 39 | #include <rdma/ib_user_sa.h> | 40 | #include <rdma/ib_user_sa.h> |
