74 files changed, 3781 insertions, 662 deletions
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index bf508b5550c4..dc21836b5a8d 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,18 +1,3 @@
 obj-$(CONFIG_INFINIBAND) += core/
-obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
-obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
-obj-$(CONFIG_INFINIBAND_QIB) += hw/qib/
-obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
-obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
-obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
-obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/
-obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
-obj-$(CONFIG_MLX5_INFINIBAND) += hw/mlx5/
-obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
-obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/
-obj-$(CONFIG_INFINIBAND_USNIC) += hw/usnic/
-obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
-obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
-obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
-obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
-obj-$(CONFIG_INFINIBAND_ISERT) += ulp/isert/
+obj-$(CONFIG_INFINIBAND) += hw/
+obj-$(CONFIG_INFINIBAND) += ulp/
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 3ab3865544bb..ffd0af6734af 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -18,7 +18,7 @@ ib_sa-y := sa_query.o multicast.o
 
 ib_cm-y := cm.o
 
-iw_cm-y := iwcm.o
+iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o
 
 rdma_cm-y := cma.o
 
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 42c3058e6e9c..d570030d899c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3607,7 +3607,8 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
 
                 id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
                                         sizeof *id_stats, RDMA_NL_RDMA_CM,
-                                        RDMA_NL_RDMA_CM_ID_STATS);
+                                        RDMA_NL_RDMA_CM_ID_STATS,
+                                        NLM_F_MULTI);
                 if (!id_stats)
                         goto out;
 
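
The hunk above tags every RDMA CM id-stats record with NLM_F_MULTI, which is how netlink marks a message as one part of a multipart dump that userspace must keep reading until it sees NLMSG_DONE. A minimal userspace sketch of draining such a dump (illustrative only, not part of this patch; the socket setup, buffer size, and function name are assumptions):

/* Sketch: drain a multipart netlink dump such as RDMA_NL_RDMA_CM_ID_STATS.
 * "fd" is assumed to be an already-bound NETLINK_RDMA socket with the
 * dump request already sent.
 */
#include <linux/netlink.h>
#include <stdio.h>
#include <sys/socket.h>

static void drain_dump(int fd)
{
        char buf[8192];
        int done = 0;

        while (!done) {
                int len = (int)recv(fd, buf, sizeof(buf), 0);
                struct nlmsghdr *nlh;

                if (len <= 0)
                        break;
                for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
                     nlh = NLMSG_NEXT(nlh, len)) {
                        if (nlh->nlmsg_type == NLMSG_DONE) {
                                done = 1;       /* end of the multipart stream */
                                break;
                        }
                        if (!(nlh->nlmsg_flags & NLM_F_MULTI))
                                done = 1;       /* single-part reply; stop after it */
                        printf("record: type=%u len=%u\n",
                               nlh->nlmsg_type, nlh->nlmsg_len);
                }
        }
}
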
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
new file mode 100644
index 000000000000..b85ddbc979e0
--- /dev/null
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -0,0 +1,685 @@
/*
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 * Copyright (c) 2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iwpm_util.h"

static const char iwpm_ulib_name[] = "iWarpPortMapperUser";
static int iwpm_ulib_version = 3;
static int iwpm_user_pid = IWPM_PID_UNDEFINED;
static atomic_t echo_nlmsg_seq;

int iwpm_valid_pid(void)
{
        return iwpm_user_pid > 0;
}
EXPORT_SYMBOL(iwpm_valid_pid);

/*
 * iwpm_register_pid - Send a netlink query to user space
 * for the iwarp port mapper pid
 *
 * nlmsg attributes:
 * [IWPM_NLA_REG_PID_SEQ]
 * [IWPM_NLA_REG_IF_NAME]
 * [IWPM_NLA_REG_IBDEV_NAME]
 * [IWPM_NLA_REG_ULIB_NAME]
 */
int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
{
        struct sk_buff *skb = NULL;
        struct iwpm_nlmsg_request *nlmsg_request = NULL;
        struct nlmsghdr *nlh;
        u32 msg_seq;
        const char *err_str = "";
        int ret = -EINVAL;

        if (!iwpm_valid_client(nl_client)) {
                err_str = "Invalid port mapper client";
                goto pid_query_error;
        }
        if (iwpm_registered_client(nl_client))
                return 0;
        skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);
        if (!skb) {
                err_str = "Unable to create a nlmsg";
                goto pid_query_error;
        }
        nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
        nlmsg_request = iwpm_get_nlmsg_request(nlh->nlmsg_seq, nl_client, GFP_KERNEL);
        if (!nlmsg_request) {
                err_str = "Unable to allocate netlink request";
                goto pid_query_error;
        }
        msg_seq = atomic_read(&echo_nlmsg_seq);

        /* fill in the pid request message */
        err_str = "Unable to put attribute of the nlmsg";
        ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_REG_PID_SEQ);
        if (ret)
                goto pid_query_error;
        ret = ibnl_put_attr(skb, nlh, IWPM_IFNAME_SIZE,
                            pm_msg->if_name, IWPM_NLA_REG_IF_NAME);
        if (ret)
                goto pid_query_error;
        ret = ibnl_put_attr(skb, nlh, IWPM_DEVNAME_SIZE,
                            pm_msg->dev_name, IWPM_NLA_REG_IBDEV_NAME);
        if (ret)
                goto pid_query_error;
        ret = ibnl_put_attr(skb, nlh, IWPM_ULIBNAME_SIZE,
                            (char *)iwpm_ulib_name, IWPM_NLA_REG_ULIB_NAME);
        if (ret)
                goto pid_query_error;

        pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
                 __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);

        ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
        if (ret) {
                skb = NULL; /* skb is freed in the netlink send-op handling */
                iwpm_set_registered(nl_client, 1);
                iwpm_user_pid = IWPM_PID_UNAVAILABLE;
                err_str = "Unable to send a nlmsg";
                goto pid_query_error;
        }
        nlmsg_request->req_buffer = pm_msg;
        ret = iwpm_wait_complete_req(nlmsg_request);
        return ret;
pid_query_error:
        pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
        if (skb)
                dev_kfree_skb(skb);
        if (nlmsg_request)
                iwpm_free_nlmsg_request(&nlmsg_request->kref);
        return ret;
}
EXPORT_SYMBOL(iwpm_register_pid);

/*
 * iwpm_add_mapping - Send a netlink add mapping message
 * to the port mapper
 * nlmsg attributes:
 * [IWPM_NLA_MANAGE_MAPPING_SEQ]
 * [IWPM_NLA_MANAGE_ADDR]
 */
int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
{
        struct sk_buff *skb = NULL;
        struct iwpm_nlmsg_request *nlmsg_request = NULL;
        struct nlmsghdr *nlh;
        u32 msg_seq;
        const char *err_str = "";
        int ret = -EINVAL;

        if (!iwpm_valid_client(nl_client)) {
                err_str = "Invalid port mapper client";
                goto add_mapping_error;
        }
        if (!iwpm_registered_client(nl_client)) {
                err_str = "Unregistered port mapper client";
                goto add_mapping_error;
        }
        if (!iwpm_valid_pid())
                return 0;
        skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);
        if (!skb) {
                err_str = "Unable to create a nlmsg";
                goto add_mapping_error;
        }
        nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
        nlmsg_request = iwpm_get_nlmsg_request(nlh->nlmsg_seq, nl_client, GFP_KERNEL);
        if (!nlmsg_request) {
                err_str = "Unable to allocate netlink request";
                goto add_mapping_error;
        }
        msg_seq = atomic_read(&echo_nlmsg_seq);
        /* fill in the add mapping message */
        err_str = "Unable to put attribute of the nlmsg";
        ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq,
                            IWPM_NLA_MANAGE_MAPPING_SEQ);
        if (ret)
                goto add_mapping_error;
        ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),
                            &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);
        if (ret)
                goto add_mapping_error;
        nlmsg_request->req_buffer = pm_msg;

        ret = ibnl_unicast(skb, nlh, iwpm_user_pid);
        if (ret) {
                skb = NULL; /* skb is freed in the netlink send-op handling */
                iwpm_user_pid = IWPM_PID_UNDEFINED;
                err_str = "Unable to send a nlmsg";
                goto add_mapping_error;
        }
        ret = iwpm_wait_complete_req(nlmsg_request);
        return ret;
add_mapping_error:
        pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
        if (skb)
                dev_kfree_skb(skb);
        if (nlmsg_request)
                iwpm_free_nlmsg_request(&nlmsg_request->kref);
        return ret;
}
EXPORT_SYMBOL(iwpm_add_mapping);

/*
 * iwpm_add_and_query_mapping - Send a netlink add and query
 * mapping message to the port mapper
 * nlmsg attributes:
 * [IWPM_NLA_QUERY_MAPPING_SEQ]
 * [IWPM_NLA_QUERY_LOCAL_ADDR]
 * [IWPM_NLA_QUERY_REMOTE_ADDR]
 */
int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
{
        struct sk_buff *skb = NULL;
        struct iwpm_nlmsg_request *nlmsg_request = NULL;
        struct nlmsghdr *nlh;
        u32 msg_seq;
        const char *err_str = "";
        int ret = -EINVAL;

        if (!iwpm_valid_client(nl_client)) {
                err_str = "Invalid port mapper client";
                goto query_mapping_error;
        }
        if (!iwpm_registered_client(nl_client)) {
                err_str = "Unregistered port mapper client";
                goto query_mapping_error;
        }
        if (!iwpm_valid_pid())
                return 0;
        ret = -ENOMEM;
        skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);
        if (!skb) {
                err_str = "Unable to create a nlmsg";
                goto query_mapping_error;
        }
        nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
        nlmsg_request = iwpm_get_nlmsg_request(nlh->nlmsg_seq,
                                               nl_client, GFP_KERNEL);
        if (!nlmsg_request) {
                err_str = "Unable to allocate netlink request";
                goto query_mapping_error;
        }
        msg_seq = atomic_read(&echo_nlmsg_seq);

        /* fill in the query message */
        err_str = "Unable to put attribute of the nlmsg";
        ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq,
                            IWPM_NLA_QUERY_MAPPING_SEQ);
        if (ret)
                goto query_mapping_error;
        ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),
                            &pm_msg->loc_addr, IWPM_NLA_QUERY_LOCAL_ADDR);
        if (ret)
                goto query_mapping_error;
        ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),
                            &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);
        if (ret)
                goto query_mapping_error;
        nlmsg_request->req_buffer = pm_msg;

        ret = ibnl_unicast(skb, nlh, iwpm_user_pid);
        if (ret) {
                skb = NULL; /* skb is freed in the netlink send-op handling */
                err_str = "Unable to send a nlmsg";
                goto query_mapping_error;
        }
        ret = iwpm_wait_complete_req(nlmsg_request);
        return ret;
query_mapping_error:
        pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
        if (skb)
                dev_kfree_skb(skb);
        if (nlmsg_request)
                iwpm_free_nlmsg_request(&nlmsg_request->kref);
        return ret;
}
EXPORT_SYMBOL(iwpm_add_and_query_mapping);

/*
 * iwpm_remove_mapping - Send a netlink remove mapping message
 * to the port mapper
 * nlmsg attributes:
 * [IWPM_NLA_MANAGE_MAPPING_SEQ]
 * [IWPM_NLA_MANAGE_ADDR]
 */
int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
{
        struct sk_buff *skb = NULL;
        struct nlmsghdr *nlh;
        u32 msg_seq;
        const char *err_str = "";
        int ret = -EINVAL;

        if (!iwpm_valid_client(nl_client)) {
                err_str = "Invalid port mapper client";
                goto remove_mapping_error;
        }
        if (!iwpm_registered_client(nl_client)) {
                err_str = "Unregistered port mapper client";
                goto remove_mapping_error;
        }
        if (!iwpm_valid_pid())
                return 0;
        skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);
        if (!skb) {
                ret = -ENOMEM;
                err_str = "Unable to create a nlmsg";
                goto remove_mapping_error;
        }
        msg_seq = atomic_read(&echo_nlmsg_seq);
        nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
        err_str = "Unable to put attribute of the nlmsg";
        ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq,
                            IWPM_NLA_MANAGE_MAPPING_SEQ);
        if (ret)
                goto remove_mapping_error;
        ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),
                            local_addr, IWPM_NLA_MANAGE_ADDR);
        if (ret)
                goto remove_mapping_error;

        ret = ibnl_unicast(skb, nlh, iwpm_user_pid);
        if (ret) {
                skb = NULL; /* skb is freed in the netlink send-op handling */
                iwpm_user_pid = IWPM_PID_UNDEFINED;
                err_str = "Unable to send a nlmsg";
                goto remove_mapping_error;
        }
        iwpm_print_sockaddr(local_addr,
                            "remove_mapping: Local sockaddr:");
        return 0;
remove_mapping_error:
        pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
        if (skb)
                dev_kfree_skb_any(skb);
        return ret;
}
EXPORT_SYMBOL(iwpm_remove_mapping);

/* netlink attribute policy for the received response to register pid request */
static const struct nla_policy resp_reg_policy[IWPM_NLA_RREG_PID_MAX] = {
        [IWPM_NLA_RREG_PID_SEQ]    = { .type = NLA_U32 },
        [IWPM_NLA_RREG_IBDEV_NAME] = { .type = NLA_STRING,
                                       .len = IWPM_DEVNAME_SIZE - 1 },
        [IWPM_NLA_RREG_ULIB_NAME]  = { .type = NLA_STRING,
                                       .len = IWPM_ULIBNAME_SIZE - 1 },
        [IWPM_NLA_RREG_ULIB_VER]   = { .type = NLA_U16 },
        [IWPM_NLA_RREG_PID_ERR]    = { .type = NLA_U16 }
};

/*
 * iwpm_register_pid_cb - Process a port mapper response to
 * iwpm_register_pid()
 */
int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct iwpm_nlmsg_request *nlmsg_request = NULL;
        struct nlattr *nltb[IWPM_NLA_RREG_PID_MAX];
        struct iwpm_dev_data *pm_msg;
        char *dev_name, *iwpm_name;
        u32 msg_seq;
        u8 nl_client;
        u16 iwpm_version;
        const char *msg_type = "Register Pid response";

        if (iwpm_parse_nlmsg(cb, IWPM_NLA_RREG_PID_MAX,
                             resp_reg_policy, nltb, msg_type))
                return -EINVAL;

        msg_seq = nla_get_u32(nltb[IWPM_NLA_RREG_PID_SEQ]);
        nlmsg_request = iwpm_find_nlmsg_request(msg_seq);
        if (!nlmsg_request) {
                pr_info("%s: Could not find a matching request (seq = %u)\n",
                        __func__, msg_seq);
                return -EINVAL;
        }
        pm_msg = nlmsg_request->req_buffer;
        nl_client = nlmsg_request->nl_client;
        dev_name = (char *)nla_data(nltb[IWPM_NLA_RREG_IBDEV_NAME]);
        iwpm_name = (char *)nla_data(nltb[IWPM_NLA_RREG_ULIB_NAME]);
        iwpm_version = nla_get_u16(nltb[IWPM_NLA_RREG_ULIB_VER]);

        /* check device name, ulib name and version */
        if (strcmp(pm_msg->dev_name, dev_name) ||
            strcmp(iwpm_ulib_name, iwpm_name) ||
            iwpm_version != iwpm_ulib_version) {

                pr_info("%s: Incorrect info (dev = %s name = %s version = %d)\n",
                        __func__, dev_name, iwpm_name, iwpm_version);
                nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
                goto register_pid_response_exit;
        }
        iwpm_user_pid = cb->nlh->nlmsg_pid;
        atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
        pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
                 __func__, iwpm_user_pid);
        if (iwpm_valid_client(nl_client))
                iwpm_set_registered(nl_client, 1);
register_pid_response_exit:
        nlmsg_request->request_done = 1;
        /* always for found nlmsg_request */
        kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
        barrier();
        wake_up(&nlmsg_request->waitq);
        return 0;
}
EXPORT_SYMBOL(iwpm_register_pid_cb);

/* netlink attribute policy for the received response to add mapping request */
static const struct nla_policy resp_add_policy[IWPM_NLA_RMANAGE_MAPPING_MAX] = {
        [IWPM_NLA_MANAGE_MAPPING_SEQ]     = { .type = NLA_U32 },
        [IWPM_NLA_MANAGE_ADDR]            = { .len = sizeof(struct sockaddr_storage) },
        [IWPM_NLA_MANAGE_MAPPED_LOC_ADDR] = { .len = sizeof(struct sockaddr_storage) },
        [IWPM_NLA_RMANAGE_MAPPING_ERR]    = { .type = NLA_U16 }
};

/*
 * iwpm_add_mapping_cb - Process a port mapper response to
 * iwpm_add_mapping()
 */
int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct iwpm_sa_data *pm_msg;
        struct iwpm_nlmsg_request *nlmsg_request = NULL;
        struct nlattr *nltb[IWPM_NLA_RMANAGE_MAPPING_MAX];
        struct sockaddr_storage *local_sockaddr;
        struct sockaddr_storage *mapped_sockaddr;
        const char *msg_type;
        u32 msg_seq;

        msg_type = "Add Mapping response";
        if (iwpm_parse_nlmsg(cb, IWPM_NLA_RMANAGE_MAPPING_MAX,
                             resp_add_policy, nltb, msg_type))
                return -EINVAL;

        atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);

        msg_seq = nla_get_u32(nltb[IWPM_NLA_MANAGE_MAPPING_SEQ]);
        nlmsg_request = iwpm_find_nlmsg_request(msg_seq);
        if (!nlmsg_request) {
                pr_info("%s: Could not find a matching request (seq = %u)\n",
                        __func__, msg_seq);
                return -EINVAL;
        }
        pm_msg = nlmsg_request->req_buffer;
        local_sockaddr = (struct sockaddr_storage *)
                         nla_data(nltb[IWPM_NLA_MANAGE_ADDR]);
        mapped_sockaddr = (struct sockaddr_storage *)
                          nla_data(nltb[IWPM_NLA_MANAGE_MAPPED_LOC_ADDR]);

        if (iwpm_compare_sockaddr(local_sockaddr, &pm_msg->loc_addr)) {
                nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
                goto add_mapping_response_exit;
        }
        if (mapped_sockaddr->ss_family != local_sockaddr->ss_family) {
                pr_info("%s: Sockaddr family doesn't match the requested one\n",
                        __func__);
                nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
                goto add_mapping_response_exit;
        }
        memcpy(&pm_msg->mapped_loc_addr, mapped_sockaddr,
               sizeof(*mapped_sockaddr));
        iwpm_print_sockaddr(&pm_msg->loc_addr,
                            "add_mapping: Local sockaddr:");
        iwpm_print_sockaddr(&pm_msg->mapped_loc_addr,
                            "add_mapping: Mapped local sockaddr:");

add_mapping_response_exit:
        nlmsg_request->request_done = 1;
        /* always for found request */
        kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
        barrier();
        wake_up(&nlmsg_request->waitq);
        return 0;
}
EXPORT_SYMBOL(iwpm_add_mapping_cb);

/* netlink attribute policy for the response to add and query mapping request */
static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
        [IWPM_NLA_QUERY_MAPPING_SEQ]      = { .type = NLA_U32 },
        [IWPM_NLA_QUERY_LOCAL_ADDR]       = { .len = sizeof(struct sockaddr_storage) },
        [IWPM_NLA_QUERY_REMOTE_ADDR]      = { .len = sizeof(struct sockaddr_storage) },
        [IWPM_NLA_RQUERY_MAPPED_LOC_ADDR] = { .len = sizeof(struct sockaddr_storage) },
        [IWPM_NLA_RQUERY_MAPPED_REM_ADDR] = { .len = sizeof(struct sockaddr_storage) },
        [IWPM_NLA_RQUERY_MAPPING_ERR]     = { .type = NLA_U16 }
};

/*
 * iwpm_add_and_query_mapping_cb - Process a port mapper response to
 * iwpm_add_and_query_mapping()
 */
int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
                                  struct netlink_callback *cb)
{
        struct iwpm_sa_data *pm_msg;
        struct iwpm_nlmsg_request *nlmsg_request = NULL;
        struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX];
        struct sockaddr_storage *local_sockaddr, *remote_sockaddr;
        struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr;
        const char *msg_type;
        u32 msg_seq;
        u16 err_code;

        msg_type = "Query Mapping response";
        if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX,
                             resp_query_policy, nltb, msg_type))
                return -EINVAL;
        atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);

        msg_seq = nla_get_u32(nltb[IWPM_NLA_QUERY_MAPPING_SEQ]);
        nlmsg_request = iwpm_find_nlmsg_request(msg_seq);
        if (!nlmsg_request) {
                pr_info("%s: Could not find a matching request (seq = %u)\n",
                        __func__, msg_seq);
                return -EINVAL;
        }
        pm_msg = nlmsg_request->req_buffer;
        local_sockaddr = (struct sockaddr_storage *)
                         nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);
        remote_sockaddr = (struct sockaddr_storage *)
                          nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);
        mapped_loc_sockaddr = (struct sockaddr_storage *)
                              nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);
        mapped_rem_sockaddr = (struct sockaddr_storage *)
                              nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]);

        err_code = nla_get_u16(nltb[IWPM_NLA_RQUERY_MAPPING_ERR]);
        if (err_code == IWPM_REMOTE_QUERY_REJECT) {
                pr_info("%s: Received a Reject (pid = %u, echo seq = %u)\n",
                        __func__, cb->nlh->nlmsg_pid, msg_seq);
                nlmsg_request->err_code = IWPM_REMOTE_QUERY_REJECT;
        }
        if (iwpm_compare_sockaddr(local_sockaddr, &pm_msg->loc_addr) ||
            iwpm_compare_sockaddr(remote_sockaddr, &pm_msg->rem_addr)) {
                pr_info("%s: Incorrect local sockaddr\n", __func__);
                nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
                goto query_mapping_response_exit;
        }
        if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family ||
            mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) {
                pr_info("%s: Sockaddr family doesn't match the requested one\n",
                        __func__);
                nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
                goto query_mapping_response_exit;
        }
        memcpy(&pm_msg->mapped_loc_addr, mapped_loc_sockaddr,
               sizeof(*mapped_loc_sockaddr));
        memcpy(&pm_msg->mapped_rem_addr, mapped_rem_sockaddr,
               sizeof(*mapped_rem_sockaddr));

        iwpm_print_sockaddr(&pm_msg->loc_addr,
                            "query_mapping: Local sockaddr:");
        iwpm_print_sockaddr(&pm_msg->mapped_loc_addr,
                            "query_mapping: Mapped local sockaddr:");
        iwpm_print_sockaddr(&pm_msg->rem_addr,
                            "query_mapping: Remote sockaddr:");
        iwpm_print_sockaddr(&pm_msg->mapped_rem_addr,
                            "query_mapping: Mapped remote sockaddr:");
query_mapping_response_exit:
        nlmsg_request->request_done = 1;
        /* always for found request */
        kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
        barrier();
        wake_up(&nlmsg_request->waitq);
        return 0;
}
EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);

/* netlink attribute policy for the received request for mapping info */
static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
        [IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING,
                                         .len = IWPM_ULIBNAME_SIZE - 1 },
        [IWPM_NLA_MAPINFO_ULIB_VER]  = { .type = NLA_U16 }
};

/*
 * iwpm_mapping_info_cb - Process a port mapper request for mapping info
 */
int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX];
        const char *msg_type = "Mapping Info response";
        int iwpm_pid;
        u8 nl_client;
        char *iwpm_name;
        u16 iwpm_version;
        int ret = -EINVAL;

        if (iwpm_parse_nlmsg(cb, IWPM_NLA_MAPINFO_REQ_MAX,
                             resp_mapinfo_policy, nltb, msg_type)) {
                pr_info("%s: Unable to parse nlmsg\n", __func__);
                return ret;
        }
        iwpm_name = (char *)nla_data(nltb[IWPM_NLA_MAPINFO_ULIB_NAME]);
        iwpm_version = nla_get_u16(nltb[IWPM_NLA_MAPINFO_ULIB_VER]);
        if (strcmp(iwpm_ulib_name, iwpm_name) ||
            iwpm_version != iwpm_ulib_version) {
                pr_info("%s: Invalid port mapper name = %s version = %d\n",
                        __func__, iwpm_name, iwpm_version);
                return ret;
        }
        nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
        if (!iwpm_valid_client(nl_client)) {
                pr_info("%s: Invalid port mapper client = %d\n",
                        __func__, nl_client);
                return ret;
        }
        iwpm_set_registered(nl_client, 0);
        atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
        if (!iwpm_mapinfo_available())
                return 0;
        iwpm_pid = cb->nlh->nlmsg_pid;
        pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
                 __func__, iwpm_pid);
        ret = iwpm_send_mapinfo(nl_client, iwpm_pid);
        return ret;
}
EXPORT_SYMBOL(iwpm_mapping_info_cb);

/* netlink attribute policy for the received mapping info ack */
static const struct nla_policy ack_mapinfo_policy[IWPM_NLA_MAPINFO_NUM_MAX] = {
        [IWPM_NLA_MAPINFO_SEQ]      = { .type = NLA_U32 },
        [IWPM_NLA_MAPINFO_SEND_NUM] = { .type = NLA_U32 },
        [IWPM_NLA_MAPINFO_ACK_NUM]  = { .type = NLA_U32 }
};

/*
 * iwpm_ack_mapping_info_cb - Process a port mapper ack for
 * the provided mapping info records
 */
int iwpm_ack_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct nlattr *nltb[IWPM_NLA_MAPINFO_NUM_MAX];
        u32 mapinfo_send, mapinfo_ack;
        const char *msg_type = "Mapping Info Ack";

        if (iwpm_parse_nlmsg(cb, IWPM_NLA_MAPINFO_NUM_MAX,
                             ack_mapinfo_policy, nltb, msg_type))
                return -EINVAL;
        mapinfo_send = nla_get_u32(nltb[IWPM_NLA_MAPINFO_SEND_NUM]);
        mapinfo_ack = nla_get_u32(nltb[IWPM_NLA_MAPINFO_ACK_NUM]);
        if (mapinfo_ack != mapinfo_send)
                pr_info("%s: Invalid mapinfo number (sent = %u ack-ed = %u)\n",
                        __func__, mapinfo_send, mapinfo_ack);
        atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
        return 0;
}
EXPORT_SYMBOL(iwpm_ack_mapping_info_cb);

/* netlink attribute policy for the received port mapper error message */
static const struct nla_policy map_error_policy[IWPM_NLA_ERR_MAX] = {
        [IWPM_NLA_ERR_SEQ]  = { .type = NLA_U32 },
        [IWPM_NLA_ERR_CODE] = { .type = NLA_U16 },
};

/*
 * iwpm_mapping_error_cb - Process a port mapper error message
 */
int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct iwpm_nlmsg_request *nlmsg_request = NULL;
        int nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
        struct nlattr *nltb[IWPM_NLA_ERR_MAX];
        u32 msg_seq;
        u16 err_code;
        const char *msg_type = "Mapping Error Msg";

        if (iwpm_parse_nlmsg(cb, IWPM_NLA_ERR_MAX,
                             map_error_policy, nltb, msg_type))
                return -EINVAL;

        msg_seq = nla_get_u32(nltb[IWPM_NLA_ERR_SEQ]);
        err_code = nla_get_u16(nltb[IWPM_NLA_ERR_CODE]);
        pr_info("%s: Received msg seq = %u err code = %u client = %d\n",
                __func__, msg_seq, err_code, nl_client);
        /* look for nlmsg_request */
        nlmsg_request = iwpm_find_nlmsg_request(msg_seq);
        if (!nlmsg_request) {
                /* not all errors have associated requests */
                pr_debug("Could not find matching req (seq = %u)\n", msg_seq);
                return 0;
        }
        atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
        nlmsg_request->err_code = err_code;
        nlmsg_request->request_done = 1;
        /* always for found request */
        kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
        barrier();
        wake_up(&nlmsg_request->waitq);
        return 0;
}
EXPORT_SYMBOL(iwpm_mapping_error_cb);
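
iwpm_msg.c above is the request/response half of the new port-mapper interface. A hedged sketch of how an iWARP driver might drive it, using only the signatures introduced in this file and in iwpm_util.h (the device/interface names, the fallback behavior, and the nl_client value are illustrative assumptions, not code from this series):

/* Illustrative sketch only: typical call sequence for a driver client.
 * "nl_client" is the driver's RDMA netlink client index (assumed).
 */
#include <rdma/iw_portmap.h>

static int example_map_connection(struct sockaddr_storage *loc,
                                  struct sockaddr_storage *rem,
                                  u8 nl_client)
{
        struct iwpm_dev_data pm_reg = {};
        struct iwpm_sa_data pm_msg = {};
        int err;

        /* once per driver: mark this netlink client valid */
        err = iwpm_init(nl_client);
        if (err)
                return err;

        /* locate the user-space port mapper daemon via netlink multicast */
        strlcpy(pm_reg.dev_name, "hypothetical_ibdev", IWPM_DEVNAME_SIZE);
        strlcpy(pm_reg.if_name, "eth0", IWPM_IFNAME_SIZE);
        err = iwpm_register_pid(&pm_reg, nl_client);
        if (err)
                pr_debug("no port mapper daemon; mapped addrs stay unset\n");

        /* ask the daemon for mapped local/remote addresses (blocks) */
        pm_msg.loc_addr = *loc;
        pm_msg.rem_addr = *rem;
        err = iwpm_add_and_query_mapping(&pm_msg, nl_client);
        if (err)
                return err;

        /* remember the mapping so it can be replayed to a restarted daemon */
        err = iwpm_create_mapinfo(&pm_msg.loc_addr, &pm_msg.mapped_loc_addr,
                                  nl_client);
        /* ... connect using pm_msg.mapped_loc_addr / mapped_rem_addr ... */
        return err;
}

/* teardown would call iwpm_remove_mapping(&loc, nl_client),
 * iwpm_remove_mapinfo(), and finally iwpm_exit(nl_client).
 */
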
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
new file mode 100644
index 000000000000..69e9f84c1605
--- /dev/null
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -0,0 +1,607 @@
/*
 * Copyright (c) 2014 Chelsio, Inc. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iwpm_util.h"

#define IWPM_HASH_BUCKET_SIZE 512
#define IWPM_HASH_BUCKET_MASK (IWPM_HASH_BUCKET_SIZE - 1)

static LIST_HEAD(iwpm_nlmsg_req_list);
static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);

static struct hlist_head *iwpm_hash_bucket;
static DEFINE_SPINLOCK(iwpm_mapinfo_lock);

static DEFINE_MUTEX(iwpm_admin_lock);
static struct iwpm_admin_data iwpm_admin;

int iwpm_init(u8 nl_client)
{
        if (iwpm_valid_client(nl_client))
                return -EINVAL;
        mutex_lock(&iwpm_admin_lock);
        if (atomic_read(&iwpm_admin.refcount) == 0) {
                iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE *
                                           sizeof(struct hlist_head), GFP_KERNEL);
                if (!iwpm_hash_bucket) {
                        mutex_unlock(&iwpm_admin_lock);
                        pr_err("%s Unable to create mapinfo hash table\n", __func__);
                        return -ENOMEM;
                }
        }
        atomic_inc(&iwpm_admin.refcount);
        mutex_unlock(&iwpm_admin_lock);
        iwpm_set_valid(nl_client, 1);
        return 0;
}
EXPORT_SYMBOL(iwpm_init);

static void free_hash_bucket(void);

int iwpm_exit(u8 nl_client)
{
        if (!iwpm_valid_client(nl_client))
                return -EINVAL;
        mutex_lock(&iwpm_admin_lock);
        if (atomic_read(&iwpm_admin.refcount) == 0) {
                mutex_unlock(&iwpm_admin_lock);
                pr_err("%s Incorrect usage - negative refcount\n", __func__);
                return -EINVAL;
        }
        if (atomic_dec_and_test(&iwpm_admin.refcount)) {
                free_hash_bucket();
                pr_debug("%s: Mapinfo hash table is destroyed\n", __func__);
        }
        mutex_unlock(&iwpm_admin_lock);
        iwpm_set_valid(nl_client, 0);
        return 0;
}
EXPORT_SYMBOL(iwpm_exit);

static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *,
                                               struct sockaddr_storage *);

int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
                        struct sockaddr_storage *mapped_sockaddr,
                        u8 nl_client)
{
        struct hlist_head *hash_bucket_head;
        struct iwpm_mapping_info *map_info;
        unsigned long flags;

        if (!iwpm_valid_client(nl_client))
                return -EINVAL;
        map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
        if (!map_info) {
                pr_err("%s: Unable to allocate a mapping info\n", __func__);
                return -ENOMEM;
        }
        memcpy(&map_info->local_sockaddr, local_sockaddr,
               sizeof(struct sockaddr_storage));
        memcpy(&map_info->mapped_sockaddr, mapped_sockaddr,
               sizeof(struct sockaddr_storage));
        map_info->nl_client = nl_client;

        spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
        if (iwpm_hash_bucket) {
                hash_bucket_head = get_hash_bucket_head(
                                        &map_info->local_sockaddr,
                                        &map_info->mapped_sockaddr);
                hlist_add_head(&map_info->hlist_node, hash_bucket_head);
        }
        spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
        return 0;
}
EXPORT_SYMBOL(iwpm_create_mapinfo);

int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
                        struct sockaddr_storage *mapped_local_addr)
{
        struct hlist_node *tmp_hlist_node;
        struct hlist_head *hash_bucket_head;
        struct iwpm_mapping_info *map_info = NULL;
        unsigned long flags;
        int ret = -EINVAL;

        spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
        if (iwpm_hash_bucket) {
                hash_bucket_head = get_hash_bucket_head(
                                        local_sockaddr,
                                        mapped_local_addr);
                hlist_for_each_entry_safe(map_info, tmp_hlist_node,
                                          hash_bucket_head, hlist_node) {

                        if (!iwpm_compare_sockaddr(&map_info->mapped_sockaddr,
                                                   mapped_local_addr)) {

                                hlist_del_init(&map_info->hlist_node);
                                kfree(map_info);
                                ret = 0;
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
        return ret;
}
EXPORT_SYMBOL(iwpm_remove_mapinfo);

static void free_hash_bucket(void)
{
        struct hlist_node *tmp_hlist_node;
        struct iwpm_mapping_info *map_info;
        unsigned long flags;
        int i;

        /* remove all the mapinfo data from the list */
        spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
        for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
                hlist_for_each_entry_safe(map_info, tmp_hlist_node,
                                          &iwpm_hash_bucket[i], hlist_node) {

                        hlist_del_init(&map_info->hlist_node);
                        kfree(map_info);
                }
        }
        /* free the hash list */
        kfree(iwpm_hash_bucket);
        iwpm_hash_bucket = NULL;
        spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
}

struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
                                                  u8 nl_client, gfp_t gfp)
{
        struct iwpm_nlmsg_request *nlmsg_request = NULL;
        unsigned long flags;

        nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
        if (!nlmsg_request) {
                pr_err("%s Unable to allocate a nlmsg_request\n", __func__);
                return NULL;
        }
        spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
        list_add_tail(&nlmsg_request->inprocess_list, &iwpm_nlmsg_req_list);
        spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);

        kref_init(&nlmsg_request->kref);
        kref_get(&nlmsg_request->kref);
        nlmsg_request->nlmsg_seq = nlmsg_seq;
        nlmsg_request->nl_client = nl_client;
        nlmsg_request->request_done = 0;
        nlmsg_request->err_code = 0;
        return nlmsg_request;
}

void iwpm_free_nlmsg_request(struct kref *kref)
{
        struct iwpm_nlmsg_request *nlmsg_request;
        unsigned long flags;

        nlmsg_request = container_of(kref, struct iwpm_nlmsg_request, kref);

        spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
        list_del_init(&nlmsg_request->inprocess_list);
        spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);

        if (!nlmsg_request->request_done)
                pr_debug("%s Freeing incomplete nlmsg request (seq = %u).\n",
                         __func__, nlmsg_request->nlmsg_seq);
        kfree(nlmsg_request);
}

struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq)
{
        struct iwpm_nlmsg_request *nlmsg_request;
        struct iwpm_nlmsg_request *found_request = NULL;
        unsigned long flags;

        spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
        list_for_each_entry(nlmsg_request, &iwpm_nlmsg_req_list,
                            inprocess_list) {
                if (nlmsg_request->nlmsg_seq == echo_seq) {
                        found_request = nlmsg_request;
                        kref_get(&nlmsg_request->kref);
                        break;
                }
        }
        spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);
        return found_request;
}

int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request)
{
        int ret;

        init_waitqueue_head(&nlmsg_request->waitq);

        ret = wait_event_timeout(nlmsg_request->waitq,
                                 (nlmsg_request->request_done != 0), IWPM_NL_TIMEOUT);
        if (!ret) {
                ret = -EINVAL;
                pr_info("%s: Timeout %d sec for netlink request (seq = %u)\n",
                        __func__, (IWPM_NL_TIMEOUT/HZ), nlmsg_request->nlmsg_seq);
        } else {
                ret = nlmsg_request->err_code;
        }
        kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
        return ret;
}

int iwpm_get_nlmsg_seq(void)
{
        return atomic_inc_return(&iwpm_admin.nlmsg_seq);
}

int iwpm_valid_client(u8 nl_client)
{
        if (nl_client >= RDMA_NL_NUM_CLIENTS)
                return 0;
        return iwpm_admin.client_list[nl_client];
}

void iwpm_set_valid(u8 nl_client, int valid)
{
        if (nl_client >= RDMA_NL_NUM_CLIENTS)
                return;
        iwpm_admin.client_list[nl_client] = valid;
}

/* caller must ensure nl_client is valid */
int iwpm_registered_client(u8 nl_client)
{
        return iwpm_admin.reg_list[nl_client];
}

/* caller must ensure nl_client is valid */
void iwpm_set_registered(u8 nl_client, int reg)
{
        iwpm_admin.reg_list[nl_client] = reg;
}

int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
                          struct sockaddr_storage *b_sockaddr)
{
        if (a_sockaddr->ss_family != b_sockaddr->ss_family)
                return 1;
        if (a_sockaddr->ss_family == AF_INET) {
                struct sockaddr_in *a4_sockaddr =
                        (struct sockaddr_in *)a_sockaddr;
                struct sockaddr_in *b4_sockaddr =
                        (struct sockaddr_in *)b_sockaddr;
                if (!memcmp(&a4_sockaddr->sin_addr,
                            &b4_sockaddr->sin_addr, sizeof(struct in_addr))
                    && a4_sockaddr->sin_port == b4_sockaddr->sin_port)
                        return 0;

        } else if (a_sockaddr->ss_family == AF_INET6) {
                struct sockaddr_in6 *a6_sockaddr =
                        (struct sockaddr_in6 *)a_sockaddr;
                struct sockaddr_in6 *b6_sockaddr =
                        (struct sockaddr_in6 *)b_sockaddr;
                if (!memcmp(&a6_sockaddr->sin6_addr,
                            &b6_sockaddr->sin6_addr, sizeof(struct in6_addr))
                    && a6_sockaddr->sin6_port == b6_sockaddr->sin6_port)
                        return 0;

        } else {
                pr_err("%s: Invalid sockaddr family\n", __func__);
        }
        return 1;
}

struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh,
                                  int nl_client)
{
        struct sk_buff *skb = NULL;

        skb = dev_alloc_skb(NLMSG_GOODSIZE);
        if (!skb) {
                pr_err("%s Unable to allocate skb\n", __func__);
                goto create_nlmsg_exit;
        }
        if (!(ibnl_put_msg(skb, nlh, 0, 0, nl_client, nl_op,
                           NLM_F_REQUEST))) {
                pr_warn("%s: Unable to put the nlmsg header\n", __func__);
                dev_kfree_skb(skb);
                skb = NULL;
        }
create_nlmsg_exit:
        return skb;
}

int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
                     const struct nla_policy *nlmsg_policy,
                     struct nlattr *nltb[], const char *msg_type)
{
        int nlh_len = 0;
        int ret;
        const char *err_str = "";

        ret = nlmsg_validate(cb->nlh, nlh_len, policy_max-1, nlmsg_policy);
        if (ret) {
                err_str = "Invalid attribute";
                goto parse_nlmsg_error;
        }
        ret = nlmsg_parse(cb->nlh, nlh_len, nltb, policy_max-1, nlmsg_policy);
        if (ret) {
                err_str = "Unable to parse the nlmsg";
                goto parse_nlmsg_error;
        }
        ret = iwpm_validate_nlmsg_attr(nltb, policy_max);
        if (ret) {
                err_str = "Invalid NULL attribute";
                goto parse_nlmsg_error;
        }
        return 0;
parse_nlmsg_error:
        pr_warn("%s: %s (msg type %s ret = %d)\n",
                __func__, err_str, msg_type, ret);
        return ret;
}

void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg)
{
        struct sockaddr_in6 *sockaddr_v6;
        struct sockaddr_in *sockaddr_v4;

        switch (sockaddr->ss_family) {
        case AF_INET:
                sockaddr_v4 = (struct sockaddr_in *)sockaddr;
                pr_debug("%s IPV4 %pI4: %u(0x%04X)\n",
                         msg, &sockaddr_v4->sin_addr,
                         ntohs(sockaddr_v4->sin_port),
                         ntohs(sockaddr_v4->sin_port));
                break;
        case AF_INET6:
                sockaddr_v6 = (struct sockaddr_in6 *)sockaddr;
                pr_debug("%s IPV6 %pI6: %u(0x%04X)\n",
                         msg, &sockaddr_v6->sin6_addr,
                         ntohs(sockaddr_v6->sin6_port),
                         ntohs(sockaddr_v6->sin6_port));
                break;
        default:
                break;
        }
}

static u32 iwpm_ipv6_jhash(struct sockaddr_in6 *ipv6_sockaddr)
{
        u32 ipv6_hash = jhash(&ipv6_sockaddr->sin6_addr, sizeof(struct in6_addr), 0);
        u32 hash = jhash_2words(ipv6_hash, (__force u32) ipv6_sockaddr->sin6_port, 0);
        return hash;
}

static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr)
{
        u32 ipv4_hash = jhash(&ipv4_sockaddr->sin_addr, sizeof(struct in_addr), 0);
        u32 hash = jhash_2words(ipv4_hash, (__force u32) ipv4_sockaddr->sin_port, 0);
        return hash;
}

static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage
                                               *local_sockaddr,
                                               struct sockaddr_storage
                                               *mapped_sockaddr)
{
        u32 local_hash, mapped_hash, hash;

        if (local_sockaddr->ss_family == AF_INET) {
                local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr);
                mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr);

        } else if (local_sockaddr->ss_family == AF_INET6) {
                local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr);
                mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr);
        } else {
                pr_err("%s: Invalid sockaddr family\n", __func__);
                return NULL;
        }

        if (local_hash == mapped_hash) /* if port mapper isn't available */
                hash = local_hash;
        else
                hash = jhash_2words(local_hash, mapped_hash, 0);

        return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK];
}

static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
{
        struct sk_buff *skb = NULL;
        struct nlmsghdr *nlh;
        u32 msg_seq;
        const char *err_str = "";
        int ret = -EINVAL;

        skb = iwpm_create_nlmsg(RDMA_NL_IWPM_MAPINFO_NUM, &nlh, nl_client);
        if (!skb) {
                err_str = "Unable to create a nlmsg";
                goto mapinfo_num_error;
        }
        nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
        msg_seq = 0;
        err_str = "Unable to put attribute of mapinfo number nlmsg";
        ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_MAPINFO_SEQ);
        if (ret)
                goto mapinfo_num_error;
        ret = ibnl_put_attr(skb, nlh, sizeof(u32),
                            &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
        if (ret)
                goto mapinfo_num_error;
        ret = ibnl_unicast(skb, nlh, iwpm_pid);
        if (ret) {
                skb = NULL;
                err_str = "Unable to send a nlmsg";
                goto mapinfo_num_error;
        }
        pr_debug("%s: Sent mapping number = %d\n", __func__, mapping_num);
        return 0;
mapinfo_num_error:
        pr_info("%s: %s\n", __func__, err_str);
        if (skb)
                dev_kfree_skb(skb);
        return ret;
}

static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
{
        struct nlmsghdr *nlh = NULL;
        int ret = 0;

        if (!skb)
                return ret;
        if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
                           RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
                pr_warn("%s Unable to put NLMSG_DONE\n", __func__);
                return -ENOMEM;
        }
        nlh->nlmsg_type = NLMSG_DONE;
        ret = ibnl_unicast(skb, (struct nlmsghdr *)skb->data, iwpm_pid);
        if (ret)
                pr_warn("%s Unable to send a nlmsg\n", __func__);
        return ret;
}

int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
{
        struct iwpm_mapping_info *map_info;
        struct sk_buff *skb = NULL;
        struct nlmsghdr *nlh;
        int skb_num = 0, mapping_num = 0;
        int i = 0, nlmsg_bytes = 0;
        unsigned long flags;
        const char *err_str = "";
        int ret = 0;

        skb = dev_alloc_skb(NLMSG_GOODSIZE);
        if (!skb) {
                ret = -ENOMEM;
                err_str = "Unable to allocate skb";
                goto send_mapping_info_exit;
        }
        skb_num++;
        spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
        for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
                hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
                                     hlist_node) {
                        if (map_info->nl_client != nl_client)
                                continue;
                        nlh = NULL;
                        if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
                                           RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
                                ret = -ENOMEM;
                                err_str = "Unable to put the nlmsg header";
                                goto send_mapping_info_unlock;
                        }
                        err_str = "Unable to put attribute of the nlmsg";
                        ret = ibnl_put_attr(skb, nlh,
                                            sizeof(struct sockaddr_storage),
                                            &map_info->local_sockaddr,
                                            IWPM_NLA_MAPINFO_LOCAL_ADDR);
                        if (ret)
                                goto send_mapping_info_unlock;

                        ret = ibnl_put_attr(skb, nlh,
                                            sizeof(struct sockaddr_storage),
                                            &map_info->mapped_sockaddr,
                                            IWPM_NLA_MAPINFO_MAPPED_ADDR);
                        if (ret)
                                goto send_mapping_info_unlock;

                        iwpm_print_sockaddr(&map_info->local_sockaddr,
                                            "send_mapping_info: Local sockaddr:");
                        iwpm_print_sockaddr(&map_info->mapped_sockaddr,
                                            "send_mapping_info: Mapped local sockaddr:");
                        mapping_num++;
                        nlmsg_bytes += nlh->nlmsg_len;

                        /* check if all mappings can fit in one skb */
                        if (NLMSG_GOODSIZE - nlmsg_bytes < nlh->nlmsg_len * 2) {
                                /* and leave room for NLMSG_DONE */
                                nlmsg_bytes = 0;
                                skb_num++;
                                spin_unlock_irqrestore(&iwpm_mapinfo_lock,
                                                       flags);
                                /* send the skb */
                                ret = send_nlmsg_done(skb, nl_client, iwpm_pid);
                                skb = NULL;
                                if (ret) {
                                        err_str = "Unable to send map info";
                                        goto send_mapping_info_exit;
                                }
                                if (skb_num == IWPM_MAPINFO_SKB_COUNT) {
                                        ret = -ENOMEM;
                                        err_str = "Insufficient skbs for map info";
                                        goto send_mapping_info_exit;
                                }
                                skb = dev_alloc_skb(NLMSG_GOODSIZE);
                                if (!skb) {
                                        ret = -ENOMEM;
                                        err_str = "Unable to allocate skb";
                                        goto send_mapping_info_exit;
                                }
                                spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
                        }
                }
        }
send_mapping_info_unlock:
        spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
send_mapping_info_exit:
        if (ret) {
                pr_warn("%s: %s (ret = %d)\n", __func__, err_str, ret);
                if (skb)
                        dev_kfree_skb(skb);
                return ret;
        }
        send_nlmsg_done(skb, nl_client, iwpm_pid);
        return send_mapinfo_num(mapping_num, nl_client, iwpm_pid);
}

int iwpm_mapinfo_available(void)
{
        unsigned long flags;
        int full_bucket = 0, i = 0;

        spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
        if (iwpm_hash_bucket) {
                for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
                        if (!hlist_empty(&iwpm_hash_bucket[i])) {
                                full_bucket = 1;
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
        return full_bucket;
}
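
The request tracking in iwpm_util.c boils down to: allocate a request keyed by its netlink sequence number, park it on a global list, block the caller with wait_event_timeout(), and let the netlink callback locate the request by the echoed sequence number, record the result, and wake the waiter. A standalone userspace sketch of the same correlation pattern (pthreads; every name here is hypothetical, and the kref-based cleanup the kernel code uses is omitted for brevity):

/* Sketch: correlate blocking requesters with asynchronous completions
 * by sequence number, as iwpm_get_nlmsg_request()/iwpm_find_nlmsg_request()/
 * iwpm_wait_complete_req() do in the kernel.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

struct pending_req {
        struct pending_req *next;
        uint32_t seq;                   /* echoed back by the responder */
        int done;
        int err_code;
        pthread_cond_t cond;
};

static struct pending_req *req_list;
static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;

static struct pending_req *req_alloc(uint32_t seq)
{
        struct pending_req *r = calloc(1, sizeof(*r));

        if (!r)
                return NULL;
        r->seq = seq;
        pthread_cond_init(&r->cond, NULL);
        pthread_mutex_lock(&req_lock);
        r->next = req_list;
        req_list = r;                   /* visible to the completer */
        pthread_mutex_unlock(&req_lock);
        return r;
}

/* responder side: find the request for an echoed seq and complete it */
static void req_complete(uint32_t seq, int err_code)
{
        struct pending_req *r;

        pthread_mutex_lock(&req_lock);
        for (r = req_list; r; r = r->next) {
                if (r->seq == seq) {
                        r->err_code = err_code;
                        r->done = 1;
                        pthread_cond_signal(&r->cond);
                        break;
                }
        }
        pthread_mutex_unlock(&req_lock);
}

/* requester side: block until completed, or give up after 10 seconds */
static int req_wait(struct pending_req *r)
{
        struct timespec ts;
        int ret = 0;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 10;
        pthread_mutex_lock(&req_lock);
        while (!r->done && ret == 0)
                ret = pthread_cond_timedwait(&r->cond, &req_lock, &ts);
        ret = r->done ? r->err_code : -1;       /* -1: timed out */
        pthread_mutex_unlock(&req_lock);
        return ret;
}
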
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
new file mode 100644
index 000000000000..9777c869a140
--- /dev/null
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -0,0 +1,238 @@
/*
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 * Copyright (c) 2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 | #ifndef _IWPM_UTIL_H | ||
34 | #define _IWPM_UTIL_H | ||
35 | |||
36 | #include <linux/module.h> | ||
37 | #include <linux/io.h> | ||
38 | #include <linux/in.h> | ||
39 | #include <linux/in6.h> | ||
40 | #include <linux/spinlock.h> | ||
41 | #include <linux/kernel.h> | ||
42 | #include <linux/netdevice.h> | ||
43 | #include <linux/delay.h> | ||
44 | #include <linux/workqueue.h> | ||
45 | #include <linux/mutex.h> | ||
46 | #include <linux/jhash.h> | ||
47 | #include <linux/kref.h> | ||
48 | #include <net/netlink.h> | ||
49 | #include <linux/errno.h> | ||
50 | #include <rdma/iw_portmap.h> | ||
51 | #include <rdma/rdma_netlink.h> | ||
52 | |||
53 | |||
54 | #define IWPM_NL_RETRANS 3 | ||
55 | #define IWPM_NL_TIMEOUT (10*HZ) | ||
56 | #define IWPM_MAPINFO_SKB_COUNT 20 | ||
57 | |||
58 | #define IWPM_PID_UNDEFINED -1 | ||
59 | #define IWPM_PID_UNAVAILABLE -2 | ||
60 | |||
61 | struct iwpm_nlmsg_request { | ||
62 | struct list_head inprocess_list; | ||
63 | __u32 nlmsg_seq; | ||
64 | void *req_buffer; | ||
65 | u8 nl_client; | ||
66 | u8 request_done; | ||
67 | u16 err_code; | ||
68 | wait_queue_head_t waitq; | ||
69 | struct kref kref; | ||
70 | }; | ||
71 | |||
72 | struct iwpm_mapping_info { | ||
73 | struct hlist_node hlist_node; | ||
74 | struct sockaddr_storage local_sockaddr; | ||
75 | struct sockaddr_storage mapped_sockaddr; | ||
76 | u8 nl_client; | ||
77 | }; | ||
78 | |||
79 | struct iwpm_admin_data { | ||
80 | atomic_t refcount; | ||
81 | atomic_t nlmsg_seq; | ||
82 | int client_list[RDMA_NL_NUM_CLIENTS]; | ||
83 | int reg_list[RDMA_NL_NUM_CLIENTS]; | ||
84 | }; | ||
85 | |||
86 | /** | ||
87 | * iwpm_get_nlmsg_request - Allocate and initialize netlink message request | ||
88 | * @nlmsg_seq: Sequence number of the netlink message | ||
89 | * @nl_client: The index of the netlink client | ||
90 | * @gfp: Indicates how the memory for the request should be allocated | ||
91 | * | ||
92 | * Returns the newly allocated netlink request object if successful, | ||
93 | * otherwise returns NULL | ||
94 | */ | ||
95 | struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq, | ||
96 | u8 nl_client, gfp_t gfp); | ||
97 | |||
98 | /** | ||
99 | * iwpm_free_nlmsg_request - Deallocate netlink message request | ||
100 | * @kref: Holds reference of netlink message request | ||
101 | */ | ||
102 | void iwpm_free_nlmsg_request(struct kref *kref); | ||
103 | |||
104 | /** | ||
105 | * iwpm_find_nlmsg_request - Find netlink message request in the request list | ||
106 | * @echo_seq: Sequence number of the netlink request to find | ||
107 | * | ||
108 | * Returns the found netlink message request, | ||
109 | * or NULL if it is not found | ||
110 | */ | ||
111 | struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq); | ||
112 | |||
113 | /** | ||
114 | * iwpm_wait_complete_req - Block while servicing the netlink request | ||
115 | * @nlmsg_request: Netlink message request to service | ||
116 | * | ||
117 | * Wakes up after the request completes or expires | ||
118 | * Returns 0 if the request completed without error | ||
119 | */ | ||
120 | int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request); | ||
121 | |||
122 | /** | ||
123 | * iwpm_get_nlmsg_seq - Get the sequence number for a netlink | ||
124 | * message to send to the port mapper | ||
125 | * | ||
126 | * Returns the sequence number for the netlink message. | ||
127 | */ | ||
128 | int iwpm_get_nlmsg_seq(void); | ||
129 | |||
130 | /** | ||
131 | * iwpm_valid_client - Check if the port mapper client is valid | ||
132 | * @nl_client: The index of the netlink client | ||
133 | * | ||
134 | * Valid clients need to call iwpm_init() before using | ||
135 | * the port mapper | ||
136 | */ | ||
137 | int iwpm_valid_client(u8 nl_client); | ||
138 | |||
139 | /** | ||
140 | * iwpm_set_valid - Set the port mapper client to valid or not | ||
141 | * @nl_client: The index of the netlink client | ||
142 | * @valid: 1 if valid or 0 if invalid | ||
143 | */ | ||
144 | void iwpm_set_valid(u8 nl_client, int valid); | ||
145 | |||
146 | /** | ||
147 | * iwpm_registered_client - Check if the port mapper client is registered | ||
148 | * @nl_client: The index of the netlink client | ||
149 | * | ||
150 | * Call iwpm_register_pid() to register a client | ||
151 | */ | ||
152 | int iwpm_registered_client(u8 nl_client); | ||
153 | |||
154 | /** | ||
155 | * iwpm_set_registered - Set the port mapper client to registered or not | ||
156 | * @nl_client: The index of the netlink client | ||
157 | * @reg: 1 if registered or 0 if not | ||
158 | */ | ||
159 | void iwpm_set_registered(u8 nl_client, int reg); | ||
160 | |||
161 | /** | ||
162 | * iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of | ||
163 | * a client to the user space port mapper | ||
164 | * @nl_client: The index of the netlink client | ||
165 | * @iwpm_pid: The pid of the user space port mapper | ||
166 | * | ||
167 | * If successful, returns the number of sent mapping info records | ||
168 | */ | ||
169 | int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid); | ||
170 | |||
171 | /** | ||
172 | * iwpm_mapinfo_available - Check if any mapping info records are available | ||
173 | * in the hash table | ||
174 | * | ||
175 | * Returns 1 if mapping information is available, otherwise returns 0 | ||
176 | */ | ||
177 | int iwpm_mapinfo_available(void); | ||
178 | |||
179 | /** | ||
180 | * iwpm_compare_sockaddr - Compare two sockaddr storage structs | ||
181 | * | ||
182 | * Returns 0 if they are holding the same IP/TCP address info, | ||
183 | * otherwise returns 1 | ||
184 | */ | ||
185 | int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr, | ||
186 | struct sockaddr_storage *b_sockaddr); | ||
187 | |||
188 | /** | ||
189 | * iwpm_validate_nlmsg_attr - Check for NULL netlink attributes | ||
190 | * @nltb: Holds the address of each netlink message attribute | ||
191 | * @nla_count: Number of netlink message attributes | ||
192 | * | ||
193 | * Returns error if any of the nla_count attributes is NULL | ||
194 | */ | ||
195 | static inline int iwpm_validate_nlmsg_attr(struct nlattr *nltb[], | ||
196 | int nla_count) | ||
197 | { | ||
198 | int i; | ||
199 | for (i = 1; i < nla_count; i++) { | ||
200 | if (!nltb[i]) | ||
201 | return -EINVAL; | ||
202 | } | ||
203 | return 0; | ||
204 | } | ||
205 | |||
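The loop above starts at index 1 because netlink attribute type 0 (NLA_UNSPEC) is unused by convention: nla_parse() only ever populates tb[1]..tb[maxtype], so nltb[0] never carries data. A minimal sketch of the intended call pattern, where the IWPM_NLA_REG_PID_MAX bound and reg_pid_policy table are illustrative stand-ins rather than the real definitions in iwpm_msg.c:

	struct nlattr *nltb[IWPM_NLA_REG_PID_MAX];	/* hypothetical bound */

	if (iwpm_parse_nlmsg(cb, IWPM_NLA_REG_PID_MAX, reg_pid_policy,
			     nltb, "register pid"))
		return -EINVAL;
	/* on success nltb[1]..nltb[IWPM_NLA_REG_PID_MAX - 1] are all
	 * non-NULL, exactly the condition iwpm_validate_nlmsg_attr()
	 * enforces */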
206 | /** | ||
207 | * iwpm_create_nlmsg - Allocate skb and form a netlink message | ||
208 | * @nl_op: Netlink message opcode | ||
209 | * @nlh: Holds address of the netlink message header in skb | ||
210 | * @nl_client: The index of the netlink client | ||
211 | * | ||
212 | * Returns the newly allocated skb, or NULL if the tailroom of the skb | ||
213 | * is insufficient to store the message header and payload | ||
214 | */ | ||
215 | struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh, | ||
216 | int nl_client); | ||
217 | |||
218 | /** | ||
219 | * iwpm_parse_nlmsg - Validate and parse the received netlink message | ||
220 | * @cb: Netlink callback structure | ||
221 | * @policy_max: Maximum attribute type to be expected | ||
222 | * @nlmsg_policy: Validation policy | ||
223 | * @nltb: Array to store policy_max parsed elements | ||
224 | * @msg_type: Type of netlink message | ||
225 | * | ||
226 | * Returns 0 on success or a negative error code | ||
227 | */ | ||
228 | int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max, | ||
229 | const struct nla_policy *nlmsg_policy, | ||
230 | struct nlattr *nltb[], const char *msg_type); | ||
231 | |||
232 | /** | ||
233 | * iwpm_print_sockaddr - Print IPv4/IPv6 address and TCP port | ||
234 | * @sockaddr: Socket address to print | ||
235 | * @msg: Message to print | ||
236 | */ | ||
237 | void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg); | ||
238 | #endif | ||
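Taken together with the client entry points declared in <rdma/iw_portmap.h>, these helpers are meant to be driven the way the cxgb4 changes later in this patch drive them. A condensed sketch of the connect-side flow, with error handling elided (RDMA_NL_C4IW stands in for the caller's netlink client index):

	struct iwpm_dev_data reg_msg;
	struct iwpm_sa_data pm_msg;

	/* advertise device and interface names to the user space mapper */
	c4iw_form_reg_msg(dev, &reg_msg);
	iwpm_register_pid(&reg_msg, RDMA_NL_C4IW);

	if (iwpm_valid_pid()) {
		/* ask the mapper for mapped local/remote addresses */
		c4iw_form_pm_msg(ep, &pm_msg);
		if (!iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW))
			c4iw_record_pm_msg(ep, &pm_msg);
	}

	/* remember the local <-> mapped pair; iwpm_send_mapinfo() can
	 * later replay all such records to the mapper */
	iwpm_create_mapinfo(&ep->com.local_addr,
			    &ep->com.mapped_local_addr, RDMA_NL_C4IW);

	/* teardown, as in _c4iw_free_ep() */
	iwpm_remove_mapinfo(&ep->com.local_addr, &ep->com.mapped_local_addr);
	iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);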
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index a1e9cba84944..23dd5a5c7597 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c | |||
@@ -103,13 +103,13 @@ int ibnl_remove_client(int index) | |||
103 | EXPORT_SYMBOL(ibnl_remove_client); | 103 | EXPORT_SYMBOL(ibnl_remove_client); |
104 | 104 | ||
105 | void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, | 105 | void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, |
106 | int len, int client, int op) | 106 | int len, int client, int op, int flags) |
107 | { | 107 | { |
108 | unsigned char *prev_tail; | 108 | unsigned char *prev_tail; |
109 | 109 | ||
110 | prev_tail = skb_tail_pointer(skb); | 110 | prev_tail = skb_tail_pointer(skb); |
111 | *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), | 111 | *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), |
112 | len, NLM_F_MULTI); | 112 | len, flags); |
113 | if (!*nlh) | 113 | if (!*nlh) |
114 | goto out_nlmsg_trim; | 114 | goto out_nlmsg_trim; |
115 | (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail; | 115 | (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail; |
@@ -172,6 +172,20 @@ static void ibnl_rcv(struct sk_buff *skb) | |||
172 | mutex_unlock(&ibnl_mutex); | 172 | mutex_unlock(&ibnl_mutex); |
173 | } | 173 | } |
174 | 174 | ||
175 | int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh, | ||
176 | __u32 pid) | ||
177 | { | ||
178 | return nlmsg_unicast(nls, skb, pid); | ||
179 | } | ||
180 | EXPORT_SYMBOL(ibnl_unicast); | ||
181 | |||
182 | int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh, | ||
183 | unsigned int group, gfp_t flags) | ||
184 | { | ||
185 | return nlmsg_multicast(nls, skb, 0, group, flags); | ||
186 | } | ||
187 | EXPORT_SYMBOL(ibnl_multicast); | ||
188 | |||
175 | int __init ibnl_init(void) | 189 | int __init ibnl_init(void) |
176 | { | 190 | { |
177 | struct netlink_kernel_cfg cfg = { | 191 | struct netlink_kernel_cfg cfg = { |
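With the flags argument now caller-supplied instead of hard-wired to NLM_F_MULTI, a single-part request can be built and delivered to one user space listener through the new ibnl_unicast() helper. A minimal sketch; the empty payload and the iwpm_user_pid destination are assumptions, not the real iwpm wire format:

	struct sk_buff *skb = dev_alloc_skb(NLMSG_GOODSIZE);
	struct nlmsghdr *nlh;

	if (!skb)
		return -ENOMEM;
	if (!ibnl_put_msg(skb, &nlh, iwpm_get_nlmsg_seq(), 0,
			  RDMA_NL_C4IW, RDMA_NL_IWPM_REG_PID,
			  NLM_F_REQUEST)) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}
	/* ... append nlattr payload here ... */
	return ibnl_unicast(skb, nlh, iwpm_user_pid);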
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index f820958e4047..233eaf541f55 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
@@ -618,7 +618,7 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent) | |||
618 | 618 | ||
619 | static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) | 619 | static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) |
620 | { | 620 | { |
621 | bool preload = gfp_mask & __GFP_WAIT; | 621 | bool preload = !!(gfp_mask & __GFP_WAIT); |
622 | unsigned long flags; | 622 | unsigned long flags; |
623 | int ret, id; | 623 | int ret, id; |
624 | 624 | ||
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 92525f855d82..c2b89cc5dbca 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
@@ -48,7 +48,7 @@ | |||
48 | 48 | ||
49 | #include "core_priv.h" | 49 | #include "core_priv.h" |
50 | 50 | ||
51 | int ib_rate_to_mult(enum ib_rate rate) | 51 | __attribute_const__ int ib_rate_to_mult(enum ib_rate rate) |
52 | { | 52 | { |
53 | switch (rate) { | 53 | switch (rate) { |
54 | case IB_RATE_2_5_GBPS: return 1; | 54 | case IB_RATE_2_5_GBPS: return 1; |
@@ -65,7 +65,7 @@ int ib_rate_to_mult(enum ib_rate rate) | |||
65 | } | 65 | } |
66 | EXPORT_SYMBOL(ib_rate_to_mult); | 66 | EXPORT_SYMBOL(ib_rate_to_mult); |
67 | 67 | ||
68 | enum ib_rate mult_to_ib_rate(int mult) | 68 | __attribute_const__ enum ib_rate mult_to_ib_rate(int mult) |
69 | { | 69 | { |
70 | switch (mult) { | 70 | switch (mult) { |
71 | case 1: return IB_RATE_2_5_GBPS; | 71 | case 1: return IB_RATE_2_5_GBPS; |
@@ -82,7 +82,7 @@ enum ib_rate mult_to_ib_rate(int mult) | |||
82 | } | 82 | } |
83 | EXPORT_SYMBOL(mult_to_ib_rate); | 83 | EXPORT_SYMBOL(mult_to_ib_rate); |
84 | 84 | ||
85 | int ib_rate_to_mbps(enum ib_rate rate) | 85 | __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate) |
86 | { | 86 | { |
87 | switch (rate) { | 87 | switch (rate) { |
88 | case IB_RATE_2_5_GBPS: return 2500; | 88 | case IB_RATE_2_5_GBPS: return 2500; |
@@ -107,7 +107,7 @@ int ib_rate_to_mbps(enum ib_rate rate) | |||
107 | } | 107 | } |
108 | EXPORT_SYMBOL(ib_rate_to_mbps); | 108 | EXPORT_SYMBOL(ib_rate_to_mbps); |
109 | 109 | ||
110 | enum rdma_transport_type | 110 | __attribute_const__ enum rdma_transport_type |
111 | rdma_node_get_transport(enum rdma_node_type node_type) | 111 | rdma_node_get_transport(enum rdma_node_type node_type) |
112 | { | 112 | { |
113 | switch (node_type) { | 113 | switch (node_type) { |
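The __attribute_const__ annotation marks these helpers as pure table lookups with no side effects, which lets the compiler fold repeated calls with the same argument. On gcc the kernel's compiler headers expand it to roughly the following; the usage line is illustrative:

	#define __attribute_const__	__attribute__((__const__))

	/* the compiler may evaluate the lookup once and reuse the result */
	int doubled = ib_rate_to_mult(IB_RATE_10_GBPS) +
		      ib_rate_to_mult(IB_RATE_10_GBPS);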
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile new file mode 100644 index 000000000000..e900b03531a9 --- /dev/null +++ b/drivers/infiniband/hw/Makefile | |||
@@ -0,0 +1,12 @@ | |||
1 | obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/ | ||
2 | obj-$(CONFIG_INFINIBAND_IPATH) += ipath/ | ||
3 | obj-$(CONFIG_INFINIBAND_QIB) += qib/ | ||
4 | obj-$(CONFIG_INFINIBAND_EHCA) += ehca/ | ||
5 | obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/ | ||
6 | obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/ | ||
7 | obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/ | ||
8 | obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/ | ||
9 | obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/ | ||
10 | obj-$(CONFIG_INFINIBAND_NES) += nes/ | ||
11 | obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/ | ||
12 | obj-$(CONFIG_INFINIBAND_USNIC) += usnic/ | ||
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index c3f5aca4ef00..de1c61b417d6 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c | |||
@@ -735,14 +735,12 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry, | |||
735 | ((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) | | 735 | ((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) | |
736 | V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) | | 736 | V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) | |
737 | V_TPT_PAGE_SIZE(page_size)); | 737 | V_TPT_PAGE_SIZE(page_size)); |
738 | tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 : | 738 | tpt.rsvd_pbl_addr = cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3)); |
739 | cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3)); | ||
740 | tpt.len = cpu_to_be32(len); | 739 | tpt.len = cpu_to_be32(len); |
741 | tpt.va_hi = cpu_to_be32((u32) (to >> 32)); | 740 | tpt.va_hi = cpu_to_be32((u32) (to >> 32)); |
742 | tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL)); | 741 | tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL)); |
743 | tpt.rsvd_bind_cnt_or_pstag = 0; | 742 | tpt.rsvd_bind_cnt_or_pstag = 0; |
744 | tpt.rsvd_pbl_size = reset_tpt_entry ? 0 : | 743 | tpt.rsvd_pbl_size = cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2)); |
745 | cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2)); | ||
746 | } | 744 | } |
747 | err = cxio_hal_ctrl_qp_write_mem(rdev_p, | 745 | err = cxio_hal_ctrl_qp_write_mem(rdev_p, |
748 | stag_idx + | 746 | stag_idx + |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 095bb046e2c8..cb78b1e9bcd9 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -418,6 +418,7 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp) | |||
418 | skb->priority = CPL_PRIORITY_DATA; | 418 | skb->priority = CPL_PRIORITY_DATA; |
419 | set_arp_failure_handler(skb, abort_arp_failure); | 419 | set_arp_failure_handler(skb, abort_arp_failure); |
420 | req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req)); | 420 | req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req)); |
421 | memset(req, 0, sizeof(*req)); | ||
421 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); | 422 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); |
422 | req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); | 423 | req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); |
423 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); | 424 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 1f863a96a480..96d7131ab974 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. | 2 | * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This software is available to you under a choice of one of two | 4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -47,6 +47,8 @@ | |||
47 | #include <net/ip6_route.h> | 47 | #include <net/ip6_route.h> |
48 | #include <net/addrconf.h> | 48 | #include <net/addrconf.h> |
49 | 49 | ||
50 | #include <rdma/ib_addr.h> | ||
51 | |||
50 | #include "iw_cxgb4.h" | 52 | #include "iw_cxgb4.h" |
51 | 53 | ||
52 | static char *states[] = { | 54 | static char *states[] = { |
@@ -294,6 +296,12 @@ void _c4iw_free_ep(struct kref *kref) | |||
294 | dst_release(ep->dst); | 296 | dst_release(ep->dst); |
295 | cxgb4_l2t_release(ep->l2t); | 297 | cxgb4_l2t_release(ep->l2t); |
296 | } | 298 | } |
299 | if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) { | ||
300 | print_addr(&ep->com, __func__, "remove_mapinfo/mapping"); | ||
301 | iwpm_remove_mapinfo(&ep->com.local_addr, | ||
302 | &ep->com.mapped_local_addr); | ||
303 | iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW); | ||
304 | } | ||
297 | kfree(ep); | 305 | kfree(ep); |
298 | } | 306 | } |
299 | 307 | ||
@@ -341,10 +349,7 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) | |||
341 | 349 | ||
342 | static struct net_device *get_real_dev(struct net_device *egress_dev) | 350 | static struct net_device *get_real_dev(struct net_device *egress_dev) |
343 | { | 351 | { |
344 | struct net_device *phys_dev = egress_dev; | 352 | return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev; |
345 | if (egress_dev->priv_flags & IFF_802_1Q_VLAN) | ||
346 | phys_dev = vlan_dev_real_dev(egress_dev); | ||
347 | return phys_dev; | ||
348 | } | 353 | } |
349 | 354 | ||
350 | static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev) | 355 | static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev) |
@@ -528,6 +533,38 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) | |||
528 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | 533 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); |
529 | } | 534 | } |
530 | 535 | ||
536 | /* | ||
537 | * c4iw_form_pm_msg - Form a port mapper message with mapping info | ||
538 | */ | ||
539 | static void c4iw_form_pm_msg(struct c4iw_ep *ep, | ||
540 | struct iwpm_sa_data *pm_msg) | ||
541 | { | ||
542 | memcpy(&pm_msg->loc_addr, &ep->com.local_addr, | ||
543 | sizeof(ep->com.local_addr)); | ||
544 | memcpy(&pm_msg->rem_addr, &ep->com.remote_addr, | ||
545 | sizeof(ep->com.remote_addr)); | ||
546 | } | ||
547 | |||
548 | /* | ||
549 | * c4iw_form_reg_msg - Form a port mapper message with dev info | ||
550 | */ | ||
551 | static void c4iw_form_reg_msg(struct c4iw_dev *dev, | ||
552 | struct iwpm_dev_data *pm_msg) | ||
553 | { | ||
554 | memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE); | ||
555 | memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name, | ||
556 | IWPM_IFNAME_SIZE); | ||
557 | } | ||
558 | |||
559 | static void c4iw_record_pm_msg(struct c4iw_ep *ep, | ||
560 | struct iwpm_sa_data *pm_msg) | ||
561 | { | ||
562 | memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr, | ||
563 | sizeof(ep->com.mapped_local_addr)); | ||
564 | memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr, | ||
565 | sizeof(ep->com.mapped_remote_addr)); | ||
566 | } | ||
567 | |||
531 | static int send_connect(struct c4iw_ep *ep) | 568 | static int send_connect(struct c4iw_ep *ep) |
532 | { | 569 | { |
533 | struct cpl_act_open_req *req; | 570 | struct cpl_act_open_req *req; |
@@ -546,10 +583,14 @@ static int send_connect(struct c4iw_ep *ep) | |||
546 | int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ? | 583 | int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ? |
547 | sizeof(struct cpl_act_open_req6) : | 584 | sizeof(struct cpl_act_open_req6) : |
548 | sizeof(struct cpl_t5_act_open_req6); | 585 | sizeof(struct cpl_t5_act_open_req6); |
549 | struct sockaddr_in *la = (struct sockaddr_in *)&ep->com.local_addr; | 586 | struct sockaddr_in *la = (struct sockaddr_in *) |
550 | struct sockaddr_in *ra = (struct sockaddr_in *)&ep->com.remote_addr; | 587 | &ep->com.mapped_local_addr; |
551 | struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&ep->com.local_addr; | 588 | struct sockaddr_in *ra = (struct sockaddr_in *) |
552 | struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr; | 589 | &ep->com.mapped_remote_addr; |
590 | struct sockaddr_in6 *la6 = (struct sockaddr_in6 *) | ||
591 | &ep->com.mapped_local_addr; | ||
592 | struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *) | ||
593 | &ep->com.mapped_remote_addr; | ||
553 | 594 | ||
554 | wrlen = (ep->com.remote_addr.ss_family == AF_INET) ? | 595 | wrlen = (ep->com.remote_addr.ss_family == AF_INET) ? |
555 | roundup(sizev4, 16) : | 596 | roundup(sizev4, 16) : |
@@ -1627,10 +1668,10 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) | |||
1627 | req->le.filter = cpu_to_be32(cxgb4_select_ntuple( | 1668 | req->le.filter = cpu_to_be32(cxgb4_select_ntuple( |
1628 | ep->com.dev->rdev.lldi.ports[0], | 1669 | ep->com.dev->rdev.lldi.ports[0], |
1629 | ep->l2t)); | 1670 | ep->l2t)); |
1630 | sin = (struct sockaddr_in *)&ep->com.local_addr; | 1671 | sin = (struct sockaddr_in *)&ep->com.mapped_local_addr; |
1631 | req->le.lport = sin->sin_port; | 1672 | req->le.lport = sin->sin_port; |
1632 | req->le.u.ipv4.lip = sin->sin_addr.s_addr; | 1673 | req->le.u.ipv4.lip = sin->sin_addr.s_addr; |
1633 | sin = (struct sockaddr_in *)&ep->com.remote_addr; | 1674 | sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr; |
1634 | req->le.pport = sin->sin_port; | 1675 | req->le.pport = sin->sin_port; |
1635 | req->le.u.ipv4.pip = sin->sin_addr.s_addr; | 1676 | req->le.u.ipv4.pip = sin->sin_addr.s_addr; |
1636 | req->tcb.t_state_to_astid = | 1677 | req->tcb.t_state_to_astid = |
@@ -1746,16 +1787,16 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, | |||
1746 | if (!ep->l2t) | 1787 | if (!ep->l2t) |
1747 | goto out; | 1788 | goto out; |
1748 | ep->mtu = dst_mtu(dst); | 1789 | ep->mtu = dst_mtu(dst); |
1749 | ep->tx_chan = cxgb4_port_chan(n->dev); | 1790 | ep->tx_chan = cxgb4_port_chan(pdev); |
1750 | ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1; | 1791 | ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; |
1751 | step = cdev->rdev.lldi.ntxq / | 1792 | step = cdev->rdev.lldi.ntxq / |
1752 | cdev->rdev.lldi.nchan; | 1793 | cdev->rdev.lldi.nchan; |
1753 | ep->txq_idx = cxgb4_port_idx(n->dev) * step; | 1794 | ep->txq_idx = cxgb4_port_idx(pdev) * step; |
1754 | ep->ctrlq_idx = cxgb4_port_idx(n->dev); | 1795 | ep->ctrlq_idx = cxgb4_port_idx(pdev); |
1755 | step = cdev->rdev.lldi.nrxq / | 1796 | step = cdev->rdev.lldi.nrxq / |
1756 | cdev->rdev.lldi.nchan; | 1797 | cdev->rdev.lldi.nchan; |
1757 | ep->rss_qid = cdev->rdev.lldi.rxq_ids[ | 1798 | ep->rss_qid = cdev->rdev.lldi.rxq_ids[ |
1758 | cxgb4_port_idx(n->dev) * step]; | 1799 | cxgb4_port_idx(pdev) * step]; |
1759 | 1800 | ||
1760 | if (clear_mpa_v1) { | 1801 | if (clear_mpa_v1) { |
1761 | ep->retry_with_mpa_v1 = 0; | 1802 | ep->retry_with_mpa_v1 = 0; |
@@ -1870,10 +1911,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1870 | struct sockaddr_in6 *ra6; | 1911 | struct sockaddr_in6 *ra6; |
1871 | 1912 | ||
1872 | ep = lookup_atid(t, atid); | 1913 | ep = lookup_atid(t, atid); |
1873 | la = (struct sockaddr_in *)&ep->com.local_addr; | 1914 | la = (struct sockaddr_in *)&ep->com.mapped_local_addr; |
1874 | ra = (struct sockaddr_in *)&ep->com.remote_addr; | 1915 | ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr; |
1875 | la6 = (struct sockaddr_in6 *)&ep->com.local_addr; | 1916 | la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; |
1876 | ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr; | 1917 | ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr; |
1877 | 1918 | ||
1878 | PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, | 1919 | PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, |
1879 | status, status2errno(status)); | 1920 | status, status2errno(status)); |
@@ -2730,13 +2771,15 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2730 | struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); | 2771 | struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); |
2731 | struct c4iw_ep *ep; | 2772 | struct c4iw_ep *ep; |
2732 | int err = 0; | 2773 | int err = 0; |
2733 | struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr; | 2774 | struct sockaddr_in *laddr; |
2734 | struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr; | 2775 | struct sockaddr_in *raddr; |
2735 | struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; | 2776 | struct sockaddr_in6 *laddr6; |
2736 | struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *) | 2777 | struct sockaddr_in6 *raddr6; |
2737 | &cm_id->remote_addr; | 2778 | struct iwpm_dev_data pm_reg_msg; |
2779 | struct iwpm_sa_data pm_msg; | ||
2738 | __u8 *ra; | 2780 | __u8 *ra; |
2739 | int iptype; | 2781 | int iptype; |
2782 | int iwpm_err = 0; | ||
2740 | 2783 | ||
2741 | if ((conn_param->ord > c4iw_max_read_depth) || | 2784 | if ((conn_param->ord > c4iw_max_read_depth) || |
2742 | (conn_param->ird > c4iw_max_read_depth)) { | 2785 | (conn_param->ird > c4iw_max_read_depth)) { |
@@ -2767,7 +2810,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2767 | if (!ep->com.qp) { | 2810 | if (!ep->com.qp) { |
2768 | PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn); | 2811 | PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn); |
2769 | err = -EINVAL; | 2812 | err = -EINVAL; |
2770 | goto fail2; | 2813 | goto fail1; |
2771 | } | 2814 | } |
2772 | ref_qp(ep); | 2815 | ref_qp(ep); |
2773 | PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, | 2816 | PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, |
@@ -2780,10 +2823,50 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2780 | if (ep->atid == -1) { | 2823 | if (ep->atid == -1) { |
2781 | printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); | 2824 | printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); |
2782 | err = -ENOMEM; | 2825 | err = -ENOMEM; |
2783 | goto fail2; | 2826 | goto fail1; |
2784 | } | 2827 | } |
2785 | insert_handle(dev, &dev->atid_idr, ep, ep->atid); | 2828 | insert_handle(dev, &dev->atid_idr, ep, ep->atid); |
2786 | 2829 | ||
2830 | memcpy(&ep->com.local_addr, &cm_id->local_addr, | ||
2831 | sizeof(ep->com.local_addr)); | ||
2832 | memcpy(&ep->com.remote_addr, &cm_id->remote_addr, | ||
2833 | sizeof(ep->com.remote_addr)); | ||
2834 | |||
2835 | /* Default to the specified peer info, in case no port mapper is available */ | ||
2836 | memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr, | ||
2837 | sizeof(ep->com.mapped_local_addr)); | ||
2838 | memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr, | ||
2839 | sizeof(ep->com.mapped_remote_addr)); | ||
2840 | |||
2841 | c4iw_form_reg_msg(dev, &pm_reg_msg); | ||
2842 | iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW); | ||
2843 | if (iwpm_err) { | ||
2844 | PDBG("%s: Port Mapper reg pid fail (err = %d).\n", | ||
2845 | __func__, iwpm_err); | ||
2846 | } | ||
2847 | if (iwpm_valid_pid() && !iwpm_err) { | ||
2848 | c4iw_form_pm_msg(ep, &pm_msg); | ||
2849 | iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW); | ||
2850 | if (iwpm_err) | ||
2851 | PDBG("%s: Port Mapper query fail (err = %d).\n", | ||
2852 | __func__, iwpm_err); | ||
2853 | else | ||
2854 | c4iw_record_pm_msg(ep, &pm_msg); | ||
2855 | } | ||
2856 | if (iwpm_create_mapinfo(&ep->com.local_addr, | ||
2857 | &ep->com.mapped_local_addr, RDMA_NL_C4IW)) { | ||
2858 | iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW); | ||
2859 | err = -ENOMEM; | ||
2860 | goto fail1; | ||
2861 | } | ||
2862 | print_addr(&ep->com, __func__, "add_query/create_mapinfo"); | ||
2863 | set_bit(RELEASE_MAPINFO, &ep->com.flags); | ||
2864 | |||
2865 | laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr; | ||
2866 | raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr; | ||
2867 | laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; | ||
2868 | raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr; | ||
2869 | |||
2787 | if (cm_id->remote_addr.ss_family == AF_INET) { | 2870 | if (cm_id->remote_addr.ss_family == AF_INET) { |
2788 | iptype = 4; | 2871 | iptype = 4; |
2789 | ra = (__u8 *)&raddr->sin_addr; | 2872 | ra = (__u8 *)&raddr->sin_addr; |
@@ -2794,7 +2877,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2794 | if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) { | 2877 | if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) { |
2795 | err = pick_local_ipaddrs(dev, cm_id); | 2878 | err = pick_local_ipaddrs(dev, cm_id); |
2796 | if (err) | 2879 | if (err) |
2797 | goto fail2; | 2880 | goto fail1; |
2798 | } | 2881 | } |
2799 | 2882 | ||
2800 | /* find a route */ | 2883 | /* find a route */ |
@@ -2814,7 +2897,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2814 | if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { | 2897 | if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { |
2815 | err = pick_local_ip6addrs(dev, cm_id); | 2898 | err = pick_local_ip6addrs(dev, cm_id); |
2816 | if (err) | 2899 | if (err) |
2817 | goto fail2; | 2900 | goto fail1; |
2818 | } | 2901 | } |
2819 | 2902 | ||
2820 | /* find a route */ | 2903 | /* find a route */ |
@@ -2830,13 +2913,13 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2830 | if (!ep->dst) { | 2913 | if (!ep->dst) { |
2831 | printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); | 2914 | printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); |
2832 | err = -EHOSTUNREACH; | 2915 | err = -EHOSTUNREACH; |
2833 | goto fail3; | 2916 | goto fail2; |
2834 | } | 2917 | } |
2835 | 2918 | ||
2836 | err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true); | 2919 | err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true); |
2837 | if (err) { | 2920 | if (err) { |
2838 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); | 2921 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); |
2839 | goto fail4; | 2922 | goto fail3; |
2840 | } | 2923 | } |
2841 | 2924 | ||
2842 | PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", | 2925 | PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", |
@@ -2845,10 +2928,6 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2845 | 2928 | ||
2846 | state_set(&ep->com, CONNECTING); | 2929 | state_set(&ep->com, CONNECTING); |
2847 | ep->tos = 0; | 2930 | ep->tos = 0; |
2848 | memcpy(&ep->com.local_addr, &cm_id->local_addr, | ||
2849 | sizeof(ep->com.local_addr)); | ||
2850 | memcpy(&ep->com.remote_addr, &cm_id->remote_addr, | ||
2851 | sizeof(ep->com.remote_addr)); | ||
2852 | 2931 | ||
2853 | /* send connect request to rnic */ | 2932 | /* send connect request to rnic */ |
2854 | err = send_connect(ep); | 2933 | err = send_connect(ep); |
@@ -2856,12 +2935,12 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2856 | goto out; | 2935 | goto out; |
2857 | 2936 | ||
2858 | cxgb4_l2t_release(ep->l2t); | 2937 | cxgb4_l2t_release(ep->l2t); |
2859 | fail4: | ||
2860 | dst_release(ep->dst); | ||
2861 | fail3: | 2938 | fail3: |
2939 | dst_release(ep->dst); | ||
2940 | fail2: | ||
2862 | remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); | 2941 | remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); |
2863 | cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); | 2942 | cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); |
2864 | fail2: | 2943 | fail1: |
2865 | cm_id->rem_ref(cm_id); | 2944 | cm_id->rem_ref(cm_id); |
2866 | c4iw_put_ep(&ep->com); | 2945 | c4iw_put_ep(&ep->com); |
2867 | out: | 2946 | out: |
@@ -2871,7 +2950,8 @@ out: | |||
2871 | static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) | 2950 | static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) |
2872 | { | 2951 | { |
2873 | int err; | 2952 | int err; |
2874 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ep->com.local_addr; | 2953 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) |
2954 | &ep->com.mapped_local_addr; | ||
2875 | 2955 | ||
2876 | c4iw_init_wr_wait(&ep->com.wr_wait); | 2956 | c4iw_init_wr_wait(&ep->com.wr_wait); |
2877 | err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0], | 2957 | err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0], |
@@ -2892,7 +2972,8 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) | |||
2892 | static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) | 2972 | static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) |
2893 | { | 2973 | { |
2894 | int err; | 2974 | int err; |
2895 | struct sockaddr_in *sin = (struct sockaddr_in *)&ep->com.local_addr; | 2975 | struct sockaddr_in *sin = (struct sockaddr_in *) |
2976 | &ep->com.mapped_local_addr; | ||
2896 | 2977 | ||
2897 | if (dev->rdev.lldi.enable_fw_ofld_conn) { | 2978 | if (dev->rdev.lldi.enable_fw_ofld_conn) { |
2898 | do { | 2979 | do { |
@@ -2927,6 +3008,9 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
2927 | int err = 0; | 3008 | int err = 0; |
2928 | struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); | 3009 | struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); |
2929 | struct c4iw_listen_ep *ep; | 3010 | struct c4iw_listen_ep *ep; |
3011 | struct iwpm_dev_data pm_reg_msg; | ||
3012 | struct iwpm_sa_data pm_msg; | ||
3013 | int iwpm_err = 0; | ||
2930 | 3014 | ||
2931 | might_sleep(); | 3015 | might_sleep(); |
2932 | 3016 | ||
@@ -2961,6 +3045,37 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
2961 | goto fail2; | 3045 | goto fail2; |
2962 | } | 3046 | } |
2963 | insert_handle(dev, &dev->stid_idr, ep, ep->stid); | 3047 | insert_handle(dev, &dev->stid_idr, ep, ep->stid); |
3048 | |||
3049 | /* Default to the specified local address, in case no port mapper is available */ | ||
3050 | memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr, | ||
3051 | sizeof(ep->com.mapped_local_addr)); | ||
3052 | |||
3053 | c4iw_form_reg_msg(dev, &pm_reg_msg); | ||
3054 | iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW); | ||
3055 | if (iwpm_err) { | ||
3056 | PDBG("%s: Port Mapper reg pid fail (err = %d).\n", | ||
3057 | __func__, iwpm_err); | ||
3058 | } | ||
3059 | if (iwpm_valid_pid() && !iwpm_err) { | ||
3060 | memcpy(&pm_msg.loc_addr, &ep->com.local_addr, | ||
3061 | sizeof(ep->com.local_addr)); | ||
3062 | iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW); | ||
3063 | if (iwpm_err) | ||
3064 | PDBG("%s: Port Mapper add mapping fail (err = %d).\n", | ||
3065 | __func__, iwpm_err); | ||
3066 | else | ||
3067 | memcpy(&ep->com.mapped_local_addr, | ||
3068 | &pm_msg.mapped_loc_addr, | ||
3069 | sizeof(ep->com.mapped_local_addr)); | ||
3070 | } | ||
3071 | if (iwpm_create_mapinfo(&ep->com.local_addr, | ||
3072 | &ep->com.mapped_local_addr, RDMA_NL_C4IW)) { | ||
3073 | err = -ENOMEM; | ||
3074 | goto fail3; | ||
3075 | } | ||
3076 | print_addr(&ep->com, __func__, "add_mapping/create_mapinfo"); | ||
3077 | |||
3078 | set_bit(RELEASE_MAPINFO, &ep->com.flags); | ||
2964 | state_set(&ep->com, LISTEN); | 3079 | state_set(&ep->com, LISTEN); |
2965 | if (ep->com.local_addr.ss_family == AF_INET) | 3080 | if (ep->com.local_addr.ss_family == AF_INET) |
2966 | err = create_server4(dev, ep); | 3081 | err = create_server4(dev, ep); |
@@ -2970,6 +3085,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
2970 | cm_id->provider_data = ep; | 3085 | cm_id->provider_data = ep; |
2971 | goto out; | 3086 | goto out; |
2972 | } | 3087 | } |
3088 | |||
3089 | fail3: | ||
2973 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, | 3090 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, |
2974 | ep->com.local_addr.ss_family); | 3091 | ep->com.local_addr.ss_family); |
2975 | fail2: | 3092 | fail2: |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index cfaa56ada189..7151a02b4ebb 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
@@ -940,7 +940,6 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, | |||
940 | if (!mm2) | 940 | if (!mm2) |
941 | goto err4; | 941 | goto err4; |
942 | 942 | ||
943 | memset(&uresp, 0, sizeof(uresp)); | ||
944 | uresp.qid_mask = rhp->rdev.cqmask; | 943 | uresp.qid_mask = rhp->rdev.cqmask; |
945 | uresp.cqid = chp->cq.cqid; | 944 | uresp.cqid = chp->cq.cqid; |
946 | uresp.size = chp->cq.size; | 945 | uresp.size = chp->cq.size; |
@@ -951,7 +950,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, | |||
951 | uresp.gts_key = ucontext->key; | 950 | uresp.gts_key = ucontext->key; |
952 | ucontext->key += PAGE_SIZE; | 951 | ucontext->key += PAGE_SIZE; |
953 | spin_unlock(&ucontext->mmap_lock); | 952 | spin_unlock(&ucontext->mmap_lock); |
954 | ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); | 953 | ret = ib_copy_to_udata(udata, &uresp, |
954 | sizeof(uresp) - sizeof(uresp.reserved)); | ||
955 | if (ret) | 955 | if (ret) |
956 | goto err5; | 956 | goto err5; |
957 | 957 | ||
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index f4fa50a609e2..dd93aadc996e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c | |||
@@ -77,6 +77,16 @@ struct c4iw_debugfs_data { | |||
77 | int pos; | 77 | int pos; |
78 | }; | 78 | }; |
79 | 79 | ||
80 | /* registered cxgb4 netlink callbacks */ | ||
81 | static struct ibnl_client_cbs c4iw_nl_cb_table[] = { | ||
82 | [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, | ||
83 | [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, | ||
84 | [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, | ||
85 | [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, | ||
86 | [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, | ||
87 | [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} | ||
88 | }; | ||
89 | |||
80 | static int count_idrs(int id, void *p, void *data) | 90 | static int count_idrs(int id, void *p, void *data) |
81 | { | 91 | { |
82 | int *countp = data; | 92 | int *countp = data; |
@@ -113,35 +123,49 @@ static int dump_qp(int id, void *p, void *data) | |||
113 | &qp->ep->com.local_addr; | 123 | &qp->ep->com.local_addr; |
114 | struct sockaddr_in *rsin = (struct sockaddr_in *) | 124 | struct sockaddr_in *rsin = (struct sockaddr_in *) |
115 | &qp->ep->com.remote_addr; | 125 | &qp->ep->com.remote_addr; |
126 | struct sockaddr_in *mapped_lsin = (struct sockaddr_in *) | ||
127 | &qp->ep->com.mapped_local_addr; | ||
128 | struct sockaddr_in *mapped_rsin = (struct sockaddr_in *) | ||
129 | &qp->ep->com.mapped_remote_addr; | ||
116 | 130 | ||
117 | cc = snprintf(qpd->buf + qpd->pos, space, | 131 | cc = snprintf(qpd->buf + qpd->pos, space, |
118 | "rc qp sq id %u rq id %u state %u " | 132 | "rc qp sq id %u rq id %u state %u " |
119 | "onchip %u ep tid %u state %u " | 133 | "onchip %u ep tid %u state %u " |
120 | "%pI4:%u->%pI4:%u\n", | 134 | "%pI4:%u/%u->%pI4:%u/%u\n", |
121 | qp->wq.sq.qid, qp->wq.rq.qid, | 135 | qp->wq.sq.qid, qp->wq.rq.qid, |
122 | (int)qp->attr.state, | 136 | (int)qp->attr.state, |
123 | qp->wq.sq.flags & T4_SQ_ONCHIP, | 137 | qp->wq.sq.flags & T4_SQ_ONCHIP, |
124 | qp->ep->hwtid, (int)qp->ep->com.state, | 138 | qp->ep->hwtid, (int)qp->ep->com.state, |
125 | &lsin->sin_addr, ntohs(lsin->sin_port), | 139 | &lsin->sin_addr, ntohs(lsin->sin_port), |
126 | &rsin->sin_addr, ntohs(rsin->sin_port)); | 140 | ntohs(mapped_lsin->sin_port), |
141 | &rsin->sin_addr, ntohs(rsin->sin_port), | ||
142 | ntohs(mapped_rsin->sin_port)); | ||
127 | } else { | 143 | } else { |
128 | struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) | 144 | struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) |
129 | &qp->ep->com.local_addr; | 145 | &qp->ep->com.local_addr; |
130 | struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *) | 146 | struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *) |
131 | &qp->ep->com.remote_addr; | 147 | &qp->ep->com.remote_addr; |
148 | struct sockaddr_in6 *mapped_lsin6 = | ||
149 | (struct sockaddr_in6 *) | ||
150 | &qp->ep->com.mapped_local_addr; | ||
151 | struct sockaddr_in6 *mapped_rsin6 = | ||
152 | (struct sockaddr_in6 *) | ||
153 | &qp->ep->com.mapped_remote_addr; | ||
132 | 154 | ||
133 | cc = snprintf(qpd->buf + qpd->pos, space, | 155 | cc = snprintf(qpd->buf + qpd->pos, space, |
134 | "rc qp sq id %u rq id %u state %u " | 156 | "rc qp sq id %u rq id %u state %u " |
135 | "onchip %u ep tid %u state %u " | 157 | "onchip %u ep tid %u state %u " |
136 | "%pI6:%u->%pI6:%u\n", | 158 | "%pI6:%u/%u->%pI6:%u/%u\n", |
137 | qp->wq.sq.qid, qp->wq.rq.qid, | 159 | qp->wq.sq.qid, qp->wq.rq.qid, |
138 | (int)qp->attr.state, | 160 | (int)qp->attr.state, |
139 | qp->wq.sq.flags & T4_SQ_ONCHIP, | 161 | qp->wq.sq.flags & T4_SQ_ONCHIP, |
140 | qp->ep->hwtid, (int)qp->ep->com.state, | 162 | qp->ep->hwtid, (int)qp->ep->com.state, |
141 | &lsin6->sin6_addr, | 163 | &lsin6->sin6_addr, |
142 | ntohs(lsin6->sin6_port), | 164 | ntohs(lsin6->sin6_port), |
165 | ntohs(mapped_lsin6->sin6_port), | ||
143 | &rsin6->sin6_addr, | 166 | &rsin6->sin6_addr, |
144 | ntohs(rsin6->sin6_port)); | 167 | ntohs(rsin6->sin6_port), |
168 | ntohs(mapped_rsin6->sin6_port)); | ||
145 | } | 169 | } |
146 | } else | 170 | } else |
147 | cc = snprintf(qpd->buf + qpd->pos, space, | 171 | cc = snprintf(qpd->buf + qpd->pos, space, |
@@ -386,31 +410,43 @@ static int dump_ep(int id, void *p, void *data) | |||
386 | &ep->com.local_addr; | 410 | &ep->com.local_addr; |
387 | struct sockaddr_in *rsin = (struct sockaddr_in *) | 411 | struct sockaddr_in *rsin = (struct sockaddr_in *) |
388 | &ep->com.remote_addr; | 412 | &ep->com.remote_addr; |
413 | struct sockaddr_in *mapped_lsin = (struct sockaddr_in *) | ||
414 | &ep->com.mapped_local_addr; | ||
415 | struct sockaddr_in *mapped_rsin = (struct sockaddr_in *) | ||
416 | &ep->com.mapped_remote_addr; | ||
389 | 417 | ||
390 | cc = snprintf(epd->buf + epd->pos, space, | 418 | cc = snprintf(epd->buf + epd->pos, space, |
391 | "ep %p cm_id %p qp %p state %d flags 0x%lx " | 419 | "ep %p cm_id %p qp %p state %d flags 0x%lx " |
392 | "history 0x%lx hwtid %d atid %d " | 420 | "history 0x%lx hwtid %d atid %d " |
393 | "%pI4:%d <-> %pI4:%d\n", | 421 | "%pI4:%d/%d <-> %pI4:%d/%d\n", |
394 | ep, ep->com.cm_id, ep->com.qp, | 422 | ep, ep->com.cm_id, ep->com.qp, |
395 | (int)ep->com.state, ep->com.flags, | 423 | (int)ep->com.state, ep->com.flags, |
396 | ep->com.history, ep->hwtid, ep->atid, | 424 | ep->com.history, ep->hwtid, ep->atid, |
397 | &lsin->sin_addr, ntohs(lsin->sin_port), | 425 | &lsin->sin_addr, ntohs(lsin->sin_port), |
398 | &rsin->sin_addr, ntohs(rsin->sin_port)); | 426 | ntohs(mapped_lsin->sin_port), |
427 | &rsin->sin_addr, ntohs(rsin->sin_port), | ||
428 | ntohs(mapped_rsin->sin_port)); | ||
399 | } else { | 429 | } else { |
400 | struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) | 430 | struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) |
401 | &ep->com.local_addr; | 431 | &ep->com.local_addr; |
402 | struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *) | 432 | struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *) |
403 | &ep->com.remote_addr; | 433 | &ep->com.remote_addr; |
434 | struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *) | ||
435 | &ep->com.mapped_local_addr; | ||
436 | struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *) | ||
437 | &ep->com.mapped_remote_addr; | ||
404 | 438 | ||
405 | cc = snprintf(epd->buf + epd->pos, space, | 439 | cc = snprintf(epd->buf + epd->pos, space, |
406 | "ep %p cm_id %p qp %p state %d flags 0x%lx " | 440 | "ep %p cm_id %p qp %p state %d flags 0x%lx " |
407 | "history 0x%lx hwtid %d atid %d " | 441 | "history 0x%lx hwtid %d atid %d " |
408 | "%pI6:%d <-> %pI6:%d\n", | 442 | "%pI6:%d/%d <-> %pI6:%d/%d\n", |
409 | ep, ep->com.cm_id, ep->com.qp, | 443 | ep, ep->com.cm_id, ep->com.qp, |
410 | (int)ep->com.state, ep->com.flags, | 444 | (int)ep->com.state, ep->com.flags, |
411 | ep->com.history, ep->hwtid, ep->atid, | 445 | ep->com.history, ep->hwtid, ep->atid, |
412 | &lsin6->sin6_addr, ntohs(lsin6->sin6_port), | 446 | &lsin6->sin6_addr, ntohs(lsin6->sin6_port), |
413 | &rsin6->sin6_addr, ntohs(rsin6->sin6_port)); | 447 | ntohs(mapped_lsin6->sin6_port), |
448 | &rsin6->sin6_addr, ntohs(rsin6->sin6_port), | ||
449 | ntohs(mapped_rsin6->sin6_port)); | ||
414 | } | 450 | } |
415 | if (cc < space) | 451 | if (cc < space) |
416 | epd->pos += cc; | 452 | epd->pos += cc; |
@@ -431,23 +467,29 @@ static int dump_listen_ep(int id, void *p, void *data) | |||
431 | if (ep->com.local_addr.ss_family == AF_INET) { | 467 | if (ep->com.local_addr.ss_family == AF_INET) { |
432 | struct sockaddr_in *lsin = (struct sockaddr_in *) | 468 | struct sockaddr_in *lsin = (struct sockaddr_in *) |
433 | &ep->com.local_addr; | 469 | &ep->com.local_addr; |
470 | struct sockaddr_in *mapped_lsin = (struct sockaddr_in *) | ||
471 | &ep->com.mapped_local_addr; | ||
434 | 472 | ||
435 | cc = snprintf(epd->buf + epd->pos, space, | 473 | cc = snprintf(epd->buf + epd->pos, space, |
436 | "ep %p cm_id %p state %d flags 0x%lx stid %d " | 474 | "ep %p cm_id %p state %d flags 0x%lx stid %d " |
437 | "backlog %d %pI4:%d\n", | 475 | "backlog %d %pI4:%d/%d\n", |
438 | ep, ep->com.cm_id, (int)ep->com.state, | 476 | ep, ep->com.cm_id, (int)ep->com.state, |
439 | ep->com.flags, ep->stid, ep->backlog, | 477 | ep->com.flags, ep->stid, ep->backlog, |
440 | &lsin->sin_addr, ntohs(lsin->sin_port)); | 478 | &lsin->sin_addr, ntohs(lsin->sin_port), |
479 | ntohs(mapped_lsin->sin_port)); | ||
441 | } else { | 480 | } else { |
442 | struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) | 481 | struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) |
443 | &ep->com.local_addr; | 482 | &ep->com.local_addr; |
483 | struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *) | ||
484 | &ep->com.mapped_local_addr; | ||
444 | 485 | ||
445 | cc = snprintf(epd->buf + epd->pos, space, | 486 | cc = snprintf(epd->buf + epd->pos, space, |
446 | "ep %p cm_id %p state %d flags 0x%lx stid %d " | 487 | "ep %p cm_id %p state %d flags 0x%lx stid %d " |
447 | "backlog %d %pI6:%d\n", | 488 | "backlog %d %pI6:%d/%d\n", |
448 | ep, ep->com.cm_id, (int)ep->com.state, | 489 | ep, ep->com.cm_id, (int)ep->com.state, |
449 | ep->com.flags, ep->stid, ep->backlog, | 490 | ep->com.flags, ep->stid, ep->backlog, |
450 | &lsin6->sin6_addr, ntohs(lsin6->sin6_port)); | 491 | &lsin6->sin6_addr, ntohs(lsin6->sin6_port), |
492 | ntohs(mapped_lsin6->sin6_port)); | ||
451 | } | 493 | } |
452 | if (cc < space) | 494 | if (cc < space) |
453 | epd->pos += cc; | 495 | epd->pos += cc; |
@@ -687,6 +729,7 @@ static void c4iw_dealloc(struct uld_ctx *ctx) | |||
687 | if (ctx->dev->rdev.oc_mw_kva) | 729 | if (ctx->dev->rdev.oc_mw_kva) |
688 | iounmap(ctx->dev->rdev.oc_mw_kva); | 730 | iounmap(ctx->dev->rdev.oc_mw_kva); |
689 | ib_dealloc_device(&ctx->dev->ibdev); | 731 | ib_dealloc_device(&ctx->dev->ibdev); |
732 | iwpm_exit(RDMA_NL_C4IW); | ||
690 | ctx->dev = NULL; | 733 | ctx->dev = NULL; |
691 | } | 734 | } |
692 | 735 | ||
@@ -736,6 +779,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
736 | pci_resource_len(devp->rdev.lldi.pdev, 2)); | 779 | pci_resource_len(devp->rdev.lldi.pdev, 2)); |
737 | if (!devp->rdev.bar2_kva) { | 780 | if (!devp->rdev.bar2_kva) { |
738 | pr_err(MOD "Unable to ioremap BAR2\n"); | 781 | pr_err(MOD "Unable to ioremap BAR2\n"); |
782 | ib_dealloc_device(&devp->ibdev); | ||
739 | return ERR_PTR(-EINVAL); | 783 | return ERR_PTR(-EINVAL); |
740 | } | 784 | } |
741 | } else if (ocqp_supported(infop)) { | 785 | } else if (ocqp_supported(infop)) { |
@@ -747,6 +791,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
747 | devp->rdev.lldi.vr->ocq.size); | 791 | devp->rdev.lldi.vr->ocq.size); |
748 | if (!devp->rdev.oc_mw_kva) { | 792 | if (!devp->rdev.oc_mw_kva) { |
749 | pr_err(MOD "Unable to ioremap onchip mem\n"); | 793 | pr_err(MOD "Unable to ioremap onchip mem\n"); |
794 | ib_dealloc_device(&devp->ibdev); | ||
750 | return ERR_PTR(-EINVAL); | 795 | return ERR_PTR(-EINVAL); |
751 | } | 796 | } |
752 | } | 797 | } |
@@ -780,6 +825,14 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
780 | c4iw_debugfs_root); | 825 | c4iw_debugfs_root); |
781 | setup_debugfs(devp); | 826 | setup_debugfs(devp); |
782 | } | 827 | } |
828 | |||
829 | ret = iwpm_init(RDMA_NL_C4IW); | ||
830 | if (ret) { | ||
831 | pr_err("port mapper initialization failed with %d\n", ret); | ||
832 | ib_dealloc_device(&devp->ibdev); | ||
833 | return ERR_PTR(ret); | ||
834 | } | ||
835 | |||
783 | return devp; | 836 | return devp; |
784 | } | 837 | } |
785 | 838 | ||
@@ -1274,6 +1327,11 @@ static int __init c4iw_init_module(void) | |||
1274 | printk(KERN_WARNING MOD | 1327 | printk(KERN_WARNING MOD |
1275 | "could not create debugfs entry, continuing\n"); | 1328 | "could not create debugfs entry, continuing\n"); |
1276 | 1329 | ||
1330 | if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS, | ||
1331 | c4iw_nl_cb_table)) | ||
1332 | pr_err("%s[%u]: Failed to add netlink callback\n", | ||
1333 | __func__, __LINE__); | ||
1334 | |||
1277 | cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); | 1335 | cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); |
1278 | 1336 | ||
1279 | return 0; | 1337 | return 0; |
@@ -1291,6 +1349,7 @@ static void __exit c4iw_exit_module(void) | |||
1291 | } | 1349 | } |
1292 | mutex_unlock(&dev_mutex); | 1350 | mutex_unlock(&dev_mutex); |
1293 | cxgb4_unregister_uld(CXGB4_ULD_RDMA); | 1351 | cxgb4_unregister_uld(CXGB4_ULD_RDMA); |
1352 | ibnl_remove_client(RDMA_NL_C4IW); | ||
1294 | c4iw_cm_term(); | 1353 | c4iw_cm_term(); |
1295 | debugfs_remove_recursive(c4iw_debugfs_root); | 1354 | debugfs_remove_recursive(c4iw_debugfs_root); |
1296 | } | 1355 | } |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 7474b490760a..6f533fbcc4b3 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -52,6 +52,8 @@ | |||
52 | 52 | ||
53 | #include <rdma/ib_verbs.h> | 53 | #include <rdma/ib_verbs.h> |
54 | #include <rdma/iw_cm.h> | 54 | #include <rdma/iw_cm.h> |
55 | #include <rdma/rdma_netlink.h> | ||
56 | #include <rdma/iw_portmap.h> | ||
55 | 57 | ||
56 | #include "cxgb4.h" | 58 | #include "cxgb4.h" |
57 | #include "cxgb4_uld.h" | 59 | #include "cxgb4_uld.h" |
@@ -728,6 +730,7 @@ enum c4iw_ep_flags { | |||
728 | CLOSE_SENT = 3, | 730 | CLOSE_SENT = 3, |
729 | TIMEOUT = 4, | 731 | TIMEOUT = 4, |
730 | QP_REFERENCED = 5, | 732 | QP_REFERENCED = 5, |
733 | RELEASE_MAPINFO = 6, | ||
731 | }; | 734 | }; |
732 | 735 | ||
733 | enum c4iw_ep_history { | 736 | enum c4iw_ep_history { |
@@ -764,6 +767,8 @@ struct c4iw_ep_common { | |||
764 | struct mutex mutex; | 767 | struct mutex mutex; |
765 | struct sockaddr_storage local_addr; | 768 | struct sockaddr_storage local_addr; |
766 | struct sockaddr_storage remote_addr; | 769 | struct sockaddr_storage remote_addr; |
770 | struct sockaddr_storage mapped_local_addr; | ||
771 | struct sockaddr_storage mapped_remote_addr; | ||
767 | struct c4iw_wr_wait wr_wait; | 772 | struct c4iw_wr_wait wr_wait; |
768 | unsigned long flags; | 773 | unsigned long flags; |
769 | unsigned long history; | 774 | unsigned long history; |
@@ -807,6 +812,45 @@ struct c4iw_ep { | |||
807 | unsigned int retry_count; | 812 | unsigned int retry_count; |
808 | }; | 813 | }; |
809 | 814 | ||
815 | static inline void print_addr(struct c4iw_ep_common *epc, const char *func, | ||
816 | const char *msg) | ||
817 | { | ||
818 | |||
819 | #define SINA(a) (&(((struct sockaddr_in *)(a))->sin_addr.s_addr)) | ||
820 | #define SINP(a) ntohs(((struct sockaddr_in *)(a))->sin_port) | ||
821 | #define SIN6A(a) (&(((struct sockaddr_in6 *)(a))->sin6_addr)) | ||
822 | #define SIN6P(a) ntohs(((struct sockaddr_in6 *)(a))->sin6_port) | ||
823 | |||
824 | if (c4iw_debug) { | ||
825 | switch (epc->local_addr.ss_family) { | ||
826 | case AF_INET: | ||
827 | PDBG("%s %s %pI4:%u/%u <-> %pI4:%u/%u\n", | ||
828 | func, msg, SINA(&epc->local_addr), | ||
829 | SINP(&epc->local_addr), | ||
830 | SINP(&epc->mapped_local_addr), | ||
831 | SINA(&epc->remote_addr), | ||
832 | SINP(&epc->remote_addr), | ||
833 | SINP(&epc->mapped_remote_addr)); | ||
834 | break; | ||
835 | case AF_INET6: | ||
836 | PDBG("%s %s %pI6:%u/%u <-> %pI6:%u/%u\n", | ||
837 | func, msg, SIN6A(&epc->local_addr), | ||
838 | SIN6P(&epc->local_addr), | ||
839 | SIN6P(&epc->mapped_local_addr), | ||
840 | SIN6A(&epc->remote_addr), | ||
841 | SIN6P(&epc->remote_addr), | ||
842 | SIN6P(&epc->mapped_remote_addr)); | ||
843 | break; | ||
844 | default: | ||
845 | break; | ||
846 | } | ||
847 | } | ||
848 | #undef SINA | ||
849 | #undef SINP | ||
850 | #undef SIN6A | ||
851 | #undef SIN6P | ||
852 | } | ||
853 | |||
810 | static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) | 854 | static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) |
811 | { | 855 | { |
812 | return cm_id->provider_data; | 856 | return cm_id->provider_data; |
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index a94a3e12c349..c777e22bd8d5 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c | |||
@@ -122,7 +122,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev, | |||
122 | INIT_LIST_HEAD(&context->mmaps); | 122 | INIT_LIST_HEAD(&context->mmaps); |
123 | spin_lock_init(&context->mmap_lock); | 123 | spin_lock_init(&context->mmap_lock); |
124 | 124 | ||
125 | if (udata->outlen < sizeof(uresp)) { | 125 | if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) { |
126 | if (!warned++) | 126 | if (!warned++) |
127 | pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled."); | 127 | pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled."); |
128 | rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED; | 128 | rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED; |
@@ -140,7 +140,8 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev, | |||
140 | context->key += PAGE_SIZE; | 140 | context->key += PAGE_SIZE; |
141 | spin_unlock(&context->mmap_lock); | 141 | spin_unlock(&context->mmap_lock); |
142 | 142 | ||
143 | ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | 143 | ret = ib_copy_to_udata(udata, &uresp, |
144 | sizeof(uresp) - sizeof(uresp.reserved)); | ||
144 | if (ret) | 145 | if (ret) |
145 | goto err_mm; | 146 | goto err_mm; |
146 | 147 | ||
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h index 11ccd276e5d9..cbd0ce170728 100644 --- a/drivers/infiniband/hw/cxgb4/user.h +++ b/drivers/infiniband/hw/cxgb4/user.h | |||
@@ -48,6 +48,7 @@ struct c4iw_create_cq_resp { | |||
48 | __u32 cqid; | 48 | __u32 cqid; |
49 | __u32 size; | 49 | __u32 size; |
50 | __u32 qid_mask; | 50 | __u32 qid_mask; |
51 | __u32 reserved; /* explicit padding (optional for i386) */ | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | 54 | ||
@@ -74,5 +75,6 @@ struct c4iw_create_qp_resp { | |||
74 | struct c4iw_alloc_ucontext_resp { | 75 | struct c4iw_alloc_ucontext_resp { |
75 | __u64 status_page_key; | 76 | __u64 status_page_key; |
76 | __u32 status_page_size; | 77 | __u32 status_page_size; |
78 | __u32 reserved; /* explicit padding (optional for i386) */ | ||
77 | }; | 79 | }; |
78 | #endif | 80 | #endif |
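The reserved members pin down the user ABI: with a __u64 followed only by a __u32, the struct picks up 4 bytes of tail padding on x86_64 (8-byte __u64 alignment) but none on i386 (4-byte alignment), so the kernel and 32-bit userspace would disagree about sizeof. Making the padding explicit gives one size everywhere, and the matching ib_copy_to_udata(..., sizeof(uresp) - sizeof(uresp.reserved)) calls in cq.c and provider.c above keep old userspace, which predates the field, working. A sketch of the effect:

	struct demo {			/* implicit tail padding */
		__u64 key;
		__u32 size;
	};	/* sizeof: 12 on i386, 16 on x86_64 */

	struct demo_padded {		/* padding made explicit */
		__u64 key;
		__u32 size;
		__u32 reserved;
	};	/* sizeof: 16 on both */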
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c index e2f9a51f4a38..45802e97332e 100644 --- a/drivers/infiniband/hw/ipath/ipath_diag.c +++ b/drivers/infiniband/hw/ipath/ipath_diag.c | |||
@@ -346,6 +346,10 @@ static ssize_t ipath_diagpkt_write(struct file *fp, | |||
346 | ret = -EFAULT; | 346 | ret = -EFAULT; |
347 | goto bail; | 347 | goto bail; |
348 | } | 348 | } |
349 | dp.len = odp.len; | ||
350 | dp.unit = odp.unit; | ||
351 | dp.data = odp.data; | ||
352 | dp.pbc_wd = 0; | ||
349 | } else { | 353 | } else { |
350 | ret = -EINVAL; | 354 | ret = -EINVAL; |
351 | goto bail; | 355 | goto bail; |
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c index 26dfbc8ee0f1..01ba792791a0 100644 --- a/drivers/infiniband/hw/ipath/ipath_intr.c +++ b/drivers/infiniband/hw/ipath/ipath_intr.c | |||
@@ -70,7 +70,7 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd) | |||
70 | if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) { | 70 | if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) { |
71 | int i; | 71 | int i; |
72 | if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) && | 72 | if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) && |
73 | dd->ipath_lastcancel > jiffies) { | 73 | time_after(dd->ipath_lastcancel, jiffies)) { |
74 | __IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG, | 74 | __IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG, |
75 | "SendbufErrs %lx %lx", sbuf[0], | 75 | "SendbufErrs %lx %lx", sbuf[0], |
76 | sbuf[1]); | 76 | sbuf[1]); |
@@ -755,7 +755,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) | |||
755 | 755 | ||
756 | /* likely due to cancel; so suppress message unless verbose */ | 756 | /* likely due to cancel; so suppress message unless verbose */ |
757 | if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) && | 757 | if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) && |
758 | dd->ipath_lastcancel > jiffies) { | 758 | time_after(dd->ipath_lastcancel, jiffies)) { |
759 | /* armlaunch takes precedence; it often causes both. */ | 759 | /* armlaunch takes precedence; it often causes both. */ |
760 | ipath_cdbg(VERBOSE, | 760 | ipath_cdbg(VERBOSE, |
761 | "Suppressed %s error (%llx) after sendbuf cancel\n", | 761 | "Suppressed %s error (%llx) after sendbuf cancel\n", |
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c index 98ac18ec977e..17a517766ad2 100644 --- a/drivers/infiniband/hw/ipath/ipath_sdma.c +++ b/drivers/infiniband/hw/ipath/ipath_sdma.c | |||
@@ -247,7 +247,7 @@ static void sdma_abort_task(unsigned long opaque) | |||
247 | 247 | ||
248 | /* ipath_sdma_abort() is done, waiting for interrupt */ | 248 | /* ipath_sdma_abort() is done, waiting for interrupt */ |
249 | if (status == IPATH_SDMA_ABORT_DISARMED) { | 249 | if (status == IPATH_SDMA_ABORT_DISARMED) { |
250 | if (jiffies < dd->ipath_sdma_abort_intr_timeout) | 250 | if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout)) |
251 | goto resched_noprint; | 251 | goto resched_noprint; |
252 | /* give up, intr got lost somewhere */ | 252 | /* give up, intr got lost somewhere */ |
253 | ipath_dbg("give up waiting for SDMADISABLED intr\n"); | 253 | ipath_dbg("give up waiting for SDMADISABLED intr\n"); |
@@ -341,7 +341,7 @@ resched: | |||
341 | * JAG - this is bad to just have default be a loop without | 341 | * JAG - this is bad to just have default be a loop without |
342 | * state change | 342 | * state change |
343 | */ | 343 | */ |
344 | if (jiffies > dd->ipath_sdma_abort_jiffies) { | 344 | if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) { |
345 | ipath_dbg("looping with status 0x%08lx\n", | 345 | ipath_dbg("looping with status 0x%08lx\n", |
346 | dd->ipath_sdma_status); | 346 | dd->ipath_sdma_status); |
347 | dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ; | 347 | dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ; |
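
Both ipath hunks above replace raw '<'/'>' comparisons on jiffies with time_before()/time_after(). jiffies is an unsigned counter that wraps, so a direct comparison inverts its answer near the wrap point; the macros compare the signed difference and stay correct across it. A minimal sketch (time_after and HZ are the real kernel primitives; demo_* names are illustrative):

#include <linux/errno.h>
#include <linux/jiffies.h>

static bool demo_deadline_passed(unsigned long deadline)
{
	return time_after(jiffies, deadline);	/* wrap-safe */
}

static int demo_usage(void)
{
	unsigned long timeout = jiffies + 5 * HZ;	/* five seconds out */

	/* give up if the deadline already passed, as the abort path does */
	return demo_deadline_passed(timeout) ? -ETIMEDOUT : 0;
}
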
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 170dca608042..2d8c3397774f 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c | |||
@@ -73,7 +73,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr | |||
73 | { | 73 | { |
74 | struct mlx4_ib_dev *ibdev = to_mdev(pd->device); | 74 | struct mlx4_ib_dev *ibdev = to_mdev(pd->device); |
75 | struct mlx4_dev *dev = ibdev->dev; | 75 | struct mlx4_dev *dev = ibdev->dev; |
76 | int is_mcast; | 76 | int is_mcast = 0; |
77 | struct in6_addr in6; | 77 | struct in6_addr in6; |
78 | u16 vlan_tag; | 78 | u16 vlan_tag; |
79 | 79 | ||
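
The one-liner above initializes is_mcast because the address-resolution helper writes it only on some paths; without the initializer the caller may branch on stack garbage. An illustrative sketch of the hazard, with hypothetical demo_* names:

#include <linux/types.h>

static void demo_resolve(const u8 *mac, int *is_mcast)
{
	if (mac[0] & 1)		/* multicast bit set */
		*is_mcast = 1;
	/* unicast path returns without writing *is_mcast */
}

static int demo_caller(const u8 *mac)
{
	int is_mcast = 0;	/* must be initialized, as the patch does */

	demo_resolve(mac, &is_mcast);
	return is_mcast;
}
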
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 5f640814cc81..1066eec854a9 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
@@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf * | |||
102 | int err; | 102 | int err; |
103 | 103 | ||
104 | err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size, | 104 | err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size, |
105 | PAGE_SIZE * 2, &buf->buf); | 105 | PAGE_SIZE * 2, &buf->buf, GFP_KERNEL); |
106 | 106 | ||
107 | if (err) | 107 | if (err) |
108 | goto out; | 108 | goto out; |
@@ -113,7 +113,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf * | |||
113 | if (err) | 113 | if (err) |
114 | goto err_buf; | 114 | goto err_buf; |
115 | 115 | ||
116 | err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf); | 116 | err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL); |
117 | if (err) | 117 | if (err) |
118 | goto err_mtt; | 118 | goto err_mtt; |
119 | 119 | ||
@@ -209,7 +209,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector | |||
209 | 209 | ||
210 | uar = &to_mucontext(context)->uar; | 210 | uar = &to_mucontext(context)->uar; |
211 | } else { | 211 | } else { |
212 | err = mlx4_db_alloc(dev->dev, &cq->db, 1); | 212 | err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL); |
213 | if (err) | 213 | if (err) |
214 | goto err_cq; | 214 | goto err_cq; |
215 | 215 | ||
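
These cq.c hunks are part of plumbing a gfp_t argument through the mlx4 allocation helpers. Callers that can be reached while the block layer is writing out pages must use GFP_NOIO so the allocation cannot recurse into I/O and deadlock under memory pressure; ordinary callers, like the CQ path here, keep GFP_KERNEL. A sketch of the choice (mlx4_buf_alloc and the flags are real; the wrapper is hypothetical):

#include <linux/gfp.h>
#include <linux/mlx4/device.h>

static int demo_alloc_queue_buf(struct mlx4_dev *dev, struct mlx4_buf *buf,
				int size, bool in_io_path)
{
	gfp_t gfp = in_io_path ? GFP_NOIO : GFP_KERNEL;

	return mlx4_buf_alloc(dev, size, PAGE_SIZE * 2, buf, gfp);
}
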
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index fd36ec672632..287ad0564acd 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
@@ -478,10 +478,6 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
478 | if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE) | 478 | if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE) |
479 | return -EAGAIN; | 479 | return -EAGAIN; |
480 | 480 | ||
481 | /* QP0 forwarding only for Dom0 */ | ||
482 | if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave)) | ||
483 | return -EINVAL; | ||
484 | |||
485 | if (!dest_qpt) | 481 | if (!dest_qpt) |
486 | tun_qp = &tun_ctx->qp[0]; | 482 | tun_qp = &tun_ctx->qp[0]; |
487 | else | 483 | else |
@@ -667,6 +663,21 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port, | |||
667 | } | 663 | } |
668 | /* Class-specific handling */ | 664 | /* Class-specific handling */ |
669 | switch (mad->mad_hdr.mgmt_class) { | 665 | switch (mad->mad_hdr.mgmt_class) { |
666 | case IB_MGMT_CLASS_SUBN_LID_ROUTED: | ||
667 | case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: | ||
668 | /* 255 indicates the dom0 */ | ||
669 | if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) { | ||
670 | if (!mlx4_vf_smi_enabled(dev->dev, slave, port)) | ||
671 | return -EPERM; | ||
672 | /* for a VF, drop unsolicited MADs */ | ||
673 | if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) { | ||
674 | mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n", | ||
675 | slave, mad->mad_hdr.mgmt_class, | ||
676 | mad->mad_hdr.method); | ||
677 | return -EINVAL; | ||
678 | } | ||
679 | } | ||
680 | break; | ||
670 | case IB_MGMT_CLASS_SUBN_ADM: | 681 | case IB_MGMT_CLASS_SUBN_ADM: |
671 | if (mlx4_ib_demux_sa_handler(ibdev, port, slave, | 682 | if (mlx4_ib_demux_sa_handler(ibdev, port, slave, |
672 | (struct ib_sa_mad *) mad)) | 683 | (struct ib_sa_mad *) mad)) |
@@ -1165,10 +1176,6 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
1165 | if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE) | 1176 | if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE) |
1166 | return -EAGAIN; | 1177 | return -EAGAIN; |
1167 | 1178 | ||
1168 | /* QP0 forwarding only for Dom0 */ | ||
1169 | if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave)) | ||
1170 | return -EINVAL; | ||
1171 | |||
1172 | if (dest_qpt == IB_QPT_SMI) { | 1179 | if (dest_qpt == IB_QPT_SMI) { |
1173 | src_qpnum = 0; | 1180 | src_qpnum = 0; |
1174 | sqp = &sqp_ctx->qp[0]; | 1181 | sqp = &sqp_ctx->qp[0]; |
@@ -1285,11 +1292,6 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc | |||
1285 | "belongs to another slave\n", wc->src_qp); | 1292 | "belongs to another slave\n", wc->src_qp); |
1286 | return; | 1293 | return; |
1287 | } | 1294 | } |
1288 | if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) { | ||
1289 | mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: " | ||
1290 | "non-master trying to send QP0 packets\n", wc->src_qp); | ||
1291 | return; | ||
1292 | } | ||
1293 | 1295 | ||
1294 | /* Map transaction ID */ | 1296 | /* Map transaction ID */ |
1295 | ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map, | 1297 | ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map, |
@@ -1317,6 +1319,12 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc | |||
1317 | 1319 | ||
1318 | /* Class-specific handling */ | 1320 | /* Class-specific handling */ |
1319 | switch (tunnel->mad.mad_hdr.mgmt_class) { | 1321 | switch (tunnel->mad.mad_hdr.mgmt_class) { |
1322 | case IB_MGMT_CLASS_SUBN_LID_ROUTED: | ||
1323 | case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: | ||
1324 | if (slave != mlx4_master_func_num(dev->dev) && | ||
1325 | !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port)) | ||
1326 | return; | ||
1327 | break; | ||
1320 | case IB_MGMT_CLASS_SUBN_ADM: | 1328 | case IB_MGMT_CLASS_SUBN_ADM: |
1321 | if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave, | 1329 | if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave, |
1322 | (struct ib_sa_mad *) &tunnel->mad)) | 1330 | (struct ib_sa_mad *) &tunnel->mad)) |
@@ -1749,9 +1757,9 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, | |||
1749 | return -EEXIST; | 1757 | return -EEXIST; |
1750 | 1758 | ||
1751 | ctx->state = DEMUX_PV_STATE_STARTING; | 1759 | ctx->state = DEMUX_PV_STATE_STARTING; |
1752 | /* have QP0 only on port owner, and only if link layer is IB */ | 1760 | /* have QP0 only if link layer is IB */ |
1753 | if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) && | 1761 | if (rdma_port_get_link_layer(ibdev, ctx->port) == |
1754 | rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND) | 1762 | IB_LINK_LAYER_INFINIBAND) |
1755 | ctx->has_smi = 1; | 1763 | ctx->has_smi = 1; |
1756 | 1764 | ||
1757 | if (ctx->has_smi) { | 1765 | if (ctx->has_smi) { |
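
The mad.c changes above relax the old "QP0 only for the master" rule: a VF may now source and sink SMI traffic when the administrator has enabled it for that slave and port, and unsolicited MADs destined for other VFs are rejected. The gating rule reduces to roughly this predicate (mlx4_vf_smi_enabled and mlx4_master_func_num are real; the wrapper name is illustrative):

#include <linux/mlx4/device.h>

static bool demo_may_handle_smi(struct mlx4_dev *dev, int slave, int port)
{
	return slave == mlx4_master_func_num(dev) ||
	       mlx4_vf_smi_enabled(dev, slave, port);
}
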
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 1b6dbe156a37..3c3806aff712 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -544,12 +544,11 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, | |||
544 | return 0; | 544 | return 0; |
545 | } | 545 | } |
546 | 546 | ||
547 | static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols, | 547 | static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols, |
548 | u32 cap_mask) | 548 | u32 cap_mask) |
549 | { | 549 | { |
550 | struct mlx4_cmd_mailbox *mailbox; | 550 | struct mlx4_cmd_mailbox *mailbox; |
551 | int err; | 551 | int err; |
552 | u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH; | ||
553 | 552 | ||
554 | mailbox = mlx4_alloc_cmd_mailbox(dev->dev); | 553 | mailbox = mlx4_alloc_cmd_mailbox(dev->dev); |
555 | if (IS_ERR(mailbox)) | 554 | if (IS_ERR(mailbox)) |
@@ -563,8 +562,8 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols, | |||
563 | ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask); | 562 | ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask); |
564 | } | 563 | } |
565 | 564 | ||
566 | err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT, | 565 | err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, |
567 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); | 566 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); |
568 | 567 | ||
569 | mlx4_free_cmd_mailbox(dev->dev, mailbox); | 568 | mlx4_free_cmd_mailbox(dev->dev, mailbox); |
570 | return err; | 569 | return err; |
@@ -573,11 +572,20 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols, | |||
573 | static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, | 572 | static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, |
574 | struct ib_port_modify *props) | 573 | struct ib_port_modify *props) |
575 | { | 574 | { |
575 | struct mlx4_ib_dev *mdev = to_mdev(ibdev); | ||
576 | u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH; | ||
576 | struct ib_port_attr attr; | 577 | struct ib_port_attr attr; |
577 | u32 cap_mask; | 578 | u32 cap_mask; |
578 | int err; | 579 | int err; |
579 | 580 | ||
580 | mutex_lock(&to_mdev(ibdev)->cap_mask_mutex); | 581 | /* return OK if this is RoCE. CM calls ib_modify_port() regardless |
582 | * of whether port link layer is ETH or IB. For ETH ports, qkey | ||
583 | * violations and port capabilities are not meaningful. | ||
584 | */ | ||
585 | if (is_eth) | ||
586 | return 0; | ||
587 | |||
588 | mutex_lock(&mdev->cap_mask_mutex); | ||
581 | 589 | ||
582 | err = mlx4_ib_query_port(ibdev, port, &attr); | 590 | err = mlx4_ib_query_port(ibdev, port, &attr); |
583 | if (err) | 591 | if (err) |
@@ -586,9 +594,9 @@ static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, | |||
586 | cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) & | 594 | cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) & |
587 | ~props->clr_port_cap_mask; | 595 | ~props->clr_port_cap_mask; |
588 | 596 | ||
589 | err = mlx4_SET_PORT(to_mdev(ibdev), port, | 597 | err = mlx4_ib_SET_PORT(mdev, port, |
590 | !!(mask & IB_PORT_RESET_QKEY_CNTR), | 598 | !!(mask & IB_PORT_RESET_QKEY_CNTR), |
591 | cap_mask); | 599 | cap_mask); |
592 | 600 | ||
593 | out: | 601 | out: |
594 | mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); | 602 | mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index f589522fddfd..bb8c9dd442ae 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -156,6 +156,7 @@ enum mlx4_ib_qp_flags { | |||
156 | MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO, | 156 | MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO, |
157 | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, | 157 | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, |
158 | MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP, | 158 | MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP, |
159 | MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO, | ||
159 | MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30, | 160 | MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30, |
160 | MLX4_IB_SRIOV_SQP = 1 << 31, | 161 | MLX4_IB_SRIOV_SQP = 1 << 31, |
161 | }; | 162 | }; |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 41308af4163c..5b0cb8e2d807 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -608,9 +608,20 @@ static int qp_has_rq(struct ib_qp_init_attr *attr) | |||
608 | return !attr->srq; | 608 | return !attr->srq; |
609 | } | 609 | } |
610 | 610 | ||
611 | static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn) | ||
612 | { | ||
613 | int i; | ||
614 | for (i = 0; i < dev->caps.num_ports; i++) { | ||
615 | if (qpn == dev->caps.qp0_proxy[i]) | ||
616 | return !!dev->caps.qp0_qkey[i]; | ||
617 | } | ||
618 | return 0; | ||
619 | } | ||
620 | |||
611 | static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | 621 | static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, |
612 | struct ib_qp_init_attr *init_attr, | 622 | struct ib_qp_init_attr *init_attr, |
613 | struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp) | 623 | struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp, |
624 | gfp_t gfp) | ||
614 | { | 625 | { |
615 | int qpn; | 626 | int qpn; |
616 | int err; | 627 | int err; |
@@ -625,10 +636,13 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
625 | !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) { | 636 | !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) { |
626 | if (init_attr->qp_type == IB_QPT_GSI) | 637 | if (init_attr->qp_type == IB_QPT_GSI) |
627 | qp_type = MLX4_IB_QPT_PROXY_GSI; | 638 | qp_type = MLX4_IB_QPT_PROXY_GSI; |
628 | else if (mlx4_is_master(dev->dev)) | 639 | else { |
629 | qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER; | 640 | if (mlx4_is_master(dev->dev) || |
630 | else | 641 | qp0_enabled_vf(dev->dev, sqpn)) |
631 | qp_type = MLX4_IB_QPT_PROXY_SMI; | 642 | qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER; |
643 | else | ||
644 | qp_type = MLX4_IB_QPT_PROXY_SMI; | ||
645 | } | ||
632 | } | 646 | } |
633 | qpn = sqpn; | 647 | qpn = sqpn; |
634 | /* add extra sg entry for tunneling */ | 648 | /* add extra sg entry for tunneling */ |
@@ -643,7 +657,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
643 | return -EINVAL; | 657 | return -EINVAL; |
644 | if (tnl_init->proxy_qp_type == IB_QPT_GSI) | 658 | if (tnl_init->proxy_qp_type == IB_QPT_GSI) |
645 | qp_type = MLX4_IB_QPT_TUN_GSI; | 659 | qp_type = MLX4_IB_QPT_TUN_GSI; |
646 | else if (tnl_init->slave == mlx4_master_func_num(dev->dev)) | 660 | else if (tnl_init->slave == mlx4_master_func_num(dev->dev) || |
661 | mlx4_vf_smi_enabled(dev->dev, tnl_init->slave, | ||
662 | tnl_init->port)) | ||
647 | qp_type = MLX4_IB_QPT_TUN_SMI_OWNER; | 663 | qp_type = MLX4_IB_QPT_TUN_SMI_OWNER; |
648 | else | 664 | else |
649 | qp_type = MLX4_IB_QPT_TUN_SMI; | 665 | qp_type = MLX4_IB_QPT_TUN_SMI; |
@@ -658,14 +674,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
658 | if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI || | 674 | if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI || |
659 | (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER | | 675 | (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER | |
660 | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) { | 676 | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) { |
661 | sqp = kzalloc(sizeof (struct mlx4_ib_sqp), GFP_KERNEL); | 677 | sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp); |
662 | if (!sqp) | 678 | if (!sqp) |
663 | return -ENOMEM; | 679 | return -ENOMEM; |
664 | qp = &sqp->qp; | 680 | qp = &sqp->qp; |
665 | qp->pri.vid = 0xFFFF; | 681 | qp->pri.vid = 0xFFFF; |
666 | qp->alt.vid = 0xFFFF; | 682 | qp->alt.vid = 0xFFFF; |
667 | } else { | 683 | } else { |
668 | qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL); | 684 | qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp); |
669 | if (!qp) | 685 | if (!qp) |
670 | return -ENOMEM; | 686 | return -ENOMEM; |
671 | qp->pri.vid = 0xFFFF; | 687 | qp->pri.vid = 0xFFFF; |
@@ -748,14 +764,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
748 | goto err; | 764 | goto err; |
749 | 765 | ||
750 | if (qp_has_rq(init_attr)) { | 766 | if (qp_has_rq(init_attr)) { |
751 | err = mlx4_db_alloc(dev->dev, &qp->db, 0); | 767 | err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp); |
752 | if (err) | 768 | if (err) |
753 | goto err; | 769 | goto err; |
754 | 770 | ||
755 | *qp->db.db = 0; | 771 | *qp->db.db = 0; |
756 | } | 772 | } |
757 | 773 | ||
758 | if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) { | 774 | if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) { |
759 | err = -ENOMEM; | 775 | err = -ENOMEM; |
760 | goto err_db; | 776 | goto err_db; |
761 | } | 777 | } |
@@ -765,13 +781,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
765 | if (err) | 781 | if (err) |
766 | goto err_buf; | 782 | goto err_buf; |
767 | 783 | ||
768 | err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); | 784 | err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp); |
769 | if (err) | 785 | if (err) |
770 | goto err_mtt; | 786 | goto err_mtt; |
771 | 787 | ||
772 | qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL); | 788 | qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp); |
773 | qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL); | 789 | qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp); |
774 | |||
775 | if (!qp->sq.wrid || !qp->rq.wrid) { | 790 | if (!qp->sq.wrid || !qp->rq.wrid) { |
776 | err = -ENOMEM; | 791 | err = -ENOMEM; |
777 | goto err_wrid; | 792 | goto err_wrid; |
@@ -801,7 +816,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
801 | goto err_proxy; | 816 | goto err_proxy; |
802 | } | 817 | } |
803 | 818 | ||
804 | err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); | 819 | err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp); |
805 | if (err) | 820 | if (err) |
806 | goto err_qpn; | 821 | goto err_qpn; |
807 | 822 | ||
@@ -1040,7 +1055,10 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
1040 | struct mlx4_ib_qp *qp = NULL; | 1055 | struct mlx4_ib_qp *qp = NULL; |
1041 | int err; | 1056 | int err; |
1042 | u16 xrcdn = 0; | 1057 | u16 xrcdn = 0; |
1058 | gfp_t gfp; | ||
1043 | 1059 | ||
1060 | gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ? | ||
1061 | GFP_NOIO : GFP_KERNEL; | ||
1044 | /* | 1062 | /* |
1045 | * We only support LSO, vendor flag1, and multicast loopback blocking, | 1063 | * We only support LSO, vendor flag1, and multicast loopback blocking, |
1046 | * and only for kernel UD QPs. | 1064 | * and only for kernel UD QPs. |
@@ -1049,7 +1067,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
1049 | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK | | 1067 | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK | |
1050 | MLX4_IB_SRIOV_TUNNEL_QP | | 1068 | MLX4_IB_SRIOV_TUNNEL_QP | |
1051 | MLX4_IB_SRIOV_SQP | | 1069 | MLX4_IB_SRIOV_SQP | |
1052 | MLX4_IB_QP_NETIF)) | 1070 | MLX4_IB_QP_NETIF | |
1071 | MLX4_IB_QP_CREATE_USE_GFP_NOIO)) | ||
1053 | return ERR_PTR(-EINVAL); | 1072 | return ERR_PTR(-EINVAL); |
1054 | 1073 | ||
1055 | if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) { | 1074 | if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) { |
@@ -1059,7 +1078,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
1059 | 1078 | ||
1060 | if (init_attr->create_flags && | 1079 | if (init_attr->create_flags && |
1061 | (udata || | 1080 | (udata || |
1062 | ((init_attr->create_flags & ~MLX4_IB_SRIOV_SQP) && | 1081 | ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP | MLX4_IB_QP_CREATE_USE_GFP_NOIO)) && |
1063 | init_attr->qp_type != IB_QPT_UD) || | 1082 | init_attr->qp_type != IB_QPT_UD) || |
1064 | ((init_attr->create_flags & MLX4_IB_SRIOV_SQP) && | 1083 | ((init_attr->create_flags & MLX4_IB_SRIOV_SQP) && |
1065 | init_attr->qp_type > IB_QPT_GSI))) | 1084 | init_attr->qp_type > IB_QPT_GSI))) |
@@ -1079,7 +1098,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
1079 | case IB_QPT_RC: | 1098 | case IB_QPT_RC: |
1080 | case IB_QPT_UC: | 1099 | case IB_QPT_UC: |
1081 | case IB_QPT_RAW_PACKET: | 1100 | case IB_QPT_RAW_PACKET: |
1082 | qp = kzalloc(sizeof *qp, GFP_KERNEL); | 1101 | qp = kzalloc(sizeof *qp, gfp); |
1083 | if (!qp) | 1102 | if (!qp) |
1084 | return ERR_PTR(-ENOMEM); | 1103 | return ERR_PTR(-ENOMEM); |
1085 | qp->pri.vid = 0xFFFF; | 1104 | qp->pri.vid = 0xFFFF; |
@@ -1088,7 +1107,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
1088 | case IB_QPT_UD: | 1107 | case IB_QPT_UD: |
1089 | { | 1108 | { |
1090 | err = create_qp_common(to_mdev(pd->device), pd, init_attr, | 1109 | err = create_qp_common(to_mdev(pd->device), pd, init_attr, |
1091 | udata, 0, &qp); | 1110 | udata, 0, &qp, gfp); |
1092 | if (err) | 1111 | if (err) |
1093 | return ERR_PTR(err); | 1112 | return ERR_PTR(err); |
1094 | 1113 | ||
@@ -1106,7 +1125,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
1106 | 1125 | ||
1107 | err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, | 1126 | err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, |
1108 | get_sqp_num(to_mdev(pd->device), init_attr), | 1127 | get_sqp_num(to_mdev(pd->device), init_attr), |
1109 | &qp); | 1128 | &qp, gfp); |
1110 | if (err) | 1129 | if (err) |
1111 | return ERR_PTR(err); | 1130 | return ERR_PTR(err); |
1112 | 1131 | ||
@@ -1930,6 +1949,19 @@ out: | |||
1930 | return err; | 1949 | return err; |
1931 | } | 1950 | } |
1932 | 1951 | ||
1952 | static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey) | ||
1953 | { | ||
1954 | int i; | ||
1955 | for (i = 0; i < dev->caps.num_ports; i++) { | ||
1956 | if (qpn == dev->caps.qp0_proxy[i] || | ||
1957 | qpn == dev->caps.qp0_tunnel[i]) { | ||
1958 | *qkey = dev->caps.qp0_qkey[i]; | ||
1959 | return 0; | ||
1960 | } | ||
1961 | } | ||
1962 | return -EINVAL; | ||
1963 | } | ||
1964 | |||
1933 | static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, | 1965 | static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, |
1934 | struct ib_send_wr *wr, | 1966 | struct ib_send_wr *wr, |
1935 | void *wqe, unsigned *mlx_seg_len) | 1967 | void *wqe, unsigned *mlx_seg_len) |
@@ -1987,8 +2019,13 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, | |||
1987 | cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); | 2019 | cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); |
1988 | 2020 | ||
1989 | sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); | 2021 | sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); |
1990 | if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) | 2022 | if (mlx4_is_master(mdev->dev)) { |
1991 | return -EINVAL; | 2023 | if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) |
2024 | return -EINVAL; | ||
2025 | } else { | ||
2026 | if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) | ||
2027 | return -EINVAL; | ||
2028 | } | ||
1992 | sqp->ud_header.deth.qkey = cpu_to_be32(qkey); | 2029 | sqp->ud_header.deth.qkey = cpu_to_be32(qkey); |
1993 | sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); | 2030 | sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); |
1994 | 2031 | ||
@@ -2370,7 +2407,8 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, | |||
2370 | 2407 | ||
2371 | static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, | 2408 | static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, |
2372 | struct mlx4_wqe_datagram_seg *dseg, | 2409 | struct mlx4_wqe_datagram_seg *dseg, |
2373 | struct ib_send_wr *wr, enum ib_qp_type qpt) | 2410 | struct ib_send_wr *wr, |
2411 | enum mlx4_ib_qp_type qpt) | ||
2374 | { | 2412 | { |
2375 | union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av; | 2413 | union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av; |
2376 | struct mlx4_av sqp_av = {0}; | 2414 | struct mlx4_av sqp_av = {0}; |
@@ -2383,8 +2421,10 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, | |||
2383 | cpu_to_be32(0xf0000000); | 2421 | cpu_to_be32(0xf0000000); |
2384 | 2422 | ||
2385 | memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av)); | 2423 | memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av)); |
2386 | /* This function used only for sending on QP1 proxies */ | 2424 | if (qpt == MLX4_IB_QPT_PROXY_GSI) |
2387 | dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]); | 2425 | dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]); |
2426 | else | ||
2427 | dseg->dqpn = cpu_to_be32(dev->dev->caps.qp0_tunnel[port - 1]); | ||
2388 | /* Use QKEY from the QP context, which is set by master */ | 2428 | /* Use QKEY from the QP context, which is set by master */ |
2389 | dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY); | 2429 | dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY); |
2390 | } | 2430 | } |
@@ -2679,11 +2719,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2679 | break; | 2719 | break; |
2680 | 2720 | ||
2681 | case MLX4_IB_QPT_PROXY_SMI_OWNER: | 2721 | case MLX4_IB_QPT_PROXY_SMI_OWNER: |
2682 | if (unlikely(!mlx4_is_master(to_mdev(ibqp->device)->dev))) { | ||
2683 | err = -ENOSYS; | ||
2684 | *bad_wr = wr; | ||
2685 | goto out; | ||
2686 | } | ||
2687 | err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); | 2722 | err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); |
2688 | if (unlikely(err)) { | 2723 | if (unlikely(err)) { |
2689 | *bad_wr = wr; | 2724 | *bad_wr = wr; |
@@ -2700,16 +2735,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2700 | size += seglen / 16; | 2735 | size += seglen / 16; |
2701 | break; | 2736 | break; |
2702 | case MLX4_IB_QPT_PROXY_SMI: | 2737 | case MLX4_IB_QPT_PROXY_SMI: |
2703 | /* don't allow QP0 sends on guests */ | ||
2704 | err = -ENOSYS; | ||
2705 | *bad_wr = wr; | ||
2706 | goto out; | ||
2707 | case MLX4_IB_QPT_PROXY_GSI: | 2738 | case MLX4_IB_QPT_PROXY_GSI: |
2708 | /* If we are tunneling special qps, this is a UD qp. | 2739 | /* If we are tunneling special qps, this is a UD qp. |
2709 | * In this case we first add a UD segment targeting | 2740 | * In this case we first add a UD segment targeting |
2710 | * the tunnel qp, and then add a header with address | 2741 | * the tunnel qp, and then add a header with address |
2711 | * information */ | 2742 | * information */ |
2712 | set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, ibqp->qp_type); | 2743 | set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, |
2744 | qp->mlx4_ib_qp_type); | ||
2713 | wqe += sizeof (struct mlx4_wqe_datagram_seg); | 2745 | wqe += sizeof (struct mlx4_wqe_datagram_seg); |
2714 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; | 2746 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; |
2715 | build_tunnel_header(wr, wqe, &seglen); | 2747 | build_tunnel_header(wr, wqe, &seglen); |
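
With MLX4_IB_QP_CREATE_USE_GFP_NOIO wired through create_qp_common() above, a consumer created from a memory-reclaim-sensitive context can opt in at creation time. A hedged caller-side sketch, assuming the core IB_QP_CREATE_USE_GFP_NOIO flag introduced by this series; the capability values are illustrative only:

#include <rdma/ib_verbs.h>

static struct ib_qp *demo_create_noio_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {
		.send_cq	= cq,
		.recv_cq	= cq,
		.qp_type	= IB_QPT_RC,
		.create_flags	= IB_QP_CREATE_USE_GFP_NOIO,
		.sq_sig_type	= IB_SIGNAL_ALL_WR,
		.cap = {
			.max_send_wr	= 64,
			.max_recv_wr	= 64,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
		},
	};

	return ib_create_qp(pd, &attr);
}
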
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 60c5fb025fc7..62d9285300af 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c | |||
@@ -134,13 +134,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | |||
134 | if (err) | 134 | if (err) |
135 | goto err_mtt; | 135 | goto err_mtt; |
136 | } else { | 136 | } else { |
137 | err = mlx4_db_alloc(dev->dev, &srq->db, 0); | 137 | err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL); |
138 | if (err) | 138 | if (err) |
139 | goto err_srq; | 139 | goto err_srq; |
140 | 140 | ||
141 | *srq->db.db = 0; | 141 | *srq->db.db = 0; |
142 | 142 | ||
143 | if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) { | 143 | if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf, |
144 | GFP_KERNEL)) { | ||
144 | err = -ENOMEM; | 145 | err = -ENOMEM; |
145 | goto err_db; | 146 | goto err_db; |
146 | } | 147 | } |
@@ -165,7 +166,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | |||
165 | if (err) | 166 | if (err) |
166 | goto err_buf; | 167 | goto err_buf; |
167 | 168 | ||
168 | err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf); | 169 | err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL); |
169 | if (err) | 170 | if (err) |
170 | goto err_mtt; | 171 | goto err_mtt; |
171 | 172 | ||
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index 5a38e43eca65..cb4c66e723b5 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c | |||
@@ -389,8 +389,10 @@ struct mlx4_port { | |||
389 | struct mlx4_ib_dev *dev; | 389 | struct mlx4_ib_dev *dev; |
390 | struct attribute_group pkey_group; | 390 | struct attribute_group pkey_group; |
391 | struct attribute_group gid_group; | 391 | struct attribute_group gid_group; |
392 | u8 port_num; | 392 | struct device_attribute enable_smi_admin; |
393 | struct device_attribute smi_enabled; | ||
393 | int slave; | 394 | int slave; |
395 | u8 port_num; | ||
394 | }; | 396 | }; |
395 | 397 | ||
396 | 398 | ||
@@ -558,6 +560,101 @@ err: | |||
558 | return NULL; | 560 | return NULL; |
559 | } | 561 | } |
560 | 562 | ||
563 | static ssize_t sysfs_show_smi_enabled(struct device *dev, | ||
564 | struct device_attribute *attr, char *buf) | ||
565 | { | ||
566 | struct mlx4_port *p = | ||
567 | container_of(attr, struct mlx4_port, smi_enabled); | ||
568 | ssize_t len = 0; | ||
569 | |||
570 | if (mlx4_vf_smi_enabled(p->dev->dev, p->slave, p->port_num)) | ||
571 | len = sprintf(buf, "%d\n", 1); | ||
572 | else | ||
573 | len = sprintf(buf, "%d\n", 0); | ||
574 | |||
575 | return len; | ||
576 | } | ||
577 | |||
578 | static ssize_t sysfs_show_enable_smi_admin(struct device *dev, | ||
579 | struct device_attribute *attr, | ||
580 | char *buf) | ||
581 | { | ||
582 | struct mlx4_port *p = | ||
583 | container_of(attr, struct mlx4_port, enable_smi_admin); | ||
584 | ssize_t len = 0; | ||
585 | |||
586 | if (mlx4_vf_get_enable_smi_admin(p->dev->dev, p->slave, p->port_num)) | ||
587 | len = sprintf(buf, "%d\n", 1); | ||
588 | else | ||
589 | len = sprintf(buf, "%d\n", 0); | ||
590 | |||
591 | return len; | ||
592 | } | ||
593 | |||
594 | static ssize_t sysfs_store_enable_smi_admin(struct device *dev, | ||
595 | struct device_attribute *attr, | ||
596 | const char *buf, size_t count) | ||
597 | { | ||
598 | struct mlx4_port *p = | ||
599 | container_of(attr, struct mlx4_port, enable_smi_admin); | ||
600 | int enable; | ||
601 | |||
602 | if (sscanf(buf, "%i", &enable) != 1 || | ||
603 | enable < 0 || enable > 1) | ||
604 | return -EINVAL; | ||
605 | |||
606 | if (mlx4_vf_set_enable_smi_admin(p->dev->dev, p->slave, p->port_num, enable)) | ||
607 | return -EINVAL; | ||
608 | return count; | ||
609 | } | ||
610 | |||
611 | static int add_vf_smi_entries(struct mlx4_port *p) | ||
612 | { | ||
613 | int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) == | ||
614 | IB_LINK_LAYER_ETHERNET; | ||
615 | int ret; | ||
616 | |||
617 | /* do not display entries if eth transport, or if master */ | ||
618 | if (is_eth || p->slave == mlx4_master_func_num(p->dev->dev)) | ||
619 | return 0; | ||
620 | |||
621 | sysfs_attr_init(&p->smi_enabled.attr); | ||
622 | p->smi_enabled.show = sysfs_show_smi_enabled; | ||
623 | p->smi_enabled.store = NULL; | ||
624 | p->smi_enabled.attr.name = "smi_enabled"; | ||
625 | p->smi_enabled.attr.mode = 0444; | ||
626 | ret = sysfs_create_file(&p->kobj, &p->smi_enabled.attr); | ||
627 | if (ret) { | ||
628 | pr_err("failed to create smi_enabled\n"); | ||
629 | return ret; | ||
630 | } | ||
631 | |||
632 | sysfs_attr_init(&p->enable_smi_admin.attr); | ||
633 | p->enable_smi_admin.show = sysfs_show_enable_smi_admin; | ||
634 | p->enable_smi_admin.store = sysfs_store_enable_smi_admin; | ||
635 | p->enable_smi_admin.attr.name = "enable_smi_admin"; | ||
636 | p->enable_smi_admin.attr.mode = 0644; | ||
637 | ret = sysfs_create_file(&p->kobj, &p->enable_smi_admin.attr); | ||
638 | if (ret) { | ||
639 | pr_err("failed to create enable_smi_admin\n"); | ||
640 | sysfs_remove_file(&p->kobj, &p->smi_enabled.attr); | ||
641 | return ret; | ||
642 | } | ||
643 | return 0; | ||
644 | } | ||
645 | |||
646 | static void remove_vf_smi_entries(struct mlx4_port *p) | ||
647 | { | ||
648 | int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) == | ||
649 | IB_LINK_LAYER_ETHERNET; | ||
650 | |||
651 | if (is_eth || p->slave == mlx4_master_func_num(p->dev->dev)) | ||
652 | return; | ||
653 | |||
654 | sysfs_remove_file(&p->kobj, &p->smi_enabled.attr); | ||
655 | sysfs_remove_file(&p->kobj, &p->enable_smi_admin.attr); | ||
656 | } | ||
657 | |||
561 | static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave) | 658 | static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave) |
562 | { | 659 | { |
563 | struct mlx4_port *p; | 660 | struct mlx4_port *p; |
@@ -602,6 +699,10 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave) | |||
602 | if (ret) | 699 | if (ret) |
603 | goto err_free_gid; | 700 | goto err_free_gid; |
604 | 701 | ||
702 | ret = add_vf_smi_entries(p); | ||
703 | if (ret) | ||
704 | goto err_free_gid; | ||
705 | |||
605 | list_add_tail(&p->kobj.entry, &dev->pkeys.pkey_port_list[slave]); | 706 | list_add_tail(&p->kobj.entry, &dev->pkeys.pkey_port_list[slave]); |
606 | return 0; | 707 | return 0; |
607 | 708 | ||
@@ -669,6 +770,7 @@ err_add: | |||
669 | mport = container_of(p, struct mlx4_port, kobj); | 770 | mport = container_of(p, struct mlx4_port, kobj); |
670 | sysfs_remove_group(p, &mport->pkey_group); | 771 | sysfs_remove_group(p, &mport->pkey_group); |
671 | sysfs_remove_group(p, &mport->gid_group); | 772 | sysfs_remove_group(p, &mport->gid_group); |
773 | remove_vf_smi_entries(mport); | ||
672 | kobject_put(p); | 774 | kobject_put(p); |
673 | } | 775 | } |
674 | kobject_put(dev->dev_ports_parent[slave]); | 776 | kobject_put(dev->dev_ports_parent[slave]); |
@@ -713,6 +815,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device) | |||
713 | port = container_of(p, struct mlx4_port, kobj); | 815 | port = container_of(p, struct mlx4_port, kobj); |
714 | sysfs_remove_group(p, &port->pkey_group); | 816 | sysfs_remove_group(p, &port->pkey_group); |
715 | sysfs_remove_group(p, &port->gid_group); | 817 | sysfs_remove_group(p, &port->gid_group); |
818 | remove_vf_smi_entries(port); | ||
716 | kobject_put(p); | 819 | kobject_put(p); |
717 | kobject_put(device->dev_ports_parent[slave]); | 820 | kobject_put(device->dev_ports_parent[slave]); |
718 | } | 821 | } |
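
add_vf_smi_entries() above creates two per-port files under the VF's sysfs directory: a read-only smi_enabled reporting the current state and a writable enable_smi_admin for the administrator. Reduced to one read-only file, the wiring looks roughly like this (sysfs_attr_init and sysfs_create_file are real; the demo_* names are illustrative):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", 1);
}

static int demo_add_entry(struct kobject *kobj, struct device_attribute *da)
{
	sysfs_attr_init(&da->attr);
	da->attr.name = "smi_enabled";
	da->attr.mode = 0444;
	da->show = demo_show;
	da->store = NULL;	/* read-only, like smi_enabled above */
	return sysfs_create_file(kobj, &da->attr);
}

From user space the administrator then reads smi_enabled and writes 0 or 1 into enable_smi_admin to toggle the capability for that VF port.
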
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 62bb6b49dc1d..8ae4f896cb41 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c | |||
@@ -32,6 +32,7 @@ | |||
32 | 32 | ||
33 | #include <linux/kref.h> | 33 | #include <linux/kref.h> |
34 | #include <rdma/ib_umem.h> | 34 | #include <rdma/ib_umem.h> |
35 | #include <rdma/ib_user_verbs.h> | ||
35 | #include "mlx5_ib.h" | 36 | #include "mlx5_ib.h" |
36 | #include "user.h" | 37 | #include "user.h" |
37 | 38 | ||
@@ -602,14 +603,24 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, | |||
602 | int *cqe_size, int *index, int *inlen) | 603 | int *cqe_size, int *index, int *inlen) |
603 | { | 604 | { |
604 | struct mlx5_ib_create_cq ucmd; | 605 | struct mlx5_ib_create_cq ucmd; |
606 | size_t ucmdlen; | ||
605 | int page_shift; | 607 | int page_shift; |
606 | int npages; | 608 | int npages; |
607 | int ncont; | 609 | int ncont; |
608 | int err; | 610 | int err; |
609 | 611 | ||
610 | if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) | 612 | ucmdlen = |
613 | (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) < | ||
614 | sizeof(ucmd)) ? (sizeof(ucmd) - | ||
615 | sizeof(ucmd.reserved)) : sizeof(ucmd); | ||
616 | |||
617 | if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) | ||
611 | return -EFAULT; | 618 | return -EFAULT; |
612 | 619 | ||
620 | if (ucmdlen == sizeof(ucmd) && | ||
621 | ucmd.reserved != 0) | ||
622 | return -EINVAL; | ||
623 | |||
613 | if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128) | 624 | if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128) |
614 | return -EINVAL; | 625 | return -EINVAL; |
615 | 626 | ||
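
create_cq_user() now negotiates the command length instead of assuming the newest layout: if the caller's inlen (minus the uverbs header) indicates the old, shorter struct, only that much is copied in; if the full struct arrives, the new reserved field must be zero so it can be given meaning later without ambiguity. A sketch with a hypothetical demo_ucmd (ib_copy_from_udata and struct ib_uverbs_cmd_hdr are real):

#include <linux/errno.h>
#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

/* Hypothetical command layout; only 'reserved' is new in this ABI rev. */
struct demo_ucmd {
	__u64 buf_addr;
	__u32 cqe_size;
	__u32 reserved;
};

static int demo_parse_ucmd(struct ib_udata *udata, struct demo_ucmd *ucmd)
{
	size_t ucmdlen =
		(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
		 sizeof(*ucmd)) ?
			sizeof(*ucmd) - sizeof(ucmd->reserved) : sizeof(*ucmd);

	if (ib_copy_from_udata(ucmd, udata, ucmdlen))
		return -EFAULT;

	if (ucmdlen == sizeof(*ucmd) && ucmd->reserved != 0)
		return -EINVAL;

	return 0;
}
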
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 50541586e0a6..f2ccf1a5a291 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -264,8 +264,6 @@ struct mlx5_ib_mr { | |||
264 | __be64 *pas; | 264 | __be64 *pas; |
265 | dma_addr_t dma; | 265 | dma_addr_t dma; |
266 | int npages; | 266 | int npages; |
267 | struct completion done; | ||
268 | enum ib_wc_status status; | ||
269 | struct mlx5_ib_dev *dev; | 267 | struct mlx5_ib_dev *dev; |
270 | struct mlx5_create_mkey_mbox_out out; | 268 | struct mlx5_create_mkey_mbox_out out; |
271 | struct mlx5_core_sig_ctx *sig; | 269 | struct mlx5_core_sig_ctx *sig; |
@@ -277,6 +275,17 @@ struct mlx5_ib_fast_reg_page_list { | |||
277 | dma_addr_t map; | 275 | dma_addr_t map; |
278 | }; | 276 | }; |
279 | 277 | ||
278 | struct mlx5_ib_umr_context { | ||
279 | enum ib_wc_status status; | ||
280 | struct completion done; | ||
281 | }; | ||
282 | |||
283 | static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context) | ||
284 | { | ||
285 | context->status = -1; | ||
286 | init_completion(&context->done); | ||
287 | } | ||
288 | |||
280 | struct umr_common { | 289 | struct umr_common { |
281 | struct ib_pd *pd; | 290 | struct ib_pd *pd; |
282 | struct ib_cq *cq; | 291 | struct ib_cq *cq; |
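
The new mlx5_ib_umr_context moves UMR completion tracking off the long-lived MR and onto the waiter's stack: the poster stores the context's address in wr_id, the CQ handler completes exactly that context, and concurrent UMR operations no longer share per-MR state. The shape of the pattern (demo_* names illustrative; complete() and wait_for_completion() are real):

#include <linux/completion.h>
#include <rdma/ib_verbs.h>

struct demo_umr_ctx {
	enum ib_wc_status status;
	struct completion done;
};

static void demo_handle_wc(struct ib_wc *wc)
{
	struct demo_umr_ctx *ctx =
		(struct demo_umr_ctx *)(unsigned long)wc->wr_id;

	ctx->status = wc->status;
	complete(&ctx->done);	/* wakes exactly the posting waiter */
}

static int demo_post_and_wait(struct demo_umr_ctx *ctx)
{
	init_completion(&ctx->done);
	/* ... post a send WR with wr_id = (u64)(unsigned long)ctx ... */
	wait_for_completion(&ctx->done);
	return ctx->status == IB_WC_SUCCESS ? 0 : -EFAULT;
}
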
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 81392b26d078..afa873bd028e 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
@@ -73,6 +73,8 @@ static void reg_mr_callback(int status, void *context) | |||
73 | struct mlx5_cache_ent *ent = &cache->ent[c]; | 73 | struct mlx5_cache_ent *ent = &cache->ent[c]; |
74 | u8 key; | 74 | u8 key; |
75 | unsigned long flags; | 75 | unsigned long flags; |
76 | struct mlx5_mr_table *table = &dev->mdev.priv.mr_table; | ||
77 | int err; | ||
76 | 78 | ||
77 | spin_lock_irqsave(&ent->lock, flags); | 79 | spin_lock_irqsave(&ent->lock, flags); |
78 | ent->pending--; | 80 | ent->pending--; |
@@ -107,6 +109,13 @@ static void reg_mr_callback(int status, void *context) | |||
107 | ent->cur++; | 109 | ent->cur++; |
108 | ent->size++; | 110 | ent->size++; |
109 | spin_unlock_irqrestore(&ent->lock, flags); | 111 | spin_unlock_irqrestore(&ent->lock, flags); |
112 | |||
113 | write_lock_irqsave(&table->lock, flags); | ||
114 | err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key), | ||
115 | &mr->mmr); | ||
116 | if (err) | ||
117 | pr_err("Error inserting to mr tree. 0x%x\n", -err); | ||
118 | write_unlock_irqrestore(&table->lock, flags); | ||
110 | } | 119 | } |
111 | 120 | ||
112 | static int add_keys(struct mlx5_ib_dev *dev, int c, int num) | 121 | static int add_keys(struct mlx5_ib_dev *dev, int c, int num) |
@@ -699,7 +708,7 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev, | |||
699 | 708 | ||
700 | void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context) | 709 | void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context) |
701 | { | 710 | { |
702 | struct mlx5_ib_mr *mr; | 711 | struct mlx5_ib_umr_context *context; |
703 | struct ib_wc wc; | 712 | struct ib_wc wc; |
704 | int err; | 713 | int err; |
705 | 714 | ||
@@ -712,9 +721,9 @@ void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context) | |||
712 | if (err == 0) | 721 | if (err == 0) |
713 | break; | 722 | break; |
714 | 723 | ||
715 | mr = (struct mlx5_ib_mr *)(unsigned long)wc.wr_id; | 724 | context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id; |
716 | mr->status = wc.status; | 725 | context->status = wc.status; |
717 | complete(&mr->done); | 726 | complete(&context->done); |
718 | } | 727 | } |
719 | ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); | 728 | ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); |
720 | } | 729 | } |
@@ -726,11 +735,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, | |||
726 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | 735 | struct mlx5_ib_dev *dev = to_mdev(pd->device); |
727 | struct device *ddev = dev->ib_dev.dma_device; | 736 | struct device *ddev = dev->ib_dev.dma_device; |
728 | struct umr_common *umrc = &dev->umrc; | 737 | struct umr_common *umrc = &dev->umrc; |
738 | struct mlx5_ib_umr_context umr_context; | ||
729 | struct ib_send_wr wr, *bad; | 739 | struct ib_send_wr wr, *bad; |
730 | struct mlx5_ib_mr *mr; | 740 | struct mlx5_ib_mr *mr; |
731 | struct ib_sge sg; | 741 | struct ib_sge sg; |
732 | int size = sizeof(u64) * npages; | 742 | int size = sizeof(u64) * npages; |
733 | int err; | 743 | int err = 0; |
734 | int i; | 744 | int i; |
735 | 745 | ||
736 | for (i = 0; i < 1; i++) { | 746 | for (i = 0; i < 1; i++) { |
@@ -751,7 +761,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, | |||
751 | mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL); | 761 | mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL); |
752 | if (!mr->pas) { | 762 | if (!mr->pas) { |
753 | err = -ENOMEM; | 763 | err = -ENOMEM; |
754 | goto error; | 764 | goto free_mr; |
755 | } | 765 | } |
756 | 766 | ||
757 | mlx5_ib_populate_pas(dev, umem, page_shift, | 767 | mlx5_ib_populate_pas(dev, umem, page_shift, |
@@ -760,44 +770,46 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, | |||
760 | mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size, | 770 | mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size, |
761 | DMA_TO_DEVICE); | 771 | DMA_TO_DEVICE); |
762 | if (dma_mapping_error(ddev, mr->dma)) { | 772 | if (dma_mapping_error(ddev, mr->dma)) { |
763 | kfree(mr->pas); | ||
764 | err = -ENOMEM; | 773 | err = -ENOMEM; |
765 | goto error; | 774 | goto free_pas; |
766 | } | 775 | } |
767 | 776 | ||
768 | memset(&wr, 0, sizeof(wr)); | 777 | memset(&wr, 0, sizeof(wr)); |
769 | wr.wr_id = (u64)(unsigned long)mr; | 778 | wr.wr_id = (u64)(unsigned long)&umr_context; |
770 | prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags); | 779 | prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags); |
771 | 780 | ||
772 | /* We serialize polls so one process does not kidnap another's | 781 | mlx5_ib_init_umr_context(&umr_context); |
773 | * completion. This is not a problem since wr is completed in | ||
774 | * around 1 usec | ||
775 | */ | ||
776 | down(&umrc->sem); | 782 | down(&umrc->sem); |
777 | init_completion(&mr->done); | ||
778 | err = ib_post_send(umrc->qp, &wr, &bad); | 783 | err = ib_post_send(umrc->qp, &wr, &bad); |
779 | if (err) { | 784 | if (err) { |
780 | mlx5_ib_warn(dev, "post send failed, err %d\n", err); | 785 | mlx5_ib_warn(dev, "post send failed, err %d\n", err); |
781 | up(&umrc->sem); | 786 | goto unmap_dma; |
782 | goto error; | 787 | } else { |
788 | wait_for_completion(&umr_context.done); | ||
789 | if (umr_context.status != IB_WC_SUCCESS) { | ||
790 | mlx5_ib_warn(dev, "reg umr failed\n"); | ||
791 | err = -EFAULT; | ||
792 | } | ||
783 | } | 793 | } |
784 | wait_for_completion(&mr->done); | ||
785 | up(&umrc->sem); | ||
786 | 794 | ||
795 | mr->mmr.iova = virt_addr; | ||
796 | mr->mmr.size = len; | ||
797 | mr->mmr.pd = to_mpd(pd)->pdn; | ||
798 | |||
799 | unmap_dma: | ||
800 | up(&umrc->sem); | ||
787 | dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); | 801 | dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); |
802 | |||
803 | free_pas: | ||
788 | kfree(mr->pas); | 804 | kfree(mr->pas); |
789 | 805 | ||
790 | if (mr->status != IB_WC_SUCCESS) { | 806 | free_mr: |
791 | mlx5_ib_warn(dev, "reg umr failed\n"); | 807 | if (err) { |
792 | err = -EFAULT; | 808 | free_cached_mr(dev, mr); |
793 | goto error; | 809 | return ERR_PTR(err); |
794 | } | 810 | } |
795 | 811 | ||
796 | return mr; | 812 | return mr; |
797 | |||
798 | error: | ||
799 | free_cached_mr(dev, mr); | ||
800 | return ERR_PTR(err); | ||
801 | } | 813 | } |
802 | 814 | ||
803 | static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, | 815 | static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, |
@@ -926,24 +938,26 @@ error: | |||
926 | static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | 938 | static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) |
927 | { | 939 | { |
928 | struct umr_common *umrc = &dev->umrc; | 940 | struct umr_common *umrc = &dev->umrc; |
941 | struct mlx5_ib_umr_context umr_context; | ||
929 | struct ib_send_wr wr, *bad; | 942 | struct ib_send_wr wr, *bad; |
930 | int err; | 943 | int err; |
931 | 944 | ||
932 | memset(&wr, 0, sizeof(wr)); | 945 | memset(&wr, 0, sizeof(wr)); |
933 | wr.wr_id = (u64)(unsigned long)mr; | 946 | wr.wr_id = (u64)(unsigned long)&umr_context; |
934 | prep_umr_unreg_wqe(dev, &wr, mr->mmr.key); | 947 | prep_umr_unreg_wqe(dev, &wr, mr->mmr.key); |
935 | 948 | ||
949 | mlx5_ib_init_umr_context(&umr_context); | ||
936 | down(&umrc->sem); | 950 | down(&umrc->sem); |
937 | init_completion(&mr->done); | ||
938 | err = ib_post_send(umrc->qp, &wr, &bad); | 951 | err = ib_post_send(umrc->qp, &wr, &bad); |
939 | if (err) { | 952 | if (err) { |
940 | up(&umrc->sem); | 953 | up(&umrc->sem); |
941 | mlx5_ib_dbg(dev, "err %d\n", err); | 954 | mlx5_ib_dbg(dev, "err %d\n", err); |
942 | goto error; | 955 | goto error; |
956 | } else { | ||
957 | wait_for_completion(&umr_context.done); | ||
958 | up(&umrc->sem); | ||
943 | } | 959 | } |
944 | wait_for_completion(&mr->done); | 960 | if (umr_context.status != IB_WC_SUCCESS) { |
945 | up(&umrc->sem); | ||
946 | if (mr->status != IB_WC_SUCCESS) { | ||
947 | mlx5_ib_warn(dev, "unreg umr failed\n"); | 961 | mlx5_ib_warn(dev, "unreg umr failed\n"); |
948 | err = -EFAULT; | 962 | err = -EFAULT; |
949 | goto error; | 963 | goto error; |
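
The reg_umr() rework above also converts its error handling to layered unwind labels: unmap_dma and free_pas run on success and failure alike, while free_mr releases the cached MR only when err is set. The canonical shape, stripped to two resources (demo_* names illustrative):

#include <linux/errno.h>
#include <linux/slab.h>

static int demo_do_work(void *buf)
{
	return buf ? 0 : -EINVAL;
}

static int demo_layered_unwind(void)
{
	void *a, *b;
	int err;

	a = kmalloc(64, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kmalloc(64, GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto free_a;
	}

	err = demo_do_work(b);	/* may fail; cleanups below still run */

	kfree(b);
free_a:
	kfree(a);
	return err;
}
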
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index dc930ed21eca..d13ddf1c0033 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -574,6 +574,10 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
574 | uar_index = uuarn_to_uar_index(&context->uuari, uuarn); | 574 | uar_index = uuarn_to_uar_index(&context->uuari, uuarn); |
575 | mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index); | 575 | mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index); |
576 | 576 | ||
577 | qp->rq.offset = 0; | ||
578 | qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); | ||
579 | qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; | ||
580 | |||
577 | err = set_user_buf_size(dev, qp, &ucmd); | 581 | err = set_user_buf_size(dev, qp, &ucmd); |
578 | if (err) | 582 | if (err) |
579 | goto err_uuar; | 583 | goto err_uuar; |
@@ -2078,6 +2082,7 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr, | |||
2078 | struct ib_sig_domain *wire = &sig_attrs->wire; | 2082 | struct ib_sig_domain *wire = &sig_attrs->wire; |
2079 | int ret, selector; | 2083 | int ret, selector; |
2080 | 2084 | ||
2085 | memset(bsf, 0, sizeof(*bsf)); | ||
2081 | switch (sig_attrs->mem.sig_type) { | 2086 | switch (sig_attrs->mem.sig_type) { |
2082 | case IB_SIG_TYPE_T10_DIF: | 2087 | case IB_SIG_TYPE_T10_DIF: |
2083 | if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF) | 2088 | if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF) |
@@ -2090,9 +2095,11 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr, | |||
2090 | /* Same block structure */ | 2095 | /* Same block structure */ |
2091 | basic->bsf_size_sbs = 1 << 4; | 2096 | basic->bsf_size_sbs = 1 << 4; |
2092 | if (mem->sig.dif.bg_type == wire->sig.dif.bg_type) | 2097 | if (mem->sig.dif.bg_type == wire->sig.dif.bg_type) |
2093 | basic->wire.copy_byte_mask = 0xff; | 2098 | basic->wire.copy_byte_mask |= 0xc0; |
2094 | else | 2099 | if (mem->sig.dif.app_tag == wire->sig.dif.app_tag) |
2095 | basic->wire.copy_byte_mask = 0x3f; | 2100 | basic->wire.copy_byte_mask |= 0x30; |
2101 | if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag) | ||
2102 | basic->wire.copy_byte_mask |= 0x0f; | ||
2096 | } else | 2103 | } else |
2097 | basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval); | 2104 | basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval); |
2098 | 2105 | ||
@@ -2131,9 +2138,13 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, | |||
2131 | int ret; | 2138 | int ret; |
2132 | int wqe_size; | 2139 | int wqe_size; |
2133 | 2140 | ||
2134 | if (!wr->wr.sig_handover.prot) { | 2141 | if (!wr->wr.sig_handover.prot || |
2142 | (data_key == wr->wr.sig_handover.prot->lkey && | ||
2143 | data_va == wr->wr.sig_handover.prot->addr && | ||
2144 | data_len == wr->wr.sig_handover.prot->length)) { | ||
2135 | /** | 2145 | /** |
2136 | * Source domain doesn't contain signature information | 2146 | * Source domain doesn't contain signature information |
2147 | * or data and protection are interleaved in memory. | ||
2137 | * So need construct: | 2148 | * So need construct: |
2138 | * ------------------ | 2149 | * ------------------ |
2139 | * | data_klm | | 2150 | * | data_klm | |
@@ -2187,23 +2198,13 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, | |||
2187 | data_sentry->bcount = cpu_to_be16(block_size); | 2198 | data_sentry->bcount = cpu_to_be16(block_size); |
2188 | data_sentry->key = cpu_to_be32(data_key); | 2199 | data_sentry->key = cpu_to_be32(data_key); |
2189 | data_sentry->va = cpu_to_be64(data_va); | 2200 | data_sentry->va = cpu_to_be64(data_va); |
2201 | data_sentry->stride = cpu_to_be16(block_size); | ||
2202 | |||
2190 | prot_sentry->bcount = cpu_to_be16(prot_size); | 2203 | prot_sentry->bcount = cpu_to_be16(prot_size); |
2191 | prot_sentry->key = cpu_to_be32(prot_key); | 2204 | prot_sentry->key = cpu_to_be32(prot_key); |
2205 | prot_sentry->va = cpu_to_be64(prot_va); | ||
2206 | prot_sentry->stride = cpu_to_be16(prot_size); | ||
2192 | 2207 | ||
2193 | if (prot_key == data_key && prot_va == data_va) { | ||
2194 | /** | ||
2195 | * The data and protection are interleaved | ||
2196 | * in a single memory region | ||
2197 | **/ | ||
2198 | prot_sentry->va = cpu_to_be64(data_va + block_size); | ||
2199 | prot_sentry->stride = cpu_to_be16(block_size + prot_size); | ||
2200 | data_sentry->stride = prot_sentry->stride; | ||
2201 | } else { | ||
2202 | /* The data and protection are two different buffers */ | ||
2203 | prot_sentry->va = cpu_to_be64(prot_va); | ||
2204 | data_sentry->stride = cpu_to_be16(block_size); | ||
2205 | prot_sentry->stride = cpu_to_be16(prot_size); | ||
2206 | } | ||
2207 | wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) + | 2208 | wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) + |
2208 | sizeof(*prot_sentry), 64); | 2209 | sizeof(*prot_sentry), 64); |
2209 | } | 2210 | } |
@@ -2275,7 +2276,10 @@ static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, | |||
2275 | 2276 | ||
2276 | /* length of the protected region, data + protection */ | 2277 | /* length of the protected region, data + protection */ |
2277 | region_len = wr->sg_list->length; | 2278 | region_len = wr->sg_list->length; |
2278 | if (wr->wr.sig_handover.prot) | 2279 | if (wr->wr.sig_handover.prot && |
2280 | (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey || | ||
2281 | wr->wr.sig_handover.prot->addr != wr->sg_list->addr || | ||
2282 | wr->wr.sig_handover.prot->length != wr->sg_list->length)) | ||
2279 | region_len += wr->wr.sig_handover.prot->length; | 2283 | region_len += wr->wr.sig_handover.prot->length; |
2280 | 2284 | ||
2281 | /** | 2285 | /** |
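
The signature-handover hunks teach mlx5 to recognize interleaved data and protection: when the protection sge aliases the data sge exactly, both already live in one region, so set_sig_data_segment() builds the single-region layout and set_sig_umr_wr() must not count the protection length twice. The test reduces to roughly this predicate (field names per this kernel's ib_send_wr; the helper name is illustrative):

#include <rdma/ib_verbs.h>

static bool demo_sig_single_region(struct ib_send_wr *wr)
{
	struct ib_sge *prot = wr->wr.sig_handover.prot;

	/* no protection buffer, or one that aliases the data buffer */
	return !prot ||
	       (prot->lkey == wr->sg_list->lkey &&
		prot->addr == wr->sg_list->addr &&
		prot->length == wr->sg_list->length);
}
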
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 210b3eaf188a..384af6dec5eb 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/mlx5/srq.h> | 35 | #include <linux/mlx5/srq.h> |
36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
37 | #include <rdma/ib_umem.h> | 37 | #include <rdma/ib_umem.h> |
38 | #include <rdma/ib_user_verbs.h> | ||
38 | 39 | ||
39 | #include "mlx5_ib.h" | 40 | #include "mlx5_ib.h" |
40 | #include "user.h" | 41 | #include "user.h" |
@@ -78,16 +79,27 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, | |||
78 | { | 79 | { |
79 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | 80 | struct mlx5_ib_dev *dev = to_mdev(pd->device); |
80 | struct mlx5_ib_create_srq ucmd; | 81 | struct mlx5_ib_create_srq ucmd; |
82 | size_t ucmdlen; | ||
81 | int err; | 83 | int err; |
82 | int npages; | 84 | int npages; |
83 | int page_shift; | 85 | int page_shift; |
84 | int ncont; | 86 | int ncont; |
85 | u32 offset; | 87 | u32 offset; |
86 | 88 | ||
87 | if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { | 89 | ucmdlen = |
90 | (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) < | ||
91 | sizeof(ucmd)) ? (sizeof(ucmd) - | ||
92 | sizeof(ucmd.reserved)) : sizeof(ucmd); | ||
93 | |||
94 | if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) { | ||
88 | mlx5_ib_dbg(dev, "failed copy udata\n"); | 95 | mlx5_ib_dbg(dev, "failed copy udata\n"); |
89 | return -EFAULT; | 96 | return -EFAULT; |
90 | } | 97 | } |
98 | |||
99 | if (ucmdlen == sizeof(ucmd) && | ||
100 | ucmd.reserved != 0) | ||
101 | return -EINVAL; | ||
102 | |||
91 | srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); | 103 | srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); |
92 | 104 | ||
93 | srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size, | 105 | srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size, |
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h index 0f4f8e42a17f..d0ba264ac1ed 100644 --- a/drivers/infiniband/hw/mlx5/user.h +++ b/drivers/infiniband/hw/mlx5/user.h | |||
@@ -91,6 +91,7 @@ struct mlx5_ib_create_cq { | |||
91 | __u64 buf_addr; | 91 | __u64 buf_addr; |
92 | __u64 db_addr; | 92 | __u64 db_addr; |
93 | __u32 cqe_size; | 93 | __u32 cqe_size; |
94 | __u32 reserved; /* explicit padding (optional on i386) */ | ||
94 | }; | 95 | }; |
95 | 96 | ||
96 | struct mlx5_ib_create_cq_resp { | 97 | struct mlx5_ib_create_cq_resp { |
@@ -109,6 +110,7 @@ struct mlx5_ib_create_srq { | |||
109 | __u64 buf_addr; | 110 | __u64 buf_addr; |
110 | __u64 db_addr; | 111 | __u64 db_addr; |
111 | __u32 flags; | 112 | __u32 flags; |
113 | __u32 reserved; /* explicit padding (optional on i386) */ | ||
112 | }; | 114 | }; |
113 | 115 | ||
114 | struct mlx5_ib_create_srq_resp { | 116 | struct mlx5_ib_create_srq_resp { |
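Both create commands gain an explicit trailing __u32 so the structures have the same size and layout for 32- and 64-bit userspace: i386 aligns __u64 to 4 bytes and adds no implicit tail padding, so the struct would otherwise be 20 bytes there against 24 bytes on 64-bit, and kernel and userspace could disagree on the command length. A compile-time guard of the kind such ABI structs can carry, shown as a sketch (the struct mirrors mlx5_ib_create_srq above):

    #include <linux/types.h>

    struct example_create_cmd {
            __u64 buf_addr;
            __u64 db_addr;
            __u32 flags;
            __u32 reserved;         /* explicit padding */
    };

    /* a multiple of 8 bytes => identical layout for 32/64-bit userspace */
    _Static_assert(sizeof(struct example_create_cmd) % 8 == 0,
                   "ABI struct must not rely on implicit tail padding");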
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 353c7b05a90a..3b2a6dc8ea99 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -68,7 +68,6 @@ MODULE_VERSION(DRV_VERSION); | |||
68 | int max_mtu = 9000; | 68 | int max_mtu = 9000; |
69 | int interrupt_mod_interval = 0; | 69 | int interrupt_mod_interval = 0; |
70 | 70 | ||
71 | |||
72 | /* Interoperability */ | 71 | /* Interoperability */ |
73 | int mpa_version = 1; | 72 | int mpa_version = 1; |
74 | module_param(mpa_version, int, 0644); | 73 | module_param(mpa_version, int, 0644); |
@@ -112,6 +111,16 @@ static struct pci_device_id nes_pci_table[] = { | |||
112 | 111 | ||
113 | MODULE_DEVICE_TABLE(pci, nes_pci_table); | 112 | MODULE_DEVICE_TABLE(pci, nes_pci_table); |
114 | 113 | ||
114 | /* registered nes netlink callbacks */ | ||
115 | static struct ibnl_client_cbs nes_nl_cb_table[] = { | ||
116 | [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, | ||
117 | [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, | ||
118 | [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, | ||
119 | [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, | ||
120 | [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, | ||
121 | [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} | ||
122 | }; | ||
123 | |||
115 | static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *); | 124 | static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *); |
116 | static int nes_net_event(struct notifier_block *, unsigned long, void *); | 125 | static int nes_net_event(struct notifier_block *, unsigned long, void *); |
117 | static int nes_notifiers_registered; | 126 | static int nes_notifiers_registered; |
@@ -672,6 +681,17 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) | |||
672 | } | 681 | } |
673 | nes_notifiers_registered++; | 682 | nes_notifiers_registered++; |
674 | 683 | ||
684 | if (ibnl_add_client(RDMA_NL_NES, RDMA_NL_IWPM_NUM_OPS, nes_nl_cb_table)) | ||
685 | printk(KERN_ERR PFX "%s[%u]: Failed to add netlink callback\n", | ||
686 | __func__, __LINE__); | ||
687 | |||
688 | ret = iwpm_init(RDMA_NL_NES); | ||
689 | if (ret) { | ||
690 | printk(KERN_ERR PFX "%s: port mapper initialization failed\n", | ||
691 | pci_name(pcidev)); | ||
692 | goto bail7; | ||
693 | } | ||
694 | |||
675 | INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status); | 695 | INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status); |
676 | 696 | ||
677 | /* Initialize network devices */ | 697 | /* Initialize network devices */ |
@@ -710,6 +730,7 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) | |||
710 | 730 | ||
711 | nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n", | 731 | nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n", |
712 | nesdev->netdev_count, nesdev->nesadapter->netdev_count); | 732 | nesdev->netdev_count, nesdev->nesadapter->netdev_count); |
733 | ibnl_remove_client(RDMA_NL_NES); | ||
713 | 734 | ||
714 | nes_notifiers_registered--; | 735 | nes_notifiers_registered--; |
715 | if (nes_notifiers_registered == 0) { | 736 | if (nes_notifiers_registered == 0) { |
@@ -773,6 +794,8 @@ static void nes_remove(struct pci_dev *pcidev) | |||
773 | nesdev->nesadapter->netdev_count--; | 794 | nesdev->nesadapter->netdev_count--; |
774 | } | 795 | } |
775 | } | 796 | } |
797 | ibnl_remove_client(RDMA_NL_NES); | ||
798 | iwpm_exit(RDMA_NL_NES); | ||
776 | 799 | ||
777 | nes_notifiers_registered--; | 800 | nes_notifiers_registered--; |
778 | if (nes_notifiers_registered == 0) { | 801 | if (nes_notifiers_registered == 0) { |
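nes_probe() now registers the driver as an RDMA netlink client for the iwarp port-mapper ops and initializes the port mapper, and both the probe failure path and nes_remove() unwind the registration. The pairing in outline, a sketch using only the entry points that appear in the hunks above:

    /* probe side: register callbacks, then bring up the port mapper;
     * undo the netlink client if iwpm_init() fails */
    static int example_pm_setup(void)
    {
            int ret;

            if (ibnl_add_client(RDMA_NL_NES, RDMA_NL_IWPM_NUM_OPS,
                                nes_nl_cb_table))
                    pr_err("failed to add netlink client\n");

            ret = iwpm_init(RDMA_NL_NES);
            if (ret)
                    ibnl_remove_client(RDMA_NL_NES);
            return ret;
    }

    /* remove side: tear down both registrations */
    static void example_pm_teardown(void)
    {
            ibnl_remove_client(RDMA_NL_NES);
            iwpm_exit(RDMA_NL_NES);
    }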
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index 33cc58941a3e..bd9d132f11c7 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h | |||
@@ -51,6 +51,8 @@ | |||
51 | #include <rdma/ib_pack.h> | 51 | #include <rdma/ib_pack.h> |
52 | #include <rdma/rdma_cm.h> | 52 | #include <rdma/rdma_cm.h> |
53 | #include <rdma/iw_cm.h> | 53 | #include <rdma/iw_cm.h> |
54 | #include <rdma/rdma_netlink.h> | ||
55 | #include <rdma/iw_portmap.h> | ||
54 | 56 | ||
55 | #define NES_SEND_FIRST_WRITE | 57 | #define NES_SEND_FIRST_WRITE |
56 | 58 | ||
@@ -130,6 +132,7 @@ | |||
130 | #define NES_DBG_IW_TX 0x00040000 | 132 | #define NES_DBG_IW_TX 0x00040000 |
131 | #define NES_DBG_SHUTDOWN 0x00080000 | 133 | #define NES_DBG_SHUTDOWN 0x00080000 |
132 | #define NES_DBG_PAU 0x00100000 | 134 | #define NES_DBG_PAU 0x00100000 |
135 | #define NES_DBG_NLMSG 0x00200000 | ||
133 | #define NES_DBG_RSVD1 0x10000000 | 136 | #define NES_DBG_RSVD1 0x10000000 |
134 | #define NES_DBG_RSVD2 0x20000000 | 137 | #define NES_DBG_RSVD2 0x20000000 |
135 | #define NES_DBG_RSVD3 0x40000000 | 138 | #define NES_DBG_RSVD3 0x40000000 |
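NES_DBG_NLMSG takes the next free bit in the driver's debug mask so the new port-mapper messages can be enabled separately from the existing categories. The nes_debug() macro itself is not part of this excerpt; its assumed shape is a simple bitmask gate against the module's debug level:

    /* assumed shape of the gate behind nes_debug(NES_DBG_NLMSG, ...) */
    extern unsigned int nes_debug_level;

    #define nes_debug_sketch(flag, fmt, ...)                        \
            do {                                                    \
                    if (nes_debug_level & (flag))                   \
                            pr_info("nes: " fmt, ##__VA_ARGS__);    \
            } while (0)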
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index dfa9df484505..6f09a72e78d7 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. | 2 | * Copyright (c) 2006 - 2014 Intel Corporation. All rights reserved. |
3 | * | 3 | * |
4 | * This software is available to you under a choice of one of two | 4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -59,6 +59,7 @@ | |||
59 | #include <net/route.h> | 59 | #include <net/route.h> |
60 | #include <net/ip_fib.h> | 60 | #include <net/ip_fib.h> |
61 | #include <net/tcp.h> | 61 | #include <net/tcp.h> |
62 | #include <linux/fcntl.h> | ||
62 | 63 | ||
63 | #include "nes.h" | 64 | #include "nes.h" |
64 | 65 | ||
@@ -166,7 +167,6 @@ int nes_rem_ref_cm_node(struct nes_cm_node *cm_node) | |||
166 | { | 167 | { |
167 | return rem_ref_cm_node(cm_node->cm_core, cm_node); | 168 | return rem_ref_cm_node(cm_node->cm_core, cm_node); |
168 | } | 169 | } |
169 | |||
170 | /** | 170 | /** |
171 | * create_event | 171 | * create_event |
172 | */ | 172 | */ |
@@ -482,11 +482,11 @@ static void form_cm_frame(struct sk_buff *skb, | |||
482 | iph->ttl = 0x40; | 482 | iph->ttl = 0x40; |
483 | iph->protocol = 0x06; /* IPPROTO_TCP */ | 483 | iph->protocol = 0x06; /* IPPROTO_TCP */ |
484 | 484 | ||
485 | iph->saddr = htonl(cm_node->loc_addr); | 485 | iph->saddr = htonl(cm_node->mapped_loc_addr); |
486 | iph->daddr = htonl(cm_node->rem_addr); | 486 | iph->daddr = htonl(cm_node->mapped_rem_addr); |
487 | 487 | ||
488 | tcph->source = htons(cm_node->loc_port); | 488 | tcph->source = htons(cm_node->mapped_loc_port); |
489 | tcph->dest = htons(cm_node->rem_port); | 489 | tcph->dest = htons(cm_node->mapped_rem_port); |
490 | tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num); | 490 | tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num); |
491 | 491 | ||
492 | if (flags & SET_ACK) { | 492 | if (flags & SET_ACK) { |
@@ -525,6 +525,100 @@ static void form_cm_frame(struct sk_buff *skb, | |||
525 | cm_packets_created++; | 525 | cm_packets_created++; |
526 | } | 526 | } |
527 | 527 | ||
528 | /* | ||
529 | * nes_create_sockaddr - Record ip addr and tcp port in a sockaddr struct | ||
530 | */ | ||
531 | static void nes_create_sockaddr(__be32 ip_addr, __be16 port, | ||
532 | struct sockaddr_storage *addr) | ||
533 | { | ||
534 | struct sockaddr_in *nes_sockaddr = (struct sockaddr_in *)addr; | ||
535 | nes_sockaddr->sin_family = AF_INET; | ||
536 | memcpy(&nes_sockaddr->sin_addr.s_addr, &ip_addr, sizeof(__be32)); | ||
537 | nes_sockaddr->sin_port = port; | ||
538 | } | ||
539 | |||
540 | /* | ||
541 | * nes_create_mapinfo - Create a mapinfo object in the port mapper data base | ||
542 | */ | ||
543 | static int nes_create_mapinfo(struct nes_cm_info *cm_info) | ||
544 | { | ||
545 | struct sockaddr_storage local_sockaddr; | ||
546 | struct sockaddr_storage mapped_sockaddr; | ||
547 | |||
548 | nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port), | ||
549 | &local_sockaddr); | ||
550 | nes_create_sockaddr(htonl(cm_info->mapped_loc_addr), | ||
551 | htons(cm_info->mapped_loc_port), &mapped_sockaddr); | ||
552 | |||
553 | return iwpm_create_mapinfo(&local_sockaddr, | ||
554 | &mapped_sockaddr, RDMA_NL_NES); | ||
555 | } | ||
556 | |||
557 | /* | ||
558 | * nes_remove_mapinfo - Remove a mapinfo object from the port mapper database | ||
559 | * and send a remove mapping op message to | ||
560 | * the userspace port mapper | ||
561 | */ | ||
562 | static int nes_remove_mapinfo(u32 loc_addr, u16 loc_port, | ||
563 | u32 mapped_loc_addr, u16 mapped_loc_port) | ||
564 | { | ||
565 | struct sockaddr_storage local_sockaddr; | ||
566 | struct sockaddr_storage mapped_sockaddr; | ||
567 | |||
568 | nes_create_sockaddr(htonl(loc_addr), htons(loc_port), &local_sockaddr); | ||
569 | nes_create_sockaddr(htonl(mapped_loc_addr), htons(mapped_loc_port), | ||
570 | &mapped_sockaddr); | ||
571 | |||
572 | iwpm_remove_mapinfo(&local_sockaddr, &mapped_sockaddr); | ||
573 | return iwpm_remove_mapping(&local_sockaddr, RDMA_NL_NES); | ||
574 | } | ||
575 | |||
576 | /* | ||
577 | * nes_form_pm_msg - Form a port mapper message with mapping info | ||
578 | */ | ||
579 | static void nes_form_pm_msg(struct nes_cm_info *cm_info, | ||
580 | struct iwpm_sa_data *pm_msg) | ||
581 | { | ||
582 | nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port), | ||
583 | &pm_msg->loc_addr); | ||
584 | nes_create_sockaddr(htonl(cm_info->rem_addr), htons(cm_info->rem_port), | ||
585 | &pm_msg->rem_addr); | ||
586 | } | ||
587 | |||
588 | /* | ||
589 | * nes_form_reg_msg - Form a port mapper message with dev info | ||
590 | */ | ||
591 | static void nes_form_reg_msg(struct nes_vnic *nesvnic, | ||
592 | struct iwpm_dev_data *pm_msg) | ||
593 | { | ||
594 | memcpy(pm_msg->dev_name, nesvnic->nesibdev->ibdev.name, | ||
595 | IWPM_DEVNAME_SIZE); | ||
596 | memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE); | ||
597 | } | ||
598 | |||
599 | /* | ||
600 | * nes_record_pm_msg - Save the received mapping info | ||
601 | */ | ||
602 | static void nes_record_pm_msg(struct nes_cm_info *cm_info, | ||
603 | struct iwpm_sa_data *pm_msg) | ||
604 | { | ||
605 | struct sockaddr_in *mapped_loc_addr = | ||
606 | (struct sockaddr_in *)&pm_msg->mapped_loc_addr; | ||
607 | struct sockaddr_in *mapped_rem_addr = | ||
608 | (struct sockaddr_in *)&pm_msg->mapped_rem_addr; | ||
609 | |||
610 | if (mapped_loc_addr->sin_family == AF_INET) { | ||
611 | cm_info->mapped_loc_addr = | ||
612 | ntohl(mapped_loc_addr->sin_addr.s_addr); | ||
613 | cm_info->mapped_loc_port = ntohs(mapped_loc_addr->sin_port); | ||
614 | } | ||
615 | if (mapped_rem_addr->sin_family == AF_INET) { | ||
616 | cm_info->mapped_rem_addr = | ||
617 | ntohl(mapped_rem_addr->sin_addr.s_addr); | ||
618 | cm_info->mapped_rem_port = ntohs(mapped_rem_addr->sin_port); | ||
619 | } | ||
620 | } | ||
621 | |||
528 | /** | 622 | /** |
529 | * print_core - dump a cm core | 623 | * print_core - dump a cm core |
530 | */ | 624 | */ |
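These helpers bridge the driver's host-order address/port fields and the sockaddr_storage view the iwpm API takes; every caller converts with htonl()/htons() first because nes keeps addresses in host order internally. A small usage sketch tying them together (cm_info fields as declared in nes_cm.h below; error handling trimmed):

    /* sketch: publish a mapping for a connection, then retire it */
    static int example_map_conn(struct nes_cm_info *cm_info)
    {
            int ret;

            ret = nes_create_mapinfo(cm_info);      /* loc -> mapped_loc */
            if (ret)
                    return ret;
            /* ... connection is live ... */
            return nes_remove_mapinfo(cm_info->loc_addr, cm_info->loc_port,
                                      cm_info->mapped_loc_addr,
                                      cm_info->mapped_loc_port);
    }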
@@ -1147,8 +1241,11 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, | |||
1147 | loc_addr, loc_port, | 1241 | loc_addr, loc_port, |
1148 | cm_node->rem_addr, cm_node->rem_port, | 1242 | cm_node->rem_addr, cm_node->rem_port, |
1149 | rem_addr, rem_port); | 1243 | rem_addr, rem_port); |
1150 | if ((cm_node->loc_addr == loc_addr) && (cm_node->loc_port == loc_port) && | 1244 | if ((cm_node->mapped_loc_addr == loc_addr) && |
1151 | (cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) { | 1245 | (cm_node->mapped_loc_port == loc_port) && |
1246 | (cm_node->mapped_rem_addr == rem_addr) && | ||
1247 | (cm_node->mapped_rem_port == rem_port)) { | ||
1248 | |||
1152 | add_ref_cm_node(cm_node); | 1249 | add_ref_cm_node(cm_node); |
1153 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | 1250 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); |
1154 | return cm_node; | 1251 | return cm_node; |
@@ -1165,18 +1262,28 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, | |||
1165 | * find_listener - find a cm node listening on this addr-port pair | 1262 | * find_listener - find a cm node listening on this addr-port pair |
1166 | */ | 1263 | */ |
1167 | static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, | 1264 | static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, |
1168 | nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state) | 1265 | nes_addr_t dst_addr, u16 dst_port, |
1266 | enum nes_cm_listener_state listener_state, int local) | ||
1169 | { | 1267 | { |
1170 | unsigned long flags; | 1268 | unsigned long flags; |
1171 | struct nes_cm_listener *listen_node; | 1269 | struct nes_cm_listener *listen_node; |
1270 | nes_addr_t listen_addr; | ||
1271 | u16 listen_port; | ||
1172 | 1272 | ||
1173 | /* walk list and find cm_node associated with this session ID */ | 1273 | /* walk list and find cm_node associated with this session ID */ |
1174 | spin_lock_irqsave(&cm_core->listen_list_lock, flags); | 1274 | spin_lock_irqsave(&cm_core->listen_list_lock, flags); |
1175 | list_for_each_entry(listen_node, &cm_core->listen_list.list, list) { | 1275 | list_for_each_entry(listen_node, &cm_core->listen_list.list, list) { |
1276 | if (local) { | ||
1277 | listen_addr = listen_node->loc_addr; | ||
1278 | listen_port = listen_node->loc_port; | ||
1279 | } else { | ||
1280 | listen_addr = listen_node->mapped_loc_addr; | ||
1281 | listen_port = listen_node->mapped_loc_port; | ||
1282 | } | ||
1176 | /* compare node pair, return node handle if a match */ | 1283 | /* compare node pair, return node handle if a match */ |
1177 | if (((listen_node->loc_addr == dst_addr) || | 1284 | if (((listen_addr == dst_addr) || |
1178 | listen_node->loc_addr == 0x00000000) && | 1285 | listen_addr == 0x00000000) && |
1179 | (listen_node->loc_port == dst_port) && | 1286 | (listen_port == dst_port) && |
1180 | (listener_state & listen_node->listener_state)) { | 1287 | (listener_state & listen_node->listener_state)) { |
1181 | atomic_inc(&listen_node->ref_count); | 1288 | atomic_inc(&listen_node->ref_count); |
1182 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | 1289 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); |
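find_listener() gains a `local` selector: pass 1 to match the application-side loc_addr/loc_port (used by mini_cm_listen() when checking for a duplicate listener) and 0 to match the mapped pair that actually appears on the wire (used for packet demux and the loopback lookup). The two call shapes, as they appear later in this patch:

    /* duplicate-listen check: compare the application's view */
    listener = find_listener(cm_core, cm_info->loc_addr, cm_info->loc_port,
                             NES_CM_LISTENER_EITHER_STATE, 1);

    /* incoming-packet demux: the wire carries mapped addresses */
    listener = find_listener(cm_core, nfo.mapped_loc_addr,
                             nfo.mapped_loc_port,
                             NES_CM_LISTENER_ACTIVE_STATE, 0);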
@@ -1189,7 +1296,6 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, | |||
1189 | return NULL; | 1296 | return NULL; |
1190 | } | 1297 | } |
1191 | 1298 | ||
1192 | |||
1193 | /** | 1299 | /** |
1194 | * add_hte_node - add a cm node to the hash table | 1300 | * add_hte_node - add a cm node to the hash table |
1195 | */ | 1301 | */ |
@@ -1310,9 +1416,20 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
1310 | 1416 | ||
1311 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | 1417 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); |
1312 | 1418 | ||
1313 | if (listener->nesvnic) | 1419 | if (listener->nesvnic) { |
1314 | nes_manage_apbvt(listener->nesvnic, listener->loc_port, | 1420 | nes_manage_apbvt(listener->nesvnic, |
1315 | PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL); | 1421 | listener->mapped_loc_port, |
1422 | PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), | ||
1423 | NES_MANAGE_APBVT_DEL); | ||
1424 | |||
1425 | nes_remove_mapinfo(listener->loc_addr, | ||
1426 | listener->loc_port, | ||
1427 | listener->mapped_loc_addr, | ||
1428 | listener->mapped_loc_port); | ||
1429 | nes_debug(NES_DBG_NLMSG, | ||
1430 | "Delete APBVT mapped_loc_port = %04X\n", | ||
1431 | listener->mapped_loc_port); | ||
1432 | } | ||
1316 | 1433 | ||
1317 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); | 1434 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); |
1318 | 1435 | ||
@@ -1454,6 +1571,11 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1454 | cm_node->loc_port = cm_info->loc_port; | 1571 | cm_node->loc_port = cm_info->loc_port; |
1455 | cm_node->rem_port = cm_info->rem_port; | 1572 | cm_node->rem_port = cm_info->rem_port; |
1456 | 1573 | ||
1574 | cm_node->mapped_loc_addr = cm_info->mapped_loc_addr; | ||
1575 | cm_node->mapped_rem_addr = cm_info->mapped_rem_addr; | ||
1576 | cm_node->mapped_loc_port = cm_info->mapped_loc_port; | ||
1577 | cm_node->mapped_rem_port = cm_info->mapped_rem_port; | ||
1578 | |||
1457 | cm_node->mpa_frame_rev = mpa_version; | 1579 | cm_node->mpa_frame_rev = mpa_version; |
1458 | cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; | 1580 | cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; |
1459 | cm_node->mpav2_ird_ord = 0; | 1581 | cm_node->mpav2_ird_ord = 0; |
@@ -1500,8 +1622,10 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1500 | cm_node->loopbackpartner = NULL; | 1622 | cm_node->loopbackpartner = NULL; |
1501 | 1623 | ||
1502 | /* get the mac addr for the remote node */ | 1624 | /* get the mac addr for the remote node */ |
1503 | oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE); | 1625 | oldarpindex = nes_arp_table(nesdev, cm_node->mapped_rem_addr, |
1504 | arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex); | 1626 | NULL, NES_ARP_RESOLVE); |
1627 | arpindex = nes_addr_resolve_neigh(nesvnic, | ||
1628 | cm_node->mapped_rem_addr, oldarpindex); | ||
1505 | if (arpindex < 0) { | 1629 | if (arpindex < 0) { |
1506 | kfree(cm_node); | 1630 | kfree(cm_node); |
1507 | return NULL; | 1631 | return NULL; |
@@ -1563,11 +1687,14 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, | |||
1563 | mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0); | 1687 | mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0); |
1564 | } else { | 1688 | } else { |
1565 | if (cm_node->apbvt_set && cm_node->nesvnic) { | 1689 | if (cm_node->apbvt_set && cm_node->nesvnic) { |
1566 | nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port, | 1690 | nes_manage_apbvt(cm_node->nesvnic, cm_node->mapped_loc_port, |
1567 | PCI_FUNC( | 1691 | PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn), |
1568 | cm_node->nesvnic->nesdev->pcidev->devfn), | ||
1569 | NES_MANAGE_APBVT_DEL); | 1692 | NES_MANAGE_APBVT_DEL); |
1570 | } | 1693 | } |
1694 | nes_debug(NES_DBG_NLMSG, "Delete APBVT mapped_loc_port = %04X\n", | ||
1695 | cm_node->mapped_loc_port); | ||
1696 | nes_remove_mapinfo(cm_node->loc_addr, cm_node->loc_port, | ||
1697 | cm_node->mapped_loc_addr, cm_node->mapped_loc_port); | ||
1571 | } | 1698 | } |
1572 | 1699 | ||
1573 | atomic_dec(&cm_core->node_cnt); | 1700 | atomic_dec(&cm_core->node_cnt); |
@@ -2235,17 +2362,21 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
2235 | * mini_cm_listen - create a listen node with params | 2362 | * mini_cm_listen - create a listen node with params |
2236 | */ | 2363 | */ |
2237 | static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, | 2364 | static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, |
2238 | struct nes_vnic *nesvnic, struct nes_cm_info *cm_info) | 2365 | struct nes_vnic *nesvnic, struct nes_cm_info *cm_info) |
2239 | { | 2366 | { |
2240 | struct nes_cm_listener *listener; | 2367 | struct nes_cm_listener *listener; |
2368 | struct iwpm_dev_data pm_reg_msg; | ||
2369 | struct iwpm_sa_data pm_msg; | ||
2241 | unsigned long flags; | 2370 | unsigned long flags; |
2371 | int iwpm_err = 0; | ||
2242 | 2372 | ||
2243 | nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n", | 2373 | nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n", |
2244 | cm_info->loc_addr, cm_info->loc_port); | 2374 | cm_info->loc_addr, cm_info->loc_port); |
2245 | 2375 | ||
2246 | /* cannot have multiple matching listeners */ | 2376 | /* cannot have multiple matching listeners */ |
2247 | listener = find_listener(cm_core, htonl(cm_info->loc_addr), | 2377 | listener = find_listener(cm_core, cm_info->loc_addr, cm_info->loc_port, |
2248 | htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE); | 2378 | NES_CM_LISTENER_EITHER_STATE, 1); |
2379 | |||
2249 | if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) { | 2380 | if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) { |
2250 | /* find automatically incs ref count ??? */ | 2381 | /* find automatically incs ref count ??? */ |
2251 | atomic_dec(&listener->ref_count); | 2382 | atomic_dec(&listener->ref_count); |
@@ -2254,6 +2385,22 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, | |||
2254 | } | 2385 | } |
2255 | 2386 | ||
2256 | if (!listener) { | 2387 | if (!listener) { |
2388 | nes_form_reg_msg(nesvnic, &pm_reg_msg); | ||
2389 | iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES); | ||
2390 | if (iwpm_err) { | ||
2391 | nes_debug(NES_DBG_NLMSG, | ||
2392 | "Port Mapper reg pid fail (err = %d).\n", iwpm_err); | ||
2393 | } | ||
2394 | if (iwpm_valid_pid() && !iwpm_err) { | ||
2395 | nes_form_pm_msg(cm_info, &pm_msg); | ||
2396 | iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_NES); | ||
2397 | if (iwpm_err) | ||
2398 | nes_debug(NES_DBG_NLMSG, | ||
2399 | "Port Mapper query fail (err = %d).\n", iwpm_err); | ||
2400 | else | ||
2401 | nes_record_pm_msg(cm_info, &pm_msg); | ||
2402 | } | ||
2403 | |||
2257 | /* create a CM listen node (1/2 node to compare incoming traffic to) */ | 2404 | /* create a CM listen node (1/2 node to compare incoming traffic to) */ |
2258 | listener = kzalloc(sizeof(*listener), GFP_ATOMIC); | 2405 | listener = kzalloc(sizeof(*listener), GFP_ATOMIC); |
2259 | if (!listener) { | 2406 | if (!listener) { |
@@ -2261,8 +2408,10 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, | |||
2261 | return NULL; | 2408 | return NULL; |
2262 | } | 2409 | } |
2263 | 2410 | ||
2264 | listener->loc_addr = htonl(cm_info->loc_addr); | 2411 | listener->loc_addr = cm_info->loc_addr; |
2265 | listener->loc_port = htons(cm_info->loc_port); | 2412 | listener->loc_port = cm_info->loc_port; |
2413 | listener->mapped_loc_addr = cm_info->mapped_loc_addr; | ||
2414 | listener->mapped_loc_port = cm_info->mapped_loc_port; | ||
2266 | listener->reused_node = 0; | 2415 | listener->reused_node = 0; |
2267 | 2416 | ||
2268 | atomic_set(&listener->ref_count, 1); | 2417 | atomic_set(&listener->ref_count, 1); |
@@ -2324,14 +2473,18 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
2324 | 2473 | ||
2325 | if (cm_info->loc_addr == cm_info->rem_addr) { | 2474 | if (cm_info->loc_addr == cm_info->rem_addr) { |
2326 | loopbackremotelistener = find_listener(cm_core, | 2475 | loopbackremotelistener = find_listener(cm_core, |
2327 | ntohl(nesvnic->local_ipaddr), cm_node->rem_port, | 2476 | cm_node->mapped_loc_addr, cm_node->mapped_rem_port, |
2328 | NES_CM_LISTENER_ACTIVE_STATE); | 2477 | NES_CM_LISTENER_ACTIVE_STATE, 0); |
2329 | if (loopbackremotelistener == NULL) { | 2478 | if (loopbackremotelistener == NULL) { |
2330 | create_event(cm_node, NES_CM_EVENT_ABORTED); | 2479 | create_event(cm_node, NES_CM_EVENT_ABORTED); |
2331 | } else { | 2480 | } else { |
2332 | loopback_cm_info = *cm_info; | 2481 | loopback_cm_info = *cm_info; |
2333 | loopback_cm_info.loc_port = cm_info->rem_port; | 2482 | loopback_cm_info.loc_port = cm_info->rem_port; |
2334 | loopback_cm_info.rem_port = cm_info->loc_port; | 2483 | loopback_cm_info.rem_port = cm_info->loc_port; |
2484 | loopback_cm_info.mapped_loc_port = | ||
2485 | cm_info->mapped_rem_port; | ||
2486 | loopback_cm_info.mapped_rem_port = | ||
2487 | cm_info->mapped_loc_port; | ||
2335 | loopback_cm_info.cm_id = loopbackremotelistener->cm_id; | 2488 | loopback_cm_info.cm_id = loopbackremotelistener->cm_id; |
2336 | loopbackremotenode = make_cm_node(cm_core, nesvnic, | 2489 | loopbackremotenode = make_cm_node(cm_core, nesvnic, |
2337 | &loopback_cm_info, loopbackremotelistener); | 2490 | &loopback_cm_info, loopbackremotelistener); |
@@ -2560,6 +2713,12 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2560 | nfo.rem_addr = ntohl(iph->saddr); | 2713 | nfo.rem_addr = ntohl(iph->saddr); |
2561 | nfo.rem_port = ntohs(tcph->source); | 2714 | nfo.rem_port = ntohs(tcph->source); |
2562 | 2715 | ||
2716 | /* If port mapper is available these should be mapped address info */ | ||
2717 | nfo.mapped_loc_addr = ntohl(iph->daddr); | ||
2718 | nfo.mapped_loc_port = ntohs(tcph->dest); | ||
2719 | nfo.mapped_rem_addr = ntohl(iph->saddr); | ||
2720 | nfo.mapped_rem_port = ntohs(tcph->source); | ||
2721 | |||
2563 | tmp_daddr = cpu_to_be32(iph->daddr); | 2722 | tmp_daddr = cpu_to_be32(iph->daddr); |
2564 | tmp_saddr = cpu_to_be32(iph->saddr); | 2723 | tmp_saddr = cpu_to_be32(iph->saddr); |
2565 | 2724 | ||
@@ -2568,8 +2727,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2568 | 2727 | ||
2569 | do { | 2728 | do { |
2570 | cm_node = find_node(cm_core, | 2729 | cm_node = find_node(cm_core, |
2571 | nfo.rem_port, nfo.rem_addr, | 2730 | nfo.mapped_rem_port, nfo.mapped_rem_addr, |
2572 | nfo.loc_port, nfo.loc_addr); | 2731 | nfo.mapped_loc_port, nfo.mapped_loc_addr); |
2573 | 2732 | ||
2574 | if (!cm_node) { | 2733 | if (!cm_node) { |
2575 | /* The only type of packet accepted is for */ | 2734 | /* The only type of packet accepted is for */ |
@@ -2578,9 +2737,9 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, | |||
2578 | skb_handled = 0; | 2737 | skb_handled = 0; |
2579 | break; | 2738 | break; |
2580 | } | 2739 | } |
2581 | listener = find_listener(cm_core, nfo.loc_addr, | 2740 | listener = find_listener(cm_core, nfo.mapped_loc_addr, |
2582 | nfo.loc_port, | 2741 | nfo.mapped_loc_port, |
2583 | NES_CM_LISTENER_ACTIVE_STATE); | 2742 | NES_CM_LISTENER_ACTIVE_STATE, 0); |
2584 | if (!listener) { | 2743 | if (!listener) { |
2585 | nfo.cm_id = NULL; | 2744 | nfo.cm_id = NULL; |
2586 | nfo.conn_type = 0; | 2745 | nfo.conn_type = 0; |
@@ -3184,10 +3343,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3184 | 3343 | ||
3185 | nes_cm_init_tsa_conn(nesqp, cm_node); | 3344 | nes_cm_init_tsa_conn(nesqp, cm_node); |
3186 | 3345 | ||
3187 | nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(laddr->sin_port)); | 3346 | nesqp->nesqp_context->tcpPorts[0] = |
3188 | nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(raddr->sin_port)); | 3347 | cpu_to_le16(cm_node->mapped_loc_port); |
3348 | nesqp->nesqp_context->tcpPorts[1] = | ||
3349 | cpu_to_le16(cm_node->mapped_rem_port); | ||
3189 | 3350 | ||
3190 | nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(raddr->sin_addr.s_addr)); | 3351 | nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr); |
3191 | 3352 | ||
3192 | nesqp->nesqp_context->misc2 |= cpu_to_le32( | 3353 | nesqp->nesqp_context->misc2 |= cpu_to_le32( |
3193 | (u32)PCI_FUNC(nesdev->pcidev->devfn) << | 3354 | (u32)PCI_FUNC(nesdev->pcidev->devfn) << |
@@ -3211,9 +3372,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3211 | memset(&nes_quad, 0, sizeof(nes_quad)); | 3372 | memset(&nes_quad, 0, sizeof(nes_quad)); |
3212 | nes_quad.DstIpAdrIndex = | 3373 | nes_quad.DstIpAdrIndex = |
3213 | cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24); | 3374 | cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24); |
3214 | nes_quad.SrcIpadr = raddr->sin_addr.s_addr; | 3375 | nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr); |
3215 | nes_quad.TcpPorts[0] = raddr->sin_port; | 3376 | nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port); |
3216 | nes_quad.TcpPorts[1] = laddr->sin_port; | 3377 | nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port); |
3217 | 3378 | ||
3218 | /* Produce hash key */ | 3379 | /* Produce hash key */ |
3219 | crc_value = get_crc_value(&nes_quad); | 3380 | crc_value = get_crc_value(&nes_quad); |
@@ -3315,6 +3476,9 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3315 | int apbvt_set = 0; | 3476 | int apbvt_set = 0; |
3316 | struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr; | 3477 | struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr; |
3317 | struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr; | 3478 | struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr; |
3479 | struct iwpm_dev_data pm_reg_msg; | ||
3480 | struct iwpm_sa_data pm_msg; | ||
3481 | int iwpm_err = 0; | ||
3318 | 3482 | ||
3319 | if (cm_id->remote_addr.ss_family != AF_INET) | 3483 | if (cm_id->remote_addr.ss_family != AF_INET) |
3320 | return -ENOSYS; | 3484 | return -ENOSYS; |
@@ -3352,20 +3516,44 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3352 | nes_debug(NES_DBG_CM, "mpa private data len =%u\n", | 3516 | nes_debug(NES_DBG_CM, "mpa private data len =%u\n", |
3353 | conn_param->private_data_len); | 3517 | conn_param->private_data_len); |
3354 | 3518 | ||
3519 | /* set up the connection params for the node */ | ||
3520 | cm_info.loc_addr = ntohl(laddr->sin_addr.s_addr); | ||
3521 | cm_info.loc_port = ntohs(laddr->sin_port); | ||
3522 | cm_info.rem_addr = ntohl(raddr->sin_addr.s_addr); | ||
3523 | cm_info.rem_port = ntohs(raddr->sin_port); | ||
3524 | cm_info.cm_id = cm_id; | ||
3525 | cm_info.conn_type = NES_CM_IWARP_CONN_TYPE; | ||
3526 | |||
3527 | /* No port mapper available, go with the specified peer information */ | ||
3528 | cm_info.mapped_loc_addr = cm_info.loc_addr; | ||
3529 | cm_info.mapped_loc_port = cm_info.loc_port; | ||
3530 | cm_info.mapped_rem_addr = cm_info.rem_addr; | ||
3531 | cm_info.mapped_rem_port = cm_info.rem_port; | ||
3532 | |||
3533 | nes_form_reg_msg(nesvnic, &pm_reg_msg); | ||
3534 | iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES); | ||
3535 | if (iwpm_err) { | ||
3536 | nes_debug(NES_DBG_NLMSG, | ||
3537 | "Port Mapper reg pid fail (err = %d).\n", iwpm_err); | ||
3538 | } | ||
3539 | if (iwpm_valid_pid() && !iwpm_err) { | ||
3540 | nes_form_pm_msg(&cm_info, &pm_msg); | ||
3541 | iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_NES); | ||
3542 | if (iwpm_err) | ||
3543 | nes_debug(NES_DBG_NLMSG, | ||
3544 | "Port Mapper query fail (err = %d).\n", iwpm_err); | ||
3545 | else | ||
3546 | nes_record_pm_msg(&cm_info, &pm_msg); | ||
3547 | } | ||
3548 | |||
3355 | if (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr) { | 3549 | if (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr) { |
3356 | nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port), | 3550 | nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port, |
3357 | PCI_FUNC(nesdev->pcidev->devfn), | 3551 | PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD); |
3358 | NES_MANAGE_APBVT_ADD); | ||
3359 | apbvt_set = 1; | 3552 | apbvt_set = 1; |
3360 | } | 3553 | } |
3361 | 3554 | ||
3362 | /* set up the connection params for the node */ | 3555 | if (nes_create_mapinfo(&cm_info)) |
3363 | cm_info.loc_addr = htonl(laddr->sin_addr.s_addr); | 3556 | return -ENOMEM; |
3364 | cm_info.loc_port = htons(laddr->sin_port); | ||
3365 | cm_info.rem_addr = htonl(raddr->sin_addr.s_addr); | ||
3366 | cm_info.rem_port = htons(raddr->sin_port); | ||
3367 | cm_info.cm_id = cm_id; | ||
3368 | cm_info.conn_type = NES_CM_IWARP_CONN_TYPE; | ||
3369 | 3557 | ||
3370 | cm_id->add_ref(cm_id); | 3558 | cm_id->add_ref(cm_id); |
3371 | 3559 | ||
@@ -3375,10 +3563,14 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3375 | &cm_info); | 3563 | &cm_info); |
3376 | if (!cm_node) { | 3564 | if (!cm_node) { |
3377 | if (apbvt_set) | 3565 | if (apbvt_set) |
3378 | nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port), | 3566 | nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port, |
3379 | PCI_FUNC(nesdev->pcidev->devfn), | 3567 | PCI_FUNC(nesdev->pcidev->devfn), |
3380 | NES_MANAGE_APBVT_DEL); | 3568 | NES_MANAGE_APBVT_DEL); |
3381 | 3569 | ||
3570 | nes_debug(NES_DBG_NLMSG, "Delete mapped_loc_port = %04X\n", | ||
3571 | cm_info.mapped_loc_port); | ||
3572 | nes_remove_mapinfo(cm_info.loc_addr, cm_info.loc_port, | ||
3573 | cm_info.mapped_loc_addr, cm_info.mapped_loc_port); | ||
3382 | cm_id->rem_ref(cm_id); | 3574 | cm_id->rem_ref(cm_id); |
3383 | return -ENOMEM; | 3575 | return -ENOMEM; |
3384 | } | 3576 | } |
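Taken together, the nes_connect() hunks give the active side its port-mapper flow: seed the mapped tuple with the unmapped values as a fallback, register this driver's PID with the userspace mapper, ask it for a mapping, record whatever came back, and only then program APBVT and the mapinfo database with the (possibly remapped) local port. The sequence in compressed form, a sketch with error handling trimmed:

    /* fallback: without a mapper, mapped == unmapped */
    cm_info.mapped_loc_addr = cm_info.loc_addr;
    cm_info.mapped_loc_port = cm_info.loc_port;
    cm_info.mapped_rem_addr = cm_info.rem_addr;
    cm_info.mapped_rem_port = cm_info.rem_port;

    nes_form_reg_msg(nesvnic, &pm_reg_msg);
    if (!iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES) && iwpm_valid_pid()) {
            nes_form_pm_msg(&cm_info, &pm_msg);
            if (!iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_NES))
                    nes_record_pm_msg(&cm_info, &pm_msg); /* adopt mapped tuple */
    }
    /* from here on, the hardware sees only mapped addresses/ports */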
@@ -3424,13 +3616,16 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
3424 | nesvnic->local_ipaddr, laddr->sin_addr.s_addr); | 3616 | nesvnic->local_ipaddr, laddr->sin_addr.s_addr); |
3425 | 3617 | ||
3426 | /* setup listen params in our api call struct */ | 3618 | /* setup listen params in our api call struct */ |
3427 | cm_info.loc_addr = nesvnic->local_ipaddr; | 3619 | cm_info.loc_addr = ntohl(nesvnic->local_ipaddr); |
3428 | cm_info.loc_port = laddr->sin_port; | 3620 | cm_info.loc_port = ntohs(laddr->sin_port); |
3429 | cm_info.backlog = backlog; | 3621 | cm_info.backlog = backlog; |
3430 | cm_info.cm_id = cm_id; | 3622 | cm_info.cm_id = cm_id; |
3431 | 3623 | ||
3432 | cm_info.conn_type = NES_CM_IWARP_CONN_TYPE; | 3624 | cm_info.conn_type = NES_CM_IWARP_CONN_TYPE; |
3433 | 3625 | ||
3626 | /* No port mapper available, go with the specified info */ | ||
3627 | cm_info.mapped_loc_addr = cm_info.loc_addr; | ||
3628 | cm_info.mapped_loc_port = cm_info.loc_port; | ||
3434 | 3629 | ||
3435 | cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info); | 3630 | cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info); |
3436 | if (!cm_node) { | 3631 | if (!cm_node) { |
@@ -3442,7 +3637,10 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
3442 | cm_id->provider_data = cm_node; | 3637 | cm_id->provider_data = cm_node; |
3443 | 3638 | ||
3444 | if (!cm_node->reused_node) { | 3639 | if (!cm_node->reused_node) { |
3445 | err = nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port), | 3640 | if (nes_create_mapinfo(&cm_info)) |
3641 | return -ENOMEM; | ||
3642 | |||
3643 | err = nes_manage_apbvt(nesvnic, cm_node->mapped_loc_port, | ||
3446 | PCI_FUNC(nesvnic->nesdev->pcidev->devfn), | 3644 | PCI_FUNC(nesvnic->nesdev->pcidev->devfn), |
3447 | NES_MANAGE_APBVT_ADD); | 3645 | NES_MANAGE_APBVT_ADD); |
3448 | if (err) { | 3646 | if (err) { |
@@ -3567,9 +3765,11 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3567 | nes_cm_init_tsa_conn(nesqp, cm_node); | 3765 | nes_cm_init_tsa_conn(nesqp, cm_node); |
3568 | 3766 | ||
3569 | /* set the QP tsa context */ | 3767 | /* set the QP tsa context */ |
3570 | nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(laddr->sin_port)); | 3768 | nesqp->nesqp_context->tcpPorts[0] = |
3571 | nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(raddr->sin_port)); | 3769 | cpu_to_le16(cm_node->mapped_loc_port); |
3572 | nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(raddr->sin_addr.s_addr)); | 3770 | nesqp->nesqp_context->tcpPorts[1] = |
3771 | cpu_to_le16(cm_node->mapped_rem_port); | ||
3772 | nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr); | ||
3573 | 3773 | ||
3574 | nesqp->nesqp_context->misc2 |= cpu_to_le32( | 3774 | nesqp->nesqp_context->misc2 |= cpu_to_le32( |
3575 | (u32)PCI_FUNC(nesdev->pcidev->devfn) << | 3775 | (u32)PCI_FUNC(nesdev->pcidev->devfn) << |
@@ -3599,9 +3799,9 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3599 | 3799 | ||
3600 | nes_quad.DstIpAdrIndex = | 3800 | nes_quad.DstIpAdrIndex = |
3601 | cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24); | 3801 | cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24); |
3602 | nes_quad.SrcIpadr = raddr->sin_addr.s_addr; | 3802 | nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr); |
3603 | nes_quad.TcpPorts[0] = raddr->sin_port; | 3803 | nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port); |
3604 | nes_quad.TcpPorts[1] = laddr->sin_port; | 3804 | nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port); |
3605 | 3805 | ||
3606 | /* Produce hash key */ | 3806 | /* Produce hash key */ |
3607 | crc_value = get_crc_value(&nes_quad); | 3807 | crc_value = get_crc_value(&nes_quad); |
@@ -3629,7 +3829,7 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3629 | cm_event.ird = cm_node->ird_size; | 3829 | cm_event.ird = cm_node->ird_size; |
3630 | cm_event.ord = cm_node->ord_size; | 3830 | cm_event.ord = cm_node->ord_size; |
3631 | 3831 | ||
3632 | cm_event_laddr->sin_addr.s_addr = event->cm_info.rem_addr; | 3832 | cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr); |
3633 | ret = cm_id->event_handler(cm_id, &cm_event); | 3833 | ret = cm_id->event_handler(cm_id, &cm_event); |
3634 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); | 3834 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); |
3635 | 3835 | ||
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h index 522c99cd07c4..f522cf639789 100644 --- a/drivers/infiniband/hw/nes/nes_cm.h +++ b/drivers/infiniband/hw/nes/nes_cm.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. | 2 | * Copyright (c) 2006 - 2014 Intel Corporation. All rights reserved. |
3 | * | 3 | * |
4 | * This software is available to you under a choice of one of two | 4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -293,8 +293,8 @@ struct nes_cm_listener { | |||
293 | struct list_head list; | 293 | struct list_head list; |
294 | struct nes_cm_core *cm_core; | 294 | struct nes_cm_core *cm_core; |
295 | u8 loc_mac[ETH_ALEN]; | 295 | u8 loc_mac[ETH_ALEN]; |
296 | nes_addr_t loc_addr; | 296 | nes_addr_t loc_addr, mapped_loc_addr; |
297 | u16 loc_port; | 297 | u16 loc_port, mapped_loc_port; |
298 | struct iw_cm_id *cm_id; | 298 | struct iw_cm_id *cm_id; |
299 | enum nes_cm_conn_type conn_type; | 299 | enum nes_cm_conn_type conn_type; |
300 | atomic_t ref_count; | 300 | atomic_t ref_count; |
@@ -308,7 +308,9 @@ struct nes_cm_listener { | |||
308 | /* per connection node and node state information */ | 308 | /* per connection node and node state information */ |
309 | struct nes_cm_node { | 309 | struct nes_cm_node { |
310 | nes_addr_t loc_addr, rem_addr; | 310 | nes_addr_t loc_addr, rem_addr; |
311 | nes_addr_t mapped_loc_addr, mapped_rem_addr; | ||
311 | u16 loc_port, rem_port; | 312 | u16 loc_port, rem_port; |
313 | u16 mapped_loc_port, mapped_rem_port; | ||
312 | 314 | ||
313 | u8 loc_mac[ETH_ALEN]; | 315 | u8 loc_mac[ETH_ALEN]; |
314 | u8 rem_mac[ETH_ALEN]; | 316 | u8 rem_mac[ETH_ALEN]; |
@@ -364,6 +366,10 @@ struct nes_cm_info { | |||
364 | u16 rem_port; | 366 | u16 rem_port; |
365 | nes_addr_t loc_addr; | 367 | nes_addr_t loc_addr; |
366 | nes_addr_t rem_addr; | 368 | nes_addr_t rem_addr; |
369 | u16 mapped_loc_port; | ||
370 | u16 mapped_rem_port; | ||
371 | nes_addr_t mapped_loc_addr; | ||
372 | nes_addr_t mapped_rem_addr; | ||
367 | 373 | ||
368 | enum nes_cm_conn_type conn_type; | 374 | enum nes_cm_conn_type conn_type; |
369 | int backlog; | 375 | int backlog; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index 6c54106f5e64..41a9aec9998d 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c | |||
@@ -510,16 +510,9 @@ exit: | |||
510 | return status; | 510 | return status; |
511 | } | 511 | } |
512 | 512 | ||
513 | static int ocrdma_debugfs_open(struct inode *inode, struct file *file) | ||
514 | { | ||
515 | if (inode->i_private) | ||
516 | file->private_data = inode->i_private; | ||
517 | return 0; | ||
518 | } | ||
519 | |||
520 | static const struct file_operations ocrdma_dbg_ops = { | 513 | static const struct file_operations ocrdma_dbg_ops = { |
521 | .owner = THIS_MODULE, | 514 | .owner = THIS_MODULE, |
522 | .open = ocrdma_debugfs_open, | 515 | .open = simple_open, |
523 | .read = ocrdma_dbgfs_ops_read, | 516 | .read = ocrdma_dbgfs_ops_read, |
524 | }; | 517 | }; |
525 | 518 | ||
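ocrdma_debugfs_open() duplicated the generic simple_open() helper, which already copies inode->i_private into file->private_data, so the open-coded version goes away. The equivalent minimal fops, using the library helper:

    /* simple_open() does exactly what the removed open handler did */
    static const struct file_operations example_dbg_ops = {
            .owner  = THIS_MODULE,
            .open   = simple_open,
            .read   = ocrdma_dbgfs_ops_read,
    };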
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 5b7aeb224a30..8d3c78ddc906 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
@@ -1272,7 +1272,7 @@ static int qib_notify_dca(struct notifier_block *nb, unsigned long event, | |||
1272 | * Do all the generic driver unit- and chip-independent memory | 1272 | * Do all the generic driver unit- and chip-independent memory |
1273 | * allocation and initialization. | 1273 | * allocation and initialization. |
1274 | */ | 1274 | */ |
1275 | static int __init qlogic_ib_init(void) | 1275 | static int __init qib_ib_init(void) |
1276 | { | 1276 | { |
1277 | int ret; | 1277 | int ret; |
1278 | 1278 | ||
@@ -1316,12 +1316,12 @@ bail: | |||
1316 | return ret; | 1316 | return ret; |
1317 | } | 1317 | } |
1318 | 1318 | ||
1319 | module_init(qlogic_ib_init); | 1319 | module_init(qib_ib_init); |
1320 | 1320 | ||
1321 | /* | 1321 | /* |
1322 | * Do the non-unit driver cleanup, memory free, etc. at unload. | 1322 | * Do the non-unit driver cleanup, memory free, etc. at unload. |
1323 | */ | 1323 | */ |
1324 | static void __exit qlogic_ib_cleanup(void) | 1324 | static void __exit qib_ib_cleanup(void) |
1325 | { | 1325 | { |
1326 | int ret; | 1326 | int ret; |
1327 | 1327 | ||
@@ -1346,7 +1346,7 @@ static void __exit qlogic_ib_cleanup(void) | |||
1346 | qib_dev_cleanup(); | 1346 | qib_dev_cleanup(); |
1347 | } | 1347 | } |
1348 | 1348 | ||
1349 | module_exit(qlogic_ib_cleanup); | 1349 | module_exit(qib_ib_cleanup); |
1350 | 1350 | ||
1351 | /* this can only be called after a successful initialization */ | 1351 | /* this can only be called after a successful initialization */ |
1352 | static void cleanup_device_data(struct qib_devdata *dd) | 1352 | static void cleanup_device_data(struct qib_devdata *dd) |
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index edad991d60ed..22c720e5740d 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c | |||
@@ -1028,7 +1028,7 @@ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys) | |||
1028 | 1028 | ||
1029 | event.event = IB_EVENT_PKEY_CHANGE; | 1029 | event.event = IB_EVENT_PKEY_CHANGE; |
1030 | event.device = &dd->verbs_dev.ibdev; | 1030 | event.device = &dd->verbs_dev.ibdev; |
1031 | event.element.port_num = 1; | 1031 | event.element.port_num = port; |
1032 | ib_dispatch_event(&event); | 1032 | ib_dispatch_event(&event); |
1033 | } | 1033 | } |
1034 | return 0; | 1034 | return 0; |
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 0cad0c40d742..7fcc150d603c 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
@@ -985,7 +985,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd, | |||
985 | struct ib_qp *ret; | 985 | struct ib_qp *ret; |
986 | 986 | ||
987 | if (init_attr->cap.max_send_sge > ib_qib_max_sges || | 987 | if (init_attr->cap.max_send_sge > ib_qib_max_sges || |
988 | init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) { | 988 | init_attr->cap.max_send_wr > ib_qib_max_qp_wrs || |
989 | init_attr->create_flags) { | ||
989 | ret = ERR_PTR(-EINVAL); | 990 | ret = ERR_PTR(-EINVAL); |
990 | goto bail; | 991 | goto bail; |
991 | } | 992 | } |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index d48d2c0a2e3c..53bd6a2d9cdb 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c | |||
@@ -466,6 +466,9 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, | |||
466 | ucontext = to_uucontext(pd->uobject->context); | 466 | ucontext = to_uucontext(pd->uobject->context); |
467 | us_ibdev = to_usdev(pd->device); | 467 | us_ibdev = to_usdev(pd->device); |
468 | 468 | ||
469 | if (init_attr->create_flags) | ||
470 | return ERR_PTR(-EINVAL); | ||
471 | |||
469 | err = ib_copy_from_udata(&cmd, udata, sizeof(cmd)); | 472 | err = ib_copy_from_udata(&cmd, udata, sizeof(cmd)); |
470 | if (err) { | 473 | if (err) { |
471 | usnic_err("%s: cannot copy udata for create_qp\n", | 474 | usnic_err("%s: cannot copy udata for create_qp\n", |
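qib and usnic both start rejecting any bits in ib_qp_init_attr::create_flags, since neither implements the optional QP creation flags; failing early with -EINVAL is the expected behavior for a provider that supports none of them. The pattern in isolation, as a sketch:

    /* a provider with no optional creation flags fails fast */
    struct ib_qp *example_create_qp(struct ib_pd *pd,
                                    struct ib_qp_init_attr *init_attr,
                                    struct ib_udata *udata)
    {
            if (init_attr->create_flags)
                    return ERR_PTR(-EINVAL);
            /* ... normal QP creation would continue here ... */
            return ERR_PTR(-ENOSYS);        /* placeholder for this sketch */
    }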
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c index d135ad90d914..3a4288e0fbac 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c | |||
@@ -1,3 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you may redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; version 2 of the License. | ||
7 | * | ||
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
10 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
11 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
12 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
13 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
14 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
15 | * SOFTWARE. | ||
16 | * | ||
17 | */ | ||
18 | |||
1 | #include <linux/init.h> | 19 | #include <linux/init.h> |
2 | #include <linux/list.h> | 20 | #include <linux/list.h> |
3 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
diff --git a/drivers/infiniband/ulp/Makefile b/drivers/infiniband/ulp/Makefile new file mode 100644 index 000000000000..f3c7dcf03098 --- /dev/null +++ b/drivers/infiniband/ulp/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | obj-$(CONFIG_INFINIBAND_IPOIB) += ipoib/ | ||
2 | obj-$(CONFIG_INFINIBAND_SRP) += srp/ | ||
3 | obj-$(CONFIG_INFINIBAND_SRPT) += srpt/ | ||
4 | obj-$(CONFIG_INFINIBAND_ISER) += iser/ | ||
5 | obj-$(CONFIG_INFINIBAND_ISERT) += isert/ | ||
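The per-ULP entries now live in this new subdirectory Makefile, keyed off the same Kconfig symbols as before. The companion drivers/infiniband/hw/Makefile is not part of this excerpt; presumably it follows the same kbuild convention, along these lines:

    obj-$(CONFIG_INFINIBAND_MTHCA)          += mthca/
    obj-$(CONFIG_INFINIBAND_QIB)            += qib/
    obj-$(CONFIG_MLX4_INFINIBAND)           += mlx4/
    obj-$(CONFIG_MLX5_INFINIBAND)           += mlx5/
    obj-$(CONFIG_INFINIBAND_NES)            += nes/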
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 1377f85911c2..933efcea0d03 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -1030,10 +1030,20 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_ | |||
1030 | .cap.max_send_sge = 1, | 1030 | .cap.max_send_sge = 1, |
1031 | .sq_sig_type = IB_SIGNAL_ALL_WR, | 1031 | .sq_sig_type = IB_SIGNAL_ALL_WR, |
1032 | .qp_type = IB_QPT_RC, | 1032 | .qp_type = IB_QPT_RC, |
1033 | .qp_context = tx | 1033 | .qp_context = tx, |
1034 | .create_flags = IB_QP_CREATE_USE_GFP_NOIO | ||
1034 | }; | 1035 | }; |
1035 | 1036 | ||
1036 | return ib_create_qp(priv->pd, &attr); | 1037 | struct ib_qp *tx_qp; |
1038 | |||
1039 | tx_qp = ib_create_qp(priv->pd, &attr); | ||
1040 | if (PTR_ERR(tx_qp) == -EINVAL) { | ||
1041 | ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n", | ||
1042 | priv->ca->name); | ||
1043 | attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO; | ||
1044 | tx_qp = ib_create_qp(priv->pd, &attr); | ||
1045 | } | ||
1046 | return tx_qp; | ||
1037 | } | 1047 | } |
1038 | 1048 | ||
1039 | static int ipoib_cm_send_req(struct net_device *dev, | 1049 | static int ipoib_cm_send_req(struct net_device *dev, |
@@ -1104,12 +1114,14 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, | |||
1104 | struct ipoib_dev_priv *priv = netdev_priv(p->dev); | 1114 | struct ipoib_dev_priv *priv = netdev_priv(p->dev); |
1105 | int ret; | 1115 | int ret; |
1106 | 1116 | ||
1107 | p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring); | 1117 | p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring, |
1118 | GFP_NOIO, PAGE_KERNEL); | ||
1108 | if (!p->tx_ring) { | 1119 | if (!p->tx_ring) { |
1109 | ipoib_warn(priv, "failed to allocate tx ring\n"); | 1120 | ipoib_warn(priv, "failed to allocate tx ring\n"); |
1110 | ret = -ENOMEM; | 1121 | ret = -ENOMEM; |
1111 | goto err_tx; | 1122 | goto err_tx; |
1112 | } | 1123 | } |
1124 | memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring); | ||
1113 | 1125 | ||
1114 | p->qp = ipoib_cm_create_tx_qp(p->dev, p); | 1126 | p->qp = ipoib_cm_create_tx_qp(p->dev, p); |
1115 | if (IS_ERR(p->qp)) { | 1127 | if (IS_ERR(p->qp)) { |
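The ipoib_cm hunks make connected-mode TX setup usable when IPoIB itself carries block I/O: the tx_ring allocation switches from vzalloc() to __vmalloc() with GFP_NOIO (hence the explicit memset afterwards), and the QP is first requested with IB_QP_CREATE_USE_GFP_NOIO, dropping back to a normal allocation on providers that return -EINVAL for the flag. The try-then-fallback step in isolation:

    /* sketch of the fallback used above: NOIO if the provider
     * supports it, a plain allocation otherwise */
    attr.create_flags = IB_QP_CREATE_USE_GFP_NOIO;
    tx_qp = ib_create_qp(priv->pd, &attr);
    if (PTR_ERR(tx_qp) == -EINVAL) {
            attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
            tx_qp = ib_create_qp(priv->pd, &attr);
    }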
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 25f195ef44b0..eb7973957a6e 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -99,6 +99,7 @@ MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); | |||
99 | module_param_named(pi_guard, iser_pi_guard, int, 0644); | 99 | module_param_named(pi_guard, iser_pi_guard, int, 0644); |
100 | MODULE_PARM_DESC(pi_guard, "T10-PI guard_type, 0:CRC|1:IP_CSUM (default:CRC)"); | 100 | MODULE_PARM_DESC(pi_guard, "T10-PI guard_type, 0:CRC|1:IP_CSUM (default:CRC)"); |
101 | 101 | ||
102 | static struct workqueue_struct *release_wq; | ||
102 | struct iser_global ig; | 103 | struct iser_global ig; |
103 | 104 | ||
104 | void | 105 | void |
@@ -337,24 +338,6 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) | |||
337 | return cls_conn; | 338 | return cls_conn; |
338 | } | 339 | } |
339 | 340 | ||
340 | static void | ||
341 | iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn) | ||
342 | { | ||
343 | struct iscsi_conn *conn = cls_conn->dd_data; | ||
344 | struct iser_conn *ib_conn = conn->dd_data; | ||
345 | |||
346 | iscsi_conn_teardown(cls_conn); | ||
347 | /* | ||
348 | * Userspace will normally call the stop callback and | ||
349 | * already have freed the ib_conn, but if it goofed up then | ||
350 | * we free it here. | ||
351 | */ | ||
352 | if (ib_conn) { | ||
353 | ib_conn->iscsi_conn = NULL; | ||
354 | iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */ | ||
355 | } | ||
356 | } | ||
357 | |||
358 | static int | 341 | static int |
359 | iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, | 342 | iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, |
360 | struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, | 343 | struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, |
@@ -392,29 +375,39 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, | |||
392 | conn->dd_data = ib_conn; | 375 | conn->dd_data = ib_conn; |
393 | ib_conn->iscsi_conn = conn; | 376 | ib_conn->iscsi_conn = conn; |
394 | 377 | ||
395 | iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */ | ||
396 | return 0; | 378 | return 0; |
397 | } | 379 | } |
398 | 380 | ||
381 | static int | ||
382 | iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) | ||
383 | { | ||
384 | struct iscsi_conn *iscsi_conn; | ||
385 | struct iser_conn *ib_conn; | ||
386 | |||
387 | iscsi_conn = cls_conn->dd_data; | ||
388 | ib_conn = iscsi_conn->dd_data; | ||
389 | reinit_completion(&ib_conn->stop_completion); | ||
390 | |||
391 | return iscsi_conn_start(cls_conn); | ||
392 | } | ||
393 | |||
399 | static void | 394 | static void |
400 | iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | 395 | iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) |
401 | { | 396 | { |
402 | struct iscsi_conn *conn = cls_conn->dd_data; | 397 | struct iscsi_conn *conn = cls_conn->dd_data; |
403 | struct iser_conn *ib_conn = conn->dd_data; | 398 | struct iser_conn *ib_conn = conn->dd_data; |
404 | 399 | ||
400 | iser_dbg("stopping iscsi_conn: %p, ib_conn: %p\n", conn, ib_conn); | ||
401 | iscsi_conn_stop(cls_conn, flag); | ||
402 | |||
405 | /* | 403 | /* |
406 | * Userspace may have goofed up and not bound the connection or | 404 | * Userspace may have goofed up and not bound the connection or |
407 | * might have only partially setup the connection. | 405 | * might have only partially setup the connection. |
408 | */ | 406 | */ |
409 | if (ib_conn) { | 407 | if (ib_conn) { |
410 | iscsi_conn_stop(cls_conn, flag); | 408 | conn->dd_data = NULL; |
411 | /* | 409 | complete(&ib_conn->stop_completion); |
412 | * There is no unbind event so the stop callback | ||
413 | * must release the ref from the bind. | ||
414 | */ | ||
415 | iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */ | ||
416 | } | 410 | } |
417 | conn->dd_data = NULL; | ||
418 | } | 411 | } |
419 | 412 | ||
420 | static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) | 413 | static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) |
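iser replaces the old bind/unbind reference counting with a completion handshake: the new iscsi_iser_conn_start() re-arms ib_conn->stop_completion before each (re)connect, and iscsi_iser_conn_stop() completes it after severing conn->dd_data, so the release path can sleep until stop has run. The handshake in outline, a sketch assuming the stop_completion field declared in iscsi_iser.h and a waiter in the release work added elsewhere in this patch:

    /* start: arm the handshake for this (re)connection */
    reinit_completion(&ib_conn->stop_completion);

    /* stop: sever the binding, then signal waiters */
    conn->dd_data = NULL;
    complete(&ib_conn->stop_completion);

    /* release side: do not free until stop has run */
    wait_for_completion(&ib_conn->stop_completion);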
@@ -515,28 +508,28 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn, | |||
515 | case ISCSI_PARAM_HDRDGST_EN: | 508 | case ISCSI_PARAM_HDRDGST_EN: |
516 | sscanf(buf, "%d", &value); | 509 | sscanf(buf, "%d", &value); |
517 | if (value) { | 510 | if (value) { |
518 | iser_err("DataDigest wasn't negotiated to None"); | 511 | iser_err("DataDigest wasn't negotiated to None\n"); |
519 | return -EPROTO; | 512 | return -EPROTO; |
520 | } | 513 | } |
521 | break; | 514 | break; |
522 | case ISCSI_PARAM_DATADGST_EN: | 515 | case ISCSI_PARAM_DATADGST_EN: |
523 | sscanf(buf, "%d", &value); | 516 | sscanf(buf, "%d", &value); |
524 | if (value) { | 517 | if (value) { |
525 | iser_err("DataDigest wasn't negotiated to None"); | 518 | iser_err("DataDigest wasn't negotiated to None\n"); |
526 | return -EPROTO; | 519 | return -EPROTO; |
527 | } | 520 | } |
528 | break; | 521 | break; |
529 | case ISCSI_PARAM_IFMARKER_EN: | 522 | case ISCSI_PARAM_IFMARKER_EN: |
530 | sscanf(buf, "%d", &value); | 523 | sscanf(buf, "%d", &value); |
531 | if (value) { | 524 | if (value) { |
532 | iser_err("IFMarker wasn't negotiated to No"); | 525 | iser_err("IFMarker wasn't negotiated to No\n"); |
533 | return -EPROTO; | 526 | return -EPROTO; |
534 | } | 527 | } |
535 | break; | 528 | break; |
536 | case ISCSI_PARAM_OFMARKER_EN: | 529 | case ISCSI_PARAM_OFMARKER_EN: |
537 | sscanf(buf, "%d", &value); | 530 | sscanf(buf, "%d", &value); |
538 | if (value) { | 531 | if (value) { |
539 | iser_err("OFMarker wasn't negotiated to No"); | 532 | iser_err("OFMarker wasn't negotiated to No\n"); |
540 | return -EPROTO; | 533 | return -EPROTO; |
541 | } | 534 | } |
542 | break; | 535 | break; |
@@ -652,19 +645,20 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) | |||
652 | struct iser_conn *ib_conn; | 645 | struct iser_conn *ib_conn; |
653 | 646 | ||
654 | ib_conn = ep->dd_data; | 647 | ib_conn = ep->dd_data; |
655 | if (ib_conn->iscsi_conn) | 648 | iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state); |
656 | /* | ||
657 | * Must suspend xmit path if the ep is bound to the | ||
658 | * iscsi_conn, so we know we are not accessing the ib_conn | ||
659 | * when we free it. | ||
660 | * | ||
661 | * This may not be bound if the ep poll failed. | ||
662 | */ | ||
663 | iscsi_suspend_tx(ib_conn->iscsi_conn); | ||
664 | |||
665 | |||
666 | iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state); | ||
667 | iser_conn_terminate(ib_conn); | 649 | iser_conn_terminate(ib_conn); |
650 | |||
651 | /* | ||
652 | * if iser_conn and iscsi_conn are bound, we must wait iscsi_conn_stop | ||
653 | * call and ISER_CONN_DOWN state before freeing the iser resources. | ||
654 | * otherwise we are safe to free resources immediately. | ||
655 | */ | ||
656 | if (ib_conn->iscsi_conn) { | ||
657 | INIT_WORK(&ib_conn->release_work, iser_release_work); | ||
658 | queue_work(release_wq, &ib_conn->release_work); | ||
659 | } else { | ||
660 | iser_conn_release(ib_conn); | ||
661 | } | ||
668 | } | 662 | } |
669 | 663 | ||
670 | static umode_t iser_attr_is_visible(int param_type, int param) | 664 | static umode_t iser_attr_is_visible(int param_type, int param) |
@@ -748,13 +742,13 @@ static struct iscsi_transport iscsi_iser_transport = { | |||
748 | /* connection management */ | 742 | /* connection management */ |
749 | .create_conn = iscsi_iser_conn_create, | 743 | .create_conn = iscsi_iser_conn_create, |
750 | .bind_conn = iscsi_iser_conn_bind, | 744 | .bind_conn = iscsi_iser_conn_bind, |
751 | .destroy_conn = iscsi_iser_conn_destroy, | 745 | .destroy_conn = iscsi_conn_teardown, |
752 | .attr_is_visible = iser_attr_is_visible, | 746 | .attr_is_visible = iser_attr_is_visible, |
753 | .set_param = iscsi_iser_set_param, | 747 | .set_param = iscsi_iser_set_param, |
754 | .get_conn_param = iscsi_conn_get_param, | 748 | .get_conn_param = iscsi_conn_get_param, |
755 | .get_ep_param = iscsi_iser_get_ep_param, | 749 | .get_ep_param = iscsi_iser_get_ep_param, |
756 | .get_session_param = iscsi_session_get_param, | 750 | .get_session_param = iscsi_session_get_param, |
757 | .start_conn = iscsi_conn_start, | 751 | .start_conn = iscsi_iser_conn_start, |
758 | .stop_conn = iscsi_iser_conn_stop, | 752 | .stop_conn = iscsi_iser_conn_stop, |
759 | /* iscsi host params */ | 753 | /* iscsi host params */ |
760 | .get_host_param = iscsi_host_get_param, | 754 | .get_host_param = iscsi_host_get_param, |
@@ -801,6 +795,12 @@ static int __init iser_init(void) | |||
801 | mutex_init(&ig.connlist_mutex); | 795 | mutex_init(&ig.connlist_mutex); |
802 | INIT_LIST_HEAD(&ig.connlist); | 796 | INIT_LIST_HEAD(&ig.connlist); |
803 | 797 | ||
798 | release_wq = alloc_workqueue("release workqueue", 0, 0); | ||
799 | if (!release_wq) { | ||
800 | iser_err("failed to allocate release workqueue\n"); | ||
801 | return -ENOMEM; | ||
802 | } | ||
803 | |||
804 | iscsi_iser_scsi_transport = iscsi_register_transport( | 804 | iscsi_iser_scsi_transport = iscsi_register_transport( |
805 | &iscsi_iser_transport); | 805 | &iscsi_iser_transport); |
806 | if (!iscsi_iser_scsi_transport) { | 806 | if (!iscsi_iser_scsi_transport) { |
@@ -819,7 +819,24 @@ register_transport_failure: | |||
819 | 819 | ||
820 | static void __exit iser_exit(void) | 820 | static void __exit iser_exit(void) |
821 | { | 821 | { |
822 | struct iser_conn *ib_conn, *n; | ||
823 | int connlist_empty; | ||
824 | |||
822 | iser_dbg("Removing iSER datamover...\n"); | 825 | iser_dbg("Removing iSER datamover...\n"); |
826 | destroy_workqueue(release_wq); | ||
827 | |||
828 | mutex_lock(&ig.connlist_mutex); | ||
829 | connlist_empty = list_empty(&ig.connlist); | ||
830 | mutex_unlock(&ig.connlist_mutex); | ||
831 | |||
832 | if (!connlist_empty) { | ||
833 | iser_err("Error cleanup stage completed but we still have iser " | ||
834 | "connections, destroying them anyway.\n"); | ||
835 | list_for_each_entry_safe(ib_conn, n, &ig.connlist, conn_list) { | ||
836 | iser_conn_release(ib_conn); | ||
837 | } | ||
838 | } | ||
839 | |||
823 | iscsi_unregister_transport(&iscsi_iser_transport); | 840 | iscsi_unregister_transport(&iscsi_iser_transport); |
824 | kmem_cache_destroy(ig.desc_cache); | 841 | kmem_cache_destroy(ig.desc_cache); |
825 | } | 842 | } |
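The iser_init()/iser_exit() changes above introduce a dedicated release workqueue: endpoint disconnect only queues iser_release_work(), and module unload first drains that queue via destroy_workqueue() and then releases anything still sitting on ig.connlist. A minimal, self-contained module sketch of the same deferred-release pattern (hypothetical names, not the driver's code):

	#include <linux/module.h>
	#include <linux/workqueue.h>
	#include <linux/slab.h>

	static struct workqueue_struct *release_wq;

	struct obj {
		struct work_struct release_work;
	};

	static void obj_release_work(struct work_struct *work)
	{
		struct obj *o = container_of(work, struct obj, release_work);

		kfree(o);		/* final teardown in process context */
	}

	static void obj_schedule_release(struct obj *o)
	{
		INIT_WORK(&o->release_work, obj_release_work);
		queue_work(release_wq, &o->release_work);
	}

	static int __init demo_init(void)
	{
		release_wq = alloc_workqueue("demo_release", 0, 0);
		return release_wq ? 0 : -ENOMEM;
	}

	static void __exit demo_exit(void)
	{
		destroy_workqueue(release_wq);	/* flushes queued releases */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");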
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 324129f80d40..97cd385bf7f7 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -69,7 +69,7 @@ | |||
69 | 69 | ||
70 | #define DRV_NAME "iser" | 70 | #define DRV_NAME "iser" |
71 | #define PFX DRV_NAME ": " | 71 | #define PFX DRV_NAME ": " |
72 | #define DRV_VER "1.3" | 72 | #define DRV_VER "1.4" |
73 | 73 | ||
74 | #define iser_dbg(fmt, arg...) \ | 74 | #define iser_dbg(fmt, arg...) \ |
75 | do { \ | 75 | do { \ |
@@ -333,6 +333,8 @@ struct iser_conn { | |||
333 | int post_recv_buf_count; /* posted rx count */ | 333 | int post_recv_buf_count; /* posted rx count */ |
334 | atomic_t post_send_buf_count; /* posted tx count */ | 334 | atomic_t post_send_buf_count; /* posted tx count */ |
335 | char name[ISER_OBJECT_NAME_SIZE]; | 335 | char name[ISER_OBJECT_NAME_SIZE]; |
336 | struct work_struct release_work; | ||
337 | struct completion stop_completion; | ||
336 | struct list_head conn_list; /* entry in ig conn list */ | 338 | struct list_head conn_list; /* entry in ig conn list */ |
337 | 339 | ||
338 | char *login_buf; | 340 | char *login_buf; |
@@ -417,12 +419,12 @@ void iscsi_iser_recv(struct iscsi_conn *conn, | |||
417 | 419 | ||
418 | void iser_conn_init(struct iser_conn *ib_conn); | 420 | void iser_conn_init(struct iser_conn *ib_conn); |
419 | 421 | ||
420 | void iser_conn_get(struct iser_conn *ib_conn); | 422 | void iser_conn_release(struct iser_conn *ib_conn); |
421 | |||
422 | int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed); | ||
423 | 423 | ||
424 | void iser_conn_terminate(struct iser_conn *ib_conn); | 424 | void iser_conn_terminate(struct iser_conn *ib_conn); |
425 | 425 | ||
426 | void iser_release_work(struct work_struct *work); | ||
427 | |||
426 | void iser_rcv_completion(struct iser_rx_desc *desc, | 428 | void iser_rcv_completion(struct iser_rx_desc *desc, |
427 | unsigned long dto_xfer_len, | 429 | unsigned long dto_xfer_len, |
428 | struct iser_conn *ib_conn); | 430 | struct iser_conn *ib_conn); |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 32849f2becde..ea01075f9f9b 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -581,14 +581,30 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, | |||
581 | return ret; | 581 | return ret; |
582 | } | 582 | } |
583 | 583 | ||
584 | void iser_release_work(struct work_struct *work) | ||
585 | { | ||
586 | struct iser_conn *ib_conn; | ||
587 | |||
588 | ib_conn = container_of(work, struct iser_conn, release_work); | ||
589 | |||
590 | /* wait for .conn_stop callback */ | ||
591 | wait_for_completion(&ib_conn->stop_completion); | ||
592 | |||
593 | /* wait for the qp's post send and post receive buffers to empty */ | ||
594 | wait_event_interruptible(ib_conn->wait, | ||
595 | ib_conn->state == ISER_CONN_DOWN); | ||
596 | |||
597 | iser_conn_release(ib_conn); | ||
598 | } | ||
599 | |||
584 | /** | 600 | /** |
585 | * Frees all conn objects and deallocs conn descriptor | 601 | * Frees all conn objects and deallocs conn descriptor |
586 | */ | 602 | */ |
587 | static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id) | 603 | void iser_conn_release(struct iser_conn *ib_conn) |
588 | { | 604 | { |
589 | struct iser_device *device = ib_conn->device; | 605 | struct iser_device *device = ib_conn->device; |
590 | 606 | ||
591 | BUG_ON(ib_conn->state != ISER_CONN_DOWN); | 607 | BUG_ON(ib_conn->state == ISER_CONN_UP); |
592 | 608 | ||
593 | mutex_lock(&ig.connlist_mutex); | 609 | mutex_lock(&ig.connlist_mutex); |
594 | list_del(&ib_conn->conn_list); | 610 | list_del(&ib_conn->conn_list); |
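iser_release_work() above encodes a two-stage handshake before the final free: first wait_for_completion() on stop_completion (signalled from the .conn_stop callback), then a wait for the connection state to reach ISER_CONN_DOWN once the QP's posted buffers have drained. The same shape in isolation (a sketch with made-up names; the driver uses the interruptible wait variant):

	#include <linux/completion.h>
	#include <linux/wait.h>

	struct conn {
		int			state;	/* 0 == down */
		wait_queue_head_t	wait;
		struct completion	stop_completion;
	};

	static void conn_init(struct conn *c)
	{
		c->state = 1;			/* "up" */
		init_waitqueue_head(&c->wait);
		init_completion(&c->stop_completion);
	}

	/* runs from the release workqueue */
	static void conn_release_body(struct conn *c)
	{
		wait_for_completion(&c->stop_completion); /* .conn_stop ran */
		wait_event(c->wait, c->state == 0);	  /* buffers drained */
		/* now safe to free c */
	}

	/* signalled from the stop callback and the disconnect handler */
	static void conn_stopped(struct conn *c)
	{
		complete(&c->stop_completion);
	}

	static void conn_down(struct conn *c)
	{
		c->state = 0;
		wake_up(&c->wait);
	}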
@@ -600,27 +616,13 @@ static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id) | |||
600 | if (device != NULL) | 616 | if (device != NULL) |
601 | iser_device_try_release(device); | 617 | iser_device_try_release(device); |
602 | /* if cma handler context, the caller actually destroys the id */ | 618 | /* if cma handler context, the caller actually destroys the id */ |
603 | if (ib_conn->cma_id != NULL && can_destroy_id) { | 619 | if (ib_conn->cma_id != NULL) { |
604 | rdma_destroy_id(ib_conn->cma_id); | 620 | rdma_destroy_id(ib_conn->cma_id); |
605 | ib_conn->cma_id = NULL; | 621 | ib_conn->cma_id = NULL; |
606 | } | 622 | } |
607 | iscsi_destroy_endpoint(ib_conn->ep); | 623 | iscsi_destroy_endpoint(ib_conn->ep); |
608 | } | 624 | } |
609 | 625 | ||
610 | void iser_conn_get(struct iser_conn *ib_conn) | ||
611 | { | ||
612 | atomic_inc(&ib_conn->refcount); | ||
613 | } | ||
614 | |||
615 | int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id) | ||
616 | { | ||
617 | if (atomic_dec_and_test(&ib_conn->refcount)) { | ||
618 | iser_conn_release(ib_conn, can_destroy_id); | ||
619 | return 1; | ||
620 | } | ||
621 | return 0; | ||
622 | } | ||
623 | |||
624 | /** | 626 | /** |
625 | * triggers start of the disconnect procedures and wait for them to be done | 627 | * triggers start of the disconnect procedures and wait for them to be done |
626 | */ | 628 | */ |
@@ -638,24 +640,19 @@ void iser_conn_terminate(struct iser_conn *ib_conn) | |||
638 | if (err) | 640 | if (err) |
639 | iser_err("Failed to disconnect, conn: 0x%p err %d\n", | 641 | iser_err("Failed to disconnect, conn: 0x%p err %d\n", |
640 | ib_conn,err); | 642 | ib_conn,err); |
641 | |||
642 | wait_event_interruptible(ib_conn->wait, | ||
643 | ib_conn->state == ISER_CONN_DOWN); | ||
644 | |||
645 | iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */ | ||
646 | } | 643 | } |
647 | 644 | ||
648 | static int iser_connect_error(struct rdma_cm_id *cma_id) | 645 | static void iser_connect_error(struct rdma_cm_id *cma_id) |
649 | { | 646 | { |
650 | struct iser_conn *ib_conn; | 647 | struct iser_conn *ib_conn; |
648 | |||
651 | ib_conn = (struct iser_conn *)cma_id->context; | 649 | ib_conn = (struct iser_conn *)cma_id->context; |
652 | 650 | ||
653 | ib_conn->state = ISER_CONN_DOWN; | 651 | ib_conn->state = ISER_CONN_DOWN; |
654 | wake_up_interruptible(&ib_conn->wait); | 652 | wake_up_interruptible(&ib_conn->wait); |
655 | return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */ | ||
656 | } | 653 | } |
657 | 654 | ||
658 | static int iser_addr_handler(struct rdma_cm_id *cma_id) | 655 | static void iser_addr_handler(struct rdma_cm_id *cma_id) |
659 | { | 656 | { |
660 | struct iser_device *device; | 657 | struct iser_device *device; |
661 | struct iser_conn *ib_conn; | 658 | struct iser_conn *ib_conn; |
@@ -664,7 +661,8 @@ static int iser_addr_handler(struct rdma_cm_id *cma_id) | |||
664 | device = iser_device_find_by_ib_device(cma_id); | 661 | device = iser_device_find_by_ib_device(cma_id); |
665 | if (!device) { | 662 | if (!device) { |
666 | iser_err("device lookup/creation failed\n"); | 663 | iser_err("device lookup/creation failed\n"); |
667 | return iser_connect_error(cma_id); | 664 | iser_connect_error(cma_id); |
665 | return; | ||
668 | } | 666 | } |
669 | 667 | ||
670 | ib_conn = (struct iser_conn *)cma_id->context; | 668 | ib_conn = (struct iser_conn *)cma_id->context; |
@@ -686,13 +684,12 @@ static int iser_addr_handler(struct rdma_cm_id *cma_id) | |||
686 | ret = rdma_resolve_route(cma_id, 1000); | 684 | ret = rdma_resolve_route(cma_id, 1000); |
687 | if (ret) { | 685 | if (ret) { |
688 | iser_err("resolve route failed: %d\n", ret); | 686 | iser_err("resolve route failed: %d\n", ret); |
689 | return iser_connect_error(cma_id); | 687 | iser_connect_error(cma_id); |
688 | return; | ||
690 | } | 689 | } |
691 | |||
692 | return 0; | ||
693 | } | 690 | } |
694 | 691 | ||
695 | static int iser_route_handler(struct rdma_cm_id *cma_id) | 692 | static void iser_route_handler(struct rdma_cm_id *cma_id) |
696 | { | 693 | { |
697 | struct rdma_conn_param conn_param; | 694 | struct rdma_conn_param conn_param; |
698 | int ret; | 695 | int ret; |
@@ -720,9 +717,9 @@ static int iser_route_handler(struct rdma_cm_id *cma_id) | |||
720 | goto failure; | 717 | goto failure; |
721 | } | 718 | } |
722 | 719 | ||
723 | return 0; | 720 | return; |
724 | failure: | 721 | failure: |
725 | return iser_connect_error(cma_id); | 722 | iser_connect_error(cma_id); |
726 | } | 723 | } |
727 | 724 | ||
728 | static void iser_connected_handler(struct rdma_cm_id *cma_id) | 725 | static void iser_connected_handler(struct rdma_cm_id *cma_id) |
@@ -735,14 +732,13 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id) | |||
735 | iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); | 732 | iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); |
736 | 733 | ||
737 | ib_conn = (struct iser_conn *)cma_id->context; | 734 | ib_conn = (struct iser_conn *)cma_id->context; |
738 | ib_conn->state = ISER_CONN_UP; | 735 | if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_PENDING, ISER_CONN_UP)) |
739 | wake_up_interruptible(&ib_conn->wait); | 736 | wake_up_interruptible(&ib_conn->wait); |
740 | } | 737 | } |
741 | 738 | ||
742 | static int iser_disconnected_handler(struct rdma_cm_id *cma_id) | 739 | static void iser_disconnected_handler(struct rdma_cm_id *cma_id) |
743 | { | 740 | { |
744 | struct iser_conn *ib_conn; | 741 | struct iser_conn *ib_conn; |
745 | int ret; | ||
746 | 742 | ||
747 | ib_conn = (struct iser_conn *)cma_id->context; | 743 | ib_conn = (struct iser_conn *)cma_id->context; |
748 | 744 | ||
@@ -762,24 +758,19 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id) | |||
762 | ib_conn->state = ISER_CONN_DOWN; | 758 | ib_conn->state = ISER_CONN_DOWN; |
763 | wake_up_interruptible(&ib_conn->wait); | 759 | wake_up_interruptible(&ib_conn->wait); |
764 | } | 760 | } |
765 | |||
766 | ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */ | ||
767 | return ret; | ||
768 | } | 761 | } |
769 | 762 | ||
770 | static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | 763 | static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) |
771 | { | 764 | { |
772 | int ret = 0; | ||
773 | |||
774 | iser_info("event %d status %d conn %p id %p\n", | 765 | iser_info("event %d status %d conn %p id %p\n", |
775 | event->event, event->status, cma_id->context, cma_id); | 766 | event->event, event->status, cma_id->context, cma_id); |
776 | 767 | ||
777 | switch (event->event) { | 768 | switch (event->event) { |
778 | case RDMA_CM_EVENT_ADDR_RESOLVED: | 769 | case RDMA_CM_EVENT_ADDR_RESOLVED: |
779 | ret = iser_addr_handler(cma_id); | 770 | iser_addr_handler(cma_id); |
780 | break; | 771 | break; |
781 | case RDMA_CM_EVENT_ROUTE_RESOLVED: | 772 | case RDMA_CM_EVENT_ROUTE_RESOLVED: |
782 | ret = iser_route_handler(cma_id); | 773 | iser_route_handler(cma_id); |
783 | break; | 774 | break; |
784 | case RDMA_CM_EVENT_ESTABLISHED: | 775 | case RDMA_CM_EVENT_ESTABLISHED: |
785 | iser_connected_handler(cma_id); | 776 | iser_connected_handler(cma_id); |
@@ -789,18 +780,18 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve | |||
789 | case RDMA_CM_EVENT_CONNECT_ERROR: | 780 | case RDMA_CM_EVENT_CONNECT_ERROR: |
790 | case RDMA_CM_EVENT_UNREACHABLE: | 781 | case RDMA_CM_EVENT_UNREACHABLE: |
791 | case RDMA_CM_EVENT_REJECTED: | 782 | case RDMA_CM_EVENT_REJECTED: |
792 | ret = iser_connect_error(cma_id); | 783 | iser_connect_error(cma_id); |
793 | break; | 784 | break; |
794 | case RDMA_CM_EVENT_DISCONNECTED: | 785 | case RDMA_CM_EVENT_DISCONNECTED: |
795 | case RDMA_CM_EVENT_DEVICE_REMOVAL: | 786 | case RDMA_CM_EVENT_DEVICE_REMOVAL: |
796 | case RDMA_CM_EVENT_ADDR_CHANGE: | 787 | case RDMA_CM_EVENT_ADDR_CHANGE: |
797 | ret = iser_disconnected_handler(cma_id); | 788 | iser_disconnected_handler(cma_id); |
798 | break; | 789 | break; |
799 | default: | 790 | default: |
800 | iser_err("Unexpected RDMA CM event (%d)\n", event->event); | 791 | iser_err("Unexpected RDMA CM event (%d)\n", event->event); |
801 | break; | 792 | break; |
802 | } | 793 | } |
803 | return ret; | 794 | return 0; |
804 | } | 795 | } |
805 | 796 | ||
806 | void iser_conn_init(struct iser_conn *ib_conn) | 797 | void iser_conn_init(struct iser_conn *ib_conn) |
@@ -809,7 +800,7 @@ void iser_conn_init(struct iser_conn *ib_conn) | |||
809 | init_waitqueue_head(&ib_conn->wait); | 800 | init_waitqueue_head(&ib_conn->wait); |
810 | ib_conn->post_recv_buf_count = 0; | 801 | ib_conn->post_recv_buf_count = 0; |
811 | atomic_set(&ib_conn->post_send_buf_count, 0); | 802 | atomic_set(&ib_conn->post_send_buf_count, 0); |
812 | atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */ | 803 | init_completion(&ib_conn->stop_completion); |
813 | INIT_LIST_HEAD(&ib_conn->conn_list); | 804 | INIT_LIST_HEAD(&ib_conn->conn_list); |
814 | spin_lock_init(&ib_conn->lock); | 805 | spin_lock_init(&ib_conn->lock); |
815 | } | 806 | } |
@@ -837,7 +828,6 @@ int iser_connect(struct iser_conn *ib_conn, | |||
837 | 828 | ||
838 | ib_conn->state = ISER_CONN_PENDING; | 829 | ib_conn->state = ISER_CONN_PENDING; |
839 | 830 | ||
840 | iser_conn_get(ib_conn); /* ref ib conn's cma id */ | ||
841 | ib_conn->cma_id = rdma_create_id(iser_cma_handler, | 831 | ib_conn->cma_id = rdma_create_id(iser_cma_handler, |
842 | (void *)ib_conn, | 832 | (void *)ib_conn, |
843 | RDMA_PS_TCP, IB_QPT_RC); | 833 | RDMA_PS_TCP, IB_QPT_RC); |
@@ -874,9 +864,8 @@ id_failure: | |||
874 | ib_conn->cma_id = NULL; | 864 | ib_conn->cma_id = NULL; |
875 | addr_failure: | 865 | addr_failure: |
876 | ib_conn->state = ISER_CONN_DOWN; | 866 | ib_conn->state = ISER_CONN_DOWN; |
877 | iser_conn_put(ib_conn, 1); /* deref ib conn's cma id */ | ||
878 | connect_failure: | 867 | connect_failure: |
879 | iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */ | 868 | iser_conn_release(ib_conn); |
880 | return err; | 869 | return err; |
881 | } | 870 | } |
882 | 871 | ||
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 66a908bf3fb9..e3c2c5b4297f 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -30,7 +30,7 @@ | |||
30 | * SOFTWARE. | 30 | * SOFTWARE. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #define pr_fmt(fmt) PFX fmt | 33 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
34 | 34 | ||
35 | #include <linux/module.h> | 35 | #include <linux/module.h> |
36 | #include <linux/init.h> | 36 | #include <linux/init.h> |
@@ -66,6 +66,8 @@ static unsigned int srp_sg_tablesize; | |||
66 | static unsigned int cmd_sg_entries; | 66 | static unsigned int cmd_sg_entries; |
67 | static unsigned int indirect_sg_entries; | 67 | static unsigned int indirect_sg_entries; |
68 | static bool allow_ext_sg; | 68 | static bool allow_ext_sg; |
69 | static bool prefer_fr; | ||
70 | static bool register_always; | ||
69 | static int topspin_workarounds = 1; | 71 | static int topspin_workarounds = 1; |
70 | 72 | ||
71 | module_param(srp_sg_tablesize, uint, 0444); | 73 | module_param(srp_sg_tablesize, uint, 0444); |
@@ -87,6 +89,14 @@ module_param(topspin_workarounds, int, 0444); | |||
87 | MODULE_PARM_DESC(topspin_workarounds, | 89 | MODULE_PARM_DESC(topspin_workarounds, |
88 | "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); | 90 | "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); |
89 | 91 | ||
92 | module_param(prefer_fr, bool, 0444); | ||
93 | MODULE_PARM_DESC(prefer_fr, | ||
94 | "Whether to use fast registration if both FMR and fast registration are supported"); | ||
95 | |||
96 | module_param(register_always, bool, 0444); | ||
97 | MODULE_PARM_DESC(register_always, | ||
98 | "Use memory registration even for contiguous memory regions"); | ||
99 | |||
90 | static struct kernel_param_ops srp_tmo_ops; | 100 | static struct kernel_param_ops srp_tmo_ops; |
91 | 101 | ||
92 | static int srp_reconnect_delay = 10; | 102 | static int srp_reconnect_delay = 10; |
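Both new parameters are read-only (0444) policy knobs. The code that consumes prefer_fr is outside this hunk; a plausible reading of the intended selection, as a hypothetical helper:

	static bool srp_dev_wants_fast_reg(bool has_fmr, bool has_fr)
	{
		/* fast registration wins when it is the only scheme, or
		 * when both work and the administrator asked for it */
		return has_fr && (prefer_fr || !has_fmr);
	}

register_always, checked further down in srp_map_data() and srp_finish_mapping(), forces memory registration even for single-entry sg-lists that could otherwise have used the global rkey.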
@@ -288,28 +298,174 @@ static int srp_new_cm_id(struct srp_target_port *target) | |||
288 | return 0; | 298 | return 0; |
289 | } | 299 | } |
290 | 300 | ||
301 | static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target) | ||
302 | { | ||
303 | struct srp_device *dev = target->srp_host->srp_dev; | ||
304 | struct ib_fmr_pool_param fmr_param; | ||
305 | |||
306 | memset(&fmr_param, 0, sizeof(fmr_param)); | ||
307 | fmr_param.pool_size = target->scsi_host->can_queue; | ||
308 | fmr_param.dirty_watermark = fmr_param.pool_size / 4; | ||
309 | fmr_param.cache = 1; | ||
310 | fmr_param.max_pages_per_fmr = dev->max_pages_per_mr; | ||
311 | fmr_param.page_shift = ilog2(dev->mr_page_size); | ||
312 | fmr_param.access = (IB_ACCESS_LOCAL_WRITE | | ||
313 | IB_ACCESS_REMOTE_WRITE | | ||
314 | IB_ACCESS_REMOTE_READ); | ||
315 | |||
316 | return ib_create_fmr_pool(dev->pd, &fmr_param); | ||
317 | } | ||
318 | |||
319 | /** | ||
320 | * srp_destroy_fr_pool() - free the resources owned by a pool | ||
321 | * @pool: Fast registration pool to be destroyed. | ||
322 | */ | ||
323 | static void srp_destroy_fr_pool(struct srp_fr_pool *pool) | ||
324 | { | ||
325 | int i; | ||
326 | struct srp_fr_desc *d; | ||
327 | |||
328 | if (!pool) | ||
329 | return; | ||
330 | |||
331 | for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { | ||
332 | if (d->frpl) | ||
333 | ib_free_fast_reg_page_list(d->frpl); | ||
334 | if (d->mr) | ||
335 | ib_dereg_mr(d->mr); | ||
336 | } | ||
337 | kfree(pool); | ||
338 | } | ||
339 | |||
340 | /** | ||
341 | * srp_create_fr_pool() - allocate and initialize a pool for fast registration | ||
342 | * @device: IB device to allocate fast registration descriptors for. | ||
343 | * @pd: Protection domain associated with the FR descriptors. | ||
344 | * @pool_size: Number of descriptors to allocate. | ||
345 | * @max_page_list_len: Maximum fast registration work request page list length. | ||
346 | */ | ||
347 | static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device, | ||
348 | struct ib_pd *pd, int pool_size, | ||
349 | int max_page_list_len) | ||
350 | { | ||
351 | struct srp_fr_pool *pool; | ||
352 | struct srp_fr_desc *d; | ||
353 | struct ib_mr *mr; | ||
354 | struct ib_fast_reg_page_list *frpl; | ||
355 | int i, ret = -EINVAL; | ||
356 | |||
357 | if (pool_size <= 0) | ||
358 | goto err; | ||
359 | ret = -ENOMEM; | ||
360 | pool = kzalloc(sizeof(struct srp_fr_pool) + | ||
361 | pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL); | ||
362 | if (!pool) | ||
363 | goto err; | ||
364 | pool->size = pool_size; | ||
365 | pool->max_page_list_len = max_page_list_len; | ||
366 | spin_lock_init(&pool->lock); | ||
367 | INIT_LIST_HEAD(&pool->free_list); | ||
368 | |||
369 | for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { | ||
370 | mr = ib_alloc_fast_reg_mr(pd, max_page_list_len); | ||
371 | if (IS_ERR(mr)) { | ||
372 | ret = PTR_ERR(mr); | ||
373 | goto destroy_pool; | ||
374 | } | ||
375 | d->mr = mr; | ||
376 | frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len); | ||
377 | if (IS_ERR(frpl)) { | ||
378 | ret = PTR_ERR(frpl); | ||
379 | goto destroy_pool; | ||
380 | } | ||
381 | d->frpl = frpl; | ||
382 | list_add_tail(&d->entry, &pool->free_list); | ||
383 | } | ||
384 | |||
385 | out: | ||
386 | return pool; | ||
387 | |||
388 | destroy_pool: | ||
389 | srp_destroy_fr_pool(pool); | ||
390 | |||
391 | err: | ||
392 | pool = ERR_PTR(ret); | ||
393 | goto out; | ||
394 | } | ||
395 | |||
396 | /** | ||
397 | * srp_fr_pool_get() - obtain a descriptor suitable for fast registration | ||
398 | * @pool: Pool to obtain descriptor from. | ||
399 | */ | ||
400 | static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool) | ||
401 | { | ||
402 | struct srp_fr_desc *d = NULL; | ||
403 | unsigned long flags; | ||
404 | |||
405 | spin_lock_irqsave(&pool->lock, flags); | ||
406 | if (!list_empty(&pool->free_list)) { | ||
407 | d = list_first_entry(&pool->free_list, typeof(*d), entry); | ||
408 | list_del(&d->entry); | ||
409 | } | ||
410 | spin_unlock_irqrestore(&pool->lock, flags); | ||
411 | |||
412 | return d; | ||
413 | } | ||
414 | |||
415 | /** | ||
416 | * srp_fr_pool_put() - put an FR descriptor back in the free list | ||
417 | * @pool: Pool the descriptor was allocated from. | ||
418 | * @desc: Pointer to an array of fast registration descriptor pointers. | ||
419 | * @n: Number of descriptors to put back. | ||
420 | * | ||
421 | * Note: The caller must already have queued an invalidation request for | ||
422 | * desc->mr->rkey before calling this function. | ||
423 | */ | ||
424 | static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc, | ||
425 | int n) | ||
426 | { | ||
427 | unsigned long flags; | ||
428 | int i; | ||
429 | |||
430 | spin_lock_irqsave(&pool->lock, flags); | ||
431 | for (i = 0; i < n; i++) | ||
432 | list_add(&desc[i]->entry, &pool->free_list); | ||
433 | spin_unlock_irqrestore(&pool->lock, flags); | ||
434 | } | ||
435 | |||
436 | static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target) | ||
437 | { | ||
438 | struct srp_device *dev = target->srp_host->srp_dev; | ||
439 | |||
440 | return srp_create_fr_pool(dev->dev, dev->pd, | ||
441 | target->scsi_host->can_queue, | ||
442 | dev->max_pages_per_mr); | ||
443 | } | ||
444 | |||
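Taken together, the pool helpers above imply a per-command descriptor lifecycle: take a descriptor off the free list, post a fast registration work request for it, run the I/O against its rkey, queue a LOCAL_INV (see srp_inv_rkey() below), and only then recycle it, as the kernel-doc on srp_fr_pool_put() requires. Roughly (a sketch; error handling elided):

	static int demo_fr_io(struct srp_target_port *target,
			      struct srp_fr_pool *pool)
	{
		struct srp_fr_desc *d = srp_fr_pool_get(pool);

		if (!d)
			return -ENOMEM;	/* caller backs off or falls back */

		/* ... fill d->frpl->page_list, post IB_WR_FAST_REG_MR for
		 * d->mr, then issue the I/O against d->mr->rkey ... */

		srp_inv_rkey(target, d->mr->rkey); /* invalidate first */
		srp_fr_pool_put(pool, &d, 1);	   /* then recycle */
		return 0;
	}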
291 | static int srp_create_target_ib(struct srp_target_port *target) | 445 | static int srp_create_target_ib(struct srp_target_port *target) |
292 | { | 446 | { |
447 | struct srp_device *dev = target->srp_host->srp_dev; | ||
293 | struct ib_qp_init_attr *init_attr; | 448 | struct ib_qp_init_attr *init_attr; |
294 | struct ib_cq *recv_cq, *send_cq; | 449 | struct ib_cq *recv_cq, *send_cq; |
295 | struct ib_qp *qp; | 450 | struct ib_qp *qp; |
451 | struct ib_fmr_pool *fmr_pool = NULL; | ||
452 | struct srp_fr_pool *fr_pool = NULL; | ||
453 | const int m = 1 + dev->use_fast_reg; | ||
296 | int ret; | 454 | int ret; |
297 | 455 | ||
298 | init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL); | 456 | init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL); |
299 | if (!init_attr) | 457 | if (!init_attr) |
300 | return -ENOMEM; | 458 | return -ENOMEM; |
301 | 459 | ||
302 | recv_cq = ib_create_cq(target->srp_host->srp_dev->dev, | 460 | recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, target, |
303 | srp_recv_completion, NULL, target, | ||
304 | target->queue_size, target->comp_vector); | 461 | target->queue_size, target->comp_vector); |
305 | if (IS_ERR(recv_cq)) { | 462 | if (IS_ERR(recv_cq)) { |
306 | ret = PTR_ERR(recv_cq); | 463 | ret = PTR_ERR(recv_cq); |
307 | goto err; | 464 | goto err; |
308 | } | 465 | } |
309 | 466 | ||
310 | send_cq = ib_create_cq(target->srp_host->srp_dev->dev, | 467 | send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, target, |
311 | srp_send_completion, NULL, target, | 468 | m * target->queue_size, target->comp_vector); |
312 | target->queue_size, target->comp_vector); | ||
313 | if (IS_ERR(send_cq)) { | 469 | if (IS_ERR(send_cq)) { |
314 | ret = PTR_ERR(send_cq); | 470 | ret = PTR_ERR(send_cq); |
315 | goto err_recv_cq; | 471 | goto err_recv_cq; |
@@ -318,16 +474,16 @@ static int srp_create_target_ib(struct srp_target_port *target) | |||
318 | ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP); | 474 | ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP); |
319 | 475 | ||
320 | init_attr->event_handler = srp_qp_event; | 476 | init_attr->event_handler = srp_qp_event; |
321 | init_attr->cap.max_send_wr = target->queue_size; | 477 | init_attr->cap.max_send_wr = m * target->queue_size; |
322 | init_attr->cap.max_recv_wr = target->queue_size; | 478 | init_attr->cap.max_recv_wr = target->queue_size; |
323 | init_attr->cap.max_recv_sge = 1; | 479 | init_attr->cap.max_recv_sge = 1; |
324 | init_attr->cap.max_send_sge = 1; | 480 | init_attr->cap.max_send_sge = 1; |
325 | init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; | 481 | init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; |
326 | init_attr->qp_type = IB_QPT_RC; | 482 | init_attr->qp_type = IB_QPT_RC; |
327 | init_attr->send_cq = send_cq; | 483 | init_attr->send_cq = send_cq; |
328 | init_attr->recv_cq = recv_cq; | 484 | init_attr->recv_cq = recv_cq; |
329 | 485 | ||
330 | qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr); | 486 | qp = ib_create_qp(dev->pd, init_attr); |
331 | if (IS_ERR(qp)) { | 487 | if (IS_ERR(qp)) { |
332 | ret = PTR_ERR(qp); | 488 | ret = PTR_ERR(qp); |
333 | goto err_send_cq; | 489 | goto err_send_cq; |
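The sizing change is a direct consequence of fast registration: each SCSI command is budgeted an extra send-queue slot for its FAST_REG_MR work request on top of the SRP_CMD send itself, so both the send CQ depth and max_send_wr scale by m = 1 + use_fast_reg. The switch from IB_SIGNAL_ALL_WR to IB_SIGNAL_REQ_WR means only work requests flagged IB_SEND_SIGNALED generate completions, so the extra registration WRs need not be reaped one by one. A worked example of the budget (can_queue value assumed):

	static int demo_send_wr_budget(bool use_fast_reg)
	{
		const int can_queue = 64;	/* assumed for illustration */
		const int m = 1 + use_fast_reg;	/* 2 with FR, 1 without */

		return m * can_queue;		/* 128 vs 64 send WRs */
	}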
@@ -337,6 +493,30 @@ static int srp_create_target_ib(struct srp_target_port *target) | |||
337 | if (ret) | 493 | if (ret) |
338 | goto err_qp; | 494 | goto err_qp; |
339 | 495 | ||
496 | if (dev->use_fast_reg && dev->has_fr) { | ||
497 | fr_pool = srp_alloc_fr_pool(target); | ||
498 | if (IS_ERR(fr_pool)) { | ||
499 | ret = PTR_ERR(fr_pool); | ||
500 | shost_printk(KERN_WARNING, target->scsi_host, PFX | ||
501 | "FR pool allocation failed (%d)\n", ret); | ||
502 | goto err_qp; | ||
503 | } | ||
504 | if (target->fr_pool) | ||
505 | srp_destroy_fr_pool(target->fr_pool); | ||
506 | target->fr_pool = fr_pool; | ||
507 | } else if (!dev->use_fast_reg && dev->has_fmr) { | ||
508 | fmr_pool = srp_alloc_fmr_pool(target); | ||
509 | if (IS_ERR(fmr_pool)) { | ||
510 | ret = PTR_ERR(fmr_pool); | ||
511 | shost_printk(KERN_WARNING, target->scsi_host, PFX | ||
512 | "FMR pool allocation failed (%d)\n", ret); | ||
513 | goto err_qp; | ||
514 | } | ||
515 | if (target->fmr_pool) | ||
516 | ib_destroy_fmr_pool(target->fmr_pool); | ||
517 | target->fmr_pool = fmr_pool; | ||
518 | } | ||
519 | |||
340 | if (target->qp) | 520 | if (target->qp) |
341 | ib_destroy_qp(target->qp); | 521 | ib_destroy_qp(target->qp); |
342 | if (target->recv_cq) | 522 | if (target->recv_cq) |
@@ -371,8 +551,16 @@ err: | |||
371 | */ | 551 | */ |
372 | static void srp_free_target_ib(struct srp_target_port *target) | 552 | static void srp_free_target_ib(struct srp_target_port *target) |
373 | { | 553 | { |
554 | struct srp_device *dev = target->srp_host->srp_dev; | ||
374 | int i; | 555 | int i; |
375 | 556 | ||
557 | if (dev->use_fast_reg) { | ||
558 | if (target->fr_pool) | ||
559 | srp_destroy_fr_pool(target->fr_pool); | ||
560 | } else { | ||
561 | if (target->fmr_pool) | ||
562 | ib_destroy_fmr_pool(target->fmr_pool); | ||
563 | } | ||
376 | ib_destroy_qp(target->qp); | 564 | ib_destroy_qp(target->qp); |
377 | ib_destroy_cq(target->send_cq); | 565 | ib_destroy_cq(target->send_cq); |
378 | ib_destroy_cq(target->recv_cq); | 566 | ib_destroy_cq(target->recv_cq); |
@@ -577,7 +765,8 @@ static void srp_disconnect_target(struct srp_target_port *target) | |||
577 | 765 | ||
578 | static void srp_free_req_data(struct srp_target_port *target) | 766 | static void srp_free_req_data(struct srp_target_port *target) |
579 | { | 767 | { |
580 | struct ib_device *ibdev = target->srp_host->srp_dev->dev; | 768 | struct srp_device *dev = target->srp_host->srp_dev; |
769 | struct ib_device *ibdev = dev->dev; | ||
581 | struct srp_request *req; | 770 | struct srp_request *req; |
582 | int i; | 771 | int i; |
583 | 772 | ||
@@ -586,7 +775,10 @@ static void srp_free_req_data(struct srp_target_port *target) | |||
586 | 775 | ||
587 | for (i = 0; i < target->req_ring_size; ++i) { | 776 | for (i = 0; i < target->req_ring_size; ++i) { |
588 | req = &target->req_ring[i]; | 777 | req = &target->req_ring[i]; |
589 | kfree(req->fmr_list); | 778 | if (dev->use_fast_reg) |
779 | kfree(req->fr_list); | ||
780 | else | ||
781 | kfree(req->fmr_list); | ||
590 | kfree(req->map_page); | 782 | kfree(req->map_page); |
591 | if (req->indirect_dma_addr) { | 783 | if (req->indirect_dma_addr) { |
592 | ib_dma_unmap_single(ibdev, req->indirect_dma_addr, | 784 | ib_dma_unmap_single(ibdev, req->indirect_dma_addr, |
@@ -605,6 +797,7 @@ static int srp_alloc_req_data(struct srp_target_port *target) | |||
605 | struct srp_device *srp_dev = target->srp_host->srp_dev; | 797 | struct srp_device *srp_dev = target->srp_host->srp_dev; |
606 | struct ib_device *ibdev = srp_dev->dev; | 798 | struct ib_device *ibdev = srp_dev->dev; |
607 | struct srp_request *req; | 799 | struct srp_request *req; |
800 | void *mr_list; | ||
608 | dma_addr_t dma_addr; | 801 | dma_addr_t dma_addr; |
609 | int i, ret = -ENOMEM; | 802 | int i, ret = -ENOMEM; |
610 | 803 | ||
@@ -617,12 +810,20 @@ static int srp_alloc_req_data(struct srp_target_port *target) | |||
617 | 810 | ||
618 | for (i = 0; i < target->req_ring_size; ++i) { | 811 | for (i = 0; i < target->req_ring_size; ++i) { |
619 | req = &target->req_ring[i]; | 812 | req = &target->req_ring[i]; |
620 | req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *), | 813 | mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *), |
621 | GFP_KERNEL); | 814 | GFP_KERNEL); |
622 | req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *), | 815 | if (!mr_list) |
623 | GFP_KERNEL); | 816 | goto out; |
817 | if (srp_dev->use_fast_reg) | ||
818 | req->fr_list = mr_list; | ||
819 | else | ||
820 | req->fmr_list = mr_list; | ||
821 | req->map_page = kmalloc(srp_dev->max_pages_per_mr * | ||
822 | sizeof(void *), GFP_KERNEL); | ||
823 | if (!req->map_page) | ||
824 | goto out; | ||
624 | req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); | 825 | req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); |
625 | if (!req->fmr_list || !req->map_page || !req->indirect_desc) | 826 | if (!req->indirect_desc) |
626 | goto out; | 827 | goto out; |
627 | 828 | ||
628 | dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, | 829 | dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, |
@@ -759,21 +960,56 @@ static int srp_connect_target(struct srp_target_port *target) | |||
759 | } | 960 | } |
760 | } | 961 | } |
761 | 962 | ||
963 | static int srp_inv_rkey(struct srp_target_port *target, u32 rkey) | ||
964 | { | ||
965 | struct ib_send_wr *bad_wr; | ||
966 | struct ib_send_wr wr = { | ||
967 | .opcode = IB_WR_LOCAL_INV, | ||
968 | .wr_id = LOCAL_INV_WR_ID_MASK, | ||
969 | .next = NULL, | ||
970 | .num_sge = 0, | ||
971 | .send_flags = 0, | ||
972 | .ex.invalidate_rkey = rkey, | ||
973 | }; | ||
974 | |||
975 | return ib_post_send(target->qp, &wr, &bad_wr); | ||
976 | } | ||
977 | |||
762 | static void srp_unmap_data(struct scsi_cmnd *scmnd, | 978 | static void srp_unmap_data(struct scsi_cmnd *scmnd, |
763 | struct srp_target_port *target, | 979 | struct srp_target_port *target, |
764 | struct srp_request *req) | 980 | struct srp_request *req) |
765 | { | 981 | { |
766 | struct ib_device *ibdev = target->srp_host->srp_dev->dev; | 982 | struct srp_device *dev = target->srp_host->srp_dev; |
767 | struct ib_pool_fmr **pfmr; | 983 | struct ib_device *ibdev = dev->dev; |
984 | int i, res; | ||
768 | 985 | ||
769 | if (!scsi_sglist(scmnd) || | 986 | if (!scsi_sglist(scmnd) || |
770 | (scmnd->sc_data_direction != DMA_TO_DEVICE && | 987 | (scmnd->sc_data_direction != DMA_TO_DEVICE && |
771 | scmnd->sc_data_direction != DMA_FROM_DEVICE)) | 988 | scmnd->sc_data_direction != DMA_FROM_DEVICE)) |
772 | return; | 989 | return; |
773 | 990 | ||
774 | pfmr = req->fmr_list; | 991 | if (dev->use_fast_reg) { |
775 | while (req->nfmr--) | 992 | struct srp_fr_desc **pfr; |
776 | ib_fmr_pool_unmap(*pfmr++); | 993 | |
994 | for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) { | ||
995 | res = srp_inv_rkey(target, (*pfr)->mr->rkey); | ||
996 | if (res < 0) { | ||
997 | shost_printk(KERN_ERR, target->scsi_host, PFX | ||
998 | "Queueing INV WR for rkey %#x failed (%d)\n", | ||
999 | (*pfr)->mr->rkey, res); | ||
1000 | queue_work(system_long_wq, | ||
1001 | &target->tl_err_work); | ||
1002 | } | ||
1003 | } | ||
1004 | if (req->nmdesc) | ||
1005 | srp_fr_pool_put(target->fr_pool, req->fr_list, | ||
1006 | req->nmdesc); | ||
1007 | } else { | ||
1008 | struct ib_pool_fmr **pfmr; | ||
1009 | |||
1010 | for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++) | ||
1011 | ib_fmr_pool_unmap(*pfmr); | ||
1012 | } | ||
777 | 1013 | ||
778 | ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd), | 1014 | ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd), |
779 | scmnd->sc_data_direction); | 1015 | scmnd->sc_data_direction); |
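srp_inv_rkey() tags its work request with LOCAL_INV_WR_ID_MASK instead of an information-unit pointer, and srp_map_finish_fr() below does the same with FAST_REG_WR_ID_MASK. That tagging is what lets srp_handle_qp_err() later in this patch tell registration traffic apart from ordinary send/receive completions. The routing reduces to a bit test on wr_id (sketch; the real mask values live elsewhere in the patch and are assumed here):

	#define DEMO_LOCAL_INV_MASK	1ULL	/* assumed values; iu pointers
						 * are aligned, leaving the
						 * low bits free for tags */
	#define DEMO_FAST_REG_MASK	2ULL

	static const char *demo_classify_wr(u64 wr_id)
	{
		if (wr_id & DEMO_LOCAL_INV_MASK)
			return "LOCAL_INV";
		if (wr_id & DEMO_FAST_REG_MASK)
			return "FAST_REG_MR";
		return "iu";	/* wr_id really is an iu pointer */
	}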
@@ -813,6 +1049,10 @@ static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target, | |||
813 | 1049 | ||
814 | /** | 1050 | /** |
815 | * srp_free_req() - Unmap data and add request to the free request list. | 1051 | * srp_free_req() - Unmap data and add request to the free request list. |
1052 | * @target: SRP target port. | ||
1053 | * @req: Request to be freed. | ||
1054 | * @scmnd: SCSI command associated with @req. | ||
1055 | * @req_lim_delta: Amount to be added to @target->req_lim. | ||
816 | */ | 1056 | */ |
817 | static void srp_free_req(struct srp_target_port *target, | 1057 | static void srp_free_req(struct srp_target_port *target, |
818 | struct srp_request *req, struct scsi_cmnd *scmnd, | 1058 | struct srp_request *req, struct scsi_cmnd *scmnd, |
@@ -882,21 +1122,19 @@ static int srp_rport_reconnect(struct srp_rport *rport) | |||
882 | * callbacks will have finished before a new QP is allocated. | 1122 | * callbacks will have finished before a new QP is allocated. |
883 | */ | 1123 | */ |
884 | ret = srp_new_cm_id(target); | 1124 | ret = srp_new_cm_id(target); |
885 | /* | ||
886 | * Whether or not creating a new CM ID succeeded, create a new | ||
887 | * QP. This guarantees that all completion callback function | ||
888 | * invocations have finished before request resetting starts. | ||
889 | */ | ||
890 | if (ret == 0) | ||
891 | ret = srp_create_target_ib(target); | ||
892 | else | ||
893 | srp_create_target_ib(target); | ||
894 | 1125 | ||
895 | for (i = 0; i < target->req_ring_size; ++i) { | 1126 | for (i = 0; i < target->req_ring_size; ++i) { |
896 | struct srp_request *req = &target->req_ring[i]; | 1127 | struct srp_request *req = &target->req_ring[i]; |
897 | srp_finish_req(target, req, NULL, DID_RESET << 16); | 1128 | srp_finish_req(target, req, NULL, DID_RESET << 16); |
898 | } | 1129 | } |
899 | 1130 | ||
1131 | /* | ||
1132 | * Whether or not creating a new CM ID succeeded, create a new | ||
1133 | * QP. This guarantees that all callback functions for the old QP have | ||
1134 | * finished before any send requests are posted on the new QP. | ||
1135 | */ | ||
1136 | ret += srp_create_target_ib(target); | ||
1137 | |||
900 | INIT_LIST_HEAD(&target->free_tx); | 1138 | INIT_LIST_HEAD(&target->free_tx); |
901 | for (i = 0; i < target->queue_size; ++i) | 1139 | for (i = 0; i < target->queue_size; ++i) |
902 | list_add(&target->tx_ring[i]->list, &target->free_tx); | 1140 | list_add(&target->tx_ring[i]->list, &target->free_tx); |
@@ -928,33 +1166,87 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr, | |||
928 | static int srp_map_finish_fmr(struct srp_map_state *state, | 1166 | static int srp_map_finish_fmr(struct srp_map_state *state, |
929 | struct srp_target_port *target) | 1167 | struct srp_target_port *target) |
930 | { | 1168 | { |
931 | struct srp_device *dev = target->srp_host->srp_dev; | ||
932 | struct ib_pool_fmr *fmr; | 1169 | struct ib_pool_fmr *fmr; |
933 | u64 io_addr = 0; | 1170 | u64 io_addr = 0; |
934 | 1171 | ||
935 | if (!state->npages) | 1172 | fmr = ib_fmr_pool_map_phys(target->fmr_pool, state->pages, |
936 | return 0; | ||
937 | |||
938 | if (state->npages == 1) { | ||
939 | srp_map_desc(state, state->base_dma_addr, state->fmr_len, | ||
940 | target->rkey); | ||
941 | state->npages = state->fmr_len = 0; | ||
942 | return 0; | ||
943 | } | ||
944 | |||
945 | fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages, | ||
946 | state->npages, io_addr); | 1173 | state->npages, io_addr); |
947 | if (IS_ERR(fmr)) | 1174 | if (IS_ERR(fmr)) |
948 | return PTR_ERR(fmr); | 1175 | return PTR_ERR(fmr); |
949 | 1176 | ||
950 | *state->next_fmr++ = fmr; | 1177 | *state->next_fmr++ = fmr; |
951 | state->nfmr++; | 1178 | state->nmdesc++; |
1179 | |||
1180 | srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey); | ||
952 | 1181 | ||
953 | srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey); | ||
954 | state->npages = state->fmr_len = 0; | ||
955 | return 0; | 1182 | return 0; |
956 | } | 1183 | } |
957 | 1184 | ||
1185 | static int srp_map_finish_fr(struct srp_map_state *state, | ||
1186 | struct srp_target_port *target) | ||
1187 | { | ||
1188 | struct srp_device *dev = target->srp_host->srp_dev; | ||
1189 | struct ib_send_wr *bad_wr; | ||
1190 | struct ib_send_wr wr; | ||
1191 | struct srp_fr_desc *desc; | ||
1192 | u32 rkey; | ||
1193 | |||
1194 | desc = srp_fr_pool_get(target->fr_pool); | ||
1195 | if (!desc) | ||
1196 | return -ENOMEM; | ||
1197 | |||
1198 | rkey = ib_inc_rkey(desc->mr->rkey); | ||
1199 | ib_update_fast_reg_key(desc->mr, rkey); | ||
1200 | |||
1201 | memcpy(desc->frpl->page_list, state->pages, | ||
1202 | sizeof(state->pages[0]) * state->npages); | ||
1203 | |||
1204 | memset(&wr, 0, sizeof(wr)); | ||
1205 | wr.opcode = IB_WR_FAST_REG_MR; | ||
1206 | wr.wr_id = FAST_REG_WR_ID_MASK; | ||
1207 | wr.wr.fast_reg.iova_start = state->base_dma_addr; | ||
1208 | wr.wr.fast_reg.page_list = desc->frpl; | ||
1209 | wr.wr.fast_reg.page_list_len = state->npages; | ||
1210 | wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size); | ||
1211 | wr.wr.fast_reg.length = state->dma_len; | ||
1212 | wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE | | ||
1213 | IB_ACCESS_REMOTE_READ | | ||
1214 | IB_ACCESS_REMOTE_WRITE); | ||
1215 | wr.wr.fast_reg.rkey = desc->mr->lkey; | ||
1216 | |||
1217 | *state->next_fr++ = desc; | ||
1218 | state->nmdesc++; | ||
1219 | |||
1220 | srp_map_desc(state, state->base_dma_addr, state->dma_len, | ||
1221 | desc->mr->rkey); | ||
1222 | |||
1223 | return ib_post_send(target->qp, &wr, &bad_wr); | ||
1224 | } | ||
1225 | |||
1226 | static int srp_finish_mapping(struct srp_map_state *state, | ||
1227 | struct srp_target_port *target) | ||
1228 | { | ||
1229 | int ret = 0; | ||
1230 | |||
1231 | if (state->npages == 0) | ||
1232 | return 0; | ||
1233 | |||
1234 | if (state->npages == 1 && !register_always) | ||
1235 | srp_map_desc(state, state->base_dma_addr, state->dma_len, | ||
1236 | target->rkey); | ||
1237 | else | ||
1238 | ret = target->srp_host->srp_dev->use_fast_reg ? | ||
1239 | srp_map_finish_fr(state, target) : | ||
1240 | srp_map_finish_fmr(state, target); | ||
1241 | |||
1242 | if (ret == 0) { | ||
1243 | state->npages = 0; | ||
1244 | state->dma_len = 0; | ||
1245 | } | ||
1246 | |||
1247 | return ret; | ||
1248 | } | ||
1249 | |||
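srp_finish_mapping() becomes the single choke point for closing out an accumulated set of pages: an empty accumulator is a no-op, a single page skips registration entirely and rides the global rkey unless register_always overrides that, and everything else is dispatched to the FR or FMR finisher. The decision, flattened into a standalone sketch:

	/* 0 = no-op or direct descriptor, 1 = FR path, 2 = FMR path */
	static int demo_finish_choice(unsigned int npages,
				      bool register_always, bool use_fast_reg)
	{
		if (npages == 0)
			return 0;	/* nothing accumulated */
		if (npages == 1 && !register_always)
			return 0;	/* direct descriptor, no MR used */
		return use_fast_reg ? 1 : 2;
	}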
958 | static void srp_map_update_start(struct srp_map_state *state, | 1250 | static void srp_map_update_start(struct srp_map_state *state, |
959 | struct scatterlist *sg, int sg_index, | 1251 | struct scatterlist *sg, int sg_index, |
960 | dma_addr_t dma_addr) | 1252 | dma_addr_t dma_addr) |
@@ -967,7 +1259,7 @@ static void srp_map_update_start(struct srp_map_state *state, | |||
967 | static int srp_map_sg_entry(struct srp_map_state *state, | 1259 | static int srp_map_sg_entry(struct srp_map_state *state, |
968 | struct srp_target_port *target, | 1260 | struct srp_target_port *target, |
969 | struct scatterlist *sg, int sg_index, | 1261 | struct scatterlist *sg, int sg_index, |
970 | int use_fmr) | 1262 | bool use_mr) |
971 | { | 1263 | { |
972 | struct srp_device *dev = target->srp_host->srp_dev; | 1264 | struct srp_device *dev = target->srp_host->srp_dev; |
973 | struct ib_device *ibdev = dev->dev; | 1265 | struct ib_device *ibdev = dev->dev; |
@@ -979,23 +1271,25 @@ static int srp_map_sg_entry(struct srp_map_state *state, | |||
979 | if (!dma_len) | 1271 | if (!dma_len) |
980 | return 0; | 1272 | return 0; |
981 | 1273 | ||
982 | if (use_fmr == SRP_MAP_NO_FMR) { | 1274 | if (!use_mr) { |
983 | /* Once we're in direct map mode for a request, we don't | 1275 | /* |
984 | * go back to FMR mode, so no need to update anything | 1276 | * Once we're in direct map mode for a request, we don't |
1277 | * go back to FMR or FR mode, so no need to update anything | ||
985 | * other than the descriptor. | 1278 | * other than the descriptor. |
986 | */ | 1279 | */ |
987 | srp_map_desc(state, dma_addr, dma_len, target->rkey); | 1280 | srp_map_desc(state, dma_addr, dma_len, target->rkey); |
988 | return 0; | 1281 | return 0; |
989 | } | 1282 | } |
990 | 1283 | ||
991 | /* If we start at an offset into the FMR page, don't merge into | 1284 | /* |
992 | * the current FMR. Finish it out, and use the kernel's MR for this | 1285 | * Since not all RDMA HW drivers support non-zero page offsets for |
993 | * sg entry. This is to avoid potential bugs on some SRP targets | 1286 | * FMR, if we start at an offset into a page, don't merge into the |
994 | * that were never quite defined, but went away when the initiator | 1287 | * current FMR mapping. Finish it out, and use the kernel's MR for |
995 | * avoided using FMR on such page fragments. | 1288 | * this sg entry. |
996 | */ | 1289 | */ |
997 | if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) { | 1290 | if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) || |
998 | ret = srp_map_finish_fmr(state, target); | 1291 | dma_len > dev->mr_max_size) { |
1292 | ret = srp_finish_mapping(state, target); | ||
999 | if (ret) | 1293 | if (ret) |
1000 | return ret; | 1294 | return ret; |
1001 | 1295 | ||
@@ -1004,52 +1298,106 @@ static int srp_map_sg_entry(struct srp_map_state *state, | |||
1004 | return 0; | 1298 | return 0; |
1005 | } | 1299 | } |
1006 | 1300 | ||
1007 | /* If this is the first sg to go into the FMR, save our position. | 1301 | /* |
1008 | * We need to know the first unmapped entry, its index, and the | 1302 | * If this is the first sg that will be mapped via FMR or via FR, save |
1009 | * first unmapped address within that entry to be able to restart | 1303 | * our position. We need to know the first unmapped entry, its index, |
1010 | * mapping after an error. | 1304 | * and the first unmapped address within that entry to be able to |
1305 | * restart mapping after an error. | ||
1011 | */ | 1306 | */ |
1012 | if (!state->unmapped_sg) | 1307 | if (!state->unmapped_sg) |
1013 | srp_map_update_start(state, sg, sg_index, dma_addr); | 1308 | srp_map_update_start(state, sg, sg_index, dma_addr); |
1014 | 1309 | ||
1015 | while (dma_len) { | 1310 | while (dma_len) { |
1016 | if (state->npages == SRP_FMR_SIZE) { | 1311 | unsigned offset = dma_addr & ~dev->mr_page_mask; |
1017 | ret = srp_map_finish_fmr(state, target); | 1312 | if (state->npages == dev->max_pages_per_mr || offset != 0) { |
1313 | ret = srp_finish_mapping(state, target); | ||
1018 | if (ret) | 1314 | if (ret) |
1019 | return ret; | 1315 | return ret; |
1020 | 1316 | ||
1021 | srp_map_update_start(state, sg, sg_index, dma_addr); | 1317 | srp_map_update_start(state, sg, sg_index, dma_addr); |
1022 | } | 1318 | } |
1023 | 1319 | ||
1024 | len = min_t(unsigned int, dma_len, dev->fmr_page_size); | 1320 | len = min_t(unsigned int, dma_len, dev->mr_page_size - offset); |
1025 | 1321 | ||
1026 | if (!state->npages) | 1322 | if (!state->npages) |
1027 | state->base_dma_addr = dma_addr; | 1323 | state->base_dma_addr = dma_addr; |
1028 | state->pages[state->npages++] = dma_addr; | 1324 | state->pages[state->npages++] = dma_addr & dev->mr_page_mask; |
1029 | state->fmr_len += len; | 1325 | state->dma_len += len; |
1030 | dma_addr += len; | 1326 | dma_addr += len; |
1031 | dma_len -= len; | 1327 | dma_len -= len; |
1032 | } | 1328 | } |
1033 | 1329 | ||
1034 | /* If the last entry of the FMR wasn't a full page, then we need to | 1330 | /* |
1331 | * If the last entry of the MR wasn't a full page, then we need to | ||
1035 | * close it out and start a new one -- we can only merge at page | 1332 | * close it out and start a new one -- we can only merge at page |
1036 | * boundaries. | 1333 | * boundaries. |
1037 | */ | 1334 | */ |
1038 | ret = 0; | 1335 | ret = 0; |
1039 | if (len != dev->fmr_page_size) { | 1336 | if (len != dev->mr_page_size) { |
1040 | ret = srp_map_finish_fmr(state, target); | 1337 | ret = srp_finish_mapping(state, target); |
1041 | if (!ret) | 1338 | if (!ret) |
1042 | srp_map_update_start(state, NULL, 0, 0); | 1339 | srp_map_update_start(state, NULL, 0, 0); |
1043 | } | 1340 | } |
1044 | return ret; | 1341 | return ret; |
1045 | } | 1342 | } |
1046 | 1343 | ||
1344 | static int srp_map_sg(struct srp_map_state *state, | ||
1345 | struct srp_target_port *target, struct srp_request *req, | ||
1346 | struct scatterlist *scat, int count) | ||
1347 | { | ||
1348 | struct srp_device *dev = target->srp_host->srp_dev; | ||
1349 | struct ib_device *ibdev = dev->dev; | ||
1350 | struct scatterlist *sg; | ||
1351 | int i; | ||
1352 | bool use_mr; | ||
1353 | |||
1354 | state->desc = req->indirect_desc; | ||
1355 | state->pages = req->map_page; | ||
1356 | if (dev->use_fast_reg) { | ||
1357 | state->next_fr = req->fr_list; | ||
1358 | use_mr = !!target->fr_pool; | ||
1359 | } else { | ||
1360 | state->next_fmr = req->fmr_list; | ||
1361 | use_mr = !!target->fmr_pool; | ||
1362 | } | ||
1363 | |||
1364 | for_each_sg(scat, sg, count, i) { | ||
1365 | if (srp_map_sg_entry(state, target, sg, i, use_mr)) { | ||
1366 | /* | ||
1367 | * Memory registration failed, so backtrack to the | ||
1368 | * first unmapped entry and continue on without using | ||
1369 | * memory registration. | ||
1370 | */ | ||
1371 | dma_addr_t dma_addr; | ||
1372 | unsigned int dma_len; | ||
1373 | |||
1374 | backtrack: | ||
1375 | sg = state->unmapped_sg; | ||
1376 | i = state->unmapped_index; | ||
1377 | |||
1378 | dma_addr = ib_sg_dma_address(ibdev, sg); | ||
1379 | dma_len = ib_sg_dma_len(ibdev, sg); | ||
1380 | dma_len -= (state->unmapped_addr - dma_addr); | ||
1381 | dma_addr = state->unmapped_addr; | ||
1382 | use_mr = false; | ||
1383 | srp_map_desc(state, dma_addr, dma_len, target->rkey); | ||
1384 | } | ||
1385 | } | ||
1386 | |||
1387 | if (use_mr && srp_finish_mapping(state, target)) | ||
1388 | goto backtrack; | ||
1389 | |||
1390 | req->nmdesc = state->nmdesc; | ||
1391 | |||
1392 | return 0; | ||
1393 | } | ||
1394 | |||
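srp_map_sg() keeps the old backtracking contract, now for both schemes: on the first registration failure the loop rewinds to the first sg entry the unfinished MR would have covered (the checkpoint recorded by srp_map_update_start()), clears use_mr, and re-emits everything from the checkpoint onward as plain target->rkey descriptors; the trailing srp_finish_mapping() can fail the same way and jumps back in. A toy userspace simulation of the rewind (registration "fails" at entry 3):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		bool use_mr = true;
		int checkpoint = 0;	/* srp_map_update_start() analogue */

		for (int i = 0; i < 6; i++) {
			if (use_mr && i == 3) {		/* failure */
				i = checkpoint;		/* rewind... */
				use_mr = false;		/* ...go direct */
			}
			printf("entry %d: %s\n", i, use_mr ? "MR" : "direct");
		}
		return 0;
	}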
1047 | static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, | 1395 | static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, |
1048 | struct srp_request *req) | 1396 | struct srp_request *req) |
1049 | { | 1397 | { |
1050 | struct scatterlist *scat, *sg; | 1398 | struct scatterlist *scat; |
1051 | struct srp_cmd *cmd = req->cmd->buf; | 1399 | struct srp_cmd *cmd = req->cmd->buf; |
1052 | int i, len, nents, count, use_fmr; | 1400 | int len, nents, count; |
1053 | struct srp_device *dev; | 1401 | struct srp_device *dev; |
1054 | struct ib_device *ibdev; | 1402 | struct ib_device *ibdev; |
1055 | struct srp_map_state state; | 1403 | struct srp_map_state state; |
@@ -1081,7 +1429,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, | |||
1081 | fmt = SRP_DATA_DESC_DIRECT; | 1429 | fmt = SRP_DATA_DESC_DIRECT; |
1082 | len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); | 1430 | len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); |
1083 | 1431 | ||
1084 | if (count == 1) { | 1432 | if (count == 1 && !register_always) { |
1085 | /* | 1433 | /* |
1086 | * The midlayer only generated a single gather/scatter | 1434 | * The midlayer only generated a single gather/scatter |
1087 | * entry, or DMA mapping coalesced everything to a | 1435 | * entry, or DMA mapping coalesced everything to a |
@@ -1094,13 +1442,13 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, | |||
1094 | buf->key = cpu_to_be32(target->rkey); | 1442 | buf->key = cpu_to_be32(target->rkey); |
1095 | buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); | 1443 | buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); |
1096 | 1444 | ||
1097 | req->nfmr = 0; | 1445 | req->nmdesc = 0; |
1098 | goto map_complete; | 1446 | goto map_complete; |
1099 | } | 1447 | } |
1100 | 1448 | ||
1101 | /* We have more than one scatter/gather entry, so build our indirect | 1449 | /* |
1102 | * descriptor table, trying to merge as many entries with FMR as we | 1450 | * We have more than one scatter/gather entry, so build our indirect |
1103 | * can. | 1451 | * descriptor table, trying to merge as many entries as we can. |
1104 | */ | 1452 | */ |
1105 | indirect_hdr = (void *) cmd->add_data; | 1453 | indirect_hdr = (void *) cmd->add_data; |
1106 | 1454 | ||
@@ -1108,35 +1456,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, | |||
1108 | target->indirect_size, DMA_TO_DEVICE); | 1456 | target->indirect_size, DMA_TO_DEVICE); |
1109 | 1457 | ||
1110 | memset(&state, 0, sizeof(state)); | 1458 | memset(&state, 0, sizeof(state)); |
1111 | state.desc = req->indirect_desc; | 1459 | srp_map_sg(&state, target, req, scat, count); |
1112 | state.pages = req->map_page; | ||
1113 | state.next_fmr = req->fmr_list; | ||
1114 | |||
1115 | use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR; | ||
1116 | |||
1117 | for_each_sg(scat, sg, count, i) { | ||
1118 | if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) { | ||
1119 | /* FMR mapping failed, so backtrack to the first | ||
1120 | * unmapped entry and continue on without using FMR. | ||
1121 | */ | ||
1122 | dma_addr_t dma_addr; | ||
1123 | unsigned int dma_len; | ||
1124 | |||
1125 | backtrack: | ||
1126 | sg = state.unmapped_sg; | ||
1127 | i = state.unmapped_index; | ||
1128 | |||
1129 | dma_addr = ib_sg_dma_address(ibdev, sg); | ||
1130 | dma_len = ib_sg_dma_len(ibdev, sg); | ||
1131 | dma_len -= (state.unmapped_addr - dma_addr); | ||
1132 | dma_addr = state.unmapped_addr; | ||
1133 | use_fmr = SRP_MAP_NO_FMR; | ||
1134 | srp_map_desc(&state, dma_addr, dma_len, target->rkey); | ||
1135 | } | ||
1136 | } | ||
1137 | |||
1138 | if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target)) | ||
1139 | goto backtrack; | ||
1140 | 1460 | ||
1141 | /* We've mapped the request, now pull as much of the indirect | 1461 | /* We've mapped the request, now pull as much of the indirect |
1142 | * descriptor table as we can into the command buffer. If this | 1462 | * descriptor table as we can into the command buffer. If this |
@@ -1144,9 +1464,9 @@ backtrack: | |||
1144 | * guaranteed to fit into the command, as the SCSI layer won't | 1464 | * guaranteed to fit into the command, as the SCSI layer won't |
1145 | * give us more S/G entries than we allow. | 1465 | * give us more S/G entries than we allow. |
1146 | */ | 1466 | */ |
1147 | req->nfmr = state.nfmr; | ||
1148 | if (state.ndesc == 1) { | 1467 | if (state.ndesc == 1) { |
1149 | /* FMR mapping was able to collapse this to one entry, | 1468 | /* |
1469 | * Memory registration collapsed the sg-list into one entry, | ||
1150 | * so use a direct descriptor. | 1470 | * so use a direct descriptor. |
1151 | */ | 1471 | */ |
1152 | struct srp_direct_buf *buf = (void *) cmd->add_data; | 1472 | struct srp_direct_buf *buf = (void *) cmd->add_data; |
@@ -1455,6 +1775,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) | |||
1455 | 1775 | ||
1456 | /** | 1776 | /** |
1457 | * srp_tl_err_work() - handle a transport layer error | 1777 | * srp_tl_err_work() - handle a transport layer error |
1778 | * @work: Work structure embedded in an SRP target port. | ||
1458 | * | 1779 | * |
1459 | * Note: This function may get invoked before the rport has been created, | 1780 | * Note: This function may get invoked before the rport has been created, |
1460 | * hence the target->rport test. | 1781 | * hence the target->rport test. |
@@ -1468,14 +1789,24 @@ static void srp_tl_err_work(struct work_struct *work) | |||
1468 | srp_start_tl_fail_timers(target->rport); | 1789 | srp_start_tl_fail_timers(target->rport); |
1469 | } | 1790 | } |
1470 | 1791 | ||
1471 | static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err, | 1792 | static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status, |
1472 | struct srp_target_port *target) | 1793 | bool send_err, struct srp_target_port *target) |
1473 | { | 1794 | { |
1474 | if (target->connected && !target->qp_in_error) { | 1795 | if (target->connected && !target->qp_in_error) { |
1475 | shost_printk(KERN_ERR, target->scsi_host, | 1796 | if (wr_id & LOCAL_INV_WR_ID_MASK) { |
1476 | PFX "failed %s status %d\n", | 1797 | shost_printk(KERN_ERR, target->scsi_host, PFX |
1477 | send_err ? "send" : "receive", | 1798 | "LOCAL_INV failed with status %d\n", |
1478 | wc_status); | 1799 | wc_status); |
1800 | } else if (wr_id & FAST_REG_WR_ID_MASK) { | ||
1801 | shost_printk(KERN_ERR, target->scsi_host, PFX | ||
1802 | "FAST_REG_MR failed status %d\n", | ||
1803 | wc_status); | ||
1804 | } else { | ||
1805 | shost_printk(KERN_ERR, target->scsi_host, | ||
1806 | PFX "failed %s status %d for iu %p\n", | ||
1807 | send_err ? "send" : "receive", | ||
1808 | wc_status, (void *)(uintptr_t)wr_id); | ||
1809 | } | ||
1479 | queue_work(system_long_wq, &target->tl_err_work); | 1810 | queue_work(system_long_wq, &target->tl_err_work); |
1480 | } | 1811 | } |
1481 | target->qp_in_error = true; | 1812 | target->qp_in_error = true; |
@@ -1491,7 +1822,7 @@ static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) | |||
1491 | if (likely(wc.status == IB_WC_SUCCESS)) { | 1822 | if (likely(wc.status == IB_WC_SUCCESS)) { |
1492 | srp_handle_recv(target, &wc); | 1823 | srp_handle_recv(target, &wc); |
1493 | } else { | 1824 | } else { |
1494 | srp_handle_qp_err(wc.status, false, target); | 1825 | srp_handle_qp_err(wc.wr_id, wc.status, false, target); |
1495 | } | 1826 | } |
1496 | } | 1827 | } |
1497 | } | 1828 | } |
@@ -1507,7 +1838,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) | |||
1507 | iu = (struct srp_iu *) (uintptr_t) wc.wr_id; | 1838 | iu = (struct srp_iu *) (uintptr_t) wc.wr_id; |
1508 | list_add(&iu->list, &target->free_tx); | 1839 | list_add(&iu->list, &target->free_tx); |
1509 | } else { | 1840 | } else { |
1510 | srp_handle_qp_err(wc.status, true, target); | 1841 | srp_handle_qp_err(wc.wr_id, wc.status, true, target); |
1511 | } | 1842 | } |
1512 | } | 1843 | } |
1513 | } | 1844 | } |
@@ -1521,7 +1852,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) | |||
1521 | struct srp_cmd *cmd; | 1852 | struct srp_cmd *cmd; |
1522 | struct ib_device *dev; | 1853 | struct ib_device *dev; |
1523 | unsigned long flags; | 1854 | unsigned long flags; |
1524 | int len, result; | 1855 | int len, ret; |
1525 | const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; | 1856 | const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; |
1526 | 1857 | ||
1527 | /* | 1858 | /* |
@@ -1533,12 +1864,9 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) | |||
1533 | if (in_scsi_eh) | 1864 | if (in_scsi_eh) |
1534 | mutex_lock(&rport->mutex); | 1865 | mutex_lock(&rport->mutex); |
1535 | 1866 | ||
1536 | result = srp_chkready(target->rport); | 1867 | scmnd->result = srp_chkready(target->rport); |
1537 | if (unlikely(result)) { | 1868 | if (unlikely(scmnd->result)) |
1538 | scmnd->result = result; | 1869 | goto err; |
1539 | scmnd->scsi_done(scmnd); | ||
1540 | goto unlock_rport; | ||
1541 | } | ||
1542 | 1870 | ||
1543 | spin_lock_irqsave(&target->lock, flags); | 1871 | spin_lock_irqsave(&target->lock, flags); |
1544 | iu = __srp_get_tx_iu(target, SRP_IU_CMD); | 1872 | iu = __srp_get_tx_iu(target, SRP_IU_CMD); |
@@ -1553,7 +1881,6 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) | |||
1553 | ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, | 1881 | ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, |
1554 | DMA_TO_DEVICE); | 1882 | DMA_TO_DEVICE); |
1555 | 1883 | ||
1556 | scmnd->result = 0; | ||
1557 | scmnd->host_scribble = (void *) req; | 1884 | scmnd->host_scribble = (void *) req; |
1558 | 1885 | ||
1559 | cmd = iu->buf; | 1886 | cmd = iu->buf; |
@@ -1570,7 +1897,15 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) | |||
1570 | len = srp_map_data(scmnd, target, req); | 1897 | len = srp_map_data(scmnd, target, req); |
1571 | if (len < 0) { | 1898 | if (len < 0) { |
1572 | shost_printk(KERN_ERR, target->scsi_host, | 1899 | shost_printk(KERN_ERR, target->scsi_host, |
1573 | PFX "Failed to map data\n"); | 1900 | PFX "Failed to map data (%d)\n", len); |
1901 | /* | ||
1902 | * If we ran out of memory descriptors (-ENOMEM) because an | ||
1903 | * application is queuing many requests with more than | ||
1904 | * max_pages_per_mr sg-list elements, tell the SCSI mid-layer | ||
1905 | * to reduce queue depth temporarily. | ||
1906 | */ | ||
1907 | scmnd->result = len == -ENOMEM ? | ||
1908 | DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16; | ||
1574 | goto err_iu; | 1909 | goto err_iu; |
1575 | } | 1910 | } |
1576 | 1911 | ||
@@ -1582,11 +1917,13 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) | |||
1582 | goto err_unmap; | 1917 | goto err_unmap; |
1583 | } | 1918 | } |
1584 | 1919 | ||
1920 | ret = 0; | ||
1921 | |||
1585 | unlock_rport: | 1922 | unlock_rport: |
1586 | if (in_scsi_eh) | 1923 | if (in_scsi_eh) |
1587 | mutex_unlock(&rport->mutex); | 1924 | mutex_unlock(&rport->mutex); |
1588 | 1925 | ||
1589 | return 0; | 1926 | return ret; |
1590 | 1927 | ||
1591 | err_unmap: | 1928 | err_unmap: |
1592 | srp_unmap_data(scmnd, target, req); | 1929 | srp_unmap_data(scmnd, target, req); |
@@ -1594,16 +1931,27 @@ err_unmap: | |||
1594 | err_iu: | 1931 | err_iu: |
1595 | srp_put_tx_iu(target, iu, SRP_IU_CMD); | 1932 | srp_put_tx_iu(target, iu, SRP_IU_CMD); |
1596 | 1933 | ||
1934 | /* | ||
1935 | * Ensure that the loops that iterate over the request ring do | ||
1936 | * not encounter a dangling SCSI command pointer. | ||
1937 | */ | ||
1938 | req->scmnd = NULL; | ||
1939 | |||
1597 | spin_lock_irqsave(&target->lock, flags); | 1940 | spin_lock_irqsave(&target->lock, flags); |
1598 | list_add(&req->list, &target->free_reqs); | 1941 | list_add(&req->list, &target->free_reqs); |
1599 | 1942 | ||
1600 | err_unlock: | 1943 | err_unlock: |
1601 | spin_unlock_irqrestore(&target->lock, flags); | 1944 | spin_unlock_irqrestore(&target->lock, flags); |
1602 | 1945 | ||
1603 | if (in_scsi_eh) | 1946 | err: |
1604 | mutex_unlock(&rport->mutex); | 1947 | if (scmnd->result) { |
1948 | scmnd->scsi_done(scmnd); | ||
1949 | ret = 0; | ||
1950 | } else { | ||
1951 | ret = SCSI_MLQUEUE_HOST_BUSY; | ||
1952 | } | ||
1605 | 1953 | ||
1606 | return SCSI_MLQUEUE_HOST_BUSY; | 1954 | goto unlock_rport; |
1607 | } | 1955 | } |
1608 | 1956 | ||
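For readers less familiar with the SCSI midlayer contract that the reworked tail above implements: returning 0 means the command was accepted (and may already have been completed via ->scsi_done()), while SCSI_MLQUEUE_HOST_BUSY asks the midlayer to requeue the command untouched. A schematic of the decision at the err: label, with made-up types and constants standing in for the real <scsi/scsi.h> definitions:

#include <stdio.h>

#define DID_NO_CONNECT 0x01
#define SCSI_MLQUEUE_HOST_BUSY 0x1055

struct cmd { int result; void (*scsi_done)(struct cmd *); };

static void done(struct cmd *c) { printf("completed, result=%#x\n", c->result); }

/* Mirrors the err: label above: a recorded result means "complete the
 * command now and report 0 to the midlayer"; no result means "resources
 * ran out, have the midlayer retry the command later". */
static int queuecommand_tail(struct cmd *c)
{
	if (c->result) {
		c->scsi_done(c);
		return 0;
	}
	return SCSI_MLQUEUE_HOST_BUSY;
}

int main(void)
{
	struct cmd broken = { .result = DID_NO_CONNECT << 16, .scsi_done = done };
	struct cmd busy = { .result = 0, .scsi_done = done };

	printf("broken -> %#x\n", queuecommand_tail(&broken));
	printf("busy   -> %#x\n", queuecommand_tail(&busy));
	return 0;
}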
1609 | /* | 1957 | /* |
@@ -2310,6 +2658,8 @@ static struct class srp_class = { | |||
2310 | 2658 | ||
2311 | /** | 2659 | /** |
2312 | * srp_conn_unique() - check whether the connection to a target is unique | 2660 | * srp_conn_unique() - check whether the connection to a target is unique |
2661 | * @host: SRP host. | ||
2662 | * @target: SRP target port. | ||
2313 | */ | 2663 | */ |
2314 | static bool srp_conn_unique(struct srp_host *host, | 2664 | static bool srp_conn_unique(struct srp_host *host, |
2315 | struct srp_target_port *target) | 2665 | struct srp_target_port *target) |
@@ -2605,7 +2955,8 @@ static ssize_t srp_create_target(struct device *dev, | |||
2605 | container_of(dev, struct srp_host, dev); | 2955 | container_of(dev, struct srp_host, dev); |
2606 | struct Scsi_Host *target_host; | 2956 | struct Scsi_Host *target_host; |
2607 | struct srp_target_port *target; | 2957 | struct srp_target_port *target; |
2608 | struct ib_device *ibdev = host->srp_dev->dev; | 2958 | struct srp_device *srp_dev = host->srp_dev; |
2959 | struct ib_device *ibdev = srp_dev->dev; | ||
2609 | int ret; | 2960 | int ret; |
2610 | 2961 | ||
2611 | target_host = scsi_host_alloc(&srp_template, | 2962 | target_host = scsi_host_alloc(&srp_template, |
@@ -2650,9 +3001,9 @@ static ssize_t srp_create_target(struct device *dev, | |||
2650 | goto err; | 3001 | goto err; |
2651 | } | 3002 | } |
2652 | 3003 | ||
2653 | if (!host->srp_dev->fmr_pool && !target->allow_ext_sg && | 3004 | if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg && |
2654 | target->cmd_sg_cnt < target->sg_tablesize) { | 3005 | target->cmd_sg_cnt < target->sg_tablesize) { |
2655 | pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); | 3006 | pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); |
2656 | target->sg_tablesize = target->cmd_sg_cnt; | 3007 | target->sg_tablesize = target->cmd_sg_cnt; |
2657 | } | 3008 | } |
2658 | 3009 | ||
@@ -2790,9 +3141,9 @@ static void srp_add_one(struct ib_device *device) | |||
2790 | { | 3141 | { |
2791 | struct srp_device *srp_dev; | 3142 | struct srp_device *srp_dev; |
2792 | struct ib_device_attr *dev_attr; | 3143 | struct ib_device_attr *dev_attr; |
2793 | struct ib_fmr_pool_param fmr_param; | ||
2794 | struct srp_host *host; | 3144 | struct srp_host *host; |
2795 | int max_pages_per_fmr, fmr_page_shift, s, e, p; | 3145 | int mr_page_shift, s, e, p; |
3146 | u64 max_pages_per_mr; | ||
2796 | 3147 | ||
2797 | dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); | 3148 | dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); |
2798 | if (!dev_attr) | 3149 | if (!dev_attr) |
@@ -2807,15 +3158,39 @@ static void srp_add_one(struct ib_device *device) | |||
2807 | if (!srp_dev) | 3158 | if (!srp_dev) |
2808 | goto free_attr; | 3159 | goto free_attr; |
2809 | 3160 | ||
3161 | srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr && | ||
3162 | device->map_phys_fmr && device->unmap_fmr); | ||
3163 | srp_dev->has_fr = (dev_attr->device_cap_flags & | ||
3164 | IB_DEVICE_MEM_MGT_EXTENSIONS); | ||
3165 | if (!srp_dev->has_fmr && !srp_dev->has_fr) | ||
3166 | dev_warn(&device->dev, "neither FMR nor FR is supported\n"); | ||
3167 | |||
3168 | srp_dev->use_fast_reg = (srp_dev->has_fr && | ||
3169 | (!srp_dev->has_fmr || prefer_fr)); | ||
3170 | |||
2810 | /* | 3171 | /* |
2811 | * Use the smallest page size supported by the HCA, down to a | 3172 | * Use the smallest page size supported by the HCA, down to a |
2812 | * minimum of 4096 bytes. We're unlikely to build large sglists | 3173 | * minimum of 4096 bytes. We're unlikely to build large sglists |
2813 | * out of smaller entries. | 3174 | * out of smaller entries. |
2814 | */ | 3175 | */ |
2815 | fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1); | 3176 | mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1); |
2816 | srp_dev->fmr_page_size = 1 << fmr_page_shift; | 3177 | srp_dev->mr_page_size = 1 << mr_page_shift; |
2817 | srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1); | 3178 | srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1); |
2818 | srp_dev->fmr_max_size = srp_dev->fmr_page_size * SRP_FMR_SIZE; | 3179 | max_pages_per_mr = dev_attr->max_mr_size; |
3180 | do_div(max_pages_per_mr, srp_dev->mr_page_size); | ||
3181 | srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR, | ||
3182 | max_pages_per_mr); | ||
3183 | if (srp_dev->use_fast_reg) { | ||
3184 | srp_dev->max_pages_per_mr = | ||
3185 | min_t(u32, srp_dev->max_pages_per_mr, | ||
3186 | dev_attr->max_fast_reg_page_list_len); | ||
3187 | } | ||
3188 | srp_dev->mr_max_size = srp_dev->mr_page_size * | ||
3189 | srp_dev->max_pages_per_mr; | ||
3190 | pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n", | ||
3191 | device->name, mr_page_shift, dev_attr->max_mr_size, | ||
3192 | dev_attr->max_fast_reg_page_list_len, | ||
3193 | srp_dev->max_pages_per_mr, srp_dev->mr_max_size); | ||
2819 | 3194 | ||
2820 | INIT_LIST_HEAD(&srp_dev->dev_list); | 3195 | INIT_LIST_HEAD(&srp_dev->dev_list); |
2821 | 3196 | ||
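The capability setup above clamps the per-MR page count twice: first against the HCA's max_mr_size expressed in mr_page_size units (and against the driver's own SRP_MAX_PAGES_PER_MR cap), then, when fast registration is in use, against max_fast_reg_page_list_len. A userspace sketch of the same arithmetic; the attribute values are invented for illustration, not read from a real HCA:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define SRP_MAX_PAGES_PER_MR 512

int main(void)
{
	uint64_t max_mr_size = 1ULL << 32;		/* 4 GiB per MR */
	uint32_t page_size_cap = 0x1000;		/* 4 KiB smallest page */
	uint32_t max_fast_reg_page_list_len = 256;
	int use_fast_reg = 1;

	/* Smallest supported page size, but no smaller than 4096 bytes. */
	int mr_page_shift = ffs(page_size_cap) - 1;
	if (mr_page_shift < 12)
		mr_page_shift = 12;
	uint64_t mr_page_size = 1ULL << mr_page_shift;

	/* First clamp: pages that fit in one MR of max_mr_size bytes. */
	uint64_t max_pages_per_mr = max_mr_size / mr_page_size;
	if (max_pages_per_mr > SRP_MAX_PAGES_PER_MR)
		max_pages_per_mr = SRP_MAX_PAGES_PER_MR;
	/* Second clamp, fast registration only: page-list length limit. */
	if (use_fast_reg && max_pages_per_mr > max_fast_reg_page_list_len)
		max_pages_per_mr = max_fast_reg_page_list_len;

	printf("mr_page_size=%llu max_pages_per_mr=%llu mr_max_size=%llu\n",
	       (unsigned long long)mr_page_size,
	       (unsigned long long)max_pages_per_mr,
	       (unsigned long long)(mr_page_size * max_pages_per_mr));
	return 0;
}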
@@ -2831,27 +3206,6 @@ static void srp_add_one(struct ib_device *device) | |||
2831 | if (IS_ERR(srp_dev->mr)) | 3206 | if (IS_ERR(srp_dev->mr)) |
2832 | goto err_pd; | 3207 | goto err_pd; |
2833 | 3208 | ||
2834 | for (max_pages_per_fmr = SRP_FMR_SIZE; | ||
2835 | max_pages_per_fmr >= SRP_FMR_MIN_SIZE; | ||
2836 | max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) { | ||
2837 | memset(&fmr_param, 0, sizeof fmr_param); | ||
2838 | fmr_param.pool_size = SRP_FMR_POOL_SIZE; | ||
2839 | fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE; | ||
2840 | fmr_param.cache = 1; | ||
2841 | fmr_param.max_pages_per_fmr = max_pages_per_fmr; | ||
2842 | fmr_param.page_shift = fmr_page_shift; | ||
2843 | fmr_param.access = (IB_ACCESS_LOCAL_WRITE | | ||
2844 | IB_ACCESS_REMOTE_WRITE | | ||
2845 | IB_ACCESS_REMOTE_READ); | ||
2846 | |||
2847 | srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param); | ||
2848 | if (!IS_ERR(srp_dev->fmr_pool)) | ||
2849 | break; | ||
2850 | } | ||
2851 | |||
2852 | if (IS_ERR(srp_dev->fmr_pool)) | ||
2853 | srp_dev->fmr_pool = NULL; | ||
2854 | |||
2855 | if (device->node_type == RDMA_NODE_IB_SWITCH) { | 3209 | if (device->node_type == RDMA_NODE_IB_SWITCH) { |
2856 | s = 0; | 3210 | s = 0; |
2857 | e = 0; | 3211 | e = 0; |
@@ -2914,8 +3268,6 @@ static void srp_remove_one(struct ib_device *device) | |||
2914 | kfree(host); | 3268 | kfree(host); |
2915 | } | 3269 | } |
2916 | 3270 | ||
2917 | if (srp_dev->fmr_pool) | ||
2918 | ib_destroy_fmr_pool(srp_dev->fmr_pool); | ||
2919 | ib_dereg_mr(srp_dev->mr); | 3271 | ib_dereg_mr(srp_dev->mr); |
2920 | ib_dealloc_pd(srp_dev->pd); | 3272 | ib_dealloc_pd(srp_dev->pd); |
2921 | 3273 | ||
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index aad27b7b4a46..e46ecb15aa0d 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h | |||
@@ -66,13 +66,10 @@ enum { | |||
66 | SRP_TAG_NO_REQ = ~0U, | 66 | SRP_TAG_NO_REQ = ~0U, |
67 | SRP_TAG_TSK_MGMT = 1U << 31, | 67 | SRP_TAG_TSK_MGMT = 1U << 31, |
68 | 68 | ||
69 | SRP_FMR_SIZE = 512, | 69 | SRP_MAX_PAGES_PER_MR = 512, |
70 | SRP_FMR_MIN_SIZE = 128, | ||
71 | SRP_FMR_POOL_SIZE = 1024, | ||
72 | SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4, | ||
73 | 70 | ||
74 | SRP_MAP_ALLOW_FMR = 0, | 71 | LOCAL_INV_WR_ID_MASK = 1, |
75 | SRP_MAP_NO_FMR = 1, | 72 | FAST_REG_WR_ID_MASK = 2, |
76 | }; | 73 | }; |
77 | 74 | ||
78 | enum srp_target_state { | 75 | enum srp_target_state { |
@@ -86,15 +83,24 @@ enum srp_iu_type { | |||
86 | SRP_IU_RSP, | 83 | SRP_IU_RSP, |
87 | }; | 84 | }; |
88 | 85 | ||
86 | /* | ||
87 | * @mr_page_mask: HCA memory registration page mask. | ||
88 | * @mr_page_size: HCA memory registration page size. | ||
89 | * @mr_max_size: Maximum size in bytes of a single FMR / FR registration | ||
90 | * request. | ||
91 | */ | ||
89 | struct srp_device { | 92 | struct srp_device { |
90 | struct list_head dev_list; | 93 | struct list_head dev_list; |
91 | struct ib_device *dev; | 94 | struct ib_device *dev; |
92 | struct ib_pd *pd; | 95 | struct ib_pd *pd; |
93 | struct ib_mr *mr; | 96 | struct ib_mr *mr; |
94 | struct ib_fmr_pool *fmr_pool; | 97 | u64 mr_page_mask; |
95 | u64 fmr_page_mask; | 98 | int mr_page_size; |
96 | int fmr_page_size; | 99 | int mr_max_size; |
97 | int fmr_max_size; | 100 | int max_pages_per_mr; |
101 | bool has_fmr; | ||
102 | bool has_fr; | ||
103 | bool use_fast_reg; | ||
98 | }; | 104 | }; |
99 | 105 | ||
100 | struct srp_host { | 106 | struct srp_host { |
@@ -112,11 +118,14 @@ struct srp_request { | |||
112 | struct list_head list; | 118 | struct list_head list; |
113 | struct scsi_cmnd *scmnd; | 119 | struct scsi_cmnd *scmnd; |
114 | struct srp_iu *cmd; | 120 | struct srp_iu *cmd; |
115 | struct ib_pool_fmr **fmr_list; | 121 | union { |
122 | struct ib_pool_fmr **fmr_list; | ||
123 | struct srp_fr_desc **fr_list; | ||
124 | }; | ||
116 | u64 *map_page; | 125 | u64 *map_page; |
117 | struct srp_direct_buf *indirect_desc; | 126 | struct srp_direct_buf *indirect_desc; |
118 | dma_addr_t indirect_dma_addr; | 127 | dma_addr_t indirect_dma_addr; |
119 | short nfmr; | 128 | short nmdesc; |
120 | short index; | 129 | short index; |
121 | }; | 130 | }; |
122 | 131 | ||
@@ -131,6 +140,10 @@ struct srp_target_port { | |||
131 | struct ib_cq *send_cq ____cacheline_aligned_in_smp; | 140 | struct ib_cq *send_cq ____cacheline_aligned_in_smp; |
132 | struct ib_cq *recv_cq; | 141 | struct ib_cq *recv_cq; |
133 | struct ib_qp *qp; | 142 | struct ib_qp *qp; |
143 | union { | ||
144 | struct ib_fmr_pool *fmr_pool; | ||
145 | struct srp_fr_pool *fr_pool; | ||
146 | }; | ||
134 | u32 lkey; | 147 | u32 lkey; |
135 | u32 rkey; | 148 | u32 rkey; |
136 | enum srp_target_state state; | 149 | enum srp_target_state state; |
@@ -197,15 +210,66 @@ struct srp_iu { | |||
197 | enum dma_data_direction direction; | 210 | enum dma_data_direction direction; |
198 | }; | 211 | }; |
199 | 212 | ||
213 | /** | ||
214 | * struct srp_fr_desc - fast registration work request arguments | ||
215 | * @entry: Entry in srp_fr_pool.free_list. | ||
216 | * @mr: Memory region. | ||
217 | * @frpl: Fast registration page list. | ||
218 | */ | ||
219 | struct srp_fr_desc { | ||
220 | struct list_head entry; | ||
221 | struct ib_mr *mr; | ||
222 | struct ib_fast_reg_page_list *frpl; | ||
223 | }; | ||
224 | |||
225 | /** | ||
226 | * struct srp_fr_pool - pool of fast registration descriptors | ||
227 | * | ||
228 | * An entry is available for allocation if and only if it occurs in @free_list. | ||
229 | * | ||
230 | * @size: Number of descriptors in this pool. | ||
231 | * @max_page_list_len: Maximum fast registration work request page list length. | ||
232 | * @lock: Protects free_list. | ||
233 | * @free_list: List of free descriptors. | ||
234 | * @desc: Fast registration descriptor pool. | ||
235 | */ | ||
236 | struct srp_fr_pool { | ||
237 | int size; | ||
238 | int max_page_list_len; | ||
239 | spinlock_t lock; | ||
240 | struct list_head free_list; | ||
241 | struct srp_fr_desc desc[0]; | ||
242 | }; | ||
243 | |||
244 | /** | ||
245 | * struct srp_map_state - per-request DMA memory mapping state | ||
246 | * @desc: Pointer to the element of the SRP buffer descriptor array | ||
247 | * that is being filled in. | ||
248 | * @pages: Array with DMA addresses of pages being considered for | ||
249 | * memory registration. | ||
250 | * @base_dma_addr: DMA address of the first page that has not yet been mapped. | ||
251 | * @dma_len: Number of bytes that will be registered with the next | ||
252 | * FMR or FR memory registration call. | ||
253 | * @total_len: Total number of bytes in the sg-list being mapped. | ||
254 | * @npages: Number of page addresses in the pages[] array. | ||
255 | * @nmdesc: Number of FMR or FR memory descriptors used for mapping. | ||
256 | * @ndesc: Number of SRP buffer descriptors that have been filled in. | ||
257 | * @unmapped_sg: First element of the sg-list that is mapped via FMR or FR. | ||
258 | * @unmapped_index: Index of the first element mapped via FMR or FR. | ||
259 | * @unmapped_addr: DMA address of the first element mapped via FMR or FR. | ||
260 | */ | ||
200 | struct srp_map_state { | 261 | struct srp_map_state { |
201 | struct ib_pool_fmr **next_fmr; | 262 | union { |
263 | struct ib_pool_fmr **next_fmr; | ||
264 | struct srp_fr_desc **next_fr; | ||
265 | }; | ||
202 | struct srp_direct_buf *desc; | 266 | struct srp_direct_buf *desc; |
203 | u64 *pages; | 267 | u64 *pages; |
204 | dma_addr_t base_dma_addr; | 268 | dma_addr_t base_dma_addr; |
205 | u32 fmr_len; | 269 | u32 dma_len; |
206 | u32 total_len; | 270 | u32 total_len; |
207 | unsigned int npages; | 271 | unsigned int npages; |
208 | unsigned int nfmr; | 272 | unsigned int nmdesc; |
209 | unsigned int ndesc; | 273 | unsigned int ndesc; |
210 | struct scatterlist *unmapped_sg; | 274 | struct scatterlist *unmapped_sg; |
211 | int unmapped_index; | 275 | int unmapped_index; |
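The unions introduced in this header all exploit the same invariant: a target uses either FMR or fast registration, never both at once, so the per-request descriptor list, the per-target pool pointer, and the mapping-state cursor can each overlay the two variants, with srp_device.use_fast_reg selecting the interpretation. A minimal sketch of the pattern (descriptor types are stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct fmr_desc { int dummy; };
struct fr_desc  { int dummy; };

struct map_state {
	union {				/* exactly one scheme is active */
		struct fmr_desc **next_fmr;
		struct fr_desc  **next_fr;
	};
	unsigned int nmdesc;		/* memory descriptors consumed */
};

/* The union member that is read must match the scheme the device was
 * configured with (srp_device.use_fast_reg in the patch). */
static void consume_desc(struct map_state *st, bool use_fast_reg)
{
	if (use_fast_reg)
		st->next_fr++;
	else
		st->next_fmr++;
	st->nmdesc++;
}

int main(void)
{
	struct fmr_desc *fmrs[2] = { 0 };
	struct map_state st = { .next_fmr = fmrs };

	consume_desc(&st, false);	/* device configured for FMR */
	printf("descriptors used: %u\n", st.nmdesc);
	return 0;
}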
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index c3ad464d0627..b0297da50304 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c | |||
@@ -171,7 +171,7 @@ void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap) | |||
171 | */ | 171 | */ |
172 | 172 | ||
173 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | 173 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, |
174 | struct mlx4_buf *buf) | 174 | struct mlx4_buf *buf, gfp_t gfp) |
175 | { | 175 | { |
176 | dma_addr_t t; | 176 | dma_addr_t t; |
177 | 177 | ||
@@ -180,7 +180,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | |||
180 | buf->npages = 1; | 180 | buf->npages = 1; |
181 | buf->page_shift = get_order(size) + PAGE_SHIFT; | 181 | buf->page_shift = get_order(size) + PAGE_SHIFT; |
182 | buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, | 182 | buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, |
183 | size, &t, GFP_KERNEL); | 183 | size, &t, gfp); |
184 | if (!buf->direct.buf) | 184 | if (!buf->direct.buf) |
185 | return -ENOMEM; | 185 | return -ENOMEM; |
186 | 186 | ||
@@ -200,14 +200,14 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | |||
200 | buf->npages = buf->nbufs; | 200 | buf->npages = buf->nbufs; |
201 | buf->page_shift = PAGE_SHIFT; | 201 | buf->page_shift = PAGE_SHIFT; |
202 | buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), | 202 | buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), |
203 | GFP_KERNEL); | 203 | gfp); |
204 | if (!buf->page_list) | 204 | if (!buf->page_list) |
205 | return -ENOMEM; | 205 | return -ENOMEM; |
206 | 206 | ||
207 | for (i = 0; i < buf->nbufs; ++i) { | 207 | for (i = 0; i < buf->nbufs; ++i) { |
208 | buf->page_list[i].buf = | 208 | buf->page_list[i].buf = |
209 | dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, | 209 | dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, |
210 | &t, GFP_KERNEL); | 210 | &t, gfp); |
211 | if (!buf->page_list[i].buf) | 211 | if (!buf->page_list[i].buf) |
212 | goto err_free; | 212 | goto err_free; |
213 | 213 | ||
@@ -218,7 +218,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | |||
218 | 218 | ||
219 | if (BITS_PER_LONG == 64) { | 219 | if (BITS_PER_LONG == 64) { |
220 | struct page **pages; | 220 | struct page **pages; |
221 | pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); | 221 | pages = kmalloc(sizeof *pages * buf->nbufs, gfp); |
222 | if (!pages) | 222 | if (!pages) |
223 | goto err_free; | 223 | goto err_free; |
224 | for (i = 0; i < buf->nbufs; ++i) | 224 | for (i = 0; i < buf->nbufs; ++i) |
@@ -260,11 +260,12 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) | |||
260 | } | 260 | } |
261 | EXPORT_SYMBOL_GPL(mlx4_buf_free); | 261 | EXPORT_SYMBOL_GPL(mlx4_buf_free); |
262 | 262 | ||
263 | static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device) | 263 | static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device, |
264 | gfp_t gfp) | ||
264 | { | 265 | { |
265 | struct mlx4_db_pgdir *pgdir; | 266 | struct mlx4_db_pgdir *pgdir; |
266 | 267 | ||
267 | pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL); | 268 | pgdir = kzalloc(sizeof *pgdir, gfp); |
268 | if (!pgdir) | 269 | if (!pgdir) |
269 | return NULL; | 270 | return NULL; |
270 | 271 | ||
@@ -272,7 +273,7 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device) | |||
272 | pgdir->bits[0] = pgdir->order0; | 273 | pgdir->bits[0] = pgdir->order0; |
273 | pgdir->bits[1] = pgdir->order1; | 274 | pgdir->bits[1] = pgdir->order1; |
274 | pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, | 275 | pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, |
275 | &pgdir->db_dma, GFP_KERNEL); | 276 | &pgdir->db_dma, gfp); |
276 | if (!pgdir->db_page) { | 277 | if (!pgdir->db_page) { |
277 | kfree(pgdir); | 278 | kfree(pgdir); |
278 | return NULL; | 279 | return NULL; |
@@ -312,7 +313,7 @@ found: | |||
312 | return 0; | 313 | return 0; |
313 | } | 314 | } |
314 | 315 | ||
315 | int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) | 316 | int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp) |
316 | { | 317 | { |
317 | struct mlx4_priv *priv = mlx4_priv(dev); | 318 | struct mlx4_priv *priv = mlx4_priv(dev); |
318 | struct mlx4_db_pgdir *pgdir; | 319 | struct mlx4_db_pgdir *pgdir; |
@@ -324,7 +325,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) | |||
324 | if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) | 325 | if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) |
325 | goto out; | 326 | goto out; |
326 | 327 | ||
327 | pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev)); | 328 | pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev), gfp); |
328 | if (!pgdir) { | 329 | if (!pgdir) { |
329 | ret = -ENOMEM; | 330 | ret = -ENOMEM; |
330 | goto out; | 331 | goto out; |
@@ -376,13 +377,13 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | |||
376 | { | 377 | { |
377 | int err; | 378 | int err; |
378 | 379 | ||
379 | err = mlx4_db_alloc(dev, &wqres->db, 1); | 380 | err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL); |
380 | if (err) | 381 | if (err) |
381 | return err; | 382 | return err; |
382 | 383 | ||
383 | *wqres->db.db = 0; | 384 | *wqres->db.db = 0; |
384 | 385 | ||
385 | err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf); | 386 | err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL); |
386 | if (err) | 387 | if (err) |
387 | goto err_db; | 388 | goto err_db; |
388 | 389 | ||
@@ -391,7 +392,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | |||
391 | if (err) | 392 | if (err) |
392 | goto err_buf; | 393 | goto err_buf; |
393 | 394 | ||
394 | err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf); | 395 | err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL); |
395 | if (err) | 396 | if (err) |
396 | goto err_mtt; | 397 | goto err_mtt; |
397 | 398 | ||
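The changes to this file, and the matching icm/mr/qp changes further down, follow one theme: a gfp_t parameter is threaded from the public entry points (mlx4_buf_alloc(), mlx4_db_alloc(), mlx4_qp_alloc()) down to the underlying allocations, so callers in contexts that must not trigger blocking reclaim can pass something other than the previously hard-coded GFP_KERNEL. Reduced to its essence, as a kernel-style sketch rather than the driver code itself:

#include <linux/slab.h>
#include <linux/gfp.h>

/* Before: the allocation policy is fixed at the bottom of the call
 * chain, so callers in atomic or reclaim-sensitive context cannot
 * use this helper at all. */
static void *alloc_db_page_old(void)
{
	return kzalloc(PAGE_SIZE, GFP_KERNEL);
}

/* After: every caller states its own sleeping/reclaim constraints
 * and the constraint travels down with the call. */
static void *alloc_db_page(gfp_t gfp)
{
	return kzalloc(PAGE_SIZE, gfp);
}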
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 78099eab7673..3370ecb8c3d2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
@@ -705,20 +705,28 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, | |||
705 | struct ib_smp *smp = inbox->buf; | 705 | struct ib_smp *smp = inbox->buf; |
706 | u32 index; | 706 | u32 index; |
707 | u8 port; | 707 | u8 port; |
708 | u8 opcode_modifier; | ||
708 | u16 *table; | 709 | u16 *table; |
709 | int err; | 710 | int err; |
710 | int vidx, pidx; | 711 | int vidx, pidx; |
712 | int network_view; | ||
711 | struct mlx4_priv *priv = mlx4_priv(dev); | 713 | struct mlx4_priv *priv = mlx4_priv(dev); |
712 | struct ib_smp *outsmp = outbox->buf; | 714 | struct ib_smp *outsmp = outbox->buf; |
713 | __be16 *outtab = (__be16 *)(outsmp->data); | 715 | __be16 *outtab = (__be16 *)(outsmp->data); |
714 | __be32 slave_cap_mask; | 716 | __be32 slave_cap_mask; |
715 | __be64 slave_node_guid; | 717 | __be64 slave_node_guid; |
718 | |||
716 | port = vhcr->in_modifier; | 719 | port = vhcr->in_modifier; |
717 | 720 | ||
721 | /* network-view bit is for driver use only, and should not be passed to FW */ | ||
722 | opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */ | ||
723 | network_view = !!(vhcr->op_modifier & 0x8); | ||
724 | |||
718 | if (smp->base_version == 1 && | 725 | if (smp->base_version == 1 && |
719 | smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && | 726 | smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && |
720 | smp->class_version == 1) { | 727 | smp->class_version == 1) { |
721 | if (smp->method == IB_MGMT_METHOD_GET) { | 728 | /* host view is paravirtualized */ |
729 | if (!network_view && smp->method == IB_MGMT_METHOD_GET) { | ||
722 | if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) { | 730 | if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) { |
723 | index = be32_to_cpu(smp->attr_mod); | 731 | index = be32_to_cpu(smp->attr_mod); |
724 | if (port < 1 || port > dev->caps.num_ports) | 732 | if (port < 1 || port > dev->caps.num_ports) |
@@ -743,7 +751,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, | |||
743 | /*get the slave specific caps:*/ | 751 | /*get the slave specific caps:*/ |
744 | /*do the command */ | 752 | /*do the command */ |
745 | err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, | 753 | err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, |
746 | vhcr->in_modifier, vhcr->op_modifier, | 754 | vhcr->in_modifier, opcode_modifier, |
747 | vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); | 755 | vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); |
748 | /* modify the response for slaves */ | 756 | /* modify the response for slaves */ |
749 | if (!err && slave != mlx4_master_func_num(dev)) { | 757 | if (!err && slave != mlx4_master_func_num(dev)) { |
@@ -760,7 +768,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, | |||
760 | smp->attr_mod = cpu_to_be32(slave / 8); | 768 | smp->attr_mod = cpu_to_be32(slave / 8); |
761 | /* execute cmd */ | 769 | /* execute cmd */ |
762 | err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, | 770 | err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, |
763 | vhcr->in_modifier, vhcr->op_modifier, | 771 | vhcr->in_modifier, opcode_modifier, |
764 | vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); | 772 | vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); |
765 | if (!err) { | 773 | if (!err) { |
766 | /* if needed, move slave gid to index 0 */ | 774 | /* if needed, move slave gid to index 0 */ |
@@ -774,7 +782,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, | |||
774 | } | 782 | } |
775 | if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) { | 783 | if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) { |
776 | err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, | 784 | err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, |
777 | vhcr->in_modifier, vhcr->op_modifier, | 785 | vhcr->in_modifier, opcode_modifier, |
778 | vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); | 786 | vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); |
779 | if (!err) { | 787 | if (!err) { |
780 | slave_node_guid = mlx4_get_slave_node_guid(dev, slave); | 788 | slave_node_guid = mlx4_get_slave_node_guid(dev, slave); |
@@ -784,19 +792,24 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, | |||
784 | } | 792 | } |
785 | } | 793 | } |
786 | } | 794 | } |
795 | |||
796 | /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs. | ||
797 | * These are the MADs used by ib verbs (such as ib_query_gid). | ||
798 | */ | ||
787 | if (slave != mlx4_master_func_num(dev) && | 799 | if (slave != mlx4_master_func_num(dev) && |
788 | ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) || | 800 | !mlx4_vf_smi_enabled(dev, slave, port)) { |
789 | (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && | 801 | if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && |
790 | smp->method == IB_MGMT_METHOD_SET))) { | 802 | smp->method == IB_MGMT_METHOD_GET) || network_view) { |
791 | mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, " | 803 | mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n", |
792 | "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n", | 804 | slave, smp->method, smp->mgmt_class, |
793 | slave, smp->method, smp->mgmt_class, | 805 | network_view ? "Network" : "Host", |
794 | be16_to_cpu(smp->attr_id)); | 806 | be16_to_cpu(smp->attr_id)); |
795 | return -EPERM; | 807 | return -EPERM; |
808 | } | ||
796 | } | 809 | } |
797 | /*default:*/ | 810 | |
798 | return mlx4_cmd_box(dev, inbox->dma, outbox->dma, | 811 | return mlx4_cmd_box(dev, inbox->dma, outbox->dma, |
799 | vhcr->in_modifier, vhcr->op_modifier, | 812 | vhcr->in_modifier, opcode_modifier, |
800 | vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); | 813 | vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); |
801 | } | 814 | } |
802 | 815 | ||
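The wrapper above encodes an extra request attribute in bit 3 of the opcode modifier: the bit is meaningful only to the driver, so it is captured into network_view and masked out before the modifier is forwarded to firmware. The in-band flag pattern in isolation, with NET_VIEW_BIT as an illustrative name for the 0x8 that the patch open-codes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NET_VIEW_BIT 0x8	/* driver-private bit in op_modifier */

static uint8_t prepare_fw_modifier(uint8_t op_modifier, bool *network_view)
{
	*network_view = !!(op_modifier & NET_VIEW_BIT);
	/* Firmware must never see the driver-private flag. */
	return op_modifier & (uint8_t)~NET_VIEW_BIT;
}

int main(void)
{
	bool net_view;
	uint8_t fw_mod = prepare_fw_modifier(0x9, &net_view);

	printf("fw modifier=%#x network_view=%d\n", fw_mod, net_view);
	return 0;
}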
@@ -1653,6 +1666,8 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) | |||
1653 | for (port = min_port; port <= max_port; port++) { | 1666 | for (port = min_port; port <= max_port; port++) { |
1654 | if (!test_bit(port - 1, actv_ports.ports)) | 1667 | if (!test_bit(port - 1, actv_ports.ports)) |
1655 | continue; | 1668 | continue; |
1669 | priv->mfunc.master.vf_oper[slave].smi_enabled[port] = | ||
1670 | priv->mfunc.master.vf_admin[slave].enable_smi[port]; | ||
1656 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; | 1671 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; |
1657 | vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; | 1672 | vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; |
1658 | vp_oper->state = *vp_admin; | 1673 | vp_oper->state = *vp_admin; |
@@ -1704,6 +1719,8 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave | |||
1704 | for (port = min_port; port <= max_port; port++) { | 1719 | for (port = min_port; port <= max_port; port++) { |
1705 | if (!test_bit(port - 1, actv_ports.ports)) | 1720 | if (!test_bit(port - 1, actv_ports.ports)) |
1706 | continue; | 1721 | continue; |
1722 | priv->mfunc.master.vf_oper[slave].smi_enabled[port] = | ||
1723 | MLX4_VF_SMI_DISABLED; | ||
1707 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; | 1724 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; |
1708 | if (NO_INDX != vp_oper->vlan_idx) { | 1725 | if (NO_INDX != vp_oper->vlan_idx) { |
1709 | __mlx4_unregister_vlan(&priv->dev, | 1726 | __mlx4_unregister_vlan(&priv->dev, |
@@ -2537,3 +2554,50 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat | |||
2537 | return 0; | 2554 | return 0; |
2538 | } | 2555 | } |
2539 | EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state); | 2556 | EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state); |
2557 | |||
2558 | int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port) | ||
2559 | { | ||
2560 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
2561 | |||
2562 | if (slave < 1 || slave >= dev->num_slaves || | ||
2563 | port < 1 || port > MLX4_MAX_PORTS) | ||
2564 | return 0; | ||
2565 | |||
2566 | return priv->mfunc.master.vf_oper[slave].smi_enabled[port] == | ||
2567 | MLX4_VF_SMI_ENABLED; | ||
2568 | } | ||
2569 | EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled); | ||
2570 | |||
2571 | int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port) | ||
2572 | { | ||
2573 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
2574 | |||
2575 | if (slave == mlx4_master_func_num(dev)) | ||
2576 | return 1; | ||
2577 | |||
2578 | if (slave < 1 || slave >= dev->num_slaves || | ||
2579 | port < 1 || port > MLX4_MAX_PORTS) | ||
2580 | return 0; | ||
2581 | |||
2582 | return priv->mfunc.master.vf_admin[slave].enable_smi[port] == | ||
2583 | MLX4_VF_SMI_ENABLED; | ||
2584 | } | ||
2585 | EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin); | ||
2586 | |||
2587 | int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, | ||
2588 | int enabled) | ||
2589 | { | ||
2590 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
2591 | |||
2592 | if (slave == mlx4_master_func_num(dev)) | ||
2593 | return 0; | ||
2594 | |||
2595 | if (slave < 1 || slave >= dev->num_slaves || | ||
2596 | port < 1 || port > MLX4_MAX_PORTS || | ||
2597 | enabled < 0 || enabled > 1) | ||
2598 | return -EINVAL; | ||
2599 | |||
2600 | priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled; | ||
2601 | return 0; | ||
2602 | } | ||
2603 | EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin); | ||
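The three exported helpers split SMI privilege into an administrative state (what the host admin requested; changeable at any time) and an operational state (latched by mlx4_master_activate_admin_state() when the slave comes up). A hypothetical caller, for example a sysfs store handler in the IB driver, might combine them as below; this is a sketch only, and dev/slave/port are assumed to come from the caller:

static int set_vf_smi_policy(struct mlx4_dev *dev, int slave, int port,
			     int enable)
{
	int err;

	err = mlx4_vf_set_enable_smi_admin(dev, slave, port, enable);
	if (err)
		return err;	/* -EINVAL: bad slave, port or value */

	/* The operational state changes only on the next VF restart. */
	pr_info("vf %d port %d: admin=%d oper=%d\n", slave, port,
		mlx4_vf_get_enable_smi_admin(dev, slave, port),
		mlx4_vf_smi_enabled(dev, slave, port));
	return 0;
}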
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 0487121e4a0f..c90cde5b4aee 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c | |||
@@ -173,11 +173,11 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn) | |||
173 | if (*cqn == -1) | 173 | if (*cqn == -1) |
174 | return -ENOMEM; | 174 | return -ENOMEM; |
175 | 175 | ||
176 | err = mlx4_table_get(dev, &cq_table->table, *cqn); | 176 | err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL); |
177 | if (err) | 177 | if (err) |
178 | goto err_out; | 178 | goto err_out; |
179 | 179 | ||
180 | err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn); | 180 | err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL); |
181 | if (err) | 181 | if (err) |
182 | goto err_put; | 182 | goto err_put; |
183 | return 0; | 183 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index ba049ae88749..87857a6463eb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
@@ -972,7 +972,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, | |||
972 | if (!context) | 972 | if (!context) |
973 | return -ENOMEM; | 973 | return -ENOMEM; |
974 | 974 | ||
975 | err = mlx4_qp_alloc(mdev->dev, qpn, qp); | 975 | err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); |
976 | if (err) { | 976 | if (err) { |
977 | en_err(priv, "Failed to allocate qp #%x\n", qpn); | 977 | en_err(priv, "Failed to allocate qp #%x\n", qpn); |
978 | goto out; | 978 | goto out; |
@@ -1012,7 +1012,7 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv) | |||
1012 | en_err(priv, "Failed reserving drop qpn\n"); | 1012 | en_err(priv, "Failed reserving drop qpn\n"); |
1013 | return err; | 1013 | return err; |
1014 | } | 1014 | } |
1015 | err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp); | 1015 | err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL); |
1016 | if (err) { | 1016 | if (err) { |
1017 | en_err(priv, "Failed allocating drop qp\n"); | 1017 | en_err(priv, "Failed allocating drop qp\n"); |
1018 | mlx4_qp_release_range(priv->mdev->dev, qpn, 1); | 1018 | mlx4_qp_release_range(priv->mdev->dev, qpn, 1); |
@@ -1071,7 +1071,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) | |||
1071 | } | 1071 | } |
1072 | 1072 | ||
1073 | /* Configure RSS indirection qp */ | 1073 | /* Configure RSS indirection qp */ |
1074 | err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); | 1074 | err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL); |
1075 | if (err) { | 1075 | if (err) { |
1076 | en_err(priv, "Failed to allocate RSS indirection QP\n"); | 1076 | en_err(priv, "Failed to allocate RSS indirection QP\n"); |
1077 | goto rss_err; | 1077 | goto rss_err; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index dd1f6d346459..bc0cc1eb214d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -113,7 +113,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | |||
113 | ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); | 113 | ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); |
114 | 114 | ||
115 | ring->qpn = qpn; | 115 | ring->qpn = qpn; |
116 | err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); | 116 | err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); |
117 | if (err) { | 117 | if (err) { |
118 | en_err(priv, "Failed allocating qp %d\n", ring->qpn); | 118 | en_err(priv, "Failed allocating qp %d\n", ring->qpn); |
119 | goto err_map; | 119 | goto err_map; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index d16a4d118903..01e6dd61ee3c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
@@ -178,8 +178,8 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
178 | struct mlx4_cmd_info *cmd) | 178 | struct mlx4_cmd_info *cmd) |
179 | { | 179 | { |
180 | struct mlx4_priv *priv = mlx4_priv(dev); | 180 | struct mlx4_priv *priv = mlx4_priv(dev); |
181 | u8 field; | 181 | u8 field, port; |
182 | u32 size; | 182 | u32 size, proxy_qp, qkey; |
183 | int err = 0; | 183 | int err = 0; |
184 | 184 | ||
185 | #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 | 185 | #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 |
@@ -209,6 +209,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
209 | 209 | ||
210 | /* when opcode modifier = 1 */ | 210 | /* when opcode modifier = 1 */ |
211 | #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 | 211 | #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 |
212 | #define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET 0x4 | ||
212 | #define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8 | 213 | #define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8 |
213 | #define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc | 214 | #define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc |
214 | 215 | ||
@@ -221,6 +222,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
221 | #define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40 | 222 | #define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40 |
222 | #define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80 | 223 | #define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80 |
223 | #define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10 | 224 | #define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10 |
225 | #define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08 | ||
224 | 226 | ||
225 | #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 | 227 | #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 |
226 | 228 | ||
@@ -234,28 +236,35 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
234 | return -EINVAL; | 236 | return -EINVAL; |
235 | 237 | ||
236 | vhcr->in_modifier = converted_port; | 238 | vhcr->in_modifier = converted_port; |
237 | /* Set nic_info bit to mark new fields support */ | ||
238 | field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO; | ||
239 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET); | ||
240 | |||
241 | /* phys-port = logical-port */ | 239 | /* phys-port = logical-port */ |
242 | field = vhcr->in_modifier - | 240 | field = vhcr->in_modifier - |
243 | find_first_bit(actv_ports.ports, dev->caps.num_ports); | 241 | find_first_bit(actv_ports.ports, dev->caps.num_ports); |
244 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); | 242 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); |
245 | 243 | ||
246 | field = vhcr->in_modifier; | 244 | port = vhcr->in_modifier; |
245 | proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1; | ||
246 | |||
247 | /* Set nic_info bit to mark new fields support */ | ||
248 | field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO; | ||
249 | |||
250 | if (mlx4_vf_smi_enabled(dev, slave, port) && | ||
251 | !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) { | ||
252 | field |= QUERY_FUNC_CAP_VF_ENABLE_QP0; | ||
253 | MLX4_PUT(outbox->buf, qkey, | ||
254 | QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET); | ||
255 | } | ||
256 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET); | ||
257 | |||
247 | /* size is now the QP number */ | 258 | /* size is now the QP number */ |
248 | size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1; | 259 | size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1; |
249 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL); | 260 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL); |
250 | 261 | ||
251 | size += 2; | 262 | size += 2; |
252 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL); | 263 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL); |
253 | 264 | ||
254 | size = dev->phys_caps.base_proxy_sqpn + 8 * slave + field - 1; | 265 | MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY); |
255 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY); | 266 | proxy_qp += 2; |
256 | 267 | MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY); | |
257 | size += 2; | ||
258 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY); | ||
259 | 268 | ||
260 | MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier], | 269 | MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier], |
261 | QUERY_FUNC_CAP_PHYS_PORT_ID); | 270 | QUERY_FUNC_CAP_PHYS_PORT_ID); |
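The wrapper derives the per-function special QP numbers from fixed bases: each slave owns a block of eight QPs per type, with QP0 at offset port - 1 within the block and QP1 two above it (hence the proxy_qp += 2). A standalone rendering of the numbering; the base values are invented for illustration, the real ones come from dev->phys_caps:

#include <stdio.h>

#define BASE_TUNNEL_SQPN 0x1000	/* illustrative */
#define BASE_PROXY_SQPN  0x2000	/* illustrative */

int main(void)
{
	int slave = 3, port = 2;

	int qp0_tunnel = BASE_TUNNEL_SQPN + 8 * slave + port - 1;
	int qp1_tunnel = qp0_tunnel + 2;
	int qp0_proxy  = BASE_PROXY_SQPN  + 8 * slave + port - 1;
	int qp1_proxy  = qp0_proxy + 2;

	printf("tunnel qp0=%#x qp1=%#x proxy qp0=%#x qp1=%#x\n",
	       qp0_tunnel, qp1_tunnel, qp0_proxy, qp1_proxy);
	return 0;
}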
@@ -326,7 +335,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, | |||
326 | struct mlx4_cmd_mailbox *mailbox; | 335 | struct mlx4_cmd_mailbox *mailbox; |
327 | u32 *outbox; | 336 | u32 *outbox; |
328 | u8 field, op_modifier; | 337 | u8 field, op_modifier; |
329 | u32 size; | 338 | u32 size, qkey; |
330 | int err = 0, quotas = 0; | 339 | int err = 0, quotas = 0; |
331 | 340 | ||
332 | op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ | 341 | op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ |
@@ -414,7 +423,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, | |||
414 | 423 | ||
415 | MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET); | 424 | MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET); |
416 | if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) { | 425 | if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) { |
417 | if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_OFFSET) { | 426 | if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) { |
418 | mlx4_err(dev, "VLAN is enforced on this port\n"); | 427 | mlx4_err(dev, "VLAN is enforced on this port\n"); |
419 | err = -EPROTONOSUPPORT; | 428 | err = -EPROTONOSUPPORT; |
420 | goto out; | 429 | goto out; |
@@ -442,6 +451,13 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, | |||
442 | goto out; | 451 | goto out; |
443 | } | 452 | } |
444 | 453 | ||
454 | if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) { | ||
455 | MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET); | ||
456 | func_cap->qp0_qkey = qkey; | ||
457 | } else { | ||
458 | func_cap->qp0_qkey = 0; | ||
459 | } | ||
460 | |||
445 | MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL); | 461 | MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL); |
446 | func_cap->qp0_tunnel_qpn = size & 0xFFFFFF; | 462 | func_cap->qp0_tunnel_qpn = size & 0xFFFFFF; |
447 | 463 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 6811ee00ba7c..1fce03ebe5c4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h | |||
@@ -134,6 +134,7 @@ struct mlx4_func_cap { | |||
134 | int max_eq; | 134 | int max_eq; |
135 | int reserved_eq; | 135 | int reserved_eq; |
136 | int mcg_quota; | 136 | int mcg_quota; |
137 | u32 qp0_qkey; | ||
137 | u32 qp0_tunnel_qpn; | 138 | u32 qp0_tunnel_qpn; |
138 | u32 qp0_proxy_qpn; | 139 | u32 qp0_proxy_qpn; |
139 | u32 qp1_tunnel_qpn; | 140 | u32 qp1_tunnel_qpn; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 5fbf4924c272..97c9b1db1d27 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c | |||
@@ -245,7 +245,8 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev) | |||
245 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); | 245 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); |
246 | } | 246 | } |
247 | 247 | ||
248 | int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj) | 248 | int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, |
249 | gfp_t gfp) | ||
249 | { | 250 | { |
250 | u32 i = (obj & (table->num_obj - 1)) / | 251 | u32 i = (obj & (table->num_obj - 1)) / |
251 | (MLX4_TABLE_CHUNK_SIZE / table->obj_size); | 252 | (MLX4_TABLE_CHUNK_SIZE / table->obj_size); |
@@ -259,7 +260,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj) | |||
259 | } | 260 | } |
260 | 261 | ||
261 | table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT, | 262 | table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT, |
262 | (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | | 263 | (table->lowmem ? gfp : GFP_HIGHUSER) | |
263 | __GFP_NOWARN, table->coherent); | 264 | __GFP_NOWARN, table->coherent); |
264 | if (!table->icm[i]) { | 265 | if (!table->icm[i]) { |
265 | ret = -ENOMEM; | 266 | ret = -ENOMEM; |
@@ -356,7 +357,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, | |||
356 | u32 i; | 357 | u32 i; |
357 | 358 | ||
358 | for (i = start; i <= end; i += inc) { | 359 | for (i = start; i <= end; i += inc) { |
359 | err = mlx4_table_get(dev, table, i); | 360 | err = mlx4_table_get(dev, table, i, GFP_KERNEL); |
360 | if (err) | 361 | if (err) |
361 | goto fail; | 362 | goto fail; |
362 | } | 363 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h index dee67fa39107..0c7364550150 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.h +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h | |||
@@ -71,7 +71,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | |||
71 | gfp_t gfp_mask, int coherent); | 71 | gfp_t gfp_mask, int coherent); |
72 | void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent); | 72 | void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent); |
73 | 73 | ||
74 | int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj); | 74 | int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, |
75 | gfp_t gfp); | ||
75 | void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj); | 76 | void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj); |
76 | int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, | 77 | int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, |
77 | u32 start, u32 end); | 78 | u32 start, u32 end); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 7cf9dadcb471..908326876ab5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -666,13 +666,15 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) | |||
666 | return -ENODEV; | 666 | return -ENODEV; |
667 | } | 667 | } |
668 | 668 | ||
669 | dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); | ||
669 | dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); | 670 | dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); |
670 | dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); | 671 | dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); |
671 | dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); | 672 | dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); |
672 | dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); | 673 | dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); |
673 | 674 | ||
674 | if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || | 675 | if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || |
675 | !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) { | 676 | !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || |
677 | !dev->caps.qp0_qkey) { | ||
676 | err = -ENOMEM; | 678 | err = -ENOMEM; |
677 | goto err_mem; | 679 | goto err_mem; |
678 | } | 680 | } |
@@ -684,6 +686,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) | |||
684 | " port %d, aborting (%d).\n", i, err); | 686 | " port %d, aborting (%d).\n", i, err); |
685 | goto err_mem; | 687 | goto err_mem; |
686 | } | 688 | } |
689 | dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; | ||
687 | dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; | 690 | dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; |
688 | dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; | 691 | dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; |
689 | dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; | 692 | dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; |
@@ -729,12 +732,16 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) | |||
729 | return 0; | 732 | return 0; |
730 | 733 | ||
731 | err_mem: | 734 | err_mem: |
735 | kfree(dev->caps.qp0_qkey); | ||
732 | kfree(dev->caps.qp0_tunnel); | 736 | kfree(dev->caps.qp0_tunnel); |
733 | kfree(dev->caps.qp0_proxy); | 737 | kfree(dev->caps.qp0_proxy); |
734 | kfree(dev->caps.qp1_tunnel); | 738 | kfree(dev->caps.qp1_tunnel); |
735 | kfree(dev->caps.qp1_proxy); | 739 | kfree(dev->caps.qp1_proxy); |
736 | dev->caps.qp0_tunnel = dev->caps.qp0_proxy = | 740 | dev->caps.qp0_qkey = NULL; |
737 | dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL; | 741 | dev->caps.qp0_tunnel = NULL; |
742 | dev->caps.qp0_proxy = NULL; | ||
743 | dev->caps.qp1_tunnel = NULL; | ||
744 | dev->caps.qp1_proxy = NULL; | ||
738 | 745 | ||
739 | return err; | 746 | return err; |
740 | } | 747 | } |
@@ -1696,6 +1703,14 @@ unmap_bf: | |||
1696 | unmap_internal_clock(dev); | 1703 | unmap_internal_clock(dev); |
1697 | unmap_bf_area(dev); | 1704 | unmap_bf_area(dev); |
1698 | 1705 | ||
1706 | if (mlx4_is_slave(dev)) { | ||
1707 | kfree(dev->caps.qp0_qkey); | ||
1708 | kfree(dev->caps.qp0_tunnel); | ||
1709 | kfree(dev->caps.qp0_proxy); | ||
1710 | kfree(dev->caps.qp1_tunnel); | ||
1711 | kfree(dev->caps.qp1_proxy); | ||
1712 | } | ||
1713 | |||
1699 | err_close: | 1714 | err_close: |
1700 | if (mlx4_is_slave(dev)) | 1715 | if (mlx4_is_slave(dev)) |
1701 | mlx4_slave_exit(dev); | 1716 | mlx4_slave_exit(dev); |
@@ -2565,6 +2580,14 @@ err_master_mfunc: | |||
2565 | if (mlx4_is_master(dev)) | 2580 | if (mlx4_is_master(dev)) |
2566 | mlx4_multi_func_cleanup(dev); | 2581 | mlx4_multi_func_cleanup(dev); |
2567 | 2582 | ||
2583 | if (mlx4_is_slave(dev)) { | ||
2584 | kfree(dev->caps.qp0_qkey); | ||
2585 | kfree(dev->caps.qp0_tunnel); | ||
2586 | kfree(dev->caps.qp0_proxy); | ||
2587 | kfree(dev->caps.qp1_tunnel); | ||
2588 | kfree(dev->caps.qp1_proxy); | ||
2589 | } | ||
2590 | |||
2568 | err_close: | 2591 | err_close: |
2569 | if (dev->flags & MLX4_FLAG_MSI_X) | 2592 | if (dev->flags & MLX4_FLAG_MSI_X) |
2570 | pci_disable_msix(pdev); | 2593 | pci_disable_msix(pdev); |
@@ -2688,6 +2711,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev) | |||
2688 | if (!mlx4_is_slave(dev)) | 2711 | if (!mlx4_is_slave(dev)) |
2689 | mlx4_free_ownership(dev); | 2712 | mlx4_free_ownership(dev); |
2690 | 2713 | ||
2714 | kfree(dev->caps.qp0_qkey); | ||
2691 | kfree(dev->caps.qp0_tunnel); | 2715 | kfree(dev->caps.qp0_tunnel); |
2692 | kfree(dev->caps.qp0_proxy); | 2716 | kfree(dev->caps.qp0_proxy); |
2693 | kfree(dev->caps.qp1_tunnel); | 2717 | kfree(dev->caps.qp1_tunnel); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index f9c465101963..7d39cb30c883 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | |||
@@ -133,6 +133,11 @@ enum { | |||
133 | MLX4_COMM_CMD_FLR = 254 | 133 | MLX4_COMM_CMD_FLR = 254 |
134 | }; | 134 | }; |
135 | 135 | ||
136 | enum { | ||
137 | MLX4_VF_SMI_DISABLED, | ||
138 | MLX4_VF_SMI_ENABLED | ||
139 | }; | ||
140 | |||
136 | /*The flag indicates that the slave should delay the RESET cmd*/ | 141 | /*The flag indicates that the slave should delay the RESET cmd*/ |
137 | #define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb | 142 | #define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb |
138 | /*indicates how many retries will be done if we are in the middle of FLR*/ | 143 | /*indicates how many retries will be done if we are in the middle of FLR*/ |
@@ -488,6 +493,7 @@ struct mlx4_vport_state { | |||
488 | 493 | ||
489 | struct mlx4_vf_admin_state { | 494 | struct mlx4_vf_admin_state { |
490 | struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1]; | 495 | struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1]; |
496 | u8 enable_smi[MLX4_MAX_PORTS + 1]; | ||
491 | }; | 497 | }; |
492 | 498 | ||
493 | struct mlx4_vport_oper_state { | 499 | struct mlx4_vport_oper_state { |
@@ -495,8 +501,10 @@ struct mlx4_vport_oper_state { | |||
495 | int mac_idx; | 501 | int mac_idx; |
496 | int vlan_idx; | 502 | int vlan_idx; |
497 | }; | 503 | }; |
504 | |||
498 | struct mlx4_vf_oper_state { | 505 | struct mlx4_vf_oper_state { |
499 | struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1]; | 506 | struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1]; |
507 | u8 smi_enabled[MLX4_MAX_PORTS + 1]; | ||
500 | }; | 508 | }; |
501 | 509 | ||
502 | struct slave_list { | 510 | struct slave_list { |
@@ -888,7 +896,7 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev); | |||
888 | void mlx4_cleanup_qp_table(struct mlx4_dev *dev); | 896 | void mlx4_cleanup_qp_table(struct mlx4_dev *dev); |
889 | void mlx4_cleanup_srq_table(struct mlx4_dev *dev); | 897 | void mlx4_cleanup_srq_table(struct mlx4_dev *dev); |
890 | void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); | 898 | void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); |
891 | int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn); | 899 | int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp); |
892 | void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn); | 900 | void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn); |
893 | int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn); | 901 | int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn); |
894 | void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn); | 902 | void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn); |
@@ -896,7 +904,7 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn); | |||
896 | void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn); | 904 | void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn); |
897 | int __mlx4_mpt_reserve(struct mlx4_dev *dev); | 905 | int __mlx4_mpt_reserve(struct mlx4_dev *dev); |
898 | void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index); | 906 | void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index); |
899 | int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index); | 907 | int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp); |
900 | void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index); | 908 | void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index); |
901 | u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order); | 909 | u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order); |
902 | void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order); | 910 | void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 24835853b753..4c71dafad217 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c | |||
@@ -364,14 +364,14 @@ static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) | |||
364 | __mlx4_mpt_release(dev, index); | 364 | __mlx4_mpt_release(dev, index); |
365 | } | 365 | } |
366 | 366 | ||
367 | int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) | 367 | int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp) |
368 | { | 368 | { |
369 | struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; | 369 | struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; |
370 | 370 | ||
371 | return mlx4_table_get(dev, &mr_table->dmpt_table, index); | 371 | return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp); |
372 | } | 372 | } |
373 | 373 | ||
374 | static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) | 374 | static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp) |
375 | { | 375 | { |
376 | u64 param = 0; | 376 | u64 param = 0; |
377 | 377 | ||
@@ -382,7 +382,7 @@ static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) | |||
382 | MLX4_CMD_TIME_CLASS_A, | 382 | MLX4_CMD_TIME_CLASS_A, |
383 | MLX4_CMD_WRAPPED); | 383 | MLX4_CMD_WRAPPED); |
384 | } | 384 | } |
385 | return __mlx4_mpt_alloc_icm(dev, index); | 385 | return __mlx4_mpt_alloc_icm(dev, index, gfp); |
386 | } | 386 | } |
387 | 387 | ||
388 | void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) | 388 | void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) |
@@ -469,7 +469,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) | |||
469 | struct mlx4_mpt_entry *mpt_entry; | 469 | struct mlx4_mpt_entry *mpt_entry; |
470 | int err; | 470 | int err; |
471 | 471 | ||
472 | err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key)); | 472 | err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL); |
473 | if (err) | 473 | if (err) |
474 | return err; | 474 | return err; |
475 | 475 | ||
@@ -627,13 +627,14 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
627 | EXPORT_SYMBOL_GPL(mlx4_write_mtt); | 627 | EXPORT_SYMBOL_GPL(mlx4_write_mtt); |
628 | 628 | ||
629 | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | 629 | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |
630 | struct mlx4_buf *buf) | 630 | struct mlx4_buf *buf, gfp_t gfp) |
631 | { | 631 | { |
632 | u64 *page_list; | 632 | u64 *page_list; |
633 | int err; | 633 | int err; |
634 | int i; | 634 | int i; |
635 | 635 | ||
636 | page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL); | 636 | page_list = kmalloc(buf->npages * sizeof *page_list, |
637 | gfp); | ||
637 | if (!page_list) | 638 | if (!page_list) |
638 | return -ENOMEM; | 639 | return -ENOMEM; |
639 | 640 | ||
@@ -680,7 +681,7 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw) | |||
680 | struct mlx4_mpt_entry *mpt_entry; | 681 | struct mlx4_mpt_entry *mpt_entry; |
681 | int err; | 682 | int err; |
682 | 683 | ||
683 | err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key)); | 684 | err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL); |
684 | if (err) | 685 | if (err) |
685 | return err; | 686 | return err; |
686 | 687 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 61d64ebffd56..07198cacbb20 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c | |||
@@ -272,29 +272,29 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) | |||
272 | } | 272 | } |
273 | EXPORT_SYMBOL_GPL(mlx4_qp_release_range); | 273 | EXPORT_SYMBOL_GPL(mlx4_qp_release_range); |
274 | 274 | ||
275 | int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) | 275 | int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp) |
276 | { | 276 | { |
277 | struct mlx4_priv *priv = mlx4_priv(dev); | 277 | struct mlx4_priv *priv = mlx4_priv(dev); |
278 | struct mlx4_qp_table *qp_table = &priv->qp_table; | 278 | struct mlx4_qp_table *qp_table = &priv->qp_table; |
279 | int err; | 279 | int err; |
280 | 280 | ||
281 | err = mlx4_table_get(dev, &qp_table->qp_table, qpn); | 281 | err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp); |
282 | if (err) | 282 | if (err) |
283 | goto err_out; | 283 | goto err_out; |
284 | 284 | ||
285 | err = mlx4_table_get(dev, &qp_table->auxc_table, qpn); | 285 | err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp); |
286 | if (err) | 286 | if (err) |
287 | goto err_put_qp; | 287 | goto err_put_qp; |
288 | 288 | ||
289 | err = mlx4_table_get(dev, &qp_table->altc_table, qpn); | 289 | err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp); |
290 | if (err) | 290 | if (err) |
291 | goto err_put_auxc; | 291 | goto err_put_auxc; |
292 | 292 | ||
293 | err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn); | 293 | err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp); |
294 | if (err) | 294 | if (err) |
295 | goto err_put_altc; | 295 | goto err_put_altc; |
296 | 296 | ||
297 | err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn); | 297 | err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp); |
298 | if (err) | 298 | if (err) |
299 | goto err_put_rdmarc; | 299 | goto err_put_rdmarc; |
300 | 300 | ||
@@ -316,7 +316,7 @@ err_out: | |||
316 | return err; | 316 | return err; |
317 | } | 317 | } |
318 | 318 | ||
319 | static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) | 319 | static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp) |
320 | { | 320 | { |
321 | u64 param = 0; | 321 | u64 param = 0; |
322 | 322 | ||
@@ -326,7 +326,7 @@ static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) | |||
326 | MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, | 326 | MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, |
327 | MLX4_CMD_WRAPPED); | 327 | MLX4_CMD_WRAPPED); |
328 | } | 328 | } |
329 | return __mlx4_qp_alloc_icm(dev, qpn); | 329 | return __mlx4_qp_alloc_icm(dev, qpn, gfp); |
330 | } | 330 | } |
331 | 331 | ||
332 | void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) | 332 | void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) |
@@ -355,7 +355,7 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) | |||
355 | __mlx4_qp_free_icm(dev, qpn); | 355 | __mlx4_qp_free_icm(dev, qpn); |
356 | } | 356 | } |
357 | 357 | ||
358 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) | 358 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) |
359 | { | 359 | { |
360 | struct mlx4_priv *priv = mlx4_priv(dev); | 360 | struct mlx4_priv *priv = mlx4_priv(dev); |
361 | struct mlx4_qp_table *qp_table = &priv->qp_table; | 361 | struct mlx4_qp_table *qp_table = &priv->qp_table; |
@@ -366,7 +366,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) | |||
366 | 366 | ||
367 | qp->qpn = qpn; | 367 | qp->qpn = qpn; |
368 | 368 | ||
369 | err = mlx4_qp_alloc_icm(dev, qpn); | 369 | err = mlx4_qp_alloc_icm(dev, qpn, gfp); |
370 | if (err) | 370 | if (err) |
371 | return err; | 371 | return err; |
372 | 372 | ||
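__mlx4_qp_alloc_icm() above takes references on five ICM tables in order and, if any step fails, drops the ones already taken in reverse via a goto ladder (the err_put_* labels fall outside the hunk). A compilable sketch of that unwind idiom, abridged to three stages with hypothetical table_get()/table_put() helpers:

#include <stdio.h>

/* hypothetical acquire/release pair standing in for mlx4_table_get/put */
static int table_get(const char *name, int fail)
{
    if (fail) {
        printf("get %s: failed\n", name);
        return -1;
    }
    printf("get %s\n", name);
    return 0;
}

static void table_put(const char *name)
{
    printf("put %s\n", name);
}

static int qp_alloc_icm(int fail_at)
{
    int err;

    err = table_get("qp", fail_at == 1);
    if (err)
        goto err_out;
    err = table_get("auxc", fail_at == 2);
    if (err)
        goto err_put_qp;
    err = table_get("altc", fail_at == 3);
    if (err)
        goto err_put_auxc;
    return 0;

err_put_auxc:
    table_put("auxc");
err_put_qp:
    table_put("qp");
err_out:
    return err;
}

int main(void)
{
    /* fails at step 3; steps 2 and 1 are released in reverse order */
    return qp_alloc_icm(3) ? 1 : 0;
}
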
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1c3fdd4a1f7d..abdb000bba30 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -1532,7 +1532,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, | |||
1532 | return err; | 1532 | return err; |
1533 | 1533 | ||
1534 | if (!fw_reserved(dev, qpn)) { | 1534 | if (!fw_reserved(dev, qpn)) { |
1535 | err = __mlx4_qp_alloc_icm(dev, qpn); | 1535 | err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL); |
1536 | if (err) { | 1536 | if (err) { |
1537 | res_abort_move(dev, slave, RES_QP, qpn); | 1537 | res_abort_move(dev, slave, RES_QP, qpn); |
1538 | return err; | 1538 | return err; |
@@ -1619,7 +1619,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, | |||
1619 | if (err) | 1619 | if (err) |
1620 | return err; | 1620 | return err; |
1621 | 1621 | ||
1622 | err = __mlx4_mpt_alloc_icm(dev, mpt->key); | 1622 | err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL); |
1623 | if (err) { | 1623 | if (err) { |
1624 | res_abort_move(dev, slave, RES_MPT, id); | 1624 | res_abort_move(dev, slave, RES_MPT, id); |
1625 | return err; | 1625 | return err; |
@@ -2827,10 +2827,12 @@ static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, | |||
2827 | } | 2827 | } |
2828 | 2828 | ||
2829 | static int verify_qp_parameters(struct mlx4_dev *dev, | 2829 | static int verify_qp_parameters(struct mlx4_dev *dev, |
2830 | struct mlx4_vhcr *vhcr, | ||
2830 | struct mlx4_cmd_mailbox *inbox, | 2831 | struct mlx4_cmd_mailbox *inbox, |
2831 | enum qp_transition transition, u8 slave) | 2832 | enum qp_transition transition, u8 slave) |
2832 | { | 2833 | { |
2833 | u32 qp_type; | 2834 | u32 qp_type; |
2835 | u32 qpn; | ||
2834 | struct mlx4_qp_context *qp_ctx; | 2836 | struct mlx4_qp_context *qp_ctx; |
2835 | enum mlx4_qp_optpar optpar; | 2837 | enum mlx4_qp_optpar optpar; |
2836 | int port; | 2838 | int port; |
@@ -2873,8 +2875,22 @@ static int verify_qp_parameters(struct mlx4_dev *dev, | |||
2873 | default: | 2875 | default: |
2874 | break; | 2876 | break; |
2875 | } | 2877 | } |
2878 | break; | ||
2876 | 2879 | ||
2880 | case MLX4_QP_ST_MLX: | ||
2881 | qpn = vhcr->in_modifier & 0x7fffff; | ||
2882 | port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; | ||
2883 | if (transition == QP_TRANS_INIT2RTR && | ||
2884 | slave != mlx4_master_func_num(dev) && | ||
2885 | mlx4_is_qp_reserved(dev, qpn) && | ||
2886 | !mlx4_vf_smi_enabled(dev, slave, port)) { | ||
2887 | /* only enabled VFs may create MLX proxy QPs */ | ||
2888 | mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n", | ||
2889 | __func__, slave, port); | ||
2890 | return -EPERM; | ||
2891 | } | ||
2877 | break; | 2892 | break; |
2893 | |||
2878 | default: | 2894 | default: |
2879 | break; | 2895 | break; |
2880 | } | 2896 | } |
@@ -3454,7 +3470,7 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, | |||
3454 | err = adjust_qp_sched_queue(dev, slave, qpc, inbox); | 3470 | err = adjust_qp_sched_queue(dev, slave, qpc, inbox); |
3455 | if (err) | 3471 | if (err) |
3456 | return err; | 3472 | return err; |
3457 | err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); | 3473 | err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave); |
3458 | if (err) | 3474 | if (err) |
3459 | return err; | 3475 | return err; |
3460 | 3476 | ||
@@ -3508,7 +3524,7 @@ int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, | |||
3508 | err = adjust_qp_sched_queue(dev, slave, context, inbox); | 3524 | err = adjust_qp_sched_queue(dev, slave, context, inbox); |
3509 | if (err) | 3525 | if (err) |
3510 | return err; | 3526 | return err; |
3511 | err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave); | 3527 | err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave); |
3512 | if (err) | 3528 | if (err) |
3513 | return err; | 3529 | return err; |
3514 | 3530 | ||
@@ -3530,7 +3546,7 @@ int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, | |||
3530 | err = adjust_qp_sched_queue(dev, slave, context, inbox); | 3546 | err = adjust_qp_sched_queue(dev, slave, context, inbox); |
3531 | if (err) | 3547 | if (err) |
3532 | return err; | 3548 | return err; |
3533 | err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave); | 3549 | err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave); |
3534 | if (err) | 3550 | if (err) |
3535 | return err; | 3551 | return err; |
3536 | 3552 | ||
@@ -3567,7 +3583,7 @@ int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, | |||
3567 | err = adjust_qp_sched_queue(dev, slave, context, inbox); | 3583 | err = adjust_qp_sched_queue(dev, slave, context, inbox); |
3568 | if (err) | 3584 | if (err) |
3569 | return err; | 3585 | return err; |
3570 | err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave); | 3586 | err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave); |
3571 | if (err) | 3587 | if (err) |
3572 | return err; | 3588 | return err; |
3573 | 3589 | ||
@@ -3589,7 +3605,7 @@ int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, | |||
3589 | err = adjust_qp_sched_queue(dev, slave, context, inbox); | 3605 | err = adjust_qp_sched_queue(dev, slave, context, inbox); |
3590 | if (err) | 3606 | if (err) |
3591 | return err; | 3607 | return err; |
3592 | err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave); | 3608 | err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave); |
3593 | if (err) | 3609 | if (err) |
3594 | return err; | 3610 | return err; |
3595 | 3611 | ||
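The new MLX4_QP_ST_MLX case decodes two packed values before applying the permission check: the QP number from the low 23 bits of the command in_modifier, and the port (1 or 2) from bit 6 of sched_queue. A standalone check of that bit arithmetic with made-up field values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t in_modifier = 0x01800040;      /* example command modifier */
    uint8_t  sched_queue = 0x40;            /* bit 6 set -> port 2      */

    uint32_t qpn  = in_modifier & 0x7fffff; /* low 23 bits              */
    int      port = ((sched_queue >> 6) & 1) + 1;

    printf("qpn = 0x%x, port = %d\n", qpn, port);
    assert(qpn == 0x40 && port == 2);
    return 0;
}
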
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c index 98faf870b0b0..67146624eb58 100644 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c | |||
@@ -103,11 +103,11 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn) | |||
103 | if (*srqn == -1) | 103 | if (*srqn == -1) |
104 | return -ENOMEM; | 104 | return -ENOMEM; |
105 | 105 | ||
106 | err = mlx4_table_get(dev, &srq_table->table, *srqn); | 106 | err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL); |
107 | if (err) | 107 | if (err) |
108 | goto err_out; | 108 | goto err_out; |
109 | 109 | ||
110 | err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn); | 110 | err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL); |
111 | if (err) | 111 | if (err) |
112 | goto err_put; | 112 | goto err_put; |
113 | return 0; | 113 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 4cc927649404..ac52a0fe2d3a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c | |||
@@ -82,7 +82,11 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, | |||
82 | return mlx5_cmd_status_to_err(&lout.hdr); | 82 | return mlx5_cmd_status_to_err(&lout.hdr); |
83 | } | 83 | } |
84 | 84 | ||
85 | mr->iova = be64_to_cpu(in->seg.start_addr); | ||
86 | mr->size = be64_to_cpu(in->seg.len); | ||
85 | mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key; | 87 | mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key; |
88 | mr->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff; | ||
89 | |||
86 | mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", | 90 | mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", |
87 | be32_to_cpu(lout.mkey), key, mr->key); | 91 | be32_to_cpu(lout.mkey), key, mr->key); |
88 | 92 | ||
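The mlx5 hunk caches the memory-region attributes straight out of the big-endian command input: start address and length are byte-swapped 64-bit fields, and the PD number occupies the low 24 bits of flags_pd. A standalone illustration of the decode, with be64toh/be32toh standing in for the kernel's be64_to_cpu/be32_to_cpu and a made-up segment layout:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct mkey_seg {                 /* hypothetical wire-format fragment */
    uint64_t start_addr;          /* big-endian on the wire            */
    uint64_t len;
    uint32_t flags_pd;            /* flags on top, PD in low 24 bits   */
};

int main(void)
{
    struct mkey_seg seg = {
        .start_addr = htobe64(0x1000),
        .len        = htobe64(4096),
        .flags_pd   = htobe32(0xA0000017), /* flags 0xA0..., PD 0x17   */
    };

    uint64_t iova = be64toh(seg.start_addr);
    uint64_t size = be64toh(seg.len);
    uint32_t pd   = be32toh(seg.flags_pd) & 0xffffff;

    printf("iova=0x%llx size=%llu pd=0x%x\n",
           (unsigned long long)iova, (unsigned long long)size, pd);
    return 0;
}
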
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index ba87bd21295a..3447bead9620 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -401,6 +401,7 @@ struct mlx4_caps { | |||
401 | int max_rq_desc_sz; | 401 | int max_rq_desc_sz; |
402 | int max_qp_init_rdma; | 402 | int max_qp_init_rdma; |
403 | int max_qp_dest_rdma; | 403 | int max_qp_dest_rdma; |
404 | u32 *qp0_qkey; | ||
404 | u32 *qp0_proxy; | 405 | u32 *qp0_proxy; |
405 | u32 *qp1_proxy; | 406 | u32 *qp1_proxy; |
406 | u32 *qp0_tunnel; | 407 | u32 *qp0_tunnel; |
@@ -837,7 +838,7 @@ static inline int mlx4_is_slave(struct mlx4_dev *dev) | |||
837 | } | 838 | } |
838 | 839 | ||
839 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | 840 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, |
840 | struct mlx4_buf *buf); | 841 | struct mlx4_buf *buf, gfp_t gfp); |
841 | void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); | 842 | void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); |
842 | static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) | 843 | static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) |
843 | { | 844 | { |
@@ -874,9 +875,10 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw); | |||
874 | int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | 875 | int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |
875 | int start_index, int npages, u64 *page_list); | 876 | int start_index, int npages, u64 *page_list); |
876 | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | 877 | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |
877 | struct mlx4_buf *buf); | 878 | struct mlx4_buf *buf, gfp_t gfp); |
878 | 879 | ||
879 | int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); | 880 | int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, |
881 | gfp_t gfp); | ||
880 | void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); | 882 | void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); |
881 | 883 | ||
882 | int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | 884 | int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, |
@@ -892,7 +894,8 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); | |||
892 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); | 894 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); |
893 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); | 895 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); |
894 | 896 | ||
895 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp); | 897 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, |
898 | gfp_t gfp); | ||
896 | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); | 899 | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); |
897 | 900 | ||
898 | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn, | 901 | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn, |
@@ -1234,4 +1237,8 @@ int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port); | |||
1234 | int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port); | 1237 | int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port); |
1235 | 1238 | ||
1236 | int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port); | 1239 | int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port); |
1240 | int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port); | ||
1241 | int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port); | ||
1242 | int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, | ||
1243 | int enable); | ||
1237 | #endif /* MLX4_DEVICE_H */ | 1244 | #endif /* MLX4_DEVICE_H */ |
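The three new mlx4 exports form a small policy API for per-VF SMI access: set/get of the administrative state, plus the operational query that verify_qp_parameters() consults in resource_tracker.c above. A hedged sketch of a PF-side caller (preconditions such as whether the VF must be inactive, and all locking, are omitted; example_enable_vf_smi is a made-up name):

#include <linux/errno.h>
#include <linux/mlx4/device.h>

/* illustrative only: turn on SMI for one VF and read both views back */
static int example_enable_vf_smi(struct mlx4_dev *dev, int slave, int port)
{
    int err;

    err = mlx4_vf_set_enable_smi_admin(dev, slave, port, 1);
    if (err)
        return err;

    /* admin state reflects the requested policy ... */
    if (mlx4_vf_get_enable_smi_admin(dev, slave, port) != 1)
        return -EIO;

    /* ... while the operational state is what the QP-creation check
     * above consults; it may only catch up when the VF initializes */
    return mlx4_vf_smi_enabled(dev, slave, port) ? 0 : -EAGAIN;
}
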
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 93cef6313e72..2bce4aad2570 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -427,7 +427,6 @@ struct mlx5_core_mr { | |||
427 | u64 size; | 427 | u64 size; |
428 | u32 key; | 428 | u32 key; |
429 | u32 pd; | 429 | u32 pd; |
430 | u32 access; | ||
431 | }; | 430 | }; |
432 | 431 | ||
433 | struct mlx5_core_srq { | 432 | struct mlx5_core_srq { |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index acd825182977..7ccef342f724 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -80,8 +80,8 @@ enum rdma_transport_type { | |||
80 | RDMA_TRANSPORT_USNIC_UDP | 80 | RDMA_TRANSPORT_USNIC_UDP |
81 | }; | 81 | }; |
82 | 82 | ||
83 | enum rdma_transport_type | 83 | __attribute_const__ enum rdma_transport_type |
84 | rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__; | 84 | rdma_node_get_transport(enum rdma_node_type node_type); |
85 | 85 | ||
86 | enum rdma_link_layer { | 86 | enum rdma_link_layer { |
87 | IB_LINK_LAYER_UNSPECIFIED, | 87 | IB_LINK_LAYER_UNSPECIFIED, |
@@ -466,14 +466,14 @@ enum ib_rate { | |||
466 | * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. | 466 | * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. |
467 | * @rate: rate to convert. | 467 | * @rate: rate to convert. |
468 | */ | 468 | */ |
469 | int ib_rate_to_mult(enum ib_rate rate) __attribute_const__; | 469 | __attribute_const__ int ib_rate_to_mult(enum ib_rate rate); |
470 | 470 | ||
471 | /** | 471 | /** |
472 | * ib_rate_to_mbps - Convert the IB rate enum to Mbps. | 472 | * ib_rate_to_mbps - Convert the IB rate enum to Mbps. |
473 | * For example, IB_RATE_2_5_GBPS will be converted to 2500. | 473 | * For example, IB_RATE_2_5_GBPS will be converted to 2500. |
474 | * @rate: rate to convert. | 474 | * @rate: rate to convert. |
475 | */ | 475 | */ |
476 | int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__; | 476 | __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); |
477 | 477 | ||
478 | enum ib_mr_create_flags { | 478 | enum ib_mr_create_flags { |
479 | IB_MR_SIGNATURE_EN = 1, | 479 | IB_MR_SIGNATURE_EN = 1, |
@@ -604,7 +604,7 @@ struct ib_mr_status { | |||
604 | * enum. | 604 | * enum. |
605 | * @mult: multiple to convert. | 605 | * @mult: multiple to convert. |
606 | */ | 606 | */ |
607 | enum ib_rate mult_to_ib_rate(int mult) __attribute_const__; | 607 | __attribute_const__ enum ib_rate mult_to_ib_rate(int mult); |
608 | 608 | ||
609 | struct ib_ah_attr { | 609 | struct ib_ah_attr { |
610 | struct ib_global_route grh; | 610 | struct ib_global_route grh; |
@@ -783,6 +783,7 @@ enum ib_qp_create_flags { | |||
783 | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, | 783 | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, |
784 | IB_QP_CREATE_NETIF_QP = 1 << 5, | 784 | IB_QP_CREATE_NETIF_QP = 1 << 5, |
785 | IB_QP_CREATE_SIGNATURE_EN = 1 << 6, | 785 | IB_QP_CREATE_SIGNATURE_EN = 1 << 6, |
786 | IB_QP_CREATE_USE_GFP_NOIO = 1 << 7, | ||
786 | /* reserve bits 26-31 for low level drivers' internal use */ | 787 | /* reserve bits 26-31 for low level drivers' internal use */ |
787 | IB_QP_CREATE_RESERVED_START = 1 << 26, | 788 | IB_QP_CREATE_RESERVED_START = 1 << 26, |
788 | IB_QP_CREATE_RESERVED_END = 1 << 31, | 789 | IB_QP_CREATE_RESERVED_END = 1 << 31, |
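IB_QP_CREATE_USE_GFP_NOIO is the verbs-level switch that feeds the gfp_t plumbing added throughout mlx4 above: a ULP that creates QPs from a memory-reclaim or block-I/O context sets it so the provider allocates with GFP_NOIO. A minimal sketch of such a caller (the init_attr is assumed to be otherwise filled in by the ULP):

#include <rdma/ib_verbs.h>

/* illustrative fragment: a storage ULP that may create QPs while the
 * system is writing back dirty pages requests GFP_NOIO allocations */
static struct ib_qp *example_create_noio_qp(struct ib_pd *pd,
                                            struct ib_qp_init_attr *attr)
{
    attr->create_flags |= IB_QP_CREATE_USE_GFP_NOIO;
    return ib_create_qp(pd, attr);
}
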
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h new file mode 100644 index 000000000000..928b2775e992 --- /dev/null +++ b/include/rdma/iw_portmap.h | |||
@@ -0,0 +1,199 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014 Intel Corporation. All rights reserved. | ||
3 | * Copyright (c) 2014 Chelsio, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | #ifndef _IW_PORTMAP_H | ||
34 | #define _IW_PORTMAP_H | ||
35 | |||
36 | #define IWPM_ULIBNAME_SIZE 32 | ||
37 | #define IWPM_DEVNAME_SIZE 32 | ||
38 | #define IWPM_IFNAME_SIZE 16 | ||
39 | #define IWPM_IPADDR_SIZE 16 | ||
40 | |||
41 | enum { | ||
42 | IWPM_INVALID_NLMSG_ERR = 10, | ||
43 | IWPM_CREATE_MAPPING_ERR, | ||
44 | IWPM_DUPLICATE_MAPPING_ERR, | ||
45 | IWPM_UNKNOWN_MAPPING_ERR, | ||
46 | IWPM_CLIENT_DEV_INFO_ERR, | ||
47 | IWPM_USER_LIB_INFO_ERR, | ||
48 | IWPM_REMOTE_QUERY_REJECT | ||
49 | }; | ||
50 | |||
51 | struct iwpm_dev_data { | ||
52 | char dev_name[IWPM_DEVNAME_SIZE]; | ||
53 | char if_name[IWPM_IFNAME_SIZE]; | ||
54 | }; | ||
55 | |||
56 | struct iwpm_sa_data { | ||
57 | struct sockaddr_storage loc_addr; | ||
58 | struct sockaddr_storage mapped_loc_addr; | ||
59 | struct sockaddr_storage rem_addr; | ||
60 | struct sockaddr_storage mapped_rem_addr; | ||
61 | }; | ||
62 | |||
63 | /** | ||
64 | * iwpm_init - Allocate resources for the iwarp port mapper | ||
65 | * | ||
66 | * Should be called when the network interface goes up. | ||
67 | */ | ||
68 | int iwpm_init(u8); | ||
69 | |||
70 | /** | ||
71 | * iwpm_exit - Deallocate resources for the iwarp port mapper | ||
72 | * | ||
73 | * Should be called when the network interface goes down. | ||
74 | */ | ||
75 | int iwpm_exit(u8); | ||
76 | |||
77 | /** | ||
78 | * iwpm_valid_pid - Check if the userspace iwarp port mapper pid is valid | ||
79 | * | ||
80 | * Returns true if the pid is greater than zero, otherwise returns false | ||
81 | */ | ||
82 | int iwpm_valid_pid(void); | ||
83 | |||
84 | /** | ||
85 | * iwpm_register_pid - Send a netlink query to userspace | ||
86 | * to get the iwarp port mapper pid | ||
87 | * @pm_msg: Contains driver info to send to the userspace port mapper | ||
88 | * @nl_client: The index of the netlink client | ||
89 | */ | ||
90 | int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client); | ||
91 | |||
92 | /** | ||
93 | * iwpm_add_mapping - Send a netlink add mapping request to | ||
94 | * the userspace port mapper | ||
95 | * @pm_msg: Contains the local ip/tcp address info to send | ||
96 | * @nl_client: The index of the netlink client | ||
97 | * | ||
98 | * If the request is successful, the pm_msg stores | ||
99 | * the port mapper response (mapped address info) | ||
100 | */ | ||
101 | int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client); | ||
102 | |||
103 | /** | ||
104 | * iwpm_add_and_query_mapping - Send a netlink add and query mapping request | ||
105 | * to the userspace port mapper | ||
106 | * @pm_msg: Contains the local and remote ip/tcp address info to send | ||
107 | * @nl_client: The index of the netlink client | ||
108 | * | ||
109 | * If the request is successful, the pm_msg stores the | ||
110 | * port mapper response (mapped local and remote address info) | ||
111 | */ | ||
112 | int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client); | ||
113 | |||
114 | /** | ||
115 | * iwpm_remove_mapping - Send a netlink remove mapping request | ||
116 | * to the userspace port mapper | ||
117 | * | ||
118 | * @local_addr: Local ip/tcp address to remove | ||
119 | * @nl_client: The index of the netlink client | ||
120 | */ | ||
121 | int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client); | ||
122 | |||
123 | /** | ||
124 | * iwpm_register_pid_cb - Process the port mapper response to | ||
125 | * iwpm_register_pid query | ||
126 | * @skb: The netlink skb | ||
127 | * @cb: Contains the received message (payload and netlink header) | ||
128 | * | ||
129 | * If successful, the function receives the userspace port mapper pid | ||
130 | * which is used in future communication with the port mapper | ||
131 | */ | ||
132 | int iwpm_register_pid_cb(struct sk_buff *, struct netlink_callback *); | ||
133 | |||
134 | /** | ||
135 | * iwpm_add_mapping_cb - Process the port mapper response to | ||
136 | * iwpm_add_mapping request | ||
137 | * @skb: The netlink skb | ||
138 | * @cb: Contains the received message (payload and netlink header) | ||
139 | */ | ||
140 | int iwpm_add_mapping_cb(struct sk_buff *, struct netlink_callback *); | ||
141 | |||
142 | /** | ||
143 | * iwpm_add_and_query_mapping_cb - Process the port mapper response to | ||
144 | * iwpm_add_and_query_mapping request | ||
145 | * @skb: The netlink skb | ||
146 | * @cb: Contains the received message (payload and netlink header) | ||
147 | */ | ||
148 | int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *); | ||
149 | |||
150 | /** | ||
151 | * iwpm_mapping_error_cb - Process a port mapper error notification | ||
152 | * | ||
153 | * @skb: The netlink skb | ||
154 | * @cb: Contains the received message (payload and netlink header) | ||
155 | */ | ||
156 | int iwpm_mapping_error_cb(struct sk_buff *, struct netlink_callback *); | ||
157 | |||
158 | /** | ||
159 | * iwpm_mapping_info_cb - Process a notification that the userspace | ||
160 | * port mapper daemon has started | ||
161 | * @skb: The netlink skb | ||
162 | * @cb: Contains the received message (payload and netlink header) | ||
163 | * | ||
164 | * Using the received port mapper pid, send all the local mapping | ||
165 | * info records to the userspace port mapper | ||
166 | */ | ||
167 | int iwpm_mapping_info_cb(struct sk_buff *, struct netlink_callback *); | ||
168 | |||
169 | /** | ||
170 | * iwpm_ack_mapping_info_cb - Process the port mapper ack for | ||
171 | * the provided local mapping info records | ||
172 | * @skb: The netlink skb | ||
173 | * @cb: Contains the received message (payload and netlink header) | ||
174 | */ | ||
175 | int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *); | ||
176 | |||
177 | /** | ||
178 | * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address | ||
179 | * info in a hash table | ||
180 | * @local_addr: Local ip/tcp address | ||
181 | * @mapped_addr: Mapped local ip/tcp address | ||
182 | * @nl_client: The index of the netlink client | ||
183 | */ | ||
184 | int iwpm_create_mapinfo(struct sockaddr_storage *local_addr, | ||
185 | struct sockaddr_storage *mapped_addr, u8 nl_client); | ||
186 | |||
187 | /** | ||
188 | * iwpm_remove_mapinfo - Remove local and mapped IPv4/IPv6 address | ||
189 | * info from the hash table | ||
190 | * @local_addr: Local ip/tcp address | ||
191 | * @mapped_addr: Mapped local ip/tcp address | ||
192 | * | ||
193 | * Returns err code if mapping info is not found in the hash table, | ||
194 | * otherwise returns 0 | ||
195 | */ | ||
196 | int iwpm_remove_mapinfo(struct sockaddr_storage *local_addr, | ||
197 | struct sockaddr_storage *mapped_addr); | ||
198 | |||
199 | #endif /* _IW_PORTMAP_H */ | ||
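Putting the iw_portmap.h API together, a typical driver flow is: check iwpm_valid_pid(), ask the userspace daemon for a mapping, then record the local/mapped pair so later mapping-info requests can be answered from the hash table. A hedged sketch using RDMA_NL_C4IW as the client index; example_map_local_addr is a made-up name and error handling is abbreviated:

#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <rdma/iw_portmap.h>
#include <rdma/rdma_netlink.h>

/* illustrative fragment: obtain a mapped local address before listening */
static int example_map_local_addr(struct sockaddr_storage *local,
                                  struct sockaddr_storage *mapped)
{
    struct iwpm_sa_data pm_msg;
    int err;

    if (!iwpm_valid_pid())      /* no userspace port mapper registered */
        return -EINVAL;

    memcpy(&pm_msg.loc_addr, local, sizeof(*local));
    err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW);
    if (err)
        return err;

    memcpy(mapped, &pm_msg.mapped_loc_addr, sizeof(*mapped));

    /* record the pairing so later RDMA_NL_IWPM_MAPINFO requests can be
     * answered */
    return iwpm_create_mapinfo(local, mapped, RDMA_NL_C4IW);
}
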
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h index e38de79eeb48..0790882e0c9b 100644 --- a/include/rdma/rdma_netlink.h +++ b/include/rdma/rdma_netlink.h | |||
@@ -43,7 +43,7 @@ int ibnl_remove_client(int index); | |||
43 | * Returns the allocated buffer on success and NULL on failure. | 43 | * Returns the allocated buffer on success and NULL on failure. |
44 | */ | 44 | */ |
45 | void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, | 45 | void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, |
46 | int len, int client, int op); | 46 | int len, int client, int op, int flags); |
47 | /** | 47 | /** |
48 | * Put a new attribute in a supplied skb. | 48 | * Put a new attribute in a supplied skb. |
49 | * @skb: The netlink skb. | 49 | * @skb: The netlink skb. |
@@ -56,4 +56,25 @@ void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, | |||
56 | int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, | 56 | int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, |
57 | int len, void *data, int type); | 57 | int len, void *data, int type); |
58 | 58 | ||
59 | /** | ||
60 | * Send the supplied skb to a specific userspace PID. | ||
61 | * @skb: The netlink skb | ||
62 | * @nlh: Header of the netlink message to send | ||
63 | * @pid: Userspace netlink process ID | ||
64 | * Returns 0 on success or a negative error code. | ||
65 | */ | ||
66 | int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh, | ||
67 | __u32 pid); | ||
68 | |||
69 | /** | ||
70 | * Send the supplied skb to a netlink group. | ||
71 | * @skb: The netlink skb | ||
72 | * @nlh: Header of the netlink message to send | ||
73 | * @group: Netlink group ID | ||
74 | * @flags: allocation flags | ||
75 | * Returns 0 on success or a negative error code. | ||
76 | */ | ||
77 | int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh, | ||
78 | unsigned int group, gfp_t flags); | ||
79 | |||
59 | #endif /* _RDMA_NETLINK_H */ | 80 | #endif /* _RDMA_NETLINK_H */ |
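The flags argument added to ibnl_put_msg() plus the new ibnl_unicast() export are enough to build a netlink message and address it to one userspace process. A hedged sketch (the client, op, and attribute type are caller-supplied placeholders, not values defined by this patch):

#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <rdma/rdma_netlink.h>

/* illustrative fragment: send one attribute to a known userspace pid */
static int example_send_to_pid(int client, int op, u32 pid,
                               void *data, int len, int attr_type)
{
    struct sk_buff *skb;
    struct nlmsghdr *nlh;
    void *msg;

    skb = dev_alloc_skb(NLMSG_GOODSIZE);
    if (!skb)
        return -ENOMEM;

    msg = ibnl_put_msg(skb, &nlh, 0 /* seq */, 0 /* len */,
                       client, op, NLM_F_REQUEST);
    if (!msg) {
        dev_kfree_skb(skb);
        return -ENOMEM;
    }

    if (ibnl_put_attr(skb, nlh, len, data, attr_type)) {
        dev_kfree_skb(skb);
        return -EINVAL;
    }

    return ibnl_unicast(skb, nlh, pid);
}
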
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index 8297285b6288..de69170a30ce 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h | |||
@@ -4,7 +4,16 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | enum { | 6 | enum { |
7 | RDMA_NL_RDMA_CM = 1 | 7 | RDMA_NL_RDMA_CM = 1, |
8 | RDMA_NL_NES, | ||
9 | RDMA_NL_C4IW, | ||
10 | RDMA_NL_NUM_CLIENTS | ||
11 | }; | ||
12 | |||
13 | enum { | ||
14 | RDMA_NL_GROUP_CM = 1, | ||
15 | RDMA_NL_GROUP_IWPM, | ||
16 | RDMA_NL_NUM_GROUPS | ||
8 | }; | 17 | }; |
9 | 18 | ||
10 | #define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10) | 19 | #define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10) |
@@ -22,6 +31,18 @@ enum { | |||
22 | RDMA_NL_RDMA_CM_NUM_ATTR, | 31 | RDMA_NL_RDMA_CM_NUM_ATTR, |
23 | }; | 32 | }; |
24 | 33 | ||
34 | /* iwarp port mapper op-codes */ | ||
35 | enum { | ||
36 | RDMA_NL_IWPM_REG_PID = 0, | ||
37 | RDMA_NL_IWPM_ADD_MAPPING, | ||
38 | RDMA_NL_IWPM_QUERY_MAPPING, | ||
39 | RDMA_NL_IWPM_REMOVE_MAPPING, | ||
40 | RDMA_NL_IWPM_HANDLE_ERR, | ||
41 | RDMA_NL_IWPM_MAPINFO, | ||
42 | RDMA_NL_IWPM_MAPINFO_NUM, | ||
43 | RDMA_NL_IWPM_NUM_OPS | ||
44 | }; | ||
45 | |||
25 | struct rdma_cm_id_stats { | 46 | struct rdma_cm_id_stats { |
26 | __u32 qp_num; | 47 | __u32 qp_num; |
27 | __u32 bound_dev_if; | 48 | __u32 bound_dev_if; |
@@ -33,5 +54,78 @@ struct rdma_cm_id_stats { | |||
33 | __u8 qp_type; | 54 | __u8 qp_type; |
34 | }; | 55 | }; |
35 | 56 | ||
57 | enum { | ||
58 | IWPM_NLA_REG_PID_UNSPEC = 0, | ||
59 | IWPM_NLA_REG_PID_SEQ, | ||
60 | IWPM_NLA_REG_IF_NAME, | ||
61 | IWPM_NLA_REG_IBDEV_NAME, | ||
62 | IWPM_NLA_REG_ULIB_NAME, | ||
63 | IWPM_NLA_REG_PID_MAX | ||
64 | }; | ||
65 | |||
66 | enum { | ||
67 | IWPM_NLA_RREG_PID_UNSPEC = 0, | ||
68 | IWPM_NLA_RREG_PID_SEQ, | ||
69 | IWPM_NLA_RREG_IBDEV_NAME, | ||
70 | IWPM_NLA_RREG_ULIB_NAME, | ||
71 | IWPM_NLA_RREG_ULIB_VER, | ||
72 | IWPM_NLA_RREG_PID_ERR, | ||
73 | IWPM_NLA_RREG_PID_MAX | ||
74 | |||
75 | }; | ||
76 | |||
77 | enum { | ||
78 | IWPM_NLA_MANAGE_MAPPING_UNSPEC = 0, | ||
79 | IWPM_NLA_MANAGE_MAPPING_SEQ, | ||
80 | IWPM_NLA_MANAGE_ADDR, | ||
81 | IWPM_NLA_MANAGE_MAPPED_LOC_ADDR, | ||
82 | IWPM_NLA_RMANAGE_MAPPING_ERR, | ||
83 | IWPM_NLA_RMANAGE_MAPPING_MAX | ||
84 | }; | ||
85 | |||
86 | #define IWPM_NLA_MANAGE_MAPPING_MAX 3 | ||
87 | #define IWPM_NLA_QUERY_MAPPING_MAX 4 | ||
88 | #define IWPM_NLA_MAPINFO_SEND_MAX 3 | ||
89 | |||
90 | enum { | ||
91 | IWPM_NLA_QUERY_MAPPING_UNSPEC = 0, | ||
92 | IWPM_NLA_QUERY_MAPPING_SEQ, | ||
93 | IWPM_NLA_QUERY_LOCAL_ADDR, | ||
94 | IWPM_NLA_QUERY_REMOTE_ADDR, | ||
95 | IWPM_NLA_RQUERY_MAPPED_LOC_ADDR, | ||
96 | IWPM_NLA_RQUERY_MAPPED_REM_ADDR, | ||
97 | IWPM_NLA_RQUERY_MAPPING_ERR, | ||
98 | IWPM_NLA_RQUERY_MAPPING_MAX | ||
99 | }; | ||
100 | |||
101 | enum { | ||
102 | IWPM_NLA_MAPINFO_REQ_UNSPEC = 0, | ||
103 | IWPM_NLA_MAPINFO_ULIB_NAME, | ||
104 | IWPM_NLA_MAPINFO_ULIB_VER, | ||
105 | IWPM_NLA_MAPINFO_REQ_MAX | ||
106 | }; | ||
107 | |||
108 | enum { | ||
109 | IWPM_NLA_MAPINFO_UNSPEC = 0, | ||
110 | IWPM_NLA_MAPINFO_LOCAL_ADDR, | ||
111 | IWPM_NLA_MAPINFO_MAPPED_ADDR, | ||
112 | IWPM_NLA_MAPINFO_MAX | ||
113 | }; | ||
114 | |||
115 | enum { | ||
116 | IWPM_NLA_MAPINFO_NUM_UNSPEC = 0, | ||
117 | IWPM_NLA_MAPINFO_SEQ, | ||
118 | IWPM_NLA_MAPINFO_SEND_NUM, | ||
119 | IWPM_NLA_MAPINFO_ACK_NUM, | ||
120 | IWPM_NLA_MAPINFO_NUM_MAX | ||
121 | }; | ||
122 | |||
123 | enum { | ||
124 | IWPM_NLA_ERR_UNSPEC = 0, | ||
125 | IWPM_NLA_ERR_SEQ, | ||
126 | IWPM_NLA_ERR_CODE, | ||
127 | IWPM_NLA_ERR_MAX | ||
128 | }; | ||
129 | |||
36 | 130 | ||
37 | #endif /* _UAPI_RDMA_NETLINK_H */ | 131 | #endif /* _UAPI_RDMA_NETLINK_H */ |
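RDMA_NL_GET_CLIENT() extracts the client index from bits 10-15 of the netlink message type, leaving the low 10 bits for the per-client op such as the new RDMA_NL_IWPM_* codes. A standalone check (the composition client << 10 | op is the assumed inverse of the macro; it is not defined in the hunks above):

#include <assert.h>
#include <stdio.h>

#define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10)

int main(void)
{
    int client = 3;                 /* RDMA_NL_C4IW in the enum above    */
    int op     = 1;                 /* RDMA_NL_IWPM_ADD_MAPPING          */
    int type   = (client << 10) | op;

    assert(RDMA_NL_GET_CLIENT(type) == client);
    printf("type=0x%x -> client=%d op=%d\n", type,
           RDMA_NL_GET_CLIENT(type), type & ((1 << 10) - 1));
    return 0;
}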