author		Sean Hefty <sean.hefty@intel.com>	2006-11-30 19:53:41 -0500
committer	Roland Dreier <rolandd@cisco.com>	2006-12-12 14:50:22 -0500
commit		75216638572f53612304c05a374f0246fe1d16da (patch)
tree		25697e2e380f4eda1eccc308ba1bbf4b428714c7 /drivers/infiniband
parent		628e5f6d39d5a6be96c1272a6709f2dd3ec8b7ce (diff)
RDMA/cma: Export rdma cm interface to userspace
Export the rdma cm interfaces to userspace via a misc device.
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
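
The interface exported here is command based: userspace opens the "rdma_cm" misc device and issues each request as a single write() whose payload is a struct rdma_ucm_cmd_hdr followed by the command body; responses are copied back through a user pointer carried inside the command, and pending CM events are fetched with the GET_EVENT command (see ucma_write() in the new ucma.c below). The following sketch is illustrative only and not part of the patch: the header layout ({ __u32 cmd; __u16 in; __u16 out; } from <rdma/rdma_user_cm.h>) and the /dev/infiniband/rdma_cm device path are assumptions, and ucm_issue() is a hypothetical helper.

	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <fcntl.h>
	#include <stdio.h>

	struct ucm_cmd_hdr {		/* mirrors struct rdma_ucm_cmd_hdr (assumed) */
		uint32_t cmd;		/* index into ucma_cmd_table[] */
		uint16_t in;		/* bytes of command payload after the header */
		uint16_t out;		/* size of the response buffer, if any */
	};

	/* Hypothetical helper: frame one command the way ucma_write() expects it. */
	static int ucm_issue(int fd, uint32_t cmd, const void *payload,
			     uint16_t in, uint16_t out)
	{
		char buf[sizeof(struct ucm_cmd_hdr) + 256];
		struct ucm_cmd_hdr hdr = { .cmd = cmd, .in = in, .out = out };

		if (in > 256)
			return -1;
		memcpy(buf, &hdr, sizeof(hdr));
		if (in)
			memcpy(buf + sizeof(hdr), payload, in);
		/* ucma_write() returns the full length on success */
		return write(fd, buf, sizeof(hdr) + in) ==
		       (ssize_t)(sizeof(hdr) + in) ? 0 : -1;
	}

	int main(void)
	{
		int fd = open("/dev/infiniband/rdma_cm", O_RDWR);	/* path assumed */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* ... fill a command structure and pass it to ucm_issue() ... */
		close(fd);
		return 0;
	}

Everything after the header is interpreted by the handler selected by hdr.cmd, so the same helper serves every entry in ucma_cmd_table[].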
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/core/Makefile		6
-rw-r--r--	drivers/infiniband/core/ucma.c			874
-rw-r--r--	drivers/infiniband/core/uverbs_marshall.c	5
3 files changed, 882 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 163d991eb8c9..50fb1cd447b7 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,9 +1,11 @@
 infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o
+user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o

 obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
				ib_cm.o iw_cm.o $(infiniband-y)
 obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
-obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o
+obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
+				$(user_access-y)

 ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
			device.o fmr_pool.o cache.o
@@ -18,6 +20,8 @@ iw_cm-y := iwcm.o

 rdma_cm-y := cma.o

+rdma_ucm-y := ucma.o
+
 ib_addr-y := addr.o

 ib_umad-y := user_mad.o
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
new file mode 100644
index 000000000000..81a5cdc5733a
--- /dev/null
+++ b/drivers/infiniband/core/ucma.c
@@ -0,0 +1,874 @@
1 | /* | ||
2 | * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <linux/completion.h> | ||
34 | #include <linux/mutex.h> | ||
35 | #include <linux/poll.h> | ||
36 | #include <linux/idr.h> | ||
37 | #include <linux/in.h> | ||
38 | #include <linux/in6.h> | ||
39 | #include <linux/miscdevice.h> | ||
40 | |||
41 | #include <rdma/rdma_user_cm.h> | ||
42 | #include <rdma/ib_marshall.h> | ||
43 | #include <rdma/rdma_cm.h> | ||
44 | |||
45 | MODULE_AUTHOR("Sean Hefty"); | ||
46 | MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access"); | ||
47 | MODULE_LICENSE("Dual BSD/GPL"); | ||
48 | |||
49 | enum { | ||
50 | UCMA_MAX_BACKLOG = 128 | ||
51 | }; | ||
52 | |||
53 | struct ucma_file { | ||
54 | struct mutex mut; | ||
55 | struct file *filp; | ||
56 | struct list_head ctx_list; | ||
57 | struct list_head event_list; | ||
58 | wait_queue_head_t poll_wait; | ||
59 | }; | ||
60 | |||
61 | struct ucma_context { | ||
62 | int id; | ||
63 | struct completion comp; | ||
64 | atomic_t ref; | ||
65 | int events_reported; | ||
66 | int backlog; | ||
67 | |||
68 | struct ucma_file *file; | ||
69 | struct rdma_cm_id *cm_id; | ||
70 | u64 uid; | ||
71 | |||
72 | struct list_head list; | ||
73 | }; | ||
74 | |||
75 | struct ucma_event { | ||
76 | struct ucma_context *ctx; | ||
77 | struct list_head list; | ||
78 | struct rdma_cm_id *cm_id; | ||
79 | struct rdma_ucm_event_resp resp; | ||
80 | }; | ||
81 | |||
82 | static DEFINE_MUTEX(mut); | ||
83 | static DEFINE_IDR(ctx_idr); | ||
84 | |||
85 | static inline struct ucma_context *_ucma_find_context(int id, | ||
86 | struct ucma_file *file) | ||
87 | { | ||
88 | struct ucma_context *ctx; | ||
89 | |||
90 | ctx = idr_find(&ctx_idr, id); | ||
91 | if (!ctx) | ||
92 | ctx = ERR_PTR(-ENOENT); | ||
93 | else if (ctx->file != file) | ||
94 | ctx = ERR_PTR(-EINVAL); | ||
95 | return ctx; | ||
96 | } | ||
97 | |||
98 | static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id) | ||
99 | { | ||
100 | struct ucma_context *ctx; | ||
101 | |||
102 | mutex_lock(&mut); | ||
103 | ctx = _ucma_find_context(id, file); | ||
104 | if (!IS_ERR(ctx)) | ||
105 | atomic_inc(&ctx->ref); | ||
106 | mutex_unlock(&mut); | ||
107 | return ctx; | ||
108 | } | ||
109 | |||
110 | static void ucma_put_ctx(struct ucma_context *ctx) | ||
111 | { | ||
112 | if (atomic_dec_and_test(&ctx->ref)) | ||
113 | complete(&ctx->comp); | ||
114 | } | ||
115 | |||
116 | static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) | ||
117 | { | ||
118 | struct ucma_context *ctx; | ||
119 | int ret; | ||
120 | |||
121 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | ||
122 | if (!ctx) | ||
123 | return NULL; | ||
124 | |||
125 | atomic_set(&ctx->ref, 1); | ||
126 | init_completion(&ctx->comp); | ||
127 | ctx->file = file; | ||
128 | |||
129 | do { | ||
130 | ret = idr_pre_get(&ctx_idr, GFP_KERNEL); | ||
131 | if (!ret) | ||
132 | goto error; | ||
133 | |||
134 | mutex_lock(&mut); | ||
135 | ret = idr_get_new(&ctx_idr, ctx, &ctx->id); | ||
136 | mutex_unlock(&mut); | ||
137 | } while (ret == -EAGAIN); | ||
138 | |||
139 | if (ret) | ||
140 | goto error; | ||
141 | |||
142 | list_add_tail(&ctx->list, &file->ctx_list); | ||
143 | return ctx; | ||
144 | |||
145 | error: | ||
146 | kfree(ctx); | ||
147 | return NULL; | ||
148 | } | ||
149 | |||
150 | static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst, | ||
151 | struct rdma_conn_param *src) | ||
152 | { | ||
153 | if (src->private_data_len) | ||
154 | memcpy(dst->private_data, src->private_data, | ||
155 | src->private_data_len); | ||
156 | dst->private_data_len = src->private_data_len; | ||
157 | dst->responder_resources =src->responder_resources; | ||
158 | dst->initiator_depth = src->initiator_depth; | ||
159 | dst->flow_control = src->flow_control; | ||
160 | dst->retry_count = src->retry_count; | ||
161 | dst->rnr_retry_count = src->rnr_retry_count; | ||
162 | dst->srq = src->srq; | ||
163 | dst->qp_num = src->qp_num; | ||
164 | } | ||
165 | |||
166 | static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst, | ||
167 | struct rdma_ud_param *src) | ||
168 | { | ||
169 | if (src->private_data_len) | ||
170 | memcpy(dst->private_data, src->private_data, | ||
171 | src->private_data_len); | ||
172 | dst->private_data_len = src->private_data_len; | ||
173 | ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr); | ||
174 | dst->qp_num = src->qp_num; | ||
175 | dst->qkey = src->qkey; | ||
176 | } | ||
177 | |||
178 | static void ucma_set_event_context(struct ucma_context *ctx, | ||
179 | struct rdma_cm_event *event, | ||
180 | struct ucma_event *uevent) | ||
181 | { | ||
182 | uevent->ctx = ctx; | ||
183 | uevent->resp.uid = ctx->uid; | ||
184 | uevent->resp.id = ctx->id; | ||
185 | } | ||
186 | |||
187 | static int ucma_event_handler(struct rdma_cm_id *cm_id, | ||
188 | struct rdma_cm_event *event) | ||
189 | { | ||
190 | struct ucma_event *uevent; | ||
191 | struct ucma_context *ctx = cm_id->context; | ||
192 | int ret = 0; | ||
193 | |||
194 | uevent = kzalloc(sizeof(*uevent), GFP_KERNEL); | ||
195 | if (!uevent) | ||
196 | return event->event == RDMA_CM_EVENT_CONNECT_REQUEST; | ||
197 | |||
198 | uevent->cm_id = cm_id; | ||
199 | ucma_set_event_context(ctx, event, uevent); | ||
200 | uevent->resp.event = event->event; | ||
201 | uevent->resp.status = event->status; | ||
202 | if (cm_id->ps == RDMA_PS_UDP) | ||
203 | ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud); | ||
204 | else | ||
205 | ucma_copy_conn_event(&uevent->resp.param.conn, | ||
206 | &event->param.conn); | ||
207 | |||
208 | mutex_lock(&ctx->file->mut); | ||
209 | if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { | ||
210 | if (!ctx->backlog) { | ||
211 | ret = -EDQUOT; | ||
212 | goto out; | ||
213 | } | ||
214 | ctx->backlog--; | ||
215 | } | ||
216 | list_add_tail(&uevent->list, &ctx->file->event_list); | ||
217 | wake_up_interruptible(&ctx->file->poll_wait); | ||
218 | out: | ||
219 | mutex_unlock(&ctx->file->mut); | ||
220 | return ret; | ||
221 | } | ||
222 | |||
223 | static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf, | ||
224 | int in_len, int out_len) | ||
225 | { | ||
226 | struct ucma_context *ctx; | ||
227 | struct rdma_ucm_get_event cmd; | ||
228 | struct ucma_event *uevent; | ||
229 | int ret = 0; | ||
230 | DEFINE_WAIT(wait); | ||
231 | |||
232 | if (out_len < sizeof uevent->resp) | ||
233 | return -ENOSPC; | ||
234 | |||
235 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
236 | return -EFAULT; | ||
237 | |||
238 | mutex_lock(&file->mut); | ||
239 | while (list_empty(&file->event_list)) { | ||
240 | if (file->filp->f_flags & O_NONBLOCK) { | ||
241 | ret = -EAGAIN; | ||
242 | break; | ||
243 | } | ||
244 | |||
245 | if (signal_pending(current)) { | ||
246 | ret = -ERESTARTSYS; | ||
247 | break; | ||
248 | } | ||
249 | |||
250 | prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE); | ||
251 | mutex_unlock(&file->mut); | ||
252 | schedule(); | ||
253 | mutex_lock(&file->mut); | ||
254 | finish_wait(&file->poll_wait, &wait); | ||
255 | } | ||
256 | |||
257 | if (ret) | ||
258 | goto done; | ||
259 | |||
260 | uevent = list_entry(file->event_list.next, struct ucma_event, list); | ||
261 | |||
262 | if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { | ||
263 | ctx = ucma_alloc_ctx(file); | ||
264 | if (!ctx) { | ||
265 | ret = -ENOMEM; | ||
266 | goto done; | ||
267 | } | ||
268 | uevent->ctx->backlog++; | ||
269 | ctx->cm_id = uevent->cm_id; | ||
270 | ctx->cm_id->context = ctx; | ||
271 | uevent->resp.id = ctx->id; | ||
272 | } | ||
273 | |||
274 | if (copy_to_user((void __user *)(unsigned long)cmd.response, | ||
275 | &uevent->resp, sizeof uevent->resp)) { | ||
276 | ret = -EFAULT; | ||
277 | goto done; | ||
278 | } | ||
279 | |||
280 | list_del(&uevent->list); | ||
281 | uevent->ctx->events_reported++; | ||
282 | kfree(uevent); | ||
283 | done: | ||
284 | mutex_unlock(&file->mut); | ||
285 | return ret; | ||
286 | } | ||
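
A note on the event path: ucma_get_event() above blocks until an event is queued unless the file was opened with O_NONBLOCK (in which case it fails with -EAGAIN), and a pending signal turns into -ERESTARTSYS. From userspace the event is still requested as a write()-style command whose payload carries a pointer for the kernel's copy_to_user(). A hedged sketch, reusing the hypothetical ucm_issue() helper from the earlier example; the rdma_ucm_get_event layout and the GET_EVENT command index are assumptions, not taken from this diff.

	#include <errno.h>
	#include <stdint.h>

	/* Assumed to mirror struct rdma_ucm_get_event: just a response pointer. */
	struct ucm_get_event {
		uint64_t response;	/* user buffer the kernel fills with the event */
	};

	static int ucm_wait_event(int fd, void *resp, uint16_t resp_len)
	{
		/* RDMA_USER_CM_CMD_GET_EVENT; the numeric value is an assumption. */
		const uint32_t UCM_CMD_GET_EVENT = 12;
		struct ucm_get_event cmd = { .response = (uintptr_t)resp };
		int ret;

		do {
			ret = ucm_issue(fd, UCM_CMD_GET_EVENT, &cmd,
					sizeof(cmd), resp_len);
			/* EAGAIN only happens with O_NONBLOCK; a real caller
			 * would poll() instead of spinning like this. */
		} while (ret < 0 && errno == EAGAIN);

		return ret;
	}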
287 | |||
288 | static ssize_t ucma_create_id(struct ucma_file *file, | ||
289 | const char __user *inbuf, | ||
290 | int in_len, int out_len) | ||
291 | { | ||
292 | struct rdma_ucm_create_id cmd; | ||
293 | struct rdma_ucm_create_id_resp resp; | ||
294 | struct ucma_context *ctx; | ||
295 | int ret; | ||
296 | |||
297 | if (out_len < sizeof(resp)) | ||
298 | return -ENOSPC; | ||
299 | |||
300 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
301 | return -EFAULT; | ||
302 | |||
303 | mutex_lock(&file->mut); | ||
304 | ctx = ucma_alloc_ctx(file); | ||
305 | mutex_unlock(&file->mut); | ||
306 | if (!ctx) | ||
307 | return -ENOMEM; | ||
308 | |||
309 | ctx->uid = cmd.uid; | ||
310 | ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps); | ||
311 | if (IS_ERR(ctx->cm_id)) { | ||
312 | ret = PTR_ERR(ctx->cm_id); | ||
313 | goto err1; | ||
314 | } | ||
315 | |||
316 | resp.id = ctx->id; | ||
317 | if (copy_to_user((void __user *)(unsigned long)cmd.response, | ||
318 | &resp, sizeof(resp))) { | ||
319 | ret = -EFAULT; | ||
320 | goto err2; | ||
321 | } | ||
322 | return 0; | ||
323 | |||
324 | err2: | ||
325 | rdma_destroy_id(ctx->cm_id); | ||
326 | err1: | ||
327 | mutex_lock(&mut); | ||
328 | idr_remove(&ctx_idr, ctx->id); | ||
329 | mutex_unlock(&mut); | ||
330 | kfree(ctx); | ||
331 | return ret; | ||
332 | } | ||
333 | |||
334 | static void ucma_cleanup_events(struct ucma_context *ctx) | ||
335 | { | ||
336 | struct ucma_event *uevent, *tmp; | ||
337 | |||
338 | list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { | ||
339 | if (uevent->ctx != ctx) | ||
340 | continue; | ||
341 | |||
342 | list_del(&uevent->list); | ||
343 | |||
344 | /* clear incoming connections. */ | ||
345 | if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) | ||
346 | rdma_destroy_id(uevent->cm_id); | ||
347 | |||
348 | kfree(uevent); | ||
349 | } | ||
350 | } | ||
351 | |||
352 | static int ucma_free_ctx(struct ucma_context *ctx) | ||
353 | { | ||
354 | int events_reported; | ||
355 | |||
356 | /* No new events will be generated after destroying the id. */ | ||
357 | rdma_destroy_id(ctx->cm_id); | ||
358 | |||
359 | /* Cleanup events not yet reported to the user. */ | ||
360 | mutex_lock(&ctx->file->mut); | ||
361 | ucma_cleanup_events(ctx); | ||
362 | list_del(&ctx->list); | ||
363 | mutex_unlock(&ctx->file->mut); | ||
364 | |||
365 | events_reported = ctx->events_reported; | ||
366 | kfree(ctx); | ||
367 | return events_reported; | ||
368 | } | ||
369 | |||
370 | static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf, | ||
371 | int in_len, int out_len) | ||
372 | { | ||
373 | struct rdma_ucm_destroy_id cmd; | ||
374 | struct rdma_ucm_destroy_id_resp resp; | ||
375 | struct ucma_context *ctx; | ||
376 | int ret = 0; | ||
377 | |||
378 | if (out_len < sizeof(resp)) | ||
379 | return -ENOSPC; | ||
380 | |||
381 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
382 | return -EFAULT; | ||
383 | |||
384 | mutex_lock(&mut); | ||
385 | ctx = _ucma_find_context(cmd.id, file); | ||
386 | if (!IS_ERR(ctx)) | ||
387 | idr_remove(&ctx_idr, ctx->id); | ||
388 | mutex_unlock(&mut); | ||
389 | |||
390 | if (IS_ERR(ctx)) | ||
391 | return PTR_ERR(ctx); | ||
392 | |||
393 | ucma_put_ctx(ctx); | ||
394 | wait_for_completion(&ctx->comp); | ||
395 | resp.events_reported = ucma_free_ctx(ctx); | ||
396 | |||
397 | if (copy_to_user((void __user *)(unsigned long)cmd.response, | ||
398 | &resp, sizeof(resp))) | ||
399 | ret = -EFAULT; | ||
400 | |||
401 | return ret; | ||
402 | } | ||
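
The destroy path depends on the reference scheme set up in ucma_alloc_ctx(): a context starts with one reference, every handler takes another through ucma_get_ctx() and drops it with ucma_put_ctx(), and the completion only fires when the count reaches zero. ucma_destroy_id() therefore removes the id from the idr first (so no new lookups can succeed), drops the initial reference, and waits before tearing the context down. The same idiom in isolation, as a minimal kernel-style sketch with illustrative names that are not part of the patch:

	#include <linux/completion.h>
	#include <linux/slab.h>
	#include <asm/atomic.h>

	/* Minimal sketch of the "refcount + completion" teardown idiom used above. */
	struct obj {
		atomic_t		ref;
		struct completion	done;
	};

	static void obj_put(struct obj *o)
	{
		if (atomic_dec_and_test(&o->ref))
			complete(&o->done);
	}

	static void obj_destroy(struct obj *o)
	{
		/* 1. unpublish the object so no new users can find it (cf. idr_remove) */
		/* 2. drop the reference the object was created with */
		obj_put(o);
		/* 3. wait until every concurrent user has dropped its reference */
		wait_for_completion(&o->done);
		kfree(o);
	}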
403 | |||
404 | static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf, | ||
405 | int in_len, int out_len) | ||
406 | { | ||
407 | struct rdma_ucm_bind_addr cmd; | ||
408 | struct ucma_context *ctx; | ||
409 | int ret; | ||
410 | |||
411 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
412 | return -EFAULT; | ||
413 | |||
414 | ctx = ucma_get_ctx(file, cmd.id); | ||
415 | if (IS_ERR(ctx)) | ||
416 | return PTR_ERR(ctx); | ||
417 | |||
418 | ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); | ||
419 | ucma_put_ctx(ctx); | ||
420 | return ret; | ||
421 | } | ||
422 | |||
423 | static ssize_t ucma_resolve_addr(struct ucma_file *file, | ||
424 | const char __user *inbuf, | ||
425 | int in_len, int out_len) | ||
426 | { | ||
427 | struct rdma_ucm_resolve_addr cmd; | ||
428 | struct ucma_context *ctx; | ||
429 | int ret; | ||
430 | |||
431 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
432 | return -EFAULT; | ||
433 | |||
434 | ctx = ucma_get_ctx(file, cmd.id); | ||
435 | if (IS_ERR(ctx)) | ||
436 | return PTR_ERR(ctx); | ||
437 | |||
438 | ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, | ||
439 | (struct sockaddr *) &cmd.dst_addr, | ||
440 | cmd.timeout_ms); | ||
441 | ucma_put_ctx(ctx); | ||
442 | return ret; | ||
443 | } | ||
444 | |||
445 | static ssize_t ucma_resolve_route(struct ucma_file *file, | ||
446 | const char __user *inbuf, | ||
447 | int in_len, int out_len) | ||
448 | { | ||
449 | struct rdma_ucm_resolve_route cmd; | ||
450 | struct ucma_context *ctx; | ||
451 | int ret; | ||
452 | |||
453 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
454 | return -EFAULT; | ||
455 | |||
456 | ctx = ucma_get_ctx(file, cmd.id); | ||
457 | if (IS_ERR(ctx)) | ||
458 | return PTR_ERR(ctx); | ||
459 | |||
460 | ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); | ||
461 | ucma_put_ctx(ctx); | ||
462 | return ret; | ||
463 | } | ||
464 | |||
465 | static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp, | ||
466 | struct rdma_route *route) | ||
467 | { | ||
468 | struct rdma_dev_addr *dev_addr; | ||
469 | |||
470 | resp->num_paths = route->num_paths; | ||
471 | switch (route->num_paths) { | ||
472 | case 0: | ||
473 | dev_addr = &route->addr.dev_addr; | ||
474 | ib_addr_get_dgid(dev_addr, | ||
475 | (union ib_gid *) &resp->ib_route[0].dgid); | ||
476 | ib_addr_get_sgid(dev_addr, | ||
477 | (union ib_gid *) &resp->ib_route[0].sgid); | ||
478 | resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); | ||
479 | break; | ||
480 | case 2: | ||
481 | ib_copy_path_rec_to_user(&resp->ib_route[1], | ||
482 | &route->path_rec[1]); | ||
483 | /* fall through */ | ||
484 | case 1: | ||
485 | ib_copy_path_rec_to_user(&resp->ib_route[0], | ||
486 | &route->path_rec[0]); | ||
487 | break; | ||
488 | default: | ||
489 | break; | ||
490 | } | ||
491 | } | ||
492 | |||
493 | static ssize_t ucma_query_route(struct ucma_file *file, | ||
494 | const char __user *inbuf, | ||
495 | int in_len, int out_len) | ||
496 | { | ||
497 | struct rdma_ucm_query_route cmd; | ||
498 | struct rdma_ucm_query_route_resp resp; | ||
499 | struct ucma_context *ctx; | ||
500 | struct sockaddr *addr; | ||
501 | int ret = 0; | ||
502 | |||
503 | if (out_len < sizeof(resp)) | ||
504 | return -ENOSPC; | ||
505 | |||
506 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
507 | return -EFAULT; | ||
508 | |||
509 | ctx = ucma_get_ctx(file, cmd.id); | ||
510 | if (IS_ERR(ctx)) | ||
511 | return PTR_ERR(ctx); | ||
512 | |||
513 | memset(&resp, 0, sizeof resp); | ||
514 | addr = &ctx->cm_id->route.addr.src_addr; | ||
515 | memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? | ||
516 | sizeof(struct sockaddr_in) : | ||
517 | sizeof(struct sockaddr_in6)); | ||
518 | addr = &ctx->cm_id->route.addr.dst_addr; | ||
519 | memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ? | ||
520 | sizeof(struct sockaddr_in) : | ||
521 | sizeof(struct sockaddr_in6)); | ||
522 | if (!ctx->cm_id->device) | ||
523 | goto out; | ||
524 | |||
525 | resp.node_guid = ctx->cm_id->device->node_guid; | ||
526 | resp.port_num = ctx->cm_id->port_num; | ||
527 | switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) { | ||
528 | case RDMA_TRANSPORT_IB: | ||
529 | ucma_copy_ib_route(&resp, &ctx->cm_id->route); | ||
530 | break; | ||
531 | default: | ||
532 | break; | ||
533 | } | ||
534 | |||
535 | out: | ||
536 | if (copy_to_user((void __user *)(unsigned long)cmd.response, | ||
537 | &resp, sizeof(resp))) | ||
538 | ret = -EFAULT; | ||
539 | |||
540 | ucma_put_ctx(ctx); | ||
541 | return ret; | ||
542 | } | ||
543 | |||
544 | static void ucma_copy_conn_param(struct rdma_conn_param *dst, | ||
545 | struct rdma_ucm_conn_param *src) | ||
546 | { | ||
547 | dst->private_data = src->private_data; | ||
548 | dst->private_data_len = src->private_data_len; | ||
549 | dst->responder_resources =src->responder_resources; | ||
550 | dst->initiator_depth = src->initiator_depth; | ||
551 | dst->flow_control = src->flow_control; | ||
552 | dst->retry_count = src->retry_count; | ||
553 | dst->rnr_retry_count = src->rnr_retry_count; | ||
554 | dst->srq = src->srq; | ||
555 | dst->qp_num = src->qp_num; | ||
556 | } | ||
557 | |||
558 | static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, | ||
559 | int in_len, int out_len) | ||
560 | { | ||
561 | struct rdma_ucm_connect cmd; | ||
562 | struct rdma_conn_param conn_param; | ||
563 | struct ucma_context *ctx; | ||
564 | int ret; | ||
565 | |||
566 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
567 | return -EFAULT; | ||
568 | |||
569 | if (!cmd.conn_param.valid) | ||
570 | return -EINVAL; | ||
571 | |||
572 | ctx = ucma_get_ctx(file, cmd.id); | ||
573 | if (IS_ERR(ctx)) | ||
574 | return PTR_ERR(ctx); | ||
575 | |||
576 | ucma_copy_conn_param(&conn_param, &cmd.conn_param); | ||
577 | ret = rdma_connect(ctx->cm_id, &conn_param); | ||
578 | ucma_put_ctx(ctx); | ||
579 | return ret; | ||
580 | } | ||
581 | |||
582 | static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf, | ||
583 | int in_len, int out_len) | ||
584 | { | ||
585 | struct rdma_ucm_listen cmd; | ||
586 | struct ucma_context *ctx; | ||
587 | int ret; | ||
588 | |||
589 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
590 | return -EFAULT; | ||
591 | |||
592 | ctx = ucma_get_ctx(file, cmd.id); | ||
593 | if (IS_ERR(ctx)) | ||
594 | return PTR_ERR(ctx); | ||
595 | |||
596 | ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ? | ||
597 | cmd.backlog : UCMA_MAX_BACKLOG; | ||
598 | ret = rdma_listen(ctx->cm_id, ctx->backlog); | ||
599 | ucma_put_ctx(ctx); | ||
600 | return ret; | ||
601 | } | ||
602 | |||
603 | static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, | ||
604 | int in_len, int out_len) | ||
605 | { | ||
606 | struct rdma_ucm_accept cmd; | ||
607 | struct rdma_conn_param conn_param; | ||
608 | struct ucma_context *ctx; | ||
609 | int ret; | ||
610 | |||
611 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
612 | return -EFAULT; | ||
613 | |||
614 | ctx = ucma_get_ctx(file, cmd.id); | ||
615 | if (IS_ERR(ctx)) | ||
616 | return PTR_ERR(ctx); | ||
617 | |||
618 | if (cmd.conn_param.valid) { | ||
619 | ctx->uid = cmd.uid; | ||
620 | ucma_copy_conn_param(&conn_param, &cmd.conn_param); | ||
621 | ret = rdma_accept(ctx->cm_id, &conn_param); | ||
622 | } else | ||
623 | ret = rdma_accept(ctx->cm_id, NULL); | ||
624 | |||
625 | ucma_put_ctx(ctx); | ||
626 | return ret; | ||
627 | } | ||
628 | |||
629 | static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, | ||
630 | int in_len, int out_len) | ||
631 | { | ||
632 | struct rdma_ucm_reject cmd; | ||
633 | struct ucma_context *ctx; | ||
634 | int ret; | ||
635 | |||
636 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
637 | return -EFAULT; | ||
638 | |||
639 | ctx = ucma_get_ctx(file, cmd.id); | ||
640 | if (IS_ERR(ctx)) | ||
641 | return PTR_ERR(ctx); | ||
642 | |||
643 | ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len); | ||
644 | ucma_put_ctx(ctx); | ||
645 | return ret; | ||
646 | } | ||
647 | |||
648 | static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, | ||
649 | int in_len, int out_len) | ||
650 | { | ||
651 | struct rdma_ucm_disconnect cmd; | ||
652 | struct ucma_context *ctx; | ||
653 | int ret; | ||
654 | |||
655 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
656 | return -EFAULT; | ||
657 | |||
658 | ctx = ucma_get_ctx(file, cmd.id); | ||
659 | if (IS_ERR(ctx)) | ||
660 | return PTR_ERR(ctx); | ||
661 | |||
662 | ret = rdma_disconnect(ctx->cm_id); | ||
663 | ucma_put_ctx(ctx); | ||
664 | return ret; | ||
665 | } | ||
666 | |||
667 | static ssize_t ucma_init_qp_attr(struct ucma_file *file, | ||
668 | const char __user *inbuf, | ||
669 | int in_len, int out_len) | ||
670 | { | ||
671 | struct rdma_ucm_init_qp_attr cmd; | ||
672 | struct ib_uverbs_qp_attr resp; | ||
673 | struct ucma_context *ctx; | ||
674 | struct ib_qp_attr qp_attr; | ||
675 | int ret; | ||
676 | |||
677 | if (out_len < sizeof(resp)) | ||
678 | return -ENOSPC; | ||
679 | |||
680 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
681 | return -EFAULT; | ||
682 | |||
683 | ctx = ucma_get_ctx(file, cmd.id); | ||
684 | if (IS_ERR(ctx)) | ||
685 | return PTR_ERR(ctx); | ||
686 | |||
687 | resp.qp_attr_mask = 0; | ||
688 | memset(&qp_attr, 0, sizeof qp_attr); | ||
689 | qp_attr.qp_state = cmd.qp_state; | ||
690 | ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); | ||
691 | if (ret) | ||
692 | goto out; | ||
693 | |||
694 | ib_copy_qp_attr_to_user(&resp, &qp_attr); | ||
695 | if (copy_to_user((void __user *)(unsigned long)cmd.response, | ||
696 | &resp, sizeof(resp))) | ||
697 | ret = -EFAULT; | ||
698 | |||
699 | out: | ||
700 | ucma_put_ctx(ctx); | ||
701 | return ret; | ||
702 | } | ||
703 | |||
704 | static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, | ||
705 | int in_len, int out_len) | ||
706 | { | ||
707 | struct rdma_ucm_notify cmd; | ||
708 | struct ucma_context *ctx; | ||
709 | int ret; | ||
710 | |||
711 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
712 | return -EFAULT; | ||
713 | |||
714 | ctx = ucma_get_ctx(file, cmd.id); | ||
715 | if (IS_ERR(ctx)) | ||
716 | return PTR_ERR(ctx); | ||
717 | |||
718 | ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event); | ||
719 | ucma_put_ctx(ctx); | ||
720 | return ret; | ||
721 | } | ||
722 | |||
723 | static ssize_t (*ucma_cmd_table[])(struct ucma_file *file, | ||
724 | const char __user *inbuf, | ||
725 | int in_len, int out_len) = { | ||
726 | [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id, | ||
727 | [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id, | ||
728 | [RDMA_USER_CM_CMD_BIND_ADDR] = ucma_bind_addr, | ||
729 | [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr, | ||
730 | [RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route, | ||
731 | [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route, | ||
732 | [RDMA_USER_CM_CMD_CONNECT] = ucma_connect, | ||
733 | [RDMA_USER_CM_CMD_LISTEN] = ucma_listen, | ||
734 | [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept, | ||
735 | [RDMA_USER_CM_CMD_REJECT] = ucma_reject, | ||
736 | [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect, | ||
737 | [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr, | ||
738 | [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event, | ||
739 | [RDMA_USER_CM_CMD_GET_OPTION] = NULL, | ||
740 | [RDMA_USER_CM_CMD_SET_OPTION] = NULL, | ||
741 | [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify, | ||
742 | }; | ||
743 | |||
744 | static ssize_t ucma_write(struct file *filp, const char __user *buf, | ||
745 | size_t len, loff_t *pos) | ||
746 | { | ||
747 | struct ucma_file *file = filp->private_data; | ||
748 | struct rdma_ucm_cmd_hdr hdr; | ||
749 | ssize_t ret; | ||
750 | |||
751 | if (len < sizeof(hdr)) | ||
752 | return -EINVAL; | ||
753 | |||
754 | if (copy_from_user(&hdr, buf, sizeof(hdr))) | ||
755 | return -EFAULT; | ||
756 | |||
757 | if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table)) | ||
758 | return -EINVAL; | ||
759 | |||
760 | if (hdr.in + sizeof(hdr) > len) | ||
761 | return -EINVAL; | ||
762 | |||
763 | if (!ucma_cmd_table[hdr.cmd]) | ||
764 | return -ENOSYS; | ||
765 | |||
766 | ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out); | ||
767 | if (!ret) | ||
768 | ret = len; | ||
769 | |||
770 | return ret; | ||
771 | } | ||
772 | |||
773 | static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait) | ||
774 | { | ||
775 | struct ucma_file *file = filp->private_data; | ||
776 | unsigned int mask = 0; | ||
777 | |||
778 | poll_wait(filp, &file->poll_wait, wait); | ||
779 | |||
780 | if (!list_empty(&file->event_list)) | ||
781 | mask = POLLIN | POLLRDNORM; | ||
782 | |||
783 | return mask; | ||
784 | } | ||
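
Because events arrive on the same file descriptor the commands are written to, ucma_poll() lets a process fold the rdma_cm device into an ordinary poll()/select() loop; POLLIN simply means at least one event is queued and the next GET_EVENT will not block. A brief, purely illustrative sketch:

	#include <poll.h>

	/* Wait up to timeout_ms until the rdma_cm fd has a CM event queued. */
	static int ucm_event_ready(int fd, int timeout_ms)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		int ret = poll(&pfd, 1, timeout_ms);

		if (ret <= 0)
			return ret;	/* 0: timeout, <0: error (check errno) */
		return (pfd.revents & POLLIN) ? 1 : 0;
	}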
785 | |||
786 | static int ucma_open(struct inode *inode, struct file *filp) | ||
787 | { | ||
788 | struct ucma_file *file; | ||
789 | |||
790 | file = kmalloc(sizeof *file, GFP_KERNEL); | ||
791 | if (!file) | ||
792 | return -ENOMEM; | ||
793 | |||
794 | INIT_LIST_HEAD(&file->event_list); | ||
795 | INIT_LIST_HEAD(&file->ctx_list); | ||
796 | init_waitqueue_head(&file->poll_wait); | ||
797 | mutex_init(&file->mut); | ||
798 | |||
799 | filp->private_data = file; | ||
800 | file->filp = filp; | ||
801 | return 0; | ||
802 | } | ||
803 | |||
804 | static int ucma_close(struct inode *inode, struct file *filp) | ||
805 | { | ||
806 | struct ucma_file *file = filp->private_data; | ||
807 | struct ucma_context *ctx, *tmp; | ||
808 | |||
809 | mutex_lock(&file->mut); | ||
810 | list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) { | ||
811 | mutex_unlock(&file->mut); | ||
812 | |||
813 | mutex_lock(&mut); | ||
814 | idr_remove(&ctx_idr, ctx->id); | ||
815 | mutex_unlock(&mut); | ||
816 | |||
817 | ucma_free_ctx(ctx); | ||
818 | mutex_lock(&file->mut); | ||
819 | } | ||
820 | mutex_unlock(&file->mut); | ||
821 | kfree(file); | ||
822 | return 0; | ||
823 | } | ||
824 | |||
825 | static struct file_operations ucma_fops = { | ||
826 | .owner = THIS_MODULE, | ||
827 | .open = ucma_open, | ||
828 | .release = ucma_close, | ||
829 | .write = ucma_write, | ||
830 | .poll = ucma_poll, | ||
831 | }; | ||
832 | |||
833 | static struct miscdevice ucma_misc = { | ||
834 | .minor = MISC_DYNAMIC_MINOR, | ||
835 | .name = "rdma_cm", | ||
836 | .fops = &ucma_fops, | ||
837 | }; | ||
838 | |||
839 | static ssize_t show_abi_version(struct device *dev, | ||
840 | struct device_attribute *attr, | ||
841 | char *buf) | ||
842 | { | ||
843 | return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION); | ||
844 | } | ||
845 | static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); | ||
846 | |||
847 | static int __init ucma_init(void) | ||
848 | { | ||
849 | int ret; | ||
850 | |||
851 | ret = misc_register(&ucma_misc); | ||
852 | if (ret) | ||
853 | return ret; | ||
854 | |||
855 | ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version); | ||
856 | if (ret) { | ||
857 | printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n"); | ||
858 | goto err; | ||
859 | } | ||
860 | return 0; | ||
861 | err: | ||
862 | misc_deregister(&ucma_misc); | ||
863 | return ret; | ||
864 | } | ||
865 | |||
866 | static void __exit ucma_cleanup(void) | ||
867 | { | ||
868 | device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); | ||
869 | misc_deregister(&ucma_misc); | ||
870 | idr_destroy(&ctx_idr); | ||
871 | } | ||
872 | |||
873 | module_init(ucma_init); | ||
874 | module_exit(ucma_cleanup); | ||
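
Finally, ucma_init() publishes RDMA_USER_CM_ABI_VERSION through a sysfs attribute so userspace libraries can refuse to drive a kernel whose command layout differs from the one they were built against. A hedged example of such a check; the sysfs path is an assumption based on the default location for a misc device named "rdma_cm", not something this patch states:

	#include <stdio.h>

	/* Return 1 if the kernel's rdma_ucm ABI matches what we were built against. */
	static int ucm_abi_matches(int expected)
	{
		FILE *f = fopen("/sys/class/misc/rdma_cm/abi_version", "r"); /* path assumed */
		int abi = -1;

		if (!f)
			return 0;
		if (fscanf(f, "%d", &abi) != 1)
			abi = -1;
		fclose(f);
		return abi == expected;
	}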
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index ce46b13ae02b..5440da0e59b4 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -32,8 +32,8 @@

 #include <rdma/ib_marshall.h>

-static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
-				    struct ib_ah_attr *src)
+void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
+			     struct ib_ah_attr *src)
 {
 	memcpy(dst->grh.dgid, src->grh.dgid.raw, sizeof src->grh.dgid);
 	dst->grh.flow_label = src->grh.flow_label;
@@ -47,6 +47,7 @@ static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
 	dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
 	dst->port_num = src->port_num;
 }
+EXPORT_SYMBOL(ib_copy_ah_attr_to_user);

 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
 			     struct ib_qp_attr *src)