diff options
author | Sean Hefty <sean.hefty@intel.com> | 2005-09-01 12:28:03 -0400 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2005-09-07 12:48:52 -0400 |
commit | 0b2b35f68140ceeb1b78ef85680198e63ebc8649 (patch) | |
tree | 342c13bd8a1e1c071389df8ef9951a723cb4b270 | |
parent | 1d6801f9dd3ebb054ae685153a01b1a4ec817f46 (diff) |
[PATCH] IB: Add user-supplied context to userspace CM ABI
- Add user specified context to all uCM events. Users will not retrieve
any events associated with the context after destroying the corresponding
cm_id.
- Provide the ib_cm_init_qp_attr() call to userspace clients of the CM.
This call may be used to set QP attributes properly before modifying the QP.
- Fixes some error handling synchronization and cleanup issues.
- Performs some minor code cleanup.
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r-- | drivers/infiniband/core/ucm.c | 287 | ||||
-rw-r--r-- | drivers/infiniband/core/ucm.h | 11 | ||||
-rw-r--r-- | include/rdma/ib_user_cm.h | 72 |
3 files changed, 261 insertions, 109 deletions
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 79595826ccc7..d0f0b0a2edd3 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c | |||
@@ -72,7 +72,6 @@ enum { | |||
72 | 72 | ||
73 | static struct semaphore ctx_id_mutex; | 73 | static struct semaphore ctx_id_mutex; |
74 | static struct idr ctx_id_table; | 74 | static struct idr ctx_id_table; |
75 | static int ctx_id_rover = 0; | ||
76 | 75 | ||
77 | static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id) | 76 | static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id) |
78 | { | 77 | { |
@@ -97,33 +96,16 @@ static void ib_ucm_ctx_put(struct ib_ucm_context *ctx) | |||
97 | wake_up(&ctx->wait); | 96 | wake_up(&ctx->wait); |
98 | } | 97 | } |
99 | 98 | ||
100 | static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id) | 99 | static inline int ib_ucm_new_cm_id(int event) |
101 | { | 100 | { |
102 | struct ib_ucm_context *ctx; | 101 | return event == IB_CM_REQ_RECEIVED || event == IB_CM_SIDR_REQ_RECEIVED; |
103 | struct ib_ucm_event *uevent; | 102 | } |
104 | |||
105 | down(&ctx_id_mutex); | ||
106 | ctx = idr_find(&ctx_id_table, id); | ||
107 | if (!ctx) | ||
108 | ctx = ERR_PTR(-ENOENT); | ||
109 | else if (ctx->file != file) | ||
110 | ctx = ERR_PTR(-EINVAL); | ||
111 | else | ||
112 | idr_remove(&ctx_id_table, ctx->id); | ||
113 | up(&ctx_id_mutex); | ||
114 | |||
115 | if (IS_ERR(ctx)) | ||
116 | return PTR_ERR(ctx); | ||
117 | |||
118 | atomic_dec(&ctx->ref); | ||
119 | wait_event(ctx->wait, !atomic_read(&ctx->ref)); | ||
120 | 103 | ||
121 | /* No new events will be generated after destroying the cm_id. */ | 104 | static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx) |
122 | if (!IS_ERR(ctx->cm_id)) | 105 | { |
123 | ib_destroy_cm_id(ctx->cm_id); | 106 | struct ib_ucm_event *uevent; |
124 | 107 | ||
125 | /* Cleanup events not yet reported to the user. */ | 108 | down(&ctx->file->mutex); |
126 | down(&file->mutex); | ||
127 | list_del(&ctx->file_list); | 109 | list_del(&ctx->file_list); |
128 | while (!list_empty(&ctx->events)) { | 110 | while (!list_empty(&ctx->events)) { |
129 | 111 | ||
@@ -133,15 +115,12 @@ static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id) | |||
133 | list_del(&uevent->ctx_list); | 115 | list_del(&uevent->ctx_list); |
134 | 116 | ||
135 | /* clear incoming connections. */ | 117 | /* clear incoming connections. */ |
136 | if (uevent->cm_id) | 118 | if (ib_ucm_new_cm_id(uevent->resp.event)) |
137 | ib_destroy_cm_id(uevent->cm_id); | 119 | ib_destroy_cm_id(uevent->cm_id); |
138 | 120 | ||
139 | kfree(uevent); | 121 | kfree(uevent); |
140 | } | 122 | } |
141 | up(&file->mutex); | 123 | up(&ctx->file->mutex); |
142 | |||
143 | kfree(ctx); | ||
144 | return 0; | ||
145 | } | 124 | } |
146 | 125 | ||
147 | static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) | 126 | static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) |
@@ -153,36 +132,31 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) | |||
153 | if (!ctx) | 132 | if (!ctx) |
154 | return NULL; | 133 | return NULL; |
155 | 134 | ||
135 | memset(ctx, 0, sizeof *ctx); | ||
156 | atomic_set(&ctx->ref, 1); | 136 | atomic_set(&ctx->ref, 1); |
157 | init_waitqueue_head(&ctx->wait); | 137 | init_waitqueue_head(&ctx->wait); |
158 | ctx->file = file; | 138 | ctx->file = file; |
159 | |||
160 | INIT_LIST_HEAD(&ctx->events); | 139 | INIT_LIST_HEAD(&ctx->events); |
161 | 140 | ||
162 | list_add_tail(&ctx->file_list, &file->ctxs); | 141 | do { |
163 | 142 | result = idr_pre_get(&ctx_id_table, GFP_KERNEL); | |
164 | ctx_id_rover = (ctx_id_rover + 1) & INT_MAX; | 143 | if (!result) |
165 | retry: | 144 | goto error; |
166 | result = idr_pre_get(&ctx_id_table, GFP_KERNEL); | ||
167 | if (!result) | ||
168 | goto error; | ||
169 | 145 | ||
170 | down(&ctx_id_mutex); | 146 | down(&ctx_id_mutex); |
171 | result = idr_get_new_above(&ctx_id_table, ctx, ctx_id_rover, &ctx->id); | 147 | result = idr_get_new(&ctx_id_table, ctx, &ctx->id); |
172 | up(&ctx_id_mutex); | 148 | up(&ctx_id_mutex); |
149 | } while (result == -EAGAIN); | ||
173 | 150 | ||
174 | if (result == -EAGAIN) | ||
175 | goto retry; | ||
176 | if (result) | 151 | if (result) |
177 | goto error; | 152 | goto error; |
178 | 153 | ||
154 | list_add_tail(&ctx->file_list, &file->ctxs); | ||
179 | ucm_dbg("Allocated CM ID <%d>\n", ctx->id); | 155 | ucm_dbg("Allocated CM ID <%d>\n", ctx->id); |
180 | |||
181 | return ctx; | 156 | return ctx; |
157 | |||
182 | error: | 158 | error: |
183 | list_del(&ctx->file_list); | ||
184 | kfree(ctx); | 159 | kfree(ctx); |
185 | |||
186 | return NULL; | 160 | return NULL; |
187 | } | 161 | } |
188 | /* | 162 | /* |
@@ -219,12 +193,9 @@ static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath, | |||
219 | kpath->packet_life_time_selector; | 193 | kpath->packet_life_time_selector; |
220 | } | 194 | } |
221 | 195 | ||
222 | static void ib_ucm_event_req_get(struct ib_ucm_context *ctx, | 196 | static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq, |
223 | struct ib_ucm_req_event_resp *ureq, | ||
224 | struct ib_cm_req_event_param *kreq) | 197 | struct ib_cm_req_event_param *kreq) |
225 | { | 198 | { |
226 | ureq->listen_id = ctx->id; | ||
227 | |||
228 | ureq->remote_ca_guid = kreq->remote_ca_guid; | 199 | ureq->remote_ca_guid = kreq->remote_ca_guid; |
229 | ureq->remote_qkey = kreq->remote_qkey; | 200 | ureq->remote_qkey = kreq->remote_qkey; |
230 | ureq->remote_qpn = kreq->remote_qpn; | 201 | ureq->remote_qpn = kreq->remote_qpn; |
@@ -259,14 +230,6 @@ static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep, | |||
259 | urep->srq = krep->srq; | 230 | urep->srq = krep->srq; |
260 | } | 231 | } |
261 | 232 | ||
262 | static void ib_ucm_event_sidr_req_get(struct ib_ucm_context *ctx, | ||
263 | struct ib_ucm_sidr_req_event_resp *ureq, | ||
264 | struct ib_cm_sidr_req_event_param *kreq) | ||
265 | { | ||
266 | ureq->listen_id = ctx->id; | ||
267 | ureq->pkey = kreq->pkey; | ||
268 | } | ||
269 | |||
270 | static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep, | 233 | static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep, |
271 | struct ib_cm_sidr_rep_event_param *krep) | 234 | struct ib_cm_sidr_rep_event_param *krep) |
272 | { | 235 | { |
@@ -275,15 +238,14 @@ static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep, | |||
275 | urep->qpn = krep->qpn; | 238 | urep->qpn = krep->qpn; |
276 | }; | 239 | }; |
277 | 240 | ||
278 | static int ib_ucm_event_process(struct ib_ucm_context *ctx, | 241 | static int ib_ucm_event_process(struct ib_cm_event *evt, |
279 | struct ib_cm_event *evt, | ||
280 | struct ib_ucm_event *uvt) | 242 | struct ib_ucm_event *uvt) |
281 | { | 243 | { |
282 | void *info = NULL; | 244 | void *info = NULL; |
283 | 245 | ||
284 | switch (evt->event) { | 246 | switch (evt->event) { |
285 | case IB_CM_REQ_RECEIVED: | 247 | case IB_CM_REQ_RECEIVED: |
286 | ib_ucm_event_req_get(ctx, &uvt->resp.u.req_resp, | 248 | ib_ucm_event_req_get(&uvt->resp.u.req_resp, |
287 | &evt->param.req_rcvd); | 249 | &evt->param.req_rcvd); |
288 | uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE; | 250 | uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE; |
289 | uvt->resp.present = IB_UCM_PRES_PRIMARY; | 251 | uvt->resp.present = IB_UCM_PRES_PRIMARY; |
@@ -331,8 +293,8 @@ static int ib_ucm_event_process(struct ib_ucm_context *ctx, | |||
331 | info = evt->param.apr_rcvd.apr_info; | 293 | info = evt->param.apr_rcvd.apr_info; |
332 | break; | 294 | break; |
333 | case IB_CM_SIDR_REQ_RECEIVED: | 295 | case IB_CM_SIDR_REQ_RECEIVED: |
334 | ib_ucm_event_sidr_req_get(ctx, &uvt->resp.u.sidr_req_resp, | 296 | uvt->resp.u.sidr_req_resp.pkey = |
335 | &evt->param.sidr_req_rcvd); | 297 | evt->param.sidr_req_rcvd.pkey; |
336 | uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; | 298 | uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; |
337 | break; | 299 | break; |
338 | case IB_CM_SIDR_REP_RECEIVED: | 300 | case IB_CM_SIDR_REP_RECEIVED: |
@@ -378,31 +340,24 @@ static int ib_ucm_event_handler(struct ib_cm_id *cm_id, | |||
378 | struct ib_ucm_event *uevent; | 340 | struct ib_ucm_event *uevent; |
379 | struct ib_ucm_context *ctx; | 341 | struct ib_ucm_context *ctx; |
380 | int result = 0; | 342 | int result = 0; |
381 | int id; | ||
382 | 343 | ||
383 | ctx = cm_id->context; | 344 | ctx = cm_id->context; |
384 | 345 | ||
385 | if (event->event == IB_CM_REQ_RECEIVED || | ||
386 | event->event == IB_CM_SIDR_REQ_RECEIVED) | ||
387 | id = IB_UCM_CM_ID_INVALID; | ||
388 | else | ||
389 | id = ctx->id; | ||
390 | |||
391 | uevent = kmalloc(sizeof(*uevent), GFP_KERNEL); | 346 | uevent = kmalloc(sizeof(*uevent), GFP_KERNEL); |
392 | if (!uevent) | 347 | if (!uevent) |
393 | goto err1; | 348 | goto err1; |
394 | 349 | ||
395 | memset(uevent, 0, sizeof(*uevent)); | 350 | memset(uevent, 0, sizeof(*uevent)); |
396 | uevent->resp.id = id; | 351 | uevent->ctx = ctx; |
352 | uevent->cm_id = cm_id; | ||
353 | uevent->resp.uid = ctx->uid; | ||
354 | uevent->resp.id = ctx->id; | ||
397 | uevent->resp.event = event->event; | 355 | uevent->resp.event = event->event; |
398 | 356 | ||
399 | result = ib_ucm_event_process(ctx, event, uevent); | 357 | result = ib_ucm_event_process(event, uevent); |
400 | if (result) | 358 | if (result) |
401 | goto err2; | 359 | goto err2; |
402 | 360 | ||
403 | uevent->ctx = ctx; | ||
404 | uevent->cm_id = (id == IB_UCM_CM_ID_INVALID) ? cm_id : NULL; | ||
405 | |||
406 | down(&ctx->file->mutex); | 361 | down(&ctx->file->mutex); |
407 | list_add_tail(&uevent->file_list, &ctx->file->events); | 362 | list_add_tail(&uevent->file_list, &ctx->file->events); |
408 | list_add_tail(&uevent->ctx_list, &ctx->events); | 363 | list_add_tail(&uevent->ctx_list, &ctx->events); |
@@ -414,7 +369,7 @@ err2: | |||
414 | kfree(uevent); | 369 | kfree(uevent); |
415 | err1: | 370 | err1: |
416 | /* Destroy new cm_id's */ | 371 | /* Destroy new cm_id's */ |
417 | return (id == IB_UCM_CM_ID_INVALID); | 372 | return ib_ucm_new_cm_id(event->event); |
418 | } | 373 | } |
419 | 374 | ||
420 | static ssize_t ib_ucm_event(struct ib_ucm_file *file, | 375 | static ssize_t ib_ucm_event(struct ib_ucm_file *file, |
@@ -423,7 +378,7 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file, | |||
423 | { | 378 | { |
424 | struct ib_ucm_context *ctx; | 379 | struct ib_ucm_context *ctx; |
425 | struct ib_ucm_event_get cmd; | 380 | struct ib_ucm_event_get cmd; |
426 | struct ib_ucm_event *uevent = NULL; | 381 | struct ib_ucm_event *uevent; |
427 | int result = 0; | 382 | int result = 0; |
428 | DEFINE_WAIT(wait); | 383 | DEFINE_WAIT(wait); |
429 | 384 | ||
@@ -436,7 +391,6 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file, | |||
436 | * wait | 391 | * wait |
437 | */ | 392 | */ |
438 | down(&file->mutex); | 393 | down(&file->mutex); |
439 | |||
440 | while (list_empty(&file->events)) { | 394 | while (list_empty(&file->events)) { |
441 | 395 | ||
442 | if (file->filp->f_flags & O_NONBLOCK) { | 396 | if (file->filp->f_flags & O_NONBLOCK) { |
@@ -463,21 +417,18 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file, | |||
463 | 417 | ||
464 | uevent = list_entry(file->events.next, struct ib_ucm_event, file_list); | 418 | uevent = list_entry(file->events.next, struct ib_ucm_event, file_list); |
465 | 419 | ||
466 | if (!uevent->cm_id) | 420 | if (ib_ucm_new_cm_id(uevent->resp.event)) { |
467 | goto user; | 421 | ctx = ib_ucm_ctx_alloc(file); |
422 | if (!ctx) { | ||
423 | result = -ENOMEM; | ||
424 | goto done; | ||
425 | } | ||
468 | 426 | ||
469 | ctx = ib_ucm_ctx_alloc(file); | 427 | ctx->cm_id = uevent->cm_id; |
470 | if (!ctx) { | 428 | ctx->cm_id->context = ctx; |
471 | result = -ENOMEM; | 429 | uevent->resp.id = ctx->id; |
472 | goto done; | ||
473 | } | 430 | } |
474 | 431 | ||
475 | ctx->cm_id = uevent->cm_id; | ||
476 | ctx->cm_id->context = ctx; | ||
477 | |||
478 | uevent->resp.id = ctx->id; | ||
479 | |||
480 | user: | ||
481 | if (copy_to_user((void __user *)(unsigned long)cmd.response, | 432 | if (copy_to_user((void __user *)(unsigned long)cmd.response, |
482 | &uevent->resp, sizeof(uevent->resp))) { | 433 | &uevent->resp, sizeof(uevent->resp))) { |
483 | result = -EFAULT; | 434 | result = -EFAULT; |
@@ -485,12 +436,10 @@ user: | |||
485 | } | 436 | } |
486 | 437 | ||
487 | if (uevent->data) { | 438 | if (uevent->data) { |
488 | |||
489 | if (cmd.data_len < uevent->data_len) { | 439 | if (cmd.data_len < uevent->data_len) { |
490 | result = -ENOMEM; | 440 | result = -ENOMEM; |
491 | goto done; | 441 | goto done; |
492 | } | 442 | } |
493 | |||
494 | if (copy_to_user((void __user *)(unsigned long)cmd.data, | 443 | if (copy_to_user((void __user *)(unsigned long)cmd.data, |
495 | uevent->data, uevent->data_len)) { | 444 | uevent->data, uevent->data_len)) { |
496 | result = -EFAULT; | 445 | result = -EFAULT; |
@@ -499,12 +448,10 @@ user: | |||
499 | } | 448 | } |
500 | 449 | ||
501 | if (uevent->info) { | 450 | if (uevent->info) { |
502 | |||
503 | if (cmd.info_len < uevent->info_len) { | 451 | if (cmd.info_len < uevent->info_len) { |
504 | result = -ENOMEM; | 452 | result = -ENOMEM; |
505 | goto done; | 453 | goto done; |
506 | } | 454 | } |
507 | |||
508 | if (copy_to_user((void __user *)(unsigned long)cmd.info, | 455 | if (copy_to_user((void __user *)(unsigned long)cmd.info, |
509 | uevent->info, uevent->info_len)) { | 456 | uevent->info, uevent->info_len)) { |
510 | result = -EFAULT; | 457 | result = -EFAULT; |
@@ -514,6 +461,7 @@ user: | |||
514 | 461 | ||
515 | list_del(&uevent->file_list); | 462 | list_del(&uevent->file_list); |
516 | list_del(&uevent->ctx_list); | 463 | list_del(&uevent->ctx_list); |
464 | uevent->ctx->events_reported++; | ||
517 | 465 | ||
518 | kfree(uevent->data); | 466 | kfree(uevent->data); |
519 | kfree(uevent->info); | 467 | kfree(uevent->info); |
@@ -545,6 +493,7 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file, | |||
545 | if (!ctx) | 493 | if (!ctx) |
546 | return -ENOMEM; | 494 | return -ENOMEM; |
547 | 495 | ||
496 | ctx->uid = cmd.uid; | ||
548 | ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx); | 497 | ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx); |
549 | if (IS_ERR(ctx->cm_id)) { | 498 | if (IS_ERR(ctx->cm_id)) { |
550 | result = PTR_ERR(ctx->cm_id); | 499 | result = PTR_ERR(ctx->cm_id); |
@@ -561,7 +510,14 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file, | |||
561 | return 0; | 510 | return 0; |
562 | 511 | ||
563 | err: | 512 | err: |
564 | ib_ucm_destroy_ctx(file, ctx->id); | 513 | down(&ctx_id_mutex); |
514 | idr_remove(&ctx_id_table, ctx->id); | ||
515 | up(&ctx_id_mutex); | ||
516 | |||
517 | if (!IS_ERR(ctx->cm_id)) | ||
518 | ib_destroy_cm_id(ctx->cm_id); | ||
519 | |||
520 | kfree(ctx); | ||
565 | return result; | 521 | return result; |
566 | } | 522 | } |
567 | 523 | ||
@@ -570,11 +526,44 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file, | |||
570 | int in_len, int out_len) | 526 | int in_len, int out_len) |
571 | { | 527 | { |
572 | struct ib_ucm_destroy_id cmd; | 528 | struct ib_ucm_destroy_id cmd; |
529 | struct ib_ucm_destroy_id_resp resp; | ||
530 | struct ib_ucm_context *ctx; | ||
531 | int result = 0; | ||
532 | |||
533 | if (out_len < sizeof(resp)) | ||
534 | return -ENOSPC; | ||
573 | 535 | ||
574 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | 536 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) |
575 | return -EFAULT; | 537 | return -EFAULT; |
576 | 538 | ||
577 | return ib_ucm_destroy_ctx(file, cmd.id); | 539 | down(&ctx_id_mutex); |
540 | ctx = idr_find(&ctx_id_table, cmd.id); | ||
541 | if (!ctx) | ||
542 | ctx = ERR_PTR(-ENOENT); | ||
543 | else if (ctx->file != file) | ||
544 | ctx = ERR_PTR(-EINVAL); | ||
545 | else | ||
546 | idr_remove(&ctx_id_table, ctx->id); | ||
547 | up(&ctx_id_mutex); | ||
548 | |||
549 | if (IS_ERR(ctx)) | ||
550 | return PTR_ERR(ctx); | ||
551 | |||
552 | atomic_dec(&ctx->ref); | ||
553 | wait_event(ctx->wait, !atomic_read(&ctx->ref)); | ||
554 | |||
555 | /* No new events will be generated after destroying the cm_id. */ | ||
556 | ib_destroy_cm_id(ctx->cm_id); | ||
557 | /* Cleanup events not yet reported to the user. */ | ||
558 | ib_ucm_cleanup_events(ctx); | ||
559 | |||
560 | resp.events_reported = ctx->events_reported; | ||
561 | if (copy_to_user((void __user *)(unsigned long)cmd.response, | ||
562 | &resp, sizeof(resp))) | ||
563 | result = -EFAULT; | ||
564 | |||
565 | kfree(ctx); | ||
566 | return result; | ||
578 | } | 567 | } |
579 | 568 | ||
580 | static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, | 569 | static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, |
@@ -609,6 +598,98 @@ static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, | |||
609 | return result; | 598 | return result; |
610 | } | 599 | } |
611 | 600 | ||
601 | static void ib_ucm_copy_ah_attr(struct ib_ucm_ah_attr *dest_attr, | ||
602 | struct ib_ah_attr *src_attr) | ||
603 | { | ||
604 | memcpy(dest_attr->grh_dgid, src_attr->grh.dgid.raw, | ||
605 | sizeof src_attr->grh.dgid); | ||
606 | dest_attr->grh_flow_label = src_attr->grh.flow_label; | ||
607 | dest_attr->grh_sgid_index = src_attr->grh.sgid_index; | ||
608 | dest_attr->grh_hop_limit = src_attr->grh.hop_limit; | ||
609 | dest_attr->grh_traffic_class = src_attr->grh.traffic_class; | ||
610 | |||
611 | dest_attr->dlid = src_attr->dlid; | ||
612 | dest_attr->sl = src_attr->sl; | ||
613 | dest_attr->src_path_bits = src_attr->src_path_bits; | ||
614 | dest_attr->static_rate = src_attr->static_rate; | ||
615 | dest_attr->is_global = (src_attr->ah_flags & IB_AH_GRH); | ||
616 | dest_attr->port_num = src_attr->port_num; | ||
617 | } | ||
618 | |||
619 | static void ib_ucm_copy_qp_attr(struct ib_ucm_init_qp_attr_resp *dest_attr, | ||
620 | struct ib_qp_attr *src_attr) | ||
621 | { | ||
622 | dest_attr->cur_qp_state = src_attr->cur_qp_state; | ||
623 | dest_attr->path_mtu = src_attr->path_mtu; | ||
624 | dest_attr->path_mig_state = src_attr->path_mig_state; | ||
625 | dest_attr->qkey = src_attr->qkey; | ||
626 | dest_attr->rq_psn = src_attr->rq_psn; | ||
627 | dest_attr->sq_psn = src_attr->sq_psn; | ||
628 | dest_attr->dest_qp_num = src_attr->dest_qp_num; | ||
629 | dest_attr->qp_access_flags = src_attr->qp_access_flags; | ||
630 | |||
631 | dest_attr->max_send_wr = src_attr->cap.max_send_wr; | ||
632 | dest_attr->max_recv_wr = src_attr->cap.max_recv_wr; | ||
633 | dest_attr->max_send_sge = src_attr->cap.max_send_sge; | ||
634 | dest_attr->max_recv_sge = src_attr->cap.max_recv_sge; | ||
635 | dest_attr->max_inline_data = src_attr->cap.max_inline_data; | ||
636 | |||
637 | ib_ucm_copy_ah_attr(&dest_attr->ah_attr, &src_attr->ah_attr); | ||
638 | ib_ucm_copy_ah_attr(&dest_attr->alt_ah_attr, &src_attr->alt_ah_attr); | ||
639 | |||
640 | dest_attr->pkey_index = src_attr->pkey_index; | ||
641 | dest_attr->alt_pkey_index = src_attr->alt_pkey_index; | ||
642 | dest_attr->en_sqd_async_notify = src_attr->en_sqd_async_notify; | ||
643 | dest_attr->sq_draining = src_attr->sq_draining; | ||
644 | dest_attr->max_rd_atomic = src_attr->max_rd_atomic; | ||
645 | dest_attr->max_dest_rd_atomic = src_attr->max_dest_rd_atomic; | ||
646 | dest_attr->min_rnr_timer = src_attr->min_rnr_timer; | ||
647 | dest_attr->port_num = src_attr->port_num; | ||
648 | dest_attr->timeout = src_attr->timeout; | ||
649 | dest_attr->retry_cnt = src_attr->retry_cnt; | ||
650 | dest_attr->rnr_retry = src_attr->rnr_retry; | ||
651 | dest_attr->alt_port_num = src_attr->alt_port_num; | ||
652 | dest_attr->alt_timeout = src_attr->alt_timeout; | ||
653 | } | ||
654 | |||
655 | static ssize_t ib_ucm_init_qp_attr(struct ib_ucm_file *file, | ||
656 | const char __user *inbuf, | ||
657 | int in_len, int out_len) | ||
658 | { | ||
659 | struct ib_ucm_init_qp_attr_resp resp; | ||
660 | struct ib_ucm_init_qp_attr cmd; | ||
661 | struct ib_ucm_context *ctx; | ||
662 | struct ib_qp_attr qp_attr; | ||
663 | int result = 0; | ||
664 | |||
665 | if (out_len < sizeof(resp)) | ||
666 | return -ENOSPC; | ||
667 | |||
668 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | ||
669 | return -EFAULT; | ||
670 | |||
671 | ctx = ib_ucm_ctx_get(file, cmd.id); | ||
672 | if (IS_ERR(ctx)) | ||
673 | return PTR_ERR(ctx); | ||
674 | |||
675 | resp.qp_attr_mask = 0; | ||
676 | memset(&qp_attr, 0, sizeof qp_attr); | ||
677 | qp_attr.qp_state = cmd.qp_state; | ||
678 | result = ib_cm_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); | ||
679 | if (result) | ||
680 | goto out; | ||
681 | |||
682 | ib_ucm_copy_qp_attr(&resp, &qp_attr); | ||
683 | |||
684 | if (copy_to_user((void __user *)(unsigned long)cmd.response, | ||
685 | &resp, sizeof(resp))) | ||
686 | result = -EFAULT; | ||
687 | |||
688 | out: | ||
689 | ib_ucm_ctx_put(ctx); | ||
690 | return result; | ||
691 | } | ||
692 | |||
612 | static ssize_t ib_ucm_listen(struct ib_ucm_file *file, | 693 | static ssize_t ib_ucm_listen(struct ib_ucm_file *file, |
613 | const char __user *inbuf, | 694 | const char __user *inbuf, |
614 | int in_len, int out_len) | 695 | int in_len, int out_len) |
@@ -808,6 +889,7 @@ static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file, | |||
808 | 889 | ||
809 | ctx = ib_ucm_ctx_get(file, cmd.id); | 890 | ctx = ib_ucm_ctx_get(file, cmd.id); |
810 | if (!IS_ERR(ctx)) { | 891 | if (!IS_ERR(ctx)) { |
892 | ctx->uid = cmd.uid; | ||
811 | result = ib_send_cm_rep(ctx->cm_id, ¶m); | 893 | result = ib_send_cm_rep(ctx->cm_id, ¶m); |
812 | ib_ucm_ctx_put(ctx); | 894 | ib_ucm_ctx_put(ctx); |
813 | } else | 895 | } else |
@@ -1086,6 +1168,7 @@ static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file, | |||
1086 | [IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req, | 1168 | [IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req, |
1087 | [IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep, | 1169 | [IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep, |
1088 | [IB_USER_CM_CMD_EVENT] = ib_ucm_event, | 1170 | [IB_USER_CM_CMD_EVENT] = ib_ucm_event, |
1171 | [IB_USER_CM_CMD_INIT_QP_ATTR] = ib_ucm_init_qp_attr, | ||
1089 | }; | 1172 | }; |
1090 | 1173 | ||
1091 | static ssize_t ib_ucm_write(struct file *filp, const char __user *buf, | 1174 | static ssize_t ib_ucm_write(struct file *filp, const char __user *buf, |
@@ -1161,12 +1244,18 @@ static int ib_ucm_close(struct inode *inode, struct file *filp) | |||
1161 | 1244 | ||
1162 | down(&file->mutex); | 1245 | down(&file->mutex); |
1163 | while (!list_empty(&file->ctxs)) { | 1246 | while (!list_empty(&file->ctxs)) { |
1164 | |||
1165 | ctx = list_entry(file->ctxs.next, | 1247 | ctx = list_entry(file->ctxs.next, |
1166 | struct ib_ucm_context, file_list); | 1248 | struct ib_ucm_context, file_list); |
1167 | |||
1168 | up(&file->mutex); | 1249 | up(&file->mutex); |
1169 | ib_ucm_destroy_ctx(file, ctx->id); | 1250 | |
1251 | down(&ctx_id_mutex); | ||
1252 | idr_remove(&ctx_id_table, ctx->id); | ||
1253 | up(&ctx_id_mutex); | ||
1254 | |||
1255 | ib_destroy_cm_id(ctx->cm_id); | ||
1256 | ib_ucm_cleanup_events(ctx); | ||
1257 | kfree(ctx); | ||
1258 | |||
1170 | down(&file->mutex); | 1259 | down(&file->mutex); |
1171 | } | 1260 | } |
1172 | up(&file->mutex); | 1261 | up(&file->mutex); |
diff --git a/drivers/infiniband/core/ucm.h b/drivers/infiniband/core/ucm.h index c8819b928a1b..f46f37bc1201 100644 --- a/drivers/infiniband/core/ucm.h +++ b/drivers/infiniband/core/ucm.h | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2005 Topspin Communications. All rights reserved. | 2 | * Copyright (c) 2005 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Intel Corporation. All rights reserved. | ||
3 | * | 4 | * |
4 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 6 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -43,8 +44,6 @@ | |||
43 | #include <rdma/ib_cm.h> | 44 | #include <rdma/ib_cm.h> |
44 | #include <rdma/ib_user_cm.h> | 45 | #include <rdma/ib_user_cm.h> |
45 | 46 | ||
46 | #define IB_UCM_CM_ID_INVALID 0xffffffff | ||
47 | |||
48 | struct ib_ucm_file { | 47 | struct ib_ucm_file { |
49 | struct semaphore mutex; | 48 | struct semaphore mutex; |
50 | struct file *filp; | 49 | struct file *filp; |
@@ -58,9 +57,11 @@ struct ib_ucm_context { | |||
58 | int id; | 57 | int id; |
59 | wait_queue_head_t wait; | 58 | wait_queue_head_t wait; |
60 | atomic_t ref; | 59 | atomic_t ref; |
60 | int events_reported; | ||
61 | 61 | ||
62 | struct ib_ucm_file *file; | 62 | struct ib_ucm_file *file; |
63 | struct ib_cm_id *cm_id; | 63 | struct ib_cm_id *cm_id; |
64 | __u64 uid; | ||
64 | 65 | ||
65 | struct list_head events; /* list of pending events. */ | 66 | struct list_head events; /* list of pending events. */ |
66 | struct list_head file_list; /* member in file ctx list */ | 67 | struct list_head file_list; /* member in file ctx list */ |
@@ -71,16 +72,12 @@ struct ib_ucm_event { | |||
71 | struct list_head file_list; /* member in file event list */ | 72 | struct list_head file_list; /* member in file event list */ |
72 | struct list_head ctx_list; /* member in ctx event list */ | 73 | struct list_head ctx_list; /* member in ctx event list */ |
73 | 74 | ||
75 | struct ib_cm_id *cm_id; | ||
74 | struct ib_ucm_event_resp resp; | 76 | struct ib_ucm_event_resp resp; |
75 | void *data; | 77 | void *data; |
76 | void *info; | 78 | void *info; |
77 | int data_len; | 79 | int data_len; |
78 | int info_len; | 80 | int info_len; |
79 | /* | ||
80 | * new connection identifiers needs to be saved until | ||
81 | * userspace can get a handle on them. | ||
82 | */ | ||
83 | struct ib_cm_id *cm_id; | ||
84 | }; | 81 | }; |
85 | 82 | ||
86 | #endif /* UCM_H */ | 83 | #endif /* UCM_H */ |
diff --git a/include/rdma/ib_user_cm.h b/include/rdma/ib_user_cm.h index 72182d16778b..e4d1654276ad 100644 --- a/include/rdma/ib_user_cm.h +++ b/include/rdma/ib_user_cm.h | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2005 Topspin Communications. All rights reserved. | 2 | * Copyright (c) 2005 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Intel Corporation. All rights reserved. | ||
3 | * | 4 | * |
4 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 6 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -37,7 +38,7 @@ | |||
37 | 38 | ||
38 | #include <linux/types.h> | 39 | #include <linux/types.h> |
39 | 40 | ||
40 | #define IB_USER_CM_ABI_VERSION 1 | 41 | #define IB_USER_CM_ABI_VERSION 2 |
41 | 42 | ||
42 | enum { | 43 | enum { |
43 | IB_USER_CM_CMD_CREATE_ID, | 44 | IB_USER_CM_CMD_CREATE_ID, |
@@ -60,6 +61,7 @@ enum { | |||
60 | IB_USER_CM_CMD_SEND_SIDR_REP, | 61 | IB_USER_CM_CMD_SEND_SIDR_REP, |
61 | 62 | ||
62 | IB_USER_CM_CMD_EVENT, | 63 | IB_USER_CM_CMD_EVENT, |
64 | IB_USER_CM_CMD_INIT_QP_ATTR, | ||
63 | }; | 65 | }; |
64 | /* | 66 | /* |
65 | * command ABI structures. | 67 | * command ABI structures. |
@@ -71,6 +73,7 @@ struct ib_ucm_cmd_hdr { | |||
71 | }; | 73 | }; |
72 | 74 | ||
73 | struct ib_ucm_create_id { | 75 | struct ib_ucm_create_id { |
76 | __u64 uid; | ||
74 | __u64 response; | 77 | __u64 response; |
75 | }; | 78 | }; |
76 | 79 | ||
@@ -79,9 +82,14 @@ struct ib_ucm_create_id_resp { | |||
79 | }; | 82 | }; |
80 | 83 | ||
81 | struct ib_ucm_destroy_id { | 84 | struct ib_ucm_destroy_id { |
85 | __u64 response; | ||
82 | __u32 id; | 86 | __u32 id; |
83 | }; | 87 | }; |
84 | 88 | ||
89 | struct ib_ucm_destroy_id_resp { | ||
90 | __u32 events_reported; | ||
91 | }; | ||
92 | |||
85 | struct ib_ucm_attr_id { | 93 | struct ib_ucm_attr_id { |
86 | __u64 response; | 94 | __u64 response; |
87 | __u32 id; | 95 | __u32 id; |
@@ -94,6 +102,64 @@ struct ib_ucm_attr_id_resp { | |||
94 | __be32 remote_id; | 102 | __be32 remote_id; |
95 | }; | 103 | }; |
96 | 104 | ||
105 | struct ib_ucm_init_qp_attr { | ||
106 | __u64 response; | ||
107 | __u32 id; | ||
108 | __u32 qp_state; | ||
109 | }; | ||
110 | |||
111 | struct ib_ucm_ah_attr { | ||
112 | __u8 grh_dgid[16]; | ||
113 | __u32 grh_flow_label; | ||
114 | __u16 dlid; | ||
115 | __u16 reserved; | ||
116 | __u8 grh_sgid_index; | ||
117 | __u8 grh_hop_limit; | ||
118 | __u8 grh_traffic_class; | ||
119 | __u8 sl; | ||
120 | __u8 src_path_bits; | ||
121 | __u8 static_rate; | ||
122 | __u8 is_global; | ||
123 | __u8 port_num; | ||
124 | }; | ||
125 | |||
126 | struct ib_ucm_init_qp_attr_resp { | ||
127 | __u32 qp_attr_mask; | ||
128 | __u32 qp_state; | ||
129 | __u32 cur_qp_state; | ||
130 | __u32 path_mtu; | ||
131 | __u32 path_mig_state; | ||
132 | __u32 qkey; | ||
133 | __u32 rq_psn; | ||
134 | __u32 sq_psn; | ||
135 | __u32 dest_qp_num; | ||
136 | __u32 qp_access_flags; | ||
137 | |||
138 | struct ib_ucm_ah_attr ah_attr; | ||
139 | struct ib_ucm_ah_attr alt_ah_attr; | ||
140 | |||
141 | /* ib_qp_cap */ | ||
142 | __u32 max_send_wr; | ||
143 | __u32 max_recv_wr; | ||
144 | __u32 max_send_sge; | ||
145 | __u32 max_recv_sge; | ||
146 | __u32 max_inline_data; | ||
147 | |||
148 | __u16 pkey_index; | ||
149 | __u16 alt_pkey_index; | ||
150 | __u8 en_sqd_async_notify; | ||
151 | __u8 sq_draining; | ||
152 | __u8 max_rd_atomic; | ||
153 | __u8 max_dest_rd_atomic; | ||
154 | __u8 min_rnr_timer; | ||
155 | __u8 port_num; | ||
156 | __u8 timeout; | ||
157 | __u8 retry_cnt; | ||
158 | __u8 rnr_retry; | ||
159 | __u8 alt_port_num; | ||
160 | __u8 alt_timeout; | ||
161 | }; | ||
162 | |||
97 | struct ib_ucm_listen { | 163 | struct ib_ucm_listen { |
98 | __be64 service_id; | 164 | __be64 service_id; |
99 | __be64 service_mask; | 165 | __be64 service_mask; |
@@ -157,6 +223,7 @@ struct ib_ucm_req { | |||
157 | }; | 223 | }; |
158 | 224 | ||
159 | struct ib_ucm_rep { | 225 | struct ib_ucm_rep { |
226 | __u64 uid; | ||
160 | __u64 data; | 227 | __u64 data; |
161 | __u32 id; | 228 | __u32 id; |
162 | __u32 qpn; | 229 | __u32 qpn; |
@@ -232,7 +299,6 @@ struct ib_ucm_event_get { | |||
232 | }; | 299 | }; |
233 | 300 | ||
234 | struct ib_ucm_req_event_resp { | 301 | struct ib_ucm_req_event_resp { |
235 | __u32 listen_id; | ||
236 | /* device */ | 302 | /* device */ |
237 | /* port */ | 303 | /* port */ |
238 | struct ib_ucm_path_rec primary_path; | 304 | struct ib_ucm_path_rec primary_path; |
@@ -287,7 +353,6 @@ struct ib_ucm_apr_event_resp { | |||
287 | }; | 353 | }; |
288 | 354 | ||
289 | struct ib_ucm_sidr_req_event_resp { | 355 | struct ib_ucm_sidr_req_event_resp { |
290 | __u32 listen_id; | ||
291 | /* device */ | 356 | /* device */ |
292 | /* port */ | 357 | /* port */ |
293 | __u16 pkey; | 358 | __u16 pkey; |
@@ -307,6 +372,7 @@ struct ib_ucm_sidr_rep_event_resp { | |||
307 | #define IB_UCM_PRES_ALTERNATE 0x08 | 372 | #define IB_UCM_PRES_ALTERNATE 0x08 |
308 | 373 | ||
309 | struct ib_ucm_event_resp { | 374 | struct ib_ucm_event_resp { |
375 | __u64 uid; | ||
310 | __u32 id; | 376 | __u32 id; |
311 | __u32 event; | 377 | __u32 event; |
312 | __u32 present; | 378 | __u32 present; |