-rw-r--r--  drivers/infiniband/core/cm.c   115
-rw-r--r--  drivers/infiniband/core/ucm.c   12
-rw-r--r--  include/rdma/ib_cm.h            16
-rw-r--r--  include/rdma/ib_user_cm.h        7
4 files changed, 110 insertions, 40 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 78d9c0c33148..e5dc4530808a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -147,12 +147,12 @@ struct cm_id_private {
 	__be32 rq_psn;
 	int timeout_ms;
 	enum ib_mtu path_mtu;
+	__be16 pkey;
 	u8 private_data_len;
 	u8 max_cm_retries;
 	u8 peer_to_peer;
 	u8 responder_resources;
 	u8 initiator_depth;
-	u8 local_ack_timeout;
 	u8 retry_count;
 	u8 rnr_retry_count;
 	u8 service_timeout;
@@ -690,7 +690,7 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 	 * timewait before notifying the user that we've exited timewait.
 	 */
 	cm_id_priv->id.state = IB_CM_TIMEWAIT;
-	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
+	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
 	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
 			   msecs_to_jiffies(wait_time));
 	cm_id_priv->timewait_info = NULL;
@@ -1009,6 +1009,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 	cm_id_priv->responder_resources = param->responder_resources;
 	cm_id_priv->retry_count = param->retry_count;
 	cm_id_priv->path_mtu = param->primary_path->mtu;
+	cm_id_priv->pkey = param->primary_path->pkey;
 	cm_id_priv->qp_type = param->qp_type;
 
 	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
@@ -1023,8 +1024,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 
 	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
 	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-		cm_req_get_primary_local_ack_timeout(req_msg);
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
@@ -1409,9 +1408,8 @@ static int cm_req_handler(struct cm_work *work)
 	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
 	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
 	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
+	cm_id_priv->pkey = req_msg->pkey;
 	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-		cm_req_get_primary_local_ack_timeout(req_msg);
 	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
 	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
 	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
@@ -1715,7 +1713,7 @@ static int cm_establish_handler(struct cm_work *work)
 	unsigned long flags;
 	int ret;
 
-	/* See comment in ib_cm_establish about lookup. */
+	/* See comment in cm_establish about lookup. */
 	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
 	if (!cm_id_priv)
 		return -EINVAL;
@@ -2401,11 +2399,16 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	if (cm_id->state != IB_CM_ESTABLISHED ||
-	    cm_id->lap_state != IB_CM_LAP_IDLE) {
+	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
+	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
+	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+	if (ret)
+		goto out;
+
 	ret = cm_alloc_msg(cm_id_priv, &msg);
 	if (ret)
 		goto out;
@@ -2430,7 +2433,8 @@ out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 }
 EXPORT_SYMBOL(ib_send_cm_lap);
 
-static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
+static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
+				    struct ib_sa_path_rec *path,
 				    struct cm_lap_msg *lap_msg)
 {
 	memset(path, 0, sizeof *path);
@@ -2442,10 +2446,10 @@ static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
 	path->hop_limit = lap_msg->alt_hop_limit;
 	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
 	path->reversible = 1;
-	/* pkey is same as in REQ */
+	path->pkey = cm_id_priv->pkey;
 	path->sl = cm_lap_get_sl(lap_msg);
 	path->mtu_selector = IB_SA_EQ;
-	/* mtu is same as in REQ */
+	path->mtu = cm_id_priv->path_mtu;
 	path->rate_selector = IB_SA_EQ;
 	path->rate = cm_lap_get_packet_rate(lap_msg);
 	path->packet_life_time_selector = IB_SA_EQ;
@@ -2471,7 +2475,7 @@ static int cm_lap_handler(struct cm_work *work)
 
 	param = &work->cm_event.param.lap_rcvd;
 	param->alternate_path = &work->path[0];
-	cm_format_path_from_lap(param->alternate_path, lap_msg);
+	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
 	work->cm_event.private_data = &lap_msg->private_data;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -2479,6 +2483,7 @@ static int cm_lap_handler(struct cm_work *work)
 		goto unlock;
 
 	switch (cm_id_priv->id.lap_state) {
+	case IB_CM_LAP_UNINIT:
 	case IB_CM_LAP_IDLE:
 		break;
 	case IB_CM_MRA_LAP_SENT:
@@ -2501,6 +2506,10 @@ static int cm_lap_handler(struct cm_work *work)
 
 	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
 	cm_id_priv->tid = lap_msg->hdr.tid;
+	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+				work->mad_recv_wc->recv_buf.grh,
+				&cm_id_priv->av);
+	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3039,7 +3048,7 @@ static void cm_work_handler(void *data)
 	cm_free_work(work);
 }
 
-int ib_cm_establish(struct ib_cm_id *cm_id)
+static int cm_establish(struct ib_cm_id *cm_id)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_work *work;
@@ -3087,7 +3096,44 @@ int ib_cm_establish(struct ib_cm_id *cm_id)
 out:
 	return ret;
 }
-EXPORT_SYMBOL(ib_cm_establish);
+
+static int cm_migrate(struct ib_cm_id *cm_id)
+{
+	struct cm_id_private *cm_id_priv;
+	unsigned long flags;
+	int ret = 0;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	if (cm_id->state == IB_CM_ESTABLISHED &&
+	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
+		cm_id->lap_state = IB_CM_LAP_IDLE;
+		cm_id_priv->av = cm_id_priv->alt_av;
+	} else
+		ret = -EINVAL;
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+	return ret;
+}
+
+int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
+{
+	int ret;
+
+	switch (event) {
+	case IB_EVENT_COMM_EST:
+		ret = cm_establish(cm_id);
+		break;
+	case IB_EVENT_PATH_MIG:
+		ret = cm_migrate(cm_id);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ib_cm_notify);
 
 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
 			    struct ib_mad_recv_wc *mad_recv_wc)
@@ -3220,6 +3266,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 	if (cm_id_priv->alt_av.ah_attr.dlid) {
 		*qp_attr_mask |= IB_QP_ALT_PATH;
 		qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+		qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+		qp_attr->alt_timeout =
+			cm_id_priv->alt_av.packet_life_time + 1;
 		qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
 	}
 	ret = 0;
@@ -3246,19 +3295,31 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
 	case IB_CM_ESTABLISHED:
-		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
-		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
-		if (cm_id_priv->qp_type == IB_QPT_RC) {
-			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
-					 IB_QP_RNR_RETRY |
-					 IB_QP_MAX_QP_RD_ATOMIC;
-			qp_attr->timeout = cm_id_priv->local_ack_timeout;
-			qp_attr->retry_cnt = cm_id_priv->retry_count;
-			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
-			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
-		}
-		if (cm_id_priv->alt_av.ah_attr.dlid) {
-			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
+			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
+			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
+			if (cm_id_priv->qp_type == IB_QPT_RC) {
+				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+						 IB_QP_RNR_RETRY |
+						 IB_QP_MAX_QP_RD_ATOMIC;
+				qp_attr->timeout =
+					cm_id_priv->av.packet_life_time + 1;
+				qp_attr->retry_cnt = cm_id_priv->retry_count;
+				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
+				qp_attr->max_rd_atomic =
+					cm_id_priv->initiator_depth;
+			}
+			if (cm_id_priv->alt_av.ah_attr.dlid) {
+				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+				qp_attr->path_mig_state = IB_MIG_REARM;
+			}
+		} else {
+			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
+			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+			qp_attr->alt_timeout =
+				cm_id_priv->alt_av.packet_life_time + 1;
+			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
 			qp_attr->path_mig_state = IB_MIG_REARM;
 		}
 		ret = 0;
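
For context on how a consumer would exercise the reworked cm_init_qp_rts_attr() above, here is a minimal sketch (not part of this patch): once the connection's lap_state is no longer IB_CM_LAP_UNINIT (an alternate path was loaded or a failover was reported), requesting RTS attributes from the CM yields IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE, which the consumer applies to re-arm migration. The helper name and surrounding error handling are illustrative assumptions.

#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>

/* Hypothetical consumer helper (sketch only): re-arm path migration after an
 * alternate path has been loaded.  With the change above, ib_cm_init_qp_attr()
 * returns IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE for an established connection
 * whose lap_state is not IB_CM_LAP_UNINIT.
 */
static int example_rearm_alt_path(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	/* Loads alt_ah_attr/alt_port_num/alt_pkey_index/alt_timeout and sets
	 * path_mig_state to IB_MIG_REARM on the consumer's QP.
	 */
	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
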
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index b4894ba223b7..1f4f2d2cfa2e 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -683,11 +683,11 @@ out:
 	return result;
 }
 
-static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
-				const char __user *inbuf,
-				int in_len, int out_len)
+static ssize_t ib_ucm_notify(struct ib_ucm_file *file,
+			     const char __user *inbuf,
+			     int in_len, int out_len)
 {
-	struct ib_ucm_establish cmd;
+	struct ib_ucm_notify cmd;
 	struct ib_ucm_context *ctx;
 	int result;
 
@@ -698,7 +698,7 @@ static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	result = ib_cm_establish(ctx->cm_id);
+	result = ib_cm_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
 	ib_ucm_ctx_put(ctx);
 	return result;
 }
@@ -1105,7 +1105,7 @@ static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
 	[IB_USER_CM_CMD_DESTROY_ID] = ib_ucm_destroy_id,
 	[IB_USER_CM_CMD_ATTR_ID] = ib_ucm_attr_id,
 	[IB_USER_CM_CMD_LISTEN] = ib_ucm_listen,
-	[IB_USER_CM_CMD_ESTABLISH] = ib_ucm_establish,
+	[IB_USER_CM_CMD_NOTIFY] = ib_ucm_notify,
 	[IB_USER_CM_CMD_SEND_REQ] = ib_ucm_send_req,
 	[IB_USER_CM_CMD_SEND_REP] = ib_ucm_send_rep,
 	[IB_USER_CM_CMD_SEND_RTU] = ib_ucm_send_rtu,
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index c9b4738be9d6..5c070176d9ab 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -60,6 +60,7 @@ enum ib_cm_state {
 };
 
 enum ib_cm_lap_state {
+	IB_CM_LAP_UNINIT,
 	IB_CM_LAP_IDLE,
 	IB_CM_LAP_SENT,
 	IB_CM_LAP_RCVD,
@@ -443,13 +444,20 @@ int ib_send_cm_drep(struct ib_cm_id *cm_id,
 		    u8 private_data_len);
 
 /**
- * ib_cm_establish - Forces a connection state to established.
+ * ib_cm_notify - Notifies the CM of an event reported to the consumer.
  * @cm_id: Connection identifier to transition to established.
+ * @event: Type of event.
  *
- * This routine should be invoked by users who receive messages on a
- * connected QP before an RTU has been received.
+ * This routine should be invoked by users to notify the CM of relevant
+ * communication events.  Events that should be reported to the CM and
+ * when to report them are:
+ *
+ * IB_EVENT_COMM_EST - Used when a message is received on a connected
+ * QP before an RTU has been received.
+ * IB_EVENT_PATH_MIG - Notifies the CM that the connection has failed over
+ * to the alternate path.
  */
-int ib_cm_establish(struct ib_cm_id *cm_id);
+int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event);
 
 /**
  * ib_send_cm_rej - Sends a connection rejection message to the
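
To illustrate the documentation added above (a hedged sketch, not part of the patch): a consumer would typically forward these two asynchronous events from its QP event handler straight to the CM. The handler name and the use of the QP event-handler context to recover the ib_cm_id are assumptions made for the example.

#include <rdma/ib_verbs.h>
#include <rdma/ib_cm.h>

/* Hypothetical QP event handler in a CM consumer (sketch): report the two
 * events named in the ib_cm_notify() comment above.  Assumes the consumer
 * stored its ib_cm_id as the QP's event-handler context.
 */
static void example_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_cm_id *cm_id = context;

	switch (event->event) {
	case IB_EVENT_COMM_EST:		/* data arrived before the RTU */
	case IB_EVENT_PATH_MIG:		/* HW failed over to the alternate path */
		ib_cm_notify(cm_id, event->event);
		break;
	default:
		break;
	}
}
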
diff --git a/include/rdma/ib_user_cm.h b/include/rdma/ib_user_cm.h
index 066c20b7cdfb..37650afb982c 100644
--- a/include/rdma/ib_user_cm.h
+++ b/include/rdma/ib_user_cm.h
@@ -38,7 +38,7 @@
 
 #include <rdma/ib_user_sa.h>
 
-#define IB_USER_CM_ABI_VERSION 4
+#define IB_USER_CM_ABI_VERSION 5
 
 enum {
 	IB_USER_CM_CMD_CREATE_ID,
@@ -46,7 +46,7 @@ enum {
 	IB_USER_CM_CMD_ATTR_ID,
 
 	IB_USER_CM_CMD_LISTEN,
-	IB_USER_CM_CMD_ESTABLISH,
+	IB_USER_CM_CMD_NOTIFY,
 
 	IB_USER_CM_CMD_SEND_REQ,
 	IB_USER_CM_CMD_SEND_REP,
@@ -117,8 +117,9 @@ struct ib_ucm_listen {
 	__u32 reserved;
 };
 
-struct ib_ucm_establish {
+struct ib_ucm_notify {
 	__u32 id;
+	__u32 event;
 };
 
 struct ib_ucm_private_data {