author	Yossi Etigin <root@voltaire.com>	2009-04-08 16:42:33 -0400
committer	Roland Dreier <rolandd@cisco.com>	2009-04-08 16:42:33 -0400
commit	d2ca39f262806aa2f035f680a14aa55ff9e3d889 (patch)
tree	e58c94b8c6e02b72565daff9c19d156ac0f02655 /drivers
parent	84adeee9aaa0d81712de1e0ea74caed3398e4a1d (diff)
RDMA/cma: Create cm id even when IB port is down
When doing rdma_resolve_addr(), if the relevant IB port is down, the function fails and the cm_id is not bound to the correct device. Therefore, the application does not have a device handle and cannot wait for the port to become active. The function fails because the underlying IPoIB interface is not joined to the broadcast group, so the SA does not have a multicast record to take a Q_Key from.

The fix is to use lazy Q_Key resolution: cma_set_qkey() sets id_priv->qkey if it has not been set already, and is called just before the Q_Key is actually required.

Signed-off-by: Yossi Etigin <yosefe@voltaire.com>
Acked-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
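As an illustration of the lazy-resolution idea (not the kernel code itself), the following self-contained C sketch mirrors the shape of the change: the Q_Key starts out unset when the id is bound to a device, and a cma_set_qkey()-style helper resolves it on first use. The id_priv struct, the resolve_from_sa() helper, and the numeric constants are simplified stand-ins introduced for this sketch; the actual implementation is in the diff below.

/*
 * Minimal sketch of lazy Q_Key resolution.  The types and the
 * resolve_from_sa() helper are simplified stand-ins, not kernel APIs.
 */
#include <stdio.h>

enum port_space { PS_UDP, PS_IPOIB };

struct id_priv {
	enum port_space ps;
	unsigned int qkey;		/* 0 means "not resolved yet" */
};

/* Stand-in for the SA multicast record lookup, which can fail while
 * the underlying IPoIB interface has not joined the broadcast group. */
static int resolve_from_sa(unsigned int *qkey)
{
	*qkey = 0x0b1b;			/* pretend the SA returned this Q_Key */
	return 0;
}

/* Resolve the Q_Key only on first use; later calls are no-ops. */
static int set_qkey(struct id_priv *id)
{
	int ret = 0;

	if (id->qkey)
		return 0;		/* already resolved */

	switch (id->ps) {
	case PS_UDP:
		id->qkey = 0x01234567;	/* a fixed well-known Q_Key */
		break;
	case PS_IPOIB:
		ret = resolve_from_sa(&id->qkey);
		break;
	}
	return ret;
}

int main(void)
{
	struct id_priv id = { .ps = PS_IPOIB, .qkey = 0 };

	/* Callers invoke set_qkey() right before the Q_Key is needed,
	 * not when the id is first bound to a device. */
	if (!set_qkey(&id))
		printf("qkey = 0x%x\n", id.qkey);
	return 0;
}

The benefit, as in the patch, is that binding a cm_id to a device no longer depends on the SA lookup succeeding; Q_Key resolution is deferred until the value is actually needed.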
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/infiniband/core/cma.c	41
1 files changed, 27 insertions, 14 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 3f9c03a36571..851de83ff455 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -297,21 +297,25 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv)
 	id_priv->cma_dev = NULL;
 }
 
-static int cma_set_qkey(struct ib_device *device, u8 port_num,
-			enum rdma_port_space ps,
-			struct rdma_dev_addr *dev_addr, u32 *qkey)
+static int cma_set_qkey(struct rdma_id_private *id_priv)
 {
 	struct ib_sa_mcmember_rec rec;
 	int ret = 0;
 
-	switch (ps) {
+	if (id_priv->qkey)
+		return 0;
+
+	switch (id_priv->id.ps) {
 	case RDMA_PS_UDP:
-		*qkey = RDMA_UDP_QKEY;
+		id_priv->qkey = RDMA_UDP_QKEY;
 		break;
 	case RDMA_PS_IPOIB:
-		ib_addr_get_mgid(dev_addr, &rec.mgid);
-		ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
-		*qkey = be32_to_cpu(rec.qkey);
+		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
+		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
+					     id_priv->id.port_num, &rec.mgid,
+					     &rec);
+		if (!ret)
+			id_priv->qkey = be32_to_cpu(rec.qkey);
 		break;
 	default:
 		break;
@@ -341,12 +345,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
 		ret = ib_find_cached_gid(cma_dev->device, &gid,
 					 &id_priv->id.port_num, NULL);
 		if (!ret) {
-			ret = cma_set_qkey(cma_dev->device,
-					   id_priv->id.port_num,
-					   id_priv->id.ps, dev_addr,
-					   &id_priv->qkey);
-			if (!ret)
-				cma_attach_to_dev(id_priv, cma_dev);
+			cma_attach_to_dev(id_priv, cma_dev);
 			break;
 		}
 	}
@@ -578,6 +577,10 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
 
 	if (cma_is_ud_ps(id_priv->id.ps)) {
+		ret = cma_set_qkey(id_priv);
+		if (ret)
+			return ret;
+
 		qp_attr->qkey = id_priv->qkey;
 		*qp_attr_mask |= IB_QP_QKEY;
 	} else {
@@ -2201,6 +2204,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 			event.status = ib_event->param.sidr_rep_rcvd.status;
 			break;
 		}
+		ret = cma_set_qkey(id_priv);
+		if (ret) {
+			event.event = RDMA_CM_EVENT_ADDR_ERROR;
+			event.status = -EINVAL;
+			break;
+		}
 		if (id_priv->qkey != rep->qkey) {
 			event.event = RDMA_CM_EVENT_UNREACHABLE;
 			event.status = -EINVAL;
@@ -2480,10 +2489,14 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
 			     const void *private_data, int private_data_len)
 {
 	struct ib_cm_sidr_rep_param rep;
+	int ret;
 
 	memset(&rep, 0, sizeof rep);
 	rep.status = status;
 	if (status == IB_SIDR_SUCCESS) {
+		ret = cma_set_qkey(id_priv);
+		if (ret)
+			return ret;
 		rep.qp_num = id_priv->qp_num;
 		rep.qkey = id_priv->qkey;
 	}