Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                  6
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c              11
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h             2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c        9
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  31
5 files changed, 36 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index f29dbb767e87..9559248f265b 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1342,6 +1342,12 @@ static __be32 convert_access(int acc)
 static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
 {
 	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
+	int i;
+
+	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
+		wr->wr.fast_reg.page_list->page_list[i] =
+			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
+				    MLX4_MTT_FLAG_PRESENT);
 
 	fseg->flags = convert_access(wr->wr.fast_reg.access_flags);
 	fseg->mem_key = cpu_to_be32(wr->wr.fast_reg.rkey);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 9f0b964b2c99..499d3cf83e1f 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1956,13 +1956,6 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
 		return ret;
 	cleanup_retrans_entry(cm_node);
 	cm_node->state = NES_CM_STATE_CLOSED;
-	ret = send_fin(cm_node, NULL);
-
-	if (cm_node->accept_pend) {
-		BUG_ON(!cm_node->listener);
-		atomic_dec(&cm_node->listener->pend_accepts_cnt);
-		BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
-	}
 
 	ret = send_reset(cm_node, NULL);
 	return ret;
@@ -2383,6 +2376,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
 			atomic_inc(&cm_disconnects);
 			cm_event.event = IW_CM_EVENT_DISCONNECT;
 			if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
+				issued_disconnect_reset = 1;
 				cm_event.status = IW_CM_EVENT_STATUS_RESET;
 				nes_debug(NES_DBG_CM, "Generating a CM "
 					"Disconnect Event (status reset) for "
@@ -2508,7 +2502,6 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
 		nes_debug(NES_DBG_CM, "Call close API\n");
 
 		g_cm_core->api->close(g_cm_core, nesqp->cm_node);
-		nesqp->cm_node = NULL;
 	}
 
 	return ret;
@@ -2837,6 +2830,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	cm_node->apbvt_set = 1;
 	nesqp->cm_node = cm_node;
 	cm_node->nesqp = nesqp;
+	nes_add_ref(&nesqp->ibqp);
 
 	return 0;
 }
@@ -3167,7 +3161,6 @@ static void cm_event_connect_error(struct nes_cm_event *event)
 	if (ret)
 		printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
 			"ret=%d\n", __func__, __LINE__, ret);
-	nes_rem_ref(&nesqp->ibqp);
 	cm_id->rem_ref(cm_id);
 
 	rem_ref_cm_node(event->cm_node->cm_core, event->cm_node);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index b0ffc9abe8c0..05eb41b8ab63 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -293,6 +293,7 @@ struct ipoib_dev_priv {
 
 	struct delayed_work pkey_poll_task;
 	struct delayed_work mcast_task;
+	struct work_struct carrier_on_task;
 	struct work_struct flush_light;
 	struct work_struct flush_normal;
 	struct work_struct flush_heavy;
@@ -464,6 +465,7 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_dev_cleanup(struct net_device *dev);
 
 void ipoib_mcast_join_task(struct work_struct *work);
+void ipoib_mcast_carrier_on_task(struct work_struct *work);
 void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
 
 void ipoib_mcast_restart_task(struct work_struct *work);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7e9e218738fa..e9ca3cb57d52 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -404,7 +404,7 @@ static void path_rec_completion(int status,
 	struct net_device *dev = path->dev;
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_ah *ah = NULL;
-	struct ipoib_ah *old_ah;
+	struct ipoib_ah *old_ah = NULL;
 	struct ipoib_neigh *neigh, *tn;
 	struct sk_buff_head skqueue;
 	struct sk_buff *skb;
@@ -428,12 +428,12 @@ static void path_rec_completion(int status,
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	old_ah = path->ah;
-	path->ah = ah;
-
 	if (ah) {
 		path->pathrec = *pathrec;
 
+		old_ah = path->ah;
+		path->ah = ah;
+
 		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
 			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);
 
@@ -1075,6 +1075,7 @@ static void ipoib_setup(struct net_device *dev)
 
 	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
 	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
+	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
 	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
 	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
 	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ac33c8f3ea85..aae28620a6e5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -366,6 +366,21 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
 	return ret;
 }
 
+void ipoib_mcast_carrier_on_task(struct work_struct *work)
+{
+	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+						   carrier_on_task);
+
+	/*
+	 * Take rtnl_lock to avoid racing with ipoib_stop() and
+	 * turning the carrier back on while a device is being
+	 * removed.
+	 */
+	rtnl_lock();
+	netif_carrier_on(priv->dev);
+	rtnl_unlock();
+}
+
 static int ipoib_mcast_join_complete(int status,
 				     struct ib_sa_multicast *multicast)
 {
@@ -392,16 +407,12 @@ static int ipoib_mcast_join_complete(int status,
 				   &priv->mcast_task, 0);
 		mutex_unlock(&mcast_mutex);
 
-		if (mcast == priv->broadcast) {
-			/*
-			 * Take RTNL lock here to avoid racing with
-			 * ipoib_stop() and turning the carrier back
-			 * on while a device is being removed.
-			 */
-			rtnl_lock();
-			netif_carrier_on(dev);
-			rtnl_unlock();
-		}
+		/*
+		 * Defer carrier on work to ipoib_workqueue to avoid a
+		 * deadlock on rtnl_lock here.
+		 */
+		if (mcast == priv->broadcast)
+			queue_work(ipoib_workqueue, &priv->carrier_on_task);
 
 		return 0;
 	}