 drivers/infiniband/ulp/ipoib/ipoib.h    |  5 +++++
 drivers/infiniband/ulp/ipoib/ipoib_ib.c | 59 +++++++++++++++++++++++++++++++-
 2 files changed, 63 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 769044c25ca5..2703d9a3e9b4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -299,6 +299,11 @@ struct ipoib_neigh_table {
 	struct completion	deleted;
 };
 
+struct ipoib_qp_state_validate {
+	struct work_struct work;
+	struct ipoib_dev_priv *priv;
+};
+
 /*
  * Device private locking: network stack tx_lock protects members used
  * in TX fast path, lock protects everything else. lock nests inside
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 29b376dadd2b..63b92cbb29ad 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -327,6 +327,51 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
 	}
 }
 
+/*
+ * As the result of a completion error, the QP can be transitioned to the
+ * SQE state. This function checks whether the (send) QP is in the SQE
+ * state and, if so, moves it back to RTS to make it functional again.
+ */
+static void ipoib_qp_state_validate_work(struct work_struct *work)
+{
+	struct ipoib_qp_state_validate *qp_work =
+		container_of(work, struct ipoib_qp_state_validate, work);
+
+	struct ipoib_dev_priv *priv = qp_work->priv;
+	struct ib_qp_attr qp_attr;
+	struct ib_qp_init_attr query_init_attr;
+	int ret;
+
+	ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+	if (ret) {
+		ipoib_warn(priv, "%s: Failed to query QP, ret: %d\n",
+			   __func__, ret);
+		goto free_res;
+	}
+	pr_info("%s: QP: 0x%x is in state: %d\n",
+		__func__, priv->qp->qp_num, qp_attr.qp_state);
+
+	/* Currently the only supported transition is SQE -> RTS */
+	if (qp_attr.qp_state == IB_QPS_SQE) {
+		qp_attr.qp_state = IB_QPS_RTS;
+
+		ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
+		if (ret) {
+			pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
+				ret, priv->qp->qp_num);
+			goto free_res;
+		}
+		pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
+			__func__, priv->qp->qp_num);
+	} else {
+		pr_warn("QP (0x%x) will stay in state: %d\n",
+			priv->qp->qp_num, qp_attr.qp_state);
+	}
+
+free_res:
+	kfree(qp_work);
+}
+
 static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -358,10 +403,22 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 		netif_wake_queue(dev);
 
 	if (wc->status != IB_WC_SUCCESS &&
-	    wc->status != IB_WC_WR_FLUSH_ERR)
+	    wc->status != IB_WC_WR_FLUSH_ERR) {
+		struct ipoib_qp_state_validate *qp_work;
 		ipoib_warn(priv, "failed send event "
 			   "(status=%d, wrid=%d vend_err %x)\n",
 			   wc->status, wr_id, wc->vendor_err);
+		qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
+		if (!qp_work) {
+			ipoib_warn(priv, "%s: Failed to allocate ipoib_qp_state_validate for QP: 0x%x\n",
+				   __func__, priv->qp->qp_num);
+			return;
+		}
+
+		INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
+		qp_work->priv = priv;
+		queue_work(priv->wq, &qp_work->work);
+	}
 }
 
 static int poll_tx(struct ipoib_dev_priv *priv)
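
For context: the patch defers QP recovery because ipoib_ib_handle_tx_wc() runs in atomic (completion) context, while the verbs calls the recovery needs (ib_query_qp()/ib_modify_qp()) may sleep and so must run in process context. Below is a minimal sketch of that defer-to-workqueue pattern, not part of the patch; all demo_* names are hypothetical, and it queues onto the generic system_wq where the patch uses the driver's own priv->wq.

/* Sketch of the pattern only; demo_* names are hypothetical. */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_recovery_work {
	struct work_struct work;	/* embedded so container_of() works */
	void *ctx;			/* stands in for struct ipoib_dev_priv * */
};

static void demo_recover(struct work_struct *work)
{
	struct demo_recovery_work *rw =
		container_of(work, struct demo_recovery_work, work);

	/*
	 * Process context: sleeping is allowed here, so calls like
	 * ib_query_qp()/ib_modify_qp() would be safe at this point.
	 */

	kfree(rw);		/* the work item owns itself; free when done */
}

/* Called from atomic context, as ipoib_ib_handle_tx_wc() is. */
static void demo_handle_error(void *ctx)
{
	struct demo_recovery_work *rw = kzalloc(sizeof(*rw), GFP_ATOMIC);

	if (!rw)
		return;		/* allocation failed: recovery is skipped */

	rw->ctx = ctx;
	INIT_WORK(&rw->work, demo_recover);
	queue_work(system_wq, &rw->work);
}

Because the work item is heap-allocated and freed by its own handler, the completion path stays lock-free and never blocks; if the GFP_ATOMIC allocation fails, recovery is simply not attempted for that event.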