author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-25 18:49:56 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-25 18:49:56 -0400
commit    1ea0975875294964853209927feccdf6bc8cf5f9 (patch)
tree      b5d2feab6211ee987fb30bca35493fd6569003bb
parent    a41d7f000447015f3f5fe7223f1d53845268e2e8 (diff)
parent    2dfbfc37121d307e1f1d24c2979382cb17b19347 (diff)
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB/cm: Drain cq in ipoib_cm_dev_stop()
  IPoIB/cm: Fix timeout check in ipoib_cm_dev_stop()
  IB/ehca: Fix number of send WRs reported for new QP
  IB/mlx4: Initialize send queue entry ownership bits
  IB/mlx4: Don't allocate RQ doorbell if using SRQ
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_if.c      |  2
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c          | 59
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h     |  1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c  |  3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c  | 31
5 files changed, 60 insertions(+), 36 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 7f0beec74f70..5766ae3a2029 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -331,7 +331,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
 				 0);
 	qp->ipz_qp_handle.handle = outs[0];
 	qp->real_qp_num = (u32)outs[1];
-	parms->act_nr_send_sges =
+	parms->act_nr_send_wqes =
 		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
 	parms->act_nr_recv_wqes =
 		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
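The fix is a one-word field correction: the hcall reports the actual number of send WRs in outs[2], but the value was being stored into parms->act_nr_send_sges, so callers of hipz_h_alloc_resource_qp() saw an SGE count where a send-WQE count belonged. As a rough sketch of the idea (the bit positions are illustrative assumptions, not the real EHCA_BMASK_GET field descriptors), the hypervisor packs both actual queue depths into a single 64-bit output word:

	/* Illustrative only: the real driver extracts these with
	 * EHCA_BMASK_GET() and the H_ALL_RES_QP_ACT_OUTST_{SEND,RECV}_WR
	 * descriptors; the shifts below are assumptions for the sketch. */
	static inline u16 act_send_wqes(u64 out2) { return (u16)(out2 >> 16); }
	static inline u16 act_recv_wqes(u64 out2) { return (u16)(out2 & 0xffff); }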
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a824bc5f79fd..dc137dec2308 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -270,9 +270,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 				 struct ib_qp_init_attr *init_attr,
 				 struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
 {
-	struct mlx4_wqe_ctrl_seg *ctrl;
 	int err;
-	int i;
 
 	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
@@ -319,20 +317,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 
-		err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
-					  ucmd.db_addr, &qp->db);
-		if (err)
-			goto err_mtt;
+		if (!init_attr->srq) {
+			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
+						  ucmd.db_addr, &qp->db);
+			if (err)
+				goto err_mtt;
+		}
 	} else {
 		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
 		if (err)
 			goto err;
 
-		err = mlx4_ib_db_alloc(dev, &qp->db, 0);
-		if (err)
-			goto err;
+		if (!init_attr->srq) {
+			err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+			if (err)
+				goto err;
 
-		*qp->db.db = 0;
+			*qp->db.db = 0;
+		}
 
 		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
 			err = -ENOMEM;
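Both branches above apply the same rule: a QP whose receives go to a shared receive queue never rings its own RQ doorbell, so the per-QP doorbell record is neither mapped (userspace QPs) nor allocated (kernel QPs) when init_attr->srq is set. The error paths and destroy_qp_common() in the hunks below add matching !srq checks, keeping setup and teardown symmetric so cleanup never unmaps or frees a doorbell that was never created.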
@@ -348,11 +350,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	if (err)
 		goto err_mtt;
 
-	for (i = 0; i < qp->sq.max; ++i) {
-		ctrl = get_send_wqe(qp, i);
-		ctrl->owner_opcode = cpu_to_be32(1 << 31);
-	}
-
 	qp->sq.wrid = kmalloc(qp->sq.max * sizeof (u64), GFP_KERNEL);
 	qp->rq.wrid = kmalloc(qp->rq.max * sizeof (u64), GFP_KERNEL);
 
@@ -386,7 +383,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	return 0;
 
 err_wrid:
-	if (pd->uobject)
+	if (pd->uobject && !init_attr->srq)
 		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
 	else {
 		kfree(qp->sq.wrid);
@@ -403,7 +400,7 @@ err_buf:
 	mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
 
 err_db:
-	if (!pd->uobject)
+	if (!pd->uobject && !init_attr->srq)
 		mlx4_ib_db_free(dev, &qp->db);
 
 err:
@@ -481,14 +478,16 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
 	if (is_user) {
-		mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
-				      &qp->db);
+		if (!qp->ibqp.srq)
+			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
+					      &qp->db);
 		ib_umem_release(qp->umem);
 	} else {
 		kfree(qp->sq.wrid);
 		kfree(qp->rq.wrid);
 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
-		mlx4_ib_db_free(dev, &qp->db);
+		if (!qp->ibqp.srq)
+			mlx4_ib_db_free(dev, &qp->db);
 	}
 }
 
@@ -852,7 +851,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (ibqp->srq)
 		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
 
-	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+	if (!ibqp->srq && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
 		context->db_rec_addr = cpu_to_be64(qp->db.dma);
 
 	if (cur_state == IB_QPS_INIT &&
@@ -872,6 +871,21 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	else
 		sqd_event = 0;
 
+	/*
+	 * Before passing a kernel QP to the HW, make sure that the
+	 * ownership bits of the send queue are set so that the
+	 * hardware doesn't start processing stale work requests.
+	 */
+	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+		struct mlx4_wqe_ctrl_seg *ctrl;
+		int i;
+
+		for (i = 0; i < qp->sq.max; ++i) {
+			ctrl = get_send_wqe(qp, i);
+			ctrl->owner_opcode = cpu_to_be32(1 << 31);
+		}
+	}
+
 	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
 			     to_mlx4_state(new_state), context, optpar,
 			     sqd_event, &qp->mqp);
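The comment in the hunk carries the reasoning: bit 31 of owner_opcode is the ownership flag the HCA checks before executing a send queue entry, and stamping it across the whole ring at the RESET-to-INIT transition (rather than once at buffer allocation, as the deleted create_qp_common() loop did) re-marks stale entries every time a kernel QP is cycled back through RESET. For context, the post-send path hands an entry to the hardware by writing the opcode with the ownership bit flipped on every other pass around the ring, roughly like this (a paraphrased sketch, not part of this diff):

	/* Paraphrased sketch of how post-send consumes the bit: the owner
	 * bit alternates on each trip around the ring, so an entry stamped
	 * with 1 << 31 at init never looks valid to the HCA prematurely. */
	ctrl->owner_opcode = cpu_to_be32(opcode) |
		(ind & qp->sq.max ? cpu_to_be32(1 << 31) : 0);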
@@ -919,7 +933,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		qp->rq.tail = 0;
 		qp->sq.head = 0;
 		qp->sq.tail = 0;
-		*qp->db.db = 0;
+		if (!ibqp->srq)
+			*qp->db.db = 0;
 	}
 
 out:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index a0b3782c7625..158759e28a5b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -429,6 +429,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
 
 void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
+void ipoib_drain_cq(struct net_device *dev);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index ffec794b7913..f133b56fd978 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -713,7 +713,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
 	while (!list_empty(&priv->cm.rx_error_list) ||
 	       !list_empty(&priv->cm.rx_flush_list) ||
 	       !list_empty(&priv->cm.rx_drain_list)) {
-		if (!time_after(jiffies, begin + 5 * HZ)) {
+		if (time_after(jiffies, begin + 5 * HZ)) {
 			ipoib_warn(priv, "RX drain timing out\n");
 
 			/*
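The one-character bug above inverted the timeout: time_after(a, b) is true when a is chronologically later than b (it compares the signed difference, so jiffies wraparound is handled), meaning the old !time_after() condition took the "RX drain timing out" path while the 5-second budget was still running, and never once it was exhausted. The idiom as a standalone sketch (done_draining() is a hypothetical stand-in for the list-empty checks):

	#include <linux/jiffies.h>
	#include <linux/delay.h>

	static int wait_for_drain(void)
	{
		unsigned long begin = jiffies;

		while (!done_draining()) {	/* hypothetical condition */
			if (time_after(jiffies, begin + 5 * HZ))
				return -ETIMEDOUT;	/* 5 seconds elapsed */
			msleep(1);
		}
		return 0;
	}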
@@ -726,6 +726,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
 		}
 		spin_unlock_irq(&priv->lock);
 		msleep(1);
+		ipoib_drain_cq(dev);
 		spin_lock_irq(&priv->lock);
 	}
 
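The added ipoib_drain_cq() call is the substance of the "Drain cq in ipoib_cm_dev_stop()" patch: the surrounding loop waits for the rx_error/rx_flush/rx_drain lists to empty, but entries only move off those lists when their flush and drain completions are processed, and at this point in shutdown nothing else is reaping the CQ, so without the explicit drain the loop could spin until the 5-second timeout on every stop.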
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index c1aad06eb4e9..8404f05b2b6e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -550,13 +550,30 @@ static int recvs_pending(struct net_device *dev)
 	return pending;
 }
 
+void ipoib_drain_cq(struct net_device *dev)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	int i, n;
+	do {
+		n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
+		for (i = 0; i < n; ++i) {
+			if (priv->ibwc[i].wr_id & IPOIB_CM_OP_SRQ)
+				ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
+			else if (priv->ibwc[i].wr_id & IPOIB_OP_RECV)
+				ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
+			else
+				ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
+		}
+	} while (n == IPOIB_NUM_WC);
+}
+
 int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_attr qp_attr;
 	unsigned long begin;
 	struct ipoib_tx_buf *tx_req;
-	int i, n;
+	int i;
 
 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 	netif_poll_disable(dev);
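ipoib_drain_cq() factors the inline polling loop out of ipoib_ib_dev_stop() (removed in the next hunk) so ipoib_cm_dev_stop() can share it. The shape of the loop is the standard CQ drain idiom: keep polling while every call fills the whole work-completion array, since a short return means the CQ is empty. A generic sketch with a hypothetical dispatcher:

	/* Generic CQ drain sketch: ib_poll_cq() returns how many completions
	 * it wrote into wc[]; a return shorter than the array means the CQ
	 * is dry. handle_wc() is a hypothetical per-completion dispatcher. */
	struct ib_wc wc[IPOIB_NUM_WC];
	int i, n;

	do {
		n = ib_poll_cq(cq, IPOIB_NUM_WC, wc);
		for (i = 0; i < n; ++i)
			handle_wc(&wc[i]);
	} while (n == IPOIB_NUM_WC);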
@@ -611,17 +628,7 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 			goto timeout;
 		}
 
-		do {
-			n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
-			for (i = 0; i < n; ++i) {
-				if (priv->ibwc[i].wr_id & IPOIB_CM_OP_SRQ)
-					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
-				else if (priv->ibwc[i].wr_id & IPOIB_OP_RECV)
-					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
-				else
-					ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
-			}
-		} while (n == IPOIB_NUM_WC);
+		ipoib_drain_cq(dev);
 
 		msleep(1);
 	}