author    Eli Cohen <eli@mellanox.co.il>      2007-05-24 09:05:01 -0400
committer Roland Dreier <rolandd@cisco.com>   2007-05-24 17:02:38 -0400
commit    c0be5fb5f835110652911ea8b88ad78f841e5b45 (patch)
tree      a6ba737cd09555a0b24bef370c01e623ca78fca9 /drivers/infiniband/hw
parent    02d89b87081f516ad3993637f9b75db0d9786554 (diff)
IB/mlx4: Initialize send queue entry ownership bits
We need to initialize the owner bit of send queue WQEs to hardware
ownership whenever the QP is modified from reset to init, not just when
the QP is first allocated.  This avoids having the hardware process
stale WQEs when the QP is moved to reset but not destroyed and then
modified to init again.

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
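To see what the stamping loop in this patch actually writes, here is a
minimal userspace sketch (not driver code, and not how the driver maps
WQEs): the struct below is a stand-in for the real struct
mlx4_wqe_ctrl_seg, and htonl() stands in for the kernel's
cpu_to_be32().  Setting bit 31 through cpu_to_be32() puts the ownership
bit in the most significant bit of the big-endian owner_opcode word,
i.e. in the first byte of the descriptor in memory, regardless of host
byte order.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() stands in for the kernel's cpu_to_be32() */

struct wqe_ctrl_seg {		/* stand-in for struct mlx4_wqe_ctrl_seg */
	uint32_t owner_opcode;	/* bit 31 = ownership, low bits = opcode */
	/* remaining control-segment fields elided */
};

int main(void)
{
	struct wqe_ctrl_seg ctrl;
	unsigned char bytes[sizeof ctrl.owner_opcode];

	ctrl.owner_opcode = htonl(1u << 31);
	memcpy(bytes, &ctrl.owner_opcode, sizeof bytes);

	/* Prints "80 00 00 00": the owner bit lands in the MSB of byte 0. */
	printf("%02x %02x %02x %02x\n",
	       bytes[0], bytes[1], bytes[2], bytes[3]);
	return 0;
}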
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 22
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 88a994d8a133..dc137dec2308 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -270,9 +270,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
 {
-	struct mlx4_wqe_ctrl_seg *ctrl;
 	int err;
-	int i;
 
 	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
@@ -352,11 +350,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	if (err)
 		goto err_mtt;
 
-	for (i = 0; i < qp->sq.max; ++i) {
-		ctrl = get_send_wqe(qp, i);
-		ctrl->owner_opcode = cpu_to_be32(1 << 31);
-	}
-
 	qp->sq.wrid  = kmalloc(qp->sq.max * sizeof (u64), GFP_KERNEL);
 	qp->rq.wrid  = kmalloc(qp->rq.max * sizeof (u64), GFP_KERNEL);
 
@@ -878,6 +871,21 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	else
 		sqd_event = 0;
 
+	/*
+	 * Before passing a kernel QP to the HW, make sure that the
+	 * ownership bits of the send queue are set so that the
+	 * hardware doesn't start processing stale work requests.
+	 */
+	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+		struct mlx4_wqe_ctrl_seg *ctrl;
+		int i;
+
+		for (i = 0; i < qp->sq.max; ++i) {
+			ctrl = get_send_wqe(qp, i);
+			ctrl->owner_opcode = cpu_to_be32(1 << 31);
+		}
+	}
+
 	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
 			     to_mlx4_state(new_state), context, optpar,
 			     sqd_event, &qp->mqp);
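For context, this is the consumer-side sequence the patch guards
against, as a minimal sketch of a hypothetical kernel verbs consumer
(recycle_qp is a made-up helper; ib_modify_qp and the IB_QPS_*/IB_QP_*
constants are the standard kernel RDMA API): a QP is moved to RESET and
then back to INIT without being destroyed, which previously left stale
send WQEs that the hardware could pick up.

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: cycle an existing QP through RESET back to INIT. */
static int recycle_qp(struct ib_qp *qp, u8 port)
{
	struct ib_qp_attr attr;
	int err;

	/* Move the QP to RESET; its WQEs become stale but stay in memory. */
	memset(&attr, 0, sizeof attr);
	attr.qp_state = IB_QPS_RESET;
	err = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (err)
		return err;

	/*
	 * Move it back to INIT.  With this patch, __mlx4_ib_modify_qp
	 * re-stamps every send WQE's owner bit on this transition, so
	 * the hardware ignores the stale entries.
	 */
	memset(&attr, 0, sizeof attr);
	attr.qp_state	     = IB_QPS_INIT;
	attr.pkey_index	     = 0;
	attr.port_num	     = port;
	attr.qp_access_flags = 0;
	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}

Doing the re-stamp in the modify path rather than only at creation time
covers exactly this reuse case, since a kernel QP can go through
RESET -> INIT any number of times during its lifetime.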