about summary refs log tree commit diff stats
path: root/drivers/infiniband
diff options
context:
space:
mode:
authorAlexander Schmidt <alexs@linux.vnet.ibm.com>2008-08-12 09:46:07 -0400
committerRoland Dreier <rolandd@cisco.com>2008-08-12 14:34:58 -0400
commit51ad241af45a0bfc02d1ed72a3ad58b46f8e30df (patch)
tree53c427390b91a0430309668153d6f7994b970bbf /drivers/infiniband
parentf2d7499be1b1fe1cd8a5e6a01c1f44173894a241 (diff)
IB/ehca: Update qp_state on cached modify_qp()
Since the introduction of the port auto-detect mode for ehca, calls to modify_qp() may be cached in the device driver when the ports are not activated yet. When a modify_qp() call is cached, the qp state remains untouched until the port is activated, which will leave the qp in the reset state. In the reset state, however, it is not allowed to post SQ WQEs, which confuses applications like ib_mad.

The solution for this problem is to immediately set the qp state as requested by modify_qp(), even when the call is cached.

Signed-off-by: Alexander Schmidt <alexs@linux.vnet.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c14
1 files changed, 10 insertions, 4 deletions
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index ea13efddf175..c58fd4eead18 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -1534,8 +1534,6 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1534 if (attr_mask & IB_QP_QKEY) 1534 if (attr_mask & IB_QP_QKEY)
1535 my_qp->qkey = attr->qkey; 1535 my_qp->qkey = attr->qkey;
1536 1536
1537 my_qp->state = qp_new_state;
1538
1539modify_qp_exit2: 1537modify_qp_exit2:
1540 if (squeue_locked) { /* this means: sqe -> rts */ 1538 if (squeue_locked) { /* this means: sqe -> rts */
1541 spin_unlock_irqrestore(&my_qp->spinlock_s, flags); 1539 spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
@@ -1551,6 +1549,8 @@ modify_qp_exit1:
1551int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, 1549int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1552 struct ib_udata *udata) 1550 struct ib_udata *udata)
1553{ 1551{
1552 int ret = 0;
1553
1554 struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca, 1554 struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
1555 ib_device); 1555 ib_device);
1556 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); 1556 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
@@ -1597,12 +1597,18 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1597 attr->qp_state, my_qp->init_attr.port_num, 1597 attr->qp_state, my_qp->init_attr.port_num,
1598 ibqp->qp_type); 1598 ibqp->qp_type);
1599 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags); 1599 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
1600 return 0; 1600 goto out;
1601 } 1601 }
1602 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags); 1602 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
1603 } 1603 }
1604 1604
1605 return internal_modify_qp(ibqp, attr, attr_mask, 0); 1605 ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
1606
1607out:
1608 if ((ret == 0) && (attr_mask & IB_QP_STATE))
1609 my_qp->state = attr->qp_state;
1610
1611 return ret;
1606} 1612}
1607 1613
1608void ehca_recover_sqp(struct ib_qp *sqp) 1614void ehca_recover_sqp(struct ib_qp *sqp)