author		Jack Morgenstein <jackm@mellanox.co.il>	2005-12-15 17:36:24 -0500
committer	Roland Dreier <rolandd@cisco.com>	2005-12-15 17:36:24 -0500
commit		d1646f86a2a05a956adbb163c81a81bd621f055e
tree		e7b321e9b424682ea08b5214e1c415131e7d215d
parent		576d2e4e40315e8140c04be99cd057720d8a3817
IB/mthca: Fix IB_QP_ACCESS_FLAGS handling.
This patch corrects some corner cases in managing the RAE/RRE bits in the
mthca qp context. These bits need to be zero if the user requests
max_dest_rd_atomic of zero. The bits need to be restored to the value
implied by the qp access flags attribute in a previous (or the current)
modify-qp command if the dest_rd_atomic variable is changed to non-zero.

In the current implementation, the following scenario will not work:

RESET-to-INIT:
	set QP access flags to all disabled (zeroes)
INIT-to-RTR:
	set max_dest_rd_atomic=10, AND
	set qp_access_flags = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_ATOMIC

The current code will incorrectly take the access-flags value set in the
RESET-to-INIT transition.

We can simplify, and correct, this IB_QP_ACCESS_FLAGS handling: it is
always safe to set qp access flags in the firmware command if either of
IB_QP_MAX_DEST_RD_ATOMIC or IB_QP_ACCESS_FLAGS is set, so let's just set
it to the correct value, always.

Signed-off-by: Jack Morgenstein <jackm@mellanox.co.il>
Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
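To make the failing sequence concrete, a kernel-side consumer would issue
roughly the following two ib_modify_qp() calls (a minimal sketch, assuming
a valid struct ib_qp *qp; the pkey index, port number, and the elided RTR
attributes are illustrative placeholders, not part of the original report):

/*
 * Sketch of the failing sequence from the commit message.  Error paths
 * and the remaining attributes required for the RTR transition on an
 * RC QP (AV, path MTU, dest QPN, RQ PSN, min RNR timer) are omitted.
 */
static int reproduce_scenario(struct ib_qp *qp)
{
	struct ib_qp_attr attr;
	int ret;

	/* RESET -> INIT: all remote access disabled */
	memset(&attr, 0, sizeof attr);
	attr.qp_state        = IB_QPS_INIT;
	attr.pkey_index      = 0;	/* placeholder */
	attr.port_num        = 1;	/* placeholder */
	attr.qp_access_flags = 0;	/* RWE/RRE/RAE all off */
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
	if (ret)
		return ret;

	/*
	 * INIT -> RTR: raise responder resources and enable remote
	 * reads/atomics in the same call.  Before this patch, mthca
	 * programmed RRE/RAE from the stale flags stored above
	 * (qp->atomic_rd_en == 0), because qp->resp_depth was still
	 * zero when the IB_QP_ACCESS_FLAGS branch ran.
	 */
	memset(&attr, 0, sizeof attr);
	attr.qp_state           = IB_QPS_RTR;
	attr.max_dest_rd_atomic = 10;
	attr.qp_access_flags    = IB_ACCESS_REMOTE_READ |
				  IB_ACCESS_REMOTE_ATOMIC;
	return ib_modify_qp(qp, &attr, IB_QP_STATE |
			    IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_ACCESS_FLAGS
			    /* | the other attributes required for RTR */);
}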
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_qp.c	87
1 file changed, 37 insertions(+), 50 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 3543299ecb15..e826c9ff5d70 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -522,6 +522,36 @@ static void init_port(struct mthca_dev *dev, int port)
 		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
 }
 
+static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
+				  int attr_mask)
+{
+	u8 dest_rd_atomic;
+	u32 access_flags;
+	u32 hw_access_flags = 0;
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+		dest_rd_atomic = attr->max_dest_rd_atomic;
+	else
+		dest_rd_atomic = qp->resp_depth;
+
+	if (attr_mask & IB_QP_ACCESS_FLAGS)
+		access_flags = attr->qp_access_flags;
+	else
+		access_flags = qp->atomic_rd_en;
+
+	if (!dest_rd_atomic)
+		access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+	if (access_flags & IB_ACCESS_REMOTE_READ)
+		hw_access_flags |= MTHCA_QP_BIT_RRE;
+	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+		hw_access_flags |= MTHCA_QP_BIT_RAE;
+	if (access_flags & IB_ACCESS_REMOTE_WRITE)
+		hw_access_flags |= MTHCA_QP_BIT_RWE;
+
+	return cpu_to_be32(hw_access_flags);
+}
+
 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
@@ -743,57 +773,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index);
 	}
 
-	if (attr_mask & IB_QP_ACCESS_FLAGS) {
-		qp_context->params2 |=
-			cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
-				    MTHCA_QP_BIT_RWE : 0);
-
-		/*
-		 * Only enable RDMA reads and atomics if we have
-		 * responder resources set to a non-zero value.
-		 */
-		if (qp->resp_depth) {
-			qp_context->params2 |=
-				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
-					    MTHCA_QP_BIT_RRE : 0);
-			qp_context->params2 |=
-				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
-					    MTHCA_QP_BIT_RAE : 0);
-		}
-
-		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-							MTHCA_QP_OPTPAR_RRE |
-							MTHCA_QP_OPTPAR_RAE);
-	}
-
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-		if (qp->resp_depth && !attr->max_dest_rd_atomic) {
-			/*
-			 * Lowering our responder resources to zero.
-			 * Turn off reads RDMA and atomics as responder.
-			 * (RRE/RAE in params2 already zero)
-			 */
-			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
-								MTHCA_QP_OPTPAR_RAE);
-		}
-
-		if (!qp->resp_depth && attr->max_dest_rd_atomic) {
-			/*
-			 * Increasing our responder resources from
-			 * zero. Turn on RDMA reads and atomics as
-			 * appropriate.
-			 */
-			qp_context->params2 |=
-				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
-					    MTHCA_QP_BIT_RRE : 0);
-			qp_context->params2 |=
-				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
-					    MTHCA_QP_BIT_RAE : 0);
-
-			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
-								MTHCA_QP_OPTPAR_RAE);
-		}
-
 		if (attr->max_dest_rd_atomic)
 			qp_context->params2 |=
 				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
@@ -801,6 +781,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
 	}
 
+	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
+		qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask);
+		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
+							MTHCA_QP_OPTPAR_RRE |
+							MTHCA_QP_OPTPAR_RAE);
+	}
+
 	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
 
 	if (ibqp->srq)
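As a quick sanity check of the new semantics, the helper's decision logic
can be exercised outside the driver with a small user-space mock (a sketch
only; the IB_ACCESS_* and QP_BIT_* values below are placeholders, not the
driver's real definitions):

/*
 * User-space mock of the consolidated logic, for illustration only.
 * All flag and bit values are placeholders.
 */
#include <stdio.h>

#define IB_ACCESS_REMOTE_WRITE  (1 << 0)	/* placeholders */
#define IB_ACCESS_REMOTE_READ   (1 << 1)
#define IB_ACCESS_REMOTE_ATOMIC (1 << 2)

#define QP_BIT_RWE (1 << 14)			/* placeholders */
#define QP_BIT_RRE (1 << 13)
#define QP_BIT_RAE (1 << 12)

/* Mirrors get_hw_access_flags(): when the effective responder depth is
 * zero, everything except remote write is masked off before the flags
 * are translated into hardware bits.
 */
static unsigned int hw_access_flags(unsigned int access_flags,
				    int dest_rd_atomic)
{
	unsigned int hw = 0;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw |= QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw |= QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw |= QP_BIT_RWE;
	return hw;
}

static void show(const char *label, unsigned int hw)
{
	printf("%s: RRE=%d RAE=%d RWE=%d\n", label,
	       !!(hw & QP_BIT_RRE), !!(hw & QP_BIT_RAE),
	       !!(hw & QP_BIT_RWE));
}

int main(void)
{
	/* The INIT->RTR call from the commit message: flags and depth
	 * arrive together, so RRE/RAE come from the current attribute.
	 */
	show("depth=10", hw_access_flags(IB_ACCESS_REMOTE_READ |
					 IB_ACCESS_REMOTE_ATOMIC, 10));
	/* Dropping responder resources to zero forces RRE/RAE off. */
	show("depth=0 ", hw_access_flags(IB_ACCESS_REMOTE_READ |
					 IB_ACCESS_REMOTE_WRITE, 0));
	return 0;
}

Run stand-alone, this prints RRE=1 RAE=1 RWE=0 for the commit-message
scenario and RRE=0 RAE=0 RWE=1 when responder resources drop to zero,
which is the behavior the patch makes unconditional.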