author		Roland Dreier <rolandd@cisco.com>	2005-11-15 03:24:23 -0500
committer	Roland Dreier <rolandd@cisco.com>	2005-11-15 03:24:23 -0500
commit		cbc5b2bb9e226c2b2b981836d2289912e2ef3c1c (patch)
tree		9bb777025b4237dad46d7e9235c80f2e85749a10 /drivers/infiniband/hw/mthca
parent		47f2bce9021b4974ed33b072ebb8348c8145c946 (diff)
[IB] mthca: don't disable RDMA writes if no responder resources
Responder resources are only required to handle RDMA reads and atomic operations, not RDMA writes. So the driver should allow RDMA writes even if responder resources are set to 0. This is especially important for the UC transport -- with the old code, it was impossible to enable RDMA writes for UC QPs.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
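As a rough sketch of the consumer-side call this affects (the helper name and the minimal attribute mask below are illustrative, not part of the patch), a ULP that wants RDMA writes on a QP whose responder resources stay at zero -- the UC case mentioned above -- would do something like:

#include <rdma/ib_verbs.h>

/*
 * Illustrative only: request remote-write permission on a QP (e.g. a UC
 * QP) while leaving responder resources (max_dest_rd_atomic) at zero.
 * Before this patch mthca left the RWE bit clear in this case; with it,
 * the write permission takes effect as requested.
 */
static int enable_remote_write(struct ib_qp *qp)
{
	struct ib_qp_attr attr = {
		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr, IB_QP_ACCESS_FLAGS);
}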
Diffstat (limited to 'drivers/infiniband/hw/mthca')
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_qp.c	27
1 file changed, 12 insertions, 15 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 760c418d5bc9..5899f0c765be 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -730,15 +730,16 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	}
 
 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
+		qp_context->params2 |=
+			cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
+				    MTHCA_QP_BIT_RWE : 0);
+
 		/*
-		 * Only enable RDMA/atomics if we have responder
-		 * resources set to a non-zero value.
+		 * Only enable RDMA reads and atomics if we have
+		 * responder resources set to a non-zero value.
 		 */
 		if (qp->resp_depth) {
 			qp_context->params2 |=
-				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
-					    MTHCA_QP_BIT_RWE : 0);
-			qp_context->params2 |=
 				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
 					    MTHCA_QP_BIT_RRE : 0);
 			qp_context->params2 |=
@@ -759,31 +760,27 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		if (qp->resp_depth && !attr->max_dest_rd_atomic) {
 			/*
 			 * Lowering our responder resources to zero.
-			 * Turn off RDMA/atomics as responder.
-			 * (RWE/RRE/RAE in params2 already zero)
+			 * Turn off reads RDMA and atomics as responder.
+			 * (RRE/RAE in params2 already zero)
 			 */
-			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-								MTHCA_QP_OPTPAR_RRE |
+			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
 								MTHCA_QP_OPTPAR_RAE);
 		}
 
 		if (!qp->resp_depth && attr->max_dest_rd_atomic) {
 			/*
 			 * Increasing our responder resources from
-			 * zero. Turn on RDMA/atomics as appropriate.
+			 * zero. Turn on RDMA reads and atomics as
+			 * appropriate.
 			 */
 			qp_context->params2 |=
-				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
-					    MTHCA_QP_BIT_RWE : 0);
-			qp_context->params2 |=
 				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
 					    MTHCA_QP_BIT_RRE : 0);
 			qp_context->params2 |=
 				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
 					    MTHCA_QP_BIT_RAE : 0);
 
-			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-								MTHCA_QP_OPTPAR_RRE |
+			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
 								MTHCA_QP_OPTPAR_RAE);
 		}
 
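Taken together, the post-patch IB_QP_ACCESS_FLAGS handling boils down to the gating sketched below. This is a stand-alone model of the control flow, not driver code: the bit positions are placeholders and the cpu_to_be32() byte-swapping is omitted.

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit values -- not the hardware encoding used by mthca. */
#define QP_BIT_RWE (1u << 0)	/* remote write enable  */
#define QP_BIT_RRE (1u << 1)	/* remote read enable   */
#define QP_BIT_RAE (1u << 2)	/* remote atomic enable */

#define ACCESS_REMOTE_WRITE  (1u << 0)
#define ACCESS_REMOTE_READ   (1u << 1)
#define ACCESS_REMOTE_ATOMIC (1u << 2)

/*
 * Post-patch logic: RWE follows the requested access flags
 * unconditionally; RRE/RAE additionally require non-zero responder
 * resources (resp_depth).
 */
static uint32_t params2_bits(uint32_t access_flags, unsigned int resp_depth)
{
	uint32_t params2 = 0;

	if (access_flags & ACCESS_REMOTE_WRITE)
		params2 |= QP_BIT_RWE;

	if (resp_depth) {
		if (access_flags & ACCESS_REMOTE_READ)
			params2 |= QP_BIT_RRE;
		if (access_flags & ACCESS_REMOTE_ATOMIC)
			params2 |= QP_BIT_RAE;
	}

	return params2;
}

int main(void)
{
	/* UC-style case: writes requested, zero responder resources. */
	printf("params2 = %#x\n", params2_bits(ACCESS_REMOTE_WRITE, 0));
	return 0;
}

With the pre-patch ordering, the same input would have produced 0, which is why RDMA writes could never be enabled on UC QPs.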