author     Michael S. Tsirkin <mst@dev.mellanox.co.il>   2007-05-14 00:26:51 -0400
committer  Roland Dreier <rolandd@cisco.com>             2007-05-19 11:51:57 -0400
commit     65adfa911a3522c1e40e55afd472dd571dc2431b (patch)
tree       4e33f3eb290d63af8a1e011b68a05b579af2069e
parent     b18aad7150c85cc86a66be8a1c744b63b41b36e0 (diff)
IB/mlx4: Fix RESET to RESET and RESET to ERROR transitions
According to the IB spec, a QP can be moved from RESET back to RESET or
to the ERROR state, but mlx4 firmware does not support this and returns
an error if we try.  Fix the RESET to RESET transition by just returning
0 without doing anything, and fix RESET to ERROR by moving the QP from
RESET to INIT with dummy parameters and then transitioning from INIT to
ERROR.

Signed-off-by: Michael S. Tsirkin <mst@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
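For context, a minimal consumer-side sketch of what this fix enables (the
helper name move_qp_to_err() is hypothetical, not part of this patch): a ULP
can now request RESET -> ERR through the standard verbs call, and the driver
performs the RESET -> INIT -> ERR bounce internally.

    #include <rdma/ib_verbs.h>

    /* Hypothetical helper, not in this patch: flush a QP that is
     * still in RESET by moving it straight to the ERROR state. */
    static int move_qp_to_err(struct ib_qp *qp)
    {
            struct ib_qp_attr attr = {
                    .qp_state = IB_QPS_ERR,
            };

            /* With this fix, mlx4 accepts the transition and internally
             * bounces RESET -> INIT (dummy attributes) -> ERR. */
            return ib_modify_qp(qp, &attr, IB_QP_STATE);
    }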
-rw-r--r--   drivers/infiniband/hw/mlx4/qp.c   115
1 file changed, 80 insertions(+), 35 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 0cf8b95128fd..bd28af5753d1 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -573,7 +573,7 @@ static int to_mlx4_st(enum ib_qp_type type)
 	}
 }
 
-static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, struct ib_qp_attr *attr,
+static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
 				   int attr_mask)
 {
 	u8 dest_rd_atomic;
@@ -603,7 +603,7 @@ static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, struct ib_qp_attr *att
 	return cpu_to_be32(hw_access_flags);
 }
 
-static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, struct ib_qp_attr *attr,
+static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
 			    int attr_mask)
 {
 	if (attr_mask & IB_QP_PKEY_INDEX)
@@ -619,7 +619,7 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
 	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
 }
 
-static int mlx4_set_path(struct mlx4_ib_dev *dev, struct ib_ah_attr *ah,
+static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 			 struct mlx4_qp_path *path, u8 port)
 {
 	path->grh_mylmc = ah->src_path_bits & 0x7f;
@@ -655,14 +655,14 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, struct ib_ah_attr *ah,
 	return 0;
 }
 
-int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-		      int attr_mask, struct ib_udata *udata)
+static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
+			       const struct ib_qp_attr *attr, int attr_mask,
+			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	struct mlx4_qp_context *context;
 	enum mlx4_qp_optpar optpar = 0;
-	enum ib_qp_state cur_state, new_state;
 	int sqd_event;
 	int err = -EINVAL;
 
@@ -670,34 +670,6 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (!context)
 		return -ENOMEM;
 
-	mutex_lock(&qp->mutex);
-
-	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
-	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
-		goto out;
-
-	if ((attr_mask & IB_QP_PKEY_INDEX) &&
-	    attr->pkey_index >= dev->dev->caps.pkey_table_len) {
-		goto out;
-	}
-
-	if ((attr_mask & IB_QP_PORT) &&
-	    (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
-		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
-		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
-		goto out;
-	}
-
 	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
 				     (to_mlx4_st(ibqp->qp_type) << 16));
 	context->flags |= cpu_to_be32(1 << 8); /* DE? */
@@ -920,11 +892,84 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	}
 
 out:
-	mutex_unlock(&qp->mutex);
 	kfree(context);
 	return err;
 }
 
+static const struct ib_qp_attr mlx4_ib_qp_attr = { .port_num = 1 };
+static const int mlx4_ib_qp_attr_mask_table[IB_QPT_UD + 1] = {
+		[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
+				IB_QP_PORT		|
+				IB_QP_QKEY),
+		[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
+				IB_QP_PORT		|
+				IB_QP_ACCESS_FLAGS),
+		[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
+				IB_QP_PORT		|
+				IB_QP_ACCESS_FLAGS),
+		[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
+				IB_QP_QKEY),
+		[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
+				IB_QP_QKEY),
+};
+
+int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		      int attr_mask, struct ib_udata *udata)
+{
+	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
+	struct mlx4_ib_qp *qp = to_mqp(ibqp);
+	enum ib_qp_state cur_state, new_state;
+	int err = -EINVAL;
+
+	mutex_lock(&qp->mutex);
+
+	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
+	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
+		goto out;
+
+	if ((attr_mask & IB_QP_PKEY_INDEX) &&
+	    attr->pkey_index >= dev->dev->caps.pkey_table_len) {
+		goto out;
+	}
+
+	if ((attr_mask & IB_QP_PORT) &&
+	    (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
+		goto out;
+	}
+
+	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+		err = 0;
+		goto out;
+	}
+
+	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_ERR) {
+		err = __mlx4_ib_modify_qp(ibqp, &mlx4_ib_qp_attr,
+					  mlx4_ib_qp_attr_mask_table[ibqp->qp_type],
+					  IB_QPS_RESET, IB_QPS_INIT);
+		if (err)
+			goto out;
+		cur_state = IB_QPS_INIT;
+	}
+
+	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+
+out:
+	mutex_unlock(&qp->mutex);
+	return err;
+}
+
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 			    void *wqe)
 {
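A note on the dummy INIT step above: the IB spec requires a minimum set of
attributes for the RESET -> INIT transition, and the per-QP-type mandatory
masks are what the new mlx4_ib_qp_attr_mask_table encodes. An illustrative
fragment (assuming the definitions added by this patch):

    /* For an RC QP, the dummy RESET -> INIT transition passes exactly
     * the spec-mandated INIT attributes: */
    int rc_mask = mlx4_ib_qp_attr_mask_table[IB_QPT_RC];
    /* rc_mask == (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS) */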