-rw-r--r--	drivers/infiniband/hw/mthca/mthca_qp.c	158
1 file changed, 98 insertions(+), 60 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 72fabb822f1c..a20e2f0c9a54 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -295,7 +295,7 @@ static int to_mthca_st(int transport)
 	}
 }
 
-static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
+static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
 			int attr_mask)
 {
 	if (attr_mask & IB_QP_PKEY_INDEX)
@@ -327,7 +327,7 @@ static void init_port(struct mthca_dev *dev, int port)
 		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
 }
 
-static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
+static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
 				  int attr_mask)
 {
 	u8 dest_rd_atomic;
@@ -510,7 +510,7 @@ out:
 	return err;
 }
 
-static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
+static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah,
 			  struct mthca_qp_path *path, u8 port)
 {
 	path->g_mylmc = ah->src_path_bits & 0x7f;
@@ -538,12 +538,12 @@ static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
 	return 0;
 }
 
-int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
-		    struct ib_udata *udata)
+static int __mthca_modify_qp(struct ib_qp *ibqp,
+			     const struct ib_qp_attr *attr, int attr_mask,
+			     enum ib_qp_state cur_state, enum ib_qp_state new_state)
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
-	enum ib_qp_state cur_state, new_state;
 	struct mthca_mailbox *mailbox;
 	struct mthca_qp_param *qp_param;
 	struct mthca_qp_context *qp_context;
@@ -551,60 +551,6 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 	u8 status;
 	int err = -EINVAL;
 
-	mutex_lock(&qp->mutex);
-
-	if (attr_mask & IB_QP_CUR_STATE) {
-		cur_state = attr->cur_qp_state;
-	} else {
-		spin_lock_irq(&qp->sq.lock);
-		spin_lock(&qp->rq.lock);
-		cur_state = qp->state;
-		spin_unlock(&qp->rq.lock);
-		spin_unlock_irq(&qp->sq.lock);
-	}
-
-	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
-		mthca_dbg(dev, "Bad QP transition (transport %d) "
-			  "%d->%d with attr 0x%08x\n",
-			  qp->transport, cur_state, new_state,
-			  attr_mask);
-		goto out;
-	}
-
-	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-		err = 0;
-		goto out;
-	}
-
-	if ((attr_mask & IB_QP_PKEY_INDEX) &&
-	     attr->pkey_index >= dev->limits.pkey_table_len) {
-		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
-			  attr->pkey_index, dev->limits.pkey_table_len-1);
-		goto out;
-	}
-
-	if ((attr_mask & IB_QP_PORT) &&
-	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
-		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
-		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
-		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
-			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
-		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
-		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
-			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
-		goto out;
-	}
-
 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
 	if (IS_ERR(mailbox)) {
 		err = PTR_ERR(mailbox);
@@ -891,6 +837,98 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 
 out_mailbox:
 	mthca_free_mailbox(dev, mailbox);
+out:
+	return err;
+}
+
+static const struct ib_qp_attr dummy_init_attr = { .port_num = 1 };
+static const int dummy_init_attr_mask[] = {
+	[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
+			IB_QP_PORT		|
+			IB_QP_QKEY),
+	[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
+			IB_QP_PORT		|
+			IB_QP_ACCESS_FLAGS),
+	[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
+			IB_QP_PORT		|
+			IB_QP_ACCESS_FLAGS),
+	[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
+			IB_QP_QKEY),
+	[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
+			IB_QP_QKEY),
+};
+
+int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+		    struct ib_udata *udata)
+{
+	struct mthca_dev *dev = to_mdev(ibqp->device);
+	struct mthca_qp *qp = to_mqp(ibqp);
+	enum ib_qp_state cur_state, new_state;
+	int err = -EINVAL;
+
+	mutex_lock(&qp->mutex);
+	if (attr_mask & IB_QP_CUR_STATE) {
+		cur_state = attr->cur_qp_state;
+	} else {
+		spin_lock_irq(&qp->sq.lock);
+		spin_lock(&qp->rq.lock);
+		cur_state = qp->state;
+		spin_unlock(&qp->rq.lock);
+		spin_unlock_irq(&qp->sq.lock);
+	}
+
+	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+		mthca_dbg(dev, "Bad QP transition (transport %d) "
+			  "%d->%d with attr 0x%08x\n",
+			  qp->transport, cur_state, new_state,
+			  attr_mask);
+		goto out;
+	}
+
+	if ((attr_mask & IB_QP_PKEY_INDEX) &&
+	     attr->pkey_index >= dev->limits.pkey_table_len) {
+		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
+			  attr->pkey_index, dev->limits.pkey_table_len-1);
+		goto out;
+	}
+
+	if ((attr_mask & IB_QP_PORT) &&
+	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
+		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
+		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
+			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
+		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
+			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
+		goto out;
+	}
+
+	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+		err = 0;
+		goto out;
+	}
+
+	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_ERR) {
+		err = __mthca_modify_qp(ibqp, &dummy_init_attr,
+					dummy_init_attr_mask[ibqp->qp_type],
+					IB_QPS_RESET, IB_QPS_INIT);
+		if (err)
+			goto out;
+		cur_state = IB_QPS_INIT;
+	}
+
+	err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
 
 out:
 	mutex_unlock(&qp->mutex);
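A minimal caller-side sketch, not part of the patch above: with this change in place, a kernel consumer can request the RESET -> ERR transition in a single ib_modify_qp() call, and mthca internally bounces the QP through INIT using dummy_init_attr before applying the ERR transition. The helper name move_qp_to_error() is hypothetical; only ib_modify_qp() and the struct ib_qp_attr fields come from the kernel verbs API.

/*
 * Hedged sketch: move a QP straight from RESET to ERR.  Only IB_QP_STATE is
 * set, so mthca_modify_qp() reads cur_state from qp->state under the sq/rq
 * locks and, if it is RESET, first applies the dummy INIT attributes.
 */
#include <rdma/ib_verbs.h>

static int move_qp_to_error(struct ib_qp *qp)	/* hypothetical helper */
{
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR,		/* requested new state */
	};

	return ib_modify_qp(qp, &attr, IB_QP_STATE);
}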