Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_qp.c')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c  165
1 files changed, 102 insertions, 63 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 72fabb822f1c..eef415b12b2e 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -37,6 +37,7 @@
 
 #include <linux/string.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #include <asm/io.h>
 
@@ -295,7 +296,7 @@ static int to_mthca_st(int transport)
 	}
 }
 
-static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
+static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
 			int attr_mask)
 {
 	if (attr_mask & IB_QP_PKEY_INDEX)
@@ -327,7 +328,7 @@ static void init_port(struct mthca_dev *dev, int port)
 		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
 }
 
-static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
+static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
 				  int attr_mask)
 {
 	u8 dest_rd_atomic;
@@ -510,7 +511,7 @@ out:
 	return err;
 }
 
-static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
+static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah,
 			  struct mthca_qp_path *path, u8 port)
 {
 	path->g_mylmc     = ah->src_path_bits & 0x7f;
@@ -538,12 +539,12 @@ static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
 	return 0;
 }
 
-int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
-		    struct ib_udata *udata)
+static int __mthca_modify_qp(struct ib_qp *ibqp,
+			     const struct ib_qp_attr *attr, int attr_mask,
+			     enum ib_qp_state cur_state, enum ib_qp_state new_state)
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
-	enum ib_qp_state cur_state, new_state;
 	struct mthca_mailbox *mailbox;
 	struct mthca_qp_param *qp_param;
 	struct mthca_qp_context *qp_context;
@@ -551,60 +552,6 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 	u8 status;
 	int err = -EINVAL;
 
-	mutex_lock(&qp->mutex);
-
-	if (attr_mask & IB_QP_CUR_STATE) {
-		cur_state = attr->cur_qp_state;
-	} else {
-		spin_lock_irq(&qp->sq.lock);
-		spin_lock(&qp->rq.lock);
-		cur_state = qp->state;
-		spin_unlock(&qp->rq.lock);
-		spin_unlock_irq(&qp->sq.lock);
-	}
-
-	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
-		mthca_dbg(dev, "Bad QP transition (transport %d) "
-			  "%d->%d with attr 0x%08x\n",
-			  qp->transport, cur_state, new_state,
-			  attr_mask);
-		goto out;
-	}
-
-	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-		err = 0;
-		goto out;
-	}
-
-	if ((attr_mask & IB_QP_PKEY_INDEX) &&
-	     attr->pkey_index >= dev->limits.pkey_table_len) {
-		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
-			  attr->pkey_index, dev->limits.pkey_table_len-1);
-		goto out;
-	}
-
-	if ((attr_mask & IB_QP_PORT) &&
-	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
-		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
-		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
-		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
-			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
-		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
-		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
-			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
-		goto out;
-	}
-
 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
 	if (IS_ERR(mailbox)) {
 		err = PTR_ERR(mailbox);
@@ -891,6 +838,98 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 
 out_mailbox:
 	mthca_free_mailbox(dev, mailbox);
+out:
+	return err;
+}
+
+static const struct ib_qp_attr dummy_init_attr = { .port_num = 1 };
+static const int dummy_init_attr_mask[] = {
+	[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
+			IB_QP_PORT		|
+			IB_QP_QKEY),
+	[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
+			IB_QP_PORT		|
+			IB_QP_ACCESS_FLAGS),
+	[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
+			IB_QP_PORT		|
+			IB_QP_ACCESS_FLAGS),
+	[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
+			IB_QP_QKEY),
+	[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
+			IB_QP_QKEY),
+};
+
+int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+		    struct ib_udata *udata)
+{
+	struct mthca_dev *dev = to_mdev(ibqp->device);
+	struct mthca_qp *qp = to_mqp(ibqp);
+	enum ib_qp_state cur_state, new_state;
+	int err = -EINVAL;
+
+	mutex_lock(&qp->mutex);
+	if (attr_mask & IB_QP_CUR_STATE) {
+		cur_state = attr->cur_qp_state;
+	} else {
+		spin_lock_irq(&qp->sq.lock);
+		spin_lock(&qp->rq.lock);
+		cur_state = qp->state;
+		spin_unlock(&qp->rq.lock);
+		spin_unlock_irq(&qp->sq.lock);
+	}
+
+	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+		mthca_dbg(dev, "Bad QP transition (transport %d) "
+			  "%d->%d with attr 0x%08x\n",
+			  qp->transport, cur_state, new_state,
+			  attr_mask);
+		goto out;
+	}
+
+	if ((attr_mask & IB_QP_PKEY_INDEX) &&
+	     attr->pkey_index >= dev->limits.pkey_table_len) {
+		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
+			  attr->pkey_index, dev->limits.pkey_table_len-1);
+		goto out;
+	}
+
+	if ((attr_mask & IB_QP_PORT) &&
+	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
+		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
+		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
+			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
+		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
+			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
+		goto out;
+	}
+
+	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+		err = 0;
+		goto out;
+	}
+
+	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_ERR) {
+		err = __mthca_modify_qp(ibqp, &dummy_init_attr,
+					dummy_init_attr_mask[ibqp->qp_type],
+					IB_QPS_RESET, IB_QPS_INIT);
+		if (err)
+			goto out;
+		cur_state = IB_QPS_INIT;
+	}
+
+	err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
 
 out:
 	mutex_unlock(&qp->mutex);
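This hunk carries the functional change: mthca_modify_qp() keeps the locking and validation, delegates the actual firmware transition to __mthca_modify_qp(), and handles the RESET-to-ERROR case by first moving the QP to INIT with dummy attributes. A minimal sketch of the consumer-visible behavior this enables (hypothetical caller, not part of the patch; qp is assumed to be a freshly created QP still in RESET):

	/*
	 * The IB spec permits modifying a QP directly from RESET to
	 * ERROR (e.g. to flush it during error cleanup).  With this
	 * patch, mthca satisfies such a request by internally doing
	 * RESET -> INIT (with dummy_init_attr) and then INIT -> ERROR.
	 */
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR,
	};
	int err = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (err)
		printk(KERN_ERR "RESET->ERROR transition failed: %d\n", err);

The per-type dummy_init_attr_mask entries match exactly the attributes that ib_modify_qp_is_ok() requires for a RESET-to-INIT move (P_Key index plus port and access flags or Q_Key, depending on transport); the attribute block itself pins port_num to 1 and relies on zeroed values for the rest.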
@@ -2245,10 +2284,10 @@ void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
 	struct mthca_next_seg *next;
 
 	/*
-	 * For SRQs, all WQEs generate a CQE, so we're always at the
-	 * end of the doorbell chain.
+	 * For SRQs, all receive WQEs generate a CQE, so we're always
+	 * at the end of the doorbell chain.
 	 */
-	if (qp->ibqp.srq) {
+	if (qp->ibqp.srq && !is_send) {
 		*new_wqe = 0;
 		return;
 	}
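The final hunk is a separate correctness fix in mthca_free_err_wqe(): only receive WQEs of an SRQ-attached QP always generate a CQE and therefore always sit at the end of a doorbell chain; send WQEs on such a QP must still be unlinked like any other. Condensed, the fixed control flow reads (simplified excerpt, not the full function):

	if (qp->ibqp.srq && !is_send) {
		/* SRQ receive WQE: always last in its doorbell chain. */
		*new_wqe = 0;
		return;
	}
	/* Send WQEs (and receive WQEs without an SRQ) fall through to
	 * the normal next-segment bookkeeping. */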