author    Roland Dreier <rolandd@cisco.com>	2006-06-17 23:37:41 -0400
committer Roland Dreier <rolandd@cisco.com>	2006-06-17 23:37:41 -0400
commit    c93b6fbaa99bb3a1552e14317296be14dde51dfb
tree      bad61dc728f0eb28fcfdf01953cd0ae43a4b2350 /drivers/infiniband/hw
parent    c9c5d9feef86debee4d8e77a738ad86877cf371a
IB/mthca: Make all device methods truly reentrant
Documentation/infiniband/core_locking.txt says:

    All of the methods in struct ib_device exported by a low-level
    driver must be fully reentrant.  The low-level driver is required
    to perform all synchronization necessary to maintain consistency,
    even if multiple function calls using the same object are run
    simultaneously.

However, mthca's modify_qp, modify_srq and resize_cq methods are
currently not reentrant.  Add a mutex to the QP, SRQ and CQ structures
so that these calls can be properly serialized.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
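The locking pattern is the same in all three verbs: take a per-object
mutex on entry, turn every early return into a goto to a common exit
label, and unlock there so each path drops the lock exactly once.  A
rough userspace sketch of that structure follows (the struct fake_cq
type and hw_resize() helper are made up for illustration; this is not
the mthca code itself):

	#include <errno.h>
	#include <pthread.h>

	struct fake_cq {
		pthread_mutex_t	mutex;	/* serializes resize calls on this object */
		int		cqe;
	};

	/* Hypothetical stand-in for the firmware RESIZE_CQ command. */
	static int hw_resize(struct fake_cq *cq, int entries)
	{
		cq->cqe = entries;
		return 0;
	}

	static int resize(struct fake_cq *cq, int entries)
	{
		int ret;

		pthread_mutex_lock(&cq->mutex);

		if (entries < 1) {
			ret = -EINVAL;
			goto out;	/* was "return -EINVAL" before the patch */
		}

		ret = hw_resize(cq, entries);

	out:
		pthread_mutex_unlock(&cq->mutex);	/* single unlock on every path */
		return ret;
	}

With the mutex held across the whole verb, two threads operating on the
same object can no longer interleave the validation checks with the
hardware command.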
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c       |  1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 23
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h |  3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c       | 38
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c      |  5
5 files changed, 48 insertions(+), 22 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 87a8f1166a3b..3e27a084257e 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -822,6 +822,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	spin_lock_init(&cq->lock);
 	cq->refcount = 1;
 	init_waitqueue_head(&cq->wait);
+	mutex_init(&cq->mutex);
 
 	memset(cq_context, 0, sizeof *cq_context);
 	cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 8f89ba7c9147..230ae21db8fd 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -793,18 +793,24 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
 	if (entries < 1 || entries > dev->limits.max_cqes)
 		return -EINVAL;
 
+	mutex_lock(&cq->mutex);
+
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries == ibcq->cqe + 1)
-		return 0;
+	if (entries == ibcq->cqe + 1) {
+		ret = 0;
+		goto out;
+	}
 
 	if (cq->is_kernel) {
 		ret = mthca_alloc_resize_buf(dev, cq, entries);
 		if (ret)
-			return ret;
+			goto out;
 		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
 	} else {
-		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
-			return -EFAULT;
+		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+			ret = -EFAULT;
+			goto out;
+		}
 		lkey = ucmd.lkey;
 	}
 
@@ -821,7 +827,7 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
 			cq->resize_buf = NULL;
 			spin_unlock_irq(&cq->lock);
 		}
-		return ret;
+		goto out;
 	}
 
 	if (cq->is_kernel) {
@@ -848,7 +854,10 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
 	} else
 		ibcq->cqe = entries - 1;
 
-	return 0;
+out:
+	mutex_unlock(&cq->mutex);
+
+	return ret;
 }
 
 static int mthca_destroy_cq(struct ib_cq *cq)
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 179a8f610d0f..8de2887ba15c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -214,6 +214,7 @@ struct mthca_cq {
 	int			arm_sn;
 
 	wait_queue_head_t	wait;
+	struct mutex		mutex;
 };
 
 struct mthca_srq {
@@ -237,6 +238,7 @@ struct mthca_srq {
 	struct mthca_mr		mr;
 
 	wait_queue_head_t	wait;
+	struct mutex		mutex;
 };
 
 struct mthca_wq {
242struct mthca_wq { 244struct mthca_wq {
@@ -278,6 +280,7 @@ struct mthca_qp {
 	union mthca_buf		queue;
 
 	wait_queue_head_t	wait;
+	struct mutex		mutex;
 };
 
 struct mthca_sqp {
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 322bc32a93f6..16c387d8170c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -536,6 +536,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	u8 status;
 	int err = -EINVAL;
 
+	mutex_lock(&qp->mutex);
+
 	if (attr_mask & IB_QP_CUR_STATE) {
 		cur_state = attr->cur_qp_state;
 	} else {
@@ -553,39 +555,41 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
553 "%d->%d with attr 0x%08x\n", 555 "%d->%d with attr 0x%08x\n",
554 qp->transport, cur_state, new_state, 556 qp->transport, cur_state, new_state,
555 attr_mask); 557 attr_mask);
556 return -EINVAL; 558 goto out;
557 } 559 }
558 560
559 if ((attr_mask & IB_QP_PKEY_INDEX) && 561 if ((attr_mask & IB_QP_PKEY_INDEX) &&
560 attr->pkey_index >= dev->limits.pkey_table_len) { 562 attr->pkey_index >= dev->limits.pkey_table_len) {
561 mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n", 563 mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
562 attr->pkey_index, dev->limits.pkey_table_len-1); 564 attr->pkey_index, dev->limits.pkey_table_len-1);
563 return -EINVAL; 565 goto out;
564 } 566 }
565 567
566 if ((attr_mask & IB_QP_PORT) && 568 if ((attr_mask & IB_QP_PORT) &&
567 (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) { 569 (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
568 mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num); 570 mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
569 return -EINVAL; 571 goto out;
570 } 572 }
571 573
572 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && 574 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
573 attr->max_rd_atomic > dev->limits.max_qp_init_rdma) { 575 attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
574 mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n", 576 mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
575 attr->max_rd_atomic, dev->limits.max_qp_init_rdma); 577 attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
576 return -EINVAL; 578 goto out;
577 } 579 }
578 580
579 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && 581 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
580 attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) { 582 attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
581 mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n", 583 mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
582 attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift); 584 attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
583 return -EINVAL; 585 goto out;
584 } 586 }
585 587
586 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 588 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
587 if (IS_ERR(mailbox)) 589 if (IS_ERR(mailbox)) {
588 return PTR_ERR(mailbox); 590 err = PTR_ERR(mailbox);
591 goto out;
592 }
589 qp_param = mailbox->buf; 593 qp_param = mailbox->buf;
590 qp_context = &qp_param->context; 594 qp_context = &qp_param->context;
591 memset(qp_param, 0, sizeof *qp_param); 595 memset(qp_param, 0, sizeof *qp_param);
@@ -618,7 +622,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
 			mthca_dbg(dev, "path MTU (%u) is invalid\n",
 				  attr->path_mtu);
-			goto out;
+			goto out_mailbox;
 		}
 		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
 	}
@@ -672,7 +676,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	if (attr_mask & IB_QP_AV) {
 		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
 				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
-			goto out;
+			goto out_mailbox;
 
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
 	}
@@ -686,18 +690,18 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
 			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
 				  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
-			goto out;
+			goto out_mailbox;
 		}
 
 		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
 			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
 				  attr->alt_port_num);
-			goto out;
+			goto out_mailbox;
 		}
 
 		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
 				   attr->alt_ah_attr.port_num))
-			goto out;
+			goto out_mailbox;
 
 		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
 							      attr->alt_port_num << 24);
@@ -793,12 +797,12 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
 			      mailbox, sqd_event, &status);
 	if (err)
-		goto out;
+		goto out_mailbox;
 	if (status) {
 		mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
 			   cur_state, new_state, status);
 		err = -EINVAL;
-		goto out;
+		goto out_mailbox;
 	}
 
 	qp->state = new_state;
@@ -853,8 +857,11 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		}
 	}
 
-out:
+out_mailbox:
 	mthca_free_mailbox(dev, mailbox);
+
+out:
+	mutex_unlock(&qp->mutex);
 	return err;
 }
 
@@ -1100,6 +1107,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 
 	qp->refcount = 1;
 	init_waitqueue_head(&qp->wait);
+	mutex_init(&qp->mutex);
 	qp->state        = IB_QPS_RESET;
 	qp->atomic_rd_en = 0;
 	qp->resp_depth   = 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index b292fefa3b41..fab417c5cf43 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -243,6 +243,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 	spin_lock_init(&srq->lock);
 	srq->refcount = 1;
 	init_waitqueue_head(&srq->wait);
+	mutex_init(&srq->mutex);
 
 	if (mthca_is_memfree(dev))
 		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
@@ -371,7 +372,11 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 	if (attr_mask & IB_SRQ_LIMIT) {
 		if (attr->srq_limit > srq->max)
 			return -EINVAL;
+
+		mutex_lock(&srq->mutex);
 		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
+		mutex_unlock(&srq->mutex);
+
 		if (ret)
 			return ret;
 		if (status)