author    Roland Dreier <rolandd@cisco.com>  2006-06-17 23:37:41 -0400
committer Roland Dreier <rolandd@cisco.com>  2006-06-17 23:37:41 -0400
commit    c93b6fbaa99bb3a1552e14317296be14dde51dfb (patch)
tree      bad61dc728f0eb28fcfdf01953cd0ae43a4b2350 /drivers/infiniband/hw/mthca/mthca_provider.c
parent    c9c5d9feef86debee4d8e77a738ad86877cf371a (diff)
IB/mthca: Make all device methods truly reentrant
Documentation/infiniband/core_locking.txt says:

    All of the methods in struct ib_device exported by a low-level
    driver must be fully reentrant.  The low-level driver is required
    to perform all synchronization necessary to maintain consistency,
    even if multiple function calls using the same object are run
    simultaneously.

However, mthca's modify_qp, modify_srq and resize_cq methods are
currently not reentrant.  Add a mutex to the QP, SRQ and CQ structures
so that these calls can be properly serialized.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
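The fix follows the usual kernel pattern: take a per-object mutex on
entry and route every exit through a single unlock label.  A minimal
sketch of that pattern, using hypothetical names (demo_cq,
demo_resize_cq) rather than mthca's actual structures:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    struct demo_cq {
            struct mutex    mutex;          /* serializes resize against itself */
            int             cqe;            /* current number of entries */
    };

    static int demo_resize_cq(struct demo_cq *cq, int entries)
    {
            int ret;

            mutex_lock(&cq->mutex);

            if (entries == cq->cqe) {       /* already the requested size */
                    ret = 0;
                    goto out;
            }

            /*
             * ... allocate the new buffer and swap it in; any failure
             * sets ret and jumps to out so the mutex is always released ...
             */
            cq->cqe = entries;
            ret = 0;

    out:
            mutex_unlock(&cq->mutex);
            return ret;
    }

Serializing on the object rather than the device keeps concurrent
operations on unrelated CQs from contending for a single lock.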
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_provider.c')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c  |  23
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 8f89ba7c9147..230ae21db8fd 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -793,18 +793,24 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	if (entries < 1 || entries > dev->limits.max_cqes)
 		return -EINVAL;
 
+	mutex_lock(&cq->mutex);
+
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries == ibcq->cqe + 1)
-		return 0;
+	if (entries == ibcq->cqe + 1) {
+		ret = 0;
+		goto out;
+	}
 
 	if (cq->is_kernel) {
 		ret = mthca_alloc_resize_buf(dev, cq, entries);
 		if (ret)
-			return ret;
+			goto out;
 		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
 	} else {
-		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
-			return -EFAULT;
+		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+			ret = -EFAULT;
+			goto out;
+		}
 		lkey = ucmd.lkey;
 	}
 
@@ -821,7 +827,7 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 			cq->resize_buf = NULL;
 			spin_unlock_irq(&cq->lock);
 		}
-		return ret;
+		goto out;
 	}
 
 	if (cq->is_kernel) {
@@ -848,7 +854,10 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	} else
 		ibcq->cqe = entries - 1;
 
-	return 0;
+out:
+	mutex_unlock(&cq->mutex);
+
+	return ret;
 }
 
 static int mthca_destroy_cq(struct ib_cq *cq)
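The diffstat above is limited to mthca_provider.c; the hunks that add
the mutex field to the CQ, QP and SRQ structures and initialize it
live in the commit's other files, which this page does not show.  A
sketch of the assumed setup, again with the hypothetical demo_cq
standing in for the driver's private structure:

    #include <linux/mutex.h>

    struct demo_cq {
            struct mutex mutex;     /* serializes resize_cq on this CQ */
            /* ... existing CQ state ... */
    };

    static void demo_cq_init(struct demo_cq *cq)
    {
            mutex_init(&cq->mutex); /* must run before the first mutex_lock() */
    }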