Diffstat (limited to 'drivers/infiniband/hw/mthca'):

 drivers/infiniband/hw/mthca/mthca_cq.c       | 41
 drivers/infiniband/hw/mthca/mthca_dev.h      |  2
 drivers/infiniband/hw/mthca/mthca_mr.c       | 15
 drivers/infiniband/hw/mthca/mthca_provider.h | 22
 drivers/infiniband/hw/mthca/mthca_qp.c       | 31
 drivers/infiniband/hw/mthca/mthca_srq.c      | 23
 6 files changed, 85 insertions(+), 49 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 312cf90731ea..205854e9c662 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -238,9 +238,9 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 	spin_lock(&dev->cq_table.lock);
 
 	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-
 	if (cq)
-		atomic_inc(&cq->refcount);
+		++cq->refcount;
+
 	spin_unlock(&dev->cq_table.lock);
 
 	if (!cq) {
@@ -254,8 +254,10 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 	if (cq->ibcq.event_handler)
 		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
 
-	if (atomic_dec_and_test(&cq->refcount))
+	spin_lock(&dev->cq_table.lock);
+	if (!--cq->refcount)
 		wake_up(&cq->wait);
+	spin_unlock(&dev->cq_table.lock);
 }
 
 static inline int is_recv_cqe(struct mthca_cqe *cqe)
@@ -267,23 +269,13 @@ static inline int is_recv_cqe(struct mthca_cqe *cqe)
 	return !(cqe->is_send & 0x80);
 }
 
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 		    struct mthca_srq *srq)
 {
-	struct mthca_cq *cq;
 	struct mthca_cqe *cqe;
 	u32 prod_index;
 	int nfreed = 0;
 
-	spin_lock_irq(&dev->cq_table.lock);
-	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-	if (cq)
-		atomic_inc(&cq->refcount);
-	spin_unlock_irq(&dev->cq_table.lock);
-
-	if (!cq)
-		return;
-
 	spin_lock_irq(&cq->lock);
 
 	/*
@@ -301,7 +293,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 
 	if (0)
 		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
-			  qpn, cqn, cq->cons_index, prod_index);
+			  qpn, cq->cqn, cq->cons_index, prod_index);
 
 	/*
 	 * Now sweep backwards through the CQ, removing CQ entries
@@ -325,8 +317,6 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 	}
 
 	spin_unlock_irq(&cq->lock);
-	if (atomic_dec_and_test(&cq->refcount))
-		wake_up(&cq->wait);
 }
 
 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
@@ -821,7 +811,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	}
 
 	spin_lock_init(&cq->lock);
-	atomic_set(&cq->refcount, 1);
+	cq->refcount = 1;
 	init_waitqueue_head(&cq->wait);
 
 	memset(cq_context, 0, sizeof *cq_context);
@@ -896,6 +886,17 @@ err_out:
 	return err;
 }
 
+static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
+{
+	int c;
+
+	spin_lock_irq(&dev->cq_table.lock);
+	c = cq->refcount;
+	spin_unlock_irq(&dev->cq_table.lock);
+
+	return c;
+}
+
 void mthca_free_cq(struct mthca_dev *dev,
 		   struct mthca_cq *cq)
 {
@@ -929,6 +930,7 @@ void mthca_free_cq(struct mthca_dev *dev,
 	spin_lock_irq(&dev->cq_table.lock);
 	mthca_array_clear(&dev->cq_table.cq,
 			  cq->cqn & (dev->limits.num_cqs - 1));
+	--cq->refcount;
 	spin_unlock_irq(&dev->cq_table.lock);
 
 	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
@@ -936,8 +938,7 @@ void mthca_free_cq(struct mthca_dev *dev,
 	else
 		synchronize_irq(dev->pdev->irq);
 
-	atomic_dec(&cq->refcount);
-	wait_event(cq->wait, !atomic_read(&cq->refcount));
+	wait_event(cq->wait, !get_cq_refcount(dev, cq));
 
 	if (cq->is_kernel) {
 		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
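All of the mthca_cq.c hunks above follow one pattern: the refcount becomes a plain int, and every increment, decrement, and read of it happens under dev->cq_table.lock, so a wake_up() can no longer slip between the destroyer's read of the count and its sleep the way the old atomic_dec_and_test()/wait_event() pairing allowed. A minimal stand-alone sketch of that pattern; struct obj, obj_event, and table_lock are illustrative names, not mthca identifiers:

#include <linux/spinlock.h>
#include <linux/wait.h>

struct obj {
	int refcount;		/* protected by table_lock */
	wait_queue_head_t wait;	/* destroyer sleeps here */
};

static DEFINE_SPINLOCK(table_lock);

/* event path: pin the object, use it unlocked, then drop the pin */
static void obj_event(struct obj *o)
{
	spin_lock(&table_lock);
	++o->refcount;
	spin_unlock(&table_lock);

	/* ... deliver the event without holding the lock ... */

	spin_lock(&table_lock);
	if (!--o->refcount)	/* we held the last reference */
		wake_up(&o->wait);
	spin_unlock(&table_lock);
}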
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 4c1dcb4c1822..f8160b8de090 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev,
 void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
 void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 		    enum ib_event_type event_type);
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 		    struct mthca_srq *srq);
 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
 int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 25e1c1db9a40..a486dec1707e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -761,6 +761,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
 
 int __devinit mthca_init_mr_table(struct mthca_dev *dev)
 {
+	unsigned long addr;
 	int err, i;
 
 	err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
@@ -796,9 +797,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
 		goto err_fmr_mpt;
 	}
 
+	addr = pci_resource_start(dev->pdev, 4) +
+		((pci_resource_len(dev->pdev, 4) - 1) &
+		 dev->mr_table.mpt_base);
+
 	dev->mr_table.tavor_fmr.mpt_base =
-		ioremap(dev->mr_table.mpt_base,
-			(1 << i) * sizeof (struct mthca_mpt_entry));
+		ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry));
 
 	if (!dev->mr_table.tavor_fmr.mpt_base) {
 		mthca_warn(dev, "MPT ioremap for FMR failed.\n");
@@ -806,9 +810,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
 		goto err_fmr_mpt;
 	}
 
+	addr = pci_resource_start(dev->pdev, 4) +
+		((pci_resource_len(dev->pdev, 4) - 1) &
+		 dev->mr_table.mtt_base);
+
 	dev->mr_table.tavor_fmr.mtt_base =
-		ioremap(dev->mr_table.mtt_base,
-			(1 << i) * MTHCA_MTT_SEG_SIZE);
+		ioremap(addr, (1 << i) * MTHCA_MTT_SEG_SIZE);
 	if (!dev->mr_table.tavor_fmr.mtt_base) {
 		mthca_warn(dev, "MTT ioremap for FMR failed.\n");
 		err = -ENOMEM;
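The mthca_mr.c hunks fix the Tavor FMR mapping by folding the MPT/MTT offset into the device's BAR 4 range instead of handing ioremap() the bare offset. A distilled sketch of just that computation; the helper name map_in_bar4 is hypothetical, while pci_resource_start(), pci_resource_len(), and ioremap() are the real kernel interfaces used above:

#include <linux/pci.h>
#include <linux/io.h>

/* rebase an offset onto BAR 4's physical range and map it */
static void __iomem *map_in_bar4(struct pci_dev *pdev,
				 unsigned long offset, size_t size)
{
	unsigned long addr = pci_resource_start(pdev, 4) +
		((pci_resource_len(pdev, 4) - 1) & offset);

	return ioremap(addr, size);
}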
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 6676a786d690..179a8f610d0f 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -139,11 +139,12 @@ struct mthca_ah {
  * a qp may be locked, with the send cq locked first.  No other
  * nesting should be done.
  *
- * Each struct mthca_cq/qp also has an atomic_t ref count.  The
- * pointer from the cq/qp_table to the struct counts as one reference.
- * This reference also is good for access through the consumer API, so
- * modifying the CQ/QP etc doesn't need to take another reference.
- * Access because of a completion being polled does need a reference.
+ * Each struct mthca_cq/qp also has a ref count, protected by the
+ * corresponding table lock.  The pointer from the cq/qp_table to the
+ * struct counts as one reference.  This reference also is good for
+ * access through the consumer API, so modifying the CQ/QP etc doesn't
+ * need to take another reference.  Access to a QP because of a
+ * completion being polled does not need a reference either.
 *
 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
@@ -159,8 +160,9 @@ struct mthca_ah {
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
- * - decrement ref count
+ * - lock cq/qp_table
+ * - remove pointer and decrement ref count
+ * - unlock cq/qp_table lock
 * - wait_event until ref count is zero
 *
 * It is the consumer's responsibility to make sure that no QP
@@ -197,7 +199,7 @@ struct mthca_cq_resize {
 struct mthca_cq {
 	struct ib_cq ibcq;
 	spinlock_t lock;
-	atomic_t refcount;
+	int refcount;
 	int cqn;
 	u32 cons_index;
 	struct mthca_cq_buf buf;
@@ -217,7 +219,7 @@ struct mthca_cq {
 struct mthca_srq {
 	struct ib_srq ibsrq;
 	spinlock_t lock;
-	atomic_t refcount;
+	int refcount;
 	int srqn;
 	int max;
 	int max_gs;
@@ -254,7 +256,7 @@ struct mthca_wq {
 
 struct mthca_qp {
 	struct ib_qp ibqp;
-	atomic_t refcount;
+	int refcount;
 	u32 qpn;
 	int is_direct;
 	u8 port; /* for SQP and memfree use only */
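The reworked comment above spells out the destroy side of the protocol: drop the table's reference while still holding the table lock, then sleep until every transient user is gone. Continuing the hypothetical sketch from the mthca_cq.c note (same struct obj and table_lock; obj_refcount and obj_destroy are likewise illustrative, mirroring the get_*_refcount helpers this diff adds):

static int obj_refcount(struct obj *o)
{
	int c;

	spin_lock_irq(&table_lock);
	c = o->refcount;
	spin_unlock_irq(&table_lock);

	return c;
}

static void obj_destroy(struct obj *o)
{
	spin_lock_irq(&table_lock);
	/* ... remove the table's pointer to o here ... */
	--o->refcount;		/* drop the table's reference */
	spin_unlock_irq(&table_lock);

	/* each in-flight obj_event() wakes us as it unpins */
	wait_event(o->wait, !obj_refcount(o));
	/* no users remain; safe to free o's resources */
}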
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f37b0e367323..19765f6f8d58 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -240,7 +240,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
 	spin_lock(&dev->qp_table.lock);
 	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
 	if (qp)
-		atomic_inc(&qp->refcount);
+		++qp->refcount;
 	spin_unlock(&dev->qp_table.lock);
 
 	if (!qp) {
@@ -257,8 +257,10 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
 	if (qp->ibqp.event_handler)
 		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
 
-	if (atomic_dec_and_test(&qp->refcount))
+	spin_lock(&dev->qp_table.lock);
+	if (!--qp->refcount)
 		wake_up(&qp->wait);
+	spin_unlock(&dev->qp_table.lock);
 }
 
 static int to_mthca_state(enum ib_qp_state ib_state)
@@ -833,10 +835,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	 * entries and reinitialize the QP.
 	 */
 	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
-		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
 			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
 				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 
 		mthca_wq_init(&qp->sq);
@@ -1096,7 +1098,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 	int ret;
 	int i;
 
-	atomic_set(&qp->refcount, 1);
+	qp->refcount = 1;
 	init_waitqueue_head(&qp->wait);
 	qp->state = IB_QPS_RESET;
 	qp->atomic_rd_en = 0;
@@ -1318,6 +1320,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 	return err;
 }
 
+static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
+{
+	int c;
+
+	spin_lock_irq(&dev->qp_table.lock);
+	c = qp->refcount;
+	spin_unlock_irq(&dev->qp_table.lock);
+
+	return c;
+}
+
 void mthca_free_qp(struct mthca_dev *dev,
 		   struct mthca_qp *qp)
 {
@@ -1339,14 +1352,14 @@ void mthca_free_qp(struct mthca_dev *dev,
 	spin_lock(&dev->qp_table.lock);
 	mthca_array_clear(&dev->qp_table.qp,
 			  qp->qpn & (dev->limits.num_qps - 1));
+	--qp->refcount;
 	spin_unlock(&dev->qp_table.lock);
 
 	if (send_cq != recv_cq)
 		spin_unlock(&recv_cq->lock);
 	spin_unlock_irq(&send_cq->lock);
 
-	atomic_dec(&qp->refcount);
-	wait_event(qp->wait, !atomic_read(&qp->refcount));
+	wait_event(qp->wait, !get_qp_refcount(dev, qp));
 
 	if (qp->state != IB_QPS_RESET)
 		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
@@ -1358,10 +1371,10 @@ void mthca_free_qp(struct mthca_dev *dev,
 	 * unref the mem-free tables and free the QPN in our table.
 	 */
 	if (!qp->ibqp.uobject) {
-		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
 			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
 				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 
 		mthca_free_memfree(dev, qp);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index adcaf85355ae..1ea433291fa7 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -241,7 +241,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 		goto err_out_mailbox;
 
 	spin_lock_init(&srq->lock);
-	atomic_set(&srq->refcount, 1);
+	srq->refcount = 1;
 	init_waitqueue_head(&srq->wait);
 
 	if (mthca_is_memfree(dev))
@@ -308,6 +308,17 @@ err_out:
 	return err;
 }
 
+static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+	int c;
+
+	spin_lock_irq(&dev->srq_table.lock);
+	c = srq->refcount;
+	spin_unlock_irq(&dev->srq_table.lock);
+
+	return c;
+}
+
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
 {
 	struct mthca_mailbox *mailbox;
@@ -329,10 +340,10 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
 	spin_lock_irq(&dev->srq_table.lock);
 	mthca_array_clear(&dev->srq_table.srq,
 			  srq->srqn & (dev->limits.num_srqs - 1));
+	--srq->refcount;
 	spin_unlock_irq(&dev->srq_table.lock);
 
-	atomic_dec(&srq->refcount);
-	wait_event(srq->wait, !atomic_read(&srq->refcount));
+	wait_event(srq->wait, !get_srq_refcount(dev, srq));
 
 	if (!srq->ibsrq.uobject) {
 		mthca_free_srq_buf(dev, srq);
@@ -414,7 +425,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 	spin_lock(&dev->srq_table.lock);
 	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
 	if (srq)
-		atomic_inc(&srq->refcount);
+		++srq->refcount;
 	spin_unlock(&dev->srq_table.lock);
 
 	if (!srq) {
@@ -431,8 +442,10 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 		srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
 
 out:
-	if (atomic_dec_and_test(&srq->refcount))
+	spin_lock(&dev->srq_table.lock);
+	if (!--srq->refcount)
 		wake_up(&srq->wait);
+	spin_unlock(&dev->srq_table.lock);
 }
 
 /*