author     Joachim Fenkes <fenkes@de.ibm.com>    2007-07-09 09:31:10 -0400
committer  Roland Dreier <rolandd@cisco.com>     2007-07-09 23:12:27 -0400
commit     26ed687fdd541c2542b79dcd75fb2c82eb36f189
tree       45d5ad42b6b9259f273cc274d0fbf18a6d169e91 /drivers
parent     28db6beb420c756c61dd44d9f2786a0677159e74
IB/ehca: Change idr spinlocks into rwlocks
This eliminates lock contention among IRQs as well as the need to
disable IRQs around idr_find, because there are no IRQ writers.
Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
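For context, the sketch below illustrates the locking scheme the patch moves to; the names are made up for illustration and nothing here is taken from the patch itself. Lookups, which may run in IRQ context, only read the idr, so they can take the lock shared and need no irqsave: a reader interrupting another reader on the same CPU still gets the lock. The writers all run in process context; they take the lock exclusively and must still disable IRQs, since an IRQ-context reader spinning on a write lock held by the interrupted task would deadlock.

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RWLOCK(example_idr_lock);	/* plays the role of ehca_cq_idr_lock */
static DEFINE_IDR(example_idr);

/* Lookup path, callable from IRQ context: read-only, so a shared lock
 * without irqsave is enough, because there are no IRQ-context writers. */
static void *example_lookup(u32 token)
{
	void *obj;

	read_lock(&example_idr_lock);
	obj = idr_find(&example_idr, token);
	read_unlock(&example_idr_lock);

	return obj;
}

/* Update path, process context only: exclusive lock, with IRQs disabled
 * so a local interrupt cannot try read_lock() while we hold the write
 * lock on this CPU. */
static void example_remove(u32 token)
{
	unsigned long flags;

	write_lock_irqsave(&example_idr_lock, flags);
	idr_remove(&example_idr, token);
	write_unlock_irqrestore(&example_idr_lock, flags);
}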
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h |  4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c      | 12
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c     | 19
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c    |  4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c      | 12
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_uverbs.c  |  9
6 files changed, 28 insertions(+), 32 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 8580f2a0ea57..f1e0db2ff16c 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -293,8 +293,8 @@ void ehca_cleanup_av_cache(void);
 int ehca_init_mrmw_cache(void);
 void ehca_cleanup_mrmw_cache(void);
 
-extern spinlock_t ehca_qp_idr_lock;
-extern spinlock_t ehca_cq_idr_lock;
+extern rwlock_t ehca_qp_idr_lock;
+extern rwlock_t ehca_cq_idr_lock;
 extern struct idr ehca_qp_idr;
 extern struct idr ehca_cq_idr;
 
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 3729997457ca..01d4a148bd71 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -163,9 +163,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 			goto create_cq_exit1;
 		}
 
-		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		write_lock_irqsave(&ehca_cq_idr_lock, flags);
 		ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
-		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
 	} while (ret == -EAGAIN);
 
@@ -294,9 +294,9 @@ create_cq_exit3:
 		 "cq_num=%x h_ret=%lx", my_cq, my_cq->cq_number, h_ret);
 
 create_cq_exit2:
-	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+	write_lock_irqsave(&ehca_cq_idr_lock, flags);
 	idr_remove(&ehca_cq_idr, my_cq->token);
-	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
 create_cq_exit1:
 	kmem_cache_free(cq_cache, my_cq);
@@ -334,9 +334,9 @@ int ehca_destroy_cq(struct ib_cq *cq)
 	 * remove the CQ from the idr first to make sure
 	 * no more interrupt tasklets will touch this CQ
 	 */
-	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+	write_lock_irqsave(&ehca_cq_idr_lock, flags);
 	idr_remove(&ehca_cq_idr, my_cq->token);
-	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
 	/* now wait until all pending events have completed */
 	wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
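The ehca_destroy_cq() hunk above depends on a small handshake with the event handlers in ehca_irq.c: a handler pins the CQ by bumping nr_events while it still holds the read lock, and the destroy path, having removed the token under the write lock, waits for the count to drain. A hedged sketch of the handler side follows; the function names and the stand-in struct are hypothetical, and the wake-up half is inferred from the wait_completion usage above rather than shown in this patch.

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <asm/atomic.h>

/* Minimal stand-in for the two struct ehca_cq fields used here */
struct example_cq {
	atomic_t nr_events;		/* events currently in flight */
	wait_queue_head_t wait_completion;
};

static DEFINE_RWLOCK(example_cq_idr_lock);	/* role of ehca_cq_idr_lock */
static DEFINE_IDR(example_cq_idr);		/* role of ehca_cq_idr */

/* IRQ side: pin the CQ *before* dropping the read lock, so a destroy
 * that takes the write lock next still sees this event in nr_events. */
static struct example_cq *event_get_cq(u32 token)
{
	struct example_cq *cq;

	read_lock(&example_cq_idr_lock);
	cq = idr_find(&example_cq_idr, token);
	if (cq)
		atomic_inc(&cq->nr_events);
	read_unlock(&example_cq_idr_lock);

	return cq;
}

/* IRQ side, after the event is processed: unpin the CQ and wake a
 * destroyer blocked in wait_event(). */
static void event_put_cq(struct example_cq *cq)
{
	if (atomic_dec_and_test(&cq->nr_events))
		wake_up(&cq->wait_completion);
}

Once idr_remove() returns under the write lock, no new handler can find the CQ, so wait_event() only has to flush pins already taken; the CQ is never freed under a running handler.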
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3e790a326d97..02b73c84c49b 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -180,12 +180,11 @@ static void qp_event_callback(struct ehca_shca *shca,
 {
 	struct ib_event event;
 	struct ehca_qp *qp;
-	unsigned long flags;
 	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
 
-	spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+	read_lock(&ehca_qp_idr_lock);
 	qp = idr_find(&ehca_qp_idr, token);
-	spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+	read_unlock(&ehca_qp_idr_lock);
 
 
 	if (!qp)
@@ -209,14 +208,13 @@ static void cq_event_callback(struct ehca_shca *shca,
 			      u64 eqe)
 {
 	struct ehca_cq *cq;
-	unsigned long flags;
 	u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
 
-	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+	read_lock(&ehca_cq_idr_lock);
 	cq = idr_find(&ehca_cq_idr, token);
 	if (cq)
 		atomic_inc(&cq->nr_events);
-	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+	read_unlock(&ehca_cq_idr_lock);
 
 	if (!cq)
 		return;
@@ -411,7 +409,6 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 {
 	u64 eqe_value;
 	u32 token;
-	unsigned long flags;
 	struct ehca_cq *cq;
 
 	eqe_value = eqe->entry;
@@ -419,11 +416,11 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
 		ehca_dbg(&shca->ib_device, "Got completion event");
 		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		read_lock(&ehca_cq_idr_lock);
 		cq = idr_find(&ehca_cq_idr, token);
 		if (cq)
 			atomic_inc(&cq->nr_events);
-		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		read_unlock(&ehca_cq_idr_lock);
 		if (cq == NULL) {
 			ehca_err(&shca->ib_device,
 				 "Invalid eqe for non-existing cq token=%x",
@@ -480,11 +477,11 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
 		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
 			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-			spin_lock(&ehca_cq_idr_lock);
+			read_lock(&ehca_cq_idr_lock);
 			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
 			if (eqe_cache[eqe_cnt].cq)
 				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
-			spin_unlock(&ehca_cq_idr_lock);
+			read_unlock(&ehca_cq_idr_lock);
 			if (!eqe_cache[eqe_cnt].cq) {
 				ehca_err(&shca->ib_device,
 					 "Invalid eqe for non-existing cq "
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 32396b203f14..28ba2dd24216 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -96,8 +96,8 @@ MODULE_PARM_DESC(static_rate,
 MODULE_PARM_DESC(scaling_code,
 		 "set scaling code (0: disabled/default, 1: enabled)");
 
-DEFINE_SPINLOCK(ehca_qp_idr_lock);
-DEFINE_SPINLOCK(ehca_cq_idr_lock);
+DEFINE_RWLOCK(ehca_qp_idr_lock);
+DEFINE_RWLOCK(ehca_cq_idr_lock);
 DEFINE_IDR(ehca_qp_idr);
 DEFINE_IDR(ehca_cq_idr);
 
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 31d21526df5e..74671250303f 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -512,9 +512,9 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 			goto create_qp_exit0;
 		}
 
-		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+		write_lock_irqsave(&ehca_qp_idr_lock, flags);
 		ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
-		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+		write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
 	} while (ret == -EAGAIN);
 
@@ -733,9 +733,9 @@ create_qp_exit2:
 	hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
 
 create_qp_exit1:
-	spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+	write_lock_irqsave(&ehca_qp_idr_lock, flags);
 	idr_remove(&ehca_qp_idr, my_qp->token);
-	spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
 create_qp_exit0:
 	kmem_cache_free(qp_cache, my_qp);
@@ -1706,9 +1706,9 @@ int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 		}
 	}
 
-	spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+	write_lock_irqsave(&ehca_qp_idr_lock, flags);
 	idr_remove(&ehca_qp_idr, my_qp->token);
-	spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
 	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
 	if (h_ret != H_SUCCESS) {
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index d8fe37d56f1a..3031b3bb56f9 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -253,7 +253,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
 	u32 cur_pid = current->tgid;
 	u32 ret;
-	unsigned long flags;
 	struct ehca_cq *cq;
 	struct ehca_qp *qp;
 	struct ehca_pd *pd;
@@ -261,9 +260,9 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 
 	switch (q_type) {
 	case 1: /* CQ */
-		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		read_lock(&ehca_cq_idr_lock);
 		cq = idr_find(&ehca_cq_idr, idr_handle);
-		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		read_unlock(&ehca_cq_idr_lock);
 
 		/* make sure this mmap really belongs to the authorized user */
 		if (!cq)
@@ -289,9 +288,9 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		break;
 
 	case 2: /* QP */
-		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+		read_lock(&ehca_qp_idr_lock);
 		qp = idr_find(&ehca_qp_idr, idr_handle);
-		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+		read_unlock(&ehca_qp_idr_lock);
 
 		/* make sure this mmap really belongs to the authorized user */
 		if (!qp)