author    Joachim Fenkes <fenkes@de.ibm.com>    2007-07-09 09:31:10 -0400
committer Roland Dreier <rolandd@cisco.com>     2007-07-09 23:12:27 -0400
commit    26ed687fdd541c2542b79dcd75fb2c82eb36f189 (patch)
tree      45d5ad42b6b9259f273cc274d0fbf18a6d169e91 /drivers/infiniband/hw/ehca/ehca_irq.c
parent    28db6beb420c756c61dd44d9f2786a0677159e74 (diff)
IB/ehca: Change idr spinlocks into rwlocks
This eliminates lock contention among IRQs as well as the need to disable
IRQs around idr_find, because there are no IRQ writers.

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
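[Editor's note] To make the locking change concrete, here is a minimal sketch of the read-side pattern this patch open-codes in each event handler below. The token_to_cq() helper name is invented for illustration and is not part of the patch. The reasoning: all idr writers run in process context (the write side lives in other files and, by implication, takes the lock with IRQs disabled), so IRQ-context readers can use a plain read_lock(). Concurrent lookups then proceed in parallel, and idr_find() no longer needs IRQs disabled around it.

	/* Hypothetical helper mirroring the read side after this patch. */
	static struct ehca_cq *token_to_cq(u32 token)
	{
		struct ehca_cq *cq;

		read_lock(&ehca_cq_idr_lock);
		cq = idr_find(&ehca_cq_idr, token);
		if (cq)
			/* Pin the CQ so it cannot go away while the
			 * event is still being processed. */
			atomic_inc(&cq->nr_events);
		read_unlock(&ehca_cq_idr_lock);

		return cq;
	}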
Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_irq.c')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3e790a326d97..02b73c84c49b 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -180,12 +180,11 @@ static void qp_event_callback(struct ehca_shca *shca,
 {
 	struct ib_event event;
 	struct ehca_qp *qp;
-	unsigned long flags;
 	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
 
-	spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+	read_lock(&ehca_qp_idr_lock);
 	qp = idr_find(&ehca_qp_idr, token);
-	spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+	read_unlock(&ehca_qp_idr_lock);
 
 
 	if (!qp)
@@ -209,14 +208,13 @@ static void cq_event_callback(struct ehca_shca *shca,
 			      u64 eqe)
 {
 	struct ehca_cq *cq;
-	unsigned long flags;
 	u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
 
-	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+	read_lock(&ehca_cq_idr_lock);
 	cq = idr_find(&ehca_cq_idr, token);
 	if (cq)
 		atomic_inc(&cq->nr_events);
-	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+	read_unlock(&ehca_cq_idr_lock);
 
 	if (!cq)
 		return;
@@ -411,7 +409,6 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 {
 	u64 eqe_value;
 	u32 token;
-	unsigned long flags;
 	struct ehca_cq *cq;
 
 	eqe_value = eqe->entry;
@@ -419,11 +416,11 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
 		ehca_dbg(&shca->ib_device, "Got completion event");
 		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		read_lock(&ehca_cq_idr_lock);
 		cq = idr_find(&ehca_cq_idr, token);
 		if (cq)
 			atomic_inc(&cq->nr_events);
-		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		read_unlock(&ehca_cq_idr_lock);
 		if (cq == NULL) {
 			ehca_err(&shca->ib_device,
 				 "Invalid eqe for non-existing cq token=%x",
@@ -480,11 +477,11 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
 		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
 			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-			spin_lock(&ehca_cq_idr_lock);
+			read_lock(&ehca_cq_idr_lock);
 			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
 			if (eqe_cache[eqe_cnt].cq)
 				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
-			spin_unlock(&ehca_cq_idr_lock);
+			read_unlock(&ehca_cq_idr_lock);
 			if (!eqe_cache[eqe_cnt].cq) {
 				ehca_err(&shca->ib_device,
 					 "Invalid eqe for non-existing cq "