Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_irq.c')
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_irq.c	36
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 100329ba3343..3e790a326d97 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -5,6 +5,8 @@
  *
  *  Authors: Heiko J Schick <schickhj@de.ibm.com>
  *           Khadija Souissi <souissi@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Joachim Fenkes <fenkes@de.ibm.com>
  *
  *  Copyright (c) 2005 IBM Corporation
  *
@@ -212,6 +214,8 @@ static void cq_event_callback(struct ehca_shca *shca,
 
 	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
 	cq = idr_find(&ehca_cq_idr, token);
+	if (cq)
+		atomic_inc(&cq->nr_events);
 	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
 	if (!cq)
@@ -219,6 +223,9 @@ static void cq_event_callback(struct ehca_shca *shca,
 
 	ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
 
+	if (atomic_dec_and_test(&cq->nr_events))
+		wake_up(&cq->wait_completion);
+
 	return;
 }
 
@@ -414,25 +421,22 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
 		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
 		cq = idr_find(&ehca_cq_idr, token);
+		if (cq)
+			atomic_inc(&cq->nr_events);
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 		if (cq == NULL) {
-			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 			ehca_err(&shca->ib_device,
 				 "Invalid eqe for non-existing cq token=%x",
 				 token);
 			return;
 		}
 		reset_eq_pending(cq);
-		cq->nr_events++;
-		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 		if (ehca_scaling_code)
 			queue_comp_task(cq);
 		else {
 			comp_event_callback(cq);
-			spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-			cq->nr_events--;
-			if (!cq->nr_events)
+			if (atomic_dec_and_test(&cq->nr_events))
 				wake_up(&cq->wait_completion);
-			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 		}
 	} else {
 		ehca_dbg(&shca->ib_device, "Got non completion event");
@@ -478,15 +482,15 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
 			spin_lock(&ehca_cq_idr_lock);
 			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
+			if (eqe_cache[eqe_cnt].cq)
+				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
+			spin_unlock(&ehca_cq_idr_lock);
 			if (!eqe_cache[eqe_cnt].cq) {
-				spin_unlock(&ehca_cq_idr_lock);
 				ehca_err(&shca->ib_device,
 					 "Invalid eqe for non-existing cq "
 					 "token=%x", token);
 				continue;
 			}
-			eqe_cache[eqe_cnt].cq->nr_events++;
-			spin_unlock(&ehca_cq_idr_lock);
 		} else
 			eqe_cache[eqe_cnt].cq = NULL;
 		eqe_cnt++;
@@ -517,11 +521,8 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 			else {
 				struct ehca_cq *cq = eq->eqe_cache[i].cq;
 				comp_event_callback(cq);
-				spin_lock(&ehca_cq_idr_lock);
-				cq->nr_events--;
-				if (!cq->nr_events)
+				if (atomic_dec_and_test(&cq->nr_events))
 					wake_up(&cq->wait_completion);
-				spin_unlock(&ehca_cq_idr_lock);
 			}
 		} else {
 			ehca_dbg(&shca->ib_device, "Got non completion event");
@@ -621,13 +622,10 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
 	while (!list_empty(&cct->cq_list)) {
 		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 		spin_unlock_irqrestore(&cct->task_lock, flags);
-		comp_event_callback(cq);
 
-		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-		cq->nr_events--;
-		if (!cq->nr_events)
+		comp_event_callback(cq);
+		if (atomic_dec_and_test(&cq->nr_events))
 			wake_up(&cq->wait_completion);
-		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
 		spin_lock_irqsave(&cct->task_lock, flags);
 		spin_lock(&cq->task_lock);
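
The pattern this patch introduces can be summarized as: each completion event takes a reference on the CQ (nr_events) while the CQ is still findable under the idr lock, the reference is dropped with atomic_dec_and_test() once the handler is done, and the last holder wakes up whoever is waiting on wait_completion (typically the destroy path). Below is a minimal userspace C sketch of that reference-count-and-wait idea, using C11 atomics and pthreads instead of the kernel primitives; the names (struct demo_cq, cq_event_begin, cq_event_end, cq_wait_idle) are illustrative only and are not part of the ehca driver.

/* Build with: cc -std=c11 -pthread demo_cq.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct demo_cq {
	atomic_int nr_events;        /* in-flight event handlers */
	pthread_mutex_t lock;
	pthread_cond_t idle;         /* stands in for wait_completion */
};

/* Taken while the CQ is still findable (under the idr lock in the driver). */
static void cq_event_begin(struct demo_cq *cq)
{
	atomic_fetch_add(&cq->nr_events, 1);
}

/* Dropped after the event is handled; the last holder signals waiters. */
static void cq_event_end(struct demo_cq *cq)
{
	if (atomic_fetch_sub(&cq->nr_events, 1) == 1) {
		pthread_mutex_lock(&cq->lock);
		pthread_cond_broadcast(&cq->idle);
		pthread_mutex_unlock(&cq->lock);
	}
}

/* Destroy path: wait until no event handler still references the CQ. */
static void cq_wait_idle(struct demo_cq *cq)
{
	pthread_mutex_lock(&cq->lock);
	while (atomic_load(&cq->nr_events) != 0)
		pthread_cond_wait(&cq->idle, &cq->lock);
	pthread_mutex_unlock(&cq->lock);
}

int main(void)
{
	struct demo_cq cq = {
		.nr_events = 0,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.idle = PTHREAD_COND_INITIALIZER,
	};

	cq_event_begin(&cq);   /* event found the CQ */
	cq_event_end(&cq);     /* handler finished; waiters are woken */
	cq_wait_idle(&cq);     /* destruction may now proceed safely */
	printf("cq idle, nr_events=%d\n", atomic_load(&cq.nr_events));
	return 0;
}

Because the decrement and the wakeup no longer need ehca_cq_idr_lock, the idr lock is held only around the lookup plus the reference bump, which is what allows the patch to drop the lock earlier in process_eqe(), ehca_process_eq() and run_comp_task().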