author	Hoang-Nam Nguyen <hnguyen@linux.vnet.ibm.com>	2007-02-28 12:01:02 -0500
committer	Roland Dreier <rolandd@cisco.com>	2007-03-01 16:04:05 -0500
commit	31726798bd8fbef6244b28cf962f4a4c45793dea (patch)
tree	78f8067c58e003c639f58cf9fbf89d08e0d85465 /drivers/infiniband/hw/ehca/ehca_irq.c
parent	a27cbe878203076247c1b5287f5ab59ed143b560 (diff)
IB/ehca: Fix sync between completion handler and destroy cq
This patch fixes two issues reported by Roland Dreier and Christoph Hellwig:

- Mismatched sync/locking between the completion handler and destroy cq.
  A per-cq counter nr_events is introduced to track the number of irq events
  seen. It is incremented when an event queue entry is seen and decremented
  after the completion handler has been called, regardless of whether the
  scaling code is active. Note that nr_callbacks tracks the number of events
  assigned to a cpu, so the two counters can diverge. The sync between a
  running completion handler and destroy cq is done using the global spin
  lock ehca_cq_idr_lock.

- Replace yield by a wait_event for the counter above to become zero.

Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_irq.c')
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_irq.c	59
1 file changed, 39 insertions, 20 deletions
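
For context, the wait side that this counter enables lives in the cq destroy path, which is not part of this file. Below is a minimal sketch, not the driver's actual code, of how a destroy routine could use nr_events and wait_completion together with ehca_cq_idr_lock; the helper name get_cq_nr_events, the entry point ehca_destroy_cq, and the token/ib_cq fields are assumptions for illustration only.

/*
 * Sketch only: how a destroy-cq path could wait for in-flight completion
 * handlers using the nr_events counter and wait_completion wait queue
 * added by this patch.  get_cq_nr_events(), ehca_destroy_cq() and the
 * token/ib_cq fields are assumed names, not taken from this diff.
 */
static int get_cq_nr_events(struct ehca_cq *my_cq)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
	ret = my_cq->nr_events;		/* events still being handled */
	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

	return ret;
}

int ehca_destroy_cq(struct ib_cq *cq)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	unsigned long flags;

	/* unpublish the cq so process_eqe() can no longer find it */
	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, my_cq->token);
	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

	/* instead of yield(): sleep until all pending handlers have finished */
	wait_event(my_cq->wait_completion, !get_cq_nr_events(my_cq));

	/* hardware resource teardown would follow here */
	return 0;
}

The design point is that nr_events is read and written only under ehca_cq_idr_lock, the same lock that guards the idr lookup, so a successful idr_find and the matching nr_events increment are atomic with respect to the removal done by destroy cq.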
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3ec53c687d0..20f36bf8b2b 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -404,10 +404,11 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 	u32 token;
 	unsigned long flags;
 	struct ehca_cq *cq;
+
 	eqe_value = eqe->entry;
 	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
 	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
-		ehca_dbg(&shca->ib_device, "... completion event");
+		ehca_dbg(&shca->ib_device, "Got completion event");
 		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
 		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
 		cq = idr_find(&ehca_cq_idr, token);
@@ -419,16 +420,20 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 			return;
 		}
 		reset_eq_pending(cq);
-		if (ehca_scaling_code) {
+		cq->nr_events++;
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		if (ehca_scaling_code)
 			queue_comp_task(cq);
-			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-		} else {
-			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		else {
 			comp_event_callback(cq);
+			spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+			cq->nr_events--;
+			if (!cq->nr_events)
+				wake_up(&cq->wait_completion);
+			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 		}
 	} else {
-		ehca_dbg(&shca->ib_device,
-			 "Got non completion event");
+		ehca_dbg(&shca->ib_device, "Got non completion event");
 		parse_identifier(shca, eqe_value);
 	}
 }
@@ -478,6 +483,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 					 "token=%x", token);
 				continue;
 			}
+			eqe_cache[eqe_cnt].cq->nr_events++;
 			spin_unlock(&ehca_cq_idr_lock);
 		} else
 			eqe_cache[eqe_cnt].cq = NULL;
@@ -504,12 +510,18 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 	/* call completion handler for cached eqes */
 	for (i = 0; i < eqe_cnt; i++)
 		if (eq->eqe_cache[i].cq) {
-			if (ehca_scaling_code) {
-				spin_lock(&ehca_cq_idr_lock);
+			if (ehca_scaling_code)
 				queue_comp_task(eq->eqe_cache[i].cq);
-				spin_unlock(&ehca_cq_idr_lock);
-			} else
-				comp_event_callback(eq->eqe_cache[i].cq);
+			else {
+				struct ehca_cq *cq = eq->eqe_cache[i].cq;
+				comp_event_callback(cq);
+				spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+				cq->nr_events--;
+				if (!cq->nr_events)
+					wake_up(&cq->wait_completion);
+				spin_unlock_irqrestore(&ehca_cq_idr_lock,
+						       flags);
+			}
 		} else {
 			ehca_dbg(&shca->ib_device, "Got non completion event");
 			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
@@ -523,7 +535,6 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 		if (!eqe)
 			break;
 		process_eqe(shca, eqe);
-		eqe_cnt++;
 	} while (1);
 
 unlock_irq_spinlock:
@@ -567,8 +578,7 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 		list_add_tail(&__cq->entry, &cct->cq_list);
 		cct->cq_jobs++;
 		wake_up(&cct->wait_queue);
-	}
-	else
+	} else
 		__cq->nr_callbacks++;
 
 	spin_unlock(&__cq->task_lock);
@@ -577,18 +587,21 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 
 static void queue_comp_task(struct ehca_cq *__cq)
 {
-	int cpu;
 	int cpu_id;
 	struct ehca_cpu_comp_task *cct;
+	int cq_jobs;
+	unsigned long flags;
 
-	cpu = get_cpu();
 	cpu_id = find_next_online_cpu(pool);
 	BUG_ON(!cpu_online(cpu_id));
 
 	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
 	BUG_ON(!cct);
 
-	if (cct->cq_jobs > 0) {
+	spin_lock_irqsave(&cct->task_lock, flags);
+	cq_jobs = cct->cq_jobs;
+	spin_unlock_irqrestore(&cct->task_lock, flags);
+	if (cq_jobs > 0) {
 		cpu_id = find_next_online_cpu(pool);
 		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
 		BUG_ON(!cct);
@@ -608,11 +621,17 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
 		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 		spin_unlock_irqrestore(&cct->task_lock, flags);
 		comp_event_callback(cq);
-		spin_lock_irqsave(&cct->task_lock, flags);
 
+		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		cq->nr_events--;
+		if (!cq->nr_events)
+			wake_up(&cq->wait_completion);
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+		spin_lock_irqsave(&cct->task_lock, flags);
 		spin_lock(&cq->task_lock);
 		cq->nr_callbacks--;
-		if (cq->nr_callbacks == 0) {
+		if (!cq->nr_callbacks) {
 			list_del_init(cct->cq_list.next);
 			cct->cq_jobs--;
 		}