author     Hoang-Nam Nguyen <hnguyen@linux.vnet.ibm.com>   2007-02-28 12:01:02 -0500
committer  Roland Dreier <rolandd@cisco.com>                2007-03-01 16:04:05 -0500
commit     31726798bd8fbef6244b28cf962f4a4c45793dea (patch)
tree       78f8067c58e003c639f58cf9fbf89d08e0d85465 /drivers/infiniband/hw
parent     a27cbe878203076247c1b5287f5ab59ed143b560 (diff)
IB/ehca: Fix sync between completion handler and destroy cq
This patch fixes two issues reported by Roland Dreier and Christoph Hellwig:

- Mismatched sync/locking between completion handler and destroy cq.
  We introduce a counter nr_events per cq to track the number of irq
  events seen. This counter is incremented when an event queue entry
  is seen and decremented after the completion handler has been called,
  regardless of whether the scaling code is active. Note that
  nr_callbacks tracks the number of events assigned to a cpu, so the
  two counters can diverge. The sync between a running completion
  handler and destroy cq is done via the global spin lock
  ehca_cq_idr_lock.

- Replace yield() by a wait_event() on the counter above becoming zero.

Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
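The scheme above is essentially a drain-before-destroy count: every dispatched completion event bumps nr_events under ehca_cq_idr_lock, every finished handler drops it and wakes wait_completion, and destroy cq sleeps in wait_event() until the count reaches zero instead of spinning in yield(). As a rough illustration only, the stand-alone user-space sketch below models that pattern with a pthread mutex and condition variable standing in for the spin lock and the wait queue; the names fake_cq, event_seen, event_done and destroy_wait are hypothetical and do not exist in the driver.

/* Minimal sketch (not driver code): a pthread mutex models
 * ehca_cq_idr_lock and a condition variable models the
 * wait_completion wait queue; nr_events is the drain count.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct fake_cq {                        /* hypothetical stand-in for struct ehca_cq */
	pthread_mutex_t lock;           /* models ehca_cq_idr_lock        */
	pthread_cond_t  drained;        /* models cq->wait_completion     */
	unsigned int    nr_events;      /* models cq->nr_events           */
};

/* IRQ path: an event queue entry was seen for this CQ. */
static void event_seen(struct fake_cq *cq)
{
	pthread_mutex_lock(&cq->lock);
	cq->nr_events++;
	pthread_mutex_unlock(&cq->lock);
}

/* Completion handler finished: drop the count and wake a waiting destroy. */
static void event_done(struct fake_cq *cq)
{
	pthread_mutex_lock(&cq->lock);
	if (--cq->nr_events == 0)
		pthread_cond_broadcast(&cq->drained);
	pthread_mutex_unlock(&cq->lock);
}

/* Destroy path: sleep (rather than yield) until all events have drained. */
static void destroy_wait(struct fake_cq *cq)
{
	pthread_mutex_lock(&cq->lock);
	while (cq->nr_events)
		pthread_cond_wait(&cq->drained, &cq->lock);
	pthread_mutex_unlock(&cq->lock);
}

int main(void)
{
	struct fake_cq cq = {
		.lock      = PTHREAD_MUTEX_INITIALIZER,
		.drained   = PTHREAD_COND_INITIALIZER,
		.nr_events = 0,
	};

	event_seen(&cq);    /* event dispatched to a handler        */
	event_done(&cq);    /* handler completed                    */
	destroy_wait(&cq);  /* returns immediately: count is zero   */
	printf("cq drained, safe to destroy\n");
	return 0;
}

In the kernel patch the same drain is expressed with wait_event()/wake_up() on cq->wait_completion, and the count is read under ehca_cq_idr_lock because a completion handler can run concurrently with destroy cq.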
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--   drivers/infiniband/hw/ehca/ehca_classes.h |  6
-rw-r--r--   drivers/infiniband/hw/ehca/ehca_cq.c      | 16
-rw-r--r--   drivers/infiniband/hw/ehca/ehca_irq.c     | 59
-rw-r--r--   drivers/infiniband/hw/ehca/ehca_main.c    |  4
4 files changed, 60 insertions(+), 25 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 40404c9e2817..82ded44c6cee 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -52,6 +52,8 @@ struct ehca_mw;
 struct ehca_pd;
 struct ehca_av;
 
+#include <linux/wait.h>
+
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
 
@@ -153,7 +155,9 @@ struct ehca_cq {
 	spinlock_t cb_lock;
 	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
 	struct list_head entry;
-	u32 nr_callbacks;
+	u32 nr_callbacks;   /* #events assigned to cpu by scaling code */
+	u32 nr_events;      /* #events seen */
+	wait_queue_head_t wait_completion;
 	spinlock_t task_lock;
 	u32 ownpid;
 	/* mmap counter for resources mapped into user space */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 6ebfa27e4e16..e2cdc1a16fe9 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -146,6 +146,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
 	spin_lock_init(&my_cq->spinlock);
 	spin_lock_init(&my_cq->cb_lock);
 	spin_lock_init(&my_cq->task_lock);
+	init_waitqueue_head(&my_cq->wait_completion);
 	my_cq->ownpid = current->tgid;
 
 	cq = &my_cq->ib_cq;
@@ -302,6 +303,16 @@ create_cq_exit1:
 	return cq;
 }
 
+static int get_cq_nr_events(struct ehca_cq *my_cq)
+{
+	int ret;
+	unsigned long flags;
+	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+	ret = my_cq->nr_events;
+	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+	return ret;
+}
+
 int ehca_destroy_cq(struct ib_cq *cq)
 {
 	u64 h_ret;
@@ -329,10 +340,11 @@ int ehca_destroy_cq(struct ib_cq *cq)
 	}
 
 	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-	while (my_cq->nr_callbacks) {
+	while (my_cq->nr_events) {
 		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-		yield();
+		wait_event(my_cq->wait_completion, !get_cq_nr_events(my_cq));
 		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		/* recheck nr_events to assure no cqe has just arrived */
 	}
 
 	idr_remove(&ehca_cq_idr, my_cq->token);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3ec53c687d08..20f36bf8b2b6 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -404,10 +404,11 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 	u32 token;
 	unsigned long flags;
 	struct ehca_cq *cq;
+
 	eqe_value = eqe->entry;
 	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
 	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
-		ehca_dbg(&shca->ib_device, "... completion event");
+		ehca_dbg(&shca->ib_device, "Got completion event");
 		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
 		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
 		cq = idr_find(&ehca_cq_idr, token);
@@ -419,16 +420,20 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 			return;
 		}
 		reset_eq_pending(cq);
-		if (ehca_scaling_code) {
+		cq->nr_events++;
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		if (ehca_scaling_code)
 			queue_comp_task(cq);
-			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-		} else {
-			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		else {
 			comp_event_callback(cq);
+			spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+			cq->nr_events--;
+			if (!cq->nr_events)
+				wake_up(&cq->wait_completion);
+			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 		}
 	} else {
-		ehca_dbg(&shca->ib_device,
-			 "Got non completion event");
+		ehca_dbg(&shca->ib_device, "Got non completion event");
 		parse_identifier(shca, eqe_value);
 	}
 }
@@ -478,6 +483,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 					 "token=%x", token);
 				continue;
 			}
+			eqe_cache[eqe_cnt].cq->nr_events++;
 			spin_unlock(&ehca_cq_idr_lock);
 		} else
 			eqe_cache[eqe_cnt].cq = NULL;
@@ -504,12 +510,18 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 	/* call completion handler for cached eqes */
 	for (i = 0; i < eqe_cnt; i++)
 		if (eq->eqe_cache[i].cq) {
-			if (ehca_scaling_code) {
-				spin_lock(&ehca_cq_idr_lock);
+			if (ehca_scaling_code)
 				queue_comp_task(eq->eqe_cache[i].cq);
-				spin_unlock(&ehca_cq_idr_lock);
-			} else
-				comp_event_callback(eq->eqe_cache[i].cq);
+			else {
+				struct ehca_cq *cq = eq->eqe_cache[i].cq;
+				comp_event_callback(cq);
+				spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+				cq->nr_events--;
+				if (!cq->nr_events)
+					wake_up(&cq->wait_completion);
+				spin_unlock_irqrestore(&ehca_cq_idr_lock,
+						       flags);
+			}
 		} else {
 			ehca_dbg(&shca->ib_device, "Got non completion event");
 			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
@@ -523,7 +535,6 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 		if (!eqe)
 			break;
 		process_eqe(shca, eqe);
-		eqe_cnt++;
 	} while (1);
 
 unlock_irq_spinlock:
@@ -567,8 +578,7 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 		list_add_tail(&__cq->entry, &cct->cq_list);
 		cct->cq_jobs++;
 		wake_up(&cct->wait_queue);
-	}
-	else
+	} else
 		__cq->nr_callbacks++;
 
 	spin_unlock(&__cq->task_lock);
@@ -577,18 +587,21 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 
 static void queue_comp_task(struct ehca_cq *__cq)
 {
-	int cpu;
 	int cpu_id;
 	struct ehca_cpu_comp_task *cct;
+	int cq_jobs;
+	unsigned long flags;
 
-	cpu = get_cpu();
 	cpu_id = find_next_online_cpu(pool);
 	BUG_ON(!cpu_online(cpu_id));
 
 	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
 	BUG_ON(!cct);
 
-	if (cct->cq_jobs > 0) {
+	spin_lock_irqsave(&cct->task_lock, flags);
+	cq_jobs = cct->cq_jobs;
+	spin_unlock_irqrestore(&cct->task_lock, flags);
+	if (cq_jobs > 0) {
 		cpu_id = find_next_online_cpu(pool);
 		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
 		BUG_ON(!cct);
@@ -608,11 +621,17 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
 		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 		spin_unlock_irqrestore(&cct->task_lock, flags);
 		comp_event_callback(cq);
-		spin_lock_irqsave(&cct->task_lock, flags);
 
+		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		cq->nr_events--;
+		if (!cq->nr_events)
+			wake_up(&cq->wait_completion);
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+		spin_lock_irqsave(&cct->task_lock, flags);
 		spin_lock(&cq->task_lock);
 		cq->nr_callbacks--;
-		if (cq->nr_callbacks == 0) {
+		if (!cq->nr_callbacks) {
 			list_del_init(cct->cq_list.next);
 			cct->cq_jobs--;
 		}
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c1835121a822..059da9628bb5 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0021");
+MODULE_VERSION("SVNEHCA_0022");
 
 int ehca_open_aqp1 = 0;
 int ehca_debug_level = 0;
@@ -810,7 +810,7 @@ int __init ehca_module_init(void)
 	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	       "(Rel.: SVNEHCA_0021)\n");
+	       "(Rel.: SVNEHCA_0022)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);