Diffstat (limited to 'drivers/infiniband/hw/ehca')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h |  6
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c      | 16
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c     | 67
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c    |  4
4 files changed, 68 insertions(+), 25 deletions(-)
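
Summary of the change: completion-event accounting is reworked so a CQ cannot be destroyed while events for it are still in flight. A new per-CQ counter, nr_events, is incremented under ehca_cq_idr_lock whenever an event is dispatched for the CQ and decremented once its completion handler has run; ehca_destroy_cq() now sleeps on the new wait_completion waitqueue until the counter drains to zero, replacing the previous yield() busy-wait loop. The CPU-hotplug notifier for the completion-task pool is wrapped in #ifdef CONFIG_HOTPLUG_CPU so the driver builds on kernels without CPU hotplug, and the module version moves from SVNEHCA_0021 to SVNEHCA_0022.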
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 40404c9e2817..82ded44c6cee 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -52,6 +52,8 @@ struct ehca_mw;
 struct ehca_pd;
 struct ehca_av;
 
+#include <linux/wait.h>
+
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
 
@@ -153,7 +155,9 @@ struct ehca_cq {
 	spinlock_t cb_lock;
 	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
 	struct list_head entry;
-	u32 nr_callbacks;
+	u32 nr_callbacks;   /* #events assigned to cpu by scaling code */
+	u32 nr_events;      /* #events seen */
+	wait_queue_head_t wait_completion;
 	spinlock_t task_lock;
 	u32 ownpid;
 	/* mmap counter for resources mapped into user space */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 6ebfa27e4e16..e2cdc1a16fe9 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -146,6 +146,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
 	spin_lock_init(&my_cq->spinlock);
 	spin_lock_init(&my_cq->cb_lock);
 	spin_lock_init(&my_cq->task_lock);
+	init_waitqueue_head(&my_cq->wait_completion);
 	my_cq->ownpid = current->tgid;
 
 	cq = &my_cq->ib_cq;
@@ -302,6 +303,16 @@ create_cq_exit1:
 	return cq;
 }
 
+static int get_cq_nr_events(struct ehca_cq *my_cq)
+{
+	int ret;
+	unsigned long flags;
+	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+	ret = my_cq->nr_events;
+	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+	return ret;
+}
+
 int ehca_destroy_cq(struct ib_cq *cq)
 {
 	u64 h_ret;
@@ -329,10 +340,11 @@ int ehca_destroy_cq(struct ib_cq *cq)
 	}
 
 	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-	while (my_cq->nr_callbacks) {
+	while (my_cq->nr_events) {
 		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-		yield();
+		wait_event(my_cq->wait_completion, !get_cq_nr_events(my_cq));
 		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		/* recheck nr_events to assure no cqe has just arrived */
 	}
 
 	idr_remove(&ehca_cq_idr, my_cq->token);
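
The destroy path above relies on a simple drain pattern: every dispatched event increments nr_events under ehca_cq_idr_lock, every completed handler decrements it and wakes wait_completion when it hits zero, and ehca_destroy_cq() sleeps until the count drains instead of spinning on yield(). A minimal userspace sketch of the same pattern, with a pthread mutex and condition variable standing in for the kernel spinlock and waitqueue, follows; the names event_begin, event_end, and destroy_cq are illustrative only, not the driver's API.

/*
 * Minimal userspace analogue of the nr_events/wait_completion drain
 * pattern introduced above.  A mutex-protected counter tracks in-flight
 * events; the destroyer sleeps on a condition variable until it reaches
 * zero.  Illustrative only; not the driver's code.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int nr_events;	/* events dispatched but not yet handled */

static void event_begin(void)	/* like cq->nr_events++ in process_eqe() */
{
	pthread_mutex_lock(&lock);
	nr_events++;
	pthread_mutex_unlock(&lock);
}

static void event_end(void)	/* like the decrement + wake_up() */
{
	pthread_mutex_lock(&lock);
	if (--nr_events == 0)
		pthread_cond_signal(&drained);
	pthread_mutex_unlock(&lock);
}

static void destroy_cq(void)	/* like wait_event() in ehca_destroy_cq() */
{
	pthread_mutex_lock(&lock);
	while (nr_events)	/* recheck on every wakeup */
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
	puts("all events drained; safe to tear down");
}

static void *handler(void *arg)
{
	(void)arg;
	usleep(1000);	/* stand-in for comp_event_callback() */
	event_end();
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	int i;

	for (i = 0; i < 4; i++) {
		event_begin();	/* count the event before handing it off */
		pthread_create(&t[i], NULL, handler, NULL);
	}
	destroy_cq();	/* blocks until all four handlers finish */
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}

As in the driver, the decrement and the wakeup happen under the same lock the waiter checks, so the waiter cannot miss the final wakeup.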
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3ec53c687d08..f284be1c9166 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -66,7 +66,9 @@
 static void queue_comp_task(struct ehca_cq *__cq);
 
 static struct ehca_comp_pool* pool;
+#ifdef CONFIG_HOTPLUG_CPU
 static struct notifier_block comp_pool_callback_nb;
+#endif
 
 static inline void comp_event_callback(struct ehca_cq *cq)
 {
@@ -404,10 +406,11 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 	u32 token;
 	unsigned long flags;
 	struct ehca_cq *cq;
+
 	eqe_value = eqe->entry;
 	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
 	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
-		ehca_dbg(&shca->ib_device, "... completion event");
+		ehca_dbg(&shca->ib_device, "Got completion event");
 		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
 		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
 		cq = idr_find(&ehca_cq_idr, token);
@@ -419,16 +422,20 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 			return;
 		}
 		reset_eq_pending(cq);
-		if (ehca_scaling_code) {
+		cq->nr_events++;
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		if (ehca_scaling_code)
 			queue_comp_task(cq);
-			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-		} else {
-			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		else {
 			comp_event_callback(cq);
+			spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+			cq->nr_events--;
+			if (!cq->nr_events)
+				wake_up(&cq->wait_completion);
+			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 		}
 	} else {
-		ehca_dbg(&shca->ib_device,
-			 "Got non completion event");
+		ehca_dbg(&shca->ib_device, "Got non completion event");
 		parse_identifier(shca, eqe_value);
 	}
 }
@@ -478,6 +485,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 					  "token=%x", token);
 				continue;
 			}
+			eqe_cache[eqe_cnt].cq->nr_events++;
 			spin_unlock(&ehca_cq_idr_lock);
 		} else
 			eqe_cache[eqe_cnt].cq = NULL;
@@ -504,12 +512,18 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 	/* call completion handler for cached eqes */
 	for (i = 0; i < eqe_cnt; i++)
 		if (eq->eqe_cache[i].cq) {
-			if (ehca_scaling_code) {
-				spin_lock(&ehca_cq_idr_lock);
+			if (ehca_scaling_code)
 				queue_comp_task(eq->eqe_cache[i].cq);
-				spin_unlock(&ehca_cq_idr_lock);
-			} else
-				comp_event_callback(eq->eqe_cache[i].cq);
+			else {
+				struct ehca_cq *cq = eq->eqe_cache[i].cq;
+				comp_event_callback(cq);
+				spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+				cq->nr_events--;
+				if (!cq->nr_events)
+					wake_up(&cq->wait_completion);
+				spin_unlock_irqrestore(&ehca_cq_idr_lock,
+						       flags);
+			}
 		} else {
 			ehca_dbg(&shca->ib_device, "Got non completion event");
 			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
@@ -523,7 +537,6 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 		if (!eqe)
 			break;
 		process_eqe(shca, eqe);
-		eqe_cnt++;
 	} while (1);
 
 unlock_irq_spinlock:
@@ -567,8 +580,7 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 		list_add_tail(&__cq->entry, &cct->cq_list);
 		cct->cq_jobs++;
 		wake_up(&cct->wait_queue);
-	}
-	else
+	} else
 		__cq->nr_callbacks++;
 
 	spin_unlock(&__cq->task_lock);
@@ -577,18 +589,21 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 
 static void queue_comp_task(struct ehca_cq *__cq)
 {
-	int cpu;
 	int cpu_id;
 	struct ehca_cpu_comp_task *cct;
+	int cq_jobs;
+	unsigned long flags;
 
-	cpu = get_cpu();
 	cpu_id = find_next_online_cpu(pool);
 	BUG_ON(!cpu_online(cpu_id));
 
 	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
 	BUG_ON(!cct);
 
-	if (cct->cq_jobs > 0) {
+	spin_lock_irqsave(&cct->task_lock, flags);
+	cq_jobs = cct->cq_jobs;
+	spin_unlock_irqrestore(&cct->task_lock, flags);
+	if (cq_jobs > 0) {
 		cpu_id = find_next_online_cpu(pool);
 		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
 		BUG_ON(!cct);
@@ -608,11 +623,17 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
 		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 		spin_unlock_irqrestore(&cct->task_lock, flags);
 		comp_event_callback(cq);
-		spin_lock_irqsave(&cct->task_lock, flags);
 
+		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		cq->nr_events--;
+		if (!cq->nr_events)
+			wake_up(&cq->wait_completion);
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+		spin_lock_irqsave(&cct->task_lock, flags);
 		spin_lock(&cq->task_lock);
 		cq->nr_callbacks--;
-		if (cq->nr_callbacks == 0) {
+		if (!cq->nr_callbacks) {
 			list_del_init(cct->cq_list.next);
 			cct->cq_jobs--;
 		}
@@ -714,6 +735,7 @@ static void take_over_work(struct ehca_comp_pool *pool,
 
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 static int comp_pool_callback(struct notifier_block *nfb,
 			      unsigned long action,
 			      void *hcpu)
@@ -756,6 +778,7 @@ static int comp_pool_callback(struct notifier_block *nfb,
 
 	return NOTIFY_OK;
 }
+#endif
 
 int ehca_create_comp_pool(void)
 {
@@ -786,9 +809,11 @@ int ehca_create_comp_pool(void)
 		}
 	}
 
+#ifdef CONFIG_HOTPLUG_CPU
 	comp_pool_callback_nb.notifier_call = comp_pool_callback;
 	comp_pool_callback_nb.priority =0;
 	register_cpu_notifier(&comp_pool_callback_nb);
+#endif
 
 	printk(KERN_INFO "eHCA scaling code enabled\n");
 
@@ -802,7 +827,9 @@ void ehca_destroy_comp_pool(void)
 	if (!ehca_scaling_code)
 		return;
 
+#ifdef CONFIG_HOTPLUG_CPU
 	unregister_cpu_notifier(&comp_pool_callback_nb);
+#endif
 
 	for (i = 0; i < NR_CPUS; i++) {
 		if (cpu_online(i))
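
Two smaller fixes ride along in this file: queue_comp_task() now samples cct->cq_jobs under cct->task_lock rather than reading it unlocked (and drops the unused cpu variable and its get_cpu() call), and process_eqe() holds ehca_cq_idr_lock across the nr_events increment but releases it before calling into the completion path, so the lock is no longer held while queue_comp_task() runs. The leftover eqe_cnt++ in the final polling loop of ehca_process_eq() is also removed; that counter only indexes the EQE cache filled earlier in the function.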
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c1835121a822..059da9628bb5 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0021");
+MODULE_VERSION("SVNEHCA_0022");
 
 int ehca_open_aqp1 = 0;
 int ehca_debug_level = 0;
@@ -810,7 +810,7 @@ int __init ehca_module_init(void)
 	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	       "(Rel.: SVNEHCA_0021)\n");
+	       "(Rel.: SVNEHCA_0022)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);