author     Hoang-Nam Nguyen <hnguyen@linux.vnet.ibm.com>   2007-02-15 11:07:30 -0500
committer  Roland Dreier <rolandd@cisco.com>               2007-02-16 16:57:34 -0500
commit     8b16cef3df871b005f3a97e273b5b135ebfb3769
tree       36b75ea756c6338164ab436da515abd3073d21f2
parent     78d8d5f9ef8d6179e92b94481cfdfc45d396992f
IB/ehca: Fix race condition/locking issues in scaling code
Fix a race condition in find_next_online_cpu() and some other locking
issues in the ehca scaling code.
Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c  | 68
1 file changed, 33 insertions(+), 35 deletions(-)
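The race in find_next_online_cpu(): the old code updated pool->last_cpu
under last_cpu_lock but then read pool->last_cpu again after dropping the
lock, so a concurrent caller could overwrite it in between and the function
could return a CPU it never selected. The fix computes the result into a
local under the lock and returns that local. A minimal userspace sketch of
the same pattern (pick_next_cpu() and struct cpu_pool are hypothetical
illustration names, not the driver's code):

    #include <pthread.h>

    struct cpu_pool {
    	pthread_spinlock_t lock;
    	int last_cpu;
    };

    /* Pick the next CPU id round-robin.  The result is computed into a
     * local while the lock is held and that local is returned, so another
     * caller updating last_cpu after the unlock cannot change what this
     * caller sees. */
    static int pick_next_cpu(struct cpu_pool *pool, int ncpus)
    {
    	int cpu;

    	pthread_spin_lock(&pool->lock);
    	cpu = (pool->last_cpu + 1) % ncpus;	/* stand-in for next_cpu() */
    	pool->last_cpu = cpu;			/* publish under the lock */
    	pthread_spin_unlock(&pool->lock);

    	return cpu;	/* NOT pool->last_cpu, which may have changed */
    }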
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index b923b5d5de68..9679b072ad01 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -544,28 +544,30 @@ void ehca_tasklet_eq(unsigned long data)
 
 static inline int find_next_online_cpu(struct ehca_comp_pool* pool)
 {
-	unsigned long flags_last_cpu;
+	int cpu;
+	unsigned long flags;
 
+	WARN_ON_ONCE(!in_interrupt());
 	if (ehca_debug_level)
 		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
 
-	spin_lock_irqsave(&pool->last_cpu_lock, flags_last_cpu);
-	pool->last_cpu = next_cpu(pool->last_cpu, cpu_online_map);
-	if (pool->last_cpu == NR_CPUS)
-		pool->last_cpu = first_cpu(cpu_online_map);
-	spin_unlock_irqrestore(&pool->last_cpu_lock, flags_last_cpu);
+	spin_lock_irqsave(&pool->last_cpu_lock, flags);
+	cpu = next_cpu(pool->last_cpu, cpu_online_map);
+	if (cpu == NR_CPUS)
+		cpu = first_cpu(cpu_online_map);
+	pool->last_cpu = cpu;
+	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
 
-	return pool->last_cpu;
+	return cpu;
 }
 
 static void __queue_comp_task(struct ehca_cq *__cq,
 			      struct ehca_cpu_comp_task *cct)
 {
-	unsigned long flags_cct;
-	unsigned long flags_cq;
+	unsigned long flags;
 
-	spin_lock_irqsave(&cct->task_lock, flags_cct);
-	spin_lock_irqsave(&__cq->task_lock, flags_cq);
+	spin_lock_irqsave(&cct->task_lock, flags);
+	spin_lock(&__cq->task_lock);
 
 	if (__cq->nr_callbacks == 0) {
 		__cq->nr_callbacks++;
@@ -576,8 +578,8 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 	else
 		__cq->nr_callbacks++;
 
-	spin_unlock_irqrestore(&__cq->task_lock, flags_cq);
-	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+	spin_unlock(&__cq->task_lock);
+	spin_unlock_irqrestore(&cct->task_lock, flags);
 }
 
 static void queue_comp_task(struct ehca_cq *__cq)
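Note on the lock nesting above: once cct->task_lock is taken with
spin_lock_irqsave(), interrupts are already disabled on the local CPU, so
the inner CQ lock can use plain spin_lock()/spin_unlock() instead of a
second irqsave/irqrestore pair. A kernel-style fragment of the idiom
(illustration only, not driver code):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(outer_lock);
    static DEFINE_SPINLOCK(inner_lock);

    static void nested_lock_sketch(void)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&outer_lock, flags);	/* IRQs off from here */
    	spin_lock(&inner_lock);			/* no second irqsave needed */
    	/* ... work on both structures ... */
    	spin_unlock(&inner_lock);
    	spin_unlock_irqrestore(&outer_lock, flags); /* prior IRQ state back */
    }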
@@ -588,69 +590,69 @@ static void queue_comp_task(struct ehca_cq *__cq)
 
 	cpu = get_cpu();
 	cpu_id = find_next_online_cpu(pool);
-
 	BUG_ON(!cpu_online(cpu_id));
 
 	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
+	BUG_ON(!cct);
 
 	if (cct->cq_jobs > 0) {
 		cpu_id = find_next_online_cpu(pool);
 		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
+		BUG_ON(!cct);
 	}
 
 	__queue_comp_task(__cq, cct);
-
-	put_cpu();
-
-	return;
 }
 
 static void run_comp_task(struct ehca_cpu_comp_task* cct)
 {
 	struct ehca_cq *cq;
-	unsigned long flags_cct;
-	unsigned long flags_cq;
+	unsigned long flags;
 
-	spin_lock_irqsave(&cct->task_lock, flags_cct);
+	spin_lock_irqsave(&cct->task_lock, flags);
 
 	while (!list_empty(&cct->cq_list)) {
 		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
-		spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+		spin_unlock_irqrestore(&cct->task_lock, flags);
 		comp_event_callback(cq);
-		spin_lock_irqsave(&cct->task_lock, flags_cct);
+		spin_lock_irqsave(&cct->task_lock, flags);
 
-		spin_lock_irqsave(&cq->task_lock, flags_cq);
+		spin_lock(&cq->task_lock);
 		cq->nr_callbacks--;
 		if (cq->nr_callbacks == 0) {
 			list_del_init(cct->cq_list.next);
 			cct->cq_jobs--;
 		}
-		spin_unlock_irqrestore(&cq->task_lock, flags_cq);
-
+		spin_unlock(&cq->task_lock);
 	}
 
-	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
-
-	return;
+	spin_unlock_irqrestore(&cct->task_lock, flags);
 }
 
 static int comp_task(void *__cct)
 {
 	struct ehca_cpu_comp_task* cct = __cct;
+	int cql_empty;
 	DECLARE_WAITQUEUE(wait, current);
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	while(!kthread_should_stop()) {
 		add_wait_queue(&cct->wait_queue, &wait);
 
-		if (list_empty(&cct->cq_list))
+		spin_lock_irq(&cct->task_lock);
+		cql_empty = list_empty(&cct->cq_list);
+		spin_unlock_irq(&cct->task_lock);
+		if (cql_empty)
 			schedule();
 		else
 			__set_current_state(TASK_RUNNING);
 
 		remove_wait_queue(&cct->wait_queue, &wait);
 
-		if (!list_empty(&cct->cq_list))
+		spin_lock_irq(&cct->task_lock);
+		cql_empty = list_empty(&cct->cq_list);
+		spin_unlock_irq(&cct->task_lock);
+		if (!cql_empty)
 			run_comp_task(__cct);
 
 		set_current_state(TASK_INTERRUPTIBLE);
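comp_task() previously tested list_empty(&cct->cq_list) without holding
cct->task_lock, so the check could race with __queue_comp_task() adding a
CQ from another CPU. The rewrite samples the list state into cql_empty
while holding the lock and then acts on that snapshot. The same test,
pulled out as a hypothetical helper for clarity (the driver open-codes it
inline as shown above):

    /* Snapshot the queue state under the task lock; the caller then
     * decides to sleep or run work based on this one consistent view. */
    static int cq_list_is_empty(struct ehca_cpu_comp_task *cct)
    {
    	int empty;

    	spin_lock_irq(&cct->task_lock);
    	empty = list_empty(&cct->cq_list);
    	spin_unlock_irq(&cct->task_lock);

    	return empty;
    }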
@@ -693,8 +695,6 @@ static void destroy_comp_task(struct ehca_comp_pool *pool,
 
 	if (task)
 		kthread_stop(task);
-
-	return;
 }
 
 static void take_over_work(struct ehca_comp_pool *pool,
@@ -815,6 +815,4 @@ void ehca_destroy_comp_pool(void)
 	free_percpu(pool->cpu_comp_tasks);
 	kfree(pool);
 #endif
-
-	return;
 }