-rw-r--r--  arch/x86_64/kernel/io_apic.c | 42
-rw-r--r--  arch/x86_64/kernel/smpboot.c |  7
-rw-r--r--  include/asm-x86_64/hw_irq.h  |  2
3 files changed, 45 insertions(+), 6 deletions(-)
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 0e89ae7e7b22..fe429e5d6b29 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -63,7 +63,7 @@ int timer_over_8254 __initdata = 1;
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
 static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+DEFINE_SPINLOCK(vector_lock);
 
 /*
  * # of IRQ routing registers
@@ -618,6 +618,9 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 
 	BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
 
+	/* Only try and allocate irqs on cpus that are present */
+	cpus_and(mask, mask, cpu_online_map);
+
 	if (irq_vector[irq] > 0)
 		old_vector = irq_vector[irq];
 	if (old_vector > 0) {
@@ -627,11 +630,12 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 	}
 
 	for_each_cpu_mask(cpu, mask) {
-		cpumask_t domain;
+		cpumask_t domain, new_mask;
 		int new_cpu;
 		int vector, offset;
 
 		domain = vector_allocation_domain(cpu);
+		cpus_and(new_mask, domain, cpu_online_map);
 
 		vector = current_vector;
 		offset = current_offset;
@@ -646,18 +650,20 @@ next:
 			continue;
 		if (vector == IA32_SYSCALL_VECTOR)
 			goto next;
-		for_each_cpu_mask(new_cpu, domain)
+		for_each_cpu_mask(new_cpu, new_mask)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
 		current_vector = vector;
 		current_offset = offset;
 		if (old_vector >= 0) {
+			cpumask_t old_mask;
 			int old_cpu;
-			for_each_cpu_mask(old_cpu, irq_domain[irq])
+			cpus_and(old_mask, irq_domain[irq], cpu_online_map);
+			for_each_cpu_mask(old_cpu, old_mask)
 				per_cpu(vector_irq, old_cpu)[old_vector] = -1;
 		}
-		for_each_cpu_mask(new_cpu, domain)
+		for_each_cpu_mask(new_cpu, new_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		irq_vector[irq] = vector;
 		irq_domain[irq] = domain;
@@ -678,6 +684,32 @@ static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 	return vector;
 }
 
+void __setup_vector_irq(int cpu)
+{
+	/* Initialize vector_irq on a new cpu */
+	/* This function must be called with vector_lock held */
+	unsigned long flags;
+	int irq, vector;
+
+
+	/* Mark the inuse vectors */
+	for (irq = 0; irq < NR_IRQ_VECTORS; ++irq) {
+		if (!cpu_isset(cpu, irq_domain[irq]))
+			continue;
+		vector = irq_vector[irq];
+		per_cpu(vector_irq, cpu)[vector] = irq;
+	}
+	/* Mark the free vectors */
+	for (vector = 0; vector < NR_VECTORS; ++vector) {
+		irq = per_cpu(vector_irq, cpu)[vector];
+		if (irq < 0)
+			continue;
+		if (!cpu_isset(cpu, irq_domain[irq]))
+			per_cpu(vector_irq, cpu)[vector] = -1;
+	}
+}
+
+
 extern void (*interrupt[NR_IRQS])(void);
 
 static struct irq_chip ioapic_chip;
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 7b7a6870288a..62c2e747af58 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -581,12 +581,16 @@ void __cpuinit start_secondary(void)
 	 * smp_call_function().
 	 */
 	lock_ipi_call_lock();
+	spin_lock(&vector_lock);
 
+	/* Setup the per cpu irq handling data structures */
+	__setup_vector_irq(smp_processor_id());
 	/*
 	 * Allow the master to continue.
 	 */
 	cpu_set(smp_processor_id(), cpu_online_map);
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+	spin_unlock(&vector_lock);
 	unlock_ipi_call_lock();
 
 	cpu_idle();
@@ -799,7 +803,6 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 			cpu, node);
 	}
 
-
 	alternatives_smp_switch(1);
 
 	c_idle.idle = get_idle_for_cpu(cpu);
@@ -1246,8 +1249,10 @@ int __cpu_disable(void)
 	local_irq_disable();
 	remove_siblinginfo(cpu);
 
+	spin_lock(&vector_lock);
 	/* It's now safe to remove this processor from the online map */
 	cpu_clear(cpu, cpu_online_map);
+	spin_unlock(&vector_lock);
 	remove_cpu_from_maps();
 	fixup_irqs(cpu_online_map);
 	return 0;
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index 792dd52fcd70..179cce755aa7 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -76,6 +76,8 @@
 #ifndef __ASSEMBLY__
 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
+extern void __setup_vector_irq(int cpu);
+extern spinlock_t vector_lock;
 
 /*
  * Various low-level irq details needed by irq.c, process.c,