diff options
Diffstat (limited to 'arch/x86_64/kernel/io_apic.c')
| -rw-r--r-- | arch/x86_64/kernel/io_apic.c | 62 |
1 file changed, 44 insertions, 18 deletions
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index b000017e4b5d..fe429e5d6b29 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c | |||
| @@ -63,7 +63,7 @@ int timer_over_8254 __initdata = 1; | |||
| 63 | static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; | 63 | static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; |
| 64 | 64 | ||
| 65 | static DEFINE_SPINLOCK(ioapic_lock); | 65 | static DEFINE_SPINLOCK(ioapic_lock); |
| 66 | static DEFINE_SPINLOCK(vector_lock); | 66 | DEFINE_SPINLOCK(vector_lock); |
| 67 | 67 | ||
| 68 | /* | 68 | /* |
| 69 | * # of IRQ routing registers | 69 | * # of IRQ routing registers |
| @@ -612,15 +612,15 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result) | |||
| 612 | * Also, we've got to be careful not to trash gate | 612 | * Also, we've got to be careful not to trash gate |
| 613 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | 613 | * 0x80, because int 0x80 is hm, kind of importantish. ;) |
| 614 | */ | 614 | */ |
| 615 | static struct { | 615 | static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; |
| 616 | int vector; | ||
| 617 | int offset; | ||
| 618 | } pos[NR_CPUS] = { [ 0 ... NR_CPUS - 1] = {FIRST_DEVICE_VECTOR, 0} }; | ||
| 619 | int old_vector = -1; | 616 | int old_vector = -1; |
| 620 | int cpu; | 617 | int cpu; |
| 621 | 618 | ||
| 622 | BUG_ON((unsigned)irq >= NR_IRQ_VECTORS); | 619 | BUG_ON((unsigned)irq >= NR_IRQ_VECTORS); |
| 623 | 620 | ||
| 621 | /* Only try and allocate irqs on cpus that are present */ | ||
| 622 | cpus_and(mask, mask, cpu_online_map); | ||
| 623 | |||
| 624 | if (irq_vector[irq] > 0) | 624 | if (irq_vector[irq] > 0) |
| 625 | old_vector = irq_vector[irq]; | 625 | old_vector = irq_vector[irq]; |
| 626 | if (old_vector > 0) { | 626 | if (old_vector > 0) { |
| @@ -630,15 +630,15 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result) | |||
| 630 | } | 630 | } |
| 631 | 631 | ||
| 632 | for_each_cpu_mask(cpu, mask) { | 632 | for_each_cpu_mask(cpu, mask) { |
| 633 | cpumask_t domain; | 633 | cpumask_t domain, new_mask; |
| 634 | int first, new_cpu; | 634 | int new_cpu; |
| 635 | int vector, offset; | 635 | int vector, offset; |
| 636 | 636 | ||
| 637 | domain = vector_allocation_domain(cpu); | 637 | domain = vector_allocation_domain(cpu); |
| 638 | first = first_cpu(domain); | 638 | cpus_and(new_mask, domain, cpu_online_map); |
| 639 | 639 | ||
| 640 | vector = pos[first].vector; | 640 | vector = current_vector; |
| 641 | offset = pos[first].offset; | 641 | offset = current_offset; |
| 642 | next: | 642 | next: |
| 643 | vector += 8; | 643 | vector += 8; |
| 644 | if (vector >= FIRST_SYSTEM_VECTOR) { | 644 | if (vector >= FIRST_SYSTEM_VECTOR) { |
| @@ -646,24 +646,24 @@ next: | |||
| 646 | offset = (offset + 1) % 8; | 646 | offset = (offset + 1) % 8; |
| 647 | vector = FIRST_DEVICE_VECTOR + offset; | 647 | vector = FIRST_DEVICE_VECTOR + offset; |
| 648 | } | 648 | } |
| 649 | if (unlikely(pos[first].vector == vector)) | 649 | if (unlikely(current_vector == vector)) |
| 650 | continue; | 650 | continue; |
| 651 | if (vector == IA32_SYSCALL_VECTOR) | 651 | if (vector == IA32_SYSCALL_VECTOR) |
| 652 | goto next; | 652 | goto next; |
| 653 | for_each_cpu_mask(new_cpu, domain) | 653 | for_each_cpu_mask(new_cpu, new_mask) |
| 654 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) | 654 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) |
| 655 | goto next; | 655 | goto next; |
| 656 | /* Found one! */ | 656 | /* Found one! */ |
| 657 | for_each_cpu_mask(new_cpu, domain) { | 657 | current_vector = vector; |
| 658 | pos[new_cpu].vector = vector; | 658 | current_offset = offset; |
| 659 | pos[new_cpu].offset = offset; | ||
| 660 | } | ||
| 661 | if (old_vector >= 0) { | 659 | if (old_vector >= 0) { |
| 660 | cpumask_t old_mask; | ||
| 662 | int old_cpu; | 661 | int old_cpu; |
| 663 | for_each_cpu_mask(old_cpu, irq_domain[irq]) | 662 | cpus_and(old_mask, irq_domain[irq], cpu_online_map); |
| 663 | for_each_cpu_mask(old_cpu, old_mask) | ||
| 664 | per_cpu(vector_irq, old_cpu)[old_vector] = -1; | 664 | per_cpu(vector_irq, old_cpu)[old_vector] = -1; |
| 665 | } | 665 | } |
| 666 | for_each_cpu_mask(new_cpu, domain) | 666 | for_each_cpu_mask(new_cpu, new_mask) |
| 667 | per_cpu(vector_irq, new_cpu)[vector] = irq; | 667 | per_cpu(vector_irq, new_cpu)[vector] = irq; |
| 668 | irq_vector[irq] = vector; | 668 | irq_vector[irq] = vector; |
| 669 | irq_domain[irq] = domain; | 669 | irq_domain[irq] = domain; |
| @@ -684,6 +684,32 @@ static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result) | |||
| 684 | return vector; | 684 | return vector; |
| 685 | } | 685 | } |
| 686 | 686 | ||
| 687 | void __setup_vector_irq(int cpu) | ||
| 688 | { | ||
| 689 | /* Initialize vector_irq on a new cpu */ | ||
| 690 | /* This function must be called with vector_lock held */ | ||
| 691 | unsigned long flags; | ||
| 692 | int irq, vector; | ||
| 693 | |||
| 694 | |||
| 695 | /* Mark the inuse vectors */ | ||
| 696 | for (irq = 0; irq < NR_IRQ_VECTORS; ++irq) { | ||
| 697 | if (!cpu_isset(cpu, irq_domain[irq])) | ||
| 698 | continue; | ||
| 699 | vector = irq_vector[irq]; | ||
| 700 | per_cpu(vector_irq, cpu)[vector] = irq; | ||
| 701 | } | ||
| 702 | /* Mark the free vectors */ | ||
| 703 | for (vector = 0; vector < NR_VECTORS; ++vector) { | ||
| 704 | irq = per_cpu(vector_irq, cpu)[vector]; | ||
| 705 | if (irq < 0) | ||
| 706 | continue; | ||
| 707 | if (!cpu_isset(cpu, irq_domain[irq])) | ||
| 708 | per_cpu(vector_irq, cpu)[vector] = -1; | ||
| 709 | } | ||
| 710 | } | ||
| 711 | |||
| 712 | |||
| 687 | extern void (*interrupt[NR_IRQS])(void); | 713 | extern void (*interrupt[NR_IRQS])(void); |
| 688 | 714 | ||
| 689 | static struct irq_chip ioapic_chip; | 715 | static struct irq_chip ioapic_chip; |
