author      Eric W. Biederman <ebiederm@xmission.com>    2006-10-24 19:00:22 -0400
committer   Andi Kleen <andi@one.(none)>                 2006-10-24 19:00:22 -0400
commit      d1752aa884ec0ac3027c1a3d456bf69bf765c8b8
tree        e6c9c5d192a07b4bbf7d8f1381539c3f4e5ada50
parent      7059abedd2f04b68bd7e1a79c9c72f7aeee134c0
[PATCH] x86-64: Simplify the vector allocator.
There is no reason to remember a per cpu position of which vector
to try. Keeping a global position is simpler and more likely to
result in a global vector allocation even if I don't need or require
it. For level-triggered interrupts this means we are less likely to
acknowledge another cpu's irq, and cause the level-triggered irq to
harmlessly refire.
This simplification makes it easier to only access data structures
of online cpus, by having fewer special cases to deal with.
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86_64/kernel/io_apic.c   20
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index b000017e4b5d..0e89ae7e7b22 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -612,10 +612,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 	 * Also, we've got to be careful not to trash gate
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
-	static struct {
-		int vector;
-		int offset;
-	} pos[NR_CPUS] = { [ 0 ... NR_CPUS - 1] = {FIRST_DEVICE_VECTOR, 0} };
+	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
 	int old_vector = -1;
 	int cpu;
 
@@ -631,14 +628,13 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 
 	for_each_cpu_mask(cpu, mask) {
 		cpumask_t domain;
-		int first, new_cpu;
+		int new_cpu;
 		int vector, offset;
 
 		domain = vector_allocation_domain(cpu);
-		first = first_cpu(domain);
 
-		vector = pos[first].vector;
-		offset = pos[first].offset;
+		vector = current_vector;
+		offset = current_offset;
 next:
 		vector += 8;
 		if (vector >= FIRST_SYSTEM_VECTOR) {
@@ -646,7 +642,7 @@ next:
 			offset = (offset + 1) % 8;
 			vector = FIRST_DEVICE_VECTOR + offset;
 		}
-		if (unlikely(pos[first].vector == vector))
+		if (unlikely(current_vector == vector))
 			continue;
 		if (vector == IA32_SYSCALL_VECTOR)
 			goto next;
@@ -654,10 +650,8 @@ next:
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
-		for_each_cpu_mask(new_cpu, domain) {
-			pos[new_cpu].vector = vector;
-			pos[new_cpu].offset = offset;
-		}
+		current_vector = vector;
+		current_offset = offset;
 		if (old_vector >= 0) {
 			int old_cpu;
 			for_each_cpu_mask(old_cpu, irq_domain[irq])
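
To make the allocation scan easier to follow outside the kernel tree, here is a minimal
user-space sketch of the loop as it looks after this patch: a single global
current_vector/current_offset pair is advanced in steps of 8 until a vector is found that is
free in every cpu's vector_irq table. The constant values, the flat vector_irq array, NR_CPUS
and the assign_vector() helper are simplified stand-ins invented for the example; the real
__assign_irq_vector() also deals with cpumask allocation domains and the old_vector bookkeeping
visible in the diff above.

/*
 * Standalone sketch (not kernel code) of the vector scan with a single
 * global position, as introduced by this patch.  Constants and the
 * vector_irq table are simplified stand-ins for the x86-64 definitions.
 */
#include <stdio.h>

#define NR_CPUS              4
#define FIRST_DEVICE_VECTOR  0x31
#define FIRST_SYSTEM_VECTOR  0xef
#define IA32_SYSCALL_VECTOR  0x80
#define NR_VECTORS           256

static int vector_irq[NR_CPUS][NR_VECTORS];   /* -1 == vector free on that cpu */

/* Single global scan position shared by all cpus. */
static int current_vector = FIRST_DEVICE_VECTOR, current_offset;

/* Return a vector that is free on every cpu in [0, ncpus), or -1. */
static int assign_vector(int ncpus)
{
	int vector = current_vector;
	int offset = current_offset;
	int cpu;

next:
	vector += 8;                            /* step one priority level at a time */
	if (vector >= FIRST_SYSTEM_VECTOR) {
		offset = (offset + 1) % 8;      /* wrap around to the next offset */
		vector = FIRST_DEVICE_VECTOR + offset;
	}
	if (vector == current_vector)           /* scanned the whole space: give up */
		return -1;
	if (vector == IA32_SYSCALL_VECTOR)      /* never hand out int 0x80 */
		goto next;
	for (cpu = 0; cpu < ncpus; cpu++)
		if (vector_irq[cpu][vector] != -1)
			goto next;
	current_vector = vector;                /* found one: remember where to resume */
	current_offset = offset;
	return vector;
}

int main(void)
{
	int cpu, v, irq;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		for (v = 0; v < NR_VECTORS; v++)
			vector_irq[cpu][v] = -1;

	for (irq = 0; irq < 5; irq++) {
		int vec = assign_vector(NR_CPUS);
		if (vec < 0)
			break;
		printf("irq %d -> vector 0x%02x\n", irq, vec);
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			vector_irq[cpu][vec] = irq;   /* mark it in use everywhere */
	}
	return 0;
}

Because every allocation resumes from the same shared position, cpus tend to agree on which
vector an irq ends up with, which is the property the commit message leans on to keep a stale
level-triggered irq on another cpu harmless.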