author     Eric W. Biederman <ebiederm@xmission.com>    2006-10-24 19:00:23 -0400
committer  Andi Kleen <andi@one.(none)>                 2006-10-24 19:00:23 -0400
commit     70a0a5357db20c291d46c04011d646d5d84c868c (patch)
tree       fb82087c685b2f3018172a5956f86cc36fc833d1 /arch/x86_64
parent     d1752aa884ec0ac3027c1a3d456bf69bf765c8b8 (diff)
[PATCH] x86-64: Only look at per_cpu data for online cpus.
When I generalized __assign_irq_vector I failed to pay attention to what happens when you access a per-cpu data structure for a cpu that is not online. That case is undefined, which makes any code doing it undefined as well.

The code still needs to be able to allocate a vector across cpus that are not online, to properly handle combinations like lowest-priority interrupt delivery and cpu hotplug. Not that we can do that today, but the infrastructure shouldn't prevent it.

So this patch updates the places where we touch per-cpu data to touch only online cpus, makes cpu vector allocation atomic with respect to cpu hotplug, and updates the cpu start code to initialize vector_irq properly so we don't have inconsistencies.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Andi Kleen <ak@suse.de>
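To make the fix concrete, here is a minimal user-space sketch of the pattern the patch adopts (an illustration only, not the kernel code: the cpu count, the bitmask standing in for cpu_online_map, and the pthread mutex standing in for vector_lock are all assumptions of the model). Per-cpu vector tables are consulted only for cpus present in the online mask, and both the mask and the tables are read and written under one lock, so allocation is atomic with respect to hotplug.

/*
 * Sketch: vector allocation restricted to online cpus, serialized by
 * one lock.  A model only -- none of these names are kernel APIs.
 */
#include <pthread.h>
#include <stdio.h>

#define NCPUS    8
#define NVECTORS 16

static unsigned long online_mask = 0x03;   /* cpus 0 and 1 online */
static int vector_irq[NCPUS][NVECTORS];    /* -1 == vector is free */
static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;

/* Assign irq the first vector that is free on every online cpu in mask. */
static int assign_vector(int irq, unsigned long mask)
{
	int vec, cpu, busy;

	pthread_mutex_lock(&vector_lock);
	mask &= online_mask;               /* never touch offline cpus */
	for (vec = 0; vec < NVECTORS; vec++) {
		busy = 0;
		for (cpu = 0; cpu < NCPUS; cpu++)
			if (((mask >> cpu) & 1) && vector_irq[cpu][vec] != -1)
				busy = 1;
		if (busy)
			continue;
		for (cpu = 0; cpu < NCPUS; cpu++)
			if ((mask >> cpu) & 1)
				vector_irq[cpu][vec] = irq;
		pthread_mutex_unlock(&vector_lock);
		return vec;
	}
	pthread_mutex_unlock(&vector_lock);
	return -1;                         /* no common free vector */
}

int main(void)
{
	int cpu, vec;

	for (cpu = 0; cpu < NCPUS; cpu++)
		for (vec = 0; vec < NVECTORS; vec++)
			vector_irq[cpu][vec] = -1;

	/* Request names all 8 cpus; only the online ones are touched. */
	printf("irq 5 -> vector %d\n", assign_vector(5, 0xff));
	return 0;
}

Here the tables of cpus 2-7 are never read or written, which is the invariant the patch establishes for per_cpu(vector_irq, cpu).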
Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/kernel/io_apic.c  | 42
-rw-r--r--  arch/x86_64/kernel/smpboot.c  |  7
2 files changed, 43 insertions(+), 6 deletions(-)
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 0e89ae7e7b22..fe429e5d6b29 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -63,7 +63,7 @@ int timer_over_8254 __initdata = 1;
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
 static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+DEFINE_SPINLOCK(vector_lock);
 
 /*
  * # of IRQ routing registers
@@ -618,6 +618,9 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 
 	BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
 
+	/* Only try and allocate irqs on cpus that are present */
+	cpus_and(mask, mask, cpu_online_map);
+
 	if (irq_vector[irq] > 0)
 		old_vector = irq_vector[irq];
 	if (old_vector > 0) {
@@ -627,11 +630,12 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 	}
 
 	for_each_cpu_mask(cpu, mask) {
-		cpumask_t domain;
+		cpumask_t domain, new_mask;
 		int new_cpu;
 		int vector, offset;
 
 		domain = vector_allocation_domain(cpu);
+		cpus_and(new_mask, domain, cpu_online_map);
 
 		vector = current_vector;
 		offset = current_offset;
@@ -646,18 +650,20 @@ next:
 			continue;
 		if (vector == IA32_SYSCALL_VECTOR)
 			goto next;
-		for_each_cpu_mask(new_cpu, domain)
+		for_each_cpu_mask(new_cpu, new_mask)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
 		current_vector = vector;
 		current_offset = offset;
 		if (old_vector >= 0) {
+			cpumask_t old_mask;
 			int old_cpu;
-			for_each_cpu_mask(old_cpu, irq_domain[irq])
+			cpus_and(old_mask, irq_domain[irq], cpu_online_map);
+			for_each_cpu_mask(old_cpu, old_mask)
 				per_cpu(vector_irq, old_cpu)[old_vector] = -1;
 		}
-		for_each_cpu_mask(new_cpu, domain)
+		for_each_cpu_mask(new_cpu, new_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		irq_vector[irq] = vector;
 		irq_domain[irq] = domain;
@@ -678,6 +684,32 @@ static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 	return vector;
 }
 
+void __setup_vector_irq(int cpu)
+{
+	/* Initialize vector_irq on a new cpu */
+	/* This function must be called with vector_lock held */
+	unsigned long flags;
+	int irq, vector;
+
+
+	/* Mark the inuse vectors */
+	for (irq = 0; irq < NR_IRQ_VECTORS; ++irq) {
+		if (!cpu_isset(cpu, irq_domain[irq]))
+			continue;
+		vector = irq_vector[irq];
+		per_cpu(vector_irq, cpu)[vector] = irq;
+	}
+	/* Mark the free vectors */
+	for (vector = 0; vector < NR_VECTORS; ++vector) {
+		irq = per_cpu(vector_irq, cpu)[vector];
+		if (irq < 0)
+			continue;
+		if (!cpu_isset(cpu, irq_domain[irq]))
+			per_cpu(vector_irq, cpu)[vector] = -1;
+	}
+}
+
+
 extern void (*interrupt[NR_IRQS])(void);
 
 static struct irq_chip ioapic_chip;
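The new __setup_vector_irq() above makes two passes: the first copies into the booting cpu's vector_irq every vector whose irq_domain includes that cpu; the second invalidates any stale entry left from a previous period the cpu was online. Below is a compact model of the same two passes, in the same hedged user-space style as the earlier sketch (all names and signatures hypothetical):

/* Two-pass bring-up of one cpu's vector table (a model, not kernel code). */
static void setup_vector_table(int cpu, int nvec, int nirq,
			       const unsigned long *irq_domain, /* per-irq cpu mask */
			       const int *irq_vector,           /* per-irq vector */
			       int *vtab)                       /* this cpu's vector->irq */
{
	int irq, vec;

	/* Pass 1: mark vectors owned by irqs whose domain includes cpu. */
	for (irq = 0; irq < nirq; irq++)
		if ((irq_domain[irq] >> cpu) & 1)
			vtab[irq_vector[irq]] = irq;

	/* Pass 2: clear entries whose irq no longer targets this cpu. */
	for (vec = 0; vec < nvec; vec++) {
		irq = vtab[vec];
		if (irq >= 0 && !((irq_domain[irq] >> cpu) & 1))
			vtab[vec] = -1;
	}
}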
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 7b7a6870288a..62c2e747af58 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -581,12 +581,16 @@ void __cpuinit start_secondary(void)
 	 * smp_call_function().
 	 */
 	lock_ipi_call_lock();
+	spin_lock(&vector_lock);
 
+	/* Setup the per cpu irq handling data structures */
+	__setup_vector_irq(smp_processor_id());
 	/*
 	 * Allow the master to continue.
 	 */
 	cpu_set(smp_processor_id(), cpu_online_map);
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+	spin_unlock(&vector_lock);
 	unlock_ipi_call_lock();
 
 	cpu_idle();
@@ -799,7 +803,6 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 			cpu, node);
 	}
 
-
 	alternatives_smp_switch(1);
 
 	c_idle.idle = get_idle_for_cpu(cpu);
@@ -1246,8 +1249,10 @@ int __cpu_disable(void)
 	local_irq_disable();
 	remove_siblinginfo(cpu);
 
+	spin_lock(&vector_lock);
 	/* It's now safe to remove this processor from the online map */
 	cpu_clear(cpu, cpu_online_map);
+	spin_unlock(&vector_lock);
 	remove_cpu_from_maps();
 	fixup_irqs(cpu_online_map);
 	return 0;
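The hunk above closes the other end of the race: a cpu's bit leaves cpu_online_map only while vector_lock is held, so a concurrent __assign_irq_vector can never pick a cpu that is halfway offline. Extending the user-space model from the first sketch (it reuses that sketch's online_mask and vector_lock; again an illustration, not kernel code):

/* Offline path of the model: flip the online bit under the lock so no
 * allocator running concurrently can still select this cpu. */
static void take_cpu_offline(int cpu)
{
	pthread_mutex_lock(&vector_lock);
	online_mask &= ~(1UL << cpu);
	pthread_mutex_unlock(&vector_lock);
	/* Re-routing irqs that pointed at cpu (fixup_irqs in the patch)
	 * happens after the bit is clear. */
}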