author     James Bottomley <James.Bottomley@HansenPartnership.com>   2009-01-31 11:24:43 -0500
committer  Ingo Molnar <mingo@elte.hu>                               2009-01-31 12:26:07 -0500
commit     92ab78315c638515d0e81b0c70b2082f713582d9 (patch)
tree       9b9e2f8c0e985a0a3f7afaeb3160706aa1fe172a /arch/x86
parent     7fc49f19813030f2e15ad2ccec5cb701f7f4a3ec (diff)
x86/Voyager: make it build and boot
[ mingo@elte.hu:

  these fixes are a subset of changes cherry-picked from:

     git://git.kernel.org:/pub/scm/linux/kernel/git/jejb/voyager-2.6.git

  They fix various problems that recent x86 changes caused in the
  Voyager subarchitecture: both APIC changes and cpumask changes and
  certain cleanups caused subarch assumptions to break.

  Most of these changes are obsolete as the subarch code has been
  removed from the x86 development tree - but we merge them upstream
  to make Voyager build and boot. ]

Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
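For readers not following the x86 tree: the "cpumask changes" mentioned above are the rework in which x86 interfaces stopped passing cpumask_t by value and began taking const struct cpumask * instead, which is the convention the set_vic_irq_affinity() and voyager_send_call_func() hunks below adopt. A minimal sketch of the two calling styles (illustrative only, not part of this patch; the function names are made up):

#include <linux/cpumask.h>

/* Old convention (hypothetical example): the full NR_CPUS-bit mask is
 * copied onto the stack on every call, and the cpus_*() helpers operate
 * on the value. */
static void example_old_style(cpumask_t mask)
{
	if (cpus_empty(mask))
		return;
}

/* New convention (hypothetical example): callers hand over a pointer to
 * a const mask, and the cpumask_*() helpers take pointers. */
static void example_new_style(const struct cpumask *mask)
{
	if (cpumask_empty(mask))
		return;
}

Passing a pointer avoids copying NR_CPUS bits on every call, which matters once NR_CPUS grows large.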
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/irqinit_32.c         12
-rw-r--r--  arch/x86/mach-default/setup.c        12
-rw-r--r--  arch/x86/mach-voyager/setup.c        12
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c  25
4 files changed, 35 insertions, 26 deletions
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 1507ad4e674d..10a09c2f1828 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -78,15 +78,6 @@ void __init init_ISA_irqs(void)
 	}
 }
 
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.mask = CPU_MASK_NONE,
-	.name = "cascade",
-};
-
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
 	[0 ... IRQ0_VECTOR - 1] = -1,
 	[IRQ0_VECTOR] = 0,
@@ -178,9 +169,6 @@ void __init native_init_IRQ(void)
 	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 
-	if (!acpi_ioapic)
-		setup_irq(2, &irq2);
-
 	/* setup after call gates are initialised (usually add in
 	 * the architecture specific gates)
 	 */
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
index df167f265622..a265a7c63190 100644
--- a/arch/x86/mach-default/setup.c
+++ b/arch/x86/mach-default/setup.c
@@ -38,6 +38,15 @@ void __init pre_intr_init_hook(void)
 	init_ISA_irqs();
 }
 
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = {
+	.handler = no_action,
+	.mask = CPU_MASK_NONE,
+	.name = "cascade",
+};
+
 /**
  * intr_init_hook - post gate setup interrupt initialisation
  *
@@ -53,6 +62,9 @@ void __init intr_init_hook(void)
 		if (x86_quirks->arch_intr_init())
 			return;
 	}
+	if (!acpi_ioapic)
+		setup_irq(2, &irq2);
+
 }
 
 /**
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index a580b9562e76..d914a7996a66 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -33,13 +33,23 @@ void __init intr_init_hook(void)
 	setup_irq(2, &irq2);
 }
 
-void __init pre_setup_arch_hook(void)
+static void voyager_disable_tsc(void)
 {
 	/* Voyagers run their CPUs from independent clocks, so disable
 	 * the TSC code because we can't sync them */
 	setup_clear_cpu_cap(X86_FEATURE_TSC);
 }
 
+void __init pre_setup_arch_hook(void)
+{
+	voyager_disable_tsc();
+}
+
+void __init pre_time_init_hook(void)
+{
+	voyager_disable_tsc();
+}
+
 void __init trap_init_hook(void)
 {
 }
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 9840b7ec749a..7ffcdeec4631 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
 static void before_handle_vic_irq(unsigned int irq);
 static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
+static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
 static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
@@ -211,8 +211,6 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-cpumask_t cpu_callin_map = CPU_MASK_NONE;
-cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -378,7 +376,7 @@ void __init find_smp_config(void)
 	cpus_addr(phys_cpu_present_map)[0] |=
 	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
 				       3) << 24;
-	cpu_possible_map = phys_cpu_present_map;
+	init_cpu_possible(&phys_cpu_present_map);
 	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
 	       cpus_addr(phys_cpu_present_map)[0]);
 	/* Here we set up the VIC to enable SMP */
@@ -1599,16 +1597,16 @@ static void after_handle_vic_irq(unsigned int irq)
  * change the mask and then do an interrupt enable CPI to re-enable on
  * the selected processors */
 
-void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	/* Only extended processors handle interrupts */
 	unsigned long real_mask;
 	unsigned long irq_mask = 1 << irq;
 	int cpu;
 
-	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+	real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
 
-	if (cpus_addr(mask)[0] == 0)
+	if (cpus_addr(*mask)[0] == 0)
 		/* can't have no CPUs to accept the interrupt -- extremely
 		 * bad things will happen */
 		return;
@@ -1750,10 +1748,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 	init_gdt(smp_processor_id());
 	switch_to_new_gdt();
 
-	cpu_set(smp_processor_id(), cpu_online_map);
-	cpu_set(smp_processor_id(), cpu_callout_map);
-	cpu_set(smp_processor_id(), cpu_possible_map);
-	cpu_set(smp_processor_id(), cpu_present_map);
+	cpu_online_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callin_map = CPU_MASK_NONE;
+	cpu_present_map = cpumask_of_cpu(smp_processor_id());
+
 }
 
 static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@ -1783,9 +1782,9 @@ void __init smp_setup_processor_id(void)
 	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
-static void voyager_send_call_func(cpumask_t callmask)
+static void voyager_send_call_func(const struct cpumask *callmask)
 {
-	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+	__u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
 	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 }
 