Diffstat (limited to 'arch/x86/mach-voyager/voyager_smp.c')

 arch/x86/mach-voyager/voyager_smp.c | 47 +++++++++++++++++++++++------------------------
 1 file changed, 23 insertions(+), 24 deletions(-)

diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 9840b7ec749a..b9cc84a2a4fc 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -65,7 +65,7 @@ static volatile unsigned long smp_invalidate_needed;
 
 /* Bitmask of CPUs present in the system - exported by i386_syms.c, used
  * by scheduler but indexed physically */
-cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+static cpumask_t voyager_phys_cpu_present_map = CPU_MASK_NONE;
 
 /* The internal functions */
 static void send_CPI(__u32 cpuset, __u8 cpi);
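The map loses its external linkage and gains a voyager_ prefix, presumably to keep it from colliding with the identically named, APIC-indexed map in the rest of arch/x86 (the motivation is an assumption; the commit text is not shown here). Note that the comment above it still claims the symbol is "exported by i386_syms.c", which is now stale. A minimal sketch of the linkage change:

	/* before: a global, visible to and exported for other files */
	cpumask_t phys_cpu_present_map = CPU_MASK_NONE;

	/* after: file-local; outside code must go through accessors
	 * such as init_cpu_possible() (see the find_smp_config hunk) */
	static cpumask_t voyager_phys_cpu_present_map = CPU_MASK_NONE;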
@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
 static void before_handle_vic_irq(unsigned int irq);
 static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
+static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
 static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
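This prototype change is the heart of the patch: cpumask_t is a fixed-size bitmap of NR_CPUS bits, so passing it by value copies the whole bitmap onto the stack at every call (512 bytes at NR_CPUS=4096), while const struct cpumask * passes a pointer. A standalone sketch of the two conventions (the function names here are illustrative, not from the file):

	/* old style: the full NR_CPUS-bit bitmap is copied per call */
	static void set_affinity_byval(unsigned int irq, cpumask_t mask)
	{
		unsigned long word0 = cpus_addr(mask)[0];	/* (mask).bits[0] */
		/* ... */
	}

	/* new style: an 8-byte pointer, no copy, and const-correct */
	static void set_affinity_byref(unsigned int irq, const struct cpumask *mask)
	{
		unsigned long word0 = cpumask_bits(mask)[0];	/* mask->bits[0] */
		/* ... */
	}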
@@ -211,8 +211,6 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-cpumask_t cpu_callin_map = CPU_MASK_NONE;
-cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
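The definitions of cpu_callin_map and cpu_callout_map disappear from this file, yet a later hunk still assigns both, so after this change they must be defined once in code shared with the regular x86 SMP boot path and merely declared in a header. A hedged guess at the shape of that declaration (the header location is an assumption):

	/* e.g. in arch/x86/include/asm/smp.h (assumed location) */
	extern cpumask_t cpu_callin_map;
	extern cpumask_t cpu_callout_map;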
@@ -368,19 +366,19 @@ void __init find_smp_config(void)
 	/* set up everything for just this CPU, we can alter
 	 * this as we start the other CPUs later */
 	/* now get the CPU disposition from the extended CMOS */
-	cpus_addr(phys_cpu_present_map)[0] =
+	cpus_addr(voyager_phys_cpu_present_map)[0] =
 	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
-	cpus_addr(phys_cpu_present_map)[0] |=
+	cpus_addr(voyager_phys_cpu_present_map)[0] |=
 	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
-	cpus_addr(phys_cpu_present_map)[0] |=
+	cpus_addr(voyager_phys_cpu_present_map)[0] |=
 	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
 				       2) << 16;
-	cpus_addr(phys_cpu_present_map)[0] |=
+	cpus_addr(voyager_phys_cpu_present_map)[0] |=
 	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
 				       3) << 24;
-	cpu_possible_map = phys_cpu_present_map;
-	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
-	       cpus_addr(phys_cpu_present_map)[0]);
+	init_cpu_possible(&voyager_phys_cpu_present_map);
+	printk("VOYAGER SMP: voyager_phys_cpu_present_map = 0x%lx\n",
+	       cpus_addr(voyager_phys_cpu_present_map)[0]);
 	/* Here we set up the VIC to enable SMP */
 	/* enable the CPIs by writing the base vector to their register */
 	outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
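The four CMOS reads assemble one 32-bit present mask a byte at a time, and the direct assignment to cpu_possible_map is replaced with the init_cpu_possible() accessor so the generic code owns that map. An equivalent, self-contained sketch of the byte assembly (the helper name is invented for illustration):

	static __u32 voyager_read_present_mask(void)
	{
		__u32 mask = 0;
		int i;

		/* VOYAGER_PROCESSOR_PRESENT_MASK..+3 hold the mask, LSB first */
		for (i = 0; i < 4; i++)
			mask |= (__u32)voyager_extended_cmos_read(
					VOYAGER_PROCESSOR_PRESENT_MASK + i) << (8 * i);
		return mask;
	}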
@@ -630,15 +628,15 @@ void __init smp_boot_cpus(void)
 		/* now that the cat has probed the Voyager System Bus, sanity
 		 * check the cpu map */
 		if (((voyager_quad_processors | voyager_extended_vic_processors)
-		     & cpus_addr(phys_cpu_present_map)[0]) !=
-		    cpus_addr(phys_cpu_present_map)[0]) {
+		     & cpus_addr(voyager_phys_cpu_present_map)[0]) !=
+		    cpus_addr(voyager_phys_cpu_present_map)[0]) {
 			/* should panic */
 			printk("\n\n***WARNING*** "
 			       "Sanity check of CPU present map FAILED\n");
 		}
 	} else if (voyager_level == 4)
 		voyager_extended_vic_processors =
-		    cpus_addr(phys_cpu_present_map)[0];
+		    cpus_addr(voyager_phys_cpu_present_map)[0];
 
 	/* this sets up the idle task to run on the current cpu */
 	voyager_extended_cpus = 1;
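The sanity check is the usual bitmask subset test: every CPU in the present map must be claimed by either the quad or the extended-VIC set, i.e. (a & b) == b means b is a subset of a. Expressed as a sketch:

	/* nonzero iff every bit set in 'sub' is also set in 'super' */
	static inline int mask_is_subset(__u32 super, __u32 sub)
	{
		return (super & sub) == sub;
	}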
@@ -672,7 +670,7 @@ void __init smp_boot_cpus(void)
 	/* loop over all the extended VIC CPUs and boot them. The
 	 * Quad CPUs must be bootstrapped by their extended VIC cpu */
 	for (i = 0; i < nr_cpu_ids; i++) {
-		if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
+		if (i == boot_cpu_id || !cpu_isset(i, voyager_phys_cpu_present_map))
 			continue;
 		do_boot_cpu(i);
 		/* This udelay seems to be needed for the Quad boots
@@ -1599,16 +1597,16 @@ static void after_handle_vic_irq(unsigned int irq)
  * change the mask and then do an interrupt enable CPI to re-enable on
  * the selected processors */
 
-void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	/* Only extended processors handle interrupts */
 	unsigned long real_mask;
 	unsigned long irq_mask = 1 << irq;
 	int cpu;
 
-	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+	real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
 
-	if (cpus_addr(mask)[0] == 0)
+	if (cpus_addr(*mask)[0] == 0)
 		/* can't have no CPUs to accept the interrupt -- extremely
 		 * bad things will happen */
 		return;
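Dereferencing the pointer just to feed the old cpus_addr() macro works because cpus_addr(*mask) expands to (*mask).bits; Voyager supports at most 32 CPUs, so word 0 of that array holds the whole mask. With the pointer-based helpers the same access reads more directly (an equivalent form, not what the patch uses):

	/* cpumask_bits(mask) is mask->bits; word 0 covers CPUs 0-31 */
	real_mask = cpumask_bits(mask)[0] & voyager_extended_vic_processors;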
@@ -1750,10 +1748,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 	init_gdt(smp_processor_id());
 	switch_to_new_gdt();
 
-	cpu_set(smp_processor_id(), cpu_online_map);
-	cpu_set(smp_processor_id(), cpu_callout_map);
-	cpu_set(smp_processor_id(), cpu_possible_map);
-	cpu_set(smp_processor_id(), cpu_present_map);
+	cpu_online_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callin_map = CPU_MASK_NONE;
+	cpu_present_map = cpumask_of_cpu(smp_processor_id());
+
 }
 
 static int __cpuinit voyager_cpu_up(unsigned int cpu)
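Two semantic shifts hide in this hunk: assigning cpumask_of_cpu() replaces the whole mask rather than OR-ing in one bit, which is what the boot CPU wants since the masks should contain exactly one CPU at this point, and the write to cpu_possible_map is gone entirely because find_smp_config() now seeds it through init_cpu_possible(). Rough before/after semantics:

	cpu_set(cpu, cpu_online_map);		/* old: set bit 'cpu', keep the rest */
	cpu_online_map = cpumask_of_cpu(cpu);	/* new: mask now holds only 'cpu'    */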
@@ -1783,9 +1782,9 @@ void __init smp_setup_processor_id(void)
 	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
-static void voyager_send_call_func(cpumask_t callmask)
+static void voyager_send_call_func(const struct cpumask *callmask)
 {
-	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+	__u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
 	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 }
 
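As with set_vic_irq_affinity(), callers now hand in a pointer instead of a bitmap copy, and the sender masks out its own CPU before raising the call-function CPI. Hypothetical call sites under the new signature (not shown in this diff):

	voyager_send_call_func(cpu_online_mask);	/* all online CPUs */
	voyager_send_call_func(cpumask_of(cpu));	/* a single CPU    */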