Diffstat (limited to 'arch/x86/mach-voyager/voyager_smp.c')
-rw-r--r--   arch/x86/mach-voyager/voyager_smp.c | 25
1 file changed, 12 insertions, 13 deletions
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 331cd6d56483..328cb0ce62f0 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
 static void before_handle_vic_irq(unsigned int irq);
 static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
+static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
 static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
@@ -211,8 +211,6 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-cpumask_t cpu_callin_map = CPU_MASK_NONE;
-cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -378,7 +376,7 @@ void __init find_smp_config(void)
         cpus_addr(phys_cpu_present_map)[0] |=
             voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
                                        3) << 24;
-        cpu_possible_map = phys_cpu_present_map;
+        init_cpu_possible(&phys_cpu_present_map);
         printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
                cpus_addr(phys_cpu_present_map)[0]);
         /* Here we set up the VIC to enable SMP */
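
Side note: a minimal sketch of what the accessor introduced above does, assuming the generic cpumask API of this kernel series; the function name below is illustrative, not the kernel's own definition.

/* Hedged sketch: init_cpu_possible() is expected to copy the source mask
 * into cpu_possible_map, replacing the old direct struct assignment
 * "cpu_possible_map = phys_cpu_present_map;". */
void init_cpu_possible_sketch(const struct cpumask *src)
{
        cpumask_copy(&cpu_possible_map, src);
}
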
@@ -1598,16 +1596,16 @@ static void after_handle_vic_irq(unsigned int irq)
  * change the mask and then do an interrupt enable CPI to re-enable on
  * the selected processors */
 
-void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
         /* Only extended processors handle interrupts */
         unsigned long real_mask;
         unsigned long irq_mask = 1 << irq;
         int cpu;
 
-        real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+        real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
 
-        if (cpus_addr(mask)[0] == 0)
+        if (cpus_addr(*mask)[0] == 0)
                 /* can't have no CPUs to accept the interrupt -- extremely
                  * bad things will happen */
                 return;
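
For reference, a hedged sketch of the pointer-based access pattern used in this hunk: cpumask_bits() names the same unsigned long array that cpus_addr(*mask) dereferences, and the VIC only addresses CPUs 0-31, so word 0 carries the whole mask. The helper name is hypothetical.

/* Hedged sketch: read the low 32 bits of a mask passed by pointer. */
static __u32 vic_mask_low_word(const struct cpumask *mask)
{
        return cpumask_bits(mask)[0];   /* same bits as cpus_addr(*mask)[0] */
}
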
@@ -1748,10 +1746,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 {
         switch_to_new_gdt();
 
-        cpu_set(smp_processor_id(), cpu_online_map);
-        cpu_set(smp_processor_id(), cpu_callout_map);
-        cpu_set(smp_processor_id(), cpu_possible_map);
-        cpu_set(smp_processor_id(), cpu_present_map);
+        cpu_online_map = cpumask_of_cpu(smp_processor_id());
+        cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+        cpu_callin_map = CPU_MASK_NONE;
+        cpu_present_map = cpumask_of_cpu(smp_processor_id());
+
 }
 
 static int __cpuinit voyager_cpu_up(unsigned int cpu)
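
One note on the rewritten boot-CPU setup: cpumask_of_cpu() yields a cpumask_t value with only the given bit set, so each assignment above clears the map and marks the boot CPU in one step, whereas the old cpu_set() calls only ORed a bit into whatever was already there. A hedged one-line illustration:

/* Hedged illustration: a fresh mask containing only the boot CPU. */
cpumask_t boot_cpu_mask = cpumask_of_cpu(smp_processor_id());
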
@@ -1780,9 +1779,9 @@ void __init smp_setup_processor_id(void)
         current_thread_info()->cpu = hard_smp_processor_id();
 }
 
-static void voyager_send_call_func(cpumask_t callmask)
+static void voyager_send_call_func(const struct cpumask *callmask)
 {
-        __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+        __u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
         send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 }
 
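
Finally, a hedged usage sketch for the new signature: callers hand in a const struct cpumask * instead of copying a cpumask_t by value, so a prebuilt single-CPU mask from cpumask_of() can be passed directly. The target CPU below is only an example.

/* Hedged usage sketch: send the call-function CPI to one remote CPU. */
voyager_send_call_func(cpumask_of(1));
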