Diffstat (limited to 'arch/x86/mach-voyager/voyager_smp.c')
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c  33
1 file changed, 15 insertions(+), 18 deletions(-)
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 9840b7ec749..98e3c2bc756 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
 static void before_handle_vic_irq(unsigned int irq);
 static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
+static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
 static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
@@ -211,8 +211,6 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-cpumask_t cpu_callin_map = CPU_MASK_NONE;
-cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -378,7 +376,7 @@ void __init find_smp_config(void)
 	cpus_addr(phys_cpu_present_map)[0] |=
 	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
 				       3) << 24;
-	cpu_possible_map = phys_cpu_present_map;
+	init_cpu_possible(&phys_cpu_present_map);
 	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
 	       cpus_addr(phys_cpu_present_map)[0]);
 	/* Here we set up the VIC to enable SMP */
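Note: the direct assignment to cpu_possible_map gives way to the init_cpu_possible() helper. A rough sketch of what that helper does in kernel/cpu.c of this era (an approximation for illustration, not code from this patch):

/* approximate shape of the helper; cpu_possible_bits is the bitmap
 * backing cpu_possible_mask in kernel/cpu.c */
void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}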
@@ -402,7 +400,7 @@ void __init find_smp_config(void)
 			 VOYAGER_SUS_IN_CONTROL_PORT);
 
 	current_thread_info()->cpu = boot_cpu_id;
-	x86_write_percpu(cpu_number, boot_cpu_id);
+	percpu_write(cpu_number, boot_cpu_id);
 }
 
 /*
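Note: x86_write_percpu() is replaced by the percpu_write() accessor. A minimal usage sketch, assuming the x86 percpu accessors of this era (the helper below is hypothetical; only cpu_number appears in the patch):

#include <linux/percpu.h>

DEFINE_PER_CPU(int, cpu_number);	/* per-CPU slot written above */

static void note_boot_cpu(int id)	/* hypothetical helper */
{
	percpu_write(cpu_number, id);	/* store into this CPU's copy */
}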
@@ -530,7 +528,6 @@ static void __init do_boot_cpu(__u8 cpu)
 	/* init_tasks (in sched.c) is indexed logically */
 	stack_start.sp = (void *)idle->thread.sp;
 
-	init_gdt(cpu);
 	per_cpu(current_task, cpu) = idle;
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	irq_ctx_init(cpu);
@@ -1599,16 +1596,16 @@ static void after_handle_vic_irq(unsigned int irq)
  * change the mask and then do an interrupt enable CPI to re-enable on
  * the selected processors */
 
-void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	/* Only extended processors handle interrupts */
 	unsigned long real_mask;
 	unsigned long irq_mask = 1 << irq;
 	int cpu;
 
-	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+	real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
 
-	if (cpus_addr(mask)[0] == 0)
+	if (cpus_addr(*mask)[0] == 0)
 		/* can't have no CPUs to accept the interrupt -- extremely
 		 * bad things will happen */
 		return;
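Note: with the parameter changed from a by-value cpumask_t to const struct cpumask *, callers pass a pointer instead of copying a possibly NR_CPUS-sized mask onto the stack. A minimal caller sketch using the standard <linux/cpumask.h> helpers (the example function is hypothetical, not part of the patch):

#include <linux/cpumask.h>
#include <linux/gfp.h>

static void example_route_irq_to_cpu0(unsigned int irq)	/* hypothetical */
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	cpumask_clear(mask);
	cpumask_set_cpu(0, mask);		/* accept the IRQ on CPU 0 only */
	set_vic_irq_affinity(irq, mask);	/* pointer, no by-value copy */

	free_cpumask_var(mask);
}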
@@ -1747,13 +1744,14 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
 
 static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 {
-	init_gdt(smp_processor_id());
-	switch_to_new_gdt();
+	int cpu = smp_processor_id();
+	switch_to_new_gdt(cpu);
+
+	cpu_set(cpu, cpu_online_map);
+	cpu_set(cpu, cpu_callout_map);
+	cpu_set(cpu, cpu_possible_map);
+	cpu_set(cpu, cpu_present_map);
 
-	cpu_set(smp_processor_id(), cpu_online_map);
-	cpu_set(smp_processor_id(), cpu_callout_map);
-	cpu_set(smp_processor_id(), cpu_possible_map);
-	cpu_set(smp_processor_id(), cpu_present_map);
 }
 
 static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@ -1780,12 +1778,11 @@ static void __init voyager_smp_cpus_done(unsigned int max_cpus)
 void __init smp_setup_processor_id(void)
 {
 	current_thread_info()->cpu = hard_smp_processor_id();
-	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
-static void voyager_send_call_func(cpumask_t callmask)
+static void voyager_send_call_func(const struct cpumask *callmask)
 {
-	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+	__u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
 	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 }
 
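Note: voyager_send_call_func() gets the same pointer-based signature, so a caller can hand it one of the global masks directly; a minimal hypothetical sketch:

static void example_call_func_on_online_cpus(void)	/* hypothetical */
{
	/* cpu_online_mask is already a const struct cpumask * */
	voyager_send_call_func(cpu_online_mask);
}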