diff options
author | Glauber de Oliveira Costa <gcosta@redhat.com> | 2008-03-19 13:25:25 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-04-17 11:41:01 -0400 |
commit | a6c422ccdb57924bd20ae408dba8e9db01d09677 (patch) | |
tree | 3df5b830d56291ae024037fcda55ba405ddd306d /arch/x86 | |
parent | 73bf102b1cadc53d418df02ba687769a9f916a6d (diff) |
x86: fill cpu to apicid and present map in mpparse
This is the way x86_64 does it, and complements the already
present patch that does the BIOS CPU to APIC ID mapping here.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kernel/mpparse_32.c | 19 | ||||
-rw-r--r-- | arch/x86/kernel/smpboot_32.c | 24 |
2 files changed, 24 insertions, 19 deletions
diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 6ea97163701f..a0cec74b80ef 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c | |||
@@ -105,7 +105,8 @@ static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinit | |||
105 | 105 | ||
106 | static void __cpuinit MP_processor_info (struct mpc_config_processor *m) | 106 | static void __cpuinit MP_processor_info (struct mpc_config_processor *m) |
107 | { | 107 | { |
108 | int ver, apicid; | 108 | int ver, apicid, cpu; |
109 | cpumask_t tmp_map; | ||
109 | physid_mask_t phys_cpu; | 110 | physid_mask_t phys_cpu; |
110 | 111 | ||
111 | if (!(m->mpc_cpuflag & CPU_ENABLED)) { | 112 | if (!(m->mpc_cpuflag & CPU_ENABLED)) { |
@@ -198,6 +199,16 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m) | |||
198 | 199 | ||
199 | cpu_set(num_processors, cpu_possible_map); | 200 | cpu_set(num_processors, cpu_possible_map); |
200 | num_processors++; | 201 | num_processors++; |
202 | cpus_complement(tmp_map, cpu_present_map); | ||
203 | cpu = first_cpu(tmp_map); | ||
204 | |||
205 | if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) | ||
206 | /* | ||
207 | * x86_bios_cpu_apicid is required to have processors listed | ||
208 | * in same order as logical cpu numbers. Hence the first | ||
209 | * entry is BSP, and so on. | ||
210 | */ | ||
211 | cpu = 0; | ||
201 | 212 | ||
202 | /* | 213 | /* |
203 | * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y | 214 | * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y |
@@ -220,12 +231,16 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m) | |||
220 | } | 231 | } |
221 | /* are we being called early in kernel startup? */ | 232 | /* are we being called early in kernel startup? */ |
222 | if (x86_cpu_to_apicid_early_ptr) { | 233 | if (x86_cpu_to_apicid_early_ptr) { |
234 | u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; | ||
223 | u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; | 235 | u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; |
236 | |||
237 | cpu_to_apicid[cpu] = m->mpc_apicid; | ||
224 | bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; | 238 | bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; |
225 | } else { | 239 | } else { |
226 | int cpu = num_processors - 1; | 240 | per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid; |
227 | per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid; | 241 | per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid; |
228 | } | 242 | } |
243 | cpu_set(cpu, cpu_present_map); | ||
229 | } | 244 | } |
230 | 245 | ||
231 | static void __init MP_bus_info (struct mpc_config_bus *m) | 246 | static void __init MP_bus_info (struct mpc_config_bus *m) |
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index bf5c9e9f26c1..2fea910eff43 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c | |||
@@ -525,16 +525,6 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | |||
525 | #endif /* WAKE_SECONDARY_VIA_INIT */ | 525 | #endif /* WAKE_SECONDARY_VIA_INIT */ |
526 | 526 | ||
527 | extern cpumask_t cpu_initialized; | 527 | extern cpumask_t cpu_initialized; |
528 | static inline int alloc_cpu_id(void) | ||
529 | { | ||
530 | cpumask_t tmp_map; | ||
531 | int cpu; | ||
532 | cpus_complement(tmp_map, cpu_present_map); | ||
533 | cpu = first_cpu(tmp_map); | ||
534 | if (cpu >= NR_CPUS) | ||
535 | return -ENODEV; | ||
536 | return cpu; | ||
537 | } | ||
538 | 528 | ||
539 | #ifdef CONFIG_HOTPLUG_CPU | 529 | #ifdef CONFIG_HOTPLUG_CPU |
540 | static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS]; | 530 | static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS]; |
@@ -605,7 +595,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) | |||
605 | 595 | ||
606 | irq_ctx_init(cpu); | 596 | irq_ctx_init(cpu); |
607 | 597 | ||
608 | per_cpu(x86_cpu_to_apicid, cpu) = apicid; | ||
609 | /* | 598 | /* |
610 | * This grunge runs the startup process for | 599 | * This grunge runs the startup process for |
611 | * the targeted processor. | 600 | * the targeted processor. |
@@ -666,10 +655,8 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) | |||
666 | cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */ | 655 | cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */ |
667 | cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ | 656 | cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ |
668 | cpu_clear(cpu, cpu_possible_map); | 657 | cpu_clear(cpu, cpu_possible_map); |
658 | per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; | ||
669 | cpucount--; | 659 | cpucount--; |
670 | } else { | ||
671 | per_cpu(x86_cpu_to_apicid, cpu) = apicid; | ||
672 | cpu_set(cpu, cpu_present_map); | ||
673 | } | 660 | } |
674 | 661 | ||
675 | /* mark "stuck" area as not stuck */ | 662 | /* mark "stuck" area as not stuck */ |
@@ -745,6 +732,7 @@ EXPORT_SYMBOL(xquad_portio); | |||
745 | static void __init disable_smp(void) | 732 | static void __init disable_smp(void) |
746 | { | 733 | { |
747 | cpu_possible_map = cpumask_of_cpu(0); | 734 | cpu_possible_map = cpumask_of_cpu(0); |
735 | cpu_present_map = cpumask_of_cpu(0); | ||
748 | smpboot_clear_io_apic_irqs(); | 736 | smpboot_clear_io_apic_irqs(); |
749 | phys_cpu_present_map = physid_mask_of_physid(0); | 737 | phys_cpu_present_map = physid_mask_of_physid(0); |
750 | map_cpu_to_logical_apicid(); | 738 | map_cpu_to_logical_apicid(); |
@@ -825,7 +813,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus) | |||
825 | 813 | ||
826 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | 814 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); |
827 | boot_cpu_logical_apicid = logical_smp_processor_id(); | 815 | boot_cpu_logical_apicid = logical_smp_processor_id(); |
828 | per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid; | ||
829 | 816 | ||
830 | current_thread_info()->cpu = 0; | 817 | current_thread_info()->cpu = 0; |
831 | 818 | ||
@@ -866,8 +853,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus) | |||
866 | continue; | 853 | continue; |
867 | if (max_cpus <= cpucount+1) | 854 | if (max_cpus <= cpucount+1) |
868 | continue; | 855 | continue; |
869 | 856 | /* Utterly temporary */ | |
870 | if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu)) | 857 | for (cpu = 0; cpu < NR_CPUS; cpu++) |
858 | if (per_cpu(x86_cpu_to_apicid, cpu) == apicid) | ||
859 | break; | ||
860 | if (do_boot_cpu(apicid, cpu)) | ||
871 | printk("CPU #%d not responding - cannot use it.\n", | 861 | printk("CPU #%d not responding - cannot use it.\n", |
872 | apicid); | 862 | apicid); |
873 | else | 863 | else |