-rw-r--r--  arch/x86/kernel/apic_64.c     16
-rw-r--r--  arch/x86/kernel/mpparse_64.c  17
-rw-r--r--  arch/x86/kernel/setup_64.c     3
-rw-r--r--  arch/x86/kernel/smpboot_64.c   7
-rw-r--r--  include/asm-x86/smp_64.h       8
5 files changed, 41 insertions, 10 deletions
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 01d4ca27ecf0..f9919c492699 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -1180,14 +1180,26 @@ __cpuinit int apic_is_clustered_box(void)
 	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
 
 	for (i = 0; i < NR_CPUS; i++) {
-		id = bios_cpu_apicid[i];
+		/* are we being called early in kernel startup? */
+		if (x86_bios_cpu_apicid_early_ptr) {
+			id = ((u16 *)x86_bios_cpu_apicid_early_ptr)[i];
+		}
+		else if (i < nr_cpu_ids) {
+			if (cpu_present(i))
+				id = per_cpu(x86_bios_cpu_apicid, i);
+			else
+				continue;
+		}
+		else
+			break;
+
 		if (id != BAD_APICID)
 			__set_bit(APIC_CLUSTERID(id), clustermap);
 	}
 
 	/* Problem: Partially populated chassis may not have CPUs in some of
 	 * the APIC clusters they have been allocated. Only present CPUs have
-	 * bios_cpu_apicid entries, thus causing zeroes in the bitmap. Since
+	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap. Since
 	 * clusters are allocated sequentially, count zeros only if they are
 	 * bounded by ones.
 	 */
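
The hunk above makes apic_is_clustered_box() read APIC IDs from whichever store is valid at the time it runs: the static __initdata table while the early pointer is still set, or the per-cpu variable once the per-cpu areas have been populated. A rough sketch of that fallback, not part of the patch (get_bios_apicid() is a hypothetical helper name):

/* Hypothetical helper: sketch of the early/late fallback used above.
 * While x86_bios_cpu_apicid_early_ptr is non-NULL we are still before the
 * per-cpu areas are set up, so the static __initdata table is the only
 * valid source; afterwards the per-cpu copy is authoritative.
 */
static u16 get_bios_apicid(int cpu)
{
	if (x86_bios_cpu_apicid_early_ptr)
		return ((u16 *)x86_bios_cpu_apicid_early_ptr)[cpu];
	if (cpu < nr_cpu_ids && cpu_present(cpu))
		return per_cpu(x86_bios_cpu_apicid, cpu);
	return BAD_APICID;	/* out of range or not present */
}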
diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c
index 528ad9696d96..fd671754dcb5 100644
--- a/arch/x86/kernel/mpparse_64.c
+++ b/arch/x86/kernel/mpparse_64.c
@@ -67,7 +67,11 @@ unsigned disabled_cpus __cpuinitdata;
 /* Bitmask of physically existing CPUs */
 physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
 
-u16 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata
+	= { [0 ... NR_CPUS-1] = BAD_APICID };
+void *x86_bios_cpu_apicid_early_ptr;
+DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
 
 /*
@@ -118,19 +122,22 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
 	physid_set(m->mpc_apicid, phys_cpu_present_map);
 	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
 		/*
-		 * bios_cpu_apicid is required to have processors listed
+		 * x86_bios_cpu_apicid is required to have processors listed
 		 * in same order as logical cpu numbers. Hence the first
 		 * entry is BSP, and so on.
 		 */
 		cpu = 0;
 	}
-	bios_cpu_apicid[cpu] = m->mpc_apicid;
 	/* are we being called early in kernel startup? */
 	if (x86_cpu_to_apicid_early_ptr) {
-		u16 *x86_cpu_to_apicid = (u16 *)x86_cpu_to_apicid_early_ptr;
-		x86_cpu_to_apicid[cpu] = m->mpc_apicid;
+		u16 *cpu_to_apicid = (u16 *)x86_cpu_to_apicid_early_ptr;
+		u16 *bios_cpu_apicid = (u16 *)x86_bios_cpu_apicid_early_ptr;
+
+		cpu_to_apicid[cpu] = m->mpc_apicid;
+		bios_cpu_apicid[cpu] = m->mpc_apicid;
 	} else {
 		per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
+		per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid;
 	}
 
 	cpu_set(cpu, cpu_possible_map);
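
On the write side, MP_processor_info() now records each processor's APIC ID in both maps through the same early/late split. A minimal sketch of that dual-path store, assuming a hypothetical helper name that the patch itself does not introduce:

/* Hypothetical helper: store an APIC id for a logical cpu either in the
 * early __initdata tables (while the early pointers are set) or in the
 * per-cpu variables (once the per-cpu areas exist).
 */
static void __cpuinit record_apicid(int cpu, u16 apicid)
{
	if (x86_cpu_to_apicid_early_ptr) {
		((u16 *)x86_cpu_to_apicid_early_ptr)[cpu] = apicid;
		((u16 *)x86_bios_cpu_apicid_early_ptr)[cpu] = apicid;
	} else {
		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
		per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
	}
}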
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 529e45c37b1c..71a420c7fee7 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -362,8 +362,11 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_SMP
 	/* setup to use the early static init tables during kernel startup */
 	x86_cpu_to_apicid_early_ptr = (void *)&x86_cpu_to_apicid_init;
+#ifdef CONFIG_NUMA
 	x86_cpu_to_node_map_early_ptr = (void *)&x86_cpu_to_node_map_init;
 #endif
+	x86_bios_cpu_apicid_early_ptr = (void *)&x86_bios_cpu_apicid_init;
+#endif
 
 #ifdef CONFIG_ACPI
 	/*
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index a8bc2bcdb74a..93071cdf0849 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -864,8 +864,12 @@ void __init smp_set_apicids(void)
 		if (per_cpu_offset(cpu)) {
 			per_cpu(x86_cpu_to_apicid, cpu) =
 						x86_cpu_to_apicid_init[cpu];
+#ifdef CONFIG_NUMA
 			per_cpu(x86_cpu_to_node_map, cpu) =
 						x86_cpu_to_node_map_init[cpu];
+#endif
+			per_cpu(x86_bios_cpu_apicid, cpu) =
+						x86_bios_cpu_apicid_init[cpu];
 		}
 		else
 			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
@@ -874,7 +878,10 @@ void __init smp_set_apicids(void)
 
 	/* indicate the early static arrays are gone */
 	x86_cpu_to_apicid_early_ptr = NULL;
+#ifdef CONFIG_NUMA
 	x86_cpu_to_node_map_early_ptr = NULL;
+#endif
+	x86_bios_cpu_apicid_early_ptr = NULL;
 }
 
 static void __init smp_cpu_index_default(void)
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index 6fa332db29cc..e0a75519ad21 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -27,18 +27,20 @@ extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
 				  void *info, int wait);
 
 extern u16 __initdata x86_cpu_to_apicid_init[];
+extern u16 __initdata x86_bios_cpu_apicid_init[];
 extern void *x86_cpu_to_apicid_early_ptr;
-extern u16 bios_cpu_apicid[];
+extern void *x86_bios_cpu_apicid_early_ptr;
 
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
 DECLARE_PER_CPU(u16, x86_cpu_to_apicid);
+DECLARE_PER_CPU(u16, x86_bios_cpu_apicid);
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
-		return (int)bios_cpu_apicid[mps_cpu];
+	if (cpu_present(mps_cpu))
+		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
 }
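
With the global bios_cpu_apicid[] array removed, cpu_present_to_apicid() now answers from the per-cpu copy and only for CPUs that are actually present. A hedged usage sketch (the caller and its messages are illustrative, not from the patch):

/* Illustrative caller: resolve a logical cpu to its BIOS-reported APIC id,
 * treating BAD_APICID as "no such processor".
 */
static void __cpuinit report_apicid(int cpu)
{
	int apicid = cpu_present_to_apicid(cpu);

	if (apicid == BAD_APICID)
		printk(KERN_DEBUG "cpu %d is not present\n", cpu);
	else
		printk(KERN_DEBUG "cpu %d has apicid %d\n", cpu, apicid);
}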