Diffstat (limited to 'arch/x86/kernel/smpboot.c')
-rw-r--r--  arch/x86/kernel/smpboot.c | 48
1 file changed, 23 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7985c5b3f916..9056f7e272c0 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -88,7 +88,7 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
 #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
 #else
-struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define get_idle_for_cpu(x) (idle_thread_array[(x)])
 #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
 #endif
@@ -123,13 +123,12 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 static atomic_t init_deasserted;
 
-static int boot_cpu_logical_apicid;
 
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
 /* Set if we find a B stepping CPU */
-int __cpuinitdata smp_b_stepping;
+static int __cpuinitdata smp_b_stepping;
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
 
@@ -165,6 +164,8 @@ static void unmap_cpu_to_node(int cpu)
 #endif
 
 #ifdef CONFIG_X86_32
+static int boot_cpu_logical_apicid;
+
 u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
                         { [0 ... NR_CPUS-1] = BAD_APICID };
 
@@ -210,7 +211,7 @@ static void __cpuinit smp_callin(void)
         /*
          * (This works even if the APIC is not enabled.)
          */
-        phys_id = GET_APIC_ID(read_apic_id());
+        phys_id = read_apic_id();
         cpuid = smp_processor_id();
         if (cpu_isset(cpuid, cpu_callin_map)) {
                 panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
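
The GET_APIC_ID() wrapper is dropped from the caller above because read_apic_id() is now assumed to hand back the already-extracted physical APIC ID. A minimal sketch of that assumption, not taken from this patch and possibly differing from the real helper:

    static inline unsigned int read_apic_id(void)
    {
            /* read the raw APIC_ID register and extract the ID field in one step */
            return GET_APIC_ID(apic_read(APIC_ID));
    }
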
@@ -257,6 +258,7 @@ static void __cpuinit smp_callin(void)
         end_local_APIC_setup();
         map_cpu_to_logical_apicid();
 
+        notify_cpu_starting(cpuid);
         /*
          * Get our bogomips.
          *
@@ -550,8 +552,7 @@ static inline void __inquire_remote_apic(int apicid)
                         printk(KERN_CONT
                                "a previous APIC delivery may have failed\n");
 
-                apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
-                apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);
+                apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
 
                 timeout = 0;
                 do {
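
The two-write sequence removed above (program the destination into APIC_ICR2, then write the command to APIC_ICR) is folded into a single apic_icr_write() call throughout this patch. A rough sketch of what such a helper could look like, assumed from the code it replaces rather than copied from the kernel:

    static inline void apic_icr_write(u32 low, u32 dest_apicid)
    {
            /* select the destination CPU first, exactly as the removed code did */
            apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(dest_apicid));
            /* writing the low word of the ICR issues the IPI */
            apic_write(APIC_ICR, low);
    }
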
@@ -583,11 +584,9 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
         int maxlvt;
 
         /* Target chip */
-        apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
-
         /* Boot on the stack */
         /* Kick the second */
-        apic_write(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
+        apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);
 
         pr_debug("Waiting for send to finish...\n");
         send_status = safe_apic_wait_icr_idle();
@@ -640,13 +639,11 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
         /*
          * Turn INIT on target chip
          */
-        apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
         /*
          * Send IPI
          */
-        apic_write(APIC_ICR,
-                   APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT);
+        apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
+                       phys_apicid);
 
         pr_debug("Waiting for send to finish...\n");
         send_status = safe_apic_wait_icr_idle();
@@ -656,10 +653,8 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
         pr_debug("Deasserting INIT.\n");
 
         /* Target chip */
-        apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
         /* Send IPI */
-        apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
+        apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
 
         pr_debug("Waiting for send to finish...\n");
         send_status = safe_apic_wait_icr_idle();
@@ -702,11 +697,10 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
                  */
 
                 /* Target chip */
-                apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
                 /* Boot on the stack */
                 /* Kick the second */
-                apic_write(APIC_ICR, APIC_DM_STARTUP | (start_eip >> 12));
+                apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
+                               phys_apicid);
 
                 /*
                  * Give the other CPU some time to accept the IPI.
@@ -1175,10 +1169,17 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
          * Setup boot CPU information
          */
         smp_store_cpu_info(0); /* Final full version of the data */
+#ifdef CONFIG_X86_32
         boot_cpu_logical_apicid = logical_smp_processor_id();
+#endif
         current_thread_info()->cpu = 0; /* needed? */
         set_cpu_sibling_map(0);
 
+#ifdef CONFIG_X86_64
+        enable_IR_x2apic();
+        setup_apic_routing();
+#endif
+
         if (smp_sanity_check(max_cpus) < 0) {
                 printk(KERN_INFO "SMP disabled\n");
                 disable_smp();
@@ -1186,9 +1187,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
         }
 
         preempt_disable();
-        if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) {
+        if (read_apic_id() != boot_cpu_physical_apicid) {
                 panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
-                      GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid);
+                      read_apic_id(), boot_cpu_physical_apicid);
                 /* Or can we switch back to PIC here? */
         }
         preempt_enable();
@@ -1313,16 +1314,13 @@ __init void prefill_possible_map(void)
         if (!num_processors)
                 num_processors = 1;
 
-#ifdef CONFIG_HOTPLUG_CPU
         if (additional_cpus == -1) {
                 if (disabled_cpus > 0)
                         additional_cpus = disabled_cpus;
                 else
                         additional_cpus = 0;
         }
-#else
-        additional_cpus = 0;
-#endif
+
         possible = num_processors + additional_cpus;
         if (possible > NR_CPUS)
                 possible = NR_CPUS;