| author | Thomas Gleixner <tglx@linutronix.de> | 2009-03-23 09:50:03 -0400 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2009-03-23 16:20:20 -0400 |
| commit | 80c5520811d3805adcb15c570ea5e2d489fa5d0b | |
| tree | ae797a7f4af39f80e77526533d06ac23b439f0ab /arch/x86/kernel/smpboot.c | |
| parent | b3e3b302cf6dc8d60b67f0e84d1fa5648889c038 | |
| parent | 8c083f081d0014057901c68a0a3e0f8ca7ac8d23 | |
Merge branch 'cpus4096' into irq/threaded

Conflicts:
        arch/parisc/kernel/irq.c
        kernel/irq/handle.c

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/smpboot.c')

    -rw-r--r--   arch/x86/kernel/smpboot.c   285
    1 file changed, 81 insertions(+), 204 deletions(-)
```diff
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index bb1a3b1fc87f..58d24ef917d8 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -2,7 +2,7 @@
  * x86 SMP booting functions
  *
  * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
- * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  * Copyright 2001 Andi Kleen, SuSE Labs.
  *
  * Much of the core SMP work is based on previous work by Thomas Radke, to
@@ -53,7 +53,6 @@
 #include <asm/nmi.h>
 #include <asm/irq.h>
 #include <asm/idle.h>
-#include <asm/smp.h>
 #include <asm/trampoline.h>
 #include <asm/cpu.h>
 #include <asm/numa.h>
@@ -61,13 +60,12 @@
 #include <asm/tlbflush.h>
 #include <asm/mtrr.h>
 #include <asm/vmi.h>
-#include <asm/genapic.h>
+#include <asm/apic.h>
 #include <asm/setup.h>
+#include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
 
-#include <mach_apic.h>
-#include <mach_wakecpu.h>
-#include <smpboot_hooks.h>
+#include <asm/smpboot_hooks.h>
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
@@ -103,29 +101,20 @@ EXPORT_SYMBOL(smp_num_siblings);
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
 /* representing HT siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* Per CPU bogomips and other parameters */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-static atomic_t init_deasserted;
-
-
-/* Set if we find a B stepping CPU */
-static int __cpuinitdata smp_b_stepping;
+atomic_t init_deasserted;
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
-
-/* which logical CPUs are on which nodes */
-cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
-                                { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
-EXPORT_SYMBOL(node_to_cpumask_map);
 /* which node each logical CPU is on */
 int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
 EXPORT_SYMBOL(cpu_to_node_map);
@@ -134,7 +123,7 @@ EXPORT_SYMBOL(cpu_to_node_map);
 static void map_cpu_to_node(int cpu, int node)
 {
         printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
-        cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
+        cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
         cpu_to_node_map[cpu] = node;
 }
 
@@ -145,7 +134,7 @@ static void unmap_cpu_to_node(int cpu)
 
         printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
         for (node = 0; node < MAX_NUMNODES; node++)
-                cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]);
+                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
         cpu_to_node_map[cpu] = 0;
 }
 #else /* !(CONFIG_NUMA && CONFIG_X86_32) */
@@ -163,7 +152,7 @@ static void map_cpu_to_logical_apicid(void)
 {
         int cpu = smp_processor_id();
         int apicid = logical_smp_processor_id();
-        int node = apicid_to_node(apicid);
+        int node = apic->apicid_to_node(apicid);
 
         if (!node_online(node))
                 node = first_online_node;
```
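The hunks above are the heart of the 'cpus4096' side of this merge: the per-CPU sibling/core maps move from `cpumask_t` to `cpumask_var_t`, because with `CONFIG_NR_CPUS=4096` a `cpumask_t` is 512 bytes and is too large to embed in structures or copy around casually. A minimal sketch of the `cpumask_var_t` calling convention (the function name here is hypothetical, not from the patch):

```c
#include <linux/cpumask.h>
#include <linux/slab.h>

/*
 * Sketch only. With CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a real
 * pointer and alloc_cpumask_var() allocates the bitmap; with it off,
 * cpumask_var_t is a one-element array and the "allocation" is a no-op
 * that always succeeds. Callers are written once and work either way.
 */
static int example_track_cpu(int cpu)        /* hypothetical helper */
{
        cpumask_var_t mask;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_clear(mask);
        cpumask_set_cpu(cpu, mask);     /* no '&': it already acts as a pointer */

        free_cpumask_var(mask);
        return 0;
}
```

The same convention presumably explains the `node_to_cpumask_map` hunks: once each entry is reachable through a pointer-like type, the call sites here drop the `&`.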
```diff
@@ -196,7 +185,8 @@ static void __cpuinit smp_callin(void)
          * our local APIC. We have to wait for the IPI or we'll
          * lock up on an APIC access.
          */
-        wait_for_init_deassert(&init_deasserted);
+        if (apic->wait_for_init_deassert)
+                apic->wait_for_init_deassert(&init_deasserted);
 
         /*
          * (This works even if the APIC is not enabled.)
@@ -243,7 +233,8 @@ static void __cpuinit smp_callin(void)
          */
 
         pr_debug("CALLIN, before setup_local_APIC().\n");
-        smp_callin_clear_local_apic();
+        if (apic->smp_callin_clear_local_apic)
+                apic->smp_callin_clear_local_apic();
         setup_local_APIC();
         end_local_APIC_setup();
         map_cpu_to_logical_apicid();
@@ -271,8 +262,6 @@ static void __cpuinit smp_callin(void)
         cpumask_set_cpu(cpuid, cpu_callin_mask);
 }
 
-static int __cpuinitdata unsafe_smp;
-
 /*
  * Activate a secondary processor.
  */
@@ -307,7 +296,7 @@ notrace static void __cpuinit start_secondary(void *unused)
         __flush_tlb_all();
 #endif
 
-        /* This must be done before setting cpu_online_map */
+        /* This must be done before setting cpu_online_mask */
         set_cpu_sibling_map(raw_smp_processor_id());
         wmb();
```
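Both `smp_callin()` hunks replace compile-time `<mach_apic.h>` wrappers with optional methods on the runtime `apic` driver, where a NULL hook means the platform has nothing to do. A hedged sketch of the driver side (the field names follow the patch, but the struct shown is a simplification, not the kernel's real `struct apic`):

```c
#include <asm/atomic.h>

/* Simplified stand-in for the kernel's APIC driver structure. */
struct apic_sketch {
        const char *name;
        void (*wait_for_init_deassert)(atomic_t *deassert);
        void (*smp_callin_clear_local_apic)(void);
};

/* A platform needing neither quirk just leaves the hooks NULL... */
static struct apic_sketch apic_flat_sketch = {
        .name                           = "flat-sketch",
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
};

/* ...and the generic boot path guards each call, as the hunks above do. */
static void callin_quirks_sketch(struct apic_sketch *apic, atomic_t *deasserted)
{
        if (apic->wait_for_init_deassert)
                apic->wait_for_init_deassert(deasserted);
        if (apic->smp_callin_clear_local_apic)
                apic->smp_callin_clear_local_apic();
}
```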
```diff
@@ -340,75 +329,22 @@ notrace static void __cpuinit start_secondary(void *unused)
         cpu_idle();
 }
 
-static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* In this case, llc_shared_map is a pointer to a cpumask. */
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+                                    const struct cpuinfo_x86 *src)
 {
-        /*
-         * Mask B, Pentium, but not Pentium MMX
-         */
-        if (c->x86_vendor == X86_VENDOR_INTEL &&
-            c->x86 == 5 &&
-            c->x86_mask >= 1 && c->x86_mask <= 4 &&
-            c->x86_model <= 3)
-                /*
-                 * Remember we have B step Pentia with bugs
-                 */
-                smp_b_stepping = 1;
-
-        /*
-         * Certain Athlons might work (for various values of 'work') in SMP
-         * but they are not certified as MP capable.
-         */
-        if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
-
-                if (num_possible_cpus() == 1)
-                        goto valid_k7;
-
-                /* Athlon 660/661 is valid. */
-                if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
-                    (c->x86_mask == 1)))
-                        goto valid_k7;
-
-                /* Duron 670 is valid */
-                if ((c->x86_model == 7) && (c->x86_mask == 0))
-                        goto valid_k7;
-
-                /*
-                 * Athlon 662, Duron 671, and Athlon >model 7 have capability
-                 * bit. It's worth noting that the A5 stepping (662) of some
-                 * Athlon XP's have the MP bit set.
-                 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
-                 * more.
-                 */
-                if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
-                    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
-                     (c->x86_model > 7))
-                        if (cpu_has_mp)
-                                goto valid_k7;
-
-                /* If we get here, not a certified SMP capable AMD system. */
-                unsafe_smp = 1;
-        }
-
-valid_k7:
-        ;
+        struct cpumask *llc = dst->llc_shared_map;
+        *dst = *src;
+        dst->llc_shared_map = llc;
 }
-
-static void __cpuinit smp_checks(void)
+#else
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+                                    const struct cpuinfo_x86 *src)
 {
-        if (smp_b_stepping)
-                printk(KERN_WARNING "WARNING: SMP operation may be unreliable"
-                                    "with B stepping processors.\n");
-
-        /*
-         * Don't taint if we are running SMP kernel on a single non-MP
-         * approved Athlon
-         */
-        if (unsafe_smp && num_online_cpus() > 1) {
-                printk(KERN_INFO "WARNING: This combination of AMD"
-                        "processors is not suitable for SMP.\n");
-                add_taint(TAINT_UNSAFE_SMP);
-        }
+        *dst = *src;
 }
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 
 /*
  * The bootstrap kernel entry code has set these up. Save them for
```
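The new `copy_cpuinfo_x86()` exists because `struct cpuinfo_x86` now carries `llc_shared_map`, which under `CONFIG_CPUMASK_OFFSTACK` is a pointer to a separately allocated mask: a plain `*c = boot_cpu_data` would overwrite every CPU's pointer with the boot CPU's, so all CPUs would alias one mask. The save/copy/restore idiom, reduced to a hypothetical two-field struct:

```c
/*
 * Illustrative only: 'shared' stands in for llc_shared_map, i.e. a
 * member that owns its own allocation and must survive a bulk copy.
 */
struct cpu_info_sketch {
        int family;
        unsigned long *shared;
};

static void copy_info_sketch(struct cpu_info_sketch *dst,
                             const struct cpu_info_sketch *src)
{
        unsigned long *keep = dst->shared;      /* save dst's own buffer */

        *dst = *src;            /* struct copy would alias src->shared... */
        dst->shared = keep;     /* ...so restore dst's pointer afterwards */
}
```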
```diff
@@ -419,11 +355,10 @@ void __cpuinit smp_store_cpu_info(int id)
 {
         struct cpuinfo_x86 *c = &cpu_data(id);
 
-        *c = boot_cpu_data;
+        copy_cpuinfo_x86(c, &boot_cpu_data);
         c->cpu_index = id;
         if (id != 0)
                 identify_secondary_cpu(c);
-        smp_apply_quirks(c);
 }
 
 
@@ -444,15 +379,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                                 cpumask_set_cpu(cpu, cpu_sibling_mask(i));
                                 cpumask_set_cpu(i, cpu_core_mask(cpu));
                                 cpumask_set_cpu(cpu, cpu_core_mask(i));
-                                cpumask_set_cpu(i, &c->llc_shared_map);
-                                cpumask_set_cpu(cpu, &o->llc_shared_map);
+                                cpumask_set_cpu(i, c->llc_shared_map);
+                                cpumask_set_cpu(cpu, o->llc_shared_map);
                         }
                 }
         } else {
                 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
         }
 
-        cpumask_set_cpu(cpu, &c->llc_shared_map);
+        cpumask_set_cpu(cpu, c->llc_shared_map);
 
         if (current_cpu_data.x86_max_cores == 1) {
                 cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -463,8 +398,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
         for_each_cpu(i, cpu_sibling_setup_mask) {
                 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                     per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                        cpumask_set_cpu(i, &c->llc_shared_map);
-                        cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
+                        cpumask_set_cpu(i, c->llc_shared_map);
+                        cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
                 }
                 if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                         cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -502,12 +437,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
         if (sched_mc_power_savings || sched_smt_power_savings)
                 return cpu_core_mask(cpu);
         else
-                return &c->llc_shared_map;
-}
-
-cpumask_t cpu_coregroup_map(int cpu)
-{
-        return *cpu_coregroup_mask(cpu);
+                return c->llc_shared_map;
 }
 
 static void impress_friends(void)
```
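Deleting `cpu_coregroup_map()` removes the last by-value cpumask return in this file: handing back a `cpumask_t` copies the whole bitmap through the stack on every call, while `cpu_coregroup_mask()` returns a pointer into existing per-CPU data. A sketch of the size argument, assuming `NR_CPUS=4096` (so `sizeof(cpumask_t)` is 4096/8 = 512 bytes):

```c
#include <linux/cpumask.h>

/* Old shape: every call materializes a 512-byte struct return. */
/*      cpumask_t cpu_coregroup_map(int cpu); */

/* New shape: callers borrow a read-only mask; only a pointer moves. */
static const struct cpumask *coregroup_mask_sketch(const struct cpumask *llc)
{
        return llc;     /* shared data: the caller must not modify it */
}
```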
```diff
@@ -583,7 +513,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
         /* Target chip */
         /* Boot on the stack */
         /* Kick the second */
-        apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);
+        apic_icr_write(APIC_DM_NMI | apic->dest_logical, logical_apicid);
 
         pr_debug("Waiting for send to finish...\n");
         send_status = safe_apic_wait_icr_idle();
@@ -614,12 +544,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
         unsigned long send_status, accept_status = 0;
         int maxlvt, num_starts, j;
 
-        if (get_uv_system_type() == UV_NON_UNIQUE_APIC) {
-                send_status = uv_wakeup_secondary(phys_apicid, start_eip);
-                atomic_set(&init_deasserted, 1);
-                return send_status;
-        }
-
         maxlvt = lapic_get_maxlvt();
 
         /*
@@ -745,78 +669,23 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
         complete(&c_idle->done);
 }
 
-#ifdef CONFIG_X86_64
-
-/* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
-static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
-{
-        if (!after_bootmem)
-                free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
-}
-
-/*
- * Allocate node local memory for the AP pda.
- *
- * Must be called after the _cpu_pda pointer table is initialized.
- */
-int __cpuinit get_local_pda(int cpu)
-{
-        struct x8664_pda *oldpda, *newpda;
-        unsigned long size = sizeof(struct x8664_pda);
-        int node = cpu_to_node(cpu);
-
-        if (cpu_pda(cpu) && !cpu_pda(cpu)->in_bootmem)
-                return 0;
-
-        oldpda = cpu_pda(cpu);
-        newpda = kmalloc_node(size, GFP_ATOMIC, node);
-        if (!newpda) {
-                printk(KERN_ERR "Could not allocate node local PDA "
-                        "for CPU %d on node %d\n", cpu, node);
-
-                if (oldpda)
-                        return 0;       /* have a usable pda */
-                else
-                        return -1;
-        }
-
-        if (oldpda) {
-                memcpy(newpda, oldpda, size);
-                free_bootmem_pda(oldpda);
-        }
-
-        newpda->in_bootmem = 0;
-        cpu_pda(cpu) = newpda;
-        return 0;
-}
-#endif /* CONFIG_X86_64 */
-
-static int __cpuinit do_boot_cpu(int apicid, int cpu)
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
- * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
+ * Returns zero if CPU booted OK, else error code from
+ * ->wakeup_secondary_cpu.
  */
+static int __cpuinit do_boot_cpu(int apicid, int cpu)
 {
         unsigned long boot_error = 0;
-        int timeout;
         unsigned long start_ip;
-        unsigned short nmi_high = 0, nmi_low = 0;
+        int timeout;
         struct create_idle c_idle = {
                 .cpu = cpu,
                 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
         };
-        INIT_WORK(&c_idle.work, do_fork_idle);
 
-#ifdef CONFIG_X86_64
-        /* Allocate node local memory for AP pdas */
-        if (cpu > 0) {
-                boot_error = get_local_pda(cpu);
-                if (boot_error)
-                        goto restore_state;
-                /* if can't get pda memory, can't start cpu */
-        }
-#endif
+        INIT_WORK(&c_idle.work, do_fork_idle);
 
         alternatives_smp_switch(1);
@@ -847,14 +716,16 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 
         set_idle_for_cpu(cpu, c_idle.idle);
 do_rest:
-#ifdef CONFIG_X86_32
         per_cpu(current_task, cpu) = c_idle.idle;
-        init_gdt(cpu);
+#ifdef CONFIG_X86_32
         /* Stack for startup_32 can be just as for start_secondary onwards */
         irq_ctx_init(cpu);
 #else
-        cpu_pda(cpu)->pcurrent = c_idle.idle;
         clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+        initial_gs = per_cpu_offset(cpu);
+        per_cpu(kernel_stack, cpu) =
+                (unsigned long)task_stack_page(c_idle.idle) -
+                KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
         early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
         initial_code = (unsigned long)start_secondary;
@@ -878,8 +749,6 @@ do_rest:
 
         pr_debug("Setting warm reset code and vector.\n");
 
-        store_NMI_vector(&nmi_high, &nmi_low);
-
         smpboot_setup_warm_reset_vector(start_ip);
         /*
          * Be paranoid about clearing APIC errors.
@@ -891,9 +760,13 @@ do_rest:
         }
 
         /*
-         * Starting actual IPI sequence...
+         * Kick the secondary CPU. Use the method in the APIC driver
+         * if it's defined - or use an INIT boot APIC message otherwise:
          */
-        boot_error = wakeup_secondary_cpu(apicid, start_ip);
+        if (apic->wakeup_secondary_cpu)
+                boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
+        else
+                boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
 
         if (!boot_error) {
                 /*
@@ -927,13 +800,11 @@ do_rest:
                 else
                         /* trampoline code not run */
                         printk(KERN_ERR "Not responding.\n");
-                if (get_uv_system_type() != UV_NON_UNIQUE_APIC)
-                        inquire_remote_apic(apicid);
+                if (apic->inquire_remote_apic)
+                        apic->inquire_remote_apic(apicid);
         }
 }
-#ifdef CONFIG_X86_64
-restore_state:
-#endif
+
         if (boot_error) {
                 /* Try to put things back the way they were before ... */
                 numa_remove_cpu(cpu); /* was set by numa_add_cpu */
@@ -961,7 +832,7 @@ restore_state:
 
 int __cpuinit native_cpu_up(unsigned int cpu)
 {
-        int apicid = cpu_present_to_apicid(cpu);
+        int apicid = apic->cpu_present_to_apicid(cpu);
         unsigned long flags;
         int err;
 
@@ -1033,9 +904,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
  */
 static __init void disable_smp(void)
 {
-        /* use the read/write pointers to the present and possible maps */
-        cpumask_copy(&cpu_present_map, cpumask_of(0));
-        cpumask_copy(&cpu_possible_map, cpumask_of(0));
+        init_cpu_present(cpumask_of(0));
+        init_cpu_possible(cpumask_of(0));
         smpboot_clear_io_apic_irqs();
 
         if (smp_found_config)
@@ -1054,14 +924,14 @@ static int __init smp_sanity_check(unsigned max_cpus)
 {
         preempt_disable();
 
-#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
+#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
         if (def_to_bigsmp && nr_cpu_ids > 8) {
                 unsigned int cpu;
                 unsigned nr;
 
                 printk(KERN_WARNING
                         "More than 8 CPUs detected - skipping them.\n"
-                        "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
+                        "Use CONFIG_X86_BIGSMP.\n");
 
                 nr = 0;
                 for_each_present_cpu(cpu) {
@@ -1107,7 +977,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
          * Should not be necessary because the MP table should list the boot
          * CPU too, but we do it for the sake of robustness anyway.
          */
-        if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
+        if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
                 printk(KERN_NOTICE
                         "weird, boot CPU (#%d) not listed by the BIOS.\n",
                         boot_cpu_physical_apicid);
@@ -1125,6 +995,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
                 printk(KERN_ERR "... forcing use of dummy APIC emulation."
                                 "(tell your hw vendor)\n");
                 smpboot_clear_io_apic();
+                arch_disable_smp_support();
                 return -1;
         }
 
@@ -1166,6 +1037,8 @@ static void __init smp_cpu_index_default(void)
  */
 void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
+        unsigned int i;
+
         preempt_disable();
         smp_cpu_index_default();
         current_cpu_data = boot_cpu_data;
@@ -1179,11 +1052,19 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
         boot_cpu_logical_apicid = logical_smp_processor_id();
 #endif
         current_thread_info()->cpu = 0;  /* needed? */
+        for_each_possible_cpu(i) {
+                alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+                alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+                alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
+                cpumask_clear(per_cpu(cpu_core_map, i));
+                cpumask_clear(per_cpu(cpu_sibling_map, i));
+                cpumask_clear(cpu_data(i).llc_shared_map);
+        }
         set_cpu_sibling_map(0);
 
-#ifdef CONFIG_X86_64
         enable_IR_x2apic();
-        setup_apic_routing();
+#ifdef CONFIG_X86_64
+        default_setup_apic_routing();
 #endif
 
         if (smp_sanity_check(max_cpus) < 0) {
```
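Because `cpu_sibling_map`, `cpu_core_map`, and `llc_shared_map` are now `cpumask_var_t`, they must be allocated before `set_cpu_sibling_map(0)` first touches them, which is what the new `for_each_possible_cpu()` loop above does. A sketch of the same pattern with the failure path spelled out (the patch itself assumes these early `GFP_KERNEL` allocations succeed):

```c
#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Hypothetical variant of the allocation loop, with error checking. */
static int __init alloc_percpu_masks_sketch(void)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                if (!alloc_cpumask_var(&per_cpu(cpu_sibling_map, i),
                                       GFP_KERNEL))
                        return -ENOMEM; /* illustrative handling only */
                cpumask_clear(per_cpu(cpu_sibling_map, i));
        }
        return 0;
}
```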
```diff
@@ -1207,18 +1088,18 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
          */
         setup_local_APIC();
 
-#ifdef CONFIG_X86_64
         /*
          * Enable IO APIC before setting up error vector
          */
         if (!skip_ioapic_setup && nr_ioapics)
                 enable_IO_APIC();
-#endif
+
         end_local_APIC_setup();
 
         map_cpu_to_logical_apicid();
 
-        setup_portio_remap();
+        if (apic->setup_portio_remap)
+                apic->setup_portio_remap();
 
         smpboot_setup_io_apic();
         /*
@@ -1240,10 +1121,7 @@ out:
 void __init native_smp_prepare_boot_cpu(void)
 {
         int me = smp_processor_id();
-#ifdef CONFIG_X86_32
-        init_gdt(me);
-#endif
-        switch_to_new_gdt();
+        switch_to_new_gdt(me);
         /* already set me in cpu_online_mask in boot_cpu_init() */
         cpumask_set_cpu(me, cpu_callout_mask);
         per_cpu(cpu_state, me) = CPU_ONLINE;
@@ -1254,7 +1132,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
         pr_debug("Boot done.\n");
 
         impress_friends();
-        smp_checks();
 #ifdef CONFIG_X86_IO_APIC
         setup_ioapic_dest();
 #endif
@@ -1271,11 +1148,11 @@ early_param("possible_cpus", _setup_possible_cpus);
 
 
 /*
- * cpu_possible_map should be static, it cannot change as cpu's
+ * cpu_possible_mask should be static, it cannot change as cpu's
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
+ * cpu_present_mask on the other hand can change dynamically.
  * In case when cpu_hotplug is not compiled, then we resort to current
  * behaviour, which is cpu_possible == cpu_present.
  * - Ashok Raj
```
