author		Glauber de Oliveira Costa <gcosta@redhat.com>	2008-03-19 13:25:59 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-17 11:41:03 -0400
commit		cb3c8b9003f15efa4a750a32d2d602d40cc45d5a (patch)
tree		204a84d85c000f8453557d001557aaf4c0855434 /arch/x86/kernel/smpboot_32.c
parent		c70dcb74309cedfa64f0060f4a84792e873ceb53 (diff)
x86: integrate do_boot_cpu
This is a very large patch, because it depends on a lot
of auxiliary static functions. But they have all been modified
to the point that they are sufficiently close now, so they are
simply merged into smpboot.c.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/smpboot_32.c')
-rw-r--r--	arch/x86/kernel/smpboot_32.c	532
1 file changed, 2 insertions(+), 530 deletions(-)
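The largest removed block caches each CPU's idle task so that CPU hotplug can bring a processor back online without forking a fresh thread (the get_idle_for_cpu()/set_idle_for_cpu() pair deleted below). A minimal user-space sketch of that caching idiom; struct task, cached_idle and MAX_CPUS are illustrative stand-ins for the kernel's task_struct and per-CPU array, not kernel API:

#include <stdlib.h>

#define MAX_CPUS 8

struct task { int cpu; };                  /* stand-in for struct task_struct */

static struct task *cached_idle[MAX_CPUS]; /* plays the role of idle_thread_array */

/* Reuse a previously forked idle task if this CPU was booted before. */
static struct task *idle_for_cpu(int cpu)
{
	if (!cached_idle[cpu]) {
		struct task *t = malloc(sizeof(*t));   /* like fork_idle() */
		if (!t)
			return NULL;
		t->cpu = cpu;
		cached_idle[cpu] = t;      /* like set_idle_for_cpu() */
	}
	return cached_idle[cpu];           /* like get_idle_for_cpu() */
}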
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index ae25927f08c1..e82eeb2fdfef 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -80,114 +80,12 @@ extern void unmap_cpu_to_logical_apicid(int cpu);
 /* State of each CPU. */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-/* Store all idle threads, this can be reused instead of creating
- * a new thread. Also avoids complicated thread destroy functionality
- * for idle threads.
- */
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
- * removed after init for !CONFIG_HOTPLUG_CPU.
- */
-static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
-#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
-#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
-#else
-struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
-#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
-#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
-#endif
-
-static atomic_t init_deasserted;
-
-static void __cpuinit smp_callin(void)
-{
-	int cpuid, phys_id;
-	unsigned long timeout;
-
-	/*
-	 * If waken up by an INIT in an 82489DX configuration
-	 * we may get here before an INIT-deassert IPI reaches
-	 * our local APIC.  We have to wait for the IPI or we'll
-	 * lock up on an APIC access.
-	 */
-	wait_for_init_deassert(&init_deasserted);
-
-	/*
-	 * (This works even if the APIC is not enabled.)
-	 */
-	phys_id = GET_APIC_ID(apic_read(APIC_ID));
-	cpuid = smp_processor_id();
-	if (cpu_isset(cpuid, cpu_callin_map)) {
-		printk("huh, phys CPU#%d, CPU#%d already present??\n",
-					phys_id, cpuid);
-		BUG();
-	}
-	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
-
-	/*
-	 * STARTUP IPIs are fragile beasts as they might sometimes
-	 * trigger some glue motherboard logic. Complete APIC bus
-	 * silence for 1 second, this overestimates the time the
-	 * boot CPU is spending to send the up to 2 STARTUP IPIs
-	 * by a factor of two. This should be enough.
-	 */
-
-	/*
-	 * Waiting 2s total for startup (udelay is not yet working)
-	 */
-	timeout = jiffies + 2*HZ;
-	while (time_before(jiffies, timeout)) {
-		/*
-		 * Has the boot CPU finished it's STARTUP sequence?
-		 */
-		if (cpu_isset(cpuid, cpu_callout_map))
-			break;
-		cpu_relax();
-	}
-
-	if (!time_before(jiffies, timeout)) {
-		printk("BUG: CPU%d started up but did not get a callout!\n",
-			cpuid);
-		BUG();
-	}
-
-	/*
-	 * the boot CPU has finished the init stage and is spinning
-	 * on callin_map until we finish. We are free to set up this
-	 * CPU, first the APIC. (this is probably redundant on most
-	 * boards)
-	 */
-
-	Dprintk("CALLIN, before setup_local_APIC().\n");
-	smp_callin_clear_local_apic();
-	setup_local_APIC();
-	end_local_APIC_setup();
-	map_cpu_to_logical_apicid();
-
-	/*
-	 * Get our bogomips.
-	 */
-	local_irq_enable();
-	calibrate_delay();
-	local_irq_disable();
-	Dprintk("Stack at about %p\n",&cpuid);
-
-	/*
-	 * Save our processor parameters
-	 */
-	smp_store_cpu_info(cpuid);
-
-	/*
-	 * Allow the master to continue.
-	 */
-	cpu_set(cpuid, cpu_callin_map);
-}
+extern void smp_callin(void);
 
 /*
  * Activate a secondary processor.
  */
-static void __cpuinit start_secondary(void *unused)
+void __cpuinit start_secondary(void *unused)
 {
 	/*
 	 * Don't put *anything* before cpu_init(), SMP booting is too
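The deleted smp_callin() is the AP half of a two-flag rendezvous: the boot CPU sets the AP's bit in cpu_callout_map, and the AP acknowledges via cpu_callin_map once its APIC is set up and delays are calibrated. A minimal sketch of the same rendezvous pattern using C11 threads and atomics; all names here (callout, callin, ap_thread) are hypothetical stand-ins for the kernel's cpumask handshake:

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_bool callout, callin;

static int ap_thread(void *arg)
{
	(void)arg;
	while (!atomic_load(&callout))   /* like waiting on cpu_callout_map */
		thrd_yield();
	/* ... per-CPU setup would happen here ... */
	atomic_store(&callin, 1);        /* like cpu_set(cpuid, cpu_callin_map) */
	return 0;
}

int main(void)
{
	thrd_t ap;

	thrd_create(&ap, ap_thread, NULL);
	atomic_store(&callout, 1);       /* boot CPU: allow the AP to proceed */
	while (!atomic_load(&callin))    /* like the 5s poll in do_boot_cpu() */
		thrd_yield();
	thrd_join(ap, NULL);
	puts("AP checked in");
	return 0;
}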
@@ -257,373 +155,6 @@ void __devinit initialize_secondary(void)
 		:"m" (current->thread.sp),"m" (current->thread.ip));
 }
 
-static inline void __inquire_remote_apic(int apicid)
-{
-	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
-	char *names[] = { "ID", "VERSION", "SPIV" };
-	int timeout;
-	u32 status;
-
-	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
-
-	for (i = 0; i < ARRAY_SIZE(regs); i++) {
-		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);
-
-		/*
-		 * Wait for idle.
-		 */
-		status = safe_apic_wait_icr_idle();
-		if (status)
-			printk(KERN_CONT
-			       "a previous APIC delivery may have failed\n");
-
-		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
-		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
-
-		timeout = 0;
-		do {
-			udelay(100);
-			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
-		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
-
-		switch (status) {
-		case APIC_ICR_RR_VALID:
-			status = apic_read(APIC_RRR);
-			printk(KERN_CONT "%08x\n", status);
-			break;
-		default:
-			printk(KERN_CONT "failed\n");
-		}
-	}
-}
-
-#ifdef WAKE_SECONDARY_VIA_NMI
-/*
- * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
- * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
- * won't ... remember to clear down the APIC, etc later.
- */
-static int __devinit
-wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
-{
-	unsigned long send_status, accept_status = 0;
-	int maxlvt;
-
-	/* Target chip */
-	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
-
-	/* Boot on the stack */
-	/* Kick the second */
-	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
-
-	Dprintk("Waiting for send to finish...\n");
-	send_status = safe_apic_wait_icr_idle();
-
-	/*
-	 * Give the other CPU some time to accept the IPI.
-	 */
-	udelay(200);
-	/*
-	 * Due to the Pentium erratum 3AP.
-	 */
-	maxlvt = lapic_get_maxlvt();
-	if (maxlvt > 3) {
-		apic_read_around(APIC_SPIV);
-		apic_write(APIC_ESR, 0);
-	}
-	accept_status = (apic_read(APIC_ESR) & 0xEF);
-	Dprintk("NMI sent.\n");
-
-	if (send_status)
-		printk("APIC never delivered???\n");
-	if (accept_status)
-		printk("APIC delivery error (%lx).\n", accept_status);
-
-	return (send_status | accept_status);
-}
-#endif	/* WAKE_SECONDARY_VIA_NMI */
-
-#ifdef WAKE_SECONDARY_VIA_INIT
-static int __devinit
-wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
-{
-	unsigned long send_status, accept_status = 0;
-	int maxlvt, num_starts, j;
-
-	/*
-	 * Be paranoid about clearing APIC errors.
-	 */
-	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
-		apic_read_around(APIC_SPIV);
-		apic_write(APIC_ESR, 0);
-		apic_read(APIC_ESR);
-	}
-
-	Dprintk("Asserting INIT.\n");
-
-	/*
-	 * Turn INIT on target chip
-	 */
-	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
-	/*
-	 * Send IPI
-	 */
-	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
-				| APIC_DM_INIT);
-
-	Dprintk("Waiting for send to finish...\n");
-	send_status = safe_apic_wait_icr_idle();
-
-	mdelay(10);
-
-	Dprintk("Deasserting INIT.\n");
-
-	/* Target chip */
-	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
-	/* Send IPI */
-	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
-
-	Dprintk("Waiting for send to finish...\n");
-	send_status = safe_apic_wait_icr_idle();
-
-	mb();
-	atomic_set(&init_deasserted, 1);
-
-	/*
-	 * Should we send STARTUP IPIs ?
-	 *
-	 * Determine this based on the APIC version.
-	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
-	 */
-	if (APIC_INTEGRATED(apic_version[phys_apicid]))
-		num_starts = 2;
-	else
-		num_starts = 0;
-
-	/*
-	 * Paravirt / VMI wants a startup IPI hook here to set up the
-	 * target processor state.
-	 */
-	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-			 (unsigned long) stack_start.sp);
-
-	/*
-	 * Run STARTUP IPI loop.
-	 */
-	Dprintk("#startup loops: %d.\n", num_starts);
-
-	maxlvt = lapic_get_maxlvt();
-
-	for (j = 1; j <= num_starts; j++) {
-		Dprintk("Sending STARTUP #%d.\n",j);
-		apic_read_around(APIC_SPIV);
-		apic_write(APIC_ESR, 0);
-		apic_read(APIC_ESR);
-		Dprintk("After apic_write.\n");
-
-		/*
-		 * STARTUP IPI
-		 */
-
-		/* Target chip */
-		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
-		/* Boot on the stack */
-		/* Kick the second */
-		apic_write_around(APIC_ICR, APIC_DM_STARTUP
-					| (start_eip >> 12));
-
-		/*
-		 * Give the other CPU some time to accept the IPI.
-		 */
-		udelay(300);
-
-		Dprintk("Startup point 1.\n");
-
-		Dprintk("Waiting for send to finish...\n");
-		send_status = safe_apic_wait_icr_idle();
-
-		/*
-		 * Give the other CPU some time to accept the IPI.
-		 */
-		udelay(200);
-		/*
-		 * Due to the Pentium erratum 3AP.
-		 */
-		if (maxlvt > 3) {
-			apic_read_around(APIC_SPIV);
-			apic_write(APIC_ESR, 0);
-		}
-		accept_status = (apic_read(APIC_ESR) & 0xEF);
-		if (send_status || accept_status)
-			break;
-	}
-	Dprintk("After Startup.\n");
-
-	if (send_status)
-		printk("APIC never delivered???\n");
-	if (accept_status)
-		printk("APIC delivery error (%lx).\n", accept_status);
-
-	return (send_status | accept_status);
-}
-#endif	/* WAKE_SECONDARY_VIA_INIT */
-
-extern cpumask_t cpu_initialized;
-
-struct create_idle {
-	struct work_struct work;
-	struct task_struct *idle;
-	struct completion done;
-	int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
-{
-	struct create_idle *c_idle =
-		container_of(work, struct create_idle, work);
-
-	c_idle->idle = fork_idle(c_idle->cpu);
-	complete(&c_idle->done);
-}
-static int __cpuinit do_boot_cpu(int apicid, int cpu)
-/*
- * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
- * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
- * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
- */
-{
-	unsigned long boot_error = 0;
-	int timeout;
-	unsigned long start_eip;
-	unsigned short nmi_high = 0, nmi_low = 0;
-	struct create_idle c_idle = {
-		.cpu = cpu,
-		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
-	};
-	INIT_WORK(&c_idle.work, do_fork_idle);
-
-	alternatives_smp_switch(1);
-
-	c_idle.idle = get_idle_for_cpu(cpu);
-
-	/*
-	 * We can't use kernel_thread since we must avoid to
-	 * reschedule the child.
-	 */
-	if (c_idle.idle) {
-		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
-			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
-		init_idle(c_idle.idle, cpu);
-		goto do_rest;
-	}
-
-	if (!keventd_up() || current_is_keventd())
-		c_idle.work.func(&c_idle.work);
-	else {
-		schedule_work(&c_idle.work);
-		wait_for_completion(&c_idle.done);
-	}
-
-	if (IS_ERR(c_idle.idle)) {
-		printk(KERN_ERR "failed fork for CPU %d\n", cpu);
-		return PTR_ERR(c_idle.idle);
-	}
-
-	set_idle_for_cpu(cpu, c_idle.idle);
-do_rest:
-	per_cpu(current_task, cpu) = c_idle.idle;
-	init_gdt(cpu);
-	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
-
-	c_idle.idle->thread.ip = (unsigned long) start_secondary;
-	/* start_eip had better be page-aligned! */
-	start_eip = setup_trampoline();
-
-	/* So we see what's up */
-	printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip);
-	/* Stack for startup_32 can be just as for start_secondary onwards */
-	stack_start.sp = (void *) c_idle.idle->thread.sp;
-
-	irq_ctx_init(cpu);
-
-	/*
-	 * This grunge runs the startup process for
-	 * the targeted processor.
-	 */
-
-	atomic_set(&init_deasserted, 0);
-
-	Dprintk("Setting warm reset code and vector.\n");
-
-	store_NMI_vector(&nmi_high, &nmi_low);
-
-	smpboot_setup_warm_reset_vector(start_eip);
-	/*
-	 * Be paranoid about clearing APIC errors.
-	 */
-	apic_write(APIC_ESR, 0);
-	apic_read(APIC_ESR);
-
-
-	/*
-	 * Starting actual IPI sequence...
-	 */
-	boot_error = wakeup_secondary_cpu(apicid, start_eip);
-
-	if (!boot_error) {
-		/*
-		 * allow APs to start initializing.
-		 */
-		Dprintk("Before Callout %d.\n", cpu);
-		cpu_set(cpu, cpu_callout_map);
-		Dprintk("After Callout %d.\n", cpu);
-
-		/*
-		 * Wait 5s total for a response
-		 */
-		for (timeout = 0; timeout < 50000; timeout++) {
-			if (cpu_isset(cpu, cpu_callin_map))
-				break;	/* It has booted */
-			udelay(100);
-		}
-
-		if (cpu_isset(cpu, cpu_callin_map)) {
-			/* number CPUs logically, starting from 1 (BSP is 0) */
-			Dprintk("OK.\n");
-			printk("CPU%d: ", cpu);
-			print_cpu_info(&cpu_data(cpu));
-			Dprintk("CPU has booted.\n");
-		} else {
-			boot_error= 1;
-			if (*((volatile unsigned char *)trampoline_base)
-					== 0xA5)
-				/* trampoline started but...? */
-				printk("Stuck ??\n");
-			else
-				/* trampoline code not run */
-				printk("Not responding.\n");
-			inquire_remote_apic(apicid);
-		}
-	}
-
-	if (boot_error) {
-		/* Try to put things back the way they were before ... */
-		unmap_cpu_to_logical_apicid(cpu);
-		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
-		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
-		cpu_clear(cpu, cpu_possible_map);
-		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
-	}
-
-	/* mark "stuck" area as not stuck */
-	*((volatile unsigned long *)trampoline_base) = 0;
-
-	return boot_error;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 void cpu_exit_clear(void)
 {
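Both wakeup_secondary_cpu() variants and do_boot_cpu() above lean on one bounded-polling idiom: recheck a status with a short delay until it succeeds or a retry budget runs out (the ICR remote-read loop allows 1000 x 100us; the callin wait allows 50000 x 100us, i.e. 5s). A generic user-space sketch of that pattern; poll_until() and its parameters are illustrative, not kernel API, and usleep() stands in for the kernel's udelay():

#include <stdbool.h>
#include <unistd.h>

static bool poll_until(bool (*done)(void *), void *arg,
		       unsigned step_us, unsigned max_steps)
{
	for (unsigned i = 0; i < max_steps; i++) {
		if (done(arg))
			return true;   /* condition met within the budget */
		usleep(step_us);       /* like udelay(100) between rechecks */
	}
	return false;                  /* caller reports "Not responding." */
}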
@@ -774,65 +305,6 @@ void __init native_smp_prepare_boot_cpu(void)
 	__get_cpu_var(cpu_state) = CPU_ONLINE;
 }
 
-int __cpuinit native_cpu_up(unsigned int cpu)
-{
-	int apicid = cpu_present_to_apicid(cpu);
-	unsigned long flags;
-	int err;
-
-	WARN_ON(irqs_disabled());
-
-	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);
-
-	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
-	    !physid_isset(apicid, phys_cpu_present_map)) {
-		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
-		return -EINVAL;
-	}
-
-	/*
-	 * Already booted CPU?
-	 */
-	if (cpu_isset(cpu, cpu_callin_map)) {
-		Dprintk("do_boot_cpu %d Already started\n", cpu);
-		return -ENOSYS;
-	}
-
-	/*
-	 * Save current MTRR state in case it was changed since early boot
-	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
-	 */
-	mtrr_save_state();
-
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-
-	/* init low mem mapping */
-	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
-			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
-	flush_tlb_all();
-
-	err = do_boot_cpu(apicid, cpu);
-	if (err < 0) {
-		Dprintk("do_boot_cpu failed %d\n", err);
-		return err;
-	}
-
-	/*
-	 * Check TSC synchronization with the AP (keep irqs disabled
-	 * while doing so):
-	 */
-	local_irq_save(flags);
-	check_tsc_sync_source(cpu);
-	local_irq_restore(flags);
-
-	while (!cpu_isset(cpu, cpu_online_map)) {
-		cpu_relax();
-		touch_nmi_watchdog();
-	}
-
-	return 0;
-}
-
 extern void impress_friends(void);
 extern void smp_checks(void);
 
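The deleted native_cpu_up() reads as a validate/prepare/boot/wait skeleton: reject a bad or already-booted CPU, save MTRR state and set up low-memory mappings, delegate to do_boot_cpu(), then check TSC synchronization and spin until the AP appears in cpu_online_map. A condensed sketch of that control flow; the stub helpers below are placeholders for the kernel functions named in the hunk above, not real API:

#include <errno.h>
#include <stdbool.h>

/* Placeholder stubs standing in for the kernel helpers. */
static int  lookup_apicid(unsigned cpu)        { return (int)cpu; }  /* cpu_present_to_apicid() */
static bool already_booted(unsigned cpu)       { (void)cpu; return false; }
static void prepare_cpu(unsigned cpu)          { (void)cpu; }        /* MTRRs, page tables, cpu_state */
static int  boot_cpu(int apicid, unsigned cpu) { (void)apicid; (void)cpu; return 0; }
static bool cpu_online_now(unsigned cpu)       { (void)cpu; return true; }

/* Condensed control flow of the deleted native_cpu_up(). */
int cpu_up_sketch(unsigned cpu)
{
	int apicid = lookup_apicid(cpu);

	if (apicid < 0)
		return -EINVAL;              /* bad or absent APIC ID */
	if (already_booted(cpu))
		return -ENOSYS;              /* refuse to boot a CPU twice */

	prepare_cpu(cpu);                    /* MTRR save, low-mem mapping, CPU_UP_PREPARE */

	int err = boot_cpu(apicid, cpu);     /* do_boot_cpu(): wakeup IPIs + callin wait */
	if (err < 0)
		return err;

	while (!cpu_online_now(cpu))         /* spin until the AP marks itself online */
		;
	return 0;
}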