author     Yinghai Lu <yhlu.kernel@gmail.com>    2008-06-21 19:25:37 -0400
committer  Ingo Molnar <mingo@elte.hu>           2008-07-08 06:50:10 -0400
commit     0f0124fa742da7c51e2e3c5ded7f5e5e06ddc195 (patch)
tree       fe916a493968bf516ad3065f7f9bc911fc6039e0 /arch/x86/kernel/cpu
parent     a9c1182fbd349882fe912245d6e03cd30943be2d (diff)
x86: merge setup64.c into common_64.c
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/common_64.c | 277
1 file changed, 276 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index 48ba79961583..9fb5b7caaa89 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -1,10 +1,17 @@
 #include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/kgdb.h>
+#include <linux/topology.h>
 #include <linux/string.h>
 #include <linux/delay.h>
 #include <linux/smp.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
-#include <linux/bootmem.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/msr.h>
@@ -19,6 +26,15 @@
 #include <asm/apic.h>
 #include <mach_apic.h>
 #endif
+#include <asm/pda.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/desc.h>
+#include <asm/atomic.h>
+#include <asm/proto.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/genapic.h>
 
 #include "cpu.h"
 
@@ -404,3 +420,262 @@ static __init int setup_disablecpuid(char *arg)
 	return 1;
 }
 __setup("clearcpuid=", setup_disablecpuid);
+
+#ifndef CONFIG_DEBUG_BOOT_PARAMS
+struct boot_params __initdata boot_params;
+#else
+struct boot_params boot_params;
+#endif
+
+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
+
+struct x8664_pda **_cpu_pda __read_mostly;
+EXPORT_SYMBOL(_cpu_pda);
+
+struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
+
+char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
+
+unsigned long __supported_pte_mask __read_mostly = ~0UL;
+EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
+static int do_not_nx __cpuinitdata;
+
+/* noexec=on|off
+Control non executable mappings for 64bit processes.
+
+on	Enable(default)
+off	Disable
+*/
+static int __init nonx_setup(char *str)
+{
+	if (!str)
+		return -EINVAL;
+	if (!strncmp(str, "on", 2)) {
+		__supported_pte_mask |= _PAGE_NX;
+		do_not_nx = 0;
+	} else if (!strncmp(str, "off", 3)) {
+		do_not_nx = 1;
+		__supported_pte_mask &= ~_PAGE_NX;
+	}
+	return 0;
+}
+early_param("noexec", nonx_setup);
+
+int force_personality32;
+
+/* noexec32=on|off
+Control non executable heap for 32bit processes.
+To control the stack too use noexec=off
+
+on	PROT_READ does not imply PROT_EXEC for 32bit processes (default)
+off	PROT_READ implies PROT_EXEC
+*/
+static int __init nonx32_setup(char *str)
+{
+	if (!strcmp(str, "on"))
+		force_personality32 &= ~READ_IMPLIES_EXEC;
+	else if (!strcmp(str, "off"))
+		force_personality32 |= READ_IMPLIES_EXEC;
+	return 1;
+}
+__setup("noexec32=", nonx32_setup);
+
+void pda_init(int cpu)
+{
+	struct x8664_pda *pda = cpu_pda(cpu);
+
+	/* Setup up data that may be needed in __get_free_pages early */
+	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
+	/* Memory clobbers used to order PDA accessed */
+	mb();
+	wrmsrl(MSR_GS_BASE, pda);
+	mb();
+
+	pda->cpunumber = cpu;
+	pda->irqcount = -1;
+	pda->kernelstack = (unsigned long)stack_thread_info() -
+				 PDA_STACKOFFSET + THREAD_SIZE;
+	pda->active_mm = &init_mm;
+	pda->mmu_state = 0;
+
+	if (cpu == 0) {
+		/* others are initialized in smpboot.c */
+		pda->pcurrent = &init_task;
+		pda->irqstackptr = boot_cpu_stack;
+	} else {
+		pda->irqstackptr = (char *)
+			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
+		if (!pda->irqstackptr)
+			panic("cannot allocate irqstack for cpu %d", cpu);
+
+		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
+			pda->nodenumber = cpu_to_node(cpu);
+	}
+
+	pda->irqstackptr += IRQSTACKSIZE-64;
+}
+
+char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
+			   DEBUG_STKSZ]
+__attribute__((section(".bss.page_aligned")));
+
+extern asmlinkage void ignore_sysret(void);
+
+/* May not be marked __init: used by software suspend */
+void syscall_init(void)
+{
+	/*
+	 * LSTAR and STAR live in a bit strange symbiosis.
+	 * They both write to the same internal register. STAR allows to
+	 * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
+	 */
+	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
+	wrmsrl(MSR_LSTAR, system_call);
+	wrmsrl(MSR_CSTAR, ignore_sysret);
+
+#ifdef CONFIG_IA32_EMULATION
+	syscall32_cpu_init();
+#endif
+
+	/* Flags to clear on syscall */
+	wrmsrl(MSR_SYSCALL_MASK,
+	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
+}
+
+void __cpuinit check_efer(void)
+{
+	unsigned long efer;
+
+	rdmsrl(MSR_EFER, efer);
+	if (!(efer & EFER_NX) || do_not_nx)
+		__supported_pte_mask &= ~_PAGE_NX;
+}
+
+unsigned long kernel_eflags;
+
+/*
+ * Copies of the original ist values from the tss are only accessed during
+ * debugging, no special alignment required.
+ */
+DEFINE_PER_CPU(struct orig_ist, orig_ist);
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ * A lot of state is already set up in PDA init.
+ */
+void __cpuinit cpu_init(void)
+{
+	int cpu = stack_smp_processor_id();
+	struct tss_struct *t = &per_cpu(init_tss, cpu);
+	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
+	unsigned long v;
+	char *estacks = NULL;
+	struct task_struct *me;
+	int i;
+
+	/* CPU 0 is initialised in head64.c */
+	if (cpu != 0)
+		pda_init(cpu);
+	else
+		estacks = boot_exception_stacks;
+
+	me = current;
+
+	if (cpu_test_and_set(cpu, cpu_initialized))
+		panic("CPU#%d already initialized!\n", cpu);
+
+	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+
+	/*
+	 * Initialize the per-CPU GDT with the boot GDT,
+	 * and set up the GDT descriptor:
+	 */
+
+	switch_to_new_gdt();
+	load_idt((const struct desc_ptr *)&idt_descr);
+
+	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
+	syscall_init();
+
+	wrmsrl(MSR_FS_BASE, 0);
+	wrmsrl(MSR_KERNEL_GS_BASE, 0);
+	barrier();
+
+	check_efer();
+
+	/*
+	 * set up and load the per-CPU TSS
+	 */
+	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+		static const unsigned int order[N_EXCEPTION_STACKS] = {
+			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
+			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+		};
+		if (cpu) {
+			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
+			if (!estacks)
+				panic("Cannot allocate exception stack %ld %d\n",
+				      v, cpu);
+		}
+		estacks += PAGE_SIZE << order[v];
+		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
+	}
+
+	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+	/*
+	 * <= is required because the CPU will access up to
+	 * 8 bits beyond the end of the IO permission bitmap.
+	 */
+	for (i = 0; i <= IO_BITMAP_LONGS; i++)
+		t->io_bitmap[i] = ~0UL;
+
+	atomic_inc(&init_mm.mm_count);
+	me->active_mm = &init_mm;
+	if (me->mm)
+		BUG();
+	enter_lazy_tlb(&init_mm, me);
+
+	load_sp0(t, &current->thread);
+	set_tss_desc(cpu, t);
+	load_TR_desc();
+	load_LDT(&init_mm.context);
+
+#ifdef CONFIG_KGDB
+	/*
+	 * If the kgdb is connected no debug regs should be altered. This
+	 * is only applicable when KGDB and a KGDB I/O module are built
+	 * into the kernel and you are using early debugging with
+	 * kgdbwait. KGDB will control the kernel HW breakpoint registers.
+	 */
+	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
+		arch_kgdb_ops.correct_hw_break();
+	else {
+#endif
+	/*
+	 * Clear all 6 debug registers:
+	 */
+
+	set_debugreg(0UL, 0);
+	set_debugreg(0UL, 1);
+	set_debugreg(0UL, 2);
+	set_debugreg(0UL, 3);
+	set_debugreg(0UL, 6);
+	set_debugreg(0UL, 7);
+#ifdef CONFIG_KGDB
+	/* If the kgdb is connected no debug regs should be altered. */
+	}
+#endif
+
+	fpu_init();
+
+	raw_local_save_flags(kernel_eflags);
+
+	if (is_uv_system())
+		uv_cpu_init();
+}