diff options
author | Yinghai Lu <yhlu.kernel@gmail.com> | 2008-09-04 23:09:04 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-09-05 03:40:49 -0400 |
commit | 1ba76586f778be327e452180d8378e40ee70f066 (patch) | |
tree | da0edadf3e91d036d4afc321a0a32181ca3bd7ab /arch/x86/kernel/cpu/common.c | |
parent | d5494d4f517158b1d2eea1ae33f6c264eb12675f (diff) |
x86: cpu/common*.c have same cpu_init(), with copying and #ifdef
These are hard to merge line by line (as here we have material differences between
32-bit and 64-bit mode) - will try to do it later.
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r-- | arch/x86/kernel/cpu/common.c | 124 |
1 file changed, 124 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index dcd3ebd5ba62..f44678db1616 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -901,7 +901,129 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) | |||
901 | * initialized (naturally) in the bootstrap process, such as the GDT | 901 | * initialized (naturally) in the bootstrap process, such as the GDT |
902 | * and IDT. We reload them nevertheless, this function acts as a | 902 | * and IDT. We reload them nevertheless, this function acts as a |
903 | * 'CPU state barrier', nothing should get across. | 903 | * 'CPU state barrier', nothing should get across. |
904 | * A lot of state is already set up in PDA init for 64 bit | ||
904 | */ | 905 | */ |
906 | #ifdef CONFIG_X86_64 | ||
907 | void __cpuinit cpu_init(void) | ||
908 | { | ||
909 | int cpu = stack_smp_processor_id(); | ||
910 | struct tss_struct *t = &per_cpu(init_tss, cpu); | ||
911 | struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu); | ||
912 | unsigned long v; | ||
913 | char *estacks = NULL; | ||
914 | struct task_struct *me; | ||
915 | int i; | ||
916 | |||
917 | /* CPU 0 is initialised in head64.c */ | ||
918 | if (cpu != 0) | ||
919 | pda_init(cpu); | ||
920 | else | ||
921 | estacks = boot_exception_stacks; | ||
922 | |||
923 | me = current; | ||
924 | |||
925 | if (cpu_test_and_set(cpu, cpu_initialized)) | ||
926 | panic("CPU#%d already initialized!\n", cpu); | ||
927 | |||
928 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); | ||
929 | |||
930 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); | ||
931 | |||
932 | /* | ||
933 | * Initialize the per-CPU GDT with the boot GDT, | ||
934 | * and set up the GDT descriptor: | ||
935 | */ | ||
936 | |||
937 | switch_to_new_gdt(); | ||
938 | load_idt((const struct desc_ptr *)&idt_descr); | ||
939 | |||
940 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); | ||
941 | syscall_init(); | ||
942 | |||
943 | wrmsrl(MSR_FS_BASE, 0); | ||
944 | wrmsrl(MSR_KERNEL_GS_BASE, 0); | ||
945 | barrier(); | ||
946 | |||
947 | check_efer(); | ||
948 | if (cpu != 0 && x2apic) | ||
949 | enable_x2apic(); | ||
950 | |||
951 | /* | ||
952 | * set up and load the per-CPU TSS | ||
953 | */ | ||
954 | if (!orig_ist->ist[0]) { | ||
955 | static const unsigned int order[N_EXCEPTION_STACKS] = { | ||
956 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, | ||
957 | [DEBUG_STACK - 1] = DEBUG_STACK_ORDER | ||
958 | }; | ||
959 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { | ||
960 | if (cpu) { | ||
961 | estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); | ||
962 | if (!estacks) | ||
963 | panic("Cannot allocate exception " | ||
964 | "stack %ld %d\n", v, cpu); | ||
965 | } | ||
966 | estacks += PAGE_SIZE << order[v]; | ||
967 | orig_ist->ist[v] = t->x86_tss.ist[v] = | ||
968 | (unsigned long)estacks; | ||
969 | } | ||
970 | } | ||
971 | |||
972 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | ||
973 | /* | ||
974 | * <= is required because the CPU will access up to | ||
975 | * 8 bits beyond the end of the IO permission bitmap. | ||
976 | */ | ||
977 | for (i = 0; i <= IO_BITMAP_LONGS; i++) | ||
978 | t->io_bitmap[i] = ~0UL; | ||
979 | |||
980 | atomic_inc(&init_mm.mm_count); | ||
981 | me->active_mm = &init_mm; | ||
982 | if (me->mm) | ||
983 | BUG(); | ||
984 | enter_lazy_tlb(&init_mm, me); | ||
985 | |||
986 | load_sp0(t, ¤t->thread); | ||
987 | set_tss_desc(cpu, t); | ||
988 | load_TR_desc(); | ||
989 | load_LDT(&init_mm.context); | ||
990 | |||
991 | #ifdef CONFIG_KGDB | ||
992 | /* | ||
993 | * If the kgdb is connected no debug regs should be altered. This | ||
994 | * is only applicable when KGDB and a KGDB I/O module are built | ||
995 | * into the kernel and you are using early debugging with | ||
996 | * kgdbwait. KGDB will control the kernel HW breakpoint registers. | ||
997 | */ | ||
998 | if (kgdb_connected && arch_kgdb_ops.correct_hw_break) | ||
999 | arch_kgdb_ops.correct_hw_break(); | ||
1000 | else { | ||
1001 | #endif | ||
1002 | /* | ||
1003 | * Clear all 6 debug registers: | ||
1004 | */ | ||
1005 | |||
1006 | set_debugreg(0UL, 0); | ||
1007 | set_debugreg(0UL, 1); | ||
1008 | set_debugreg(0UL, 2); | ||
1009 | set_debugreg(0UL, 3); | ||
1010 | set_debugreg(0UL, 6); | ||
1011 | set_debugreg(0UL, 7); | ||
1012 | #ifdef CONFIG_KGDB | ||
1013 | /* If the kgdb is connected no debug regs should be altered. */ | ||
1014 | } | ||
1015 | #endif | ||
1016 | |||
1017 | fpu_init(); | ||
1018 | |||
1019 | raw_local_save_flags(kernel_eflags); | ||
1020 | |||
1021 | if (is_uv_system()) | ||
1022 | uv_cpu_init(); | ||
1023 | } | ||
1024 | |||
1025 | #else | ||
1026 | |||
905 | void __cpuinit cpu_init(void) | 1027 | void __cpuinit cpu_init(void) |
906 | { | 1028 | { |
907 | int cpu = smp_processor_id(); | 1029 | int cpu = smp_processor_id(); |
@@ -982,3 +1104,5 @@ void __cpuinit cpu_uninit(void) | |||
982 | per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm; | 1104 | per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm; |
983 | } | 1105 | } |
984 | #endif | 1106 | #endif |
1107 | |||
1108 | #endif | ||