author     Yinghai Lu <yhlu.kernel@gmail.com>   2008-09-04 23:09:04 -0400
committer  Ingo Molnar <mingo@elte.hu>          2008-09-05 03:40:49 -0400
commit     1ba76586f778be327e452180d8378e40ee70f066 (patch)
tree       da0edadf3e91d036d4afc321a0a32181ca3bd7ab /arch/x86
parent     d5494d4f517158b1d2eea1ae33f6c264eb12675f (diff)
x86: cpu/common*.c have same cpu_init(), with copying and #ifdef
The two copies are hard to merge line by line (there are material differences between 32-bit and 64-bit mode) - will try to do it later.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
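The diff below shows the actual change: each file gains the other mode's cpu_init() under an #ifdef CONFIG_X86_64 / #else split. As a purely illustrative aside, the following stand-alone sketch (hypothetical file and function names, not taken from the patch) shows the same pattern in a form that compiles on its own:

/*
 * Hypothetical, minimal demo of the pattern this patch uses: both
 * variants of a function live in one file and a config macro selects
 * which one is built (CONFIG_X86_64 in the kernel; here passed on the
 * compiler command line, e.g. "cc -DCONFIG_X86_64 cpu_init_demo.c").
 */
#include <stdio.h>

#ifdef CONFIG_X86_64
static void cpu_init_demo(void)
{
	/* stands in for the 64-bit path: PDA, IST stacks, syscall MSRs, ... */
	puts("64-bit cpu_init() variant selected");
}
#else
static void cpu_init_demo(void)
{
	/* stands in for the 32-bit path: doublefault TSS, %gs, FPU setup, ... */
	puts("32-bit cpu_init() variant selected");
}
#endif

int main(void)
{
	cpu_init_demo();
	return 0;
}

Once both files carry the same #ifdef'ed pair, the two bodies can be diffed and unified without disturbing either build, which appears to be the follow-up work the commit message defers.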
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/common.c     124
-rw-r--r--  arch/x86/kernel/cpu/common_64.c   88
2 files changed, 211 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index dcd3ebd5ba62..f44678db1616 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -901,7 +901,129 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
  * initialized (naturally) in the bootstrap process, such as the GDT
  * and IDT. We reload them nevertheless, this function acts as a
  * 'CPU state barrier', nothing should get across.
+ * A lot of state is already set up in PDA init for 64 bit
  */
+#ifdef CONFIG_X86_64
+void __cpuinit cpu_init(void)
+{
+	int cpu = stack_smp_processor_id();
+	struct tss_struct *t = &per_cpu(init_tss, cpu);
+	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
+	unsigned long v;
+	char *estacks = NULL;
+	struct task_struct *me;
+	int i;
+
+	/* CPU 0 is initialised in head64.c */
+	if (cpu != 0)
+		pda_init(cpu);
+	else
+		estacks = boot_exception_stacks;
+
+	me = current;
+
+	if (cpu_test_and_set(cpu, cpu_initialized))
+		panic("CPU#%d already initialized!\n", cpu);
+
+	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+
+	/*
+	 * Initialize the per-CPU GDT with the boot GDT,
+	 * and set up the GDT descriptor:
+	 */
+
+	switch_to_new_gdt();
+	load_idt((const struct desc_ptr *)&idt_descr);
+
+	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
+	syscall_init();
+
+	wrmsrl(MSR_FS_BASE, 0);
+	wrmsrl(MSR_KERNEL_GS_BASE, 0);
+	barrier();
+
+	check_efer();
+	if (cpu != 0 && x2apic)
+		enable_x2apic();
+
+	/*
+	 * set up and load the per-CPU TSS
+	 */
+	if (!orig_ist->ist[0]) {
+		static const unsigned int order[N_EXCEPTION_STACKS] = {
+			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
+			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+		};
+		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+			if (cpu) {
+				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
+				if (!estacks)
+					panic("Cannot allocate exception "
+					      "stack %ld %d\n", v, cpu);
+			}
+			estacks += PAGE_SIZE << order[v];
+			orig_ist->ist[v] = t->x86_tss.ist[v] =
+					(unsigned long)estacks;
+		}
+	}
+
+	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+	/*
+	 * <= is required because the CPU will access up to
+	 * 8 bits beyond the end of the IO permission bitmap.
+	 */
+	for (i = 0; i <= IO_BITMAP_LONGS; i++)
+		t->io_bitmap[i] = ~0UL;
+
+	atomic_inc(&init_mm.mm_count);
+	me->active_mm = &init_mm;
+	if (me->mm)
+		BUG();
+	enter_lazy_tlb(&init_mm, me);
+
+	load_sp0(t, &current->thread);
+	set_tss_desc(cpu, t);
+	load_TR_desc();
+	load_LDT(&init_mm.context);
+
+#ifdef CONFIG_KGDB
+	/*
+	 * If the kgdb is connected no debug regs should be altered. This
+	 * is only applicable when KGDB and a KGDB I/O module are built
+	 * into the kernel and you are using early debugging with
+	 * kgdbwait. KGDB will control the kernel HW breakpoint registers.
+	 */
+	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
+		arch_kgdb_ops.correct_hw_break();
+	else {
+#endif
+	/*
+	 * Clear all 6 debug registers:
+	 */
+
+	set_debugreg(0UL, 0);
+	set_debugreg(0UL, 1);
+	set_debugreg(0UL, 2);
+	set_debugreg(0UL, 3);
+	set_debugreg(0UL, 6);
+	set_debugreg(0UL, 7);
+#ifdef CONFIG_KGDB
+	/* If the kgdb is connected no debug regs should be altered. */
+	}
+#endif
+
+	fpu_init();
+
+	raw_local_save_flags(kernel_eflags);
+
+	if (is_uv_system())
+		uv_cpu_init();
+}
+
+#else
+
 void __cpuinit cpu_init(void)
 {
 	int cpu = smp_processor_id();
@@ -982,3 +1104,5 @@ void __cpuinit cpu_uninit(void)
 	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
 }
 #endif
+
+#endif
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index 9f2a6ece82dd..2bd0ed5abb0a 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -855,8 +855,9 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
  * initialized (naturally) in the bootstrap process, such as the GDT
  * and IDT. We reload them nevertheless, this function acts as a
  * 'CPU state barrier', nothing should get across.
- * A lot of state is already set up in PDA init.
+ * A lot of state is already set up in PDA init for 64 bit
  */
+#ifdef CONFIG_X86_64
 void __cpuinit cpu_init(void)
 {
 	int cpu = stack_smp_processor_id();
@@ -974,3 +975,88 @@ void __cpuinit cpu_init(void)
 	if (is_uv_system())
 		uv_cpu_init();
 }
+
+#else
+
+void __cpuinit cpu_init(void)
+{
+	int cpu = smp_processor_id();
+	struct task_struct *curr = current;
+	struct tss_struct *t = &per_cpu(init_tss, cpu);
+	struct thread_struct *thread = &curr->thread;
+
+	if (cpu_test_and_set(cpu, cpu_initialized)) {
+		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+		for (;;) local_irq_enable();
+	}
+
+	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
+		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+
+	load_idt(&idt_descr);
+	switch_to_new_gdt();
+
+	/*
+	 * Set up and load the per-CPU TSS and LDT
+	 */
+	atomic_inc(&init_mm.mm_count);
+	curr->active_mm = &init_mm;
+	if (curr->mm)
+		BUG();
+	enter_lazy_tlb(&init_mm, curr);
+
+	load_sp0(t, thread);
+	set_tss_desc(cpu, t);
+	load_TR_desc();
+	load_LDT(&init_mm.context);
+
+#ifdef CONFIG_DOUBLEFAULT
+	/* Set up doublefault TSS pointer in the GDT */
+	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
+#endif
+
+	/* Clear %gs. */
+	asm volatile ("mov %0, %%gs" : : "r" (0));
+
+	/* Clear all 6 debug registers: */
+	set_debugreg(0, 0);
+	set_debugreg(0, 1);
+	set_debugreg(0, 2);
+	set_debugreg(0, 3);
+	set_debugreg(0, 6);
+	set_debugreg(0, 7);
+
+	/*
+	 * Force FPU initialization:
+	 */
+	if (cpu_has_xsave)
+		current_thread_info()->status = TS_XSAVE;
+	else
+		current_thread_info()->status = 0;
+	clear_used_math();
+	mxcsr_feature_mask_init();
+
+	/*
+	 * Boot processor to setup the FP and extended state context info.
+	 */
+	if (!smp_processor_id())
+		init_thread_xstate();
+
+	xsave_init();
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void __cpuinit cpu_uninit(void)
+{
+	int cpu = raw_smp_processor_id();
+	cpu_clear(cpu, cpu_initialized);
+
+	/* lazy TLB state */
+	per_cpu(cpu_tlbstate, cpu).state = 0;
+	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
+}
+#endif
+
+#endif