Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
 arch/x86/kernel/cpu/common.c | 268
 1 file changed, 139 insertions(+), 129 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 83492b1f93b..d7dd3c294e2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -21,14 +21,14 @@
 #include <asm/asm.h>
 #include <asm/numa.h>
 #include <asm/smp.h>
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/mpspec.h>
+#include <asm/cpu.h>
+#include <asm/cpumask.h>
 #include <asm/apic.h>
-#include <mach_apic.h>
-#include <asm/genapic.h>
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/uv/uv.h>
 #endif
 
-#include <asm/pda.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/desc.h>
@@ -37,11 +37,10 @@
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/hypervisor.h>
+#include <asm/stackprotector.h>
 
 #include "cpu.h"
 
-#ifdef CONFIG_X86_64
-
 /* all of these masks are initialized in setup_cpu_local_masks() */
 cpumask_var_t cpu_callin_mask;
 cpumask_var_t cpu_callout_mask;
@@ -50,35 +49,34 @@ cpumask_var_t cpu_initialized_mask;
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
 
-#else /* CONFIG_X86_32 */
-
-cpumask_t cpu_callin_map;
-cpumask_t cpu_callout_map;
-cpumask_t cpu_initialized;
-cpumask_t cpu_sibling_setup_map;
-
-#endif /* CONFIG_X86_32 */
-
+/* correctly size the local cpu masks */
+void __init setup_cpu_local_masks(void)
+{
+	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+	alloc_bootmem_cpumask_var(&cpu_callin_mask);
+	alloc_bootmem_cpumask_var(&cpu_callout_mask);
+	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
 
 static struct cpu_dev *this_cpu __cpuinitdata;
 
+DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
-/* We need valid kernel segments for data and code in long mode too
- * IRET will check the segment types kkeil 2000/10/28
- * Also sysret mandates a special GDT layout
- */
-/* The TLS descriptors are currently at a different place compared to i386.
-   Hopefully nobody expects them at a fixed place (Wine?) */
-DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
+	/*
+	 * We need valid kernel segments for data and code in long mode too
+	 * IRET will check the segment types kkeil 2000/10/28
+	 * Also sysret mandates a special GDT layout
+	 *
+	 * The TLS descriptors are currently at a different place compared to i386.
+	 * Hopefully nobody expects them at a fixed place (Wine?)
+	 */
 	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
 	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
-} };
 #else
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
@@ -110,9 +108,10 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
 
 	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
-	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
-} };
+	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+	GDT_STACK_CANARY_INIT
 #endif
+} };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 #ifdef CONFIG_X86_32
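
The GDT entries above and in the previous hunk are written as raw (low word, high word) descriptor pairs. As a reading aid, here is a minimal user-space sketch that unpacks such a pair using the standard x86 segment-descriptor layout; the decode() helper is purely illustrative and not a kernel API.

#include <stdio.h>
#include <stdint.h>

/*
 * Standard descriptor layout:
 *   low  = limit[15:0] | base[15:0] << 16
 *   high = base[23:16] | access << 8 | limit[19:16] << 16 |
 *          flags << 20 | base[31:24] << 24
 */
static void decode(const char *name, uint32_t low, uint32_t high)
{
	uint32_t base   = (low >> 16) | ((high & 0xff) << 16) | (high & 0xff000000);
	uint32_t limit  = (low & 0xffff) | (high & 0x000f0000);
	uint32_t access = (high >> 8) & 0xff;
	uint32_t flags  = (high >> 20) & 0xf;

	if (flags & 0x8)		/* G bit: limit counted in 4 KiB pages */
		limit = (limit << 12) | 0xfff;

	printf("%-20s base=0x%08x limit=0x%08x access=0x%02x flags=0x%x\n",
	       name, base, limit, access, flags);
}

int main(void)
{
	decode("KERNEL_CS (64-bit)", 0x0000ffff, 0x00af9b00);
	decode("KERNEL_DS",          0x0000ffff, 0x00cf9300);
	decode("PERCPU (new value)", 0x0000ffff, 0x00cf9200);
	return 0;
}

Decoded this way, the change to GDT_ENTRY_PERCPU in the hunk above turns an all-zero (unusable) descriptor into a flat, writable data segment, which matches %fs being loaded with __KERNEL_PERCPU in load_percpu_segment() later in this patch.
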
@@ -213,6 +212,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 #endif
 
 /*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software. Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+	u32 feature;
+	u32 level;
+};
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+	{ X86_FEATURE_MWAIT,	0x00000005 },
+	{ X86_FEATURE_DCA,	0x00000009 },
+	{ X86_FEATURE_XSAVE,	0x0000000d },
+	{ 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+	const struct cpuid_dependent_feature *df;
+	for (df = cpuid_dependent_features; df->feature; df++) {
+		/*
+		 * Note: cpuid_level is set to -1 if unavailable, but
+		 * extended_cpuid_level is set to 0 if unavailable
+		 * and the legitimate extended levels are all negative
+		 * when signed; hence the weird messing around with
+		 * signs here...
+		 */
+		if (cpu_has(c, df->feature) &&
+		    ((s32)df->level < 0 ?
+		     (u32)df->level > (u32)c->extended_cpuid_level :
+		     (s32)df->level > (s32)c->cpuid_level)) {
+			clear_cpu_cap(c, df->feature);
+			if (warn)
+				printk(KERN_WARNING
+				       "CPU: CPU feature %s disabled "
+				       "due to lack of CPUID level 0x%x\n",
+				       x86_cap_flags[df->feature],
+				       df->level);
+		}
+	}
+}
+
+/*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
  * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
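
The sign games in filter_cpuid_features() are easier to see with concrete numbers. Below is a standalone sketch of the level check, using plain C99 types instead of the kernel's u32/s32 and a hypothetical level_missing() helper; the MWAIT/leaf-0x5 pairing comes straight from the table above.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/*
 * Basic leaves (0x0000xxxx) compare signed against cpuid_level, which is -1
 * when CPUID is unavailable.  Extended leaves (0x8000xxxx) look negative as
 * signed 32-bit values, so they compare unsigned against
 * extended_cpuid_level, which is 0 when unavailable.
 */
static bool level_missing(uint32_t level, int32_t cpuid_level,
			  uint32_t extended_cpuid_level)
{
	return (int32_t)level < 0 ?
		(uint32_t)level > extended_cpuid_level :
		(int32_t)level > cpuid_level;
}

int main(void)
{
	/* A CPU (or hypervisor) reporting basic leaves only up to 0x4. */
	int32_t  cpuid_level = 0x4;
	uint32_t extended_cpuid_level = 0x80000008;

	/* MWAIT depends on leaf 0x5, so it would be filtered out here. */
	printf("MWAIT (leaf 0x00000005) missing: %d\n",
	       level_missing(0x00000005, cpuid_level, extended_cpuid_level));
	/* Extended leaves are compared unsigned. */
	printf("leaf 0x8000000a missing: %d\n",
	       level_missing(0x8000000a, cpuid_level, extended_cpuid_level));
	printf("leaf 0x80000008 missing: %d\n",
	       level_missing(0x80000008, cpuid_level, extended_cpuid_level));
	return 0;
}
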
@@ -242,18 +284,29 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 
 __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 
+void load_percpu_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+	loadsegment(fs, __KERNEL_PERCPU);
+#else
+	loadsegment(gs, 0);
+	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+#endif
+	load_stack_canary_segment();
+}
+
 /* Current gdt points %fs at the "master" per-cpu area: after this,
  * it's on the real one. */
-void switch_to_new_gdt(void)
+void switch_to_new_gdt(int cpu)
 {
 	struct desc_ptr gdt_descr;
 
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
-#ifdef CONFIG_X86_32
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-#endif
+	/* Reload the per-cpu base */
+
+	load_percpu_segment(cpu);
 }
 
 static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
@@ -383,11 +436,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		}
 
 		index_msb = get_count_order(smp_num_siblings);
-#ifdef CONFIG_X86_64
-		c->phys_proc_id = phys_pkg_id(index_msb);
-#else
-		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-#endif
+		c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
 		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
@@ -395,13 +444,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 		core_bits = get_count_order(c->x86_max_cores);
 
-#ifdef CONFIG_X86_64
-		c->cpu_core_id = phys_pkg_id(index_msb) &
-			((1 << core_bits) - 1);
-#else
-		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+		c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 			((1 << core_bits) - 1);
-#endif
 	}
 
 out:
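
The two apic->phys_pkg_id() calls in detect_ht() just peel the package and core fields out of the initial APIC ID. A self-contained sketch with a made-up topology (in the flat APIC driver, phys_pkg_id() is essentially a right shift; count_order() below stands in for the kernel's get_count_order()):

#include <stdio.h>

/* Smallest n such that (1 << n) >= x, mirroring get_count_order(). */
static int count_order(unsigned int x)
{
	int order = 0;

	while ((1u << order) < x)
		order++;
	return order;
}

int main(void)
{
	/* Assumed topology: 2 threads per core, 4 cores per package. */
	unsigned int smp_num_siblings = 8;	/* threads per package */
	unsigned int x86_max_cores = 4;
	unsigned int initial_apicid = 0x0d;	/* package 1, core 2, thread 1 */

	int index_msb = count_order(smp_num_siblings);		/* 3 */
	unsigned int phys_proc_id = initial_apicid >> index_msb;

	smp_num_siblings /= x86_max_cores;			/* now 2 */
	index_msb = count_order(smp_num_siblings);		/* 1 */
	int core_bits = count_order(x86_max_cores);		/* 2 */
	unsigned int cpu_core_id =
		(initial_apicid >> index_msb) & ((1 << core_bits) - 1);

	printf("apicid=0x%02x -> phys_proc_id=%u cpu_core_id=%u\n",
	       initial_apicid, phys_proc_id, cpu_core_id);
	return 0;
}
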
@@ -570,11 +614,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
 
-	validate_pat_support(c);
-
 #ifdef CONFIG_SMP
 	c->cpu_index = boot_cpu_id;
 #endif
+	filter_cpuid_features(c, false);
 }
 
 void __init early_cpu_init(void)
@@ -637,7 +680,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
 # ifdef CONFIG_X86_HT
-	c->apicid = phys_pkg_id(c->initial_apicid, 0);
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 # else
 	c->apicid = c->initial_apicid;
 # endif
@@ -684,7 +727,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 		this_cpu->c_identify(c);
 
 #ifdef CONFIG_X86_64
-	c->apicid = phys_pkg_id(0);
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 #endif
 
 	/*
@@ -708,6 +751,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 * we do "generic changes."
 	 */
 
+	/* Filter out anything that depends on CPUID levels we don't have */
+	filter_cpuid_features(c, true);
+
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
 		char *p;
@@ -766,6 +812,7 @@ static void vgetcpu_set_mode(void)
 void __init identify_boot_cpu(void)
 {
 	identify_cpu(&boot_cpu_data);
+	init_c1e_mask();
 #ifdef CONFIG_X86_32
 	sysenter_setup();
 	enable_sep_cpu();
@@ -877,54 +924,22 @@ static __init int setup_disablecpuid(char *arg)
877__setup("clearcpuid=", setup_disablecpuid); 924__setup("clearcpuid=", setup_disablecpuid);
878 925
879#ifdef CONFIG_X86_64 926#ifdef CONFIG_X86_64
880struct x8664_pda **_cpu_pda __read_mostly;
881EXPORT_SYMBOL(_cpu_pda);
882
883struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; 927struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
884 928
885static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss; 929DEFINE_PER_CPU_FIRST(union irq_stack_union,
930 irq_stack_union) __aligned(PAGE_SIZE);
931DEFINE_PER_CPU(char *, irq_stack_ptr) =
932 init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
886 933
887void __cpuinit pda_init(int cpu) 934DEFINE_PER_CPU(unsigned long, kernel_stack) =
888{ 935 (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
889 struct x8664_pda *pda = cpu_pda(cpu); 936EXPORT_PER_CPU_SYMBOL(kernel_stack);
890 937
891 /* Setup up data that may be needed in __get_free_pages early */ 938DEFINE_PER_CPU(unsigned int, irq_count) = -1;
892 loadsegment(fs, 0);
893 loadsegment(gs, 0);
894 /* Memory clobbers used to order PDA accessed */
895 mb();
896 wrmsrl(MSR_GS_BASE, pda);
897 mb();
898
899 pda->cpunumber = cpu;
900 pda->irqcount = -1;
901 pda->kernelstack = (unsigned long)stack_thread_info() -
902 PDA_STACKOFFSET + THREAD_SIZE;
903 pda->active_mm = &init_mm;
904 pda->mmu_state = 0;
905
906 if (cpu == 0) {
907 /* others are initialized in smpboot.c */
908 pda->pcurrent = &init_task;
909 pda->irqstackptr = boot_cpu_stack;
910 pda->irqstackptr += IRQSTACKSIZE - 64;
911 } else {
912 if (!pda->irqstackptr) {
913 pda->irqstackptr = (char *)
914 __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
915 if (!pda->irqstackptr)
916 panic("cannot allocate irqstack for cpu %d",
917 cpu);
918 pda->irqstackptr += IRQSTACKSIZE - 64;
919 }
920
921 if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
922 pda->nodenumber = cpu_to_node(cpu);
923 }
924}
925 939
926static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + 940static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
927 DEBUG_STKSZ] __page_aligned_bss; 941 [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
942 __aligned(PAGE_SIZE);
928 943
929extern asmlinkage void ignore_sysret(void); 944extern asmlinkage void ignore_sysret(void);
930 945
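
The new per-cpu irq_stack_ptr and kernel_stack initializers all point near the top of their backing storage, because x86 stacks grow downward; the 64-byte gap below the top mirrors what pda->irqstackptr used to reserve. A small sketch of that arithmetic with illustrative sizes (IRQ_STACK_SIZE here is an assumed value, not taken from the kernel headers):

#include <stdio.h>

#define IRQ_STACK_SIZE	(16 * 1024)	/* illustrative only */
#define TOP_SLACK	64		/* scratch space kept above the stack */

static char irq_stack[IRQ_STACK_SIZE];

int main(void)
{
	/* Analogous to irq_stack_ptr = ...irq_stack + IRQ_STACK_SIZE - 64 */
	char *irq_stack_ptr = irq_stack + IRQ_STACK_SIZE - TOP_SLACK;

	printf("stack base %p, initial top %p, %td usable bytes\n",
	       (void *)irq_stack, (void *)irq_stack_ptr,
	       irq_stack_ptr - irq_stack);
	return 0;
}
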
@@ -957,16 +972,21 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else
+#else /* x86_64 */
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+DEFINE_PER_CPU(unsigned long, stack_canary);
+#endif
 
-/* Make sure %fs is initialized properly in idle threads */
+/* Make sure %fs and %gs are initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));
 	regs->fs = __KERNEL_PERCPU;
+	regs->gs = __KERNEL_STACK_CANARY;
 	return regs;
 }
-#endif
+#endif /* x86_64 */
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
@@ -982,15 +1002,14 @@ void __cpuinit cpu_init(void)
 	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
 	unsigned long v;
-	char *estacks = NULL;
 	struct task_struct *me;
 	int i;
 
-	/* CPU 0 is initialised in head64.c */
-	if (cpu != 0)
-		pda_init(cpu);
-	else
-		estacks = boot_exception_stacks;
+#ifdef CONFIG_NUMA
+	if (cpu != 0 && percpu_read(node_number) == 0 &&
+	    cpu_to_node(cpu) != NUMA_NO_NODE)
+		percpu_write(node_number, cpu_to_node(cpu));
+#endif
 
 	me = current;
 
@@ -1006,7 +1025,9 @@ void __cpuinit cpu_init(void)
 	 * and set up the GDT descriptor:
 	 */
 
-	switch_to_new_gdt();
+	switch_to_new_gdt(cpu);
+	loadsegment(fs, 0);
+
 	load_idt((const struct desc_ptr *)&idt_descr);
 
 	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
@@ -1017,25 +1038,20 @@ void __cpuinit cpu_init(void)
 	barrier();
 
 	check_efer();
-	if (cpu != 0 && x2apic)
+	if (cpu != 0)
 		enable_x2apic();
 
 	/*
 	 * set up and load the per-CPU TSS
 	 */
 	if (!orig_ist->ist[0]) {
-		static const unsigned int order[N_EXCEPTION_STACKS] = {
-		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
-		  [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+		static const unsigned int sizes[N_EXCEPTION_STACKS] = {
+		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
+		  [DEBUG_STACK - 1] = DEBUG_STKSZ
 		};
+		char *estacks = per_cpu(exception_stacks, cpu);
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			if (cpu) {
-				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-				if (!estacks)
-					panic("Cannot allocate exception "
-						"stack %ld %d\n", v, cpu);
-			}
-			estacks += PAGE_SIZE << order[v];
+			estacks += sizes[v];
 			orig_ist->ist[v] = t->x86_tss.ist[v] =
 				(unsigned long)estacks;
 		}
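
The rewritten IST setup no longer allocates each exception stack with __get_free_pages(); it carves fixed-size slices out of the statically sized per-CPU exception_stacks array. A standalone replay of that arithmetic, with N_EXCEPTION_STACKS, EXCEPTION_STKSZ, DEBUG_STKSZ and DEBUG_STACK assumed to be 5, 4 KiB, 8 KiB and 4 purely for illustration:

#include <stdio.h>

#define N_EXCEPTION_STACKS	5	/* illustrative values only */
#define EXCEPTION_STKSZ		4096
#define DEBUG_STKSZ		8192
#define DEBUG_STACK		4

static char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
			     DEBUG_STKSZ];

int main(void)
{
	/* Same GCC range-designator initializer as the kernel uses above. */
	static const unsigned int sizes[N_EXCEPTION_STACKS] = {
		[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
		[DEBUG_STACK - 1] = DEBUG_STKSZ
	};
	char *estacks = exception_stacks;
	int v;

	/* Each IST entry ends up at the top of its slice (stacks grow down). */
	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
		estacks += sizes[v];
		printf("IST[%d]: top at offset %td (size %u)\n",
		       v, estacks - exception_stacks, sizes[v]);
	}
	return 0;
}

The last slice ends exactly at the end of the buffer, which is why the array is sized (N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ.
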
@@ -1069,22 +1085,19 @@ void __cpuinit cpu_init(void)
 	 */
 	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
 		arch_kgdb_ops.correct_hw_break();
-	else {
+	else
 #endif
-	/*
-	 * Clear all 6 debug registers:
-	 */
-
-	set_debugreg(0UL, 0);
-	set_debugreg(0UL, 1);
-	set_debugreg(0UL, 2);
-	set_debugreg(0UL, 3);
-	set_debugreg(0UL, 6);
-	set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
-	/* If the kgdb is connected no debug regs should be altered. */
+	{
+		/*
+		 * Clear all 6 debug registers:
+		 */
+		set_debugreg(0UL, 0);
+		set_debugreg(0UL, 1);
+		set_debugreg(0UL, 2);
+		set_debugreg(0UL, 3);
+		set_debugreg(0UL, 6);
+		set_debugreg(0UL, 7);
 	}
-#endif
 
 	fpu_init();
 
@@ -1114,7 +1127,7 @@ void __cpuinit cpu_init(void)
 	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
 	load_idt(&idt_descr);
-	switch_to_new_gdt();
+	switch_to_new_gdt(cpu);
 
 	/*
 	 * Set up and load the per-CPU TSS and LDT
@@ -1135,9 +1148,6 @@ void __cpuinit cpu_init(void)
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear %gs. */
-	asm volatile ("mov %0, %%gs" : : "r" (0));
-
 	/* Clear all 6 debug registers: */
 	set_debugreg(0, 0);
 	set_debugreg(0, 1);