-rw-r--r--  arch/x86/include/asm/processor.h    5
-rw-r--r--  arch/x86/kernel/cpu/common.c        15
-rw-r--r--  arch/x86/kernel/setup_percpu.c      6
3 files changed, 12 insertions, 14 deletions
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 794234eba317..befa20b4a68c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -394,11 +394,6 @@ union irq_stack_union {
 
 DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
 DECLARE_PER_CPU(char *, irq_stack_ptr);
-
-static inline void load_gs_base(int cpu)
-{
-	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
-}
 #endif
 
 extern void print_cpu_info(struct cpuinfo_x86 *);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 67e30c8a282c..0c766b80d915 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -258,12 +258,17 @@ __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 void switch_to_new_gdt(void)
 {
 	struct desc_ptr gdt_descr;
+	int cpu = smp_processor_id();
 
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
+	/* Reload the per-cpu base */
 #ifdef CONFIG_X86_32
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+	loadsegment(fs, __KERNEL_PERCPU);
+#else
+	loadsegment(gs, 0);
+	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 #endif
 }
 
@@ -968,10 +973,6 @@ void __cpuinit cpu_init(void)
 	struct task_struct *me;
 	int i;
 
-	loadsegment(fs, 0);
-	loadsegment(gs, 0);
-	load_gs_base(cpu);
-
 #ifdef CONFIG_NUMA
 	if (cpu != 0 && percpu_read(node_number) == 0 &&
 	    cpu_to_node(cpu) != NUMA_NO_NODE)
@@ -993,6 +994,8 @@ void __cpuinit cpu_init(void)
 	 */
 
 	switch_to_new_gdt();
+	loadsegment(fs, 0);
+
 	load_idt((const struct desc_ptr *)&idt_descr);
 
 	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index bcca3a7b3748..4caa78d7cb15 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -113,13 +113,13 @@ void __init setup_per_cpu_areas(void)
 		per_cpu(x86_cpu_to_node_map, cpu) =
 			early_per_cpu_map(x86_cpu_to_node_map, cpu);
 #endif
+#endif
 		/*
 		 * Up to this point, the boot CPU has been using .data.init
-		 * area.  Reload %gs offset for the boot CPU.
+		 * area.  Reload any changed state for the boot CPU.
 		 */
 		if (cpu == boot_cpu_id)
-			load_gs_base(cpu);
-#endif
+			switch_to_new_gdt();
 
 		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
 	}
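
For readability, this is roughly how switch_to_new_gdt() reads with the patch applied, reconstructed from the common.c hunk above rather than copied from a tree. The GDT load and the per-cpu segment base reload now live in one helper, so cpu_init() and setup_per_cpu_areas() call it instead of the removed load_gs_base():

void switch_to_new_gdt(void)
{
	struct desc_ptr gdt_descr;
	int cpu = smp_processor_id();

	/* Point the descriptor at this CPU's GDT and load it */
	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Reload the per-cpu base */
#ifdef CONFIG_X86_32
	/* 32-bit reaches per-cpu data through %fs */
	loadsegment(fs, __KERNEL_PERCPU);
#else
	/* 64-bit clears the %gs selector and sets the base via MSR_GS_BASE */
	loadsegment(gs, 0);
	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
}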