author    Ingo Molnar <mingo@elte.hu>    2009-01-27 06:03:24 -0500
committer Ingo Molnar <mingo@elte.hu>    2009-01-27 06:03:24 -0500
commit    4369f1fb7cd4cf777312f43e1cb9aa5504fc4125 (patch)
tree      a5525d63fe682e6744c109fb72f7e1b33855cb00 /arch
parent    3ddeb51d9c83931c1ca6abf76a38934c5a1ed918 (diff)
parent    cf3997f507624757f149fcc42b76fb03c151fb65 (diff)
Merge branch 'tj-percpu' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into core/percpu
Conflicts:
	arch/x86/kernel/setup_percpu.c

Semantic conflict:
	arch/x86/kernel/cpu/common.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/Kconfig	2
-rw-r--r--	arch/x86/include/asm/cpumask.h	4
-rw-r--r--	arch/x86/include/asm/processor.h	9
-rw-r--r--	arch/x86/include/asm/topology.h	6
-rw-r--r--	arch/x86/kernel/Makefile	6
-rw-r--r--	arch/x86/kernel/apic.c	18
-rw-r--r--	arch/x86/kernel/cpu/common.c	24
-rw-r--r--	arch/x86/kernel/setup_percpu.c	378
-rw-r--r--	arch/x86/kernel/smpboot.c	4
-rw-r--r--	arch/x86/kernel/smpcommon.c	32
-rw-r--r--	arch/x86/mach-voyager/voyager_smp.c	5
-rw-r--r--	arch/x86/mm/numa_64.c	217
-rw-r--r--	arch/x86/xen/smp.c	1
13 files changed, 323 insertions, 383 deletions
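
The unifying idea behind the merged percpu rework is that every possible CPU gets its own page-aligned copy of the per-cpu data section, and only the offset of that copy from the reference section is recorded in __per_cpu_offset[] (and later programmed into %fs on 32-bit or MSR_GS_BASE on 64-bit, as the hunks below show). A minimal user-space sketch of that bookkeeping, with invented sizes and plain aligned_alloc() standing in for the kernel's bootmem allocator:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define NR_CPUS     4       /* stand-in for the kernel's NR_CPUS      */
#define PERCPU_ROOM 4096    /* stand-in for PERCPU_ENOUGH_ROOM        */

static char reference_section[PERCPU_ROOM]; /* like the linked per-cpu section */
static intptr_t per_cpu_offset[NR_CPUS];    /* like __per_cpu_offset[]         */

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		/* one private, page-aligned copy per CPU (the kernel uses bootmem) */
		char *ptr = aligned_alloc(4096, PERCPU_ROOM);
		if (!ptr)
			return 1;
		memcpy(ptr, reference_section, PERCPU_ROOM);
		/* remember only the delta; the segment base is set from it later */
		per_cpu_offset[cpu] = (intptr_t)ptr - (intptr_t)reference_section;
		printf("cpu %d: per-cpu copy at %p, offset %ld\n",
		       cpu, (void *)ptr, (long)per_cpu_offset[cpu]);
	}
	return 0;
}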
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5a29b792cb84..d6218e6c9824 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -133,7 +133,7 @@ config ARCH_HAS_CACHE_LINE_SIZE
 	def_bool y
 
 config HAVE_SETUP_PER_CPU_AREA
-	def_bool X86_64_SMP || (X86_SMP && !X86_VOYAGER)
+	def_bool y
 
 config HAVE_CPUMASK_OF_CPU_MAP
 	def_bool X86_64_SMP
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
index 26c6dad90479..a7f3c75f8ad7 100644
--- a/arch/x86/include/asm/cpumask.h
+++ b/arch/x86/include/asm/cpumask.h
@@ -10,6 +10,8 @@ extern cpumask_var_t cpu_callout_mask;
 extern cpumask_var_t cpu_initialized_mask;
 extern cpumask_var_t cpu_sibling_setup_mask;
 
+extern void setup_cpu_local_masks(void);
+
 #else /* CONFIG_X86_32 */
 
 extern cpumask_t cpu_callin_map;
@@ -22,6 +24,8 @@ extern cpumask_t cpu_sibling_setup_map;
 #define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
 #define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
 
+static inline void setup_cpu_local_masks(void) { }
+
 #endif /* CONFIG_X86_32 */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 48676b943b92..befa20b4a68c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -394,14 +394,6 @@ union irq_stack_union {
 
 DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
 DECLARE_PER_CPU(char *, irq_stack_ptr);
-
-static inline void load_gs_base(int cpu)
-{
-	/* Memory clobbers used to order pda/percpu accesses */
-	mb();
-	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
-	mb();
-}
 #endif
 
 extern void print_cpu_info(struct cpuinfo_x86 *);
@@ -778,7 +770,6 @@ extern struct desc_ptr early_gdt_descr;
 extern void cpu_set_gdt(int);
 extern void switch_to_new_gdt(void);
 extern void cpu_init(void);
-extern void init_gdt(int cpu);
 
 static inline unsigned long get_debugctlmsr(void)
 {
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 10022ed3a4b6..77cfb2cfb386 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -74,6 +74,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
 	return &node_to_cpumask_map[node];
 }
 
+static inline void setup_node_to_cpumask_map(void) { }
+
 #else /* CONFIG_X86_64 */
 
 /* Mappings between node number and cpus on that node. */
@@ -120,6 +122,8 @@ static inline cpumask_t node_to_cpumask(int node)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
+extern void setup_node_to_cpumask_map(void);
+
 /*
  * Replace default node_to_cpumask_ptr with optimized version
  * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
@@ -218,6 +222,8 @@ static inline int node_to_first_cpu(int node)
 	return first_cpu(cpu_online_map);
 }
 
+static inline void setup_node_to_cpumask_map(void) { }
+
 /*
  * Replace default node_to_cpumask_ptr with optimized version
  * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index a99437c965cc..37fa30bada17 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -28,7 +28,7 @@ CFLAGS_paravirt.o := $(nostackp)
 obj-y := process_$(BITS).o signal.o entry_$(BITS).o
 obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o
-obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
+obj-y += setup.o i8259.o irqinit_$(BITS).o
 obj-$(CONFIG_X86_VISWS) += visws_quirks.o
 obj-$(CONFIG_X86_32) += probe_roms_32.o
 obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
@@ -59,8 +59,8 @@ apm-y := apm_32.o
 obj-$(CONFIG_APM) += apm.o
 obj-$(CONFIG_X86_SMP) += smp.o
 obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o
-obj-$(CONFIG_X86_32_SMP) += smpcommon.o
-obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o
+obj-$(CONFIG_SMP) += setup_percpu.o
+obj-$(CONFIG_X86_64_SMP) += tsc_sync.o
 obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
 obj-$(CONFIG_X86_MPPARSE) += mpparse.o
 obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 1df341a528a1..c6f15647eba9 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -60,6 +60,24 @@
 # error SPURIOUS_APIC_VECTOR definition error
 #endif
 
+unsigned int num_processors;
+unsigned disabled_cpus __cpuinitdata;
+/* Processor that is doing the boot up */
+unsigned int boot_cpu_physical_apicid = -1U;
+EXPORT_SYMBOL(boot_cpu_physical_apicid);
+unsigned int max_physical_apicid;
+
+/* Bitmask of physically existing CPUs */
+physid_mask_t phys_cpu_present_map;
+
+/*
+ * Map cpu index to physical APIC ID
+ */
+DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
+
 #ifdef CONFIG_X86_32
 /*
  * Knob to control our willingness to enable the local APIC.
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 99904f288d6a..652fdc9a757a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -52,6 +52,15 @@ cpumask_var_t cpu_initialized_mask;
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
 
+/* correctly size the local cpu masks */
+void __init setup_cpu_local_masks(void)
+{
+	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+	alloc_bootmem_cpumask_var(&cpu_callin_mask);
+	alloc_bootmem_cpumask_var(&cpu_callout_mask);
+	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
 #else /* CONFIG_X86_32 */
 
 cpumask_t cpu_callin_map;
@@ -249,12 +258,17 @@ __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 void switch_to_new_gdt(void)
 {
 	struct desc_ptr gdt_descr;
+	int cpu = smp_processor_id();
 
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
+	/* Reload the per-cpu base */
 #ifdef CONFIG_X86_32
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+	loadsegment(fs, __KERNEL_PERCPU);
+#else
+	loadsegment(gs, 0);
+	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 #endif
 }
 
@@ -959,10 +973,6 @@ void __cpuinit cpu_init(void)
 	struct task_struct *me;
 	int i;
 
-	loadsegment(fs, 0);
-	loadsegment(gs, 0);
-	load_gs_base(cpu);
-
 #ifdef CONFIG_NUMA
 	if (cpu != 0 && percpu_read(node_number) == 0 &&
 	    cpu_to_node(cpu) != NUMA_NO_NODE)
@@ -984,6 +994,8 @@ void __cpuinit cpu_init(void)
 	 */
 
 	switch_to_new_gdt();
+	loadsegment(fs, 0);
+
 	load_idt((const struct desc_ptr *)&idt_descr);
 
 	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index e553803cd2db..0d1e7ac439f4 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -15,6 +15,7 @@
 #include <asm/highmem.h>
 #include <asm/proto.h>
 #include <asm/cpumask.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 # define DBG(x...) printk(KERN_DEBUG x)
@@ -22,118 +23,36 @@
 # define DBG(x...)
 #endif
 
-/*
- * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but
- * voyager wants cpu_number too.
- */
-#ifdef CONFIG_SMP
 DEFINE_PER_CPU(int, cpu_number);
 EXPORT_PER_CPU_SYMBOL(cpu_number);
-#endif
-
-#ifdef CONFIG_X86_LOCAL_APIC
-unsigned int num_processors;
-unsigned disabled_cpus __cpuinitdata;
-/* Processor that is doing the boot up */
-unsigned int boot_cpu_physical_apicid = -1U;
-EXPORT_SYMBOL(boot_cpu_physical_apicid);
-unsigned int max_physical_apicid;
-
-/* Bitmask of physically existing CPUs */
-physid_mask_t phys_cpu_present_map;
-#endif
-
-/*
- * Map cpu index to physical APIC ID
- */
-DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
-DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
-
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-#define X86_64_NUMA 1 /* (used later) */
-DEFINE_PER_CPU(int, node_number) = 0;
-EXPORT_PER_CPU_SYMBOL(node_number);
-
-/*
- * Map cpu index to node index
- */
-DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
-
-/*
- * Which logical CPUs are on which nodes
- */
-cpumask_t *node_to_cpumask_map;
-EXPORT_SYMBOL(node_to_cpumask_map);
-
-/*
- * Setup node_to_cpumask_map
- */
-static void __init setup_node_to_cpumask_map(void);
 
+#ifdef CONFIG_X86_64
+#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
 #else
-static inline void setup_node_to_cpumask_map(void) { }
+#define BOOT_PERCPU_OFFSET 0
 #endif
 
-#ifdef CONFIG_X86_64
-
-/* correctly size the local cpu masks */
-static void __init setup_cpu_local_masks(void)
-{
-	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
-	alloc_bootmem_cpumask_var(&cpu_callin_mask);
-	alloc_bootmem_cpumask_var(&cpu_callout_mask);
-	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
-}
-
-#else /* CONFIG_X86_32 */
-
-static inline void setup_cpu_local_masks(void)
-{
-}
+DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 
-#endif /* CONFIG_X86_32 */
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+};
+EXPORT_SYMBOL(__per_cpu_offset);
 
-#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
-/*
- * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas. These arrays then become expendable and the
- * *_early_ptr's are zeroed indicating that the static arrays are gone.
- */
-static void __init setup_per_cpu_maps(void)
+static inline void setup_percpu_segment(int cpu)
 {
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		per_cpu(x86_cpu_to_apicid, cpu) =
-				early_per_cpu_map(x86_cpu_to_apicid, cpu);
-		per_cpu(x86_bios_cpu_apicid, cpu) =
-			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
-#ifdef X86_64_NUMA
-		per_cpu(x86_cpu_to_node_map, cpu) =
-				early_per_cpu_map(x86_cpu_to_node_map, cpu);
-#endif
-	}
+#ifdef CONFIG_X86_32
+	struct desc_struct gdt;
 
-	/* indicate the early static arrays will soon be gone */
-	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
-	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
-#ifdef X86_64_NUMA
-	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
+	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+			0x2 | DESCTYPE_S, 0x8);
+	gdt.s = 1;
+	write_gdt_entry(get_cpu_gdt_table(cpu),
+			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
 #endif
 }
 
-#ifdef CONFIG_X86_64
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
-	[0] = (unsigned long)__per_cpu_load,
-};
-#else
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-#endif
-EXPORT_SYMBOL(__per_cpu_offset);
-
 /*
  * Great future plan:
  * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
@@ -141,15 +60,12 @@ EXPORT_SYMBOL(__per_cpu_offset);
  */
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t size, old_size;
+	ssize_t size;
 	char *ptr;
 	int cpu;
-	unsigned long align = 1;
 
 	/* Copy section for each CPU (we discard the original) */
-	old_size = PERCPU_ENOUGH_ROOM;
-	align = max_t(unsigned long, PAGE_SIZE, align);
-	size = roundup(old_size, align);
+	size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
 
 	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
 		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
@@ -158,20 +74,17 @@ void __init setup_per_cpu_areas(void)
 
 	for_each_possible_cpu(cpu) {
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-		ptr = __alloc_bootmem(size, align,
-				      __pa(MAX_DMA_ADDRESS));
+		ptr = alloc_bootmem_pages(size);
 #else
 		int node = early_cpu_to_node(cpu);
 		if (!node_online(node) || !NODE_DATA(node)) {
-			ptr = __alloc_bootmem(size, align,
-					      __pa(MAX_DMA_ADDRESS));
+			ptr = alloc_bootmem_pages(size);
 			pr_info("cpu %d has no node %d or node-local memory\n",
 				cpu, node);
 			pr_debug("per cpu data for cpu%d at %016lx\n",
 				 cpu, __pa(ptr));
 		} else {
-			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
-						   __pa(MAX_DMA_ADDRESS));
+			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
 			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
 				 cpu, node, __pa(ptr));
 		}
@@ -181,22 +94,47 @@ void __init setup_per_cpu_areas(void)
 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
 		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
 		per_cpu(cpu_number, cpu) = cpu;
+		setup_percpu_segment(cpu);
+		/*
+		 * Copy data used in early init routines from the
+		 * initial arrays to the per cpu data areas. These
+		 * arrays then become expendable and the *_early_ptr's
+		 * are zeroed indicating that the static arrays are
+		 * gone.
+		 */
+#ifdef CONFIG_X86_LOCAL_APIC
+		per_cpu(x86_cpu_to_apicid, cpu) =
+			early_per_cpu_map(x86_cpu_to_apicid, cpu);
+		per_cpu(x86_bios_cpu_apicid, cpu) =
+			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
+#endif
 #ifdef CONFIG_X86_64
 		per_cpu(irq_stack_ptr, cpu) =
-			per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
+			per_cpu(irq_stack_union.irq_stack, cpu) +
+			IRQ_STACK_SIZE - 64;
+#ifdef CONFIG_NUMA
+		per_cpu(x86_cpu_to_node_map, cpu) =
+			early_per_cpu_map(x86_cpu_to_node_map, cpu);
+#endif
+#endif
 		/*
-		 * Up to this point, CPU0 has been using .data.init
-		 * area. Reload %gs offset for CPU0.
+		 * Up to this point, the boot CPU has been using .data.init
+		 * area. Reload any changed state for the boot CPU.
 		 */
-		if (cpu == 0)
-			load_gs_base(cpu);
-#endif
+		if (cpu == boot_cpu_id)
+			switch_to_new_gdt();
 
 		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
 	}
 
-	/* Setup percpu data maps */
-	setup_per_cpu_maps();
+	/* indicate the early static arrays will soon be gone */
+#ifdef CONFIG_X86_LOCAL_APIC
+	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
+	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
+#endif
+#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
+	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
+#endif
 
 	/* Setup node to cpumask map */
 	setup_node_to_cpumask_map();
@@ -204,207 +142,3 @@ void __init setup_per_cpu_areas(void)
 	/* Setup cpu initialized, callin, callout masks */
 	setup_cpu_local_masks();
 }
-
-#endif
-
-#ifdef X86_64_NUMA
-
-/*
- * Allocate node_to_cpumask_map based on number of available nodes
- * Requires node_possible_map to be valid.
- *
- * Note: node_to_cpumask() is not valid until after this is done.
- * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
- */
-static void __init setup_node_to_cpumask_map(void)
-{
-	unsigned int node, num = 0;
-	cpumask_t *map;
-
-	/* setup nr_node_ids if not done yet */
-	if (nr_node_ids == MAX_NUMNODES) {
-		for_each_node_mask(node, node_possible_map)
-			num = node;
-		nr_node_ids = num + 1;
-	}
-
-	/* allocate the map */
-	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
-	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
-
-	pr_debug("Node to cpumask map at %p for %d nodes\n",
-		 map, nr_node_ids);
-
-	/* node_to_cpumask() will now work */
-	node_to_cpumask_map = map;
-}
-
-void __cpuinit numa_set_node(int cpu, int node)
-{
-	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
-
-	/* early setting, no percpu area yet */
-	if (cpu_to_node_map) {
-		cpu_to_node_map[cpu] = node;
-		return;
-	}
-
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
-		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
-		dump_stack();
-		return;
-	}
-#endif
-	per_cpu(x86_cpu_to_node_map, cpu) = node;
-
-	if (node != NUMA_NO_NODE)
-		per_cpu(node_number, cpu) = node;
-}
-
-void __cpuinit numa_clear_node(int cpu)
-{
-	numa_set_node(cpu, NUMA_NO_NODE);
-}
-
-#ifndef CONFIG_DEBUG_PER_CPU_MAPS
-
-void __cpuinit numa_add_cpu(int cpu)
-{
-	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
-}
-
-void __cpuinit numa_remove_cpu(int cpu)
-{
-	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
-}
-
-#else /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-/*
- * --------- debug versions of the numa functions ---------
- */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
-{
-	int node = early_cpu_to_node(cpu);
-	cpumask_t *mask;
-	char buf[64];
-
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_ERR "node_to_cpumask_map NULL\n");
-		dump_stack();
-		return;
-	}
-
-	mask = &node_to_cpumask_map[node];
-	if (enable)
-		cpu_set(cpu, *mask);
-	else
-		cpu_clear(cpu, *mask);
-
-	cpulist_scnprintf(buf, sizeof(buf), mask);
-	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
-}
-
-void __cpuinit numa_add_cpu(int cpu)
-{
-	numa_set_cpumask(cpu, 1);
-}
-
-void __cpuinit numa_remove_cpu(int cpu)
-{
-	numa_set_cpumask(cpu, 0);
-}
-
-int cpu_to_node(int cpu)
-{
-	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
-		printk(KERN_WARNING
-			"cpu_to_node(%d): usage too early!\n", cpu);
-		dump_stack();
-		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-	}
-	return per_cpu(x86_cpu_to_node_map, cpu);
-}
-EXPORT_SYMBOL(cpu_to_node);
-
-/*
- * Same function as cpu_to_node() but used if called before the
- * per_cpu areas are setup.
- */
-int early_cpu_to_node(int cpu)
-{
-	if (early_per_cpu_ptr(x86_cpu_to_node_map))
-		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
-	if (!per_cpu_offset(cpu)) {
-		printk(KERN_WARNING
-			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
-		dump_stack();
-		return NUMA_NO_NODE;
-	}
-	return per_cpu(x86_cpu_to_node_map, cpu);
-}
-
-
-/* empty cpumask */
-static const cpumask_t cpu_mask_none;
-
-/*
- * Returns a pointer to the bitmask of CPUs on Node 'node'.
- */
-const cpumask_t *cpumask_of_node(int node)
-{
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_WARNING
-			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
-			node);
-		dump_stack();
-		return (const cpumask_t *)&cpu_online_map;
-	}
-	if (node >= nr_node_ids) {
-		printk(KERN_WARNING
-			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
-			node, nr_node_ids);
-		dump_stack();
-		return &cpu_mask_none;
-	}
-	return &node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(cpumask_of_node);
-
-/*
- * Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used. The
- * node_to_cpumask_ptr function should be used whenever possible.
- */
-cpumask_t node_to_cpumask(int node)
-{
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_WARNING
-			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
-		dump_stack();
-		return cpu_online_map;
-	}
-	if (node >= nr_node_ids) {
-		printk(KERN_WARNING
-			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
-			node, nr_node_ids);
-		dump_stack();
-		return cpu_mask_none;
-	}
-	return node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(node_to_cpumask);
-
-/*
- * --------- end of debug versions of the numa functions ---------
- */
-
-#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-#endif /* X86_64_NUMA */
-
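
With setup_per_cpu_maps() gone, the copy from the early static arrays into the freshly allocated per-cpu areas happens inside the allocation loop shown above, and the early pointers are NULLed once all CPUs are done. A rough user-space sketch of that hand-over pattern (array names and contents are invented for illustration, not taken from the kernel):

#include <stdio.h>

#define NR_CPUS 4

/* boot-time static table, analogous to a DEFINE_EARLY_PER_CPU array */
static unsigned short early_apicid[NR_CPUS] = { 0, 2, 4, 6 };
static unsigned short *early_apicid_ptr = early_apicid;

/* stand-in for the per-CPU copies created by setup_per_cpu_areas() */
static unsigned short percpu_apicid[NR_CPUS];

static unsigned short read_apicid(int cpu)
{
	/* callers keep working both before and after the hand-over */
	return early_apicid_ptr ? early_apicid_ptr[cpu] : percpu_apicid[cpu];
}

int main(void)
{
	int cpu;

	/* what the per-CPU loop now does for each early map */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		percpu_apicid[cpu] = early_apicid_ptr[cpu];

	/* "indicate the early static arrays will soon be gone" */
	early_apicid_ptr = NULL;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: apicid %u\n", cpu, read_apicid(cpu));
	return 0;
}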
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index def770b57b5a..f9dbcff43546 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -793,7 +793,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 do_rest:
 	per_cpu(current_task, cpu) = c_idle.idle;
 #ifdef CONFIG_X86_32
-	init_gdt(cpu);
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	irq_ctx_init(cpu);
 #else
@@ -1186,9 +1185,6 @@ out:
 void __init native_smp_prepare_boot_cpu(void)
 {
 	int me = smp_processor_id();
-#ifdef CONFIG_X86_32
-	init_gdt(me);
-#endif
 	switch_to_new_gdt();
 	/* already set me in cpu_online_mask in boot_cpu_init() */
 	cpumask_set_cpu(me, cpu_callout_mask);
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
deleted file mode 100644
index add36b4e37c9..000000000000
--- a/arch/x86/kernel/smpcommon.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * SMP stuff which is common to all sub-architectures.
- */
-#include <linux/module.h>
-#include <asm/smp.h>
-#include <asm/sections.h>
-
-#ifdef CONFIG_X86_64
-DEFINE_PER_CPU(unsigned long, this_cpu_off) = (unsigned long)__per_cpu_load;
-#else
-DEFINE_PER_CPU(unsigned long, this_cpu_off);
-#endif
-EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
-#ifdef CONFIG_X86_32
-/*
- * Initialize the CPU's GDT. This is either the boot CPU doing itself
- * (still using the master per-cpu area), or a CPU doing it for a
- * secondary which will soon come up.
- */
-__cpuinit void init_gdt(int cpu)
-{
-	struct desc_struct gdt;
-
-	pack_descriptor(&gdt, __per_cpu_offset[cpu], 0xFFFFF,
-			0x2 | DESCTYPE_S, 0x8);
-	gdt.s = 1;
-
-	write_gdt_entry(get_cpu_gdt_table(cpu),
-			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
-}
-#endif
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 96f15b09a4c5..331cd6d56483 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -530,8 +530,6 @@ static void __init do_boot_cpu(__u8 cpu)
 	/* init_tasks (in sched.c) is indexed logically */
 	stack_start.sp = (void *)idle->thread.sp;
 
-	init_gdt(cpu);
-	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
 	per_cpu(current_task, cpu) = idle;
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	irq_ctx_init(cpu);
@@ -1748,8 +1746,6 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
 
 static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 {
-	init_gdt(smp_processor_id());
-	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
 	switch_to_new_gdt();
 
 	cpu_set(smp_processor_id(), cpu_online_map);
@@ -1782,7 +1778,6 @@ static void __init voyager_smp_cpus_done(unsigned int max_cpus)
 void __init smp_setup_processor_id(void)
 {
 	current_thread_info()->cpu = hard_smp_processor_id();
-	percpu_write(cpu_number, hard_smp_processor_id());
 }
 
 static void voyager_send_call_func(cpumask_t callmask)
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 71a14f89f89e..08d140fbc31b 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -20,6 +20,12 @@
 #include <asm/acpi.h>
 #include <asm/k8.h>
 
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
+#else
+# define DBG(x...)
+#endif
+
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
@@ -33,6 +39,21 @@ int numa_off __initdata;
 static unsigned long __initdata nodemap_addr;
 static unsigned long __initdata nodemap_size;
 
+DEFINE_PER_CPU(int, node_number) = 0;
+EXPORT_PER_CPU_SYMBOL(node_number);
+
+/*
+ * Map cpu index to node index
+ */
+DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
+
+/*
+ * Which logical CPUs are on which nodes
+ */
+cpumask_t *node_to_cpumask_map;
+EXPORT_SYMBOL(node_to_cpumask_map);
+
 /*
  * Given a shift value, try to populate memnodemap[]
  * Returns :
@@ -640,3 +661,199 @@ void __init init_cpu_to_node(void)
 #endif
 
 
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: node_to_cpumask() is not valid until after this is done.
+ * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
+ */
+void __init setup_node_to_cpumask_map(void)
+{
+	unsigned int node, num = 0;
+	cpumask_t *map;
+
+	/* setup nr_node_ids if not done yet */
+	if (nr_node_ids == MAX_NUMNODES) {
+		for_each_node_mask(node, node_possible_map)
+			num = node;
+		nr_node_ids = num + 1;
+	}
+
+	/* allocate the map */
+	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
+	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
+
+	pr_debug("Node to cpumask map at %p for %d nodes\n",
+		 map, nr_node_ids);
+
+	/* node_to_cpumask() will now work */
+	node_to_cpumask_map = map;
+}
+
+void __cpuinit numa_set_node(int cpu, int node)
+{
+	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
+
+	/* early setting, no percpu area yet */
+	if (cpu_to_node_map) {
+		cpu_to_node_map[cpu] = node;
+		return;
+	}
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
+		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
+		dump_stack();
+		return;
+	}
+#endif
+	per_cpu(x86_cpu_to_node_map, cpu) = node;
+
+	if (node != NUMA_NO_NODE)
+		per_cpu(node_number, cpu) = node;
+}
+
+void __cpuinit numa_clear_node(int cpu)
+{
+	numa_set_node(cpu, NUMA_NO_NODE);
+}
+
+#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+
+#else /* CONFIG_DEBUG_PER_CPU_MAPS */
+
+/*
+ * --------- debug versions of the numa functions ---------
+ */
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+	int node = early_cpu_to_node(cpu);
+	cpumask_t *mask;
+	char buf[64];
+
+	if (node_to_cpumask_map == NULL) {
+		printk(KERN_ERR "node_to_cpumask_map NULL\n");
+		dump_stack();
+		return;
+	}
+
+	mask = &node_to_cpumask_map[node];
+	if (enable)
+		cpu_set(cpu, *mask);
+	else
+		cpu_clear(cpu, *mask);
+
+	cpulist_scnprintf(buf, sizeof(buf), mask);
+	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
+}
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+	numa_set_cpumask(cpu, 1);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+	numa_set_cpumask(cpu, 0);
+}
+
+int cpu_to_node(int cpu)
+{
+	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
+		printk(KERN_WARNING
+			"cpu_to_node(%d): usage too early!\n", cpu);
+		dump_stack();
+		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+	}
+	return per_cpu(x86_cpu_to_node_map, cpu);
+}
+EXPORT_SYMBOL(cpu_to_node);
+
+/*
+ * Same function as cpu_to_node() but used if called before the
+ * per_cpu areas are setup.
+ */
+int early_cpu_to_node(int cpu)
+{
+	if (early_per_cpu_ptr(x86_cpu_to_node_map))
+		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+
+	if (!per_cpu_offset(cpu)) {
+		printk(KERN_WARNING
+			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
+		dump_stack();
+		return NUMA_NO_NODE;
+	}
+	return per_cpu(x86_cpu_to_node_map, cpu);
+}
+
+
+/* empty cpumask */
+static const cpumask_t cpu_mask_none;
+
+/*
+ * Returns a pointer to the bitmask of CPUs on Node 'node'.
+ */
+const cpumask_t *cpumask_of_node(int node)
+{
+	if (node_to_cpumask_map == NULL) {
+		printk(KERN_WARNING
+			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
+			node);
+		dump_stack();
+		return (const cpumask_t *)&cpu_online_map;
+	}
+	if (node >= nr_node_ids) {
+		printk(KERN_WARNING
+			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
+			node, nr_node_ids);
+		dump_stack();
+		return &cpu_mask_none;
+	}
+	return &node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(cpumask_of_node);
+
+/*
+ * Returns a bitmask of CPUs on Node 'node'.
+ *
+ * Side note: this function creates the returned cpumask on the stack
+ * so with a high NR_CPUS count, excessive stack space is used. The
+ * node_to_cpumask_ptr function should be used whenever possible.
+ */
+cpumask_t node_to_cpumask(int node)
+{
+	if (node_to_cpumask_map == NULL) {
+		printk(KERN_WARNING
+			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
+		dump_stack();
+		return cpu_online_map;
+	}
+	if (node >= nr_node_ids) {
+		printk(KERN_WARNING
+			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
+			node, nr_node_ids);
+		dump_stack();
+		return cpu_mask_none;
+	}
+	return node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(node_to_cpumask);
+
+/*
+ * --------- end of debug versions of the numa functions ---------
+ */
+
+#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 72c2eb9b64cd..7735e3dd359c 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -281,7 +281,6 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 
 	per_cpu(current_task, cpu) = idle;
 #ifdef CONFIG_X86_32
-	init_gdt(cpu);
 	irq_ctx_init(cpu);
 #else
 	clear_tsk_thread_flag(idle, TIF_FORK);