author		Jeremy Fitzhardinge <jeremy@goop.org>	2008-07-08 18:06:43 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-16 04:58:27 -0400
commit		c7b75947f89d45493562ede6d9ee7311dfa5c4ce
tree		954e5c75d950c2b98cfb7b4040acf370ebbcc8ad
parent		5b09b2876ed1a8e34a0da8f069575fc6174e2077
xen64: smp.c compile hacking
A number of random changes to make xen/smp.c compile in 64-bit mode.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 arch/x86/xen/setup.c   |  7 +------
 arch/x86/xen/smp.c     | 98 +++++++++++++++++++++++++-----------------------
 arch/x86/xen/xen-ops.h |  2 --
 3 files changed, 58 insertions(+), 49 deletions(-)
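The most consequential change in the smp.c hunks below is the reworked bringup handshake: xen_cpu_up() now marks the target CPU_UP_PREPARE before issuing VCPUOP_up, then yields until the new vCPU itself stores CPU_ONLINE at the end of cpu_bringup_and_idle(), instead of the boot CPU setting cpu_online_map on the AP's behalf. What follows is a minimal user-space sketch of that handshake, an illustration only and not kernel code: pthreads stand in for the boot CPU and the new vCPU, pthread_create() for HYPERVISOR_vcpu_op(VCPUOP_up, ...), sched_yield() for HYPERVISOR_sched_op(SCHEDOP_yield, 0), and the file name handshake.c is hypothetical.

/*
 * handshake.c - sketch of the CPU_UP_PREPARE -> CPU_ONLINE handshake.
 * Build: cc -pthread handshake.c
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

enum cpu_state { CPU_DEAD, CPU_UP_PREPARE, CPU_ONLINE };

static volatile enum cpu_state cpu_state = CPU_DEAD;	/* per_cpu(cpu_state, cpu) */

static void *cpu_bringup_and_idle(void *unused)
{
	/* ... cpu_init(), sibling maps, clockevents would run here ... */
	cpu_state = CPU_ONLINE;		/* x86_write_percpu(cpu_state, CPU_ONLINE) */
	__sync_synchronize();		/* wmb() */
	return NULL;
}

int main(void)
{
	pthread_t vcpu;

	cpu_state = CPU_UP_PREPARE;	/* boot CPU prepares the target */
	pthread_create(&vcpu, NULL, cpu_bringup_and_idle, NULL);	/* VCPUOP_up */

	while (cpu_state != CPU_ONLINE)	/* wait for the new CPU to report in */
		sched_yield();		/* SCHEDOP_yield */

	printf("secondary cpu is online\n");
	pthread_join(vcpu, NULL);
	return 0;
}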
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index e0a39595bde3..f52f3855fb6b 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -98,7 +98,7 @@ void xen_enable_sysenter(void)
 	/* Mask events on entry, even though they get enabled immediately */
 	static struct callback_register sysenter = {
 		.type = CALLBACKTYPE_sysenter,
-		.address = { __KERNEL_CS, (unsigned long)xen_sysenter_target },
+		.address = XEN_CALLBACK(__KERNEL_CS, xen_sysenter_target),
 		.flags = CALLBACKF_mask_events,
 	};
 
@@ -143,11 +143,6 @@ void __init xen_arch_setup(void)
 
 	pm_idle = xen_idle;
 
-#ifdef CONFIG_SMP
-	/* fill cpus_possible with all available cpus */
-	xen_fill_possible_map();
-#endif
-
 	paravirt_disable_iospace();
 
 	fiddle_vdso();
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 91fae8ff756e..800bb2191e2a 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -66,13 +66,21 @@ static __cpuinit void cpu_bringup_and_idle(void)
 	int cpu = smp_processor_id();
 
 	cpu_init();
+	preempt_disable();
+
 	xen_enable_sysenter();
 
-	preempt_disable();
-	per_cpu(cpu_state, cpu) = CPU_ONLINE;
+	cpu = smp_processor_id();
+	smp_store_cpu_info(cpu);
+	cpu_data(cpu).x86_max_cores = 1;
+	set_cpu_sibling_map(cpu);
 
 	xen_setup_cpu_clockevents();
 
+	cpu_set(cpu, cpu_online_map);
+	x86_write_percpu(cpu_state, CPU_ONLINE);
+	wmb();
+
 	/* We can take interrupts now: we're officially "up". */
 	local_irq_enable();
 
@@ -141,7 +149,7 @@ static int xen_smp_intr_init(unsigned int cpu)
 	return rc;
 }
 
-void __init xen_fill_possible_map(void)
+static void __init xen_fill_possible_map(void)
 {
 	int i, rc;
 
@@ -154,24 +162,12 @@ void __init xen_fill_possible_map(void)
 
 static void __init xen_smp_prepare_boot_cpu(void)
 {
-	int cpu;
-
 	BUG_ON(smp_processor_id() != 0);
 	native_smp_prepare_boot_cpu();
 
 	/* We've switched to the "real" per-cpu gdt, so make sure the
 	   old memory can be recycled */
-	make_lowmem_page_readwrite(&per_cpu__gdt_page);
-
-	for_each_possible_cpu(cpu) {
-		cpus_clear(per_cpu(cpu_sibling_map, cpu));
-		/*
-		 * cpu_core_map lives in a per cpu area that is cleared
-		 * when the per cpu array is allocated.
-		 *
-		 * cpus_clear(per_cpu(cpu_core_map, cpu));
-		 */
-	}
+	make_lowmem_page_readwrite(&per_cpu_var(gdt_page));
 
 	xen_setup_vcpu_info_placement();
 }
@@ -180,17 +176,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned cpu;
 
-	for_each_possible_cpu(cpu) {
-		cpus_clear(per_cpu(cpu_sibling_map, cpu));
-		/*
-		 * cpu_core_ map will be zeroed when the per
-		 * cpu area is allocated.
-		 *
-		 * cpus_clear(per_cpu(cpu_core_map, cpu));
-		 */
-	}
-
 	smp_store_cpu_info(0);
+	cpu_data(0).x86_max_cores = 1;
 	set_cpu_sibling_map(0);
 
 	if (xen_smp_intr_init(0))
@@ -225,7 +212,7 @@ static __cpuinit int
 cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 {
 	struct vcpu_guest_context *ctxt;
-	struct gdt_page *gdt = &per_cpu(gdt_page, cpu);
+	struct desc_struct *gdt;
 
 	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
 		return 0;
@@ -234,12 +221,15 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	if (ctxt == NULL)
 		return -ENOMEM;
 
+	gdt = get_cpu_gdt_table(cpu);
+
 	ctxt->flags = VGCF_IN_KERNEL;
 	ctxt->user_regs.ds = __USER_DS;
 	ctxt->user_regs.es = __USER_DS;
-	ctxt->user_regs.fs = __KERNEL_PERCPU;
-	ctxt->user_regs.gs = 0;
 	ctxt->user_regs.ss = __KERNEL_DS;
+#ifdef CONFIG_X86_32
+	ctxt->user_regs.fs = __KERNEL_PERCPU;
+#endif
 	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
 	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
 
@@ -249,11 +239,11 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 
 	ctxt->ldt_ents = 0;
 
-	BUG_ON((unsigned long)gdt->gdt & ~PAGE_MASK);
-	make_lowmem_page_readonly(gdt->gdt);
+	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+	make_lowmem_page_readonly(gdt);
 
-	ctxt->gdt_frames[0] = virt_to_mfn(gdt->gdt);
-	ctxt->gdt_ents      = ARRAY_SIZE(gdt->gdt);
+	ctxt->gdt_frames[0] = virt_to_mfn(gdt);
+	ctxt->gdt_ents      = GDT_ENTRIES;
 
 	ctxt->user_regs.cs = __KERNEL_CS;
 	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
@@ -261,9 +251,11 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	ctxt->kernel_ss = __KERNEL_DS;
 	ctxt->kernel_sp = idle->thread.sp0;
 
+#ifdef CONFIG_X86_32
 	ctxt->event_callback_cs = __KERNEL_CS;
-	ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
 	ctxt->failsafe_callback_cs = __KERNEL_CS;
+#endif
+	ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
 	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;
 
 	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
@@ -287,11 +279,28 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 		return rc;
 #endif
 
+#ifdef CONFIG_X86_64
+	/* Allocate node local memory for AP pdas */
+	WARN_ON(cpu == 0);
+	if (cpu > 0) {
+		rc = get_local_pda(cpu);
+		if (rc)
+			return rc;
+	}
+#endif
+
+#ifdef CONFIG_X86_32
 	init_gdt(cpu);
 	per_cpu(current_task, cpu) = idle;
 	irq_ctx_init(cpu);
+#else
+	cpu_pda(cpu)->pcurrent = idle;
+	clear_tsk_thread_flag(idle, TIF_FORK);
+#endif
 	xen_setup_timer(cpu);
 
+	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
 	/* make sure interrupts start blocked */
 	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
 
@@ -306,16 +315,14 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 	if (rc)
 		return rc;
 
-	smp_store_cpu_info(cpu);
-	set_cpu_sibling_map(cpu);
-	/* This must be done before setting cpu_online_map */
-	wmb();
-
-	cpu_set(cpu, cpu_online_map);
-
 	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
 	BUG_ON(rc);
 
+	while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
+		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+		barrier();
+	}
+
 	return 0;
 }
 
@@ -379,7 +386,11 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_interrupt();
+#ifdef CONFIG_X86_32
 	__get_cpu_var(irq_stat).irq_call_count++;
+#else
+	add_pda(irq_call_count, 1);
+#endif
 	irq_exit();
 
 	return IRQ_HANDLED;
@@ -389,7 +400,11 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_single_interrupt();
+#ifdef CONFIG_X86_32
 	__get_cpu_var(irq_stat).irq_call_count++;
+#else
+	add_pda(irq_call_count, 1);
+#endif
 	irq_exit();
 
 	return IRQ_HANDLED;
@@ -411,4 +426,5 @@ static const struct smp_ops xen_smp_ops __initdata = {
 void __init xen_smp_init(void)
 {
 	smp_ops = xen_smp_ops;
+	xen_fill_possible_map();
 }
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 81a779fc9b26..aca4a7803e2c 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -44,8 +44,6 @@ bool xen_vcpu_stolen(int vcpu);
 
 void xen_mark_init_mm_pinned(void);
 
-void __init xen_fill_possible_map(void);
-
 void __init xen_setup_vcpu_info_placement(void);
 
 #ifdef CONFIG_SMP