Diffstat (limited to 'arch/x86/xen/smp.c')
-rw-r--r--   arch/x86/xen/smp.c   137
1 file changed, 87 insertions(+), 50 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 233156f39b7..f702199312a 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -66,13 +66,22 @@ static __cpuinit void cpu_bringup_and_idle(void)
 	int cpu = smp_processor_id();
 
 	cpu_init();
+	preempt_disable();
+
 	xen_enable_sysenter();
+	xen_enable_syscall();
 
-	preempt_disable();
-	per_cpu(cpu_state, cpu) = CPU_ONLINE;
+	cpu = smp_processor_id();
+	smp_store_cpu_info(cpu);
+	cpu_data(cpu).x86_max_cores = 1;
+	set_cpu_sibling_map(cpu);
 
 	xen_setup_cpu_clockevents();
 
+	cpu_set(cpu, cpu_online_map);
+	x86_write_percpu(cpu_state, CPU_ONLINE);
+	wmb();
+
 	/* We can take interrupts now: we're officially "up". */
 	local_irq_enable();
 
@@ -141,56 +150,37 @@ static int xen_smp_intr_init(unsigned int cpu)
 	return rc;
 }
 
-void __init xen_fill_possible_map(void)
+static void __init xen_fill_possible_map(void)
 {
 	int i, rc;
 
 	for (i = 0; i < NR_CPUS; i++) {
 		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-		if (rc >= 0)
+		if (rc >= 0) {
+			num_processors++;
 			cpu_set(i, cpu_possible_map);
+		}
 	}
 }
 
-void __init xen_smp_prepare_boot_cpu(void)
+static void __init xen_smp_prepare_boot_cpu(void)
 {
-	int cpu;
-
 	BUG_ON(smp_processor_id() != 0);
 	native_smp_prepare_boot_cpu();
 
 	/* We've switched to the "real" per-cpu gdt, so make sure the
 	   old memory can be recycled */
-	make_lowmem_page_readwrite(&per_cpu__gdt_page);
-
-	for_each_possible_cpu(cpu) {
-		cpus_clear(per_cpu(cpu_sibling_map, cpu));
-		/*
-		 * cpu_core_map lives in a per cpu area that is cleared
-		 * when the per cpu array is allocated.
-		 *
-		 * cpus_clear(per_cpu(cpu_core_map, cpu));
-		 */
-	}
+	make_lowmem_page_readwrite(&per_cpu_var(gdt_page));
 
 	xen_setup_vcpu_info_placement();
 }
 
-void __init xen_smp_prepare_cpus(unsigned int max_cpus)
+static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned cpu;
 
-	for_each_possible_cpu(cpu) {
-		cpus_clear(per_cpu(cpu_sibling_map, cpu));
-		/*
-		 * cpu_core_ map will be zeroed when the per
-		 * cpu area is allocated.
-		 *
-		 * cpus_clear(per_cpu(cpu_core_map, cpu));
-		 */
-	}
-
 	smp_store_cpu_info(0);
+	cpu_data(0).x86_max_cores = 1;
 	set_cpu_sibling_map(0);
 
 	if (xen_smp_intr_init(0))
@@ -225,7 +215,7 @@ static __cpuinit int
 cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 {
 	struct vcpu_guest_context *ctxt;
-	struct gdt_page *gdt = &per_cpu(gdt_page, cpu);
+	struct desc_struct *gdt;
 
 	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
 		return 0;
@@ -234,12 +224,15 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	if (ctxt == NULL)
 		return -ENOMEM;
 
+	gdt = get_cpu_gdt_table(cpu);
+
 	ctxt->flags = VGCF_IN_KERNEL;
 	ctxt->user_regs.ds = __USER_DS;
 	ctxt->user_regs.es = __USER_DS;
-	ctxt->user_regs.fs = __KERNEL_PERCPU;
-	ctxt->user_regs.gs = 0;
 	ctxt->user_regs.ss = __KERNEL_DS;
+#ifdef CONFIG_X86_32
+	ctxt->user_regs.fs = __KERNEL_PERCPU;
+#endif
 	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
 	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
 
@@ -249,11 +242,11 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 
 	ctxt->ldt_ents = 0;
 
-	BUG_ON((unsigned long)gdt->gdt & ~PAGE_MASK);
-	make_lowmem_page_readonly(gdt->gdt);
+	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+	make_lowmem_page_readonly(gdt);
 
-	ctxt->gdt_frames[0] = virt_to_mfn(gdt->gdt);
-	ctxt->gdt_ents = ARRAY_SIZE(gdt->gdt);
+	ctxt->gdt_frames[0] = virt_to_mfn(gdt);
+	ctxt->gdt_ents = GDT_ENTRIES;
 
 	ctxt->user_regs.cs = __KERNEL_CS;
 	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
@@ -261,9 +254,11 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	ctxt->kernel_ss = __KERNEL_DS;
 	ctxt->kernel_sp = idle->thread.sp0;
 
+#ifdef CONFIG_X86_32
 	ctxt->event_callback_cs = __KERNEL_CS;
-	ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
 	ctxt->failsafe_callback_cs = __KERNEL_CS;
+#endif
+	ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
 	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;
 
 	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
@@ -276,7 +271,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	return 0;
 }
 
-int __cpuinit xen_cpu_up(unsigned int cpu)
+static int __cpuinit xen_cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle = idle_task(cpu);
 	int rc;
@@ -287,11 +282,28 @@ int __cpuinit xen_cpu_up(unsigned int cpu)
 		return rc;
 #endif
 
+#ifdef CONFIG_X86_64
+	/* Allocate node local memory for AP pdas */
+	WARN_ON(cpu == 0);
+	if (cpu > 0) {
+		rc = get_local_pda(cpu);
+		if (rc)
+			return rc;
+	}
+#endif
+
+#ifdef CONFIG_X86_32
 	init_gdt(cpu);
 	per_cpu(current_task, cpu) = idle;
 	irq_ctx_init(cpu);
+#else
+	cpu_pda(cpu)->pcurrent = idle;
+	clear_tsk_thread_flag(idle, TIF_FORK);
+#endif
 	xen_setup_timer(cpu);
 
+	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
 	/* make sure interrupts start blocked */
 	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
 
@@ -306,20 +318,18 @@ int __cpuinit xen_cpu_up(unsigned int cpu)
 	if (rc)
 		return rc;
 
-	smp_store_cpu_info(cpu);
-	set_cpu_sibling_map(cpu);
-	/* This must be done before setting cpu_online_map */
-	wmb();
-
-	cpu_set(cpu, cpu_online_map);
-
 	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
 	BUG_ON(rc);
 
+	while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
+		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+		barrier();
+	}
+
 	return 0;
 }
 
-void xen_smp_cpus_done(unsigned int max_cpus)
+static void xen_smp_cpus_done(unsigned int max_cpus)
 {
 }
 
@@ -335,12 +345,12 @@ static void stop_self(void *v)
 	BUG();
 }
 
-void xen_smp_send_stop(void)
+static void xen_smp_send_stop(void)
 {
 	smp_call_function(stop_self, NULL, 0);
 }
 
-void xen_smp_send_reschedule(int cpu)
+static void xen_smp_send_reschedule(int cpu)
 {
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
@@ -355,7 +365,7 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 		xen_send_IPI_one(cpu, vector);
 }
 
-void xen_smp_send_call_function_ipi(cpumask_t mask)
+static void xen_smp_send_call_function_ipi(cpumask_t mask)
 {
 	int cpu;
 
@@ -370,7 +380,7 @@ void xen_smp_send_call_function_ipi(cpumask_t mask)
 	}
 }
 
-void xen_smp_send_call_function_single_ipi(int cpu)
+static void xen_smp_send_call_function_single_ipi(int cpu)
 {
 	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }
@@ -379,7 +389,11 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_interrupt();
+#ifdef CONFIG_X86_32
 	__get_cpu_var(irq_stat).irq_call_count++;
+#else
+	add_pda(irq_call_count, 1);
+#endif
 	irq_exit();
 
 	return IRQ_HANDLED;
@@ -389,8 +403,31 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_single_interrupt();
+#ifdef CONFIG_X86_32
 	__get_cpu_var(irq_stat).irq_call_count++;
+#else
+	add_pda(irq_call_count, 1);
+#endif
 	irq_exit();
 
 	return IRQ_HANDLED;
 }
+
+static const struct smp_ops xen_smp_ops __initdata = {
+	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
+	.smp_prepare_cpus = xen_smp_prepare_cpus,
+	.cpu_up = xen_cpu_up,
+	.smp_cpus_done = xen_smp_cpus_done,
+
+	.smp_send_stop = xen_smp_send_stop,
+	.smp_send_reschedule = xen_smp_send_reschedule,
+
+	.send_call_func_ipi = xen_smp_send_call_function_ipi,
+	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
+};
+
+void __init xen_smp_init(void)
+{
+	smp_ops = xen_smp_ops;
+	xen_fill_possible_map();
+}