Diffstat (limited to 'arch/x86/xen/smp.c')
-rw-r--r-- | arch/x86/xen/smp.c | 306
1 file changed, 257 insertions, 49 deletions
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 233156f39b7f..e693812ac59a 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -15,6 +15,7 @@
  * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
+#include <linux/kernel_stat.h>
 #include <linux/err.h>
 #include <linux/smp.h>
 
@@ -35,6 +36,8 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
+static void __cpuinit xen_init_lock_cpu(int cpu);
+
 cpumask_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);
@@ -66,13 +69,22 @@ static __cpuinit void cpu_bringup_and_idle(void)
 	int cpu = smp_processor_id();
 
 	cpu_init();
+	preempt_disable();
+
 	xen_enable_sysenter();
+	xen_enable_syscall();
 
-	preempt_disable();
-	per_cpu(cpu_state, cpu) = CPU_ONLINE;
+	cpu = smp_processor_id();
+	smp_store_cpu_info(cpu);
+	cpu_data(cpu).x86_max_cores = 1;
+	set_cpu_sibling_map(cpu);
 
 	xen_setup_cpu_clockevents();
 
+	cpu_set(cpu, cpu_online_map);
+	x86_write_percpu(cpu_state, CPU_ONLINE);
+	wmb();
+
 	/* We can take interrupts now: we're officially "up". */
 	local_irq_enable();
 
@@ -141,56 +153,39 @@ static int xen_smp_intr_init(unsigned int cpu)
 	return rc;
 }
 
-void __init xen_fill_possible_map(void)
+static void __init xen_fill_possible_map(void)
 {
 	int i, rc;
 
 	for (i = 0; i < NR_CPUS; i++) {
 		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-		if (rc >= 0)
+		if (rc >= 0) {
+			num_processors++;
 			cpu_set(i, cpu_possible_map);
+		}
 	}
 }
 
-void __init xen_smp_prepare_boot_cpu(void)
+static void __init xen_smp_prepare_boot_cpu(void)
 {
-	int cpu;
-
 	BUG_ON(smp_processor_id() != 0);
 	native_smp_prepare_boot_cpu();
 
 	/* We've switched to the "real" per-cpu gdt, so make sure the
 	   old memory can be recycled */
-	make_lowmem_page_readwrite(&per_cpu__gdt_page);
-
-	for_each_possible_cpu(cpu) {
-		cpus_clear(per_cpu(cpu_sibling_map, cpu));
-		/*
-		 * cpu_core_map lives in a per cpu area that is cleared
-		 * when the per cpu array is allocated.
-		 *
-		 * cpus_clear(per_cpu(cpu_core_map, cpu));
-		 */
-	}
+	make_lowmem_page_readwrite(&per_cpu_var(gdt_page));
 
 	xen_setup_vcpu_info_placement();
 }
 
-void __init xen_smp_prepare_cpus(unsigned int max_cpus)
+static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned cpu;
 
-	for_each_possible_cpu(cpu) {
-		cpus_clear(per_cpu(cpu_sibling_map, cpu));
-		/*
-		 * cpu_core_ map will be zeroed when the per
-		 * cpu area is allocated.
-		 *
-		 * cpus_clear(per_cpu(cpu_core_map, cpu));
-		 */
-	}
+	xen_init_lock_cpu(0);
 
 	smp_store_cpu_info(0);
+	cpu_data(0).x86_max_cores = 1;
 	set_cpu_sibling_map(0);
 
 	if (xen_smp_intr_init(0))
@@ -225,7 +220,7 @@ static __cpuinit int
 cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 {
 	struct vcpu_guest_context *ctxt;
-	struct gdt_page *gdt = &per_cpu(gdt_page, cpu);
+	struct desc_struct *gdt;
 
 	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
 		return 0;
@@ -234,12 +229,15 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	if (ctxt == NULL)
 		return -ENOMEM;
 
+	gdt = get_cpu_gdt_table(cpu);
+
 	ctxt->flags = VGCF_IN_KERNEL;
 	ctxt->user_regs.ds = __USER_DS;
 	ctxt->user_regs.es = __USER_DS;
-	ctxt->user_regs.fs = __KERNEL_PERCPU;
-	ctxt->user_regs.gs = 0;
 	ctxt->user_regs.ss = __KERNEL_DS;
+#ifdef CONFIG_X86_32
+	ctxt->user_regs.fs = __KERNEL_PERCPU;
+#endif
 	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
 	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
 
@@ -249,11 +247,11 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 
 	ctxt->ldt_ents = 0;
 
-	BUG_ON((unsigned long)gdt->gdt & ~PAGE_MASK);
-	make_lowmem_page_readonly(gdt->gdt);
+	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+	make_lowmem_page_readonly(gdt);
 
-	ctxt->gdt_frames[0] = virt_to_mfn(gdt->gdt);
-	ctxt->gdt_ents = ARRAY_SIZE(gdt->gdt);
+	ctxt->gdt_frames[0] = virt_to_mfn(gdt);
+	ctxt->gdt_ents = GDT_ENTRIES;
 
 	ctxt->user_regs.cs = __KERNEL_CS;
 	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
@@ -261,9 +259,11 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	ctxt->kernel_ss = __KERNEL_DS;
 	ctxt->kernel_sp = idle->thread.sp0;
 
+#ifdef CONFIG_X86_32
 	ctxt->event_callback_cs = __KERNEL_CS;
-	ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
 	ctxt->failsafe_callback_cs = __KERNEL_CS;
+#endif
+	ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
 	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;
 
 	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
@@ -276,7 +276,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	return 0;
 }
 
-int __cpuinit xen_cpu_up(unsigned int cpu)
+static int __cpuinit xen_cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle = idle_task(cpu);
 	int rc;
@@ -287,10 +287,28 @@ int __cpuinit xen_cpu_up(unsigned int cpu)
 		return rc;
 #endif
 
+#ifdef CONFIG_X86_64
+	/* Allocate node local memory for AP pdas */
+	WARN_ON(cpu == 0);
+	if (cpu > 0) {
+		rc = get_local_pda(cpu);
+		if (rc)
+			return rc;
+	}
+#endif
+
+#ifdef CONFIG_X86_32
 	init_gdt(cpu);
 	per_cpu(current_task, cpu) = idle;
 	irq_ctx_init(cpu);
+#else
+	cpu_pda(cpu)->pcurrent = idle;
+	clear_tsk_thread_flag(idle, TIF_FORK);
+#endif
 	xen_setup_timer(cpu);
+	xen_init_lock_cpu(cpu);
+
+	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
 	/* make sure interrupts start blocked */
 	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
@@ -306,20 +324,18 @@ int __cpuinit xen_cpu_up(unsigned int cpu)
 	if (rc)
 		return rc;
 
-	smp_store_cpu_info(cpu);
-	set_cpu_sibling_map(cpu);
-	/* This must be done before setting cpu_online_map */
-	wmb();
-
-	cpu_set(cpu, cpu_online_map);
-
 	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
 	BUG_ON(rc);
 
+	while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
+		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+		barrier();
+	}
+
 	return 0;
 }
 
-void xen_smp_cpus_done(unsigned int max_cpus)
+static void xen_smp_cpus_done(unsigned int max_cpus)
 {
 }
 
@@ -335,12 +351,12 @@ static void stop_self(void *v)
 	BUG();
 }
 
-void xen_smp_send_stop(void)
+static void xen_smp_send_stop(void)
 {
 	smp_call_function(stop_self, NULL, 0);
 }
 
-void xen_smp_send_reschedule(int cpu)
+static void xen_smp_send_reschedule(int cpu)
 {
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
@@ -355,7 +371,7 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 		xen_send_IPI_one(cpu, vector);
 }
 
-void xen_smp_send_call_function_ipi(cpumask_t mask)
+static void xen_smp_send_call_function_ipi(cpumask_t mask)
 {
 	int cpu;
 
@@ -370,7 +386,7 @@ void xen_smp_send_call_function_ipi(cpumask_t mask)
 	}
 }
 
-void xen_smp_send_call_function_single_ipi(int cpu)
+static void xen_smp_send_call_function_single_ipi(int cpu)
 {
 	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }
@@ -379,7 +395,11 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_interrupt();
+#ifdef CONFIG_X86_32
 	__get_cpu_var(irq_stat).irq_call_count++;
+#else
+	add_pda(irq_call_count, 1);
+#endif
 	irq_exit();
 
 	return IRQ_HANDLED;
@@ -389,8 +409,196 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_single_interrupt();
+#ifdef CONFIG_X86_32
 	__get_cpu_var(irq_stat).irq_call_count++;
+#else
+	add_pda(irq_call_count, 1);
+#endif
 	irq_exit();
 
 	return IRQ_HANDLED;
 }
+
+struct xen_spinlock {
+	unsigned char lock;		/* 0 -> free; 1 -> locked */
+	unsigned short spinners;	/* count of waiting cpus */
+};
+
+static int xen_spin_is_locked(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	return xl->lock != 0;
+}
+
+static int xen_spin_is_contended(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	/* Not strictly true; this is only the count of contended
+	   lock-takers entering the slow path. */
+	return xl->spinners != 0;
+}
+
+static int xen_spin_trylock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	u8 old = 1;
+
+	asm("xchgb %b0,%1"
+	    : "+q" (old), "+m" (xl->lock) : : "memory");
+
+	return old == 0;
+}
+
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
+
+static inline void spinning_lock(struct xen_spinlock *xl)
+{
+	__get_cpu_var(lock_spinners) = xl;
+	wmb();			/* set lock of interest before count */
+	asm(LOCK_PREFIX " incw %0"
+	    : "+m" (xl->spinners) : : "memory");
+}
+
+static inline void unspinning_lock(struct xen_spinlock *xl)
+{
+	asm(LOCK_PREFIX " decw %0"
+	    : "+m" (xl->spinners) : : "memory");
+	wmb();			/* decrement count before clearing lock */
+	__get_cpu_var(lock_spinners) = NULL;
+}
+
+static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	int irq = __get_cpu_var(lock_kicker_irq);
+	int ret;
+
+	/* If kicker interrupts not initialized yet, just spin */
+	if (irq == -1)
+		return 0;
+
+	/* announce we're spinning */
+	spinning_lock(xl);
+
+	/* clear pending */
+	xen_clear_irq_pending(irq);
+
+	/* check again make sure it didn't become free while
+	   we weren't looking */
+	ret = xen_spin_trylock(lock);
+	if (ret)
+		goto out;
+
+	/* block until irq becomes pending */
+	xen_poll_irq(irq);
+	kstat_this_cpu.irqs[irq]++;
+
+out:
+	unspinning_lock(xl);
+	return ret;
+}
+
+static void xen_spin_lock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	int timeout;
+	u8 oldval;
+
+	do {
+		timeout = 1 << 10;
+
+		asm("1: xchgb %1,%0\n"
+		    "   testb %1,%1\n"
+		    "   jz 3f\n"
+		    "2: rep;nop\n"
+		    "   cmpb $0,%0\n"
+		    "   je 1b\n"
+		    "   dec %2\n"
+		    "   jnz 2b\n"
+		    "3:\n"
+		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
+		    : "1" (1)
+		    : "memory");
+
+	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
+}
+
+static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		/* XXX should mix up next cpu selection */
+		if (per_cpu(lock_spinners, cpu) == xl) {
+			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+			break;
+		}
+	}
+}
+
+static void xen_spin_unlock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	smp_wmb();		/* make sure no writes get moved after unlock */
+	xl->lock = 0;		/* release lock */
+
+	/* make sure unlock happens before kick */
+	barrier();
+
+	if (unlikely(xl->spinners))
+		xen_spin_unlock_slow(xl);
+}
+
+static __cpuinit void xen_init_lock_cpu(int cpu)
+{
+	int irq;
+	const char *name;
+
+	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
+	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
+				     cpu,
+				     xen_reschedule_interrupt,
+				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				     name,
+				     NULL);
+
+	if (irq >= 0) {
+		disable_irq(irq); /* make sure it's never delivered */
+		per_cpu(lock_kicker_irq, cpu) = irq;
+	}
+
+	printk("cpu %d spinlock event irq %d\n", cpu, irq);
+}
+
+static void __init xen_init_spinlocks(void)
+{
+	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
+	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
+	pv_lock_ops.spin_lock = xen_spin_lock;
+	pv_lock_ops.spin_trylock = xen_spin_trylock;
+	pv_lock_ops.spin_unlock = xen_spin_unlock;
+}
+
+static const struct smp_ops xen_smp_ops __initdata = {
+	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
+	.smp_prepare_cpus = xen_smp_prepare_cpus,
+	.cpu_up = xen_cpu_up,
+	.smp_cpus_done = xen_smp_cpus_done,
+
+	.smp_send_stop = xen_smp_send_stop,
+	.smp_send_reschedule = xen_smp_send_reschedule,
+
+	.send_call_func_ipi = xen_smp_send_call_function_ipi,
+	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
+};
+
+void __init xen_smp_init(void)
+{
+	smp_ops = xen_smp_ops;
+	xen_fill_possible_map();
+	xen_init_spinlocks();
+}
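
Note on the spinlock hunks above: they plug a two-level "byte lock" into pv_lock_ops. xen_spin_lock() spins on xl->lock for a bounded number of iterations; a contended CPU then enters xen_spin_lock_slow(), records the lock in lock_spinners, and blocks on its per-cpu event-channel IRQ until xen_spin_unlock_slow() kicks it with XEN_SPIN_UNLOCK_VECTOR. The following is a rough, self-contained userspace illustration of that fast-path/slow-path split. It is a sketch only, not the kernel code: the demo_* names and SPIN_LIMIT are invented for this example, C11 atomics stand in for the inline asm, and sched_yield() stands in for blocking on the event channel.

/* Hypothetical userspace sketch of the byte-lock idea (not kernel code). */
#include <sched.h>      /* sched_yield(): crude stand-in for xen_poll_irq() */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_spinlock {
	atomic_uchar lock;      /* 0 -> free; 1 -> locked */
	atomic_ushort spinners; /* waiters that entered the slow path */
};

#define SPIN_LIMIT (1 << 10)    /* mirrors the "timeout = 1 << 10" fast path */

static bool demo_trylock(struct demo_spinlock *xl)
{
	/* equivalent of the xchgb: take the lock if the old value was 0 */
	return atomic_exchange_explicit(&xl->lock, 1, memory_order_acquire) == 0;
}

static void demo_lock_slow(struct demo_spinlock *xl)
{
	/* announce we're waiting (spinning_lock()), then "block";
	 * the real code polls a per-cpu event-channel IRQ instead of yielding */
	atomic_fetch_add(&xl->spinners, 1);
	while (!demo_trylock(xl))
		sched_yield();
	atomic_fetch_sub(&xl->spinners, 1);
}

static void demo_lock(struct demo_spinlock *xl)
{
	/* fast path: bounded spinning before falling back to the slow path */
	for (int i = 0; i < SPIN_LIMIT; i++)
		if (demo_trylock(xl))
			return;
	demo_lock_slow(xl);
}

static void demo_unlock(struct demo_spinlock *xl)
{
	atomic_store_explicit(&xl->lock, 0, memory_order_release);
	if (atomic_load(&xl->spinners)) {
		/* the kernel code kicks one waiting vcpu here via
		 * xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
		 * nothing to do in this yield-based sketch */
	}
}

int main(void)
{
	struct demo_spinlock l = { 0, 0 };

	demo_lock(&l);
	printf("locked: %u\n", (unsigned)atomic_load(&l.lock));
	demo_unlock(&l);
	printf("unlocked: %u\n", (unsigned)atomic_load(&l.lock));
	return 0;
}

The kernel's unlock path differs in one respect worth noting: xen_spin_unlock() clears the byte and then kicks exactly one CPU whose lock_spinners entry matches the lock, which is why xen_init_lock_cpu() binds (and immediately disables) a dedicated per-cpu IPI that is only ever polled, never delivered.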