Diffstat (limited to 'arch/x86/xen')
-rw-r--r-- | arch/x86/xen/smp.c | 171
1 file changed, 171 insertions(+), 0 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index f702199312a5..e693812ac59a 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -15,6 +15,7 @@
  * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
+#include <linux/kernel_stat.h>
 #include <linux/err.h>
 #include <linux/smp.h>
 
@@ -35,6 +36,8 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
+static void __cpuinit xen_init_lock_cpu(int cpu);
+
 cpumask_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);
@@ -179,6 +182,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
         unsigned cpu;
 
+        xen_init_lock_cpu(0);
+
         smp_store_cpu_info(0);
         cpu_data(0).x86_max_cores = 1;
         set_cpu_sibling_map(0);
@@ -301,6 +306,7 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
         clear_tsk_thread_flag(idle, TIF_FORK);
 #endif
         xen_setup_timer(cpu);
+        xen_init_lock_cpu(cpu);
 
         per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
@@ -413,6 +419,170 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
         return IRQ_HANDLED;
 }
 
+struct xen_spinlock {
+        unsigned char lock;             /* 0 -> free; 1 -> locked */
+        unsigned short spinners;        /* count of waiting cpus */
+};
+
+static int xen_spin_is_locked(struct raw_spinlock *lock)
+{
+        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+        return xl->lock != 0;
+}
+
+static int xen_spin_is_contended(struct raw_spinlock *lock)
+{
+        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+        /* Not strictly true; this is only the count of contended
+           lock-takers entering the slow path. */
+        return xl->spinners != 0;
+}
+
+static int xen_spin_trylock(struct raw_spinlock *lock)
+{
+        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+        u8 old = 1;
+
+        asm("xchgb %b0,%1"
+            : "+q" (old), "+m" (xl->lock) : : "memory");
+
+        return old == 0;
+}
+
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
+
+static inline void spinning_lock(struct xen_spinlock *xl)
+{
+        __get_cpu_var(lock_spinners) = xl;
+        wmb();                  /* set lock of interest before count */
+        asm(LOCK_PREFIX " incw %0"
+            : "+m" (xl->spinners) : : "memory");
+}
+
+static inline void unspinning_lock(struct xen_spinlock *xl)
+{
+        asm(LOCK_PREFIX " decw %0"
+            : "+m" (xl->spinners) : : "memory");
+        wmb();                  /* decrement count before clearing lock */
+        __get_cpu_var(lock_spinners) = NULL;
+}
+
+static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
+{
+        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+        int irq = __get_cpu_var(lock_kicker_irq);
+        int ret;
+
+        /* If kicker interrupts not initialized yet, just spin */
+        if (irq == -1)
+                return 0;
+
+        /* announce we're spinning */
+        spinning_lock(xl);
+
+        /* clear pending */
+        xen_clear_irq_pending(irq);
+
+        /* check again make sure it didn't become free while
+           we weren't looking */
+        ret = xen_spin_trylock(lock);
+        if (ret)
+                goto out;
+
+        /* block until irq becomes pending */
+        xen_poll_irq(irq);
+        kstat_this_cpu.irqs[irq]++;
+
+out:
+        unspinning_lock(xl);
+        return ret;
+}
+
+static void xen_spin_lock(struct raw_spinlock *lock)
+{
+        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+        int timeout;
+        u8 oldval;
+
+        do {
+                timeout = 1 << 10;
+
+                asm("1: xchgb %1,%0\n"
+                    "   testb %1,%1\n"
+                    "   jz 3f\n"
+                    "2: rep;nop\n"
+                    "   cmpb $0,%0\n"
+                    "   je 1b\n"
+                    "   dec %2\n"
+                    "   jnz 2b\n"
+                    "3:\n"
+                    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
+                    : "1" (1)
+                    : "memory");
+
+        } while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
+}
+
+static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+{
+        int cpu;
+
+        for_each_online_cpu(cpu) {
+                /* XXX should mix up next cpu selection */
+                if (per_cpu(lock_spinners, cpu) == xl) {
+                        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+                        break;
+                }
+        }
+}
+
+static void xen_spin_unlock(struct raw_spinlock *lock)
+{
+        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+        smp_wmb();              /* make sure no writes get moved after unlock */
+        xl->lock = 0;           /* release lock */
+
+        /* make sure unlock happens before kick */
+        barrier();
+
+        if (unlikely(xl->spinners))
+                xen_spin_unlock_slow(xl);
+}
+
+static __cpuinit void xen_init_lock_cpu(int cpu)
+{
+        int irq;
+        const char *name;
+
+        name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
+        irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
+                                     cpu,
+                                     xen_reschedule_interrupt,
+                                     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+                                     name,
+                                     NULL);
+
+        if (irq >= 0) {
+                disable_irq(irq); /* make sure it's never delivered */
+                per_cpu(lock_kicker_irq, cpu) = irq;
+        }
+
+        printk("cpu %d spinlock event irq %d\n", cpu, irq);
+}
+
+static void __init xen_init_spinlocks(void)
+{
+        pv_lock_ops.spin_is_locked = xen_spin_is_locked;
+        pv_lock_ops.spin_is_contended = xen_spin_is_contended;
+        pv_lock_ops.spin_lock = xen_spin_lock;
+        pv_lock_ops.spin_trylock = xen_spin_trylock;
+        pv_lock_ops.spin_unlock = xen_spin_unlock;
+}
+
 static const struct smp_ops xen_smp_ops __initdata = {
         .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
         .smp_prepare_cpus = xen_smp_prepare_cpus,
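For readers who want to poke at the locking protocol outside the kernel, the following is a minimal user-space sketch of the same idea: a byte lock taken with an atomic exchange, plus a spinner count that gates the slow path. It uses GCC __atomic builtins; the model_* names are hypothetical stand-ins, and sched_yield() merely approximates where the kernel code blocks in xen_poll_irq() and is woken by the XEN_SPIN_UNLOCK_VECTOR kick. It illustrates the protocol, not the patch's actual code.

/* model_spinlock.c - hypothetical user-space model of the protocol above */
#include <sched.h>
#include <stdbool.h>

struct model_spinlock {
        unsigned char lock;             /* 0 -> free; 1 -> locked */
        unsigned short spinners;        /* threads waiting in the slow path */
};

static bool model_trylock(struct model_spinlock *xl)
{
        /* atomic exchange, like the "xchgb %b0,%1" in xen_spin_trylock() */
        return __atomic_exchange_n(&xl->lock, 1, __ATOMIC_ACQUIRE) == 0;
}

void model_lock(struct model_spinlock *xl)
{
        int timeout;

        for (;;) {
                /* fast path: bounded spin, as in xen_spin_lock() */
                for (timeout = 1 << 10; timeout; timeout--)
                        if (model_trylock(xl))
                                return;

                /* slow path: announce we are waiting, re-check, then "block" */
                __atomic_add_fetch(&xl->spinners, 1, __ATOMIC_SEQ_CST);
                if (model_trylock(xl)) {
                        __atomic_sub_fetch(&xl->spinners, 1, __ATOMIC_SEQ_CST);
                        return;
                }
                sched_yield();          /* kernel version blocks in xen_poll_irq() */
                __atomic_sub_fetch(&xl->spinners, 1, __ATOMIC_SEQ_CST);
        }
}

void model_unlock(struct model_spinlock *xl)
{
        __atomic_store_n(&xl->lock, 0, __ATOMIC_RELEASE);
        /* kernel version: if (xl->spinners) it sends the unlock kick IPI */
}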
@@ -430,4 +600,5 @@ void __init xen_smp_init(void)
 {
         smp_ops = xen_smp_ops;
         xen_fill_possible_map();
+        xen_init_spinlocks();
 }
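A tiny driver, assumed to be appended to the same file as the model sketch above (the thread count, iteration count, and gcc command line are illustrative, not from this patch), exercises the lock from several pthreads and checks that the protected counter comes out exact. Build with something like: gcc -O2 -pthread model_spinlock.c -o lock_demo

/* hypothetical driver for the user-space model above */
#include <pthread.h>
#include <stdio.h>

static struct model_spinlock demo_lock;         /* zero-initialised: free, no spinners */
static long counter;

static void *worker(void *arg)
{
        int i;

        (void)arg;
        for (i = 0; i < 100000; i++) {
                model_lock(&demo_lock);
                counter++;                      /* serialised by the model lock */
                model_unlock(&demo_lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t tid[4];
        int i;

        for (i = 0; i < 4; i++)
                pthread_create(&tid[i], NULL, worker, NULL);
        for (i = 0; i < 4; i++)
                pthread_join(tid[i], NULL);

        printf("counter = %ld (expected %d)\n", counter, 4 * 100000);
        return 0;
}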