Diffstat (limited to 'arch/x86/kernel/kvm.c')
-rw-r--r--	arch/x86/kernel/kvm.c	| 245 -
1 file changed, 0 insertions(+), 245 deletions(-)
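Taken together, the hunks below delete the old pvticketlock slow path (kvm_lock_spinning(), kvm_unlock_kick(), struct kvm_lock_waiting and the debugfs contention statistics) and drop the CONFIG_QUEUED_SPINLOCKS guards, leaving the queued-spinlock hooks as the only paravirt spinlock path. Pieced together from the context lines of the final hunk, kvm_spinlock_init() ends up looking roughly as follows. This is a reconstruction for orientation only, not standalone-compilable code; any checks earlier in the function fall outside the hunk and are omitted here.

/* Reconstructed from the post-patch context lines of the last hunk below;
 * relies on kernel headers and on kvm_wait()/kvm_kick_cpu() defined
 * earlier in arch/x86/kernel/kvm.c.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;
}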
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 1726c4c12336..865058d087ac 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -575,9 +575,6 @@ static void kvm_kick_cpu(int cpu)
 	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 }
 
-
-#ifdef CONFIG_QUEUED_SPINLOCKS
-
 #include <asm/qspinlock.h>
 
 static void kvm_wait(u8 *ptr, u8 val)
@@ -606,243 +603,6 @@ out:
 	local_irq_restore(flags);
 }
 
-#else /* !CONFIG_QUEUED_SPINLOCKS */
-
-enum kvm_contention_stat {
-	TAKEN_SLOW,
-	TAKEN_SLOW_PICKUP,
-	RELEASED_SLOW,
-	RELEASED_SLOW_KICKED,
-	NR_CONTENTION_STATS
-};
-
-#ifdef CONFIG_KVM_DEBUG_FS
-#define HISTO_BUCKETS 30
-
-static struct kvm_spinlock_stats
-{
-	u32 contention_stats[NR_CONTENTION_STATS];
-	u32 histo_spin_blocked[HISTO_BUCKETS+1];
-	u64 time_blocked;
-} spinlock_stats;
-
-static u8 zero_stats;
-
-static inline void check_zero(void)
-{
-	u8 ret;
-	u8 old;
-
-	old = READ_ONCE(zero_stats);
-	if (unlikely(old)) {
-		ret = cmpxchg(&zero_stats, old, 0);
-		/* This ensures only one fellow resets the stat */
-		if (ret == old)
-			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
-	}
-}
-
-static inline void add_stats(enum kvm_contention_stat var, u32 val)
-{
-	check_zero();
-	spinlock_stats.contention_stats[var] += val;
-}
-
-
-static inline u64 spin_time_start(void)
-{
-	return sched_clock();
-}
-
-static void __spin_time_accum(u64 delta, u32 *array)
-{
-	unsigned index;
-
-	index = ilog2(delta);
-	check_zero();
-
-	if (index < HISTO_BUCKETS)
-		array[index]++;
-	else
-		array[HISTO_BUCKETS]++;
-}
-
-static inline void spin_time_accum_blocked(u64 start)
-{
-	u32 delta;
-
-	delta = sched_clock() - start;
-	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
-	spinlock_stats.time_blocked += delta;
-}
-
-static struct dentry *d_spin_debug;
-static struct dentry *d_kvm_debug;
-
-static struct dentry *kvm_init_debugfs(void)
-{
-	d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
-	if (!d_kvm_debug)
-		printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");
-
-	return d_kvm_debug;
-}
-
-static int __init kvm_spinlock_debugfs(void)
-{
-	struct dentry *d_kvm;
-
-	d_kvm = kvm_init_debugfs();
-	if (d_kvm == NULL)
-		return -ENOMEM;
-
-	d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);
-
-	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
-
-	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
-			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
-	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
-			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
-
-	debugfs_create_u32("released_slow", 0444, d_spin_debug,
-			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
-	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
-			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);
-
-	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
-			   &spinlock_stats.time_blocked);
-
-	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
-				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
-
-	return 0;
-}
-fs_initcall(kvm_spinlock_debugfs);
-#else /* !CONFIG_KVM_DEBUG_FS */
-static inline void add_stats(enum kvm_contention_stat var, u32 val)
-{
-}
-
-static inline u64 spin_time_start(void)
-{
-	return 0;
-}
-
-static inline void spin_time_accum_blocked(u64 start)
-{
-}
-#endif /* CONFIG_KVM_DEBUG_FS */
-
-struct kvm_lock_waiting {
-	struct arch_spinlock *lock;
-	__ticket_t want;
-};
-
-/* cpus 'waiting' on a spinlock to become available */
-static cpumask_t waiting_cpus;
-
-/* Track spinlock on which a cpu is waiting */
-static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
-
-__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
-{
-	struct kvm_lock_waiting *w;
-	int cpu;
-	u64 start;
-	unsigned long flags;
-	__ticket_t head;
-
-	if (in_nmi())
-		return;
-
-	w = this_cpu_ptr(&klock_waiting);
-	cpu = smp_processor_id();
-	start = spin_time_start();
-
-	/*
-	 * Make sure an interrupt handler can't upset things in a
-	 * partially setup state.
-	 */
-	local_irq_save(flags);
-
-	/*
-	 * The ordering protocol on this is that the "lock" pointer
-	 * may only be set non-NULL if the "want" ticket is correct.
-	 * If we're updating "want", we must first clear "lock".
-	 */
-	w->lock = NULL;
-	smp_wmb();
-	w->want = want;
-	smp_wmb();
-	w->lock = lock;
-
-	add_stats(TAKEN_SLOW, 1);
-
-	/*
-	 * This uses set_bit, which is atomic but we should not rely on its
-	 * reordering gurantees. So barrier is needed after this call.
-	 */
-	cpumask_set_cpu(cpu, &waiting_cpus);
-
-	barrier();
-
-	/*
-	 * Mark entry to slowpath before doing the pickup test to make
-	 * sure we don't deadlock with an unlocker.
-	 */
-	__ticket_enter_slowpath(lock);
-
-	/* make sure enter_slowpath, which is atomic does not cross the read */
-	smp_mb__after_atomic();
-
-	/*
-	 * check again make sure it didn't become free while
-	 * we weren't looking.
-	 */
-	head = READ_ONCE(lock->tickets.head);
-	if (__tickets_equal(head, want)) {
-		add_stats(TAKEN_SLOW_PICKUP, 1);
-		goto out;
-	}
-
-	/*
-	 * halt until it's our turn and kicked. Note that we do safe halt
-	 * for irq enabled case to avoid hang when lock info is overwritten
-	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
-	 */
-	if (arch_irqs_disabled_flags(flags))
-		halt();
-	else
-		safe_halt();
-
-out:
-	cpumask_clear_cpu(cpu, &waiting_cpus);
-	w->lock = NULL;
-	local_irq_restore(flags);
-	spin_time_accum_blocked(start);
-}
-PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);
-
-/* Kick vcpu waiting on @lock->head to reach value @ticket */
-static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
-{
-	int cpu;
-
-	add_stats(RELEASED_SLOW, 1);
-	for_each_cpu(cpu, &waiting_cpus) {
-		const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
-		if (READ_ONCE(w->lock) == lock &&
-		    READ_ONCE(w->want) == ticket) {
-			add_stats(RELEASED_SLOW_KICKED, 1);
-			kvm_kick_cpu(cpu);
-			break;
-		}
-	}
-}
-
-#endif /* !CONFIG_QUEUED_SPINLOCKS */
-
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
  */
@@ -854,16 +614,11 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
-#ifdef CONFIG_QUEUED_SPINLOCKS
 	__pv_init_lock_hash();
 	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 	pv_lock_ops.wait = kvm_wait;
 	pv_lock_ops.kick = kvm_kick_cpu;
-#else /* !CONFIG_QUEUED_SPINLOCKS */
-	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
-	pv_lock_ops.unlock_kick = kvm_unlock_kick;
-#endif
 }
 
 static __init int kvm_spinlock_init_jump(void)
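For readers unfamiliar with the paravirt queued-spinlock interface that survives this patch, the contract behind pv_lock_ops.wait and pv_lock_ops.kick (kvm_wait() halting until the lock byte stops holding the expected value, kvm_kick_cpu() issuing KVM_HC_KICK_CPU to wake the halted vCPU) can be mimicked in userspace. The sketch below is purely illustrative: pthread primitives stand in for halt() and the kick hypercall, and wait_on_byte()/kick_waiter() are invented names for this example, not kernel or KVM APIs.

/*
 * Userspace analogue of the pv wait/kick contract.  Build with:
 *   cc -pthread wait_kick_demo.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static _Atomic unsigned char lock_byte = 1;	/* 1 == "locked" */

/* Like kvm_wait(): block only while *ptr still holds val. */
static void wait_on_byte(_Atomic unsigned char *ptr, unsigned char val)
{
	pthread_mutex_lock(&m);
	/* Re-check before sleeping so a kick that already happened
	 * (the byte already changed) is never lost. */
	while (atomic_load(ptr) == val)
		pthread_cond_wait(&cv, &m);
	pthread_mutex_unlock(&m);
}

/* Like kvm_kick_cpu(): wake the "vCPU" parked in wait_on_byte().
 * (A real kick targets one specific vCPU by APIC id; a broadcast
 * keeps this demo simple.) */
static void kick_waiter(_Atomic unsigned char *ptr)
{
	pthread_mutex_lock(&m);
	atomic_store(ptr, 0);		/* publish "unlocked" ... */
	pthread_cond_broadcast(&cv);	/* ... then kick */
	pthread_mutex_unlock(&m);
}

static void *waiter(void *arg)
{
	(void)arg;
	wait_on_byte(&lock_byte, 1);
	puts("waiter: kicked, lock byte cleared");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	kick_waiter(&lock_byte);
	pthread_join(t, NULL);
	return 0;
}

The property mirrored here is the one kvm_wait() relies on in the retained code: the waiter re-checks the byte before blocking (with interrupts disabled in the kernel, under the mutex in this sketch), so a kick delivered just before the wait cannot be missed.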