diff options
Diffstat (limited to 'arch/x86/kernel/kvm.c')
-rw-r--r-- | arch/x86/kernel/kvm.c | 13 |
1 file changed, 9 insertions, 4 deletions
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 94f643484300..e354cc6446ab 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -609,7 +609,7 @@ static inline void check_zero(void) | |||
609 | u8 ret; | 609 | u8 ret; |
610 | u8 old; | 610 | u8 old; |
611 | 611 | ||
612 | old = ACCESS_ONCE(zero_stats); | 612 | old = READ_ONCE(zero_stats); |
613 | if (unlikely(old)) { | 613 | if (unlikely(old)) { |
614 | ret = cmpxchg(&zero_stats, old, 0); | 614 | ret = cmpxchg(&zero_stats, old, 0); |
615 | /* This ensures only one fellow resets the stat */ | 615 | /* This ensures only one fellow resets the stat */ |
@@ -727,6 +727,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) | |||
727 | int cpu; | 727 | int cpu; |
728 | u64 start; | 728 | u64 start; |
729 | unsigned long flags; | 729 | unsigned long flags; |
730 | __ticket_t head; | ||
730 | 731 | ||
731 | if (in_nmi()) | 732 | if (in_nmi()) |
732 | return; | 733 | return; |
@@ -768,11 +769,15 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) | |||
768 | */ | 769 | */ |
769 | __ticket_enter_slowpath(lock); | 770 | __ticket_enter_slowpath(lock); |
770 | 771 | ||
772 | /* make sure enter_slowpath, which is atomic does not cross the read */ | ||
773 | smp_mb__after_atomic(); | ||
774 | |||
771 | /* | 775 | /* |
772 | * check again make sure it didn't become free while | 776 | * check again make sure it didn't become free while |
773 | * we weren't looking. | 777 | * we weren't looking. |
774 | */ | 778 | */ |
775 | if (ACCESS_ONCE(lock->tickets.head) == want) { | 779 | head = READ_ONCE(lock->tickets.head); |
780 | if (__tickets_equal(head, want)) { | ||
776 | add_stats(TAKEN_SLOW_PICKUP, 1); | 781 | add_stats(TAKEN_SLOW_PICKUP, 1); |
777 | goto out; | 782 | goto out; |
778 | } | 783 | } |
@@ -803,8 +808,8 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket) | |||
803 | add_stats(RELEASED_SLOW, 1); | 808 | add_stats(RELEASED_SLOW, 1); |
804 | for_each_cpu(cpu, &waiting_cpus) { | 809 | for_each_cpu(cpu, &waiting_cpus) { |
805 | const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu); | 810 | const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu); |
806 | if (ACCESS_ONCE(w->lock) == lock && | 811 | if (READ_ONCE(w->lock) == lock && |
807 | ACCESS_ONCE(w->want) == ticket) { | 812 | READ_ONCE(w->want) == ticket) { |
808 | add_stats(RELEASED_SLOW_KICKED, 1); | 813 | add_stats(RELEASED_SLOW_KICKED, 1); |
809 | kvm_kick_cpu(cpu); | 814 | kvm_kick_cpu(cpu); |
810 | break; | 815 | break; |