-rw-r--r--	arch/x86/include/asm/paravirt.h		  2
-rw-r--r--	arch/x86/include/asm/spinlock.h		 86
-rw-r--r--	arch/x86/include/asm/spinlock_types.h	  2
-rw-r--r--	arch/x86/kernel/paravirt-spinlocks.c	  3
-rw-r--r--	arch/x86/xen/spinlock.c			  6
5 files changed, 74 insertions(+), 25 deletions(-)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 7131e12cefa1..401f350ef71b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -718,7 +718,7 @@ static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
 	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
 }
 
-static __always_inline void ____ticket_unlock_kick(struct arch_spinlock *lock,
+static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
 							__ticket_t ticket)
 {
 	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 04a5cd5e97cb..d68883dd133c 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -1,11 +1,14 @@
 #ifndef _ASM_X86_SPINLOCK_H
 #define _ASM_X86_SPINLOCK_H
 
+#include <linux/jump_label.h>
 #include <linux/atomic.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <linux/compiler.h>
 #include <asm/paravirt.h>
+#include <asm/bitops.h>
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
@@ -37,32 +40,28 @@
 /* How long a lock should spin before we consider blocking */
 #define SPIN_THRESHOLD	(1 << 15)
 
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
+extern struct static_key paravirt_ticketlocks_enabled;
+static __always_inline bool static_key_false(struct static_key *key);
 
-static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
-							__ticket_t ticket)
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
+static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
 {
+	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
 }
 
-static __always_inline void ____ticket_unlock_kick(struct arch_spinlock *lock,
-							__ticket_t ticket)
+#else	/* !CONFIG_PARAVIRT_SPINLOCKS */
+static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
+							__ticket_t ticket)
 {
 }
+static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
+							__ticket_t ticket)
-
-#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
-
-
-/*
- * If a spinlock has someone waiting on it, then kick the appropriate
- * waiting cpu.
- */
-static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
-							__ticket_t next)
 {
-	if (unlikely(lock->tickets.tail != next))
-		____ticket_unlock_kick(lock, next);
 }
 
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
  * the queue, and the other indicating the current tail. The lock is acquired
@@ -76,20 +75,22 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
  * in the high part, because a wide xadd increment of the low part would carry
  * up and contaminate the high part.
  */
-static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
 
 	inc = xadd(&lock->tickets, inc);
+	if (likely(inc.head == inc.tail))
+		goto out;
 
+	inc.tail &= ~TICKET_SLOWPATH_FLAG;
 	for (;;) {
 		unsigned count = SPIN_THRESHOLD;
 
 		do {
-			if (inc.head == inc.tail)
+			if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
 				goto out;
 			cpu_relax();
-			inc.head = ACCESS_ONCE(lock->tickets.head);
 		} while (--count);
 		__ticket_lock_spinning(lock, inc.tail);
 	}
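
The comment above explains the layout this fast path relies on: the tail hands out tickets, the head says whose turn it is, and a single xadd on the packed word both takes a ticket and samples the current head. As a rough, stand-alone illustration of the same idea (not kernel code), here is a user-space sketch in C11 that keeps head and tail in separate atomic words and uses atomic_fetch_add() in place of xadd(); the names ticket_lock_t, ticket_lock() and ticket_unlock() are invented for the example.

/*
 * Illustrative user-space ticket lock, analogous to arch_spin_lock() above.
 * C11 atomics stand in for the kernel's xadd()/ACCESS_ONCE(); the names are
 * hypothetical and not part of this patch.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
	atomic_ushort head;	/* ticket currently being served */
	atomic_ushort tail;	/* next ticket to hand out */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
	/* Take a ticket: fetch-and-add on the tail (the xadd in the patch). */
	unsigned short me = atomic_fetch_add(&lock->tail, 1);

	/* Spin until the head reaches our ticket (FIFO order). */
	while (atomic_load(&lock->head) != me)
		;	/* the kernel would cpu_relax() here */
}

static void ticket_unlock(ticket_lock_t *lock)
{
	/* Advance the head; the next waiter in line sees its ticket come up. */
	atomic_fetch_add(&lock->head, 1);
}

int main(void)
{
	ticket_lock_t lock = { 0, 0 };

	ticket_lock(&lock);
	puts("lock held");
	ticket_unlock(&lock);
	return 0;
}

The kernel packs head and tail into one word so a single xadd allocates the ticket and returns a snapshot of the head in the same instruction; the sketch splits them only for readability.
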
@@ -101,7 +102,7 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 	arch_spinlock_t old, new;
 
 	old.tickets = ACCESS_ONCE(lock->tickets);
-	if (old.tickets.head != old.tickets.tail)
+	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
 		return 0;
 
 	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
@@ -110,12 +111,49 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
+static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
+					    arch_spinlock_t old)
+{
+	arch_spinlock_t new;
+
+	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
+
+	/* Perform the unlock on the "before" copy */
+	old.tickets.head += TICKET_LOCK_INC;
+
+	/* Clear the slowpath flag */
+	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
+
+	/*
+	 * If the lock is uncontended, clear the flag - use cmpxchg in
+	 * case it changes behind our back though.
+	 */
+	if (new.tickets.head != new.tickets.tail ||
+	    cmpxchg(&lock->head_tail, old.head_tail,
+					new.head_tail) != old.head_tail) {
+		/*
+		 * Lock still has someone queued for it, so wake up an
+		 * appropriate waiter.
+		 */
+		__ticket_unlock_kick(lock, old.tickets.head);
+	}
+}
+
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	__ticket_t next = lock->tickets.head + TICKET_LOCK_INC;
+	if (TICKET_SLOWPATH_FLAG &&
+	    static_key_false(&paravirt_ticketlocks_enabled)) {
+		arch_spinlock_t prev;
+
+		prev = *lock;
+		add_smp(&lock->tickets.head, TICKET_LOCK_INC);
+
+		/* add_smp() is a full mb() */
 
-	__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
-	__ticket_unlock_kick(lock, next);
+		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
+			__ticket_unlock_slowpath(lock, prev);
+	} else
+		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
 }
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
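
What the new unlock path above boils down to: with CONFIG_PARAVIRT_SPINLOCKS, TICKET_LOCK_INC is 2, so ticket values only use even numbers and bit 0 of the tail is free to act as TICKET_SLOWPATH_FLAG, meaning "a waiter has gone to sleep and needs a kick". Below is a minimal user-space sketch of just that unlock-side decision, assuming a waiter has already set the flag; it omits the cmpxchg-based clearing of the flag that __ticket_unlock_slowpath() performs, and the names my_lock_t, my_unlock() and pv_kick_waiter() are invented for the example.

/*
 * Sketch of the unlock-side slowpath test.  Bit 0 of the tail is usable as
 * a flag because real tickets advance in steps of 2.  Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define LOCK_INC	2u
#define SLOWPATH_FLAG	1u

typedef struct {
	uint8_t head;
	uint8_t tail;	/* bit 0 doubles as the "waiter asleep" flag */
} my_lock_t;

static void pv_kick_waiter(my_lock_t *lock, uint8_t ticket)
{
	(void)lock;
	printf("kick the waiter holding ticket %u\n", (unsigned int)ticket);
}

static void my_unlock(my_lock_t *lock)
{
	uint8_t prev_head = lock->head;

	lock->head += LOCK_INC;	/* the patch does this with add_smp() */

	/* Fast path: if nobody flagged themselves, there is nothing more to do. */
	if (lock->tail & SLOWPATH_FLAG)
		pv_kick_waiter(lock, (uint8_t)(prev_head + LOCK_INC));
}

int main(void)
{
	my_lock_t lock = { .head = 0, .tail = LOCK_INC | SLOWPATH_FLAG };

	my_unlock(&lock);	/* flag is set, so the waiter gets kicked */
	return 0;
}

The ticket passed to the kick is the new head, i.e. the waiter whose turn it now is, which mirrors __ticket_unlock_kick(lock, old.tickets.head) after old.tickets.head has been advanced.
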
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index e96fcbdfbc07..4f1bea19945b 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -5,8 +5,10 @@
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 #define __TICKET_LOCK_INC	2
+#define TICKET_SLOWPATH_FLAG	((__ticket_t)1)
 #else
 #define __TICKET_LOCK_INC	1
+#define TICKET_SLOWPATH_FLAG	((__ticket_t)0)
 #endif
 
 #if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
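
Defining TICKET_SLOWPATH_FLAG as a compile-time constant 0 in the non-paravirt case is what keeps the unified arch_spin_unlock() free on bare metal: the test "if (TICKET_SLOWPATH_FLAG && static_key_false(...))" folds to a constant false and the whole paravirt branch is dropped by the compiler. A tiny stand-alone illustration of the same constant-folding pattern, with made-up names (FEATURE_FLAG, expensive_slowpath), assuming any optimizing C compiler:

/*
 * Constant-flag pattern: with FEATURE_FLAG == 0 the compiler removes the
 * call to expensive_slowpath() entirely, just as the 0 value of
 * TICKET_SLOWPATH_FLAG removes the paravirt unlock path.  Names are made up.
 */
#include <stdio.h>

#define FEATURE_FLAG	0	/* compare: TICKET_SLOWPATH_FLAG without
				   CONFIG_PARAVIRT_SPINLOCKS */

static int runtime_condition(void)
{
	return 1;
}

static void expensive_slowpath(void)
{
	puts("slowpath");
}

int main(void)
{
	if (FEATURE_FLAG && runtime_condition())
		expensive_slowpath();	/* dead code when FEATURE_FLAG == 0 */
	else
		puts("fastpath only");
	return 0;
}
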
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 4251c1d4c0be..bbb6c7316341 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -4,6 +4,7 @@
  */
 #include <linux/spinlock.h>
 #include <linux/module.h>
+#include <linux/jump_label.h>
 
 #include <asm/paravirt.h>
 
@@ -15,3 +16,5 @@ struct pv_lock_ops pv_lock_ops = {
 };
 EXPORT_SYMBOL(pv_lock_ops);
 
+struct static_key paravirt_ticketlocks_enabled = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL(paravirt_ticketlocks_enabled);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 6c8792b298ed..546112ed463f 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -157,6 +157,10 @@ static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 	/* Only check lock once pending cleared */
 	barrier();
 
+	/* Mark entry to slowpath before doing the pickup test to make
+	   sure we don't deadlock with an unlocker. */
+	__ticket_enter_slowpath(lock);
+
 	/* check again make sure it didn't become free while
 	   we weren't looking */
 	if (ACCESS_ONCE(lock->tickets.head) == want) {
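
The ordering spelled out in the new comment is the subtle part: the waiter must publish the slowpath flag before its final check of the head. Done the other way round, an unlocker could release the lock in the window between the waiter's last check and the flag being set, see no flag, send no kick, and leave the waiter blocked forever. A rough user-space sketch of the waiter-side ordering follows; C11 atomics stand in for the kernel's barriers, and the names (slowpath_flag, want, block_self, wait_slowpath) are invented for the example.

/*
 * Waiter-side ordering sketch: (1) set the flag, (2) re-check, (3) block.
 * Swapping (1) and (2) opens a window in which an unlock is missed and the
 * kick is never sent.  Not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint head;		/* ticket currently being served */
static atomic_bool slowpath_flag;	/* "a waiter is about to sleep" */

static void block_self(void)
{
	/* stand-in for the real blocking wait (futex, hypercall, ...) */
	puts("blocking until kicked");
}

static void wait_slowpath(unsigned int want)
{
	/* 1. Advertise that we are about to sleep (__ticket_enter_slowpath). */
	atomic_store(&slowpath_flag, true);

	/* 2. Only then re-check; the lock may have become ours meanwhile. */
	if (atomic_load(&head) == want) {
		puts("lock became free, no need to block");
		return;
	}

	/* 3. Safe to block: any unlocker running after step 1 sees the flag
	 *    and sends a kick, so the wakeup cannot be lost. */
	block_self();
}

int main(void)
{
	wait_slowpath(0);	/* head == want == 0 here, so we take the early exit */
	return 0;
}
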
@@ -261,6 +265,8 @@ void __init xen_init_spinlocks(void)
 		return;
 	}
 
+	static_key_slow_inc(&paravirt_ticketlocks_enabled);
+
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
 	pv_lock_ops.unlock_kick = xen_unlock_kick;
 }