path: root/arch/x86/include/asm/spinlock.h
author     Jeremy Fitzhardinge <jeremy@goop.org>   2013-08-09 10:21:58 -0400
committer  H. Peter Anvin <hpa@linux.intel.com>    2013-08-09 10:54:00 -0400
commit     96f853eaa889c7a22718d275b0df7bebdbd6780e (patch)
tree       0e296eb2f7339efa2b6fbc0db008b35c463daf6c /arch/x86/include/asm/spinlock.h
parent     851cf6e7d6366195d4ee033cdc7787df1a649a14 (diff)
x86, ticketlock: Add slowpath logic
Maintain a flag in the LSB of the ticket lock tail which indicates whether
anyone is in the lock slowpath and may need kicking when the current holder
unlocks. The flag is set when the first locker enters the slowpath, and
cleared when unlocking to an empty queue (i.e., no contention).

In the specific implementation of lock_spinning(), make sure to set the
slowpath flag on the lock just before blocking. We must do this before the
last-chance pickup test to prevent a deadlock with the unlocker:

Unlocker                        Locker
                                test for lock pickup
                                        -> fail
unlock
test slowpath
        -> false
                                set slowpath flags
                                block

Whereas this works in any ordering:

Unlocker                        Locker
                                set slowpath flags
                                test for lock pickup
                                        -> fail
                                block
unlock
test slowpath
        -> true, kick

If the unlocker finds that the lock has the slowpath flag set but it is
actually uncontended (i.e., head == tail, so nobody is waiting), then it
clears the slowpath flag.

The unlock code uses a locked add to update the head counter. This also acts
as a full memory barrier, so it is safe to subsequently read back the
slowpath flag state, knowing that the updated lock is visible to the other
CPUs. If it were an unlocked add, the flag read might just be forwarded from
the store buffer before the update was visible to the other CPUs, which
could result in a deadlock.

Unfortunately this means we need to do a locked instruction when unlocking
with PV ticketlocks. However, if PV ticketlocks are not enabled, then the
old non-locked "add" is the only unlocking code.

Note: this code relies on gcc making sure that unlikely() code is out of
line of the fastpath, which only happens when OPTIMIZE_SIZE=n. If it
doesn't, the generated code isn't too bad, but it's definitely suboptimal.

Thanks to Srivatsa Vaddagiri for providing a bugfix to the original version
of this change, which has been folded in. Thanks to Stephan Diestelhorst for
commenting on some code which relied on an inaccurate reading of the x86
memory ordering rules.

Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
Link: http://lkml.kernel.org/r/1376058122-8248-11-git-send-email-raghavendra.kt@linux.vnet.ibm.com
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Stephan Diestelhorst <stephan.diestelhorst@amd.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
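As a minimal illustrative sketch of the waiter-side ordering described above
(not code from this patch): __ticket_enter_slowpath() and ACCESS_ONCE() are
taken from the diff below, while example_lock_spinning() and wait_for_kick()
are hypothetical stand-ins for the hypervisor-specific lock_spinning()
implementations, which live outside this header.

/*
 * Sketch of the "safe" ordering: publish the slowpath flag first, then
 * do the last-chance pickup test, then block.  wait_for_kick() is a
 * hypothetical blocking primitive.
 */
static void example_lock_spinning(arch_spinlock_t *lock, __ticket_t want)
{
        /* 1. Set the slowpath flag *before* the last-chance check. */
        __ticket_enter_slowpath(lock);

        /* 2. Last-chance pickup: the lock may already have been handed
         *    to us before the unlocker could see the flag. */
        if (ACCESS_ONCE(lock->tickets.head) == want)
                return;

        /* 3. Block: any unlocker running after step 1 sees the flag and
         *    kicks us via __ticket_unlock_kick(). */
        wait_for_kick(lock, want);
}

Swapping steps 1 and 2 reproduces the deadlock in the first diagram: the
unlocker can test the flag before the waiter has set it, so the waiter
blocks and is never kicked.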
Diffstat (limited to 'arch/x86/include/asm/spinlock.h')
-rw-r--r--   arch/x86/include/asm/spinlock.h   86
1 file changed, 62 insertions, 24 deletions
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 04a5cd5e97cb..d68883dd133c 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -1,11 +1,14 @@
 #ifndef _ASM_X86_SPINLOCK_H
 #define _ASM_X86_SPINLOCK_H
 
+#include <linux/jump_label.h>
 #include <linux/atomic.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <linux/compiler.h>
 #include <asm/paravirt.h>
+#include <asm/bitops.h>
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
@@ -37,32 +40,28 @@
 /* How long a lock should spin before we consider blocking */
 #define SPIN_THRESHOLD	(1 << 15)
 
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
+extern struct static_key paravirt_ticketlocks_enabled;
+static __always_inline bool static_key_false(struct static_key *key);
 
-static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
-						    __ticket_t ticket)
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
+static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
 {
+	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
 }
 
-static __always_inline void ____ticket_unlock_kick(struct arch_spinlock *lock,
-						    __ticket_t ticket)
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
+						    __ticket_t ticket)
 {
 }
-
-#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-
-
-/*
- * If a spinlock has someone waiting on it, then kick the appropriate
- * waiting cpu.
- */
-static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
-						 __ticket_t next)
+static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
+					__ticket_t ticket)
 {
-	if (unlikely(lock->tickets.tail != next))
-		____ticket_unlock_kick(lock, next);
 }
 
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
  * the queue, and the other indicating the current tail. The lock is acquired
@@ -76,20 +75,22 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
  * in the high part, because a wide xadd increment of the low part would carry
  * up and contaminate the high part.
  */
-static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
 
 	inc = xadd(&lock->tickets, inc);
+	if (likely(inc.head == inc.tail))
+		goto out;
 
+	inc.tail &= ~TICKET_SLOWPATH_FLAG;
 	for (;;) {
 		unsigned count = SPIN_THRESHOLD;
 
 		do {
-			if (inc.head == inc.tail)
+			if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
 				goto out;
 			cpu_relax();
-			inc.head = ACCESS_ONCE(lock->tickets.head);
 		} while (--count);
 		__ticket_lock_spinning(lock, inc.tail);
 	}
@@ -101,7 +102,7 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 	arch_spinlock_t old, new;
 
 	old.tickets = ACCESS_ONCE(lock->tickets);
-	if (old.tickets.head != old.tickets.tail)
+	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
 		return 0;
 
 	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
@@ -110,12 +111,49 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
+static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
+					    arch_spinlock_t old)
+{
+	arch_spinlock_t new;
+
+	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
+
+	/* Perform the unlock on the "before" copy */
+	old.tickets.head += TICKET_LOCK_INC;
+
+	/* Clear the slowpath flag */
+	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
+
+	/*
+	 * If the lock is uncontended, clear the flag - use cmpxchg in
+	 * case it changes behind our back though.
+	 */
+	if (new.tickets.head != new.tickets.tail ||
+	    cmpxchg(&lock->head_tail, old.head_tail,
+		    new.head_tail) != old.head_tail) {
+		/*
+		 * Lock still has someone queued for it, so wake up an
+		 * appropriate waiter.
+		 */
+		__ticket_unlock_kick(lock, old.tickets.head);
+	}
+}
+
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	__ticket_t next = lock->tickets.head + TICKET_LOCK_INC;
+	if (TICKET_SLOWPATH_FLAG &&
+	    static_key_false(&paravirt_ticketlocks_enabled)) {
+		arch_spinlock_t prev;
+
+		prev = *lock;
+		add_smp(&lock->tickets.head, TICKET_LOCK_INC);
+
+		/* add_smp() is a full mb() */
 
-	__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
-	__ticket_unlock_kick(lock, next);
+		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
+			__ticket_unlock_slowpath(lock, prev);
+	} else
+		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
 }
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
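The unlock fast path above stays a plain non-locked add only because
TICKET_SLOWPATH_FLAG is a compile-time constant. Its definition lies outside
this filtered diff (the diffstat is limited to spinlock.h); the sketch below
uses assumed names and values to show the intent, namely that the whole
slowpath branch in arch_spin_unlock() is folded away when PV ticketlocks are
disabled.

/* Sketch of the companion definitions (not in this file's diff; assumed). */
#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define TICKET_LOCK_INC		((__ticket_t)2)	/* tickets step by 2: bit 0 of tail is free */
#define TICKET_SLOWPATH_FLAG	((__ticket_t)1)	/* bit 0 = "someone is in the slowpath" */
#else
#define TICKET_LOCK_INC		((__ticket_t)1)
#define TICKET_SLOWPATH_FLAG	((__ticket_t)0)	/* constant 0: slowpath branch folds away */
#endif

With TICKET_SLOWPATH_FLAG fixed at 0, the condition "TICKET_SLOWPATH_FLAG &&
static_key_false(...)" is constant-false and only the original __add() unlock
remains, matching the commit message's note about the non-PV case.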