author    Will Deacon <will.deacon@arm.com>          2012-07-06 10:43:41 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk> 2012-07-09 12:41:10 -0400
commit    546c2896a42202dbc7d02f7c6ec9948ac1bf511b
tree      8b83e208144d3c157984946e77ec59d1545d0537
parent    575320d625d5b5eb115575a1f5e17af456e69577
ARM: 7446/1: spinlock: use ticket algorithm for ARMv6+ locking implementation
Ticket spinlocks ensure locking fairness by introducing a FIFO-like nature to
the granting of lock acquisitions and also reducing the thundering herd effect
when spinning on a lock by allowing the cacheline to remain in a shared state
amongst the waiting CPUs. This is especially important on systems where
memory-access times are not necessarily uniform when accessing the lock
structure (for example, on a multi-cluster platform where the lock is
allocated into L1 when a CPU releases it).

This patch implements the ticket spinlock algorithm for ARM, replacing the
simpler implementation for ARMv6+ processors.

Reviewed-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
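[Editor's note: for readers new to the algorithm, below is a minimal, generic
C11 sketch of a ticket lock illustrating the FIFO hand-off described above.
It is not the kernel's code: the names (ticket_lock, acquire, release) and the
use of <stdatomic.h> are purely illustrative; the patch below does the same
thing with a single 32-bit lock word and ldrex/strex.]

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative ticket lock, not the kernel implementation. */
struct ticket_lock {
	_Atomic uint16_t next;   /* next ticket to hand out       */
	_Atomic uint16_t owner;  /* ticket currently being served */
};

void ticket_lock_acquire(struct ticket_lock *l)
{
	/* Atomically take a ticket; the fetch-add gives FIFO ordering. */
	uint16_t ticket = atomic_fetch_add_explicit(&l->next, 1,
						    memory_order_relaxed);

	/* Spin with plain loads until our ticket is served; read-only
	 * spinning lets the cacheline stay shared amongst the waiters. */
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != ticket)
		;	/* the ARM code sleeps in wfe() here instead of busy-waiting */
}

void ticket_lock_release(struct ticket_lock *l)
{
	/* Hand the lock to the next waiter in ticket order. */
	uint16_t owner = atomic_load_explicit(&l->owner, memory_order_relaxed);
	atomic_store_explicit(&l->owner, owner + 1, memory_order_release);
}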
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/spinlock.h        72
-rw-r--r--  arch/arm/include/asm/spinlock_types.h  17
2 files changed, 63 insertions(+), 26 deletions(-)
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 65fa3c88095c..0da2effd4b37 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -59,18 +59,13 @@ static inline void dsb_sev(void)
 }
 
 /*
- * ARMv6 Spin-locking.
+ * ARMv6 ticket-based spin-locking.
  *
- * We exclusively read the old value.  If it is zero, we may have
- * won the lock, so we try exclusively storing it.  A memory barrier
- * is required after we get a lock, and before we release it, because
- * V6 CPUs are assumed to have weakly ordered memory.
- *
- * Unlocked value: 0
- * Locked value: 1
+ * A memory barrier is required after we get a lock, and before we
+ * release it, because V6 CPUs are assumed to have weakly ordered
+ * memory.
  */
 
-#define arch_spin_is_locked(x)		((x)->lock != 0)
 #define arch_spin_unlock_wait(lock) \
 	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
@@ -79,31 +74,39 @@ static inline void dsb_sev(void)
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
+	u32 newval;
+	arch_spinlock_t lockval;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-	WFE("ne")
-"	strexeq	%0, %2, [%1]\n"
-"	teqeq	%0, #0\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%1, %0, %4\n"
+"	strex	%2, %1, [%3]\n"
+"	teq	%2, #0\n"
 "	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
+	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
+	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 
+	while (lockval.tickets.next != lockval.tickets.owner) {
+		wfe();
+		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+	}
+
 	smp_mb();
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
+	u32 slock;
 
 	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
+"	ldrex	%0, [%2]\n"
+"	subs	%1, %0, %0, ror #16\n"
+"	addeq	%0, %0, %3\n"
+"	strexeq	%1, %0, [%2]"
+	: "=&r" (slock), "=&r" (tmp)
+	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 
 	if (tmp == 0) {
@@ -116,17 +119,38 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+	unsigned long tmp;
+	u32 slock;
+
 	smp_mb();
 
 	__asm__ __volatile__(
-"	str	%1, [%0]\n"
-	:
-	: "r" (&lock->lock), "r" (0)
+"	mov	%1, #1\n"
+"1:	ldrex	%0, [%2]\n"
+"	uadd16	%0, %0, %1\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (slock), "=&r" (tmp)
+	: "r" (&lock->slock)
 	: "cc");
 
 	dsb_sev();
 }
 
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+	return tickets.owner != tickets.next;
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+	return (tickets.next - tickets.owner) > 1;
+}
+#define arch_spin_is_contended	arch_spin_is_contended
+
 /*
  * RWLOCKS
  *
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index d14d197ae04a..b262d2f8b478 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -5,11 +5,24 @@
 # error "please don't include this file directly"
 #endif
 
+#define TICKET_SHIFT	16
+
 typedef struct {
-	volatile unsigned int lock;
+	union {
+		u32 slock;
+		struct __raw_tickets {
+#ifdef __ARMEB__
+			u16 next;
+			u16 owner;
+#else
+			u16 owner;
+			u16 next;
+#endif
+		} tickets;
+	};
 } arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
 
 typedef struct {
 	volatile unsigned int lock;
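
[Editor's note: the layout chosen in spinlock_types.h keeps "owner" in the
arithmetically low halfword of the 32-bit slock on either endianness (hence
the __ARMEB__ switch). That is what lets arch_spin_trylock detect an
uncontended lock with "subs %1, %0, %0, ror #16" (rotating by 16 swaps the
halfwords, so the subtraction is zero only when owner == next), take a ticket
by adding 1 << TICKET_SHIFT, and lets arch_spin_unlock bump the owner with
uadd16 without carrying into the next field. The following host-side C sketch
of that halfword arithmetic is illustrative only, assuming the little-endian
field order; the helper names are not from the patch.]

#include <assert.h>
#include <stdint.h>

/* Build a lock word with "owner" in the low halfword and "next" in the
 * high halfword, mirroring the little-endian layout in the patch. */
static uint32_t make_slock(uint16_t owner, uint16_t next)
{
	return (uint32_t)next << 16 | owner;
}

int main(void)
{
	uint32_t slock = make_slock(3, 3);	/* owner == next: unlocked */

	/* trylock's test: slock - ror(slock, 16) is zero iff the halves match */
	uint32_t ror16 = slock >> 16 | slock << 16;
	assert(slock - ror16 == 0);

	/* take a ticket, as "addeq %0, %0, %3" does with %3 = 1 << TICKET_SHIFT */
	slock += 1u << 16;			/* next = 4, owner = 3: locked */

	/* unlock: add 1 to the owner halfword only, as uadd16 does, with no
	 * carry propagating into the next halfword even on wrap-around */
	slock = (slock & 0xffff0000u) | (uint16_t)((slock & 0xffffu) + 1);

	assert(slock == make_slock(4, 4));	/* owner == next: unlocked again */
	return 0;
}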