path: root/arch/arm64
author    Will Deacon <will.deacon@arm.com>    2013-10-09 10:54:26 -0400
committer Catalin Marinas <catalin.marinas@arm.com>    2013-10-24 10:46:33 -0400
commit    52ea2a560a9dba57fe5fd6b4726b1089751accf2 (patch)
tree      5cd58ed43397b9f52fa3d63a11bc272f3a834967 /arch/arm64
parent    e29a074b44a9110c567fc31cdb12928f8eca7c79 (diff)
arm64: locks: introduce ticket-based spinlock implementation
This patch introduces a ticket lock implementation for arm64, along
the same lines as the implementation for arch/arm/.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
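For context on how a ticket lock behaves (the diff below only shows the arm64 asm), here is a minimal, hypothetical C11 sketch of the same owner/next protocol. It is an illustration under assumed names (ticket_lock_t and the helpers are not part of the patch): acquire takes a ticket by incrementing next and spins until owner catches up, release hands the lock on by bumping owner, and the status helpers compare the two halves much like the new arch_spin_is_locked()/arch_spin_is_contended().

/*
 * Illustrative sketch only. The kernel code uses LDAXR/STXR/STLRH and
 * WFE/SEVL on a packed { u16 owner; u16 next; } word; this version uses
 * standalone C11 atomics purely to show the owner/next handshake.
 */
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	_Atomic uint16_t owner;		/* ticket currently holding the lock */
	_Atomic uint16_t next;		/* next ticket to hand out */
} ticket_lock_t;			/* hypothetical name, not arch_spinlock_t */

void ticket_lock(ticket_lock_t *lock)
{
	/* Take a ticket (the patch does this with ldaxr/add/stxr on 'next'). */
	uint16_t ticket = atomic_fetch_add_explicit(&lock->next, 1,
						    memory_order_relaxed);

	/* Wait until 'owner' reaches our ticket; acquire pairs with unlock. */
	while (atomic_load_explicit(&lock->owner, memory_order_acquire) != ticket)
		;	/* the asm waits with wfe instead of spinning hot */
}

void ticket_unlock(ticket_lock_t *lock)
{
	/* Pass the lock to the next waiter: matches the stlrh of owner + 1. */
	uint16_t owner = atomic_load_explicit(&lock->owner, memory_order_relaxed);

	atomic_store_explicit(&lock->owner, (uint16_t)(owner + 1),
			      memory_order_release);
}

int ticket_is_locked(ticket_lock_t *lock)
{
	/* The patch snapshots the whole lock word with ACCESS_ONCE() instead. */
	return atomic_load_explicit(&lock->owner, memory_order_relaxed) !=
	       atomic_load_explicit(&lock->next, memory_order_relaxed);
}

int ticket_is_contended(ticket_lock_t *lock)
{
	/* More than one outstanding ticket means other CPUs are queueing. */
	return (uint16_t)(atomic_load_explicit(&lock->next, memory_order_relaxed) -
			  atomic_load_explicit(&lock->owner, memory_order_relaxed)) > 1;
}

The point of the scheme is FIFO fairness: tickets are granted in the order they were taken, unlike the old test-and-set style lock that simply stored 0 or 1.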
Diffstat (limited to 'arch/arm64')
-rw-r--r--   arch/arm64/include/asm/spinlock.h         79
-rw-r--r--   arch/arm64/include/asm/spinlock_types.h   10
2 files changed, 58 insertions(+), 31 deletions(-)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 0defa0728a9b..525dd535443e 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -22,17 +22,10 @@
 /*
  * Spinlock implementation.
  *
- * The old value is read exclusively and the new one, if unlocked, is written
- * exclusively. In case of failure, the loop is restarted.
- *
  * The memory barriers are implicit with the load-acquire and store-release
  * instructions.
- *
- * Unlocked value: 0
- * Locked value: 1
  */
 
-#define arch_spin_is_locked(x)	((x)->lock != 0)
 #define arch_spin_unlock_wait(lock) \
 	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
@@ -41,32 +34,51 @@
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned int tmp;
+	arch_spinlock_t lockval, newval;
 
 	asm volatile(
-	"	sevl\n"
-	"1:	wfe\n"
-	"2:	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 1b\n"
-	"	stxr	%w0, %w2, %1\n"
-	"	cbnz	%w0, 2b\n"
-	: "=&r" (tmp), "+Q" (lock->lock)
-	: "r" (1)
-	: "cc", "memory");
+	/* Atomically increment the next ticket. */
+"	prfm	pstl1strm, %3\n"
+"1:	ldaxr	%w0, %3\n"
+"	add	%w1, %w0, %w5\n"
+"	stxr	%w2, %w1, %3\n"
+"	cbnz	%w2, 1b\n"
+	/* Did we get the lock? */
+"	eor	%w1, %w0, %w0, ror #16\n"
+"	cbz	%w1, 3f\n"
+	/*
+	 * No: spin on the owner. Send a local event to avoid missing an
+	 * unlock before the exclusive load.
+	 */
+"	sevl\n"
+"2:	wfe\n"
+"	ldaxrh	%w2, %4\n"
+"	eor	%w1, %w2, %w0, lsr #16\n"
+"	cbnz	%w1, 2b\n"
+	/* We got the lock. Critical section starts here. */
+"3:"
+	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
+	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
+	: "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned int tmp;
+	arch_spinlock_t lockval;
 
 	asm volatile(
-	"2:	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 1f\n"
-	"	stxr	%w0, %w2, %1\n"
-	"	cbnz	%w0, 2b\n"
-	"1:\n"
-	: "=&r" (tmp), "+Q" (lock->lock)
-	: "r" (1)
-	: "cc", "memory");
+"	prfm	pstl1strm, %2\n"
+"1:	ldaxr	%w0, %2\n"
+"	eor	%w1, %w0, %w0, ror #16\n"
+"	cbnz	%w1, 2f\n"
+"	add	%w0, %w0, %3\n"
+"	stxr	%w1, %w0, %2\n"
+"	cbnz	%w1, 1b\n"
+"2:"
+	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
+	: "I" (1 << TICKET_SHIFT)
+	: "memory");
 
 	return !tmp;
 }
@@ -74,9 +86,24 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(
-	"	stlr	%w1, %0\n"
-	: "=Q" (lock->lock) : "r" (0) : "memory");
+"	stlrh	%w1, %0\n"
+	: "=Q" (lock->owner)
+	: "r" (lock->owner + 1)
+	: "memory");
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	arch_spinlock_t lockval = ACCESS_ONCE(*lock);
+	return lockval.owner != lockval.next;
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+	arch_spinlock_t lockval = ACCESS_ONCE(*lock);
+	return (lockval.next - lockval.owner) > 1;
 }
+#define arch_spin_is_contended	arch_spin_is_contended
 
 /*
  * Write lock implementation.
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 9a494346efed..87692750ed94 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,14 +20,14 @@
 # error "please don't include this file directly"
 #endif
 
-/* We only require natural alignment for exclusive accesses. */
-#define __lock_aligned
+#define TICKET_SHIFT	16
 
 typedef struct {
-	volatile unsigned int lock;
-} arch_spinlock_t;
+	u16 owner;
+	u16 next;
+} __aligned(4) arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 , 0 }
 
 typedef struct {
 	volatile unsigned int lock;