author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2005-07-26 14:44:26 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2005-07-26 14:44:26 -0400
commit		6d9b37a3a80195d317887ff81aad6a58a66954b5 (patch)
tree		98d1c96416b660070b1fd37dbbc0c80d39c55b98 /include/asm-arm/spinlock.h
parent		9560782f9a68a5de3e72bc3ba71317f373844549 (diff)
[PATCH] ARM SMP: Add ARMv6 memory barriers
Convert explicit gcc asm-based memory barriers into smp_mb() calls.
These change between barrier() and the ARMv6 data memory barrier
instruction depending on whether ARMv6 SMP is enabled.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
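
For context, the smp_mb() selection the message describes can be sketched in C. This is an illustrative reconstruction, not the literal kernel definitions: the config test and macro layout here are assumptions, though the instruction itself (a CP15 c7, c10, 5 write) is the ARMv6 data memory barrier referred to, and barrier() is only a compiler barrier.

/*
 * Sketch of the smp_mb() selection (illustrative; the real
 * definitions live in the ARM system/barrier headers).
 */
#if defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 6
/* ARMv6 Data Memory Barrier via CP15: orders memory accesses */
#define smp_mb()						\
	__asm__ __volatile__("mcr	p15, 0, %0, c7, c10, 5"	\
			     : : "r" (0) : "memory")
#else
/* UP or pre-v6: a compiler barrier is sufficient */
#define smp_mb()	barrier()
#endif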
Diffstat (limited to 'include/asm-arm/spinlock.h')
 include/asm-arm/spinlock.h | 53 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 38 insertions(+), 15 deletions(-)
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 9705d5eec94c..1f906d09b688 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -8,9 +8,10 @@
 /*
  * ARMv6 Spin-locking.
  *
- * We (exclusively) read the old value, and decrement it. If it
- * hits zero, we may have won the lock, so we try (exclusively)
- * storing it.
+ * We exclusively read the old value. If it is zero, we may have
+ * won the lock, so we try exclusively storing it. A memory barrier
+ * is required after we get a lock, and before we release it, because
+ * V6 CPUs are assumed to have weakly ordered memory.
  *
  * Unlocked value: 0
  * Locked value: 1
@@ -41,7 +42,9 @@ static inline void _raw_spin_lock(spinlock_t *lock)
 "	bne	1b"
 	: "=&r" (tmp)
 	: "r" (&lock->lock), "r" (1)
-	: "cc", "memory");
+	: "cc");
+
+	smp_mb();
 }
 
 static inline int _raw_spin_trylock(spinlock_t *lock)
@@ -54,18 +57,25 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
 "	strexeq	%0, %2, [%1]"
 	: "=&r" (tmp)
 	: "r" (&lock->lock), "r" (1)
-	: "cc", "memory");
+	: "cc");
 
-	return tmp == 0;
+	if (tmp == 0) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 static inline void _raw_spin_unlock(spinlock_t *lock)
 {
+	smp_mb();
+
 	__asm__ __volatile__(
 "	str	%1, [%0]"
 	:
 	: "r" (&lock->lock), "r" (0)
-	: "cc", "memory");
+	: "cc");
 }
 
 /*
@@ -98,7 +108,9 @@ static inline void _raw_write_lock(rwlock_t *rw)
 "	bne	1b"
 	: "=&r" (tmp)
 	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc", "memory");
+	: "cc");
+
+	smp_mb();
 }
 
 static inline int _raw_write_trylock(rwlock_t *rw)
@@ -111,18 +123,25 @@ static inline int _raw_write_trylock(rwlock_t *rw)
 "	strexeq	%0, %2, [%1]"
 	: "=&r" (tmp)
 	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc", "memory");
+	: "cc");
 
-	return tmp == 0;
+	if (tmp == 0) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 static inline void _raw_write_unlock(rwlock_t *rw)
 {
+	smp_mb();
+
 	__asm__ __volatile__(
 	"str	%1, [%0]"
 	:
 	: "r" (&rw->lock), "r" (0)
-	: "cc", "memory");
+	: "cc");
 }
 
 /*
@@ -149,13 +168,17 @@ static inline void _raw_read_lock(rwlock_t *rw)
 "	bmi	1b"
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
-	: "cc", "memory");
+	: "cc");
+
+	smp_mb();
 }
 
 static inline void _raw_read_unlock(rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
+	smp_mb();
+
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	sub	%0, %0, #1\n"
@@ -164,7 +187,7 @@ static inline void _raw_read_unlock(rwlock_t *rw)
 "	bne	1b"
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
-	: "cc", "memory");
+	: "cc");
 }
 
 #define _raw_read_trylock(lock)	generic_raw_read_trylock(lock)
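
To make the new barrier placement concrete, here is a hypothetical caller; the lock and the guarded variable are invented for illustration. The smp_mb() issued after a successful acquire keeps critical-section accesses from being reordered ahead of the lock, and the smp_mb() issued before the unlocking str keeps them from sinking past the release. Note that the trylock paths only pay for a barrier on success, which is why return tmp == 0 was split into an if/else.

static spinlock_t buf_lock;	/* hypothetical lock */
static int buf_len;		/* hypothetical data guarded by buf_lock */

static void buf_set(int len)
{
	/* smp_mb() after acquire: the store below cannot be
	 * pulled above the lock by a weakly ordered V6 CPU */
	_raw_spin_lock(&buf_lock);
	buf_len = len;
	/* smp_mb() before release: the store above cannot
	 * drift past the unlocking str */
	_raw_spin_unlock(&buf_lock);
}

static int buf_try_set(int len)
{
	if (!_raw_spin_trylock(&buf_lock))
		return 0;	/* lock not taken: no barrier needed */
	buf_len = len;
	_raw_spin_unlock(&buf_lock);
	return 1;
}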