author	Thomas Gleixner <tglx@linutronix.de>	2009-12-03 14:01:19 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2009-12-14 17:55:32 -0500
commit	fb3a6bbc912b12347614e5742c7c61416cdb0ca0 (patch)
tree	f9dbf8dab23cea6f033a58672ba16abf2ae09ebd /arch/powerpc/include/asm/spinlock.h
parent	0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 (diff)
locking: Convert raw_rwlock to arch_rwlock
Not strictly necessary for -rt, as -rt does not have non-sleeping
rwlocks, but it's odd not to have a consistent naming convention.
No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
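
For reference, the conversion is purely mechanical: the arch-level rwlock
type keeps its layout and semantics and only changes its name. A minimal
sketch of the powerpc definition after this patch (adapted from
arch/powerpc/include/asm/spinlock_types.h; reproduced from memory, so treat
the exact contents as an approximation):

/* Sketch of spinlock_types.h after the rename; only the name changes. */
typedef struct {
	volatile signed int lock;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }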
Diffstat (limited to 'arch/powerpc/include/asm/spinlock.h')
-rw-r--r--	arch/powerpc/include/asm/spinlock.h	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index cdcaf6b97087..2fad2c07c593 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -97,7 +97,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_lppaca()->shared_proc)
 extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(raw_rwlock_t *lock);
+extern void __rw_yield(arch_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
 #define __spin_yield(x)	barrier()
 #define __rw_yield(x)	barrier()
@@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
-static inline long arch_read_trylock(raw_rwlock_t *rw)
+static inline long arch_read_trylock(arch_rwlock_t *rw)
 {
 	long tmp;
 
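
The comment in this hunk encodes the powerpc rwlock convention: the lock
word is a signed count that is positive while readers hold the lock and
negative while a writer does. Below is a hedged sketch of that convention
using GCC __atomic builtins in place of the real lwarx/stwcx. assembly;
sketch_rwlock_t and sketch_read_trylock are hypothetical names for
illustration, not kernel code:

/* Model of the lock word: >= 0 means "n readers", negative means a
 * writer holds it (the real code parks a negative token there). */
typedef struct { int lock; } sketch_rwlock_t;

/* Returns "old value + 1", mirroring arch_read_trylock(): the result
 * is > 0 exactly when no writer held the lock and our increment of
 * the reader count took effect. */
static long sketch_read_trylock(sketch_rwlock_t *rw)
{
	int old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);

	while (old >= 0) {
		/* No writer: try to bump the reader count by one. */
		if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1,
						false, __ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return (long)old + 1;	/* > 0: read lock taken */
		/* CAS failed and refreshed 'old'; retry while writer-free. */
	}
	return (long)old + 1;	/* <= 0: a writer holds the lock */
}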
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw_rwlock_t *rw)
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static inline long arch_write_trylock(raw_rwlock_t *rw)
+static inline long arch_write_trylock(arch_rwlock_t *rw)
 {
 	long tmp, token;
 
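
Continuing the sketch above, the write side returns the old lock value, so
0 signals success. The writer installs a negative token (hypothetically -1
here; the real token is arch-specific):

/* Mirrors arch_write_trylock(): returns the old lock value, so 0
 * means the word was free and now carries the writer token. */
static long sketch_write_trylock(sketch_rwlock_t *rw)
{
	int old = 0;	/* only an unlocked (0) word can be claimed */

	if (__atomic_compare_exchange_n(&rw->lock, &old, -1, false,
					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return 0;	/* success: old value was 0 */
	return old;	/* non-zero: readers or a writer hold the lock */
}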
@@ -225,7 +225,7 @@ static inline long arch_write_trylock(raw_rwlock_t *rw)
 	return tmp;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
 {
 	while (1) {
 		if (likely(arch_read_trylock(rw) > 0))
@@ -239,7 +239,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 	}
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
 {
 	while (1) {
 		if (likely(arch_write_trylock(rw) == 0))
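
The two lock loops above share one slow path: spin on the trylock, and
between attempts yield to the hypervisor (via __rw_yield(), guarded by
SHARED_PROCESSOR) so the virtual CPU that holds the lock can run. A hedged
sketch of the pattern, reusing the trylock sketches above and replacing the
hypervisor yield with a plain busy wait:

/* Pattern of __raw_write_lock(): try, then wait until the word looks
 * free before trying again.  A pseries build would call __rw_yield()
 * here when SHARED_PROCESSOR is set; this sketch just spins. */
static void sketch_write_lock(sketch_rwlock_t *rw)
{
	while (sketch_write_trylock(rw) != 0) {
		while (__atomic_load_n(&rw->lock, __ATOMIC_RELAXED) != 0)
			;	/* busy wait; the real code yields here */
	}
}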
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	}
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
 {
 	return arch_read_trylock(rw) > 0;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
 {
 	return arch_write_trylock(rw) == 0;
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
 {
 	long tmp;
 
@@ -280,7 +280,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 	: "cr0", "xer", "memory");
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
 {
 	__asm__ __volatile__("# write_unlock\n\t"
 				LWSYNC_ON_SMP: : :"memory");
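
The unlock paths close out the touched region: __raw_write_unlock() issues
an lwsync (LWSYNC_ON_SMP) so that stores from the critical section are
ordered before the lock word is cleared, and __raw_read_unlock() atomically
drops the reader count. In the sketch's terms, __ATOMIC_RELEASE stands in
for that barrier:

/* Release semantics play the role of the lwsync in the real code:
 * critical-section stores become visible before the lock word changes. */
static void sketch_read_unlock(sketch_rwlock_t *rw)
{
	__atomic_fetch_sub(&rw->lock, 1, __ATOMIC_RELEASE);	/* drop one reader */
}

static void sketch_write_unlock(sketch_rwlock_t *rw)
{
	__atomic_store_n(&rw->lock, 0, __ATOMIC_RELEASE);	/* clear writer token */
}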